Dataset schema (one record per repository file; all columns are strings):

  repo_name: string, length 5-85
  path:      string, length 3-252
  copies:    string, length 1-5
  size:      string, length 4-6
  content:   string, length 922-999k
  license:   string, 15 classes
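Each record below lists these six fields in schema order. As orientation only, here is a minimal C sketch of a struct mirroring one row; the struct name, buffer sizes, and demo values are mine, not part of the dataset:

#include <stdio.h>

/* Illustrative only: one row of the dataset. All columns are strings;
 * "content" can approach 1 MB, so it is a pointer rather than an
 * inline buffer. Array sizes add one byte for the terminating NUL. */
struct code_row {
    char repo_name[86];   /* 5-85 chars  */
    char path[253];       /* 3-252 chars */
    char copies[6];       /* 1-5 chars   */
    char size[7];         /* 4-6 chars   */
    char *content;        /* 922 bytes up to ~999k */
    char license[16];     /* one of 15 license classes */
};

int main(void)
{
    struct code_row row = { .repo_name = "dduval/kernel-rhel6",
                            .path = "kernel/power/swsusp.c",
                            .content = NULL };
    printf("%s:%s\n", row.repo_name, row.path);
    return 0;
}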
repo_name: dduval/kernel-rhel6
path: kernel/power/swsusp.c
copies: 527
size: 4781
content:
/* * linux/kernel/power/swsusp.c * * This file provides code to write suspend image to swap and read it back. * * Copyright (C) 1998-2001 Gabor Kuti <seasons@fornax.hu> * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@suse.cz> * * This file is released under the GPLv2. * * I'd like to thank the following people for their work: * * Pavel Machek <pavel@ucw.cz>: * Modifications, defectiveness pointing, being with me at the very beginning, * suspend to swap space, stop all tasks. Port to 2.4.18-ac and 2.5.17. * * Steve Doddi <dirk@loth.demon.co.uk>: * Support the possibility of hardware state restoring. * * Raph <grey.havens@earthling.net>: * Support for preserving states of network devices and virtual console * (including X and svgatextmode) * * Kurt Garloff <garloff@suse.de>: * Straightened the critical function in order to prevent compilers from * playing tricks with local variables. * * Andreas Mohr <a.mohr@mailto.de> * * Alex Badea <vampire@go.ro>: * Fixed runaway init * * Rafael J. Wysocki <rjw@sisk.pl> * Reworked the freeing of memory and the handling of swap * * More state savers are welcome. Especially for the scsi layer... * * For TODOs,FIXMEs also look in Documentation/power/swsusp.txt */ #include <linux/mm.h> #include <linux/suspend.h> #include <linux/spinlock.h> #include <linux/kernel.h> #include <linux/major.h> #include <linux/swap.h> #include <linux/pm.h> #include <linux/swapops.h> #include <linux/bootmem.h> #include <linux/syscalls.h> #include <linux/highmem.h> #include <linux/time.h> #include <linux/rbtree.h> #include <linux/io.h> #include "power.h" int in_suspend __nosavedata = 0; /** * The following functions are used for tracing the allocated * swap pages, so that they can be freed in case of an error. */ struct swsusp_extent { struct rb_node node; unsigned long start; unsigned long end; }; static struct rb_root swsusp_extents = RB_ROOT; static int swsusp_extents_insert(unsigned long swap_offset) { struct rb_node **new = &(swsusp_extents.rb_node); struct rb_node *parent = NULL; struct swsusp_extent *ext; /* Figure out where to put the new node */ while (*new) { ext = container_of(*new, struct swsusp_extent, node); parent = *new; if (swap_offset < ext->start) { /* Try to merge */ if (swap_offset == ext->start - 1) { ext->start--; return 0; } new = &((*new)->rb_left); } else if (swap_offset > ext->end) { /* Try to merge */ if (swap_offset == ext->end + 1) { ext->end++; return 0; } new = &((*new)->rb_right); } else { /* It already is in the tree */ return -EINVAL; } } /* Add the new node and rebalance the tree. */ ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL); if (!ext) return -ENOMEM; ext->start = swap_offset; ext->end = swap_offset; rb_link_node(&ext->node, parent, new); rb_insert_color(&ext->node, &swsusp_extents); return 0; } /** * alloc_swapdev_block - allocate a swap page and register that it has * been allocated, so that it can be freed in case of an error. */ sector_t alloc_swapdev_block(int swap) { unsigned long offset; offset = swp_offset(get_swap_page_of_type(swap)); if (offset) { if (swsusp_extents_insert(offset)) swap_free(swp_entry(swap, offset)); else return swapdev_block(swap, offset); } return 0; } /** * free_all_swap_pages - free swap pages allocated for saving image data. * It also frees the extents used to register which swap entres had been * allocated. 
*/ void free_all_swap_pages(int swap) { struct rb_node *node; while ((node = swsusp_extents.rb_node)) { struct swsusp_extent *ext; unsigned long offset; ext = container_of(node, struct swsusp_extent, node); rb_erase(node, &swsusp_extents); for (offset = ext->start; offset <= ext->end; offset++) swap_free(swp_entry(swap, offset)); kfree(ext); } } int swsusp_swap_in_use(void) { return (swsusp_extents.rb_node != NULL); } /** * swsusp_show_speed - print the time elapsed between two events represented by * @start and @stop * * @nr_pages - number of pages processed between @start and @stop * @msg - introductory message to print */ void swsusp_show_speed(struct timeval *start, struct timeval *stop, unsigned nr_pages, char *msg) { s64 elapsed_centisecs64; int centisecs; int k; int kps; elapsed_centisecs64 = timeval_to_ns(stop) - timeval_to_ns(start); do_div(elapsed_centisecs64, NSEC_PER_SEC / 100); centisecs = elapsed_centisecs64; if (centisecs == 0) centisecs = 1; /* avoid div-by-zero */ k = nr_pages * (PAGE_SIZE / 1024); kps = (k * 100) / centisecs; printk(KERN_INFO "PM: %s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n", msg, k, centisecs / 100, centisecs % 100, kps / 1000, (kps % 1000) / 10); }
license: gpl-2.0
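The notable technique in swsusp_extents_insert() above is that an offset adjacent to an existing extent widens it rather than allocating a new node, so N contiguous swap pages cost one tree node. A hedged userspace analogue of that merge-on-insert idea, using a sorted singly linked list in place of the kernel rbtree (all names here are invented for illustration):

#include <stdio.h>
#include <stdlib.h>

/* One [start, end] run of allocated offsets, kept in ascending order. */
struct extent {
    unsigned long start, end;
    struct extent *next;
};

/* Insert offset; returns 0 on success, -1 if already tracked or OOM.
 * Like the kernel version, an offset touching an existing extent
 * simply grows that extent instead of allocating a new node. */
static int extent_insert(struct extent **head, unsigned long off)
{
    struct extent **pp = head, *e;

    for (e = *head; e; pp = &e->next, e = e->next) {
        if (off + 1 < e->start)
            break;                  /* strictly before: new node */
        if (off + 1 == e->start) {  /* merge on the left edge */
            e->start--;
            return 0;
        }
        if (off <= e->end)          /* already inside this extent */
            return -1;
        if (off == e->end + 1) {    /* merge on the right edge */
            e->end++;
            return 0;
        }
    }
    e = malloc(sizeof(*e));
    if (!e)
        return -1;
    e->start = e->end = off;
    e->next = *pp;
    *pp = e;
    return 0;
}

int main(void)
{
    struct extent *head = NULL, *e;
    unsigned long offs[] = { 10, 11, 9, 20, 12 };

    for (size_t i = 0; i < sizeof(offs) / sizeof(offs[0]); i++)
        extent_insert(&head, offs[i]);
    for (e = head; e; e = e->next)   /* prints 9-12, then 20-20 */
        printf("%lu-%lu\n", e->start, e->end);
    return 0;
}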

repo_name: spotify/linux
path: drivers/usb/musb/davinci.c
copies: 527
size: 14449
content:
/* * Copyright (C) 2005-2006 by Texas Instruments * * This file is part of the Inventra Controller Driver for Linux. * * The Inventra Controller Driver for Linux is free software; you * can redistribute it and/or modify it under the terms of the GNU * General Public License version 2 as published by the Free Software * Foundation. * * The Inventra Controller Driver for Linux is distributed in * the hope that it will be useful, but WITHOUT ANY WARRANTY; * without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public * License for more details. * * You should have received a copy of the GNU General Public License * along with The Inventra Controller Driver for Linux ; if not, * write to the Free Software Foundation, Inc., 59 Temple Place, * Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/list.h> #include <linux/delay.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/gpio.h> #include <mach/hardware.h> #include <mach/memory.h> #include <mach/gpio.h> #include <mach/cputype.h> #include <asm/mach-types.h> #include "musb_core.h" #ifdef CONFIG_MACH_DAVINCI_EVM #define GPIO_nVBUS_DRV 144 #endif #include "davinci.h" #include "cppi_dma.h" #define USB_PHY_CTRL IO_ADDRESS(USBPHY_CTL_PADDR) #define DM355_DEEPSLEEP IO_ADDRESS(DM355_DEEPSLEEP_PADDR) /* REVISIT (PM) we should be able to keep the PHY in low power mode most * of the time (24 MHZ oscillator and PLL off, etc) by setting POWER.D0 * and, when in host mode, autosuspending idle root ports... PHYPLLON * (overriding SUSPENDM?) then likely needs to stay off. */ static inline void phy_on(void) { u32 phy_ctrl = __raw_readl(USB_PHY_CTRL); /* power everything up; start the on-chip PHY and its PLL */ phy_ctrl &= ~(USBPHY_OSCPDWN | USBPHY_OTGPDWN | USBPHY_PHYPDWN); phy_ctrl |= USBPHY_SESNDEN | USBPHY_VBDTCTEN | USBPHY_PHYPLLON; __raw_writel(phy_ctrl, USB_PHY_CTRL); /* wait for PLL to lock before proceeding */ while ((__raw_readl(USB_PHY_CTRL) & USBPHY_PHYCLKGD) == 0) cpu_relax(); } static inline void phy_off(void) { u32 phy_ctrl = __raw_readl(USB_PHY_CTRL); /* powerdown the on-chip PHY, its PLL, and the OTG block */ phy_ctrl &= ~(USBPHY_SESNDEN | USBPHY_VBDTCTEN | USBPHY_PHYPLLON); phy_ctrl |= USBPHY_OSCPDWN | USBPHY_OTGPDWN | USBPHY_PHYPDWN; __raw_writel(phy_ctrl, USB_PHY_CTRL); } static int dma_off = 1; void musb_platform_enable(struct musb *musb) { u32 tmp, old, val; /* workaround: setup irqs through both register sets */ tmp = (musb->epmask & DAVINCI_USB_TX_ENDPTS_MASK) << DAVINCI_USB_TXINT_SHIFT; musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp); old = tmp; tmp = (musb->epmask & (0xfffe & DAVINCI_USB_RX_ENDPTS_MASK)) << DAVINCI_USB_RXINT_SHIFT; musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp); tmp |= old; val = ~MUSB_INTR_SOF; tmp |= ((val & 0x01ff) << DAVINCI_USB_USBINT_SHIFT); musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp); if (is_dma_capable() && !dma_off) printk(KERN_WARNING "%s %s: dma not reactivated\n", __FILE__, __func__); else dma_off = 0; /* force a DRVVBUS irq so we can start polling for ID change */ if (is_otg_enabled(musb)) musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG, DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT); } /* * Disable the HDRC and flush interrupts */ void musb_platform_disable(struct musb *musb) { /* because we don't set CTRLR.UINT, "important" to: * - not read/write 
INTRUSB/INTRUSBE * - (except during initial setup, as workaround) * - use INTSETR/INTCLRR instead */ musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_CLR_REG, DAVINCI_USB_USBINT_MASK | DAVINCI_USB_TXINT_MASK | DAVINCI_USB_RXINT_MASK); musb_writeb(musb->mregs, MUSB_DEVCTL, 0); musb_writel(musb->ctrl_base, DAVINCI_USB_EOI_REG, 0); if (is_dma_capable() && !dma_off) WARNING("dma still active\n"); } #ifdef CONFIG_USB_MUSB_HDRC_HCD #define portstate(stmt) stmt #else #define portstate(stmt) #endif /* * VBUS SWITCHING IS BOARD-SPECIFIC ... at least for the DM6446 EVM, * which doesn't wire DRVVBUS to the FET that switches it. Unclear * if that's a problem with the DM6446 chip or just with that board. * * In either case, the DM355 EVM automates DRVVBUS the normal way, * when J10 is out, and TI documents it as handling OTG. */ #ifdef CONFIG_MACH_DAVINCI_EVM static int vbus_state = -1; /* I2C operations are always synchronous, and require a task context. * With unloaded systems, using the shared workqueue seems to suffice * to satisfy the 100msec A_WAIT_VRISE timeout... */ static void evm_deferred_drvvbus(struct work_struct *ignored) { gpio_set_value_cansleep(GPIO_nVBUS_DRV, vbus_state); vbus_state = !vbus_state; } #endif /* EVM */ static void davinci_source_power(struct musb *musb, int is_on, int immediate) { #ifdef CONFIG_MACH_DAVINCI_EVM if (is_on) is_on = 1; if (vbus_state == is_on) return; vbus_state = !is_on; /* 0/1 vs "-1 == unknown/init" */ if (machine_is_davinci_evm()) { static DECLARE_WORK(evm_vbus_work, evm_deferred_drvvbus); if (immediate) gpio_set_value_cansleep(GPIO_nVBUS_DRV, vbus_state); else schedule_work(&evm_vbus_work); } if (immediate) vbus_state = is_on; #endif } static void davinci_set_vbus(struct musb *musb, int is_on) { WARN_ON(is_on && is_peripheral_active(musb)); davinci_source_power(musb, is_on, 0); } #define POLL_SECONDS 2 static struct timer_list otg_workaround; static void otg_timer(unsigned long _musb) { struct musb *musb = (void *)_musb; void __iomem *mregs = musb->mregs; u8 devctl; unsigned long flags; /* We poll because DaVinci's won't expose several OTG-critical * status change events (from the transceiver) otherwise. */ devctl = musb_readb(mregs, MUSB_DEVCTL); DBG(7, "poll devctl %02x (%s)\n", devctl, otg_state_string(musb)); spin_lock_irqsave(&musb->lock, flags); switch (musb->xceiv->state) { case OTG_STATE_A_WAIT_VFALL: /* Wait till VBUS falls below SessionEnd (~0.2V); the 1.3 RTL * seems to mis-handle session "start" otherwise (or in our * case "recover"), in routine "VBUS was valid by the time * VBUSERR got reported during enumeration" cases. */ if (devctl & MUSB_DEVCTL_VBUS) { mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); break; } musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG, MUSB_INTR_VBUSERROR << DAVINCI_USB_USBINT_SHIFT); break; case OTG_STATE_B_IDLE: if (!is_peripheral_enabled(musb)) break; /* There's no ID-changed IRQ, so we have no good way to tell * when to switch to the A-Default state machine (by setting * the DEVCTL.SESSION flag). * * Workaround: whenever we're in B_IDLE, try setting the * session flag every few seconds. If it works, ID was * grounded and we're now in the A-Default state machine. * * NOTE setting the session flag is _supposed_ to trigger * SRP, but clearly it doesn't. 
*/ musb_writeb(mregs, MUSB_DEVCTL, devctl | MUSB_DEVCTL_SESSION); devctl = musb_readb(mregs, MUSB_DEVCTL); if (devctl & MUSB_DEVCTL_BDEVICE) mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); else musb->xceiv->state = OTG_STATE_A_IDLE; break; default: break; } spin_unlock_irqrestore(&musb->lock, flags); } static irqreturn_t davinci_interrupt(int irq, void *__hci) { unsigned long flags; irqreturn_t retval = IRQ_NONE; struct musb *musb = __hci; void __iomem *tibase = musb->ctrl_base; struct cppi *cppi; u32 tmp; spin_lock_irqsave(&musb->lock, flags); /* NOTE: DaVinci shadows the Mentor IRQs. Don't manage them through * the Mentor registers (except for setup), use the TI ones and EOI. * * Docs describe irq "vector" registers asociated with the CPPI and * USB EOI registers. These hold a bitmask corresponding to the * current IRQ, not an irq handler address. Would using those bits * resolve some of the races observed in this dispatch code?? */ /* CPPI interrupts share the same IRQ line, but have their own * mask, state, "vector", and EOI registers. */ cppi = container_of(musb->dma_controller, struct cppi, controller); if (is_cppi_enabled() && musb->dma_controller && !cppi->irq) retval = cppi_interrupt(irq, __hci); /* ack and handle non-CPPI interrupts */ tmp = musb_readl(tibase, DAVINCI_USB_INT_SRC_MASKED_REG); musb_writel(tibase, DAVINCI_USB_INT_SRC_CLR_REG, tmp); DBG(4, "IRQ %08x\n", tmp); musb->int_rx = (tmp & DAVINCI_USB_RXINT_MASK) >> DAVINCI_USB_RXINT_SHIFT; musb->int_tx = (tmp & DAVINCI_USB_TXINT_MASK) >> DAVINCI_USB_TXINT_SHIFT; musb->int_usb = (tmp & DAVINCI_USB_USBINT_MASK) >> DAVINCI_USB_USBINT_SHIFT; /* DRVVBUS irqs are the only proxy we have (a very poor one!) for * DaVinci's missing ID change IRQ. We need an ID change IRQ to * switch appropriately between halves of the OTG state machine. * Managing DEVCTL.SESSION per Mentor docs requires we know its * value, but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set. * Also, DRVVBUS pulses for SRP (but not at 5V) ... */ if (tmp & (DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT)) { int drvvbus = musb_readl(tibase, DAVINCI_USB_STAT_REG); void __iomem *mregs = musb->mregs; u8 devctl = musb_readb(mregs, MUSB_DEVCTL); int err = musb->int_usb & MUSB_INTR_VBUSERROR; err = is_host_enabled(musb) && (musb->int_usb & MUSB_INTR_VBUSERROR); if (err) { /* The Mentor core doesn't debounce VBUS as needed * to cope with device connect current spikes. This * means it's not uncommon for bus-powered devices * to get VBUS errors during enumeration. * * This is a workaround, but newer RTL from Mentor * seems to allow a better one: "re"starting sessions * without waiting (on EVM, a **long** time) for VBUS * to stop registering in devctl. */ musb->int_usb &= ~MUSB_INTR_VBUSERROR; musb->xceiv->state = OTG_STATE_A_WAIT_VFALL; mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); WARNING("VBUS error workaround (delay coming)\n"); } else if (is_host_enabled(musb) && drvvbus) { MUSB_HST_MODE(musb); musb->xceiv->default_a = 1; musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; portstate(musb->port1_status |= USB_PORT_STAT_POWER); del_timer(&otg_workaround); } else { musb->is_active = 0; MUSB_DEV_MODE(musb); musb->xceiv->default_a = 0; musb->xceiv->state = OTG_STATE_B_IDLE; portstate(musb->port1_status &= ~USB_PORT_STAT_POWER); } /* NOTE: this must complete poweron within 100 msec * (OTG_TIME_A_WAIT_VRISE) but we don't check for that. */ davinci_source_power(musb, drvvbus, 0); DBG(2, "VBUS %s (%s)%s, devctl %02x\n", drvvbus ? 
"on" : "off", otg_state_string(musb), err ? " ERROR" : "", devctl); retval = IRQ_HANDLED; } if (musb->int_tx || musb->int_rx || musb->int_usb) retval |= musb_interrupt(musb); /* irq stays asserted until EOI is written */ musb_writel(tibase, DAVINCI_USB_EOI_REG, 0); /* poll for ID change */ if (is_otg_enabled(musb) && musb->xceiv->state == OTG_STATE_B_IDLE) mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); spin_unlock_irqrestore(&musb->lock, flags); return retval; } int musb_platform_set_mode(struct musb *musb, u8 mode) { /* EVM can't do this (right?) */ return -EIO; } int __init musb_platform_init(struct musb *musb) { void __iomem *tibase = musb->ctrl_base; u32 revision; usb_nop_xceiv_register(); musb->xceiv = otg_get_transceiver(); if (!musb->xceiv) return -ENODEV; musb->mregs += DAVINCI_BASE_OFFSET; clk_enable(musb->clock); /* returns zero if e.g. not clocked */ revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG); if (revision == 0) goto fail; if (is_host_enabled(musb)) setup_timer(&otg_workaround, otg_timer, (unsigned long) musb); musb->board_set_vbus = davinci_set_vbus; davinci_source_power(musb, 0, 1); /* dm355 EVM swaps D+/D- for signal integrity, and * is clocked from the main 24 MHz crystal. */ if (machine_is_davinci_dm355_evm()) { u32 phy_ctrl = __raw_readl(USB_PHY_CTRL); phy_ctrl &= ~(3 << 9); phy_ctrl |= USBPHY_DATAPOL; __raw_writel(phy_ctrl, USB_PHY_CTRL); } /* On dm355, the default-A state machine needs DRVVBUS control. * If we won't be a host, there's no need to turn it on. */ if (cpu_is_davinci_dm355()) { u32 deepsleep = __raw_readl(DM355_DEEPSLEEP); if (is_host_enabled(musb)) { deepsleep &= ~DRVVBUS_OVERRIDE; } else { deepsleep &= ~DRVVBUS_FORCE; deepsleep |= DRVVBUS_OVERRIDE; } __raw_writel(deepsleep, DM355_DEEPSLEEP); } /* reset the controller */ musb_writel(tibase, DAVINCI_USB_CTRL_REG, 0x1); /* start the on-chip PHY and its PLL */ phy_on(); msleep(5); /* NOTE: irqs are in mixed mode, not bypass to pure-musb */ pr_debug("DaVinci OTG revision %08x phy %03x control %02x\n", revision, __raw_readl(USB_PHY_CTRL), musb_readb(tibase, DAVINCI_USB_CTRL_REG)); musb->isr = davinci_interrupt; return 0; fail: usb_nop_xceiv_unregister(); return -ENODEV; } int musb_platform_exit(struct musb *musb) { if (is_host_enabled(musb)) del_timer_sync(&otg_workaround); /* force VBUS off */ if (cpu_is_davinci_dm355()) { u32 deepsleep = __raw_readl(DM355_DEEPSLEEP); deepsleep &= ~DRVVBUS_FORCE; deepsleep |= DRVVBUS_OVERRIDE; __raw_writel(deepsleep, DM355_DEEPSLEEP); } davinci_source_power(musb, 0 /*off*/, 1); /* delay, to avoid problems with module reload */ if (is_host_enabled(musb) && musb->xceiv->default_a) { int maxdelay = 30; u8 devctl, warn = 0; /* if there's no peripheral connected, this can take a * long time to fall, especially on EVM with huge C133. */ do { devctl = musb_readb(musb->mregs, MUSB_DEVCTL); if (!(devctl & MUSB_DEVCTL_VBUS)) break; if ((devctl & MUSB_DEVCTL_VBUS) != warn) { warn = devctl & MUSB_DEVCTL_VBUS; DBG(1, "VBUS %d\n", warn >> MUSB_DEVCTL_VBUS_SHIFT); } msleep(1000); maxdelay--; } while (maxdelay > 0); /* in OTG mode, another host might be connected */ if (devctl & MUSB_DEVCTL_VBUS) DBG(1, "VBUS off timeout (devctl %02x)\n", devctl); } phy_off(); clk_disable(musb->clock); usb_nop_xceiv_unregister(); return 0; }
license: gpl-2.0
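phy_on() and phy_off() above are textbook read-modify-write sequences on a shared control register, followed by a poll loop on a status bit. A hedged userspace sketch of the same shape against a fake register; the bit names and helpers are invented, and the fake write models an instantly locking PLL so the poll terminates:

#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for the memory-mapped PHY control register. */
#define PHY_PDWN    (1u << 0)   /* power-down (active high)      */
#define PHY_PLLON   (1u << 1)   /* enable the PHY PLL            */
#define PHY_CLKGD   (1u << 2)   /* read-only: PLL has locked     */

static volatile uint32_t fake_phy_ctrl = PHY_PDWN;

static uint32_t reg_read(void)        { return fake_phy_ctrl; }
static void     reg_write(uint32_t v) { fake_phy_ctrl = v | PHY_CLKGD; }

static void phy_on(void)
{
    uint32_t v = reg_read();      /* read                       */
    v &= ~PHY_PDWN;               /* modify: clear power-down   */
    v |= PHY_PLLON;               /*         set PLL enable     */
    reg_write(v);                 /* write back                 */

    while (!(reg_read() & PHY_CLKGD))  /* poll for PLL lock     */
        ;                              /* cpu_relax() in kernel */
}

int main(void)
{
    phy_on();
    printf("ctrl = %#x\n", (unsigned)fake_phy_ctrl);
    return 0;
}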

repo_name: PAC-lge/lge-kernel-star
path: kernel/time/tick-sched.c
copies: 783
size: 22506
content:
/* * linux/kernel/time/tick-sched.c * * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de> * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner * * No idle tick implementation for low and high resolution timers * * Started by: Thomas Gleixner and Ingo Molnar * * Distribute under GPLv2. */ #include <linux/cpu.h> #include <linux/err.h> #include <linux/hrtimer.h> #include <linux/interrupt.h> #include <linux/kernel_stat.h> #include <linux/percpu.h> #include <linux/profile.h> #include <linux/sched.h> #include <linux/module.h> #include <asm/irq_regs.h> #include "tick-internal.h" /* * Per cpu nohz control structure */ static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched); /* * The time, when the last jiffy update happened. Protected by xtime_lock. */ static ktime_t last_jiffies_update; struct tick_sched *tick_get_tick_sched(int cpu) { return &per_cpu(tick_cpu_sched, cpu); } /* * Must be called with interrupts disabled ! */ static void tick_do_update_jiffies64(ktime_t now) { unsigned long ticks = 0; ktime_t delta; /* * Do a quick check without holding xtime_lock: */ delta = ktime_sub(now, last_jiffies_update); if (delta.tv64 < tick_period.tv64) return; /* Reevalute with xtime_lock held */ write_seqlock(&xtime_lock); delta = ktime_sub(now, last_jiffies_update); if (delta.tv64 >= tick_period.tv64) { delta = ktime_sub(delta, tick_period); last_jiffies_update = ktime_add(last_jiffies_update, tick_period); /* Slow path for long timeouts */ if (unlikely(delta.tv64 >= tick_period.tv64)) { s64 incr = ktime_to_ns(tick_period); ticks = ktime_divns(delta, incr); last_jiffies_update = ktime_add_ns(last_jiffies_update, incr * ticks); } do_timer(++ticks); /* Keep the tick_next_period variable up to date */ tick_next_period = ktime_add(last_jiffies_update, tick_period); } write_sequnlock(&xtime_lock); } /* * Initialize and return retrieve the jiffies update. */ static ktime_t tick_init_jiffy_update(void) { ktime_t period; write_seqlock(&xtime_lock); /* Did we start the jiffies update yet ? */ if (last_jiffies_update.tv64 == 0) last_jiffies_update = tick_next_period; period = last_jiffies_update; write_sequnlock(&xtime_lock); return period; } /* * NOHZ - aka dynamic tick functionality */ #ifdef CONFIG_NO_HZ /* * NO HZ enabled ? */ static int tick_nohz_enabled __read_mostly = 1; /* * Enable / Disable tickless mode */ static int __init setup_tick_nohz(char *str) { if (!strcmp(str, "off")) tick_nohz_enabled = 0; else if (!strcmp(str, "on")) tick_nohz_enabled = 1; else return 0; return 1; } __setup("nohz=", setup_tick_nohz); /** * tick_nohz_update_jiffies - update jiffies when idle was interrupted * * Called from interrupt entry when the CPU was idle * * In case the sched_tick was stopped on this CPU, we have to check if jiffies * must be updated. Otherwise an interrupt handler could use a stale jiffy * value. We do this unconditionally on any cpu, as we don't know whether the * cpu, which has the update task assigned is in a long sleep. 
*/ static void tick_nohz_update_jiffies(ktime_t now) { int cpu = smp_processor_id(); struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); unsigned long flags; cpumask_clear_cpu(cpu, nohz_cpu_mask); ts->idle_waketime = now; local_irq_save(flags); tick_do_update_jiffies64(now); local_irq_restore(flags); touch_softlockup_watchdog(); } /* * Updates the per cpu time idle statistics counters */ static void update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time) { ktime_t delta; if (ts->idle_active) { delta = ktime_sub(now, ts->idle_entrytime); ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta); if (nr_iowait_cpu(cpu) > 0) ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta); ts->idle_entrytime = now; } if (last_update_time) *last_update_time = ktime_to_us(now); } static void tick_nohz_stop_idle(int cpu, ktime_t now) { struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); update_ts_time_stats(cpu, ts, now, NULL); ts->idle_active = 0; sched_clock_idle_wakeup_event(0); } static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts) { ktime_t now; now = ktime_get(); update_ts_time_stats(cpu, ts, now, NULL); ts->idle_entrytime = now; ts->idle_active = 1; sched_clock_idle_sleep_event(); return now; } /** * get_cpu_idle_time_us - get the total idle time of a cpu * @cpu: CPU number to query * @last_update_time: variable to store update time in * * Return the cummulative idle time (since boot) for a given * CPU, in microseconds. The idle time returned includes * the iowait time (unlike what "top" and co report). * * This time is measured via accounting rather than sampling, * and is as accurate as ktime_get() is. * * This function returns -1 if NOHZ is not enabled. */ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time) { struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); if (!tick_nohz_enabled) return -1; update_ts_time_stats(cpu, ts, ktime_get(), last_update_time); return ktime_to_us(ts->idle_sleeptime); } EXPORT_SYMBOL_GPL(get_cpu_idle_time_us); /* * get_cpu_iowait_time_us - get the total iowait time of a cpu * @cpu: CPU number to query * @last_update_time: variable to store update time in * * Return the cummulative iowait time (since boot) for a given * CPU, in microseconds. * * This time is measured via accounting rather than sampling, * and is as accurate as ktime_get() is. * * This function returns -1 if NOHZ is not enabled. */ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time) { struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); if (!tick_nohz_enabled) return -1; update_ts_time_stats(cpu, ts, ktime_get(), last_update_time); return ktime_to_us(ts->iowait_sleeptime); } EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us); /** * tick_nohz_stop_sched_tick - stop the idle tick from the idle task * * When the next event is more than a tick into the future, stop the idle tick * Called either from the idle loop or from irq_exit() when an idle period was * just interrupted by an interrupt which did not cause a reschedule. */ void tick_nohz_stop_sched_tick(int inidle) { unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags; struct tick_sched *ts; ktime_t last_update, expires, now; struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; u64 time_delta; int cpu; local_irq_save(flags); cpu = smp_processor_id(); ts = &per_cpu(tick_cpu_sched, cpu); /* * Call to tick_nohz_start_idle stops the last_update_time from being * updated. 
Thus, it must not be called in the event we are called from * irq_exit() with the prior state different than idle. */ if (!inidle && !ts->inidle) goto end; /* * Set ts->inidle unconditionally. Even if the system did not * switch to NOHZ mode the cpu frequency governers rely on the * update of the idle time accounting in tick_nohz_start_idle(). */ ts->inidle = 1; now = tick_nohz_start_idle(cpu, ts); /* * If this cpu is offline and it is the one which updates * jiffies, then give up the assignment and let it be taken by * the cpu which runs the tick timer next. If we don't drop * this here the jiffies might be stale and do_timer() never * invoked. */ if (unlikely(!cpu_online(cpu))) { if (cpu == tick_do_timer_cpu) tick_do_timer_cpu = TICK_DO_TIMER_NONE; } if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) goto end; if (need_resched()) goto end; if (unlikely(local_softirq_pending() && cpu_online(cpu))) { static int ratelimit; if (ratelimit < 10) { printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", (unsigned int) local_softirq_pending()); ratelimit++; } goto end; } ts->idle_calls++; /* Read jiffies and the time when jiffies were updated last */ do { seq = read_seqbegin(&xtime_lock); last_update = last_jiffies_update; last_jiffies = jiffies; time_delta = timekeeping_max_deferment(); } while (read_seqretry(&xtime_lock, seq)); if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) || arch_needs_cpu(cpu)) { next_jiffies = last_jiffies + 1; delta_jiffies = 1; } else { /* Get the next timer wheel timer */ next_jiffies = get_next_timer_interrupt(last_jiffies); delta_jiffies = next_jiffies - last_jiffies; } /* * Do not stop the tick, if we are only one off * or if the cpu is required for rcu */ if (!ts->tick_stopped && delta_jiffies == 1) goto out; /* Schedule the tick, if we are at least one jiffie off */ if ((long)delta_jiffies >= 1) { /* * If this cpu is the one which updates jiffies, then * give up the assignment and let it be taken by the * cpu which runs the tick timer next, which might be * this cpu as well. If we don't drop this here the * jiffies might be stale and do_timer() never * invoked. Keep track of the fact that it was the one * which had the do_timer() duty last. If this cpu is * the one which had the do_timer() duty last, we * limit the sleep time to the timekeeping * max_deferement value which we retrieved * above. Otherwise we can sleep as long as we want. */ if (cpu == tick_do_timer_cpu) { tick_do_timer_cpu = TICK_DO_TIMER_NONE; ts->do_timer_last = 1; } else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) { time_delta = KTIME_MAX; ts->do_timer_last = 0; } else if (!ts->do_timer_last) { time_delta = KTIME_MAX; } /* * calculate the expiry time for the next timer wheel * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals * that there is no timer pending or at least extremely * far into the future (12 days for HZ=1000). In this * case we set the expiry to the end of time. */ if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) { /* * Calculate the time delta for the next timer event. * If the time delta exceeds the maximum time delta * permitted by the current clocksource then adjust * the time delta accordingly to ensure the * clocksource does not wrap. 
*/ time_delta = min_t(u64, time_delta, tick_period.tv64 * delta_jiffies); } if (time_delta < KTIME_MAX) expires = ktime_add_ns(last_update, time_delta); else expires.tv64 = KTIME_MAX; if (delta_jiffies > 1) cpumask_set_cpu(cpu, nohz_cpu_mask); /* Skip reprogram of event if its not changed */ if (ts->tick_stopped && ktime_equal(expires, dev->next_event)) goto out; /* * nohz_stop_sched_tick can be called several times before * the nohz_restart_sched_tick is called. This happens when * interrupts arrive which do not cause a reschedule. In the * first call we save the current tick time, so we can restart * the scheduler tick in nohz_restart_sched_tick. */ if (!ts->tick_stopped) { select_nohz_load_balancer(1); ts->idle_tick = hrtimer_get_expires(&ts->sched_timer); ts->tick_stopped = 1; ts->idle_jiffies = last_jiffies; rcu_enter_nohz(); } ts->idle_sleeps++; /* Mark expires */ ts->idle_expires = expires; /* * If the expiration time == KTIME_MAX, then * in this case we simply stop the tick timer. */ if (unlikely(expires.tv64 == KTIME_MAX)) { if (ts->nohz_mode == NOHZ_MODE_HIGHRES) hrtimer_cancel(&ts->sched_timer); goto out; } if (ts->nohz_mode == NOHZ_MODE_HIGHRES) { hrtimer_start(&ts->sched_timer, expires, HRTIMER_MODE_ABS_PINNED); /* Check, if the timer was already in the past */ if (hrtimer_active(&ts->sched_timer)) goto out; } else if (!tick_program_event(expires, 0)) goto out; /* * We are past the event already. So we crossed a * jiffie boundary. Update jiffies and raise the * softirq. */ tick_do_update_jiffies64(ktime_get()); cpumask_clear_cpu(cpu, nohz_cpu_mask); } raise_softirq_irqoff(TIMER_SOFTIRQ); out: ts->next_jiffies = next_jiffies; ts->last_jiffies = last_jiffies; ts->sleep_length = ktime_sub(dev->next_event, now); end: local_irq_restore(flags); } /** * tick_nohz_get_sleep_length - return the length of the current sleep * * Called from power state control code with interrupts disabled */ ktime_t tick_nohz_get_sleep_length(void) { struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); return ts->sleep_length; } static void tick_nohz_restart(struct tick_sched *ts, ktime_t now) { hrtimer_cancel(&ts->sched_timer); hrtimer_set_expires(&ts->sched_timer, ts->idle_tick); while (1) { /* Forward the time to expire in the future */ hrtimer_forward(&ts->sched_timer, now, tick_period); if (ts->nohz_mode == NOHZ_MODE_HIGHRES) { hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED); /* Check, if the timer was already in the past */ if (hrtimer_active(&ts->sched_timer)) break; } else { if (!tick_program_event( hrtimer_get_expires(&ts->sched_timer), 0)) break; } /* Update jiffies and reread time */ tick_do_update_jiffies64(now); now = ktime_get(); } } /** * tick_nohz_restart_sched_tick - restart the idle tick from the idle task * * Restart the idle tick when the CPU is woken up from idle */ void tick_nohz_restart_sched_tick(void) { int cpu = smp_processor_id(); struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); #ifndef CONFIG_VIRT_CPU_ACCOUNTING unsigned long ticks; #endif ktime_t now; local_irq_disable(); if (ts->idle_active || (ts->inidle && ts->tick_stopped)) now = ktime_get(); if (ts->idle_active) tick_nohz_stop_idle(cpu, now); if (!ts->inidle || !ts->tick_stopped) { ts->inidle = 0; local_irq_enable(); return; } ts->inidle = 0; rcu_exit_nohz(); /* Update jiffies first */ select_nohz_load_balancer(0); tick_do_update_jiffies64(now); cpumask_clear_cpu(cpu, nohz_cpu_mask); #ifndef CONFIG_VIRT_CPU_ACCOUNTING /* * We stopped the tick in idle. 
Update process times would miss the * time we slept as update_process_times does only a 1 tick * accounting. Enforce that this is accounted to idle ! */ ticks = jiffies - ts->idle_jiffies; /* * We might be one off. Do not randomly account a huge number of ticks! */ if (ticks && ticks < LONG_MAX) account_idle_ticks(ticks); #endif touch_softlockup_watchdog(); /* * Cancel the scheduled timer and restore the tick */ ts->tick_stopped = 0; ts->idle_exittime = now; tick_nohz_restart(ts, now); local_irq_enable(); } static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now) { hrtimer_forward(&ts->sched_timer, now, tick_period); return tick_program_event(hrtimer_get_expires(&ts->sched_timer), 0); } /* * The nohz low res interrupt handler */ static void tick_nohz_handler(struct clock_event_device *dev) { struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); struct pt_regs *regs = get_irq_regs(); int cpu = smp_processor_id(); ktime_t now = ktime_get(); dev->next_event.tv64 = KTIME_MAX; /* * Check if the do_timer duty was dropped. We don't care about * concurrency: This happens only when the cpu in charge went * into a long sleep. If two cpus happen to assign themself to * this duty, then the jiffies update is still serialized by * xtime_lock. */ if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) tick_do_timer_cpu = cpu; /* Check, if the jiffies need an update */ if (tick_do_timer_cpu == cpu) tick_do_update_jiffies64(now); /* * When we are idle and the tick is stopped, we have to touch * the watchdog as we might not schedule for a really long * time. This happens on complete idle SMP systems while * waiting on the login prompt. We also increment the "start * of idle" jiffy stamp so the idle accounting adjustment we * do when we go busy again does not account too much ticks. */ if (ts->tick_stopped) { touch_softlockup_watchdog(); ts->idle_jiffies++; } update_process_times(user_mode(regs)); profile_tick(CPU_PROFILING); while (tick_nohz_reprogram(ts, now)) { now = ktime_get(); tick_do_update_jiffies64(now); } } /** * tick_nohz_switch_to_nohz - switch to nohz mode */ static void tick_nohz_switch_to_nohz(void) { struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); ktime_t next; if (!tick_nohz_enabled) return; local_irq_disable(); if (tick_switch_to_oneshot(tick_nohz_handler)) { local_irq_enable(); return; } ts->nohz_mode = NOHZ_MODE_LOWRES; /* * Recycle the hrtimer in ts, so we can share the * hrtimer_forward with the highres code. */ hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); /* Get the next period */ next = tick_init_jiffy_update(); for (;;) { hrtimer_set_expires(&ts->sched_timer, next); if (!tick_program_event(next, 0)) break; next = ktime_add(next, tick_period); } local_irq_enable(); printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n", smp_processor_id()); } /* * When NOHZ is enabled and the tick is stopped, we need to kick the * tick timer from irq_enter() so that the jiffies update is kept * alive during long running softirqs. That's ugly as hell, but * correctness is key even if we need to fix the offending softirq in * the first place. * * Note, this is different to tick_nohz_restart. We just kick the * timer and do not touch the other magic bits which need to be done * when idle is left. 
*/ static void tick_nohz_kick_tick(int cpu, ktime_t now) { #if 0 /* Switch back to 2.6.27 behaviour */ struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); ktime_t delta; /* * Do not touch the tick device, when the next expiry is either * already reached or less/equal than the tick period. */ delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now); if (delta.tv64 <= tick_period.tv64) return; tick_nohz_restart(ts, now); #endif } static inline void tick_check_nohz(int cpu) { struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); ktime_t now; if (!ts->idle_active && !ts->tick_stopped) return; now = ktime_get(); if (ts->idle_active) tick_nohz_stop_idle(cpu, now); if (ts->tick_stopped) { tick_nohz_update_jiffies(now); tick_nohz_kick_tick(cpu, now); } } #else static inline void tick_nohz_switch_to_nohz(void) { } static inline void tick_check_nohz(int cpu) { } #endif /* NO_HZ */ /* * Called from irq_enter to notify about the possible interruption of idle() */ void tick_check_idle(int cpu) { tick_check_oneshot_broadcast(cpu); tick_check_nohz(cpu); } /* * High resolution timer specific code */ #ifdef CONFIG_HIGH_RES_TIMERS /* * We rearm the timer until we get disabled by the idle code. * Called with interrupts disabled and timer->base->cpu_base->lock held. */ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer) { struct tick_sched *ts = container_of(timer, struct tick_sched, sched_timer); struct pt_regs *regs = get_irq_regs(); ktime_t now = ktime_get(); int cpu = smp_processor_id(); #ifdef CONFIG_NO_HZ /* * Check if the do_timer duty was dropped. We don't care about * concurrency: This happens only when the cpu in charge went * into a long sleep. If two cpus happen to assign themself to * this duty, then the jiffies update is still serialized by * xtime_lock. */ if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) tick_do_timer_cpu = cpu; #endif /* Check, if the jiffies need an update */ if (tick_do_timer_cpu == cpu) tick_do_update_jiffies64(now); /* * Do not call, when we are not in irq context and have * no valid regs pointer */ if (regs) { /* * When we are idle and the tick is stopped, we have to touch * the watchdog as we might not schedule for a really long * time. This happens on complete idle SMP systems while * waiting on the login prompt. We also increment the "start of * idle" jiffy stamp so the idle accounting adjustment we do * when we go busy again does not account too much ticks. 
*/ if (ts->tick_stopped) { touch_softlockup_watchdog(); ts->idle_jiffies++; } update_process_times(user_mode(regs)); profile_tick(CPU_PROFILING); } hrtimer_forward(timer, now, tick_period); return HRTIMER_RESTART; } /** * tick_setup_sched_timer - setup the tick emulation timer */ void tick_setup_sched_timer(void) { struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); ktime_t now = ktime_get(); /* * Emulate tick processing via per-CPU hrtimers: */ hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); ts->sched_timer.function = tick_sched_timer; /* Get the next period (per cpu) */ hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); for (;;) { hrtimer_forward(&ts->sched_timer, now, tick_period); hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED); /* Check, if the timer was already in the past */ if (hrtimer_active(&ts->sched_timer)) break; now = ktime_get(); } #ifdef CONFIG_NO_HZ if (tick_nohz_enabled) { ts->nohz_mode = NOHZ_MODE_HIGHRES; printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n", smp_processor_id()); } #endif } #endif /* HIGH_RES_TIMERS */ #if defined CONFIG_NO_HZ || defined CONFIG_HIGH_RES_TIMERS void tick_cancel_sched_timer(int cpu) { struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); # ifdef CONFIG_HIGH_RES_TIMERS if (ts->sched_timer.base) hrtimer_cancel(&ts->sched_timer); # endif ts->nohz_mode = NOHZ_MODE_INACTIVE; } #endif /** * Async notification about clocksource changes */ void tick_clock_notify(void) { int cpu; for_each_possible_cpu(cpu) set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks); } /* * Async notification about clock event changes */ void tick_oneshot_notify(void) { struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); set_bit(0, &ts->check_clocks); } /** * Check, if a change happened, which makes oneshot possible. * * Called cyclic from the hrtimer softirq (driven by the timer * softirq) allow_nohz signals, that we can switch into low-res nohz * mode, because high resolution timers are disabled (either compile * or runtime). */ int tick_check_oneshot_change(int allow_nohz) { struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); if (!test_and_clear_bit(0, &ts->check_clocks)) return 0; if (ts->nohz_mode != NOHZ_MODE_INACTIVE) return 0; if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available()) return 0; if (!allow_nohz) return 1; tick_nohz_switch_to_nohz(); return 0; }
license: gpl-2.0
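tick_nohz_stop_sched_tick() samples jiffies state with the classic seqlock reader loop: read a sequence counter, copy the protected data, and retry if the counter moved. A hedged sketch of that protocol in portable C11; it uses sequentially consistent atomics throughout for simplicity, whereas the kernel's seqlock relies on finer-grained barriers:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq;            /* odd while a write is in flight */
static atomic_ulong a, b;          /* the protected pair             */

/* Single writer: bump seq to odd, update the data, bump back to even. */
static void writer_update(unsigned long x, unsigned long y)
{
    atomic_fetch_add(&seq, 1);     /* seq becomes odd  */
    atomic_store(&a, x);
    atomic_store(&b, y);
    atomic_fetch_add(&seq, 1);     /* seq even again   */
}

/* Readers never block: copy, then retry if a writer interleaved. */
static void reader_snapshot(unsigned long *x, unsigned long *y)
{
    unsigned s;
    do {
        while ((s = atomic_load(&seq)) & 1)
            ;                          /* writer in progress: spin */
        *x = atomic_load(&a);
        *y = atomic_load(&b);
    } while (atomic_load(&seq) != s);  /* torn read: try again     */
}

int main(void)
{
    unsigned long x, y;
    writer_update(42, 7);
    reader_snapshot(&x, &y);
    printf("%lu %lu\n", x, y);         /* prints "42 7" */
    return 0;
}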

repo_name: snishanth512/linux
path: mm/cleancache.c
copies: 783
size: 10102
content:
/* * Cleancache frontend * * This code provides the generic "frontend" layer to call a matching * "backend" driver implementation of cleancache. See * Documentation/vm/cleancache.txt for more information. * * Copyright (C) 2009-2010 Oracle Corp. All rights reserved. * Author: Dan Magenheimer * * This work is licensed under the terms of the GNU GPL, version 2. */ #include <linux/module.h> #include <linux/fs.h> #include <linux/exportfs.h> #include <linux/mm.h> #include <linux/debugfs.h> #include <linux/cleancache.h> /* * cleancache_ops is set by cleancache_register_ops to contain the pointers * to the cleancache "backend" implementation functions. */ static struct cleancache_ops *cleancache_ops __read_mostly; /* * Counters available via /sys/kernel/debug/cleancache (if debugfs is * properly configured. These are for information only so are not protected * against increment races. */ static u64 cleancache_succ_gets; static u64 cleancache_failed_gets; static u64 cleancache_puts; static u64 cleancache_invalidates; static void cleancache_register_ops_sb(struct super_block *sb, void *unused) { switch (sb->cleancache_poolid) { case CLEANCACHE_NO_BACKEND: __cleancache_init_fs(sb); break; case CLEANCACHE_NO_BACKEND_SHARED: __cleancache_init_shared_fs(sb); break; } } /* * Register operations for cleancache. Returns 0 on success. */ int cleancache_register_ops(struct cleancache_ops *ops) { if (cmpxchg(&cleancache_ops, NULL, ops)) return -EBUSY; /* * A cleancache backend can be built as a module and hence loaded after * a cleancache enabled filesystem has called cleancache_init_fs. To * handle such a scenario, here we call ->init_fs or ->init_shared_fs * for each active super block. To differentiate between local and * shared filesystems, we temporarily initialize sb->cleancache_poolid * to CLEANCACHE_NO_BACKEND or CLEANCACHE_NO_BACKEND_SHARED * respectively in case there is no backend registered at the time * cleancache_init_fs or cleancache_init_shared_fs is called. * * Since filesystems can be mounted concurrently with cleancache * backend registration, we have to be careful to guarantee that all * cleancache enabled filesystems that has been mounted by the time * cleancache_register_ops is called has got and all mounted later will * get cleancache_poolid. This is assured by the following statements * tied together: * * a) iterate_supers skips only those super blocks that has started * ->kill_sb * * b) if iterate_supers encounters a super block that has not finished * ->mount yet, it waits until it is finished * * c) cleancache_init_fs is called from ->mount and * cleancache_invalidate_fs is called from ->kill_sb * * d) we call iterate_supers after cleancache_ops has been set * * From a) it follows that if iterate_supers skips a super block, then * either the super block is already dead, in which case we do not need * to bother initializing cleancache for it, or it was mounted after we * initiated iterate_supers. In the latter case, it must have seen * cleancache_ops set according to d) and initialized cleancache from * ->mount by itself according to c). This proves that we call * ->init_fs at least once for each active super block. * * From b) and c) it follows that if iterate_supers encounters a super * block that has already started ->init_fs, it will wait until ->mount * and hence ->init_fs has finished, then check cleancache_poolid, see * that it has already been set and therefore do nothing. This proves * that we call ->init_fs no more than once for each super block. 
* * Combined together, the last two paragraphs prove the function * correctness. * * Note that various cleancache callbacks may proceed before this * function is called or even concurrently with it, but since * CLEANCACHE_NO_BACKEND is negative, they will all result in a noop * until the corresponding ->init_fs has been actually called and * cleancache_ops has been set. */ iterate_supers(cleancache_register_ops_sb, NULL); return 0; } EXPORT_SYMBOL(cleancache_register_ops); /* Called by a cleancache-enabled filesystem at time of mount */ void __cleancache_init_fs(struct super_block *sb) { int pool_id = CLEANCACHE_NO_BACKEND; if (cleancache_ops) { pool_id = cleancache_ops->init_fs(PAGE_SIZE); if (pool_id < 0) pool_id = CLEANCACHE_NO_POOL; } sb->cleancache_poolid = pool_id; } EXPORT_SYMBOL(__cleancache_init_fs); /* Called by a cleancache-enabled clustered filesystem at time of mount */ void __cleancache_init_shared_fs(struct super_block *sb) { int pool_id = CLEANCACHE_NO_BACKEND_SHARED; if (cleancache_ops) { pool_id = cleancache_ops->init_shared_fs(sb->s_uuid, PAGE_SIZE); if (pool_id < 0) pool_id = CLEANCACHE_NO_POOL; } sb->cleancache_poolid = pool_id; } EXPORT_SYMBOL(__cleancache_init_shared_fs); /* * If the filesystem uses exportable filehandles, use the filehandle as * the key, else use the inode number. */ static int cleancache_get_key(struct inode *inode, struct cleancache_filekey *key) { int (*fhfn)(struct inode *, __u32 *fh, int *, struct inode *); int len = 0, maxlen = CLEANCACHE_KEY_MAX; struct super_block *sb = inode->i_sb; key->u.ino = inode->i_ino; if (sb->s_export_op != NULL) { fhfn = sb->s_export_op->encode_fh; if (fhfn) { len = (*fhfn)(inode, &key->u.fh[0], &maxlen, NULL); if (len <= FILEID_ROOT || len == FILEID_INVALID) return -1; if (maxlen > CLEANCACHE_KEY_MAX) return -1; } } return 0; } /* * "Get" data from cleancache associated with the poolid/inode/index * that were specified when the data was put to cleanache and, if * successful, use it to fill the specified page with data and return 0. * The pageframe is unchanged and returns -1 if the get fails. * Page must be locked by caller. * * The function has two checks before any action is taken - whether * a backend is registered and whether the sb->cleancache_poolid * is correct. */ int __cleancache_get_page(struct page *page) { int ret = -1; int pool_id; struct cleancache_filekey key = { .u.key = { 0 } }; if (!cleancache_ops) { cleancache_failed_gets++; goto out; } VM_BUG_ON_PAGE(!PageLocked(page), page); pool_id = page->mapping->host->i_sb->cleancache_poolid; if (pool_id < 0) goto out; if (cleancache_get_key(page->mapping->host, &key) < 0) goto out; ret = cleancache_ops->get_page(pool_id, key, page->index, page); if (ret == 0) cleancache_succ_gets++; else cleancache_failed_gets++; out: return ret; } EXPORT_SYMBOL(__cleancache_get_page); /* * "Put" data from a page to cleancache and associate it with the * (previously-obtained per-filesystem) poolid and the page's, * inode and page index. Page must be locked. Note that a put_page * always "succeeds", though a subsequent get_page may succeed or fail. * * The function has two checks before any action is taken - whether * a backend is registered and whether the sb->cleancache_poolid * is correct. 
*/ void __cleancache_put_page(struct page *page) { int pool_id; struct cleancache_filekey key = { .u.key = { 0 } }; if (!cleancache_ops) { cleancache_puts++; return; } VM_BUG_ON_PAGE(!PageLocked(page), page); pool_id = page->mapping->host->i_sb->cleancache_poolid; if (pool_id >= 0 && cleancache_get_key(page->mapping->host, &key) >= 0) { cleancache_ops->put_page(pool_id, key, page->index, page); cleancache_puts++; } } EXPORT_SYMBOL(__cleancache_put_page); /* * Invalidate any data from cleancache associated with the poolid and the * page's inode and page index so that a subsequent "get" will fail. * * The function has two checks before any action is taken - whether * a backend is registered and whether the sb->cleancache_poolid * is correct. */ void __cleancache_invalidate_page(struct address_space *mapping, struct page *page) { /* careful... page->mapping is NULL sometimes when this is called */ int pool_id = mapping->host->i_sb->cleancache_poolid; struct cleancache_filekey key = { .u.key = { 0 } }; if (!cleancache_ops) return; if (pool_id >= 0) { VM_BUG_ON_PAGE(!PageLocked(page), page); if (cleancache_get_key(mapping->host, &key) >= 0) { cleancache_ops->invalidate_page(pool_id, key, page->index); cleancache_invalidates++; } } } EXPORT_SYMBOL(__cleancache_invalidate_page); /* * Invalidate all data from cleancache associated with the poolid and the * mappings's inode so that all subsequent gets to this poolid/inode * will fail. * * The function has two checks before any action is taken - whether * a backend is registered and whether the sb->cleancache_poolid * is correct. */ void __cleancache_invalidate_inode(struct address_space *mapping) { int pool_id = mapping->host->i_sb->cleancache_poolid; struct cleancache_filekey key = { .u.key = { 0 } }; if (!cleancache_ops) return; if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0) cleancache_ops->invalidate_inode(pool_id, key); } EXPORT_SYMBOL(__cleancache_invalidate_inode); /* * Called by any cleancache-enabled filesystem at time of unmount; * note that pool_id is surrendered and may be returned by a subsequent * cleancache_init_fs or cleancache_init_shared_fs. */ void __cleancache_invalidate_fs(struct super_block *sb) { int pool_id; pool_id = sb->cleancache_poolid; sb->cleancache_poolid = CLEANCACHE_NO_POOL; if (cleancache_ops && pool_id >= 0) cleancache_ops->invalidate_fs(pool_id); } EXPORT_SYMBOL(__cleancache_invalidate_fs); static int __init init_cleancache(void) { #ifdef CONFIG_DEBUG_FS struct dentry *root = debugfs_create_dir("cleancache", NULL); if (root == NULL) return -ENXIO; debugfs_create_u64("succ_gets", S_IRUGO, root, &cleancache_succ_gets); debugfs_create_u64("failed_gets", S_IRUGO, root, &cleancache_failed_gets); debugfs_create_u64("puts", S_IRUGO, root, &cleancache_puts); debugfs_create_u64("invalidates", S_IRUGO, root, &cleancache_invalidates); #endif return 0; } module_init(init_cleancache)
license: gpl-2.0
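cleancache_register_ops() claims the single backend slot with cmpxchg(&cleancache_ops, NULL, ops), so exactly one registration can ever succeed and latecomers get -EBUSY. A hedged C11 sketch of that register-once pattern; the ops struct and names are invented stand-ins:

#include <stdatomic.h>
#include <stdio.h>

/* Invented backend ops table, standing in for struct cleancache_ops. */
struct backend_ops {
    const char *name;
};

static _Atomic(struct backend_ops *) registered_ops;

/* Allow exactly one backend: the compare-and-swap only succeeds
 * if no backend pointer was installed before. */
static int register_backend(struct backend_ops *ops)
{
    struct backend_ops *expected = NULL;
    if (!atomic_compare_exchange_strong(&registered_ops, &expected, ops))
        return -1;   /* -EBUSY in the kernel version */
    return 0;
}

int main(void)
{
    struct backend_ops zcache = { "zcache" }, other = { "other" };
    printf("%d\n", register_backend(&zcache));  /* 0: first wins   */
    printf("%d\n", register_backend(&other));   /* -1: already set */
    return 0;
}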

repo_name: vm03/android_kernel_asus_P024
path: drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c
copies: 1551
size: 6196
content:
/* * Copyright (C) 2014 Google, Inc. * Author: Colin Cross <ccross@android.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/ptrace.h> #include <asm/stacktrace.h> #include "fiq_debugger_priv.h" static char *mode_name(const struct pt_regs *regs) { if (compat_user_mode(regs)) { return "USR"; } else { switch (processor_mode(regs)) { case PSR_MODE_EL0t: return "EL0t"; case PSR_MODE_EL1t: return "EL1t"; case PSR_MODE_EL1h: return "EL1h"; case PSR_MODE_EL2t: return "EL2t"; case PSR_MODE_EL2h: return "EL2h"; default: return "???"; } } } void fiq_debugger_dump_pc(struct fiq_debugger_output *output, const struct pt_regs *regs) { output->printf(output, " pc %016lx cpsr %08lx mode %s\n", regs->pc, regs->pstate, mode_name(regs)); } void fiq_debugger_dump_regs_aarch32(struct fiq_debugger_output *output, const struct pt_regs *regs) { output->printf(output, " r0 %08x r1 %08x r2 %08x r3 %08x\n", regs->compat_usr(0), regs->compat_usr(1), regs->compat_usr(2), regs->compat_usr(3)); output->printf(output, " r4 %08x r5 %08x r6 %08x r7 %08x\n", regs->compat_usr(4), regs->compat_usr(5), regs->compat_usr(6), regs->compat_usr(7)); output->printf(output, " r8 %08x r9 %08x r10 %08x r11 %08x\n", regs->compat_usr(8), regs->compat_usr(9), regs->compat_usr(10), regs->compat_usr(11)); output->printf(output, " ip %08x sp %08x lr %08x pc %08x\n", regs->compat_usr(12), regs->compat_sp, regs->compat_lr, regs->pc); output->printf(output, " cpsr %08x (%s)\n", regs->pstate, mode_name(regs)); } void fiq_debugger_dump_regs_aarch64(struct fiq_debugger_output *output, const struct pt_regs *regs) { output->printf(output, " x0 %016lx x1 %016lx\n", regs->regs[0], regs->regs[1]); output->printf(output, " x2 %016lx x3 %016lx\n", regs->regs[2], regs->regs[3]); output->printf(output, " x4 %016lx x5 %016lx\n", regs->regs[4], regs->regs[5]); output->printf(output, " x6 %016lx x7 %016lx\n", regs->regs[6], regs->regs[7]); output->printf(output, " x8 %016lx x9 %016lx\n", regs->regs[8], regs->regs[9]); output->printf(output, " x10 %016lx x11 %016lx\n", regs->regs[10], regs->regs[11]); output->printf(output, " x12 %016lx x13 %016lx\n", regs->regs[12], regs->regs[13]); output->printf(output, " x14 %016lx x15 %016lx\n", regs->regs[14], regs->regs[15]); output->printf(output, " x16 %016lx x17 %016lx\n", regs->regs[16], regs->regs[17]); output->printf(output, " x18 %016lx x19 %016lx\n", regs->regs[18], regs->regs[19]); output->printf(output, " x20 %016lx x21 %016lx\n", regs->regs[20], regs->regs[21]); output->printf(output, " x22 %016lx x23 %016lx\n", regs->regs[22], regs->regs[23]); output->printf(output, " x24 %016lx x25 %016lx\n", regs->regs[24], regs->regs[25]); output->printf(output, " x26 %016lx x27 %016lx\n", regs->regs[26], regs->regs[27]); output->printf(output, " x28 %016lx x29 %016lx\n", regs->regs[28], regs->regs[29]); output->printf(output, " x30 %016lx sp %016lx\n", regs->regs[30], regs->sp); output->printf(output, " pc %016lx cpsr %08x (%s)\n", regs->pc, regs->pstate, mode_name(regs)); } void fiq_debugger_dump_regs(struct fiq_debugger_output *output, const struct pt_regs *regs) { if (compat_user_mode(regs)) 
fiq_debugger_dump_regs_aarch32(output, regs); else fiq_debugger_dump_regs_aarch64(output, regs); } #define READ_SPECIAL_REG(x) ({ \ u64 val; \ asm volatile ("mrs %0, " # x : "=r"(val)); \ val; \ }) void fiq_debugger_dump_allregs(struct fiq_debugger_output *output, const struct pt_regs *regs) { u32 pstate = READ_SPECIAL_REG(CurrentEl); bool in_el2 = (pstate & PSR_MODE_MASK) >= PSR_MODE_EL2t; fiq_debugger_dump_regs(output, regs); output->printf(output, " sp_el0 %016lx\n", READ_SPECIAL_REG(sp_el0)); if (in_el2) output->printf(output, " sp_el1 %016lx\n", READ_SPECIAL_REG(sp_el1)); output->printf(output, " elr_el1 %016lx\n", READ_SPECIAL_REG(elr_el1)); output->printf(output, " spsr_el1 %08lx\n", READ_SPECIAL_REG(spsr_el1)); if (in_el2) { output->printf(output, " spsr_irq %08lx\n", READ_SPECIAL_REG(spsr_irq)); output->printf(output, " spsr_abt %08lx\n", READ_SPECIAL_REG(spsr_abt)); output->printf(output, " spsr_und %08lx\n", READ_SPECIAL_REG(spsr_und)); output->printf(output, " spsr_fiq %08lx\n", READ_SPECIAL_REG(spsr_fiq)); output->printf(output, " spsr_el2 %08lx\n", READ_SPECIAL_REG(elr_el2)); output->printf(output, " spsr_el2 %08lx\n", READ_SPECIAL_REG(spsr_el2)); } } struct stacktrace_state { struct fiq_debugger_output *output; unsigned int depth; }; static int report_trace(struct stackframe *frame, void *d) { struct stacktrace_state *sts = d; if (sts->depth) { sts->output->printf(sts->output, "%pF:\n", frame->pc); sts->output->printf(sts->output, " pc %016lx sp %016lx fp %016lx\n", frame->pc, frame->sp, frame->fp); sts->depth--; return 0; } sts->output->printf(sts->output, " ...\n"); return sts->depth == 0; } void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output, const struct pt_regs *regs, unsigned int depth, void *ssp) { struct thread_info *real_thread_info = THREAD_INFO(ssp); struct stacktrace_state sts; sts.depth = depth; sts.output = output; *current_thread_info() = *real_thread_info; if (!current) output->printf(output, "current NULL\n"); else output->printf(output, "pid: %d comm: %s\n", current->pid, current->comm); fiq_debugger_dump_regs(output, regs); if (!user_mode(regs)) { struct stackframe frame; frame.fp = regs->regs[29]; frame.sp = regs->sp; frame.pc = regs->pc; output->printf(output, "\n"); walk_stackframe(&frame, report_trace, &sts); } }
license: gpl-2.0
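report_trace() above is a depth-limited visitor: the stack walker keeps calling it until it returns nonzero, and it prints frames while its budget lasts, then an ellipsis once. A hedged userspace sketch of that callback contract, with an invented frame array standing in for a real ARM64 frame-pointer walk:

#include <stdio.h>

/* Invented frame record; the real code walks ARM64 frame pointers. */
struct frame { unsigned long pc, fp; };

/* Walker calls fn on each frame until fn returns nonzero. */
static void walk_frames(const struct frame *frames, int n,
                        int (*fn)(const struct frame *, void *), void *data)
{
    for (int i = 0; i < n; i++)
        if (fn(&frames[i], data))
            break;
}

struct trace_state { unsigned int depth; };

/* Mirrors report_trace(): print frames while depth remains, then
 * print an ellipsis once and tell the walker to stop. */
static int report(const struct frame *f, void *d)
{
    struct trace_state *st = d;

    if (st->depth) {
        printf(" pc %016lx fp %016lx\n", f->pc, f->fp);
        st->depth--;
        return 0;
    }
    printf(" ...\n");
    return 1;
}

int main(void)
{
    struct frame stack[] = { {0x1000, 0x2000}, {0x1010, 0x2020},
                             {0x1020, 0x2040}, {0x1030, 0x2060} };
    struct trace_state st = { .depth = 2 };

    walk_frames(stack, 4, report, &st);  /* two frames, then "..." */
    return 0;
}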

repo_name: Herna1994/android_kernel_bq_vegetalte
path: drivers/mfd/lp8788.c
copies: 2319
size: 5384
content:
/* * TI LP8788 MFD - core interface * * Copyright 2012 Texas Instruments * * Author: Milo(Woogyom) Kim <milo.kim@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/err.h> #include <linux/i2c.h> #include <linux/mfd/core.h> #include <linux/mfd/lp8788.h> #include <linux/module.h> #include <linux/slab.h> #define MAX_LP8788_REGISTERS 0xA2 #define MFD_DEV_SIMPLE(_name) \ { \ .name = LP8788_DEV_##_name, \ } #define MFD_DEV_WITH_ID(_name, _id) \ { \ .name = LP8788_DEV_##_name, \ .id = _id, \ } #define MFD_DEV_WITH_RESOURCE(_name, _resource, num_resource) \ { \ .name = LP8788_DEV_##_name, \ .resources = _resource, \ .num_resources = num_resource, \ } static struct resource chg_irqs[] = { /* Charger Interrupts */ { .start = LP8788_INT_CHG_INPUT_STATE, .end = LP8788_INT_PRECHG_TIMEOUT, .name = LP8788_CHG_IRQ, .flags = IORESOURCE_IRQ, }, /* Power Routing Switch Interrupts */ { .start = LP8788_INT_ENTER_SYS_SUPPORT, .end = LP8788_INT_EXIT_SYS_SUPPORT, .name = LP8788_PRSW_IRQ, .flags = IORESOURCE_IRQ, }, /* Battery Interrupts */ { .start = LP8788_INT_BATT_LOW, .end = LP8788_INT_NO_BATT, .name = LP8788_BATT_IRQ, .flags = IORESOURCE_IRQ, }, }; static struct resource rtc_irqs[] = { { .start = LP8788_INT_RTC_ALARM1, .end = LP8788_INT_RTC_ALARM2, .name = LP8788_ALM_IRQ, .flags = IORESOURCE_IRQ, }, }; static struct mfd_cell lp8788_devs[] = { /* 4 bucks */ MFD_DEV_WITH_ID(BUCK, 1), MFD_DEV_WITH_ID(BUCK, 2), MFD_DEV_WITH_ID(BUCK, 3), MFD_DEV_WITH_ID(BUCK, 4), /* 12 digital ldos */ MFD_DEV_WITH_ID(DLDO, 1), MFD_DEV_WITH_ID(DLDO, 2), MFD_DEV_WITH_ID(DLDO, 3), MFD_DEV_WITH_ID(DLDO, 4), MFD_DEV_WITH_ID(DLDO, 5), MFD_DEV_WITH_ID(DLDO, 6), MFD_DEV_WITH_ID(DLDO, 7), MFD_DEV_WITH_ID(DLDO, 8), MFD_DEV_WITH_ID(DLDO, 9), MFD_DEV_WITH_ID(DLDO, 10), MFD_DEV_WITH_ID(DLDO, 11), MFD_DEV_WITH_ID(DLDO, 12), /* 10 analog ldos */ MFD_DEV_WITH_ID(ALDO, 1), MFD_DEV_WITH_ID(ALDO, 2), MFD_DEV_WITH_ID(ALDO, 3), MFD_DEV_WITH_ID(ALDO, 4), MFD_DEV_WITH_ID(ALDO, 5), MFD_DEV_WITH_ID(ALDO, 6), MFD_DEV_WITH_ID(ALDO, 7), MFD_DEV_WITH_ID(ALDO, 8), MFD_DEV_WITH_ID(ALDO, 9), MFD_DEV_WITH_ID(ALDO, 10), /* ADC */ MFD_DEV_SIMPLE(ADC), /* battery charger */ MFD_DEV_WITH_RESOURCE(CHARGER, chg_irqs, ARRAY_SIZE(chg_irqs)), /* rtc */ MFD_DEV_WITH_RESOURCE(RTC, rtc_irqs, ARRAY_SIZE(rtc_irqs)), /* backlight */ MFD_DEV_SIMPLE(BACKLIGHT), /* current sink for vibrator */ MFD_DEV_SIMPLE(VIBRATOR), /* current sink for keypad LED */ MFD_DEV_SIMPLE(KEYLED), }; int lp8788_read_byte(struct lp8788 *lp, u8 reg, u8 *data) { int ret; unsigned int val; ret = regmap_read(lp->regmap, reg, &val); if (ret < 0) { dev_err(lp->dev, "failed to read 0x%.2x\n", reg); return ret; } *data = (u8)val; return 0; } EXPORT_SYMBOL_GPL(lp8788_read_byte); int lp8788_read_multi_bytes(struct lp8788 *lp, u8 reg, u8 *data, size_t count) { return regmap_bulk_read(lp->regmap, reg, data, count); } EXPORT_SYMBOL_GPL(lp8788_read_multi_bytes); int lp8788_write_byte(struct lp8788 *lp, u8 reg, u8 data) { return regmap_write(lp->regmap, reg, data); } EXPORT_SYMBOL_GPL(lp8788_write_byte); int lp8788_update_bits(struct lp8788 *lp, u8 reg, u8 mask, u8 data) { return regmap_update_bits(lp->regmap, reg, mask, data); } EXPORT_SYMBOL_GPL(lp8788_update_bits); static int lp8788_platform_init(struct lp8788 *lp) { struct lp8788_platform_data *pdata = lp->pdata; return (pdata && pdata->init_func) ? 
pdata->init_func(lp) : 0; } static const struct regmap_config lp8788_regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = MAX_LP8788_REGISTERS, }; static int lp8788_probe(struct i2c_client *cl, const struct i2c_device_id *id) { struct lp8788 *lp; struct lp8788_platform_data *pdata = cl->dev.platform_data; int ret; lp = devm_kzalloc(&cl->dev, sizeof(struct lp8788), GFP_KERNEL); if (!lp) return -ENOMEM; lp->regmap = devm_regmap_init_i2c(cl, &lp8788_regmap_config); if (IS_ERR(lp->regmap)) { ret = PTR_ERR(lp->regmap); dev_err(&cl->dev, "regmap init i2c err: %d\n", ret); return ret; } lp->pdata = pdata; lp->dev = &cl->dev; i2c_set_clientdata(cl, lp); ret = lp8788_platform_init(lp); if (ret) return ret; ret = lp8788_irq_init(lp, cl->irq); if (ret) return ret; return mfd_add_devices(lp->dev, -1, lp8788_devs, ARRAY_SIZE(lp8788_devs), NULL, 0, NULL); } static int lp8788_remove(struct i2c_client *cl) { struct lp8788 *lp = i2c_get_clientdata(cl); mfd_remove_devices(lp->dev); lp8788_irq_exit(lp); return 0; } static const struct i2c_device_id lp8788_ids[] = { {"lp8788", 0}, { } }; MODULE_DEVICE_TABLE(i2c, lp8788_ids); static struct i2c_driver lp8788_driver = { .driver = { .name = "lp8788", .owner = THIS_MODULE, }, .probe = lp8788_probe, .remove = lp8788_remove, .id_table = lp8788_ids, }; static int __init lp8788_init(void) { return i2c_add_driver(&lp8788_driver); } subsys_initcall(lp8788_init); static void __exit lp8788_exit(void) { i2c_del_driver(&lp8788_driver); } module_exit(lp8788_exit); MODULE_DESCRIPTION("TI LP8788 MFD Driver"); MODULE_AUTHOR("Milo Kim"); MODULE_LICENSE("GPL");
gpl-2.0
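A note on the register helpers in the record above: lp8788_update_bits() forwards to regmap_update_bits(), giving every sub-device a locked read-modify-write on a single register field. Below is a minimal usage sketch; the register address and bit mask are hypothetical placeholders for illustration, not values from the LP8788 datasheet.

#include <linux/mfd/lp8788.h>	/* for struct lp8788 and lp8788_update_bits() */

/* Hypothetical register layout -- placeholder values for illustration
 * only, not taken from the LP8788 datasheet. */
#define LP8788_REG_BUCK_EN	0x10
#define LP8788_BUCK1_EN_MASK	0x01

static int lp8788_buck1_set_enable(struct lp8788 *lp, bool enable)
{
	/* locked read-modify-write through the shared regmap:
	 * only the masked bit changes, the rest of the register is kept */
	return lp8788_update_bits(lp, LP8788_REG_BUCK_EN,
				  LP8788_BUCK1_EN_MASK,
				  enable ? LP8788_BUCK1_EN_MASK : 0);
}

This is the reason the core exports byte/bulk/update accessors instead of the raw regmap: cell drivers stay decoupled from the I2C details.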
SM-G920P-MM/G920P-MM
drivers/net/ethernet/freescale/ucc_geth_ethtool.c
2319
11669
/* * Copyright (c) 2007 Freescale Semiconductor, Inc. All rights reserved. * * Description: QE UCC Gigabit Ethernet Ethtool API Set * * Author: Li Yang <leoli@freescale.com> * * Limitation: * Can only get/set settings of the first queue. * Need to re-open the interface manually after changing some parameters. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/stddef.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/phy.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/uaccess.h> #include <asm/types.h> #include "ucc_geth.h" static const char hw_stat_gstrings[][ETH_GSTRING_LEN] = { "tx-64-frames", "tx-65-127-frames", "tx-128-255-frames", "rx-64-frames", "rx-65-127-frames", "rx-128-255-frames", "tx-bytes-ok", "tx-pause-frames", "tx-multicast-frames", "tx-broadcast-frames", "rx-frames", "rx-bytes-ok", "rx-bytes-all", "rx-multicast-frames", "rx-broadcast-frames", "stats-counter-carry", "stats-counter-mask", "rx-dropped-frames", }; static const char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = { "tx-single-collision", "tx-multiple-collision", "tx-late-collsion", "tx-aborted-frames", "tx-lost-frames", "tx-carrier-sense-errors", "tx-frames-ok", "tx-excessive-differ-frames", "tx-256-511-frames", "tx-512-1023-frames", "tx-1024-1518-frames", "tx-jumbo-frames", }; static const char rx_fw_stat_gstrings[][ETH_GSTRING_LEN] = { "rx-crc-errors", "rx-alignment-errors", "rx-in-range-length-errors", "rx-out-of-range-length-errors", "rx-too-long-frames", "rx-runt", "rx-very-long-event", "rx-symbol-errors", "rx-busy-drop-frames", "reserved", "reserved", "rx-mismatch-drop-frames", "rx-small-than-64", "rx-256-511-frames", "rx-512-1023-frames", "rx-1024-1518-frames", "rx-jumbo-frames", "rx-mac-error-loss", "rx-pause-frames", "reserved", "rx-vlan-removed", "rx-vlan-replaced", "rx-vlan-inserted", "rx-ip-checksum-errors", }; #define UEC_HW_STATS_LEN ARRAY_SIZE(hw_stat_gstrings) #define UEC_TX_FW_STATS_LEN ARRAY_SIZE(tx_fw_stat_gstrings) #define UEC_RX_FW_STATS_LEN ARRAY_SIZE(rx_fw_stat_gstrings) static int uec_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { struct ucc_geth_private *ugeth = netdev_priv(netdev); struct phy_device *phydev = ugeth->phydev; struct ucc_geth_info *ug_info = ugeth->ug_info; if (!phydev) return -ENODEV; ecmd->maxtxpkt = 1; ecmd->maxrxpkt = ug_info->interruptcoalescingmaxvalue[0]; return phy_ethtool_gset(phydev, ecmd); } static int uec_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { struct ucc_geth_private *ugeth = netdev_priv(netdev); struct phy_device *phydev = ugeth->phydev; if (!phydev) return -ENODEV; return phy_ethtool_sset(phydev, ecmd); } static void uec_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct ucc_geth_private *ugeth = netdev_priv(netdev); pause->autoneg = ugeth->phydev->autoneg; if (ugeth->ug_info->receiveFlowControl) pause->rx_pause = 1; if (ugeth->ug_info->transmitFlowControl) pause->tx_pause = 1; } static int uec_set_pauseparam(struct net_device *netdev, struct 
ethtool_pauseparam *pause) { struct ucc_geth_private *ugeth = netdev_priv(netdev); int ret = 0; ugeth->ug_info->receiveFlowControl = pause->rx_pause; ugeth->ug_info->transmitFlowControl = pause->tx_pause; if (ugeth->phydev->autoneg) { if (netif_running(netdev)) { /* FIXME: automatically restart */ netdev_info(netdev, "Please re-open the interface\n"); } } else { struct ucc_geth_info *ug_info = ugeth->ug_info; ret = init_flow_control_params(ug_info->aufc, ug_info->receiveFlowControl, ug_info->transmitFlowControl, ug_info->pausePeriod, ug_info->extensionField, &ugeth->uccf->uf_regs->upsmr, &ugeth->ug_regs->uempr, &ugeth->ug_regs->maccfg1); } return ret; } static uint32_t uec_get_msglevel(struct net_device *netdev) { struct ucc_geth_private *ugeth = netdev_priv(netdev); return ugeth->msg_enable; } static void uec_set_msglevel(struct net_device *netdev, uint32_t data) { struct ucc_geth_private *ugeth = netdev_priv(netdev); ugeth->msg_enable = data; } static int uec_get_regs_len(struct net_device *netdev) { return sizeof(struct ucc_geth); } static void uec_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) { int i; struct ucc_geth_private *ugeth = netdev_priv(netdev); u32 __iomem *ug_regs = (u32 __iomem *)ugeth->ug_regs; u32 *buff = p; for (i = 0; i < sizeof(struct ucc_geth) / sizeof(u32); i++) buff[i] = in_be32(&ug_regs[i]); } static void uec_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { struct ucc_geth_private *ugeth = netdev_priv(netdev); struct ucc_geth_info *ug_info = ugeth->ug_info; int queue = 0; ring->rx_max_pending = UCC_GETH_BD_RING_SIZE_MAX; ring->rx_mini_max_pending = UCC_GETH_BD_RING_SIZE_MAX; ring->rx_jumbo_max_pending = UCC_GETH_BD_RING_SIZE_MAX; ring->tx_max_pending = UCC_GETH_BD_RING_SIZE_MAX; ring->rx_pending = ug_info->bdRingLenRx[queue]; ring->rx_mini_pending = ug_info->bdRingLenRx[queue]; ring->rx_jumbo_pending = ug_info->bdRingLenRx[queue]; ring->tx_pending = ug_info->bdRingLenTx[queue]; } static int uec_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { struct ucc_geth_private *ugeth = netdev_priv(netdev); struct ucc_geth_info *ug_info = ugeth->ug_info; int queue = 0, ret = 0; if (ring->rx_pending < UCC_GETH_RX_BD_RING_SIZE_MIN) { netdev_info(netdev, "RxBD ring size must be no smaller than %d\n", UCC_GETH_RX_BD_RING_SIZE_MIN); return -EINVAL; } if (ring->rx_pending % UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT) { netdev_info(netdev, "RxBD ring size must be multiple of %d\n", UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT); return -EINVAL; } if (ring->tx_pending < UCC_GETH_TX_BD_RING_SIZE_MIN) { netdev_info(netdev, "TxBD ring size must be no smaller than %d\n", UCC_GETH_TX_BD_RING_SIZE_MIN); return -EINVAL; } ug_info->bdRingLenRx[queue] = ring->rx_pending; ug_info->bdRingLenTx[queue] = ring->tx_pending; if (netif_running(netdev)) { /* FIXME: restart automatically */ netdev_info(netdev, "Please re-open the interface\n"); } return ret; } static int uec_get_sset_count(struct net_device *netdev, int sset) { struct ucc_geth_private *ugeth = netdev_priv(netdev); u32 stats_mode = ugeth->ug_info->statisticsMode; int len = 0; switch (sset) { case ETH_SS_STATS: if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) len += UEC_HW_STATS_LEN; if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) len += UEC_TX_FW_STATS_LEN; if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) len += UEC_RX_FW_STATS_LEN; return len; default: return -EOPNOTSUPP; } } static void uec_get_strings(struct net_device *netdev, u32 
stringset, u8 *buf) { struct ucc_geth_private *ugeth = netdev_priv(netdev); u32 stats_mode = ugeth->ug_info->statisticsMode; if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) { memcpy(buf, hw_stat_gstrings, UEC_HW_STATS_LEN * ETH_GSTRING_LEN); buf += UEC_HW_STATS_LEN * ETH_GSTRING_LEN; } if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) { memcpy(buf, tx_fw_stat_gstrings, UEC_TX_FW_STATS_LEN * ETH_GSTRING_LEN); buf += UEC_TX_FW_STATS_LEN * ETH_GSTRING_LEN; } if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) memcpy(buf, rx_fw_stat_gstrings, UEC_RX_FW_STATS_LEN * ETH_GSTRING_LEN); } static void uec_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, uint64_t *data) { struct ucc_geth_private *ugeth = netdev_priv(netdev); u32 stats_mode = ugeth->ug_info->statisticsMode; u32 __iomem *base; int i, j = 0; if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) { if (ugeth->ug_regs) base = (u32 __iomem *)&ugeth->ug_regs->tx64; else base = NULL; for (i = 0; i < UEC_HW_STATS_LEN; i++) data[j++] = base ? in_be32(&base[i]) : 0; } if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) { base = (u32 __iomem *)ugeth->p_tx_fw_statistics_pram; for (i = 0; i < UEC_TX_FW_STATS_LEN; i++) data[j++] = base ? in_be32(&base[i]) : 0; } if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) { base = (u32 __iomem *)ugeth->p_rx_fw_statistics_pram; for (i = 0; i < UEC_RX_FW_STATS_LEN; i++) data[j++] = base ? in_be32(&base[i]) : 0; } } static int uec_nway_reset(struct net_device *netdev) { struct ucc_geth_private *ugeth = netdev_priv(netdev); return phy_start_aneg(ugeth->phydev); } /* Report driver information */ static void uec_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info)); drvinfo->eedump_len = 0; drvinfo->regdump_len = uec_get_regs_len(netdev); } #ifdef CONFIG_PM static void uec_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct ucc_geth_private *ugeth = netdev_priv(netdev); struct phy_device *phydev = ugeth->phydev; if (phydev && phydev->irq) wol->supported |= WAKE_PHY; if (qe_alive_during_sleep()) wol->supported |= WAKE_MAGIC; wol->wolopts = ugeth->wol_en; } static int uec_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct ucc_geth_private *ugeth = netdev_priv(netdev); struct phy_device *phydev = ugeth->phydev; if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC)) return -EINVAL; else if (wol->wolopts & WAKE_PHY && (!phydev || !phydev->irq)) return -EINVAL; else if (wol->wolopts & WAKE_MAGIC && !qe_alive_during_sleep()) return -EINVAL; ugeth->wol_en = wol->wolopts; device_set_wakeup_enable(&netdev->dev, ugeth->wol_en); return 0; } #else #define uec_get_wol NULL #define uec_set_wol NULL #endif /* CONFIG_PM */ static const struct ethtool_ops uec_ethtool_ops = { .get_settings = uec_get_settings, .set_settings = uec_set_settings, .get_drvinfo = uec_get_drvinfo, .get_regs_len = uec_get_regs_len, .get_regs = uec_get_regs, .get_msglevel = uec_get_msglevel, .set_msglevel = uec_set_msglevel, .nway_reset = uec_nway_reset, .get_link = ethtool_op_get_link, .get_ringparam = uec_get_ringparam, .set_ringparam = uec_set_ringparam, .get_pauseparam = uec_get_pauseparam, .set_pauseparam = uec_set_pauseparam, .get_sset_count = 
uec_get_sset_count, .get_strings = uec_get_strings, .get_ethtool_stats = uec_get_ethtool_stats, .get_wol = uec_get_wol, .set_wol = uec_set_wol, .get_ts_info = ethtool_op_get_ts_info, }; void uec_set_ethtool_ops(struct net_device *netdev) { SET_ETHTOOL_OPS(netdev, &uec_ethtool_ops); }
gpl-2.0
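The string and statistics callbacks in the driver above depend on a simple contract: each gstrings table is index-aligned with its counter block, and get_sset_count() reports the sum of the enabled blocks. A standalone sketch of that contract, with invented names (stat_names, fill helpers) rather than the driver's own:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GSTRING_LEN 32	/* stands in for ETH_GSTRING_LEN */

/* Name table and counter array must use the same indices,
 * mirroring hw_stat_gstrings[] and the hardware counter block. */
static const char stat_names[][GSTRING_LEN] = {
	"rx-frames", "tx-frames", "rx-crc-errors",
};
#define N_STATS (sizeof(stat_names) / GSTRING_LEN)

static void get_strings(char *buf)
{
	memcpy(buf, stat_names, sizeof(stat_names));	/* one flat copy */
}

static void get_stats(const uint32_t *hw_counters, uint64_t *data)
{
	size_t i;
	for (i = 0; i < N_STATS; i++)
		data[i] = hw_counters[i];	/* same index as the name */
}

int main(void)
{
	uint32_t hw[3] = { 100, 42, 1 };
	uint64_t data[3];
	char names[sizeof(stat_names)];

	get_strings(names);
	get_stats(hw, data);
	printf("%s = %llu\n", &names[0], (unsigned long long)data[0]);
	return 0;
}

If the two tables ever drift out of order, ethtool silently reports the wrong numbers under the wrong names, which is why the driver keeps them adjacent in the source.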
andrewevans01/T889_Kernel_Recharged
arch/powerpc/platforms/cell/beat_interrupt.c
2831
6882
/* * Celleb/Beat Interrupt controller * * (C) Copyright 2006-2007 TOSHIBA CORPORATION * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/percpu.h> #include <linux/types.h> #include <asm/machdep.h> #include "beat_interrupt.h" #include "beat_wrapper.h" #define MAX_IRQS NR_IRQS static DEFINE_RAW_SPINLOCK(beatic_irq_mask_lock); static uint64_t beatic_irq_mask_enable[(MAX_IRQS+255)/64]; static uint64_t beatic_irq_mask_ack[(MAX_IRQS+255)/64]; static struct irq_host *beatic_host; /* * In this implementation, "virq" == "IRQ plug number", * "(irq_hw_number_t)hwirq" == "IRQ outlet number". */ /* assumption: locked */ static inline void beatic_update_irq_mask(unsigned int irq_plug) { int off; unsigned long masks[4]; off = (irq_plug / 256) * 4; masks[0] = beatic_irq_mask_enable[off + 0] & beatic_irq_mask_ack[off + 0]; masks[1] = beatic_irq_mask_enable[off + 1] & beatic_irq_mask_ack[off + 1]; masks[2] = beatic_irq_mask_enable[off + 2] & beatic_irq_mask_ack[off + 2]; masks[3] = beatic_irq_mask_enable[off + 3] & beatic_irq_mask_ack[off + 3]; if (beat_set_interrupt_mask(irq_plug&~255UL, masks[0], masks[1], masks[2], masks[3]) != 0) panic("Failed to set mask IRQ!"); } static void beatic_mask_irq(struct irq_data *d) { unsigned long flags; raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); beatic_irq_mask_enable[d->irq/64] &= ~(1UL << (63 - (d->irq%64))); beatic_update_irq_mask(d->irq); raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); } static void beatic_unmask_irq(struct irq_data *d) { unsigned long flags; raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); beatic_irq_mask_enable[d->irq/64] |= 1UL << (63 - (d->irq%64)); beatic_update_irq_mask(d->irq); raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); } static void beatic_ack_irq(struct irq_data *d) { unsigned long flags; raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); beatic_irq_mask_ack[d->irq/64] &= ~(1UL << (63 - (d->irq%64))); beatic_update_irq_mask(d->irq); raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); } static void beatic_end_irq(struct irq_data *d) { s64 err; unsigned long flags; err = beat_downcount_of_interrupt(d->irq); if (err != 0) { if ((err & 0xFFFFFFFF) != 0xFFFFFFF5) /* -11: wrong state */ panic("Failed to downcount IRQ! Error = %16llx", err); printk(KERN_ERR "IRQ over-downcounted, plug %d\n", d->irq); } raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); beatic_irq_mask_ack[d->irq/64] |= 1UL << (63 - (d->irq%64)); beatic_update_irq_mask(d->irq); raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); } static struct irq_chip beatic_pic = { .name = "CELL-BEAT", .irq_unmask = beatic_unmask_irq, .irq_mask = beatic_mask_irq, .irq_eoi = beatic_end_irq, }; /* * Dispose binding hardware IRQ number (hw) and Virtual IRQ number (virq), * update flags.
* * Note that the number (virq) is already assigned at upper layer. */ static void beatic_pic_host_unmap(struct irq_host *h, unsigned int virq) { beat_destruct_irq_plug(virq); } /* * Create or update binding hardware IRQ number (hw) and Virtual * IRQ number (virq). This is called only once for a given mapping. * * Note that the number (virq) is already assigned at upper layer. */ static int beatic_pic_host_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hw) { int64_t err; err = beat_construct_and_connect_irq_plug(virq, hw); if (err < 0) return -EIO; irq_set_status_flags(virq, IRQ_LEVEL); irq_set_chip_and_handler(virq, &beatic_pic, handle_fasteoi_irq); return 0; } /* * Translate device-tree interrupt spec to irq_hw_number_t style (ulong), * to pass away to irq_create_mapping(). * * Called from irq_create_of_mapping() only. * Note: We have only 1 entry to translate. */ static int beatic_pic_host_xlate(struct irq_host *h, struct device_node *ct, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_flags) { const u64 *intspec2 = (const u64 *)intspec; *out_hwirq = *intspec2; *out_flags |= IRQ_TYPE_LEVEL_LOW; return 0; } static int beatic_pic_host_match(struct irq_host *h, struct device_node *np) { /* Match all */ return 1; } static struct irq_host_ops beatic_pic_host_ops = { .map = beatic_pic_host_map, .unmap = beatic_pic_host_unmap, .xlate = beatic_pic_host_xlate, .match = beatic_pic_host_match, }; /* * Get an IRQ number * Note: returns VIRQ */ static inline unsigned int beatic_get_irq_plug(void) { int i; uint64_t pending[4], ub; for (i = 0; i < MAX_IRQS; i += 256) { beat_detect_pending_interrupts(i, pending); __asm__ ("cntlzd %0,%1":"=r"(ub): "r"(pending[0] & beatic_irq_mask_enable[i/64+0] & beatic_irq_mask_ack[i/64+0])); if (ub != 64) return i + ub + 0; __asm__ ("cntlzd %0,%1":"=r"(ub): "r"(pending[1] & beatic_irq_mask_enable[i/64+1] & beatic_irq_mask_ack[i/64+1])); if (ub != 64) return i + ub + 64; __asm__ ("cntlzd %0,%1":"=r"(ub): "r"(pending[2] & beatic_irq_mask_enable[i/64+2] & beatic_irq_mask_ack[i/64+2])); if (ub != 64) return i + ub + 128; __asm__ ("cntlzd %0,%1":"=r"(ub): "r"(pending[3] & beatic_irq_mask_enable[i/64+3] & beatic_irq_mask_ack[i/64+3])); if (ub != 64) return i + ub + 192; } return NO_IRQ; } unsigned int beatic_get_irq(void) { unsigned int ret; ret = beatic_get_irq_plug(); if (ret != NO_IRQ) beatic_ack_irq(irq_get_irq_data(ret)); return ret; } /* */ void __init beatic_init_IRQ(void) { int i; memset(beatic_irq_mask_enable, 0, sizeof(beatic_irq_mask_enable)); memset(beatic_irq_mask_ack, 255, sizeof(beatic_irq_mask_ack)); for (i = 0; i < MAX_IRQS; i += 256) beat_set_interrupt_mask(i, 0L, 0L, 0L, 0L); /* Set our get_irq function */ ppc_md.get_irq = beatic_get_irq; /* Allocate an irq host */ beatic_host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0, &beatic_pic_host_ops, 0); BUG_ON(beatic_host == NULL); irq_set_default_host(beatic_host); } void beatic_deinit_IRQ(void) { int i; for (i = 1; i < NR_IRQS; i++) beat_destruct_irq_plug(i); }
gpl-2.0
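The interrupt controller above tracks enable and ack state in arrays of 64-bit words with MSB-first bit numbering, which is why every update computes 1UL << (63 - (irq % 64)); an IRQ is deliverable only when its bit is set in both maps, which is what the masks[] AND in beatic_update_irq_mask() evaluates. A self-contained sketch of that indexing scheme (the helper names are illustrative, not from the driver):

#include <stdint.h>
#include <stdbool.h>

/* MSB-first bitmap helpers mirroring the beatic_irq_mask_* indexing:
 * bit 0 of the map is the most significant bit of word 0. */
static void msb_set(uint64_t *map, unsigned int irq)
{
	map[irq / 64] |= 1ULL << (63 - (irq % 64));
}

static void msb_clear(uint64_t *map, unsigned int irq)
{
	map[irq / 64] &= ~(1ULL << (63 - (irq % 64)));
}

static bool msb_test(const uint64_t *map, unsigned int irq)
{
	return map[irq / 64] & (1ULL << (63 - (irq % 64)));
}

The MSB-first layout is what makes the cntlzd (count leading zeros) instruction in beatic_get_irq_plug() return the lowest pending IRQ number directly.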
grondinm/android_kernel_motorola_msm8974
net/caif/cfpkt_skbuff.c
4879
8802
/* * Copyright (C) ST-Ericsson AB 2010 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com * License terms: GNU General Public License (GPL) version 2 */ #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__ #include <linux/string.h> #include <linux/skbuff.h> #include <linux/hardirq.h> #include <linux/export.h> #include <net/caif/cfpkt.h> #define PKT_PREFIX 48 #define PKT_POSTFIX 2 #define PKT_LEN_WHEN_EXTENDING 128 #define PKT_ERROR(pkt, errmsg) \ do { \ cfpkt_priv(pkt)->erronous = true; \ skb_reset_tail_pointer(&pkt->skb); \ pr_warn(errmsg); \ } while (0) struct cfpktq { struct sk_buff_head head; atomic_t count; /* Lock protects count updates */ spinlock_t lock; }; /* * net/caif/ is generic and does not * understand SKB, so we do this typecast */ struct cfpkt { struct sk_buff skb; }; /* Private data inside SKB */ struct cfpkt_priv_data { struct dev_info dev_info; bool erronous; }; static inline struct cfpkt_priv_data *cfpkt_priv(struct cfpkt *pkt) { return (struct cfpkt_priv_data *) pkt->skb.cb; } static inline bool is_erronous(struct cfpkt *pkt) { return cfpkt_priv(pkt)->erronous; } static inline struct sk_buff *pkt_to_skb(struct cfpkt *pkt) { return &pkt->skb; } static inline struct cfpkt *skb_to_pkt(struct sk_buff *skb) { return (struct cfpkt *) skb; } struct cfpkt *cfpkt_fromnative(enum caif_direction dir, void *nativepkt) { struct cfpkt *pkt = skb_to_pkt(nativepkt); cfpkt_priv(pkt)->erronous = false; return pkt; } EXPORT_SYMBOL(cfpkt_fromnative); void *cfpkt_tonative(struct cfpkt *pkt) { return (void *) pkt; } EXPORT_SYMBOL(cfpkt_tonative); static struct cfpkt *cfpkt_create_pfx(u16 len, u16 pfx) { struct sk_buff *skb; if (likely(in_interrupt())) skb = alloc_skb(len + pfx, GFP_ATOMIC); else skb = alloc_skb(len + pfx, GFP_KERNEL); if (unlikely(skb == NULL)) return NULL; skb_reserve(skb, pfx); return skb_to_pkt(skb); } inline struct cfpkt *cfpkt_create(u16 len) { return cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX); } void cfpkt_destroy(struct cfpkt *pkt) { struct sk_buff *skb = pkt_to_skb(pkt); kfree_skb(skb); } inline bool cfpkt_more(struct cfpkt *pkt) { struct sk_buff *skb = pkt_to_skb(pkt); return skb->len > 0; } int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len) { struct sk_buff *skb = pkt_to_skb(pkt); if (skb_headlen(skb) >= len) { memcpy(data, skb->data, len); return 0; } return !cfpkt_extr_head(pkt, data, len) && !cfpkt_add_head(pkt, data, len); } int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len) { struct sk_buff *skb = pkt_to_skb(pkt); u8 *from; if (unlikely(is_erronous(pkt))) return -EPROTO; if (unlikely(len > skb->len)) { PKT_ERROR(pkt, "read beyond end of packet\n"); return -EPROTO; } if (unlikely(len > skb_headlen(skb))) { if (unlikely(skb_linearize(skb) != 0)) { PKT_ERROR(pkt, "linearize failed\n"); return -EPROTO; } } from = skb_pull(skb, len); from -= len; if (data) memcpy(data, from, len); return 0; } EXPORT_SYMBOL(cfpkt_extr_head); int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len) { struct sk_buff *skb = pkt_to_skb(pkt); u8 *data = dta; u8 *from; if (unlikely(is_erronous(pkt))) return -EPROTO; if (unlikely(skb_linearize(skb) != 0)) { PKT_ERROR(pkt, "linearize failed\n"); return -EPROTO; } if (unlikely(skb->data + len > skb_tail_pointer(skb))) { PKT_ERROR(pkt, "read beyond end of packet\n"); return -EPROTO; } from = skb_tail_pointer(skb) - len; skb_trim(skb, skb->len - len); memcpy(data, from, len); return 0; } int cfpkt_pad_trail(struct cfpkt *pkt, u16 len) { return cfpkt_add_body(pkt, NULL, len); } int cfpkt_add_body(struct 
cfpkt *pkt, const void *data, u16 len) { struct sk_buff *skb = pkt_to_skb(pkt); struct sk_buff *lastskb; u8 *to; u16 addlen = 0; if (unlikely(is_erronous(pkt))) return -EPROTO; lastskb = skb; /* Check whether we need to add space at the tail */ if (unlikely(skb_tailroom(skb) < len)) { if (likely(len < PKT_LEN_WHEN_EXTENDING)) addlen = PKT_LEN_WHEN_EXTENDING; else addlen = len; } /* Check whether we need to change the SKB before writing to the tail */ if (unlikely((addlen > 0) || skb_cloned(skb) || skb_shared(skb))) { /* Make sure data is writable */ if (unlikely(skb_cow_data(skb, addlen, &lastskb) < 0)) { PKT_ERROR(pkt, "cow failed\n"); return -EPROTO; } /* * Is the SKB non-linear after skb_cow_data()? If so, we are * going to add data to the last SKB, so we need to adjust * lengths of the top SKB. */ if (lastskb != skb) { pr_warn("Packet is non-linear\n"); skb->len += len; skb->data_len += len; } } /* All set to put the last SKB and optionally write data there. */ to = skb_put(lastskb, len); if (likely(data)) memcpy(to, data, len); return 0; } inline int cfpkt_addbdy(struct cfpkt *pkt, u8 data) { return cfpkt_add_body(pkt, &data, 1); } int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len) { struct sk_buff *skb = pkt_to_skb(pkt); struct sk_buff *lastskb; u8 *to; const u8 *data = data2; int ret; if (unlikely(is_erronous(pkt))) return -EPROTO; if (unlikely(skb_headroom(skb) < len)) { PKT_ERROR(pkt, "no headroom\n"); return -EPROTO; } /* Make sure data is writable */ ret = skb_cow_data(skb, 0, &lastskb); if (unlikely(ret < 0)) { PKT_ERROR(pkt, "cow failed\n"); return ret; } to = skb_push(skb, len); memcpy(to, data, len); return 0; } EXPORT_SYMBOL(cfpkt_add_head); inline int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len) { return cfpkt_add_body(pkt, data, len); } inline u16 cfpkt_getlen(struct cfpkt *pkt) { struct sk_buff *skb = pkt_to_skb(pkt); return skb->len; } inline u16 cfpkt_iterate(struct cfpkt *pkt, u16 (*iter_func)(u16, void *, u16), u16 data) { /* * Don't care about the performance hit of linearizing, * Checksum should not be used on high-speed interfaces anyway. 
*/ if (unlikely(is_erronous(pkt))) return -EPROTO; if (unlikely(skb_linearize(&pkt->skb) != 0)) { PKT_ERROR(pkt, "linearize failed\n"); return -EPROTO; } return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt)); } int cfpkt_setlen(struct cfpkt *pkt, u16 len) { struct sk_buff *skb = pkt_to_skb(pkt); if (unlikely(is_erronous(pkt))) return -EPROTO; if (likely(len <= skb->len)) { if (unlikely(skb->data_len)) ___pskb_trim(skb, len); else skb_trim(skb, len); return cfpkt_getlen(pkt); } /* Need to expand SKB */ if (unlikely(!cfpkt_pad_trail(pkt, len - skb->len))) PKT_ERROR(pkt, "skb_pad_trail failed\n"); return cfpkt_getlen(pkt); } struct cfpkt *cfpkt_append(struct cfpkt *dstpkt, struct cfpkt *addpkt, u16 expectlen) { struct sk_buff *dst = pkt_to_skb(dstpkt); struct sk_buff *add = pkt_to_skb(addpkt); u16 addlen = skb_headlen(add); u16 neededtailspace; struct sk_buff *tmp; u16 dstlen; u16 createlen; if (unlikely(is_erronous(dstpkt) || is_erronous(addpkt))) { return dstpkt; } if (expectlen > addlen) neededtailspace = expectlen; else neededtailspace = addlen; if (dst->tail + neededtailspace > dst->end) { /* Create a duplicate of 'dst' with more tail space */ struct cfpkt *tmppkt; dstlen = skb_headlen(dst); createlen = dstlen + neededtailspace; tmppkt = cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX); if (tmppkt == NULL) return NULL; tmp = pkt_to_skb(tmppkt); skb_set_tail_pointer(tmp, dstlen); tmp->len = dstlen; memcpy(tmp->data, dst->data, dstlen); cfpkt_destroy(dstpkt); dst = tmp; } memcpy(skb_tail_pointer(dst), add->data, skb_headlen(add)); cfpkt_destroy(addpkt); dst->tail += addlen; dst->len += addlen; return skb_to_pkt(dst); } struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos) { struct sk_buff *skb2; struct sk_buff *skb = pkt_to_skb(pkt); struct cfpkt *tmppkt; u8 *split = skb->data + pos; u16 len2nd = skb_tail_pointer(skb) - split; if (unlikely(is_erronous(pkt))) return NULL; if (skb->data + pos > skb_tail_pointer(skb)) { PKT_ERROR(pkt, "trying to split beyond end of packet\n"); return NULL; } /* Create a new packet for the second part of the data */ tmppkt = cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX, PKT_PREFIX); if (tmppkt == NULL) return NULL; skb2 = pkt_to_skb(tmppkt); if (skb2 == NULL) return NULL; /* Reduce the length of the original packet */ skb_set_tail_pointer(skb, pos); skb->len = pos; memcpy(skb2->data, split, len2nd); skb2->tail += len2nd; skb2->len += len2nd; return skb_to_pkt(skb2); } bool cfpkt_erroneous(struct cfpkt *pkt) { return cfpkt_priv(pkt)->erronous; } struct caif_payload_info *cfpkt_info(struct cfpkt *pkt) { return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb; } EXPORT_SYMBOL(cfpkt_info);
gpl-2.0
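cfpkt_create_pfx() in the record above over-allocates and then calls skb_reserve() so that PKT_PREFIX bytes of headroom are available for headers pushed later by lower protocol layers. A standalone sketch of the same reserve-then-push idea, independent of sk_buff (struct buf and its helpers are invented for illustration):

#include <stdlib.h>
#include <string.h>

/* Invented toy buffer: 'mem' is the allocation, 'data' the payload
 * start, and the gap between them is the headroom. */
struct buf {
	unsigned char *mem;
	unsigned char *data;
	size_t len;
};

/* Like cfpkt_create_pfx(): allocate extra bytes up front and start
 * the payload after them (the skb_reserve() step). */
static struct buf *buf_create(size_t len, size_t headroom)
{
	struct buf *b = malloc(sizeof(*b));
	if (!b)
		return NULL;
	b->mem = malloc(len + headroom);
	if (!b->mem) {
		free(b);
		return NULL;
	}
	b->data = b->mem + headroom;
	b->len = 0;
	return b;
}

/* Like skb_push()/cfpkt_add_head(): prepend by moving 'data' back. */
static int buf_push(struct buf *b, const void *hdr, size_t hlen)
{
	if ((size_t)(b->data - b->mem) < hlen)
		return -1;	/* no headroom left */
	b->data -= hlen;
	memcpy(b->data, hdr, hlen);
	b->len += hlen;
	return 0;
}

Reserving the headroom once at allocation time is what lets cfpkt_add_head() fail fast with "no headroom" instead of reallocating on every prepend.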
bilalliberty/android_kernel_htc_zaraul
drivers/scsi/mac_scsi.c
5135
14648
/* * Generic Macintosh NCR5380 driver * * Copyright 1998, Michael Schmitz <mschmitz@lbl.gov> * * derived in part from: */ /* * Generic Generic NCR5380 driver * * Copyright 1995, Russell King * * ALPHA RELEASE 1. * * For more information, please consult * * NCR 5380 Family * SCSI Protocol Controller * Databook * * NCR Microelectronics * 1635 Aeroplaza Drive * Colorado Springs, CO 80916 * 1+ (719) 578-3400 * 1+ (800) 334-5454 */ /* * $Log: mac_NCR5380.c,v $ */ #include <linux/types.h> #include <linux/stddef.h> #include <linux/ctype.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/signal.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/macintosh.h> #include <asm/macints.h> #include <asm/mac_via.h> #include "scsi.h" #include <scsi/scsi_host.h> #include "mac_scsi.h" /* These control the behaviour of the generic 5380 core */ #define AUTOSENSE #define PSEUDO_DMA #include "NCR5380.h" #if 0 #define NDEBUG (NDEBUG_INTR | NDEBUG_PSEUDO_DMA | NDEBUG_ARBITRATION | NDEBUG_SELECTION | NDEBUG_RESELECTION) #else #define NDEBUG (NDEBUG_ABORT) #endif #define RESET_BOOT #define DRIVER_SETUP extern void via_scsi_clear(void); #ifdef RESET_BOOT static void mac_scsi_reset_boot(struct Scsi_Host *instance); #endif static int setup_called = 0; static int setup_can_queue = -1; static int setup_cmd_per_lun = -1; static int setup_sg_tablesize = -1; static int setup_use_pdma = -1; #ifdef SUPPORT_TAGS static int setup_use_tagged_queuing = -1; #endif static int setup_hostid = -1; /* Time (in jiffies) to wait after a reset; the SCSI standard calls for 250ms, * we usually do 0.5s to be on the safe side. But Toshiba CD-ROMs once more * need ten times the standard value... 
*/ #define TOSHIBA_DELAY #ifdef TOSHIBA_DELAY #define AFTER_RESET_DELAY (5*HZ/2) #else #define AFTER_RESET_DELAY (HZ/2) #endif static volatile unsigned char *mac_scsi_regp = NULL; static volatile unsigned char *mac_scsi_drq = NULL; static volatile unsigned char *mac_scsi_nodrq = NULL; /* * NCR 5380 register access functions */ #if 0 /* Debug versions */ #define CTRL(p,v) (*ctrl = (v)) static char macscsi_read(struct Scsi_Host *instance, int reg) { int iobase = instance->io_port; int i; int *ctrl = &((struct NCR5380_hostdata *)instance->hostdata)->ctrl; CTRL(iobase, 0); i = in_8(iobase + (reg<<4)); CTRL(iobase, 0x40); return i; } static void macscsi_write(struct Scsi_Host *instance, int reg, int value) { int iobase = instance->io_port; int *ctrl = &((struct NCR5380_hostdata *)instance->hostdata)->ctrl; CTRL(iobase, 0); out_8(iobase + (reg<<4), value); CTRL(iobase, 0x40); } #else /* Fast versions */ static __inline__ char macscsi_read(struct Scsi_Host *instance, int reg) { return in_8(instance->io_port + (reg<<4)); } static __inline__ void macscsi_write(struct Scsi_Host *instance, int reg, int value) { out_8(instance->io_port + (reg<<4), value); } #endif /* * Function : mac_scsi_setup(char *str) * * Purpose : booter command line initialization of the overrides array, * * Inputs : str - comma delimited list of options * */ static int __init mac_scsi_setup(char *str) { #ifdef DRIVER_SETUP int ints[7]; (void)get_options( str, ARRAY_SIZE(ints), ints); if (setup_called++ || ints[0] < 1 || ints[0] > 6) { printk(KERN_WARNING "scsi: <mac5380>" " Usage: mac5380=<can_queue>[,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags>,<use_pdma>]\n"); printk(KERN_ALERT "scsi: <mac5380> Bad Penguin parameters?\n"); return 0; } if (ints[0] >= 1) { if (ints[1] > 0) /* no limits on this, just > 0 */ setup_can_queue = ints[1]; } if (ints[0] >= 2) { if (ints[2] > 0) setup_cmd_per_lun = ints[2]; } if (ints[0] >= 3) { if (ints[3] >= 0) { setup_sg_tablesize = ints[3]; /* Must be <= SG_ALL (255) */ if (setup_sg_tablesize > SG_ALL) setup_sg_tablesize = SG_ALL; } } if (ints[0] >= 4) { /* Must be between 0 and 7 */ if (ints[4] >= 0 && ints[4] <= 7) setup_hostid = ints[4]; else if (ints[4] > 7) printk(KERN_WARNING "mac_scsi_setup: invalid host ID %d !\n", ints[4] ); } #ifdef SUPPORT_TAGS if (ints[0] >= 5) { if (ints[5] >= 0) setup_use_tagged_queuing = !!ints[5]; } if (ints[0] == 6) { if (ints[6] >= 0) setup_use_pdma = ints[6]; } #else if (ints[0] == 5) { if (ints[5] >= 0) setup_use_pdma = ints[5]; } #endif /* SUPPORT_TAGS */ #endif /* DRIVER_SETUP */ return 1; } __setup("mac5380=", mac_scsi_setup); /* * Function : int macscsi_detect(struct scsi_host_template * tpnt) * * Purpose : initializes mac NCR5380 driver based on the * command line / compile time port and irq definitions. * * Inputs : tpnt - template for this SCSI adapter. * * Returns : 1 if a host adapter was found, 0 if not. * */ int __init macscsi_detect(struct scsi_host_template * tpnt) { static int called = 0; int flags = 0; struct Scsi_Host *instance; if (!MACH_IS_MAC || called) return( 0 ); if (macintosh_config->scsi_type != MAC_SCSI_OLD) return( 0 ); /* setup variables */ tpnt->can_queue = (setup_can_queue > 0) ? setup_can_queue : CAN_QUEUE; tpnt->cmd_per_lun = (setup_cmd_per_lun > 0) ? setup_cmd_per_lun : CMD_PER_LUN; tpnt->sg_tablesize = (setup_sg_tablesize >= 0) ? 
setup_sg_tablesize : SG_TABLESIZE; if (setup_hostid >= 0) tpnt->this_id = setup_hostid; else { /* use 7 as default */ tpnt->this_id = 7; } #ifdef SUPPORT_TAGS if (setup_use_tagged_queuing < 0) setup_use_tagged_queuing = USE_TAGGED_QUEUING; #endif /* Once we support multiple 5380s (e.g. DuoDock) we'll do something different here */ instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata)); if (macintosh_config->ident == MAC_MODEL_IIFX) { mac_scsi_regp = via1+0x8000; mac_scsi_drq = via1+0xE000; mac_scsi_nodrq = via1+0xC000; /* The IIFX should be able to do true DMA, but pseudo-dma doesn't work */ flags = FLAG_NO_PSEUDO_DMA; } else { mac_scsi_regp = via1+0x10000; mac_scsi_drq = via1+0x6000; mac_scsi_nodrq = via1+0x12000; } if (! setup_use_pdma) flags = FLAG_NO_PSEUDO_DMA; instance->io_port = (unsigned long) mac_scsi_regp; instance->irq = IRQ_MAC_SCSI; #ifdef RESET_BOOT mac_scsi_reset_boot(instance); #endif NCR5380_init(instance, flags); instance->n_io_port = 255; ((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0; if (instance->irq != SCSI_IRQ_NONE) if (request_irq(instance->irq, NCR5380_intr, 0, "ncr5380", instance)) { printk(KERN_WARNING "scsi%d: IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); instance->irq = SCSI_IRQ_NONE; } printk(KERN_INFO "scsi%d: generic 5380 at port %lX irq", instance->host_no, instance->io_port); if (instance->irq == SCSI_IRQ_NONE) printk (KERN_INFO "s disabled"); else printk (KERN_INFO " %d", instance->irq); printk(KERN_INFO " options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d", instance->can_queue, instance->cmd_per_lun, MACSCSI_PUBLIC_RELEASE); printk(KERN_INFO "\nscsi%d:", instance->host_no); NCR5380_print_options(instance); printk("\n"); called = 1; return 1; } int macscsi_release (struct Scsi_Host *shpnt) { if (shpnt->irq != SCSI_IRQ_NONE) free_irq(shpnt->irq, shpnt); NCR5380_exit(shpnt); return 0; } #ifdef RESET_BOOT /* * Our 'bus reset on boot' function */ static void mac_scsi_reset_boot(struct Scsi_Host *instance) { unsigned long end; NCR5380_local_declare(); NCR5380_setup(instance); /* * Do a SCSI reset to clean up the bus during initialization. No messing * with the queues, interrupts, or locks necessary here. */ printk(KERN_INFO "Macintosh SCSI: resetting the SCSI bus..." ); /* get in phase */ NCR5380_write( TARGET_COMMAND_REG, PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) )); /* assert RST */ NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST ); /* The min. reset hold time is 25us, so 40us should be enough */ udelay( 50 ); /* reset RST and interrupt */ NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE ); NCR5380_read( RESET_PARITY_INTERRUPT_REG ); for( end = jiffies + AFTER_RESET_DELAY; time_before(jiffies, end); ) barrier(); printk(KERN_INFO " done\n" ); } #endif const char * macscsi_info (struct Scsi_Host *spnt) { return ""; } /* Pseudo-DMA: (Ove Edlund) The code attempts to catch bus errors that occur if one for example "trips over the cable". XXX: Since bus errors in the PDMA routines never happen on my computer, the bus error code is untested. If the code works as intended, a bus error results in Pseudo-DMA being disabled, meaning that the driver switches to slow handshake. If bus errors are NOT extremely rare, this has to be changed.
*/ #define CP_IO_TO_MEM(s,d,len) \ __asm__ __volatile__ \ (" cmp.w #4,%2\n" \ " bls 8f\n" \ " move.w %1,%%d0\n" \ " neg.b %%d0\n" \ " and.w #3,%%d0\n" \ " sub.w %%d0,%2\n" \ " bra 2f\n" \ " 1: move.b (%0),(%1)+\n" \ " 2: dbf %%d0,1b\n" \ " move.w %2,%%d0\n" \ " lsr.w #5,%%d0\n" \ " bra 4f\n" \ " 3: move.l (%0),(%1)+\n" \ "31: move.l (%0),(%1)+\n" \ "32: move.l (%0),(%1)+\n" \ "33: move.l (%0),(%1)+\n" \ "34: move.l (%0),(%1)+\n" \ "35: move.l (%0),(%1)+\n" \ "36: move.l (%0),(%1)+\n" \ "37: move.l (%0),(%1)+\n" \ " 4: dbf %%d0,3b\n" \ " move.w %2,%%d0\n" \ " lsr.w #2,%%d0\n" \ " and.w #7,%%d0\n" \ " bra 6f\n" \ " 5: move.l (%0),(%1)+\n" \ " 6: dbf %%d0,5b\n" \ " and.w #3,%2\n" \ " bra 8f\n" \ " 7: move.b (%0),(%1)+\n" \ " 8: dbf %2,7b\n" \ " moveq.l #0, %2\n" \ " 9: \n" \ ".section .fixup,\"ax\"\n" \ " .even\n" \ "90: moveq.l #1, %2\n" \ " jra 9b\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n" \ " .align 4\n" \ " .long 1b,90b\n" \ " .long 3b,90b\n" \ " .long 31b,90b\n" \ " .long 32b,90b\n" \ " .long 33b,90b\n" \ " .long 34b,90b\n" \ " .long 35b,90b\n" \ " .long 36b,90b\n" \ " .long 37b,90b\n" \ " .long 5b,90b\n" \ " .long 7b,90b\n" \ ".previous" \ : "=a"(s), "=a"(d), "=d"(len) \ : "0"(s), "1"(d), "2"(len) \ : "d0") static int macscsi_pread (struct Scsi_Host *instance, unsigned char *dst, int len) { unsigned char *d; volatile unsigned char *s; NCR5380_local_declare(); NCR5380_setup(instance); s = mac_scsi_drq+0x60; d = dst; /* These conditions are derived from MacOS */ while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) && !(NCR5380_read(STATUS_REG) & SR_REQ)) ; if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) && (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) { printk(KERN_ERR "Error in macscsi_pread\n"); return -1; } CP_IO_TO_MEM(s, d, len); if (len != 0) { printk(KERN_NOTICE "Bus error in macscsi_pread\n"); return -1; } return 0; } #define CP_MEM_TO_IO(s,d,len) \ __asm__ __volatile__ \ (" cmp.w #4,%2\n" \ " bls 8f\n" \ " move.w %0,%%d0\n" \ " neg.b %%d0\n" \ " and.w #3,%%d0\n" \ " sub.w %%d0,%2\n" \ " bra 2f\n" \ " 1: move.b (%0)+,(%1)\n" \ " 2: dbf %%d0,1b\n" \ " move.w %2,%%d0\n" \ " lsr.w #5,%%d0\n" \ " bra 4f\n" \ " 3: move.l (%0)+,(%1)\n" \ "31: move.l (%0)+,(%1)\n" \ "32: move.l (%0)+,(%1)\n" \ "33: move.l (%0)+,(%1)\n" \ "34: move.l (%0)+,(%1)\n" \ "35: move.l (%0)+,(%1)\n" \ "36: move.l (%0)+,(%1)\n" \ "37: move.l (%0)+,(%1)\n" \ " 4: dbf %%d0,3b\n" \ " move.w %2,%%d0\n" \ " lsr.w #2,%%d0\n" \ " and.w #7,%%d0\n" \ " bra 6f\n" \ " 5: move.l (%0)+,(%1)\n" \ " 6: dbf %%d0,5b\n" \ " and.w #3,%2\n" \ " bra 8f\n" \ " 7: move.b (%0)+,(%1)\n" \ " 8: dbf %2,7b\n" \ " moveq.l #0, %2\n" \ " 9: \n" \ ".section .fixup,\"ax\"\n" \ " .even\n" \ "90: moveq.l #1, %2\n" \ " jra 9b\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n" \ " .align 4\n" \ " .long 1b,90b\n" \ " .long 3b,90b\n" \ " .long 31b,90b\n" \ " .long 32b,90b\n" \ " .long 33b,90b\n" \ " .long 34b,90b\n" \ " .long 35b,90b\n" \ " .long 36b,90b\n" \ " .long 37b,90b\n" \ " .long 5b,90b\n" \ " .long 7b,90b\n" \ ".previous" \ : "=a"(s), "=a"(d), "=d"(len) \ : "0"(s), "1"(d), "2"(len) \ : "d0") static int macscsi_pwrite (struct Scsi_Host *instance, unsigned char *src, int len) { unsigned char *s; volatile unsigned char *d; NCR5380_local_declare(); NCR5380_setup(instance); s = src; d = mac_scsi_drq; /* These conditions are derived from MacOS */ while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) && (!(NCR5380_read(STATUS_REG) & SR_REQ) || (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))) ; if 
(!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ)) { printk(KERN_ERR "Error in macscsi_pwrite\n"); return -1; } CP_MEM_TO_IO(s, d, len); if (len != 0) { printk(KERN_NOTICE "Bus error in macscsi_pwrite\n"); return -1; } return 0; } #include "NCR5380.c" static struct scsi_host_template driver_template = { .proc_name = "Mac5380", .proc_info = macscsi_proc_info, .name = "Macintosh NCR5380 SCSI", .detect = macscsi_detect, .release = macscsi_release, .info = macscsi_info, .queuecommand = macscsi_queue_command, .eh_abort_handler = macscsi_abort, .eh_bus_reset_handler = macscsi_bus_reset, .can_queue = CAN_QUEUE, .this_id = 7, .sg_tablesize = SG_ALL, .cmd_per_lun = CMD_PER_LUN, .use_clustering = DISABLE_CLUSTERING }; #include "scsi_module.c"
gpl-2.0
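The mac5380= boot-option handler in the record above relies on the kernel's get_options(), which stores the values in ints[1..] and the number of integers it parsed in ints[0]; every bounds check in mac_scsi_setup() is written against that convention. A userspace stand-in showing the same contract (parse_ints here is a hypothetical re-implementation for illustration, not the kernel helper):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical userspace stand-in for the kernel's get_options():
 * parses up to nmax-1 comma-separated integers from str, stores the
 * values in ints[1..] and the count in ints[0]. */
static void parse_ints(const char *str, int nmax, int *ints)
{
	int n = 0;
	char *end;

	while (n < nmax - 1 && *str) {
		long v = strtol(str, &end, 0);
		if (end == str)
			break;		/* no digits found */
		ints[++n] = (int)v;
		if (*end != ',')
			break;		/* end of the list */
		str = end + 1;		/* skip the comma */
	}
	ints[0] = n;
}

int main(void)
{
	int ints[7];

	/* corresponds to booting with mac5380=16,2,255,7 */
	parse_ints("16,2,255,7", 7, ints);
	printf("count=%d can_queue=%d cmd_per_lun=%d sg_tablesize=%d hostid=%d\n",
	       ints[0], ints[1], ints[2], ints[3], ints[4]);
	return 0;
}

Keeping the count in slot 0 is what lets the setup function treat every trailing option as optional: it only reads ints[k] after checking ints[0] >= k.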
flar2/m8-Sense-4.4.3
drivers/net/ethernet/fujitsu/fmvj18x_cs.c
5135
34182
/*====================================================================== fmvj18x_cs.c 2.8 2002/03/23 A fmvj18x (and its compatibles) PCMCIA client driver Contributed by Shingo Fujimoto, shingo@flab.fujitsu.co.jp TDK LAK-CD021 and CONTEC C-NET(PC)C support added by Nobuhiro Katayama, kata-n@po.iijnet.or.jp The PCMCIA client code is based on code written by David Hinds. Network code is based on the "FMV-18x driver" by Yutaka TAMIYA but is actually largely Donald Becker's AT1700 driver, which carries the following attribution: Written 1993-94 by Donald Becker. Copyright 1993 United States Government as represented by the Director, National Security Agency. This software may be used and distributed according to the terms of the GNU General Public License, incorporated herein by reference. The author may be reached as becker@scyld.com, or C/O Scyld Computing Corporation 410 Severn Ave., Suite 210 Annapolis MD 21403 ======================================================================*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define DRV_NAME "fmvj18x_cs" #define DRV_VERSION "2.9" #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/in.h> #include <linux/delay.h> #include <linux/ethtool.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/ioport.h> #include <linux/crc32.h> #include <pcmcia/cistpl.h> #include <pcmcia/ciscode.h> #include <pcmcia/ds.h> #include <asm/uaccess.h> #include <asm/io.h> /*====================================================================*/ /* Module parameters */ MODULE_DESCRIPTION("fmvj18x and compatible PCMCIA ethernet driver"); MODULE_LICENSE("GPL"); #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) /* SRAM configuration */ /* 0:4KB*2 TX buffer else:8KB*2 TX buffer */ INT_MODULE_PARM(sram_config, 0); /*====================================================================*/ /* PCMCIA event handlers */ static int fmvj18x_config(struct pcmcia_device *link); static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id); static int fmvj18x_setup_mfc(struct pcmcia_device *link); static void fmvj18x_release(struct pcmcia_device *link); static void fmvj18x_detach(struct pcmcia_device *p_dev); /* LAN controller(MBH86960A) specific routines */ static int fjn_config(struct net_device *dev, struct ifmap *map); static int fjn_open(struct net_device *dev); static int fjn_close(struct net_device *dev); static netdev_tx_t fjn_start_xmit(struct sk_buff *skb, struct net_device *dev); static irqreturn_t fjn_interrupt(int irq, void *dev_id); static void fjn_rx(struct net_device *dev); static void fjn_reset(struct net_device *dev); static void set_rx_mode(struct net_device *dev); static void fjn_tx_timeout(struct net_device *dev); static const struct ethtool_ops netdev_ethtool_ops; /* card type */ typedef enum { MBH10302, MBH10304, TDK, CONTEC, LA501, UNGERMANN, XXX10304, NEC, KME } cardtype_t; /* driver specific data structure */ typedef struct local_info_t { struct pcmcia_device *p_dev; long open_time; uint tx_started:1; uint tx_queue; u_short tx_queue_len; cardtype_t cardtype; u_short sent; u_char __iomem *base; } local_info_t; #define MC_FILTERBREAK 64 /*====================================================================*/ /* ioport offset from the base address */ #define TX_STATUS 0 /* transmit status 
register */ #define RX_STATUS 1 /* receive status register */ #define TX_INTR 2 /* transmit interrupt mask register */ #define RX_INTR 3 /* receive interrupt mask register */ #define TX_MODE 4 /* transmit mode register */ #define RX_MODE 5 /* receive mode register */ #define CONFIG_0 6 /* configuration register 0 */ #define CONFIG_1 7 /* configuration register 1 */ #define NODE_ID 8 /* node ID register (bank 0) */ #define MAR_ADR 8 /* multicast address registers (bank 1) */ #define DATAPORT 8 /* buffer mem port registers (bank 2) */ #define TX_START 10 /* transmit start register */ #define COL_CTRL 11 /* 16 collision control register */ #define BMPR12 12 /* reserved */ #define BMPR13 13 /* reserved */ #define RX_SKIP 14 /* skip received packet register */ #define LAN_CTRL 16 /* LAN card control register */ #define MAC_ID 0x1a /* hardware address */ #define UNGERMANN_MAC_ID 0x18 /* UNGERMANN-BASS hardware address */ /* control bits */ #define ENA_TMT_OK 0x80 #define ENA_TMT_REC 0x20 #define ENA_COL 0x04 #define ENA_16_COL 0x02 #define ENA_TBUS_ERR 0x01 #define ENA_PKT_RDY 0x80 #define ENA_BUS_ERR 0x40 #define ENA_LEN_ERR 0x08 #define ENA_ALG_ERR 0x04 #define ENA_CRC_ERR 0x02 #define ENA_OVR_FLO 0x01 /* flags */ #define F_TMT_RDY 0x80 /* can accept new packet */ #define F_NET_BSY 0x40 /* carrier is detected */ #define F_TMT_OK 0x20 /* send packet successfully */ #define F_SRT_PKT 0x10 /* short packet error */ #define F_COL_ERR 0x04 /* collision error */ #define F_16_COL 0x02 /* 16 collision error */ #define F_TBUS_ERR 0x01 /* bus read error */ #define F_PKT_RDY 0x80 /* packet(s) in buffer */ #define F_BUS_ERR 0x40 /* bus read error */ #define F_LEN_ERR 0x08 /* short packet */ #define F_ALG_ERR 0x04 /* frame error */ #define F_CRC_ERR 0x02 /* CRC error */ #define F_OVR_FLO 0x01 /* overflow error */ #define F_BUF_EMP 0x40 /* receive buffer is empty */ #define F_SKP_PKT 0x05 /* drop packet in buffer */ /* default bitmaps */ #define D_TX_INTR ( ENA_TMT_OK ) #define D_RX_INTR ( ENA_PKT_RDY | ENA_LEN_ERR \ | ENA_ALG_ERR | ENA_CRC_ERR | ENA_OVR_FLO ) #define TX_STAT_M ( F_TMT_RDY ) #define RX_STAT_M ( F_PKT_RDY | F_LEN_ERR \ | F_ALG_ERR | F_CRC_ERR | F_OVR_FLO ) /* commands */ #define D_TX_MODE 0x06 /* no tests, detect carrier */ #define ID_MATCHED 0x02 /* (RX_MODE) */ #define RECV_ALL 0x03 /* (RX_MODE) */ #define CONFIG0_DFL 0x5a /* 16bit bus, 4K x 2 Tx queues */ #define CONFIG0_DFL_1 0x5e /* 16bit bus, 8K x 2 Tx queues */ #define CONFIG0_RST 0xda /* Data Link Controller off (CONFIG_0) */ #define CONFIG0_RST_1 0xde /* Data Link Controller off (CONFIG_0) */ #define BANK_0 0xa0 /* bank 0 (CONFIG_1) */ #define BANK_1 0xa4 /* bank 1 (CONFIG_1) */ #define BANK_2 0xa8 /* bank 2 (CONFIG_1) */ #define CHIP_OFF 0x80 /* control chip power off (CONFIG_1) */ #define DO_TX 0x80 /* do transmit packet */ #define SEND_PKT 0x81 /* send a packet */ #define AUTO_MODE 0x07 /* Auto skip packet on 16 col detected */ #define MANU_MODE 0x03 /* Stop and skip packet on 16 col */ #define TDK_AUTO_MODE 0x47 /* Auto skip packet on 16 col detected */ #define TDK_MANU_MODE 0x43 /* Stop and skip packet on 16 col */ #define INTR_OFF 0x0d /* LAN controller ignores interrupts */ #define INTR_ON 0x1d /* LAN controller will catch interrupts */ #define TX_TIMEOUT ((400*HZ)/1000) #define BANK_0U 0x20 /* bank 0 (CONFIG_1) */ #define BANK_1U 0x24 /* bank 1 (CONFIG_1) */ #define BANK_2U 0x28 /* bank 2 (CONFIG_1) */ static const struct net_device_ops fjn_netdev_ops = { .ndo_open = fjn_open, .ndo_stop = fjn_close, .ndo_start_xmit =
fjn_start_xmit, .ndo_tx_timeout = fjn_tx_timeout, .ndo_set_config = fjn_config, .ndo_set_rx_mode = set_rx_mode, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static int fmvj18x_probe(struct pcmcia_device *link) { local_info_t *lp; struct net_device *dev; dev_dbg(&link->dev, "fmvj18x_attach()\n"); /* Make up a FMVJ18x specific data structure */ dev = alloc_etherdev(sizeof(local_info_t)); if (!dev) return -ENOMEM; lp = netdev_priv(dev); link->priv = dev; lp->p_dev = link; lp->base = NULL; /* The io structure describes IO port mapping */ link->resource[0]->end = 32; link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; /* General socket configuration */ link->config_flags |= CONF_ENABLE_IRQ; dev->netdev_ops = &fjn_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); return fmvj18x_config(link); } /* fmvj18x_attach */ /*====================================================================*/ static void fmvj18x_detach(struct pcmcia_device *link) { struct net_device *dev = link->priv; dev_dbg(&link->dev, "fmvj18x_detach\n"); unregister_netdev(dev); fmvj18x_release(link); free_netdev(dev); } /* fmvj18x_detach */ /*====================================================================*/ static int mfc_try_io_port(struct pcmcia_device *link) { int i, ret; static const unsigned int serial_base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 }; for (i = 0; i < 5; i++) { link->resource[1]->start = serial_base[i]; link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; if (link->resource[1]->start == 0) { link->resource[1]->end = 0; pr_notice("out of resource for serial\n"); } ret = pcmcia_request_io(link); if (ret == 0) return ret; } return ret; } static int ungermann_try_io_port(struct pcmcia_device *link) { int ret; unsigned int ioaddr; /* Ungermann-Bass Access/CARD accepts 0x300,0x320,0x340,0x360 0x380,0x3c0 only for ioport. */ for (ioaddr = 0x300; ioaddr < 0x3e0; ioaddr += 0x20) { link->resource[0]->start = ioaddr; ret = pcmcia_request_io(link); if (ret == 0) { /* calculate ConfigIndex value */ link->config_index = ((link->resource[0]->start & 0x0f0) >> 3) | 0x22; return ret; } } return ret; /* RequestIO failed */ } static int fmvj18x_ioprobe(struct pcmcia_device *p_dev, void *priv_data) { return 0; /* strange, but that's what the code did already before... */ } static int fmvj18x_config(struct pcmcia_device *link) { struct net_device *dev = link->priv; local_info_t *lp = netdev_priv(dev); int i, ret; unsigned int ioaddr; cardtype_t cardtype; char *card_name = "unknown"; u8 *buf; size_t len; u_char buggybuf[32]; dev_dbg(&link->dev, "fmvj18x_config\n"); link->io_lines = 5; len = pcmcia_get_tuple(link, CISTPL_FUNCE, &buf); kfree(buf); if (len) { /* Yes, I have CISTPL_FUNCE. 
Let's check CISTPL_MANFID */ ret = pcmcia_loop_config(link, fmvj18x_ioprobe, NULL); if (ret != 0) goto failed; switch (link->manf_id) { case MANFID_TDK: cardtype = TDK; if (link->card_id == PRODID_TDK_GN3410 || link->card_id == PRODID_TDK_NP9610 || link->card_id == PRODID_TDK_MN3200) { /* MultiFunction Card */ link->config_base = 0x800; link->config_index = 0x47; link->resource[1]->end = 8; } break; case MANFID_NEC: cardtype = NEC; /* MultiFunction Card */ link->config_base = 0x800; link->config_index = 0x47; link->resource[1]->end = 8; break; case MANFID_KME: cardtype = KME; /* MultiFunction Card */ link->config_base = 0x800; link->config_index = 0x47; link->resource[1]->end = 8; break; case MANFID_CONTEC: cardtype = CONTEC; break; case MANFID_FUJITSU: if (link->config_base == 0x0fe0) cardtype = MBH10302; else if (link->card_id == PRODID_FUJITSU_MBH10302) /* RATOC REX-5588/9822/4886's PRODID are 0004(=MBH10302), but these are MBH10304 based card. */ cardtype = MBH10304; else if (link->card_id == PRODID_FUJITSU_MBH10304) cardtype = MBH10304; else cardtype = LA501; break; default: cardtype = MBH10304; } } else { /* old type card */ switch (link->manf_id) { case MANFID_FUJITSU: if (link->card_id == PRODID_FUJITSU_MBH10304) { cardtype = XXX10304; /* MBH10304 with buggy CIS */ link->config_index = 0x20; } else { cardtype = MBH10302; /* NextCom NC5310, etc. */ link->config_index = 1; } break; case MANFID_UNGERMANN: cardtype = UNGERMANN; break; default: cardtype = MBH10302; link->config_index = 1; } } if (link->resource[1]->end != 0) { ret = mfc_try_io_port(link); if (ret != 0) goto failed; } else if (cardtype == UNGERMANN) { ret = ungermann_try_io_port(link); if (ret != 0) goto failed; } else { ret = pcmcia_request_io(link); if (ret) goto failed; } ret = pcmcia_request_irq(link, fjn_interrupt); if (ret) goto failed; ret = pcmcia_enable_device(link); if (ret) goto failed; dev->irq = link->irq; dev->base_addr = link->resource[0]->start; if (resource_size(link->resource[1]) != 0) { ret = fmvj18x_setup_mfc(link); if (ret != 0) goto failed; } ioaddr = dev->base_addr; /* Reset controller */ if (sram_config == 0) outb(CONFIG0_RST, ioaddr + CONFIG_0); else outb(CONFIG0_RST_1, ioaddr + CONFIG_0); /* Power On chip and select bank 0 */ if (cardtype == MBH10302) outb(BANK_0, ioaddr + CONFIG_1); else outb(BANK_0U, ioaddr + CONFIG_1); /* Set hardware address */ switch (cardtype) { case MBH10304: case TDK: case LA501: case CONTEC: case NEC: case KME: if (cardtype == MBH10304) { card_name = "FMV-J182"; len = pcmcia_get_tuple(link, CISTPL_FUNCE, &buf); if (len < 11) { kfree(buf); goto failed; } /* Read MACID from CIS */ for (i = 5; i < 11; i++) dev->dev_addr[i] = buf[i]; kfree(buf); } else { if (pcmcia_get_mac_from_cis(link, dev)) goto failed; if( cardtype == TDK ) { card_name = "TDK LAK-CD021"; } else if( cardtype == LA501 ) { card_name = "LA501"; } else if( cardtype == NEC ) { card_name = "PK-UG-J001"; } else if( cardtype == KME ) { card_name = "Panasonic"; } else { card_name = "C-NET(PC)C"; } } break; case UNGERMANN: /* Read MACID from register */ for (i = 0; i < 6; i++) dev->dev_addr[i] = inb(ioaddr + UNGERMANN_MAC_ID + i); card_name = "Access/CARD"; break; case XXX10304: /* Read MACID from Buggy CIS */ if (fmvj18x_get_hwinfo(link, buggybuf) == -1) { pr_notice("unable to read hardware net address\n"); goto failed; } for (i = 0 ; i < 6; i++) { dev->dev_addr[i] = buggybuf[i]; } card_name = "FMV-J182"; break; case MBH10302: default: /* Read MACID from register */ for (i = 0; i < 6; i++) dev->dev_addr[i] = 
inb(ioaddr + MAC_ID + i); card_name = "FMV-J181"; break; } lp->cardtype = cardtype; SET_NETDEV_DEV(dev, &link->dev); if (register_netdev(dev) != 0) { pr_notice("register_netdev() failed\n"); goto failed; } /* print current configuration */ netdev_info(dev, "%s, sram %s, port %#3lx, irq %d, hw_addr %pM\n", card_name, sram_config == 0 ? "4K TX*2" : "8K TX*2", dev->base_addr, dev->irq, dev->dev_addr); return 0; failed: fmvj18x_release(link); return -ENODEV; } /* fmvj18x_config */ /*====================================================================*/ static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id) { u_char __iomem *base; int i, j; /* Allocate a small memory window */ link->resource[2]->flags |= WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; link->resource[2]->start = 0; link->resource[2]->end = 0; i = pcmcia_request_window(link, link->resource[2], 0); if (i != 0) return -1; base = ioremap(link->resource[2]->start, resource_size(link->resource[2])); pcmcia_map_mem_page(link, link->resource[2], 0); /* * MBH10304 CISTPL_FUNCE_LAN_NODE_ID format * 22 0d xx xx xx 04 06 yy yy yy yy yy yy ff * 'xx' is garbage. * 'yy' is MAC address. */ for (i = 0; i < 0x200; i++) { if (readb(base+i*2) == 0x22) { if (readb(base+(i-1)*2) == 0xff && readb(base+(i+5)*2) == 0x04 && readb(base+(i+6)*2) == 0x06 && readb(base+(i+13)*2) == 0xff) break; } } if (i != 0x200) { for (j = 0 ; j < 6; j++,i++) { node_id[j] = readb(base+(i+7)*2); } } iounmap(base); j = pcmcia_release_window(link, link->resource[2]); return (i != 0x200) ? 0 : -1; } /* fmvj18x_get_hwinfo */ /*====================================================================*/ static int fmvj18x_setup_mfc(struct pcmcia_device *link) { int i; struct net_device *dev = link->priv; unsigned int ioaddr; local_info_t *lp = netdev_priv(dev); /* Allocate a small memory window */ link->resource[3]->flags = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; link->resource[3]->start = link->resource[3]->end = 0; i = pcmcia_request_window(link, link->resource[3], 0); if (i != 0) return -1; lp->base = ioremap(link->resource[3]->start, resource_size(link->resource[3])); if (lp->base == NULL) { netdev_notice(dev, "ioremap failed\n"); return -1; } i = pcmcia_map_mem_page(link, link->resource[3], 0); if (i != 0) { iounmap(lp->base); lp->base = NULL; return -1; } ioaddr = dev->base_addr; writeb(0x47, lp->base+0x800); /* Config Option Register of LAN */ writeb(0x0, lp->base+0x802); /* Config and Status Register */ writeb(ioaddr & 0xff, lp->base+0x80a); /* I/O Base(Low) of LAN */ writeb((ioaddr >> 8) & 0xff, lp->base+0x80c); /* I/O Base(High) of LAN */ writeb(0x45, lp->base+0x820); /* Config Option Register of Modem */ writeb(0x8, lp->base+0x822); /* Config and Status Register */ return 0; } /*====================================================================*/ static void fmvj18x_release(struct pcmcia_device *link) { struct net_device *dev = link->priv; local_info_t *lp = netdev_priv(dev); u_char __iomem *tmp; dev_dbg(&link->dev, "fmvj18x_release\n"); if (lp->base != NULL) { tmp = lp->base; lp->base = NULL; /* set NULL before iounmap */ iounmap(tmp); } pcmcia_disable_device(link); } static int fmvj18x_suspend(struct pcmcia_device *link) { struct net_device *dev = link->priv; if (link->open) netif_device_detach(dev); return 0; } static int fmvj18x_resume(struct pcmcia_device *link) { struct net_device *dev = link->priv; if (link->open) { fjn_reset(dev); netif_device_attach(dev); } return 0; } 
/*====================================================================*/ static const struct pcmcia_device_id fmvj18x_ids[] = { PCMCIA_DEVICE_MANF_CARD(0x0004, 0x0004), PCMCIA_DEVICE_PROD_ID12("EAGLE Technology", "NE200 ETHERNET LAN MBH10302 04", 0x528c88c4, 0x74f91e59), PCMCIA_DEVICE_PROD_ID12("Eiger Labs,Inc", "EPX-10BT PC Card Ethernet 10BT", 0x53af556e, 0x877f9922), PCMCIA_DEVICE_PROD_ID12("Eiger labs,Inc.", "EPX-10BT PC Card Ethernet 10BT", 0xf47e6c66, 0x877f9922), PCMCIA_DEVICE_PROD_ID12("FUJITSU", "LAN Card(FMV-J182)", 0x6ee5a3d8, 0x5baf31db), PCMCIA_DEVICE_PROD_ID12("FUJITSU", "MBH10308", 0x6ee5a3d8, 0x3f04875e), PCMCIA_DEVICE_PROD_ID12("FUJITSU TOWA", "LA501", 0xb8451188, 0x12939ba2), PCMCIA_DEVICE_PROD_ID12("HITACHI", "HT-4840-11", 0xf4f43949, 0x773910f4), PCMCIA_DEVICE_PROD_ID12("NextComK.K.", "NC5310B Ver1.0 ", 0x8cef4d3a, 0x075fc7b6), PCMCIA_DEVICE_PROD_ID12("NextComK.K.", "NC5310 Ver1.0 ", 0x8cef4d3a, 0xbccf43e6), PCMCIA_DEVICE_PROD_ID12("RATOC System Inc.", "10BASE_T CARD R280", 0x85c10e17, 0xd9413666), PCMCIA_DEVICE_PROD_ID12("TDK", "LAC-CD02x", 0x1eae9475, 0x8fa0ee70), PCMCIA_DEVICE_PROD_ID12("TDK", "LAC-CF010", 0x1eae9475, 0x7683bc9a), PCMCIA_DEVICE_PROD_ID1("CONTEC Co.,Ltd.", 0x58d8fee2), PCMCIA_DEVICE_PROD_ID1("PCMCIA LAN MBH10304 ES", 0x2599f454), PCMCIA_DEVICE_PROD_ID1("PCMCIA MBH10302", 0x8f4005da), PCMCIA_DEVICE_PROD_ID1("UBKK,V2.0", 0x90888080), PCMCIA_PFC_DEVICE_PROD_ID12(0, "TDK", "GlobalNetworker 3410/3412", 0x1eae9475, 0xd9a93bed), PCMCIA_PFC_DEVICE_PROD_ID12(0, "NEC", "PK-UG-J001" ,0x18df0ba0 ,0x831b1064), PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0d0a), PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a), PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01), PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05), PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0b05), PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101), PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, fmvj18x_ids); static struct pcmcia_driver fmvj18x_cs_driver = { .owner = THIS_MODULE, .name = "fmvj18x_cs", .probe = fmvj18x_probe, .remove = fmvj18x_detach, .id_table = fmvj18x_ids, .suspend = fmvj18x_suspend, .resume = fmvj18x_resume, }; static int __init init_fmvj18x_cs(void) { return pcmcia_register_driver(&fmvj18x_cs_driver); } static void __exit exit_fmvj18x_cs(void) { pcmcia_unregister_driver(&fmvj18x_cs_driver); } module_init(init_fmvj18x_cs); module_exit(exit_fmvj18x_cs); /*====================================================================*/ static irqreturn_t fjn_interrupt(int dummy, void *dev_id) { struct net_device *dev = dev_id; local_info_t *lp = netdev_priv(dev); unsigned int ioaddr; unsigned short tx_stat, rx_stat; ioaddr = dev->base_addr; /* avoid multiple interrupts */ outw(0x0000, ioaddr + TX_INTR); /* wait for a while */ udelay(1); /* get status */ tx_stat = inb(ioaddr + TX_STATUS); rx_stat = inb(ioaddr + RX_STATUS); /* clear status */ outb(tx_stat, ioaddr + TX_STATUS); outb(rx_stat, ioaddr + RX_STATUS); pr_debug("%s: interrupt, rx_status %02x.\n", dev->name, rx_stat); pr_debug(" tx_status %02x.\n", tx_stat); if (rx_stat || (inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) { /* there is packet(s) in rx buffer */ fjn_rx(dev); } if (tx_stat & F_TMT_RDY) { dev->stats.tx_packets += lp->sent ; lp->sent = 0 ; if (lp->tx_queue) { outb(DO_TX | lp->tx_queue, ioaddr + TX_START); lp->sent = lp->tx_queue ; lp->tx_queue = 0; lp->tx_queue_len = 0; dev->trans_start = jiffies; } else { lp->tx_started = 0; } netif_wake_queue(dev); } pr_debug("%s: exiting interrupt,\n", dev->name); pr_debug(" tx_status %02x, rx_status %02x.\n", 
tx_stat, rx_stat); outb(D_TX_INTR, ioaddr + TX_INTR); outb(D_RX_INTR, ioaddr + RX_INTR); if (lp->base != NULL) { /* Ack interrupt for multifunction card */ writeb(0x01, lp->base+0x802); writeb(0x09, lp->base+0x822); } return IRQ_HANDLED; } /* fjn_interrupt */ /*====================================================================*/ static void fjn_tx_timeout(struct net_device *dev) { struct local_info_t *lp = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; netdev_notice(dev, "transmit timed out with status %04x, %s?\n", htons(inw(ioaddr + TX_STATUS)), inb(ioaddr + TX_STATUS) & F_TMT_RDY ? "IRQ conflict" : "network cable problem"); netdev_notice(dev, "timeout registers: %04x %04x %04x " "%04x %04x %04x %04x %04x.\n", htons(inw(ioaddr + 0)), htons(inw(ioaddr + 2)), htons(inw(ioaddr + 4)), htons(inw(ioaddr + 6)), htons(inw(ioaddr + 8)), htons(inw(ioaddr + 10)), htons(inw(ioaddr + 12)), htons(inw(ioaddr + 14))); dev->stats.tx_errors++; /* ToDo: We should try to restart the adaptor... */ local_irq_disable(); fjn_reset(dev); lp->tx_started = 0; lp->tx_queue = 0; lp->tx_queue_len = 0; lp->sent = 0; lp->open_time = jiffies; local_irq_enable(); netif_wake_queue(dev); } static netdev_tx_t fjn_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct local_info_t *lp = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; short length = skb->len; if (length < ETH_ZLEN) { if (skb_padto(skb, ETH_ZLEN)) return NETDEV_TX_OK; length = ETH_ZLEN; } netif_stop_queue(dev); { unsigned char *buf = skb->data; if (length > ETH_FRAME_LEN) { netdev_notice(dev, "Attempting to send a large packet (%d bytes)\n", length); return NETDEV_TX_BUSY; } netdev_dbg(dev, "Transmitting a packet of length %lu\n", (unsigned long)skb->len); dev->stats.tx_bytes += skb->len; /* Disable both interrupts. */ outw(0x0000, ioaddr + TX_INTR); /* wait for a while */ udelay(1); outw(length, ioaddr + DATAPORT); outsw(ioaddr + DATAPORT, buf, (length + 1) >> 1); lp->tx_queue++; lp->tx_queue_len += ((length+3) & ~1); if (lp->tx_started == 0) { /* If the Tx is idle, always trigger a transmit. */ outb(DO_TX | lp->tx_queue, ioaddr + TX_START); lp->sent = lp->tx_queue ; lp->tx_queue = 0; lp->tx_queue_len = 0; lp->tx_started = 1; netif_start_queue(dev); } else { if( sram_config == 0 ) { if (lp->tx_queue_len < (4096 - (ETH_FRAME_LEN +2)) ) /* Yes, there is room for one more packet. */ netif_start_queue(dev); } else { if (lp->tx_queue_len < (8192 - (ETH_FRAME_LEN +2)) && lp->tx_queue < 127 ) /* Yes, there is room for one more packet. 
*/ netif_start_queue(dev); } } /* Re-enable interrupts */ outb(D_TX_INTR, ioaddr + TX_INTR); outb(D_RX_INTR, ioaddr + RX_INTR); } dev_kfree_skb (skb); return NETDEV_TX_OK; } /* fjn_start_xmit */ /*====================================================================*/ static void fjn_reset(struct net_device *dev) { struct local_info_t *lp = netdev_priv(dev); unsigned int ioaddr = dev->base_addr; int i; netdev_dbg(dev, "fjn_reset() called\n"); /* Reset controller */ if( sram_config == 0 ) outb(CONFIG0_RST, ioaddr + CONFIG_0); else outb(CONFIG0_RST_1, ioaddr + CONFIG_0); /* Power On chip and select bank 0 */ if (lp->cardtype == MBH10302) outb(BANK_0, ioaddr + CONFIG_1); else outb(BANK_0U, ioaddr + CONFIG_1); /* Set Tx modes */ outb(D_TX_MODE, ioaddr + TX_MODE); /* set Rx modes */ outb(ID_MATCHED, ioaddr + RX_MODE); /* Set hardware address */ for (i = 0; i < 6; i++) outb(dev->dev_addr[i], ioaddr + NODE_ID + i); /* (re)initialize the multicast table */ set_rx_mode(dev); /* Switch to bank 2 (runtime mode) */ if (lp->cardtype == MBH10302) outb(BANK_2, ioaddr + CONFIG_1); else outb(BANK_2U, ioaddr + CONFIG_1); /* set 16col ctrl bits */ if( lp->cardtype == TDK || lp->cardtype == CONTEC) outb(TDK_AUTO_MODE, ioaddr + COL_CTRL); else outb(AUTO_MODE, ioaddr + COL_CTRL); /* clear Reserved Regs */ outb(0x00, ioaddr + BMPR12); outb(0x00, ioaddr + BMPR13); /* reset Skip packet reg. */ outb(0x01, ioaddr + RX_SKIP); /* Enable Tx and Rx */ if( sram_config == 0 ) outb(CONFIG0_DFL, ioaddr + CONFIG_0); else outb(CONFIG0_DFL_1, ioaddr + CONFIG_0); /* Init receive pointer ? */ inw(ioaddr + DATAPORT); inw(ioaddr + DATAPORT); /* Clear all status */ outb(0xff, ioaddr + TX_STATUS); outb(0xff, ioaddr + RX_STATUS); if (lp->cardtype == MBH10302) outb(INTR_OFF, ioaddr + LAN_CTRL); /* Turn on Rx interrupts */ outb(D_TX_INTR, ioaddr + TX_INTR); outb(D_RX_INTR, ioaddr + RX_INTR); /* Turn on interrupts from LAN card controller */ if (lp->cardtype == MBH10302) outb(INTR_ON, ioaddr + LAN_CTRL); } /* fjn_reset */ /*====================================================================*/ static void fjn_rx(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; int boguscount = 10; /* 5 -> 10: by agy 19940922 */ pr_debug("%s: in rx_packet(), rx_status %02x.\n", dev->name, inb(ioaddr + RX_STATUS)); while ((inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) { u_short status = inw(ioaddr + DATAPORT); netdev_dbg(dev, "Rxing packet mode %02x status %04x.\n", inb(ioaddr + RX_MODE), status); #ifndef final_version if (status == 0) { outb(F_SKP_PKT, ioaddr + RX_SKIP); break; } #endif if ((status & 0xF0) != 0x20) { /* There was an error. */ dev->stats.rx_errors++; if (status & F_LEN_ERR) dev->stats.rx_length_errors++; if (status & F_ALG_ERR) dev->stats.rx_frame_errors++; if (status & F_CRC_ERR) dev->stats.rx_crc_errors++; if (status & F_OVR_FLO) dev->stats.rx_over_errors++; } else { u_short pkt_len = inw(ioaddr + DATAPORT); /* Malloc up new buffer. 
*/ struct sk_buff *skb; if (pkt_len > 1550) { netdev_notice(dev, "The FMV-18x claimed a very large packet, size %d\n", pkt_len); outb(F_SKP_PKT, ioaddr + RX_SKIP); dev->stats.rx_errors++; break; } skb = netdev_alloc_skb(dev, pkt_len + 2); if (skb == NULL) { netdev_notice(dev, "Memory squeeze, dropping packet (len %d)\n", pkt_len); outb(F_SKP_PKT, ioaddr + RX_SKIP); dev->stats.rx_dropped++; break; } skb_reserve(skb, 2); insw(ioaddr + DATAPORT, skb_put(skb, pkt_len), (pkt_len + 1) >> 1); skb->protocol = eth_type_trans(skb, dev); { int i; pr_debug("%s: Rxed packet of length %d: ", dev->name, pkt_len); for (i = 0; i < 14; i++) pr_debug(" %02x", skb->data[i]); pr_debug(".\n"); } netif_rx(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += pkt_len; } if (--boguscount <= 0) break; } /* If any worth-while packets have been received, dev_rint() has done a netif_wake_queue() for us and will work on them when we get to the bottom-half routine. */ /* if (lp->cardtype != TDK) { int i; for (i = 0; i < 20; i++) { if ((inb(ioaddr + RX_MODE) & F_BUF_EMP) == F_BUF_EMP) break; (void)inw(ioaddr + DATAPORT); /+ dummy status read +/ outb(F_SKP_PKT, ioaddr + RX_SKIP); } if (i > 0) pr_debug("%s: Exint Rx packet with mode %02x after " "%d ticks.\n", dev->name, inb(ioaddr + RX_MODE), i); } */ } /* fjn_rx */ /*====================================================================*/ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_VERSION, sizeof(info->version)); snprintf(info->bus_info, sizeof(info->bus_info), "PCMCIA 0x%lx", dev->base_addr); } static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, }; static int fjn_config(struct net_device *dev, struct ifmap *map){ return 0; } static int fjn_open(struct net_device *dev) { struct local_info_t *lp = netdev_priv(dev); struct pcmcia_device *link = lp->p_dev; pr_debug("fjn_open('%s').\n", dev->name); if (!pcmcia_dev_present(link)) return -ENODEV; link->open++; fjn_reset(dev); lp->tx_started = 0; lp->tx_queue = 0; lp->tx_queue_len = 0; lp->open_time = jiffies; netif_start_queue(dev); return 0; } /* fjn_open */ /*====================================================================*/ static int fjn_close(struct net_device *dev) { struct local_info_t *lp = netdev_priv(dev); struct pcmcia_device *link = lp->p_dev; unsigned int ioaddr = dev->base_addr; pr_debug("fjn_close('%s').\n", dev->name); lp->open_time = 0; netif_stop_queue(dev); /* Set configuration register 0 to disable Tx and Rx. */ if( sram_config == 0 ) outb(CONFIG0_RST ,ioaddr + CONFIG_0); else outb(CONFIG0_RST_1 ,ioaddr + CONFIG_0); /* Update the statistics -- ToDo. */ /* Power-down the chip. Green, green, green! */ outb(CHIP_OFF ,ioaddr + CONFIG_1); /* Set the ethernet adaptor disable IRQ */ if (lp->cardtype == MBH10302) outb(INTR_OFF, ioaddr + LAN_CTRL); link->open--; return 0; } /* fjn_close */ /*====================================================================*/ /* Set the multicast/promiscuous mode for this adaptor. 
*/ static void set_rx_mode(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; u_char mc_filter[8]; /* Multicast hash filter */ u_long flags; int i; int saved_bank; int saved_config_0 = inb(ioaddr + CONFIG_0); local_irq_save(flags); /* Disable Tx and Rx */ if (sram_config == 0) outb(CONFIG0_RST, ioaddr + CONFIG_0); else outb(CONFIG0_RST_1, ioaddr + CONFIG_0); if (dev->flags & IFF_PROMISC) { memset(mc_filter, 0xff, sizeof(mc_filter)); outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */ } else if (netdev_mc_count(dev) > MC_FILTERBREAK || (dev->flags & IFF_ALLMULTI)) { /* Too many to filter perfectly -- accept all multicasts. */ memset(mc_filter, 0xff, sizeof(mc_filter)); outb(2, ioaddr + RX_MODE); /* Use normal mode. */ } else if (netdev_mc_empty(dev)) { memset(mc_filter, 0x00, sizeof(mc_filter)); outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */ } else { struct netdev_hw_addr *ha; memset(mc_filter, 0, sizeof(mc_filter)); netdev_for_each_mc_addr(ha, dev) { unsigned int bit = ether_crc_le(ETH_ALEN, ha->addr) >> 26; mc_filter[bit >> 3] |= (1 << (bit & 7)); } outb(2, ioaddr + RX_MODE); /* Use normal mode. */ } /* Switch to bank 1 and set the multicast table. */ saved_bank = inb(ioaddr + CONFIG_1); outb(0xe4, ioaddr + CONFIG_1); for (i = 0; i < 8; i++) outb(mc_filter[i], ioaddr + MAR_ADR + i); outb(saved_bank, ioaddr + CONFIG_1); outb(saved_config_0, ioaddr + CONFIG_0); local_irq_restore(flags); }
gpl-2.0
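The set_rx_mode() logic at the end of the driver above hashes each multicast address with a little-endian CRC-32 and uses the top six bits of the result to select one of the 64 filter bits written to the MAR registers. Below is a minimal user-space sketch of that mapping; the CRC routine re-implements what the kernel's ether_crc_le() helper computes, and the sample MAC address is arbitrary.

#include <stdint.h>
#include <stdio.h>

/* Re-implementation of the kernel's ether_crc_le(): LSB-first CRC-32,
 * initial value ~0, no final inversion. */
static uint32_t ether_crc_le(int len, const uint8_t *data)
{
	uint32_t crc = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= data[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

int main(void)
{
	uint8_t mc_filter[8] = { 0 };
	/* Arbitrary sample address (the all-hosts multicast group). */
	const uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	unsigned int bit = ether_crc_le(6, addr) >> 26;

	mc_filter[bit >> 3] |= 1 << (bit & 7);
	printf("hash bit %u -> MAR byte %u, mask 0x%02x\n",
	       bit, bit >> 3, 1 << (bit & 7));
	return 0;
}

The eight bytes this sketch accumulates in mc_filter[] correspond to what the driver writes to MAR_ADR..MAR_ADR+7 after switching to bank 1.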
JellyBeanNitro/kernel-iproj-3.4
drivers/net/wireless/ath/ath5k/led.c
5135
6646
/* * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting * Copyright (c) 2004-2005 Atheros Communications, Inc. * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com> * Copyright (c) 2009 Bob Copeland <me@bobcopeland.com> * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. 
* */ #include <linux/pci.h> #include "ath5k.h" #define ATH_SDEVICE(subv, subd) \ .vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \ .subvendor = (subv), .subdevice = (subd) #define ATH_LED(pin, polarity) .driver_data = (((pin) << 8) | (polarity)) #define ATH_PIN(data) ((data) >> 8) #define ATH_POLARITY(data) ((data) & 0xff) /* Devices we match on for LED config info (typically laptops) */ static DEFINE_PCI_DEVICE_TABLE(ath5k_led_devices) = { /* AR5211 */ { PCI_VDEVICE(ATHEROS, PCI_DEVICE_ID_ATHEROS_AR5211), ATH_LED(0, 0) }, /* HP Compaq nc6xx, nc4000, nx6000 */ { ATH_SDEVICE(PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID), ATH_LED(1, 1) }, /* Acer Aspire One A150 (maximlevitsky@gmail.com) */ { ATH_SDEVICE(PCI_VENDOR_ID_FOXCONN, 0xe008), ATH_LED(3, 0) }, /* Acer Aspire One AO531h AO751h (keng-yu.lin@canonical.com) */ { ATH_SDEVICE(PCI_VENDOR_ID_FOXCONN, 0xe00d), ATH_LED(3, 0) }, /* Acer Ferrari 5000 (russ.dill@gmail.com) */ { ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0422), ATH_LED(1, 1) }, /* E-machines E510 (tuliom@gmail.com) */ { ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0428), ATH_LED(3, 0) }, /* BenQ Joybook R55v (nowymarluk@wp.pl) */ { ATH_SDEVICE(PCI_VENDOR_ID_QMI, 0x0100), ATH_LED(1, 0) }, /* Acer Extensa 5620z (nekoreeve@gmail.com) */ { ATH_SDEVICE(PCI_VENDOR_ID_QMI, 0x0105), ATH_LED(3, 0) }, /* Fukato Datacask Jupiter 1014a (mrb74@gmx.at) */ { ATH_SDEVICE(PCI_VENDOR_ID_AZWAVE, 0x1026), ATH_LED(3, 0) }, /* IBM ThinkPad AR5BXB6 (legovini@spiro.fisica.unipd.it) */ { ATH_SDEVICE(PCI_VENDOR_ID_IBM, 0x058a), ATH_LED(1, 0) }, /* HP Compaq CQ60-206US (ddreggors@jumptv.com) */ { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137a), ATH_LED(3, 1) }, /* HP Compaq C700 (nitrousnrg@gmail.com) */ { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 1) }, /* LiteOn AR5BXB63 (magooz@salug.it) */ { ATH_SDEVICE(PCI_VENDOR_ID_ATHEROS, 0x3067), ATH_LED(3, 0) }, /* IBM-specific AR5212 (all others) */ { PCI_VDEVICE(ATHEROS, PCI_DEVICE_ID_ATHEROS_AR5212_IBM), ATH_LED(0, 0) }, /* Dell Vostro A860 (shahar@shahar-or.co.il) */ { ATH_SDEVICE(PCI_VENDOR_ID_QMI, 0x0112), ATH_LED(3, 0) }, { } }; void ath5k_led_enable(struct ath5k_hw *ah) { if (test_bit(ATH_STAT_LEDSOFT, ah->status)) { ath5k_hw_set_gpio_output(ah, ah->led_pin); ath5k_led_off(ah); } } static void ath5k_led_on(struct ath5k_hw *ah) { if (!test_bit(ATH_STAT_LEDSOFT, ah->status)) return; ath5k_hw_set_gpio(ah, ah->led_pin, ah->led_on); } void ath5k_led_off(struct ath5k_hw *ah) { if (!test_bit(ATH_STAT_LEDSOFT, ah->status)) return; ath5k_hw_set_gpio(ah, ah->led_pin, !ah->led_on); } static void ath5k_led_brightness_set(struct led_classdev *led_dev, enum led_brightness brightness) { struct ath5k_led *led = container_of(led_dev, struct ath5k_led, led_dev); if (brightness == LED_OFF) ath5k_led_off(led->ah); else ath5k_led_on(led->ah); } static int ath5k_register_led(struct ath5k_hw *ah, struct ath5k_led *led, const char *name, char *trigger) { int err; led->ah = ah; strncpy(led->name, name, sizeof(led->name)); led->led_dev.name = led->name; led->led_dev.default_trigger = trigger; led->led_dev.brightness_set = ath5k_led_brightness_set; err = led_classdev_register(ah->dev, &led->led_dev); if (err) { ATH5K_WARN(ah, "could not register LED %s\n", name); led->ah = NULL; } return err; } static void ath5k_unregister_led(struct ath5k_led *led) { if (!led->ah) return; led_classdev_unregister(&led->led_dev); ath5k_led_off(led->ah); led->ah = NULL; } void ath5k_unregister_leds(struct ath5k_hw *ah) { ath5k_unregister_led(&ah->rx_led); ath5k_unregister_led(&ah->tx_led); } int __devinit ath5k_init_leds(struct 
ath5k_hw *ah) { int ret = 0; struct ieee80211_hw *hw = ah->hw; #ifndef CONFIG_ATHEROS_AR231X struct pci_dev *pdev = ah->pdev; #endif char name[ATH5K_LED_MAX_NAME_LEN + 1]; const struct pci_device_id *match; if (!ah->pdev) return 0; #ifdef CONFIG_ATHEROS_AR231X match = NULL; #else match = pci_match_id(&ath5k_led_devices[0], pdev); #endif if (match) { __set_bit(ATH_STAT_LEDSOFT, ah->status); ah->led_pin = ATH_PIN(match->driver_data); ah->led_on = ATH_POLARITY(match->driver_data); } if (!test_bit(ATH_STAT_LEDSOFT, ah->status)) goto out; ath5k_led_enable(ah); snprintf(name, sizeof(name), "ath5k-%s::rx", wiphy_name(hw->wiphy)); ret = ath5k_register_led(ah, &ah->rx_led, name, ieee80211_get_rx_led_name(hw)); if (ret) goto out; snprintf(name, sizeof(name), "ath5k-%s::tx", wiphy_name(hw->wiphy)); ret = ath5k_register_led(ah, &ah->tx_led, name, ieee80211_get_tx_led_name(hw)); out: return ret; }
gpl-2.0
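The PCI device table in led.c above stores each board's LED wiring in driver_data, with the GPIO pin in the upper byte and the active polarity in the low byte (the ATH_LED/ATH_PIN/ATH_POLARITY macros). A standalone sketch of the same packing scheme; the pin and polarity values here are arbitrary examples, not taken from a real board entry.

#include <stdio.h>

/* Same packing as the driver's macros: pin in bits 8+, polarity in bits 0-7. */
#define ATH_LED(pin, polarity)	(((pin) << 8) | (polarity))
#define ATH_PIN(data)		((data) >> 8)
#define ATH_POLARITY(data)	((data) & 0xff)

int main(void)
{
	unsigned long data = ATH_LED(3, 1);	/* hypothetical: GPIO 3, active-high */

	printf("pin=%lu polarity=%lu\n", ATH_PIN(data), ATH_POLARITY(data));
	return 0;
}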
crdroid-devices/android_kernel_motorola_msm8226
arch/ia64/sn/pci/pcibr/pcibr_dma.c
9487
11726
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001-2005 Silicon Graphics, Inc. All rights reserved. */ #include <linux/types.h> #include <linux/pci.h> #include <linux/export.h> #include <asm/sn/addrs.h> #include <asm/sn/geo.h> #include <asm/sn/pcibr_provider.h> #include <asm/sn/pcibus_provider_defs.h> #include <asm/sn/pcidev.h> #include <asm/sn/pic.h> #include <asm/sn/sn_sal.h> #include <asm/sn/tiocp.h> #include "tio.h" #include "xtalk/xwidgetdev.h" #include "xtalk/hubdev.h" extern int sn_ioif_inited; /* ===================================================================== * DMA MANAGEMENT * * The Bridge ASIC provides three methods of doing DMA: via a "direct map" * register available in 32-bit PCI space (which selects a contiguous 2G * address space on some other widget), via "direct" addressing via 64-bit * PCI space (all destination information comes from the PCI address, * including transfer attributes), and via a "mapped" region that allows * a bunch of different small mappings to be established with the PMU. * * For efficiency, we most prefer to use the 32bit direct mapping facility, * since it requires no resource allocations. The advantage of using the * PMU over the 64-bit direct is that single-cycle PCI addressing can be * used; the advantage of using 64-bit direct over PMU addressing is that * we do not have to allocate entries in the PMU. */ static dma_addr_t pcibr_dmamap_ate32(struct pcidev_info *info, u64 paddr, size_t req_size, u64 flags, int dma_flags) { struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info; struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info-> pdi_pcibus_info; u8 internal_device = (PCI_SLOT(pcidev_info->pdi_host_pcidev_info-> pdi_linux_pcidev->devfn)) - 1; int ate_count; int ate_index; u64 ate_flags = flags | PCI32_ATE_V; u64 ate; u64 pci_addr; u64 xio_addr; u64 offset; /* PIC in PCI-X mode does not support 32bit PageMap mode */ if (IS_PIC_SOFT(pcibus_info) && IS_PCIX(pcibus_info)) { return 0; } /* Calculate the number of ATEs needed. */ if (!(MINIMAL_ATE_FLAG(paddr, req_size))) { ate_count = IOPG((IOPGSIZE - 1) /* worst case start offset */ +req_size /* max mapping bytes */ - 1) + 1; /* round UP */ } else { /* assume requested target is page aligned */ ate_count = IOPG(req_size /* max mapping bytes */ - 1) + 1; /* round UP */ } /* Allocate the ATEs required. */ ate_index = pcibr_ate_alloc(pcibus_info, ate_count); if (ate_index < 0) return 0; /* In PCI-X mode, Prefetch not supported */ if (IS_PCIX(pcibus_info)) ate_flags &= ~(PCI32_ATE_PREF); if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS) xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) : PHYS_TO_TIODMA(paddr); else xio_addr = paddr; offset = IOPGOFF(xio_addr); ate = ate_flags | (xio_addr - offset); /* If PIC, put the targetid in the ATE */ if (IS_PIC_SOFT(pcibus_info)) { ate |= (pcibus_info->pbi_hub_xid << PIC_ATE_TARGETID_SHFT); } /* * If we're mapping for MSI, set the MSI bit in the ATE. If it's a * TIOCP based pci bus, we also need to set the PIO bit in the ATE. */ if (dma_flags & SN_DMA_MSI) { ate |= PCI32_ATE_MSI; if (IS_TIOCP_SOFT(pcibus_info)) ate |= PCI32_ATE_PIO; } ate_write(pcibus_info, ate_index, ate_count, ate); /* * Set up the DMA mapped Address. */ pci_addr = PCI32_MAPPED_BASE + offset + IOPGSIZE * ate_index; /* * If swap was set in device in pcibr_endian_set() * we need to turn swapping on. 
*/ if (pcibus_info->pbi_devreg[internal_device] & PCIBR_DEV_SWAP_DIR) ATE_SWAP_ON(pci_addr); return pci_addr; } static dma_addr_t pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr, u64 dma_attributes, int dma_flags) { struct pcibus_info *pcibus_info = (struct pcibus_info *) ((info->pdi_host_pcidev_info)->pdi_pcibus_info); u64 pci_addr; /* Translate to Crosstalk View of Physical Address */ if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS) pci_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) : PHYS_TO_TIODMA(paddr); else pci_addr = paddr; pci_addr |= dma_attributes; /* Handle Bus mode */ if (IS_PCIX(pcibus_info)) pci_addr &= ~PCI64_ATTR_PREF; /* Handle Bridge Chipset differences */ if (IS_PIC_SOFT(pcibus_info)) { pci_addr |= ((u64) pcibus_info-> pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT); } else pci_addr |= (dma_flags & SN_DMA_MSI) ? TIOCP_PCI64_CMDTYPE_MSI : TIOCP_PCI64_CMDTYPE_MEM; /* If PCI mode, func zero uses VCHAN0, every other func uses VCHAN1 */ if (!IS_PCIX(pcibus_info) && PCI_FUNC(info->pdi_linux_pcidev->devfn)) pci_addr |= PCI64_ATTR_VIRTUAL; return pci_addr; } static dma_addr_t pcibr_dmatrans_direct32(struct pcidev_info * info, u64 paddr, size_t req_size, u64 flags, int dma_flags) { struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info; struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info-> pdi_pcibus_info; u64 xio_addr; u64 xio_base; u64 offset; u64 endoff; if (IS_PCIX(pcibus_info)) { return 0; } if (dma_flags & SN_DMA_MSI) return 0; if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS) xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) : PHYS_TO_TIODMA(paddr); else xio_addr = paddr; xio_base = pcibus_info->pbi_dir_xbase; offset = xio_addr - xio_base; endoff = req_size + offset; if ((req_size > (1ULL << 31)) || /* Too Big */ (xio_addr < xio_base) || /* Out of range for mappings */ (endoff > (1ULL << 31))) { /* Too Big */ return 0; } return PCI32_DIRECT_BASE | offset; } /* * Wrapper routine for freeing DMA maps * DMA mappings for Direct 64 and 32 do not have any DMA maps. */ void pcibr_dma_unmap(struct pci_dev *hwdev, dma_addr_t dma_handle, int direction) { struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev); struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->pdi_pcibus_info; if (IS_PCI32_MAPPED(dma_handle)) { int ate_index; ate_index = IOPG((ATE_SWAP_OFF(dma_handle) - PCI32_MAPPED_BASE)); pcibr_ate_free(pcibus_info, ate_index); } } /* * On SN systems there is a race condition between a PIO read response and * DMA's. In rare cases, the read response may beat the DMA, causing the * driver to think that data in memory is complete and meaningful. This code * eliminates that race. This routine is called by the PIO read routines * after doing the read. For PIC this routine then forces a fake interrupt * on another line, which is logically associated with the slot that the PIO * is addressed to. It then spins while watching the memory location that * the interrupt is targeted to. When the interrupt response arrives, we * are sure that the DMA has landed in memory and it is safe for the driver * to proceed. For TIOCP use the Device(x) Write Request Buffer Flush * Bridge register since it ensures the data has entered the coherence domain, * unlike the PIC Device(x) Write Request Buffer Flush register. 
*/ void sn_dma_flush(u64 addr) { nasid_t nasid; int is_tio; int wid_num; int i, j; unsigned long flags; u64 itte; struct hubdev_info *hubinfo; struct sn_flush_device_kernel *p; struct sn_flush_device_common *common; struct sn_flush_nasid_entry *flush_nasid_list; if (!sn_ioif_inited) return; nasid = NASID_GET(addr); if (-1 == nasid_to_cnodeid(nasid)) return; hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo; BUG_ON(!hubinfo); flush_nasid_list = &hubinfo->hdi_flush_nasid_list; if (flush_nasid_list->widget_p == NULL) return; is_tio = (nasid & 1); if (is_tio) { int itte_index; if (TIO_HWIN(addr)) itte_index = 0; else if (TIO_BWIN_WINDOWNUM(addr)) itte_index = TIO_BWIN_WINDOWNUM(addr); else itte_index = -1; if (itte_index >= 0) { itte = flush_nasid_list->iio_itte[itte_index]; if (! TIO_ITTE_VALID(itte)) return; wid_num = TIO_ITTE_WIDGET(itte); } else wid_num = TIO_SWIN_WIDGETNUM(addr); } else { if (BWIN_WINDOWNUM(addr)) { itte = flush_nasid_list->iio_itte[BWIN_WINDOWNUM(addr)]; wid_num = IIO_ITTE_WIDGET(itte); } else wid_num = SWIN_WIDGETNUM(addr); } if (flush_nasid_list->widget_p[wid_num] == NULL) return; p = &flush_nasid_list->widget_p[wid_num][0]; /* find a matching BAR */ for (i = 0; i < DEV_PER_WIDGET; i++,p++) { common = p->common; for (j = 0; j < PCI_ROM_RESOURCE; j++) { if (common->sfdl_bar_list[j].start == 0) break; if (addr >= common->sfdl_bar_list[j].start && addr <= common->sfdl_bar_list[j].end) break; } if (j < PCI_ROM_RESOURCE && common->sfdl_bar_list[j].start != 0) break; } /* if no matching BAR, return without doing anything. */ if (i == DEV_PER_WIDGET) return; /* * For TIOCP use the Device(x) Write Request Buffer Flush Bridge * register since it ensures the data has entered the coherence * domain, unlike PIC. */ if (is_tio) { /* * Note: devices behind TIOCE should never be matched in the * above code, and so the following code is PIC/CP centric. * If CE ever needs the sn_dma_flush mechanism, we will have * to account for that here and in tioce_bus_fixup(). */ u32 tio_id = HUB_L(TIO_IOSPACE_ADDR(nasid, TIO_NODE_ID)); u32 revnum = XWIDGET_PART_REV_NUM(tio_id); /* TIOCP BRINGUP WAR (PV907516): Don't write buffer flush reg */ if ((1 << XWIDGET_PART_REV_NUM_REV(revnum)) & PV907516) { return; } else { pcireg_wrb_flush_get(common->sfdl_pcibus_info, (common->sfdl_slot - 1)); } } else { spin_lock_irqsave(&p->sfdl_flush_lock, flags); *common->sfdl_flush_addr = 0; /* force an interrupt. */ *(volatile u32 *)(common->sfdl_force_int_addr) = 1; /* wait for the interrupt to come back. */ while (*(common->sfdl_flush_addr) != 0x10f) cpu_relax(); /* okay, everything is synched up. */ spin_unlock_irqrestore(&p->sfdl_flush_lock, flags); } return; } /* * DMA interfaces. Called from pci_dma.c routines. */ dma_addr_t pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size, int dma_flags) { dma_addr_t dma_handle; struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev); /* SN cannot support DMA addresses smaller than 32 bits. */ if (hwdev->dma_mask < 0x7fffffff) { return 0; } if (hwdev->dma_mask == ~0UL) { /* * Handle the most common case: 64 bit cards. This * call should always succeed. */ dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr, PCI64_ATTR_PREF, dma_flags); } else { /* Handle 32-63 bit cards via direct mapping */ dma_handle = pcibr_dmatrans_direct32(pcidev_info, phys_addr, size, 0, dma_flags); if (!dma_handle) { /* * It is a 32 bit card and we cannot do direct mapping, * so we use an ATE. 
*/ dma_handle = pcibr_dmamap_ate32(pcidev_info, phys_addr, size, PCI32_ATE_PREF, dma_flags); } } return dma_handle; } dma_addr_t pcibr_dma_map_consistent(struct pci_dev * hwdev, unsigned long phys_addr, size_t size, int dma_flags) { dma_addr_t dma_handle; struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev); if (hwdev->dev.coherent_dma_mask == ~0UL) { dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr, PCI64_ATTR_BAR, dma_flags); } else { dma_handle = (dma_addr_t) pcibr_dmamap_ate32(pcidev_info, phys_addr, size, PCI32_ATE_BAR, dma_flags); } return dma_handle; } EXPORT_SYMBOL(sn_dma_flush);
gpl-2.0
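pcibr_dmamap_ate32() above rounds every mapping request up to whole I/O pages, charging one extra page when the start offset within the page is unknown (the non-MINIMAL_ATE_FLAG branch). The sketch below reproduces just that arithmetic in user space; IOPGSIZE is provided by the SN2 headers, and the 4 KiB value here is an assumption for illustration.

#include <stdio.h>

#define IOPGSIZE 4096u			/* assumed I/O page size */
#define IOPG(x)  ((x) / IOPGSIZE)

static unsigned int ate_count(unsigned int req_size, int page_aligned)
{
	if (!page_aligned)
		/* Worst case: the mapping starts at the last byte of a page. */
		return IOPG((IOPGSIZE - 1) + req_size - 1) + 1;
	/* Caller guarantees a page-aligned target. */
	return IOPG(req_size - 1) + 1;
}

int main(void)
{
	/* An 8 KiB request needs 2 ATEs if aligned, 3 in the worst case. */
	printf("aligned:   %u ATEs\n", ate_count(8192, 1));
	printf("unaligned: %u ATEs\n", ate_count(8192, 0));
	return 0;
}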
MoKee/android_kernel_samsung_d710
sound/pci/oxygen/xonar_lib.c
11023
3782
/* * helper functions for Asus Xonar cards * * Copyright (c) Clemens Ladisch <clemens@ladisch.de> * * * This driver is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2. * * This driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this driver; if not, see <http://www.gnu.org/licenses/>. */ #include <linux/delay.h> #include <sound/core.h> #include <sound/control.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include "xonar.h" #define GPIO_CS53x1_M_MASK 0x000c #define GPIO_CS53x1_M_SINGLE 0x0000 #define GPIO_CS53x1_M_DOUBLE 0x0004 #define GPIO_CS53x1_M_QUAD 0x0008 void xonar_enable_output(struct oxygen *chip) { struct xonar_generic *data = chip->model_data; oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL, data->output_enable_bit); msleep(data->anti_pop_delay); oxygen_set_bits16(chip, OXYGEN_GPIO_DATA, data->output_enable_bit); } void xonar_disable_output(struct oxygen *chip) { struct xonar_generic *data = chip->model_data; oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA, data->output_enable_bit); } static void xonar_ext_power_gpio_changed(struct oxygen *chip) { struct xonar_generic *data = chip->model_data; u8 has_power; has_power = !!(oxygen_read8(chip, data->ext_power_reg) & data->ext_power_bit); if (has_power != data->has_power) { data->has_power = has_power; if (has_power) { snd_printk(KERN_NOTICE "power restored\n"); } else { snd_printk(KERN_CRIT "Hey! Don't unplug the power cable!\n"); /* TODO: stop PCMs */ } } } void xonar_init_ext_power(struct oxygen *chip) { struct xonar_generic *data = chip->model_data; oxygen_set_bits8(chip, data->ext_power_int_reg, data->ext_power_bit); chip->interrupt_mask |= OXYGEN_INT_GPIO; chip->model.gpio_changed = xonar_ext_power_gpio_changed; data->has_power = !!(oxygen_read8(chip, data->ext_power_reg) & data->ext_power_bit); } void xonar_init_cs53x1(struct oxygen *chip) { oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL, GPIO_CS53x1_M_MASK); oxygen_write16_masked(chip, OXYGEN_GPIO_DATA, GPIO_CS53x1_M_SINGLE, GPIO_CS53x1_M_MASK); } void xonar_set_cs53x1_params(struct oxygen *chip, struct snd_pcm_hw_params *params) { unsigned int value; if (params_rate(params) <= 54000) value = GPIO_CS53x1_M_SINGLE; else if (params_rate(params) <= 108000) value = GPIO_CS53x1_M_DOUBLE; else value = GPIO_CS53x1_M_QUAD; oxygen_write16_masked(chip, OXYGEN_GPIO_DATA, value, GPIO_CS53x1_M_MASK); } int xonar_gpio_bit_switch_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; u16 bit = ctl->private_value; bool invert = ctl->private_value & XONAR_GPIO_BIT_INVERT; value->value.integer.value[0] = !!(oxygen_read16(chip, OXYGEN_GPIO_DATA) & bit) ^ invert; return 0; } int xonar_gpio_bit_switch_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct oxygen *chip = ctl->private_data; u16 bit = ctl->private_value; bool invert = ctl->private_value & XONAR_GPIO_BIT_INVERT; u16 old_bits, new_bits; int changed; spin_lock_irq(&chip->reg_lock); old_bits = oxygen_read16(chip, OXYGEN_GPIO_DATA); if (!!value->value.integer.value[0] ^ invert) new_bits = old_bits | bit; else new_bits = old_bits & ~bit; changed = new_bits != old_bits; if (changed) oxygen_write16(chip, OXYGEN_GPIO_DATA, new_bits); 
spin_unlock_irq(&chip->reg_lock); return changed; }
gpl-2.0
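xonar_set_cs53x1_params() above drives the CS53x1 ADC's mode pins from the PCM rate: single-speed up to 54 kHz, double-speed up to 108 kHz, and quad-speed beyond that. A user-space sketch of that mapping; the GPIO values are copied from the driver's defines, and the rate list is an arbitrary example.

#include <stdio.h>

/* GPIO_CS53x1_M_* values as defined in xonar_lib.c. */
enum { M_SINGLE = 0x0000, M_DOUBLE = 0x0004, M_QUAD = 0x0008 };

static int cs53x1_mode(unsigned int rate)
{
	if (rate <= 54000)
		return M_SINGLE;	/* single-speed master mode */
	if (rate <= 108000)
		return M_DOUBLE;	/* double-speed master mode */
	return M_QUAD;			/* quad-speed master mode */
}

int main(void)
{
	const unsigned int rates[] = { 44100, 96000, 192000 };
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("%6u Hz -> mode 0x%04x\n", rates[i], cs53x1_mode(rates[i]));
	return 0;
}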
visi0nary/mediatek
mt6732/kernel/net/irda/irlap_event.c
11279
66551
/********************************************************************* * * Filename: irlap_event.c * Version: 0.9 * Description: IrLAP state machine implementation * Status: Experimental. * Author: Dag Brattli <dag@brattli.net> * Created at: Sat Aug 16 00:59:29 1997 * Modified at: Sat Dec 25 21:07:57 1999 * Modified by: Dag Brattli <dag@brattli.net> * * Copyright (c) 1998-2000 Dag Brattli <dag@brattli.net>, * Copyright (c) 1998 Thomas Davis <ratbert@radiks.net> * All Rights Reserved. * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * ********************************************************************/ #include <linux/string.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <net/irda/irda.h> #include <net/irda/irlap_event.h> #include <net/irda/timer.h> #include <net/irda/irlap.h> #include <net/irda/irlap_frame.h> #include <net/irda/qos.h> #include <net/irda/parameters.h> #include <net/irda/irlmp.h> /* irlmp_flow_indication(), ... */ #include <net/irda/irda_device.h> #ifdef CONFIG_IRDA_FAST_RR int sysctl_fast_poll_increase = 50; #endif static int irlap_state_ndm (struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info); static int irlap_state_query (struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info); static int irlap_state_reply (struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info); static int irlap_state_conn (struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info); static int irlap_state_setup (struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info); static int irlap_state_offline(struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info); static int irlap_state_xmit_p (struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info); static int irlap_state_pclose (struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info); static int irlap_state_nrm_p (struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info); static int irlap_state_reset_wait(struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info); static int irlap_state_reset (struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info); static int irlap_state_nrm_s (struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info); static int irlap_state_xmit_s (struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info); static int irlap_state_sclose (struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info); static int irlap_state_reset_check(struct irlap_cb *, IRLAP_EVENT event, struct sk_buff *, struct irlap_info *); #ifdef CONFIG_IRDA_DEBUG static const char *const irlap_event[] = { "DISCOVERY_REQUEST", "CONNECT_REQUEST", "CONNECT_RESPONSE", "DISCONNECT_REQUEST", "DATA_REQUEST", "RESET_REQUEST", "RESET_RESPONSE", "SEND_I_CMD", 
"SEND_UI_FRAME", "RECV_DISCOVERY_XID_CMD", "RECV_DISCOVERY_XID_RSP", "RECV_SNRM_CMD", "RECV_TEST_CMD", "RECV_TEST_RSP", "RECV_UA_RSP", "RECV_DM_RSP", "RECV_RD_RSP", "RECV_I_CMD", "RECV_I_RSP", "RECV_UI_FRAME", "RECV_FRMR_RSP", "RECV_RR_CMD", "RECV_RR_RSP", "RECV_RNR_CMD", "RECV_RNR_RSP", "RECV_REJ_CMD", "RECV_REJ_RSP", "RECV_SREJ_CMD", "RECV_SREJ_RSP", "RECV_DISC_CMD", "SLOT_TIMER_EXPIRED", "QUERY_TIMER_EXPIRED", "FINAL_TIMER_EXPIRED", "POLL_TIMER_EXPIRED", "DISCOVERY_TIMER_EXPIRED", "WD_TIMER_EXPIRED", "BACKOFF_TIMER_EXPIRED", "MEDIA_BUSY_TIMER_EXPIRED", }; #endif /* CONFIG_IRDA_DEBUG */ const char *const irlap_state[] = { "LAP_NDM", "LAP_QUERY", "LAP_REPLY", "LAP_CONN", "LAP_SETUP", "LAP_OFFLINE", "LAP_XMIT_P", "LAP_PCLOSE", "LAP_NRM_P", "LAP_RESET_WAIT", "LAP_RESET", "LAP_NRM_S", "LAP_XMIT_S", "LAP_SCLOSE", "LAP_RESET_CHECK", }; static int (*state[])(struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info) = { irlap_state_ndm, irlap_state_query, irlap_state_reply, irlap_state_conn, irlap_state_setup, irlap_state_offline, irlap_state_xmit_p, irlap_state_pclose, irlap_state_nrm_p, irlap_state_reset_wait, irlap_state_reset, irlap_state_nrm_s, irlap_state_xmit_s, irlap_state_sclose, irlap_state_reset_check, }; /* * Function irda_poll_timer_expired (data) * * Poll timer has expired. Normally we must now send a RR frame to the * remote device */ static void irlap_poll_timer_expired(void *data) { struct irlap_cb *self = (struct irlap_cb *) data; IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); irlap_do_event(self, POLL_TIMER_EXPIRED, NULL, NULL); } /* * Calculate and set time before we will have to send back the pf bit * to the peer. Use in primary. * Make sure that state is XMIT_P/XMIT_S when calling this function * (and that nobody messed up with the state). - Jean II */ static void irlap_start_poll_timer(struct irlap_cb *self, int timeout) { IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); #ifdef CONFIG_IRDA_FAST_RR /* * Send out the RR frames faster if our own transmit queue is empty, or * if the peer is busy. The effect is a much faster conversation */ if (skb_queue_empty(&self->txq) || self->remote_busy) { if (self->fast_RR == TRUE) { /* * Assert that the fast poll timer has not reached the * normal poll timer yet */ if (self->fast_RR_timeout < timeout) { /* * FIXME: this should be a more configurable * function */ self->fast_RR_timeout += (sysctl_fast_poll_increase * HZ/1000); /* Use this fast(er) timeout instead */ timeout = self->fast_RR_timeout; } } else { self->fast_RR = TRUE; /* Start with just 0 ms */ self->fast_RR_timeout = 0; timeout = 0; } } else self->fast_RR = FALSE; IRDA_DEBUG(3, "%s(), timeout=%d (%ld)\n", __func__, timeout, jiffies); #endif /* CONFIG_IRDA_FAST_RR */ if (timeout == 0) irlap_do_event(self, POLL_TIMER_EXPIRED, NULL, NULL); else irda_start_timer(&self->poll_timer, timeout, self, irlap_poll_timer_expired); } /* * Function irlap_do_event (event, skb, info) * * Rushes through the state machine without any delay. If state == XMIT * then send queued data frames. 
*/ void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info) { int ret; if (!self || self->magic != LAP_MAGIC) return; IRDA_DEBUG(3, "%s(), event = %s, state = %s\n", __func__, irlap_event[event], irlap_state[self->state]); ret = (*state[self->state])(self, event, skb, info); /* * Check if there are any pending events that needs to be executed */ switch (self->state) { case LAP_XMIT_P: /* FALLTHROUGH */ case LAP_XMIT_S: /* * We just received the pf bit and are at the beginning * of a new LAP transmit window. * Check if there are any queued data frames, and do not * try to disconnect link if we send any data frames, since * that will change the state away form XMIT */ IRDA_DEBUG(2, "%s() : queue len = %d\n", __func__, skb_queue_len(&self->txq)); if (!skb_queue_empty(&self->txq)) { /* Prevent race conditions with irlap_data_request() */ self->local_busy = TRUE; /* Theory of operation. * We send frames up to when we fill the window or * reach line capacity. Those frames will queue up * in the device queue, and the driver will slowly * send them. * After each frame that we send, we poll the higher * layer for more data. It's the right time to do * that because the link layer need to perform the mtt * and then send the first frame, so we can afford * to send a bit of time in kernel space. * The explicit flow indication allow to minimise * buffers (== lower latency), to avoid higher layer * polling via timers (== less context switches) and * to implement a crude scheduler - Jean II */ /* Try to send away all queued data frames */ while ((skb = skb_dequeue(&self->txq)) != NULL) { /* Send one frame */ ret = (*state[self->state])(self, SEND_I_CMD, skb, NULL); /* Drop reference count. * It will be increase as needed in * irlap_send_data_xxx() */ kfree_skb(skb); /* Poll the higher layers for one more frame */ irlmp_flow_indication(self->notify.instance, FLOW_START); if (ret == -EPROTO) break; /* Try again later! */ } /* Finished transmitting */ self->local_busy = FALSE; } else if (self->disconnect_pending) { self->disconnect_pending = FALSE; ret = (*state[self->state])(self, DISCONNECT_REQUEST, NULL, NULL); } break; /* case LAP_NDM: */ /* case LAP_CONN: */ /* case LAP_RESET_WAIT: */ /* case LAP_RESET_CHECK: */ default: break; } } /* * Function irlap_state_ndm (event, skb, frame) * * NDM (Normal Disconnected Mode) state * */ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info) { discovery_t *discovery_rsp; int ret = 0; IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); switch (event) { case CONNECT_REQUEST: IRDA_ASSERT(self->netdev != NULL, return -1;); if (self->media_busy) { /* Note : this will never happen, because we test * media busy in irlap_connect_request() and * postpone the event... 
- Jean II */ IRDA_DEBUG(0, "%s(), CONNECT_REQUEST: media busy!\n", __func__); /* Always switch state before calling upper layers */ irlap_next_state(self, LAP_NDM); irlap_disconnect_indication(self, LAP_MEDIA_BUSY); } else { irlap_send_snrm_frame(self, &self->qos_rx); /* Start Final-bit timer */ irlap_start_final_timer(self, self->final_timeout); self->retry_count = 0; irlap_next_state(self, LAP_SETUP); } break; case RECV_SNRM_CMD: /* Check if the frame contains and I field */ if (info) { self->daddr = info->daddr; self->caddr = info->caddr; irlap_next_state(self, LAP_CONN); irlap_connect_indication(self, skb); } else { IRDA_DEBUG(0, "%s(), SNRM frame does not " "contain an I field!\n", __func__); } break; case DISCOVERY_REQUEST: IRDA_ASSERT(info != NULL, return -1;); if (self->media_busy) { IRDA_DEBUG(1, "%s(), DISCOVERY_REQUEST: media busy!\n", __func__); /* irlap->log.condition = MEDIA_BUSY; */ /* This will make IrLMP try again */ irlap_discovery_confirm(self, NULL); /* Note : the discovery log is not cleaned up here, * it will be done in irlap_discovery_request() * Jean II */ return 0; } self->S = info->S; self->s = info->s; irlap_send_discovery_xid_frame(self, info->S, info->s, TRUE, info->discovery); self->frame_sent = FALSE; self->s++; irlap_start_slot_timer(self, self->slot_timeout); irlap_next_state(self, LAP_QUERY); break; case RECV_DISCOVERY_XID_CMD: IRDA_ASSERT(info != NULL, return -1;); /* Assert that this is not the final slot */ if (info->s <= info->S) { self->slot = irlap_generate_rand_time_slot(info->S, info->s); if (self->slot == info->s) { discovery_rsp = irlmp_get_discovery_response(); discovery_rsp->data.daddr = info->daddr; irlap_send_discovery_xid_frame(self, info->S, self->slot, FALSE, discovery_rsp); self->frame_sent = TRUE; } else self->frame_sent = FALSE; /* * Go to reply state until end of discovery to * inhibit our own transmissions. Set the timer * to not stay forever there... Jean II */ irlap_start_query_timer(self, info->S, info->s); irlap_next_state(self, LAP_REPLY); } else { /* This is the final slot. How is it possible ? * This would happen is both discoveries are just slightly * offset (if they are in sync, all packets are lost). * Most often, all the discovery requests will be received * in QUERY state (see my comment there), except for the * last frame that will come here. * The big trouble when it happen is that active discovery * doesn't happen, because nobody answer the discoveries * frame of the other guy, so the log shows up empty. * What should we do ? * Not much. It's too late to answer those discovery frames, * so we just pass the info to IrLMP who will put it in the * log (and post an event). * Another cause would be devices that do discovery much * slower than us, however the latest fixes should minimise * those cases... * Jean II */ IRDA_DEBUG(1, "%s(), Receiving final discovery request, missed the discovery slots :-(\n", __func__); /* Last discovery request -> in the log */ irlap_discovery_indication(self, info->discovery); } break; case MEDIA_BUSY_TIMER_EXPIRED: /* A bunch of events may be postponed because the media is * busy (usually immediately after we close a connection), * or while we are doing discovery (state query/reply). * In all those cases, the media busy flag will be cleared * when it's OK for us to process those postponed events. * This event is not mentioned in the state machines in the * IrLAP spec. It's because they didn't consider Ultra and * postponing connection request is optional. 
* Jean II */ #ifdef CONFIG_IRDA_ULTRA /* Send any pending Ultra frames if any */ if (!skb_queue_empty(&self->txq_ultra)) { /* We don't send the frame, just post an event. * Also, previously this code was in timer.c... * Jean II */ ret = (*state[self->state])(self, SEND_UI_FRAME, NULL, NULL); } #endif /* CONFIG_IRDA_ULTRA */ /* Check if we should try to connect. * This code was previously in irlap_do_event() */ if (self->connect_pending) { self->connect_pending = FALSE; /* This one *should* not pend in this state, except * if a socket try to connect and immediately * disconnect. - clear - Jean II */ if (self->disconnect_pending) irlap_disconnect_indication(self, LAP_DISC_INDICATION); else ret = (*state[self->state])(self, CONNECT_REQUEST, NULL, NULL); self->disconnect_pending = FALSE; } /* Note : one way to test if this code works well (including * media busy and small busy) is to create a user space * application generating an Ultra packet every 3.05 sec (or * 2.95 sec) and to see how it interact with discovery. * It's fairly easy to check that no packet is lost, that the * packets are postponed during discovery and that after * discovery indication you have a 100ms "gap". * As connection request and Ultra are now processed the same * way, this avoid the tedious job of trying IrLAP connection * in all those cases... * Jean II */ break; #ifdef CONFIG_IRDA_ULTRA case SEND_UI_FRAME: { int i; /* Only allowed to repeat an operation twice */ for (i=0; ((i<2) && (self->media_busy == FALSE)); i++) { skb = skb_dequeue(&self->txq_ultra); if (skb) irlap_send_ui_frame(self, skb, CBROADCAST, CMD_FRAME); else break; /* irlap_send_ui_frame() won't increase skb reference * count, so no dev_kfree_skb() - Jean II */ } if (i == 2) { /* Force us to listen 500 ms again */ irda_device_set_media_busy(self->netdev, TRUE); } break; } case RECV_UI_FRAME: /* Only accept broadcast frames in NDM mode */ if (info->caddr != CBROADCAST) { IRDA_DEBUG(0, "%s(), not a broadcast frame!\n", __func__); } else irlap_unitdata_indication(self, skb); break; #endif /* CONFIG_IRDA_ULTRA */ case RECV_TEST_CMD: /* Remove test frame header */ skb_pull(skb, sizeof(struct test_frame)); /* * Send response. This skb will not be sent out again, and * will only be used to send out the same info as the cmd */ irlap_send_test_frame(self, CBROADCAST, info->daddr, skb); break; case RECV_TEST_RSP: IRDA_DEBUG(0, "%s() not implemented!\n", __func__); break; default: IRDA_DEBUG(2, "%s(), Unknown event %s\n", __func__, irlap_event[event]); ret = -1; break; } return ret; } /* * Function irlap_state_query (event, skb, info) * * QUERY state * */ static int irlap_state_query(struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info) { int ret = 0; IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); switch (event) { case RECV_DISCOVERY_XID_RSP: IRDA_ASSERT(info != NULL, return -1;); IRDA_ASSERT(info->discovery != NULL, return -1;); IRDA_DEBUG(4, "%s(), daddr=%08x\n", __func__, info->discovery->data.daddr); if (!self->discovery_log) { IRDA_WARNING("%s: discovery log is gone! " "maybe the discovery timeout has been set" " too short?\n", __func__); break; } hashbin_insert(self->discovery_log, (irda_queue_t *) info->discovery, info->discovery->data.daddr, NULL); /* Keep state */ /* irlap_next_state(self, LAP_QUERY); */ break; case RECV_DISCOVERY_XID_CMD: /* Yes, it is possible to receive those frames in this mode. 
* Note that most often the last discovery request won't * occur here but in NDM state (see my comment there). * What should we do ? * Not much. We are currently performing our own discovery, * therefore we can't answer those frames. We don't want * to change state either. We just pass the info to * IrLMP who will put it in the log (and post an event). * Jean II */ IRDA_ASSERT(info != NULL, return -1;); IRDA_DEBUG(1, "%s(), Receiving discovery request (s = %d) while performing discovery :-(\n", __func__, info->s); /* Last discovery request ? */ if (info->s == 0xff) irlap_discovery_indication(self, info->discovery); break; case SLOT_TIMER_EXPIRED: /* * Wait a little longer if we detect an incoming frame. This * is not mentioned in the spec, but is a good thing to do, * since we want to work even with devices that violate the * timing requirements. */ if (irda_device_is_receiving(self->netdev) && !self->add_wait) { IRDA_DEBUG(2, "%s(), device is slow to answer, " "waiting some more!\n", __func__); irlap_start_slot_timer(self, msecs_to_jiffies(10)); self->add_wait = TRUE; return ret; } self->add_wait = FALSE; if (self->s < self->S) { irlap_send_discovery_xid_frame(self, self->S, self->s, TRUE, self->discovery_cmd); self->s++; irlap_start_slot_timer(self, self->slot_timeout); /* Keep state */ irlap_next_state(self, LAP_QUERY); } else { /* This is the final slot! */ irlap_send_discovery_xid_frame(self, self->S, 0xff, TRUE, self->discovery_cmd); /* Always switch state before calling upper layers */ irlap_next_state(self, LAP_NDM); /* * We are now finished with the discovery procedure, * so now we must return the results */ irlap_discovery_confirm(self, self->discovery_log); /* IrLMP should now have taken care of the log */ self->discovery_log = NULL; } break; default: IRDA_DEBUG(2, "%s(), Unknown event %s\n", __func__, irlap_event[event]); ret = -1; break; } return ret; } /* * Function irlap_state_reply (self, event, skb, info) * * REPLY, we have received a XID discovery frame from a device and we * are waiting for the right time slot to send a response XID frame * */ static int irlap_state_reply(struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info) { discovery_t *discovery_rsp; int ret=0; IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); switch (event) { case QUERY_TIMER_EXPIRED: IRDA_DEBUG(0, "%s(), QUERY_TIMER_EXPIRED <%ld>\n", __func__, jiffies); irlap_next_state(self, LAP_NDM); break; case RECV_DISCOVERY_XID_CMD: IRDA_ASSERT(info != NULL, return -1;); /* Last frame? */ if (info->s == 0xff) { del_timer(&self->query_timer); /* info->log.condition = REMOTE; */ /* Always switch state before calling upper layers */ irlap_next_state(self, LAP_NDM); irlap_discovery_indication(self, info->discovery); } else { /* If it's our slot, send our reply */ if ((info->s >= self->slot) && (!self->frame_sent)) { discovery_rsp = irlmp_get_discovery_response(); discovery_rsp->data.daddr = info->daddr; irlap_send_discovery_xid_frame(self, info->S, self->slot, FALSE, discovery_rsp); self->frame_sent = TRUE; } /* Readjust our timer to accommodate devices * doing faster or slower discovery than us... 
* Jean II */ irlap_start_query_timer(self, info->S, info->s); /* Keep state */ //irlap_next_state(self, LAP_REPLY); } break; default: IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __func__, event, irlap_event[event]); ret = -1; break; } return ret; } /* * Function irlap_state_conn (event, skb, info) * * CONN, we have received a SNRM command and are waiting for the upper * layer to accept or refuse the connection * */ static int irlap_state_conn(struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info) { int ret = 0; IRDA_DEBUG(4, "%s(), event=%s\n", __func__, irlap_event[event]); IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); switch (event) { case CONNECT_RESPONSE: skb_pull(skb, sizeof(struct snrm_frame)); IRDA_ASSERT(self->netdev != NULL, return -1;); irlap_qos_negotiate(self, skb); irlap_initiate_connection_state(self); /* * Applying the parameters now will make sure we change speed * *after* we have sent the next frame */ irlap_apply_connection_parameters(self, FALSE); /* * Sending this frame will force a speed change after it has * been sent (i.e. the frame will be sent at 9600). */ irlap_send_ua_response_frame(self, &self->qos_rx); #if 0 /* * We are allowed to send two frames, but this may increase * the connect latency, so let's not do it for now. */ /* This is full of good intentions, but doesn't work in * practice. * After sending the first UA response, we switch the * dongle to the negotiated speed, which is usually * different from 9600 kb/s. * From there, there are two solutions : * 1) The other end has received the first UA response : * it will set up the connection, move to state LAP_NRM_P, * and will ignore and drop the second UA response. * Actually, it's even worse : the other side will almost * immediately send a RR that will likely collide with the * UA response (depending on negotiated turnaround). * 2) The other end has not received the first UA response, * will stay at 9600 and will never see the second UA response. * Jean II */ irlap_send_ua_response_frame(self, &self->qos_rx); #endif /* * The WD-timer could be set to the duration of the P-timer * for this case, but it is recommended to use twice the * value (note 3 IrLAP p. 60). */ irlap_start_wd_timer(self, self->wd_timeout); irlap_next_state(self, LAP_NRM_S); break; case RECV_DISCOVERY_XID_CMD: IRDA_DEBUG(3, "%s(), event RECV_DISCOVER_XID_CMD!\n", __func__); irlap_next_state(self, LAP_NDM); break; case DISCONNECT_REQUEST: IRDA_DEBUG(0, "%s(), Disconnect request!\n", __func__); irlap_send_dm_frame(self); irlap_next_state(self, LAP_NDM); irlap_disconnect_indication(self, LAP_DISC_INDICATION); break; default: IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __func__, event, irlap_event[event]); ret = -1; break; } return ret; } /* * Function irlap_state_setup (event, skb, info) * * SETUP state. The local layer has transmitted a SNRM command frame to * a remote peer layer and is awaiting a reply. * */ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info) { int ret = 0; IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); switch (event) { case FINAL_TIMER_EXPIRED: if (self->retry_count < self->N3) { /* * Perform random backoff: wait a random number of time units, minimum * duration half the time taken to transmit a SNRM frame, maximum duration * 1.5 times the time taken to transmit a SNRM frame.
So this time should * be between 15 msecs and 45 msecs. */ irlap_start_backoff_timer(self, msecs_to_jiffies(20 + (jiffies % 30))); } else { /* Always switch state before calling upper layers */ irlap_next_state(self, LAP_NDM); irlap_disconnect_indication(self, LAP_FOUND_NONE); } break; case BACKOFF_TIMER_EXPIRED: irlap_send_snrm_frame(self, &self->qos_rx); irlap_start_final_timer(self, self->final_timeout); self->retry_count++; break; case RECV_SNRM_CMD: IRDA_DEBUG(4, "%s(), SNRM battle!\n", __func__); IRDA_ASSERT(skb != NULL, return 0;); IRDA_ASSERT(info != NULL, return 0;); /* * The device with the largest device address wins the battle * (both have sent a SNRM command!) */ if (info && (info->daddr > self->saddr)) { del_timer(&self->final_timer); irlap_initiate_connection_state(self); IRDA_ASSERT(self->netdev != NULL, return -1;); skb_pull(skb, sizeof(struct snrm_frame)); irlap_qos_negotiate(self, skb); /* Send UA frame and then change link settings */ irlap_apply_connection_parameters(self, FALSE); irlap_send_ua_response_frame(self, &self->qos_rx); irlap_next_state(self, LAP_NRM_S); irlap_connect_confirm(self, skb); /* * The WD-timer could be set to the duration of the * P-timer for this case, but it is recommended * to use twice the value (note 3 IrLAP p. 60). */ irlap_start_wd_timer(self, self->wd_timeout); } else { /* We just ignore the other device! */ irlap_next_state(self, LAP_SETUP); } break; case RECV_UA_RSP: /* Stop F-timer */ del_timer(&self->final_timer); /* Initiate connection state */ irlap_initiate_connection_state(self); /* Negotiate connection parameters */ IRDA_ASSERT(skb->len > 10, return -1;); skb_pull(skb, sizeof(struct ua_frame)); IRDA_ASSERT(self->netdev != NULL, return -1;); irlap_qos_negotiate(self, skb); /* Set the new link setting *now* (before the rr frame) */ irlap_apply_connection_parameters(self, TRUE); self->retry_count = 0; /* Wait for turnaround time to give a chance to the other * device to be ready to receive us. * Note : the time to switch speed is typically larger * than the turnaround time, but as we don't have the other * side speed switch time, that's our best guess... * Jean II */ irlap_wait_min_turn_around(self, &self->qos_tx); /* This frame will actually be sent at the new speed */ irlap_send_rr_frame(self, CMD_FRAME); /* The timer is set to half the normal timer to quickly * detect a failure to negotiate the new connection * parameters. IrLAP 6.11.3.2, note 3. * Note that currently we don't process this failure * properly, as we should do a quick disconnect. * Jean II */ irlap_start_final_timer(self, self->final_timeout/2); irlap_next_state(self, LAP_NRM_P); irlap_connect_confirm(self, skb); break; case RECV_DM_RSP: /* FALLTHROUGH */ case RECV_DISC_CMD: del_timer(&self->final_timer); irlap_next_state(self, LAP_NDM); irlap_disconnect_indication(self, LAP_DISC_INDICATION); break; default: IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __func__, event, irlap_event[event]); ret = -1; break; } return ret; } /* * Function irlap_state_offline (self, event, skb, info) * * OFFLINE state, not used for now! * */ static int irlap_state_offline(struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info) { IRDA_DEBUG(0, "%s(), Unknown event\n", __func__); return -1; } /* * Function irlap_state_xmit_p (self, event, skb, info) * * XMIT. Only the primary station has the right to transmit, and we * therefore do not expect to receive any transmissions from other * stations.
* */ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info) { int ret = 0; switch (event) { case SEND_I_CMD: /* * Only send frame if send-window > 0. */ if ((self->window > 0) && (!self->remote_busy)) { int nextfit; #ifdef CONFIG_IRDA_DYNAMIC_WINDOW struct sk_buff *skb_next; /* With DYNAMIC_WINDOW, we keep the window size * maximum, and adapt on the packets we are sending. * At 115k, we can send only 2 packets of 2048 bytes * in a 500 ms turnaround. Without this option, we * would always limit the window to 2. With this * option, if we send smaller packets, we can send * up to 7 of them (always depending on QoS). * Jean II */ /* Look at the next skb. This is safe, as we are * the only consumer of the Tx queue (if we are not, * we have other problems) - Jean II */ skb_next = skb_peek(&self->txq); /* Check if a subsequent skb exists and would fit in * the current window (with respect to turnaround * time). * This allows us to properly mark the current packet * with the pf bit, to avoid falling back on the * second test below, and avoid waiting until the * end of the window and sending an extra RR. * Note : (skb_next != NULL) <=> (skb_queue_len() > 0) * Jean II */ nextfit = ((skb_next != NULL) && ((skb_next->len + skb->len) <= self->bytes_left)); /* * The current packet may not fit ! Because of the test * above, this should not happen any more !!! * Test if we have transmitted more bytes over the * link than it is possible to do with the current * speed and turn-around-time. */ if((!nextfit) && (skb->len > self->bytes_left)) { IRDA_DEBUG(0, "%s(), Not allowed to transmit" " more bytes!\n", __func__); /* Requeue the skb */ skb_queue_head(&self->txq, skb_get(skb)); /* * We should switch state to LAP_NRM_P, but * that is not possible since we must be sure * that we poll the other side. Since we have * used up our time, the poll timer should * trigger anyway now, so we just wait for it * DB */ /* * Sorry, but that's not totally true. If * we send 2000B packets, we may wait another * 1000B until our turnaround expires. That's * why we need to be proactive in avoiding * coming here. - Jean II */ return -EPROTO; } /* Subtract space used by this skb */ self->bytes_left -= skb->len; #else /* CONFIG_IRDA_DYNAMIC_WINDOW */ /* Window has been adjusted for the max packet * size, so much simpler... - Jean II */ nextfit = !skb_queue_empty(&self->txq); #endif /* CONFIG_IRDA_DYNAMIC_WINDOW */ /* * Send data with poll bit cleared only if window > 1 * and there are more frames after this one to be sent */ if ((self->window > 1) && (nextfit)) { /* More packets to send in current window */ irlap_send_data_primary(self, skb); irlap_next_state(self, LAP_XMIT_P); } else { /* Final packet of window */ irlap_send_data_primary_poll(self, skb); /* * Make sure state machine does not try to send * any more frames */ ret = -EPROTO; } #ifdef CONFIG_IRDA_FAST_RR /* Peer may want to reply immediately */ self->fast_RR = FALSE; #endif /* CONFIG_IRDA_FAST_RR */ } else { IRDA_DEBUG(4, "%s(), Unable to send!
remote busy?\n", __func__); skb_queue_head(&self->txq, skb_get(skb)); /* * The next ret is important, because it tells * irlap_next_state _not_ to deliver more frames */ ret = -EPROTO; } break; case POLL_TIMER_EXPIRED: IRDA_DEBUG(3, "%s(), POLL_TIMER_EXPIRED <%ld>\n", __func__, jiffies); irlap_send_rr_frame(self, CMD_FRAME); /* Return to NRM properly - Jean II */ self->window = self->window_size; #ifdef CONFIG_IRDA_DYNAMIC_WINDOW /* Allowed to transmit a maximum number of bytes again. */ self->bytes_left = self->line_capacity; #endif /* CONFIG_IRDA_DYNAMIC_WINDOW */ irlap_start_final_timer(self, self->final_timeout); irlap_next_state(self, LAP_NRM_P); break; case DISCONNECT_REQUEST: del_timer(&self->poll_timer); irlap_wait_min_turn_around(self, &self->qos_tx); irlap_send_disc_frame(self); irlap_flush_all_queues(self); irlap_start_final_timer(self, self->final_timeout); self->retry_count = 0; irlap_next_state(self, LAP_PCLOSE); break; case DATA_REQUEST: /* Nothing to do, irlap_do_event() will send the packet * when we return... - Jean II */ break; default: IRDA_DEBUG(0, "%s(), Unknown event %s\n", __func__, irlap_event[event]); ret = -EINVAL; break; } return ret; } /* * Function irlap_state_pclose (event, skb, info) * * PCLOSE state */ static int irlap_state_pclose(struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info) { int ret = 0; IRDA_DEBUG(1, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); switch (event) { case RECV_UA_RSP: /* FALLTHROUGH */ case RECV_DM_RSP: del_timer(&self->final_timer); /* Set new link parameters */ irlap_apply_default_connection_parameters(self); /* Always switch state before calling upper layers */ irlap_next_state(self, LAP_NDM); irlap_disconnect_indication(self, LAP_DISC_INDICATION); break; case FINAL_TIMER_EXPIRED: if (self->retry_count < self->N3) { irlap_wait_min_turn_around(self, &self->qos_tx); irlap_send_disc_frame(self); irlap_start_final_timer(self, self->final_timeout); self->retry_count++; /* Keep state */ } else { irlap_apply_default_connection_parameters(self); /* Always switch state before calling upper layers */ irlap_next_state(self, LAP_NDM); irlap_disconnect_indication(self, LAP_NO_RESPONSE); } break; default: IRDA_DEBUG(1, "%s(), Unknown event %d\n", __func__, event); ret = -1; break; } return ret; } /* * Function irlap_state_nrm_p (self, event, skb, info) * * NRM_P (Normal Response Mode as Primary). The primary station has given * permission to a secondary station to transmit IrLAP response frames * (by sending a frame with the P bit set). The primary station will not * transmit any frames and is expecting to receive frames only from the * secondary to which transmission permission has been given. */ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info) { int ret = 0; int ns_status; int nr_status; switch (event) { case RECV_I_RSP: /* Optimize for the common case */ if (unlikely(skb->len <= LAP_ADDR_HEADER + LAP_CTRL_HEADER)) { /* * Input validation check: a stir4200/mcp2150 * combination sometimes results in an empty i:rsp. * This makes no sense; we can just ignore the frame * and send an rr:cmd immediately.
This happens before * changing nr or ns, so it triggers a retransmit */ irlap_wait_min_turn_around(self, &self->qos_tx); irlap_send_rr_frame(self, CMD_FRAME); /* Keep state */ break; } /* FIXME: must check for remote_busy below */ #ifdef CONFIG_IRDA_FAST_RR /* * Reset the fast_RR so we can use the fast RR code with * full speed the next time since peer may have more frames * to transmit */ self->fast_RR = FALSE; #endif /* CONFIG_IRDA_FAST_RR */ IRDA_ASSERT(info != NULL, return -1;); ns_status = irlap_validate_ns_received(self, info->ns); nr_status = irlap_validate_nr_received(self, info->nr); /* * Check for expected I(nformation) frame */ if ((ns_status == NS_EXPECTED) && (nr_status == NR_EXPECTED)) { /* Update Vr (next frame for us to receive) */ self->vr = (self->vr + 1) % 8; /* Update Nr received, cleanup our retry queue */ irlap_update_nr_received(self, info->nr); /* * Got expected NR, so reset the * retry_count. This is not done by the IrLAP spec, * which is strange! */ self->retry_count = 0; self->ack_required = TRUE; /* poll bit cleared? */ if (!info->pf) { /* Keep state, do not move this line */ irlap_next_state(self, LAP_NRM_P); irlap_data_indication(self, skb, FALSE); } else { /* No longer waiting for pf */ del_timer(&self->final_timer); irlap_wait_min_turn_around(self, &self->qos_tx); /* Call higher layer *before* changing state * to give them a chance to send data in the * next LAP frame. * Jean II */ irlap_data_indication(self, skb, FALSE); /* XMIT states are the most dangerous state * to be in, because user requests are * processed directly and may change state. * On the other hand, in NDM_P, those * requests are queued and we will process * them when we return to irlap_do_event(). * Jean II */ irlap_next_state(self, LAP_XMIT_P); /* This is the last frame. * Make sure it's always called in XMIT state. * - Jean II */ irlap_start_poll_timer(self, self->poll_timeout); } break; } /* Unexpected next to send (Ns) */ if ((ns_status == NS_UNEXPECTED) && (nr_status == NR_EXPECTED)) { if (!info->pf) { irlap_update_nr_received(self, info->nr); /* * Wait until the last frame before doing * anything */ /* Keep state */ irlap_next_state(self, LAP_NRM_P); } else { IRDA_DEBUG(4, "%s(), missing or duplicate frame!\n", __func__); /* Update Nr received */ irlap_update_nr_received(self, info->nr); irlap_wait_min_turn_around(self, &self->qos_tx); irlap_send_rr_frame(self, CMD_FRAME); self->ack_required = FALSE; irlap_start_final_timer(self, self->final_timeout); irlap_next_state(self, LAP_NRM_P); } break; } /* * Unexpected next to receive (Nr) */ if ((ns_status == NS_EXPECTED) && (nr_status == NR_UNEXPECTED)) { if (info->pf) { self->vr = (self->vr + 1) % 8; /* Update Nr received */ irlap_update_nr_received(self, info->nr); /* Resend rejected frames */ irlap_resend_rejected_frames(self, CMD_FRAME); self->ack_required = FALSE; /* Make sure we account for the time * to transmit our frames. See comments * in irlap_send_data_primary_poll(). * Jean II */ irlap_start_final_timer(self, 2 * self->final_timeout); /* Keep state, do not move this line */ irlap_next_state(self, LAP_NRM_P); irlap_data_indication(self, skb, FALSE); } else { /* * Do not resend frames until the last * frame has arrived from the other * device. This is not documented in * IrLAP!!
*/ self->vr = (self->vr + 1) % 8; /* Update Nr received */ irlap_update_nr_received(self, info->nr); self->ack_required = FALSE; /* Keep state, do not move this line! */ irlap_next_state(self, LAP_NRM_P); irlap_data_indication(self, skb, FALSE); } break; } /* * Unexpected next to send (Ns) and next to receive (Nr) * Not documented by IrLAP! */ if ((ns_status == NS_UNEXPECTED) && (nr_status == NR_UNEXPECTED)) { IRDA_DEBUG(4, "%s(), unexpected nr and ns!\n", __func__); if (info->pf) { /* Resend rejected frames */ irlap_resend_rejected_frames(self, CMD_FRAME); /* Give peer some time to retransmit! * But account for our own Tx. */ irlap_start_final_timer(self, 2 * self->final_timeout); /* Keep state, do not move this line */ irlap_next_state(self, LAP_NRM_P); } else { /* Update Nr received */ /* irlap_update_nr_received( info->nr); */ self->ack_required = FALSE; } break; } /* * Invalid NR or NS */ if ((nr_status == NR_INVALID) || (ns_status == NS_INVALID)) { if (info->pf) { del_timer(&self->final_timer); irlap_next_state(self, LAP_RESET_WAIT); irlap_disconnect_indication(self, LAP_RESET_INDICATION); self->xmitflag = TRUE; } else { del_timer(&self->final_timer); irlap_disconnect_indication(self, LAP_RESET_INDICATION); self->xmitflag = FALSE; } break; } IRDA_DEBUG(1, "%s(), Not implemented!\n", __func__); IRDA_DEBUG(1, "%s(), event=%s, ns_status=%d, nr_status=%d\n", __func__, irlap_event[event], ns_status, nr_status); break; case RECV_UI_FRAME: /* Poll bit cleared? */ if (!info->pf) { irlap_data_indication(self, skb, TRUE); irlap_next_state(self, LAP_NRM_P); } else { del_timer(&self->final_timer); irlap_data_indication(self, skb, TRUE); irlap_next_state(self, LAP_XMIT_P); IRDA_DEBUG(1, "%s: RECV_UI_FRAME: next state %s\n", __func__, irlap_state[self->state]); irlap_start_poll_timer(self, self->poll_timeout); } break; case RECV_RR_RSP: /* * If you get a RR, the remote isn't busy anymore, * no matter what the NR */ self->remote_busy = FALSE; /* Stop final timer */ del_timer(&self->final_timer); /* * Nr as expected? */ ret = irlap_validate_nr_received(self, info->nr); if (ret == NR_EXPECTED) { /* Update Nr received */ irlap_update_nr_received(self, info->nr); /* * Got expected NR, so reset the retry_count. This * is not done by the IrLAP standard, which is * strange! DB. */ self->retry_count = 0; irlap_wait_min_turn_around(self, &self->qos_tx); irlap_next_state(self, LAP_XMIT_P); /* Start poll timer */ irlap_start_poll_timer(self, self->poll_timeout); } else if (ret == NR_UNEXPECTED) { IRDA_ASSERT(info != NULL, return -1;); /* * Unexpected nr!
*/ /* Update Nr received */ irlap_update_nr_received(self, info->nr); IRDA_DEBUG(4, "RECV_RR_FRAME: Retrans:%d, nr=%d, va=%d, " "vs=%d, vr=%d\n", self->retry_count, info->nr, self->va, self->vs, self->vr); /* Resend rejected frames */ irlap_resend_rejected_frames(self, CMD_FRAME); irlap_start_final_timer(self, self->final_timeout * 2); irlap_next_state(self, LAP_NRM_P); } else if (ret == NR_INVALID) { IRDA_DEBUG(1, "%s(), Received RR with " "invalid nr !\n", __func__); irlap_next_state(self, LAP_RESET_WAIT); irlap_disconnect_indication(self, LAP_RESET_INDICATION); self->xmitflag = TRUE; } break; case RECV_RNR_RSP: IRDA_ASSERT(info != NULL, return -1;); /* Stop final timer */ del_timer(&self->final_timer); self->remote_busy = TRUE; /* Update Nr received */ irlap_update_nr_received(self, info->nr); irlap_next_state(self, LAP_XMIT_P); /* Start poll timer */ irlap_start_poll_timer(self, self->poll_timeout); break; case RECV_FRMR_RSP: del_timer(&self->final_timer); self->xmitflag = TRUE; irlap_next_state(self, LAP_RESET_WAIT); irlap_reset_indication(self); break; case FINAL_TIMER_EXPIRED: /* * We are allowed to wait for an additional 300 ms if the * final timer expires when we are in the middle * of receiving a frame (page 45, IrLAP). Check that * we only do this once for each frame. */ if (irda_device_is_receiving(self->netdev) && !self->add_wait) { IRDA_DEBUG(1, "FINAL_TIMER_EXPIRED when receiving a " "frame! Waiting a little bit more!\n"); irlap_start_final_timer(self, msecs_to_jiffies(300)); /* * Don't allow this to happen one more time in a row, * or else we can get a pretty tight loop here if * we only receive half a frame. DB. */ self->add_wait = TRUE; break; } self->add_wait = FALSE; /* N2 is the disconnect timer. Until we reach it, we retry */ if (self->retry_count < self->N2) { if (skb_peek(&self->wx_list) == NULL) { /* Retry sending the pf bit to the secondary */ IRDA_DEBUG(4, "nrm_p: resending rr"); irlap_wait_min_turn_around(self, &self->qos_tx); irlap_send_rr_frame(self, CMD_FRAME); } else { IRDA_DEBUG(4, "nrm_p: resend frames"); irlap_resend_rejected_frames(self, CMD_FRAME); } irlap_start_final_timer(self, self->final_timeout); self->retry_count++; IRDA_DEBUG(4, "irlap_state_nrm_p: FINAL_TIMER_EXPIRED:" " retry_count=%d\n", self->retry_count); /* Early warning event. I'm using a pretty liberal * interpretation of the spec and generate an event * every time the timer is a multiple of N1 (and not * only the first time). This allows applications * to know precisely when connectivity restarts...
* Jean II */ if ((self->retry_count % self->N1) == 0) irlap_status_indication(self, STATUS_NO_ACTIVITY); /* Keep state */ } else { irlap_apply_default_connection_parameters(self); /* Always switch state before calling upper layers */ irlap_next_state(self, LAP_NDM); irlap_disconnect_indication(self, LAP_NO_RESPONSE); } break; case RECV_REJ_RSP: irlap_update_nr_received(self, info->nr); if (self->remote_busy) { irlap_wait_min_turn_around(self, &self->qos_tx); irlap_send_rr_frame(self, CMD_FRAME); } else irlap_resend_rejected_frames(self, CMD_FRAME); irlap_start_final_timer(self, 2 * self->final_timeout); break; case RECV_SREJ_RSP: irlap_update_nr_received(self, info->nr); if (self->remote_busy) { irlap_wait_min_turn_around(self, &self->qos_tx); irlap_send_rr_frame(self, CMD_FRAME); } else irlap_resend_rejected_frame(self, CMD_FRAME); irlap_start_final_timer(self, 2 * self->final_timeout); break; case RECV_RD_RSP: IRDA_DEBUG(1, "%s(), RECV_RD_RSP\n", __func__); irlap_flush_all_queues(self); irlap_next_state(self, LAP_XMIT_P); /* Call back the LAP state machine to do a proper disconnect */ irlap_disconnect_request(self); break; default: IRDA_DEBUG(1, "%s(), Unknown event %s\n", __func__, irlap_event[event]); ret = -1; break; } return ret; } /* * Function irlap_state_reset_wait (event, skb, info) * * We have informed the service user of a reset condition, and are * awaiting a reset or disconnect request. * */ static int irlap_state_reset_wait(struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info) { int ret = 0; IRDA_DEBUG(3, "%s(), event = %s\n", __func__, irlap_event[event]); IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); switch (event) { case RESET_REQUEST: if (self->xmitflag) { irlap_wait_min_turn_around(self, &self->qos_tx); irlap_send_snrm_frame(self, NULL); irlap_start_final_timer(self, self->final_timeout); irlap_next_state(self, LAP_RESET); } else { irlap_start_final_timer(self, self->final_timeout); irlap_next_state(self, LAP_RESET); } break; case DISCONNECT_REQUEST: irlap_wait_min_turn_around(self, &self->qos_tx); irlap_send_disc_frame(self); irlap_flush_all_queues(self); irlap_start_final_timer(self, self->final_timeout); self->retry_count = 0; irlap_next_state(self, LAP_PCLOSE); break; default: IRDA_DEBUG(2, "%s(), Unknown event %s\n", __func__, irlap_event[event]); ret = -1; break; } return ret; } /* * Function irlap_state_reset (self, event, skb, info) * * We have sent a SNRM reset command to the peer layer, and are awaiting a * reply.
* */ static int irlap_state_reset(struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info) { int ret = 0; IRDA_DEBUG(3, "%s(), event = %s\n", __func__, irlap_event[event]); IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); switch (event) { case RECV_DISC_CMD: del_timer(&self->final_timer); irlap_apply_default_connection_parameters(self); /* Always switch state before calling upper layers */ irlap_next_state(self, LAP_NDM); irlap_disconnect_indication(self, LAP_NO_RESPONSE); break; case RECV_UA_RSP: del_timer(&self->final_timer); /* Initiate connection state */ irlap_initiate_connection_state(self); irlap_reset_confirm(); self->remote_busy = FALSE; irlap_next_state(self, LAP_XMIT_P); irlap_start_poll_timer(self, self->poll_timeout); break; case FINAL_TIMER_EXPIRED: if (self->retry_count < 3) { irlap_wait_min_turn_around(self, &self->qos_tx); IRDA_ASSERT(self->netdev != NULL, return -1;); irlap_send_snrm_frame(self, self->qos_dev); self->retry_count++; /* Experimental!! */ irlap_start_final_timer(self, self->final_timeout); irlap_next_state(self, LAP_RESET); } else if (self->retry_count >= self->N3) { irlap_apply_default_connection_parameters(self); /* Always switch state before calling upper layers */ irlap_next_state(self, LAP_NDM); irlap_disconnect_indication(self, LAP_NO_RESPONSE); } break; case RECV_SNRM_CMD: /* * SNRM frame is not allowed to contain an I-field in this * state */ if (!info) { IRDA_DEBUG(3, "%s(), RECV_SNRM_CMD\n", __func__); irlap_initiate_connection_state(self); irlap_wait_min_turn_around(self, &self->qos_tx); irlap_send_ua_response_frame(self, &self->qos_rx); irlap_reset_confirm(); irlap_start_wd_timer(self, self->wd_timeout); irlap_next_state(self, LAP_NDM); } else { IRDA_DEBUG(0, "%s(), SNRM frame contained an I field!\n", __func__); } break; default: IRDA_DEBUG(1, "%s(), Unknown event %s\n", __func__, irlap_event[event]); ret = -1; break; } return ret; } /* * Function irlap_state_xmit_s (event, skb, info) * * XMIT_S, The secondary station has been given the right to transmit, * and we therefore do not expect to receive any transmissions from other * stations. */ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info) { int ret = 0; IRDA_DEBUG(4, "%s(), event=%s\n", __func__, irlap_event[event]); IRDA_ASSERT(self != NULL, return -ENODEV;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -EBADR;); switch (event) { case SEND_I_CMD: /* * Send frame only if send window > 0 */ if ((self->window > 0) && (!self->remote_busy)) { int nextfit; #ifdef CONFIG_IRDA_DYNAMIC_WINDOW struct sk_buff *skb_next; /* * Same deal as in irlap_state_xmit_p(), so see * the comments at that point. * We are the secondary, so there are only subtle * differences. - Jean II */ /* Check if a subsequent skb exist and would fit in * the current window (with respect to turnaround * time). - Jean II */ skb_next = skb_peek(&self->txq); nextfit = ((skb_next != NULL) && ((skb_next->len + skb->len) <= self->bytes_left)); /* * Test if we have transmitted more bytes over the * link than its possible to do with the current * speed and turn-around-time. 
*/ if((!nextfit) && (skb->len > self->bytes_left)) { IRDA_DEBUG(0, "%s(), Not allowed to transmit" " more bytes!\n", __func__); /* Requeue the skb */ skb_queue_head(&self->txq, skb_get(skb)); /* * Switch to NRM_S, this is only possible * when we are in secondary mode, since we * must be sure that we don't miss any RR * frames */ self->window = self->window_size; self->bytes_left = self->line_capacity; irlap_start_wd_timer(self, self->wd_timeout); irlap_next_state(self, LAP_NRM_S); /* Slight difference with primary : * here we would wait for the other side to * expire the turnaround. - Jean II */ return -EPROTO; /* Try again later */ } /* Subtract space used by this skb */ self->bytes_left -= skb->len; #else /* CONFIG_IRDA_DYNAMIC_WINDOW */ /* Window has been adjusted for the max packet * size, so much simpler... - Jean II */ nextfit = !skb_queue_empty(&self->txq); #endif /* CONFIG_IRDA_DYNAMIC_WINDOW */ /* * Send data with final bit cleared only if window > 1 * and there is more frames to be sent */ if ((self->window > 1) && (nextfit)) { irlap_send_data_secondary(self, skb); irlap_next_state(self, LAP_XMIT_S); } else { irlap_send_data_secondary_final(self, skb); irlap_next_state(self, LAP_NRM_S); /* * Make sure state machine does not try to send * any more frames */ ret = -EPROTO; } } else { IRDA_DEBUG(2, "%s(), Unable to send!\n", __func__); skb_queue_head(&self->txq, skb_get(skb)); ret = -EPROTO; } break; case DISCONNECT_REQUEST: irlap_send_rd_frame(self); irlap_flush_all_queues(self); irlap_start_wd_timer(self, self->wd_timeout); irlap_next_state(self, LAP_SCLOSE); break; case DATA_REQUEST: /* Nothing to do, irlap_do_event() will send the packet * when we return... - Jean II */ break; default: IRDA_DEBUG(2, "%s(), Unknown event %s\n", __func__, irlap_event[event]); ret = -EINVAL; break; } return ret; } /* * Function irlap_state_nrm_s (event, skb, info) * * NRM_S (Normal Response Mode as Secondary) state, in this state we are * expecting to receive frames from the primary station * */ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info) { int ns_status; int nr_status; int ret = 0; IRDA_DEBUG(4, "%s(), event=%s\n", __func__, irlap_event[ event]); IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); switch (event) { case RECV_I_CMD: /* Optimize for the common case */ /* FIXME: must check for remote_busy below */ IRDA_DEBUG(4, "%s(), event=%s nr=%d, vs=%d, ns=%d, " "vr=%d, pf=%d\n", __func__, irlap_event[event], info->nr, self->vs, info->ns, self->vr, info->pf); self->retry_count = 0; ns_status = irlap_validate_ns_received(self, info->ns); nr_status = irlap_validate_nr_received(self, info->nr); /* * Check for expected I(nformation) frame */ if ((ns_status == NS_EXPECTED) && (nr_status == NR_EXPECTED)) { /* Update Vr (next frame for us to receive) */ self->vr = (self->vr + 1) % 8; /* Update Nr received */ irlap_update_nr_received(self, info->nr); /* * poll bit cleared? */ if (!info->pf) { self->ack_required = TRUE; /* * Starting WD-timer here is optional, but * not recommended. Note 6 IrLAP p. 83 */ #if 0 irda_start_timer(WD_TIMER, self->wd_timeout); #endif /* Keep state, do not move this line */ irlap_next_state(self, LAP_NRM_S); irlap_data_indication(self, skb, FALSE); break; } else { /* * We should wait before sending RR, and * also before changing to XMIT_S * state. (note 1, IrLAP p. 
82) */ irlap_wait_min_turn_around(self, &self->qos_tx); /* * Give higher layers a chance to * immediately reply with some data before * we decide if we should send a RR frame * or not */ irlap_data_indication(self, skb, FALSE); /* Any pending data requests? */ if (!skb_queue_empty(&self->txq) && (self->window > 0)) { self->ack_required = TRUE; del_timer(&self->wd_timer); irlap_next_state(self, LAP_XMIT_S); } else { irlap_send_rr_frame(self, RSP_FRAME); irlap_start_wd_timer(self, self->wd_timeout); /* Keep the state */ irlap_next_state(self, LAP_NRM_S); } break; } } /* * Check for Unexpected next to send (Ns) */ if ((ns_status == NS_UNEXPECTED) && (nr_status == NR_EXPECTED)) { /* Unexpected next to send, with final bit cleared */ if (!info->pf) { irlap_update_nr_received(self, info->nr); irlap_start_wd_timer(self, self->wd_timeout); } else { /* Update Nr received */ irlap_update_nr_received(self, info->nr); irlap_wait_min_turn_around(self, &self->qos_tx); irlap_send_rr_frame(self, RSP_FRAME); irlap_start_wd_timer(self, self->wd_timeout); } break; } /* * Unexpected Next to Receive(NR) ? */ if ((ns_status == NS_EXPECTED) && (nr_status == NR_UNEXPECTED)) { if (info->pf) { IRDA_DEBUG(4, "RECV_I_RSP: frame(s) lost\n"); self->vr = (self->vr + 1) % 8; /* Update Nr received */ irlap_update_nr_received(self, info->nr); /* Resend rejected frames */ irlap_resend_rejected_frames(self, RSP_FRAME); /* Keep state, do not move this line */ irlap_next_state(self, LAP_NRM_S); irlap_data_indication(self, skb, FALSE); irlap_start_wd_timer(self, self->wd_timeout); break; } /* * This is not documented in IrLAP!! Unexpected NR * with poll bit cleared */ if (!info->pf) { self->vr = (self->vr + 1) % 8; /* Update Nr received */ irlap_update_nr_received(self, info->nr); /* Keep state, do not move this line */ irlap_next_state(self, LAP_NRM_S); irlap_data_indication(self, skb, FALSE); irlap_start_wd_timer(self, self->wd_timeout); } break; } if (ret == NR_INVALID) { IRDA_DEBUG(0, "NRM_S, NR_INVALID not implemented!\n"); } if (ret == NS_INVALID) { IRDA_DEBUG(0, "NRM_S, NS_INVALID not implemented!\n"); } break; case RECV_UI_FRAME: /* * poll bit cleared? */ if (!info->pf) { irlap_data_indication(self, skb, TRUE); irlap_next_state(self, LAP_NRM_S); /* Keep state */ } else { /* * Any pending data requests? */ if (!skb_queue_empty(&self->txq) && (self->window > 0) && !self->remote_busy) { irlap_data_indication(self, skb, TRUE); del_timer(&self->wd_timer); irlap_next_state(self, LAP_XMIT_S); } else { irlap_data_indication(self, skb, TRUE); irlap_wait_min_turn_around(self, &self->qos_tx); irlap_send_rr_frame(self, RSP_FRAME); self->ack_required = FALSE; irlap_start_wd_timer(self, self->wd_timeout); /* Keep the state */ irlap_next_state(self, LAP_NRM_S); } } break; case RECV_RR_CMD: self->retry_count = 0; /* * Nr as expected? */ nr_status = irlap_validate_nr_received(self, info->nr); if (nr_status == NR_EXPECTED) { if (!skb_queue_empty(&self->txq) && (self->window > 0)) { self->remote_busy = FALSE; /* Update Nr received */ irlap_update_nr_received(self, info->nr); del_timer(&self->wd_timer); irlap_wait_min_turn_around(self, &self->qos_tx); irlap_next_state(self, LAP_XMIT_S); } else { self->remote_busy = FALSE; /* Update Nr received */ irlap_update_nr_received(self, info->nr); irlap_wait_min_turn_around(self, &self->qos_tx); irlap_start_wd_timer(self, self->wd_timeout); /* Note : if the link is idle (this case), * we never go in XMIT_S, so we never get a * chance to process any DISCONNECT_REQUEST. * Do it now ! 
- Jean II */ if (self->disconnect_pending) { /* Disconnect */ irlap_send_rd_frame(self); irlap_flush_all_queues(self); irlap_next_state(self, LAP_SCLOSE); } else { /* Just send back pf bit */ irlap_send_rr_frame(self, RSP_FRAME); irlap_next_state(self, LAP_NRM_S); } } } else if (nr_status == NR_UNEXPECTED) { self->remote_busy = FALSE; irlap_update_nr_received(self, info->nr); irlap_resend_rejected_frames(self, RSP_FRAME); irlap_start_wd_timer(self, self->wd_timeout); /* Keep state */ irlap_next_state(self, LAP_NRM_S); } else { IRDA_DEBUG(1, "%s(), invalid nr not implemented!\n", __func__); } break; case RECV_SNRM_CMD: /* SNRM frame is not allowed to contain an I-field */ if (!info) { del_timer(&self->wd_timer); IRDA_DEBUG(1, "%s(), received SNRM cmd\n", __func__); irlap_next_state(self, LAP_RESET_CHECK); irlap_reset_indication(self); } else { IRDA_DEBUG(0, "%s(), SNRM frame contained an I-field!\n", __func__); } break; case RECV_REJ_CMD: irlap_update_nr_received(self, info->nr); if (self->remote_busy) { irlap_wait_min_turn_around(self, &self->qos_tx); irlap_send_rr_frame(self, RSP_FRAME); } else irlap_resend_rejected_frames(self, RSP_FRAME); irlap_start_wd_timer(self, self->wd_timeout); break; case RECV_SREJ_CMD: irlap_update_nr_received(self, info->nr); if (self->remote_busy) { irlap_wait_min_turn_around(self, &self->qos_tx); irlap_send_rr_frame(self, RSP_FRAME); } else irlap_resend_rejected_frame(self, RSP_FRAME); irlap_start_wd_timer(self, self->wd_timeout); break; case WD_TIMER_EXPIRED: /* * Wait until retry_count * n matches negotiated threshold/ * disconnect time (note 2 in IrLAP p. 82) * * Similar to irlap_state_nrm_p() -> FINAL_TIMER_EXPIRED * Note : self->wd_timeout = (self->final_timeout * 2), * which explain why we use (self->N2 / 2) here !!! 
* Jean II */ IRDA_DEBUG(1, "%s(), retry_count = %d\n", __func__, self->retry_count); if (self->retry_count < (self->N2 / 2)) { /* No retry, just wait for primary */ irlap_start_wd_timer(self, self->wd_timeout); self->retry_count++; if((self->retry_count % (self->N1 / 2)) == 0) irlap_status_indication(self, STATUS_NO_ACTIVITY); } else { irlap_apply_default_connection_parameters(self); /* Always switch state before calling upper layers */ irlap_next_state(self, LAP_NDM); irlap_disconnect_indication(self, LAP_NO_RESPONSE); } break; case RECV_DISC_CMD: /* Always switch state before calling upper layers */ irlap_next_state(self, LAP_NDM); /* Send disconnect response */ irlap_wait_min_turn_around(self, &self->qos_tx); irlap_send_ua_response_frame(self, NULL); del_timer(&self->wd_timer); irlap_flush_all_queues(self); /* Set default link parameters */ irlap_apply_default_connection_parameters(self); irlap_disconnect_indication(self, LAP_DISC_INDICATION); break; case RECV_DISCOVERY_XID_CMD: irlap_wait_min_turn_around(self, &self->qos_tx); irlap_send_rr_frame(self, RSP_FRAME); self->ack_required = TRUE; irlap_start_wd_timer(self, self->wd_timeout); irlap_next_state(self, LAP_NRM_S); break; case RECV_TEST_CMD: /* Remove test frame header (only LAP header in NRM) */ skb_pull(skb, LAP_ADDR_HEADER + LAP_CTRL_HEADER); irlap_wait_min_turn_around(self, &self->qos_tx); irlap_start_wd_timer(self, self->wd_timeout); /* Send response (info will be copied) */ irlap_send_test_frame(self, self->caddr, info->daddr, skb); break; default: IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __func__, event, irlap_event[event]); ret = -EINVAL; break; } return ret; } /* * Function irlap_state_sclose (self, event, skb, info) */ static int irlap_state_sclose(struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info) { IRDA_DEBUG(1, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return -ENODEV;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -EBADR;); switch (event) { case RECV_DISC_CMD: /* Always switch state before calling upper layers */ irlap_next_state(self, LAP_NDM); /* Send disconnect response */ irlap_wait_min_turn_around(self, &self->qos_tx); irlap_send_ua_response_frame(self, NULL); del_timer(&self->wd_timer); /* Set default link parameters */ irlap_apply_default_connection_parameters(self); irlap_disconnect_indication(self, LAP_DISC_INDICATION); break; case RECV_DM_RSP: /* IrLAP-1.1 p.82: in SCLOSE, S and I type RSP frames * shall take us down into default NDM state, like DM_RSP */ case RECV_RR_RSP: case RECV_RNR_RSP: case RECV_REJ_RSP: case RECV_SREJ_RSP: case RECV_I_RSP: /* Always switch state before calling upper layers */ irlap_next_state(self, LAP_NDM); del_timer(&self->wd_timer); irlap_apply_default_connection_parameters(self); irlap_disconnect_indication(self, LAP_DISC_INDICATION); break; case WD_TIMER_EXPIRED: /* Always switch state before calling upper layers */ irlap_next_state(self, LAP_NDM); irlap_apply_default_connection_parameters(self); irlap_disconnect_indication(self, LAP_DISC_INDICATION); break; default: /* IrLAP-1.1 p.82: in SCLOSE, basically any received frame * with pf=1 shall restart the wd-timer and resend the rd:rsp */ if (info != NULL && info->pf) { del_timer(&self->wd_timer); irlap_wait_min_turn_around(self, &self->qos_tx); irlap_send_rd_frame(self); irlap_start_wd_timer(self, self->wd_timeout); break; /* stay in SCLOSE */ } IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __func__, event, irlap_event[event]); break; } return -1; } static int 
irlap_state_reset_check( struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info) { int ret = 0; IRDA_DEBUG(1, "%s(), event=%s\n", __func__, irlap_event[event]); IRDA_ASSERT(self != NULL, return -ENODEV;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -EBADR;); switch (event) { case RESET_RESPONSE: irlap_send_ua_response_frame(self, &self->qos_rx); irlap_initiate_connection_state(self); irlap_start_wd_timer(self, WD_TIMEOUT); irlap_flush_all_queues(self); irlap_next_state(self, LAP_NRM_S); break; case DISCONNECT_REQUEST: irlap_wait_min_turn_around(self, &self->qos_tx); irlap_send_rd_frame(self); irlap_start_wd_timer(self, WD_TIMEOUT); irlap_next_state(self, LAP_SCLOSE); break; default: IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __func__, event, irlap_event[event]); ret = -EINVAL; break; } return ret; }
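/*
 * Editor's sketch (not part of the original file): the handlers above are
 * reached through a per-state dispatch, and each one follows the same
 * contract -- switch on the event, change state *before* calling upper
 * layers, and return a negative value when no more frames may be sent.
 * The stand-alone program below illustrates that table-driven pattern;
 * all demo_* names are invented for the example.
 */
#include <stdio.h>

typedef enum { DEMO_NDM, DEMO_QUERY, DEMO_NSTATES } demo_state_t;
typedef enum { DEMO_DISCOVERY_REQUEST, DEMO_SLOT_TIMER_EXPIRED } demo_event_t;

struct demo_lap { demo_state_t state; };
typedef int (*demo_handler_t)(struct demo_lap *self, demo_event_t event);

static int demo_state_ndm(struct demo_lap *self, demo_event_t event)
{
	if (event == DEMO_DISCOVERY_REQUEST) {
		self->state = DEMO_QUERY;	/* switch state before any upcall */
		return 0;
	}
	return -1;				/* unknown event in this state */
}

static int demo_state_query(struct demo_lap *self, demo_event_t event)
{
	if (event == DEMO_SLOT_TIMER_EXPIRED) {
		self->state = DEMO_NDM;		/* discovery finished */
		return 0;
	}
	return -1;
}

/* Dispatch table indexed by the current state, one handler per state */
static const demo_handler_t demo_table[DEMO_NSTATES] = {
	[DEMO_NDM]   = demo_state_ndm,
	[DEMO_QUERY] = demo_state_query,
};

int main(void)
{
	struct demo_lap lap = { .state = DEMO_NDM };
	int ret = demo_table[lap.state](&lap, DEMO_DISCOVERY_REQUEST);

	printf("ret=%d, new state=%d\n", ret, lap.state);
	return 0;
}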
gpl-2.0
gtvhacker/Sony-x86-kexec
arch/powerpc/boot/cuboot-rainier.c
14095
1453
/* * Old U-boot compatibility for Rainier * * Valentine Barshak <vbarshak@ru.mvista.com> * Copyright 2007 MontaVista Software, Inc * * Based on Ebony code by David Gibson <david@gibson.dropbear.id.au> * Copyright IBM Corporation, 2007 * * Based on Bamboo code by Josh Boyer <jwboyer@linux.vnet.ibm.com> * Copyright IBM Corporation, 2007 * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; version 2 of the License */ #include <stdarg.h> #include <stddef.h> #include "types.h" #include "elf.h" #include "string.h" #include "stdio.h" #include "page.h" #include "ops.h" #include "dcr.h" #include "4xx.h" #include "44x.h" #include "cuboot.h" #define TARGET_4xx #define TARGET_44x #include "ppcboot.h" static bd_t bd; static void rainier_fixups(void) { unsigned long sysclk = 33333333; ibm440ep_fixup_clocks(sysclk, 11059200, 50000000); ibm4xx_fixup_ebc_ranges("/plb/opb/ebc"); ibm4xx_denali_fixup_memsize(); dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr); dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr); } void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7) { CUBOOT_INIT(); platform_ops.fixups = rainier_fixups; platform_ops.exit = ibm44x_dbcr_reset; fdt_init(_dtb_start); serial_console_init(); }
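/*
 * Editor's sketch (not part of the boot wrapper): rainier_fixups() above
 * copies fields handed over by old U-Boot in the bd_t (for example
 * bi_enetaddr) into the device tree before the kernel starts. The
 * stand-alone fragment below mimics that one-shot fixup idea with
 * invented demo_* names; it is not the real
 * dt_fixup_mac_address_by_alias() implementation.
 */
#include <stdio.h>
#include <string.h>

struct demo_bd {
	unsigned char bi_enetaddr[6];	/* MAC handed over by old U-Boot */
};

struct demo_dev {
	const char *alias;
	unsigned char mac[6];
};

static void demo_fixup_mac(struct demo_dev *dev, const struct demo_bd *bd)
{
	/* Patch the device description with the board-info MAC address */
	memcpy(dev->mac, bd->bi_enetaddr, sizeof(dev->mac));
}

int main(void)
{
	struct demo_bd bd = { .bi_enetaddr = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } };
	struct demo_dev eth0 = { .alias = "ethernet0" };

	demo_fixup_mac(&eth0, &bd);	/* runs once, before the kernel boots */
	printf("%s: %02x:%02x:%02x:%02x:%02x:%02x\n", eth0.alias,
	       eth0.mac[0], eth0.mac[1], eth0.mac[2],
	       eth0.mac[3], eth0.mac[4], eth0.mac[5]);
	return 0;
}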
gpl-2.0
sebastianscatularo/glibc
io/mkdirat.c
16
1465
/* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ #include <errno.h> #include <fcntl.h> #include <stddef.h> #include <sys/stat.h> #include <sys/types.h> /* Create a directory named PATH relative to FD with protections MODE. */ int mkdirat (fd, path, mode) int fd; const char *path; mode_t mode; { if (path == NULL) { __set_errno (EINVAL); return -1; } if (fd != AT_FDCWD && path[0] != '/') { /* Check FD is associated with a directory. */ struct stat64 st; if (__fxstat64 (_STAT_VER, fd, &st) != 0) return -1; if (!S_ISDIR (st.st_mode)) { __set_errno (ENOTDIR); return -1; } } __set_errno (ENOSYS); return -1; } stub_warning (mkdirat)
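/*
 * Editor's usage sketch (ordinary application code, not part of the stub
 * above): how a caller is expected to use mkdirat() -- either relative to
 * an open directory descriptor, or relative to the current working
 * directory via AT_FDCWD. The paths here are examples only.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	int dfd = open("/tmp", O_RDONLY | O_DIRECTORY);

	if (dfd < 0) {
		perror("open");
		return 1;
	}
	if (mkdirat(dfd, "logs", 0755) != 0)		/* creates /tmp/logs */
		perror("mkdirat(dfd)");
	if (mkdirat(AT_FDCWD, "logs", 0755) != 0)	/* creates ./logs */
		perror("mkdirat(AT_FDCWD)");
	close(dfd);
	return 0;
}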
gpl-2.0
fcooper/sitara-ti-linux-kernel
arch/arm/mach-omap2/cpuidle33xx.c
16
3654
/* * AM33XX CPU idle Routines * * Copyright (C) 2011-2013 Texas Instruments, Inc. * Santosh Shilimkar <santosh.shilimkar@ti.com> * Rajendra Nayak <rnayak@ti.com> * Russ Dill <russ.dill@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/sched.h> #include <linux/cpuidle.h> #include <linux/cpu_pm.h> #include "common.h" #include "pm33xx.h" #include "powerdomain.h" #define AM33XX_FLAG_MPU_PLL BIT(16) #define AM33XX_FLAG_SELF_REFRESH BIT(17) #define AM33XX_FLAG_DISABLE_EMIF BIT(18) static int am33xx_enter_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { struct cpuidle_state *state; u32 wfi_flags = 0; u32 m3_flags = 0; if (omap_irq_pending() || need_resched()) return index; state = &drv->states[index]; if (state->flags & AM33XX_FLAG_SELF_REFRESH) wfi_flags |= WFI_SELF_REFRESH; if (state->flags & AM33XX_FLAG_DISABLE_EMIF) wfi_flags |= WFI_DISABLE_EMIF; if (state->flags & AM33XX_FLAG_MPU_PLL) { wfi_flags |= WFI_WAKE_M3; m3_flags = PWRDM_POWER_ON << M3_PARAM2_MPU_STATE_SHIFT | MEM_BANK_RET_ST_OFF << M3_PARAM2_MPU_RAM_RET_SHIFT | MEM_BANK_RET_ST_RET << M3_PARAM2_MPU_L1_RET_SHIFT | MEM_BANK_RET_ST_RET << M3_PARAM2_MPU_L2_RET_SHIFT | PWRDM_POWER_ON << M3_PARAM2_PER_STATE_SHIFT | MPU_WAKE << M3_PARAM2_WAKE_SOURCES_SHIFT; } am33xx_do_sram_cpuidle(wfi_flags, m3_flags); return index; } /* Power usage measured as a combination of CPU and DDR power rails */ struct cpuidle_state am33xx_ddr2_states[] = { { .exit_latency = 72, .target_residency = 150, .power_usage = 625, .flags = CPUIDLE_FLAG_TIME_VALID, .enter = am33xx_enter_idle, .name = "C0", .desc = "WFI", }, { .exit_latency = 176, .target_residency = 300, .power_usage = 562, .flags = CPUIDLE_FLAG_TIME_VALID | AM33XX_FLAG_MPU_PLL, .enter = am33xx_enter_idle, .name = "C1", .desc = "Bypass MPU PLL", }, { .exit_latency = 390, .target_residency = 500, .power_usage = 529, .flags = CPUIDLE_FLAG_TIME_VALID | AM33XX_FLAG_MPU_PLL | AM33XX_FLAG_SELF_REFRESH, .enter = am33xx_enter_idle, .name = "C1+SR", .desc = "Bypass MPU PLL + DDR SR", }, }; struct cpuidle_state am33xx_ddr3_states[] = { { .exit_latency = 68, .target_residency = 150, .power_usage = 557, .flags = CPUIDLE_FLAG_TIME_VALID, .enter = am33xx_enter_idle, .name = "C0", .desc = "WFI", }, { .exit_latency = 130, .target_residency = 200, .power_usage = 497, .flags = CPUIDLE_FLAG_TIME_VALID | AM33XX_FLAG_MPU_PLL, .enter = am33xx_enter_idle, .name = "C1", .desc = "Bypass MPU PLL", }, }; static struct cpuidle_driver am33xx_idle_driver = { .name = "am33xx_idle", .owner = THIS_MODULE, }; /** * am33xx_idle_init - Init routine for am33xx idle * * Registers the am33xx specific cpuidle driver to the cpuidle * framework with the valid set of states. */ int am33xx_idle_init(bool ddr3) { if (ddr3) { BUILD_BUG_ON(ARRAY_SIZE(am33xx_ddr3_states) > ARRAY_SIZE(am33xx_idle_driver.states)); memcpy(am33xx_idle_driver.states, am33xx_ddr3_states, sizeof(am33xx_ddr3_states)); am33xx_idle_driver.state_count = ARRAY_SIZE(am33xx_ddr3_states); } else { BUILD_BUG_ON(ARRAY_SIZE(am33xx_ddr2_states) > ARRAY_SIZE(am33xx_idle_driver.states)); memcpy(am33xx_idle_driver.states, am33xx_ddr2_states, sizeof(am33xx_ddr2_states)); am33xx_idle_driver.state_count = ARRAY_SIZE(am33xx_ddr2_states); } return cpuidle_register(&am33xx_idle_driver, NULL); }
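/*
 * Editor's sketch (not part of the driver): am33xx_enter_idle() above
 * translates the driver-private bits packed into state->flags into the
 * wfi_flags handed to the SRAM idle path. The stand-alone fragment below
 * isolates that translation step; the DEMO_WFI_* values stand in for the
 * real WFI_* constants from pm33xx.h and are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_BIT(n)			(1u << (n))
#define AM33XX_FLAG_MPU_PLL		DEMO_BIT(16)
#define AM33XX_FLAG_SELF_REFRESH	DEMO_BIT(17)
#define AM33XX_FLAG_DISABLE_EMIF	DEMO_BIT(18)

#define DEMO_WFI_SELF_REFRESH		DEMO_BIT(0)
#define DEMO_WFI_DISABLE_EMIF		DEMO_BIT(1)
#define DEMO_WFI_WAKE_M3		DEMO_BIT(2)

static uint32_t demo_state_to_wfi_flags(uint32_t state_flags)
{
	uint32_t wfi = 0;

	if (state_flags & AM33XX_FLAG_SELF_REFRESH)
		wfi |= DEMO_WFI_SELF_REFRESH;
	if (state_flags & AM33XX_FLAG_DISABLE_EMIF)
		wfi |= DEMO_WFI_DISABLE_EMIF;
	if (state_flags & AM33XX_FLAG_MPU_PLL)
		wfi |= DEMO_WFI_WAKE_M3;	/* deepest states also wake the M3 */
	return wfi;
}

int main(void)
{
	/* "C1+SR" packs two feature bits on top of the timekeeping flag */
	uint32_t c1_sr = AM33XX_FLAG_MPU_PLL | AM33XX_FLAG_SELF_REFRESH;

	printf("wfi_flags=0x%x\n", demo_state_to_wfi_flags(c1_sr));
	return 0;
}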
gpl-2.0
SlimRoms/kernel_lge_msm7x27a-common
arch/arm/mach-msm/qdsp5/audio_wma.c
16
49073
/* audio_wma.c - wma audio decoder driver * * Copyright (c) 2009, 2011-2013, The Linux Foundation. All rights reserved. * * Based on the mp3 native driver in arch/arm/mach-msm/qdsp5/audio_mp3.c * * Copyright (C) 2008 Google, Inc. * Copyright (C) 2008 HTC Corporation * * All source code in this file is licensed under the following license except * where indicated. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * See the GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program; if not, you can find it at http://www.fsf.org */ #include <asm/atomic.h> #include <asm/ioctls.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/uaccess.h> #include <linux/kthread.h> #include <linux/wait.h> #include <linux/dma-mapping.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/list.h> #include <linux/earlysuspend.h> #include <linux/slab.h> #include <linux/msm_audio.h> #include <linux/msm_audio_wma.h> #include <linux/memory_alloc.h> #include <linux/msm_ion.h> #include <mach/msm_adsp.h> #include <mach/iommu.h> #include <mach/iommu_domains.h> #include <mach/qdsp5/qdsp5audppcmdi.h> #include <mach/qdsp5/qdsp5audppmsg.h> #include <mach/qdsp5/qdsp5audpp.h> #include <mach/qdsp5/qdsp5audplaycmdi.h> #include <mach/qdsp5/qdsp5audplaymsg.h> #include <mach/qdsp5/qdsp5rmtcmdi.h> #include <mach/debug_mm.h> #include <mach/msm_memtypes.h> #include "audmgr.h" /* Size must be power of 2 */ #define BUFSZ_MAX 2062 /* Includes meta in size */ #define BUFSZ_MIN 1038 /* Includes meta in size */ #define DMASZ_MAX (BUFSZ_MAX * 2) #define DMASZ_MIN (BUFSZ_MIN * 2) #define AUDPLAY_INVALID_READ_PTR_OFFSET 0xFFFF #define AUDDEC_DEC_WMA 4 #define PCM_BUFSZ_MIN 8216 /* Hold one stereo WMA frame and meta out*/ #define PCM_BUF_MAX_COUNT 5 /* DSP only accepts 5 buffers at most but support 2 buffers currently */ #define ROUTING_MODE_FTRT 1 #define ROUTING_MODE_RT 2 /* Decoder status received from AUDPPTASK */ #define AUDPP_DEC_STATUS_SLEEP 0 #define AUDPP_DEC_STATUS_INIT 1 #define AUDPP_DEC_STATUS_CFG 2 #define AUDPP_DEC_STATUS_PLAY 3 #define AUDWMA_METAFIELD_MASK 0xFFFF0000 #define AUDWMA_EOS_FLG_OFFSET 0x0A /* Offset from beginning of buffer */ #define AUDWMA_EOS_FLG_MASK 0x01 #define AUDWMA_EOS_NONE 0x0 /* No EOS detected */ #define AUDWMA_EOS_SET 0x1 /* EOS set in meta field */ #define AUDWMA_EVENT_NUM 10 /* Default number of pre-allocated event packets */ struct buffer { void *data; unsigned size; unsigned used; /* Input usage actual DSP produced PCM size */ unsigned addr; unsigned short mfield_sz; /*only useful for data has meta field */ }; #ifdef CONFIG_HAS_EARLYSUSPEND struct audwma_suspend_ctl { struct early_suspend node; struct audio *audio; }; #endif struct audwma_event{ struct list_head list; int event_type; union msm_audio_event_payload payload; }; struct audio { struct buffer out[2]; spinlock_t dsp_lock; uint8_t out_head; uint8_t out_tail; uint8_t out_needed; /* number of buffers the dsp is waiting for */ unsigned out_dma_sz; atomic_t out_bytes; struct mutex lock; struct mutex write_lock; wait_queue_head_t write_wait; /* Host PCM section */ struct buffer 
in[PCM_BUF_MAX_COUNT]; struct mutex read_lock; wait_queue_head_t read_wait; /* Wait queue for read */ char *read_data; /* pointer to reader buffer */ int32_t read_phys; /* physical address of reader buffer */ uint8_t read_next; /* index to input buffers to be read next */ uint8_t fill_next; /* index to buffer that DSP should be filling */ uint8_t pcm_buf_count; /* number of pcm buffer allocated */ /* ---- End of Host PCM section */ struct msm_adsp_module *audplay; /* configuration to use on next enable */ uint32_t out_sample_rate; uint32_t out_channel_mode; struct msm_audio_wma_config wma_config; struct audmgr audmgr; /* data allocated for various buffers */ char *data; int32_t phys; /* physical address of write buffer */ void *map_v_read; void *map_v_write; int mfield; /* meta field embedded in data */ int rflush; /* Read flush */ int wflush; /* Write flush */ int opened; int enabled; int running; int stopped; /* set when stopped, cleared on flush */ int pcm_feedback; int buf_refresh; int rmt_resource_released; int teos; /* valid only if tunnel mode & no data left for decoder */ enum msm_aud_decoder_state dec_state; /* Represents decoder state */ int reserved; /* A byte is being reserved */ char rsv_byte; /* Handle odd length user data */ const char *module_name; unsigned queue_id; uint16_t dec_id; uint32_t read_ptr_offset; #ifdef CONFIG_HAS_EARLYSUSPEND struct audwma_suspend_ctl suspend_ctl; #endif #ifdef CONFIG_DEBUG_FS struct dentry *dentry; #endif wait_queue_head_t wait; struct list_head free_event_queue; struct list_head event_queue; wait_queue_head_t event_wait; spinlock_t event_queue_lock; struct mutex get_event_lock; int event_abort; int eq_enable; int eq_needs_commit; audpp_cmd_cfg_object_params_eqalizer eq; audpp_cmd_cfg_object_params_volume vol_pan; struct ion_client *client; struct ion_handle *input_buff_handle; struct ion_handle *output_buff_handle; }; static int auddec_dsp_config(struct audio *audio, int enable); static void audpp_cmd_cfg_adec_params(struct audio *audio); static void audpp_cmd_cfg_routing_mode(struct audio *audio); static void audplay_send_data(struct audio *audio, unsigned needed); static void audplay_config_hostpcm(struct audio *audio); static void audplay_buffer_refresh(struct audio *audio); static void audio_dsp_event(void *private, unsigned id, uint16_t *msg); #ifdef CONFIG_HAS_EARLYSUSPEND static void audwma_post_event(struct audio *audio, int type, union msm_audio_event_payload payload); #endif static int rmt_put_resource(struct audio *audio) { struct aud_codec_config_cmd cmd; unsigned short client_idx; cmd.cmd_id = RM_CMD_AUD_CODEC_CFG; cmd.client_id = RM_AUD_CLIENT_ID; cmd.task_id = audio->dec_id; cmd.enable = RMT_DISABLE; cmd.dec_type = AUDDEC_DEC_WMA; client_idx = ((cmd.client_id << 8) | cmd.task_id); return put_adsp_resource(client_idx, &cmd, sizeof(cmd)); } static int rmt_get_resource(struct audio *audio) { struct aud_codec_config_cmd cmd; unsigned short client_idx; cmd.cmd_id = RM_CMD_AUD_CODEC_CFG; cmd.client_id = RM_AUD_CLIENT_ID; cmd.task_id = audio->dec_id; cmd.enable = RMT_ENABLE; cmd.dec_type = AUDDEC_DEC_WMA; client_idx = ((cmd.client_id << 8) | cmd.task_id); return get_adsp_resource(client_idx, &cmd, sizeof(cmd)); } /* must be called with audio->lock held */ static int audio_enable(struct audio *audio) { struct audmgr_config cfg; int rc; MM_DBG("\n"); /* Macro prints the file name and function */ if (audio->enabled) return 0; if (audio->rmt_resource_released == 1) { audio->rmt_resource_released = 0; rc = rmt_get_resource(audio); if (rc) 
{ MM_ERR("ADSP resources are not available for WMA \ session 0x%08x on decoder: %d\n Ignoring \ error and going ahead with the playback\n", (int)audio, audio->dec_id); } } audio->dec_state = MSM_AUD_DECODER_STATE_NONE; audio->out_tail = 0; audio->out_needed = 0; if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK) { cfg.tx_rate = RPC_AUD_DEF_SAMPLE_RATE_NONE; cfg.rx_rate = RPC_AUD_DEF_SAMPLE_RATE_48000; cfg.def_method = RPC_AUD_DEF_METHOD_PLAYBACK; cfg.codec = RPC_AUD_DEF_CODEC_WMA; cfg.snd_method = RPC_SND_METHOD_MIDI; rc = audmgr_enable(&audio->audmgr, &cfg); if (rc < 0) { msm_adsp_dump(audio->audplay); return rc; } } if (msm_adsp_enable(audio->audplay)) { MM_ERR("msm_adsp_enable(audplay) failed\n"); if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK) audmgr_disable(&audio->audmgr); return -ENODEV; } if (audpp_enable(audio->dec_id, audio_dsp_event, audio)) { MM_ERR("audpp_enable() failed\n"); msm_adsp_disable(audio->audplay); if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK) audmgr_disable(&audio->audmgr); return -ENODEV; } audio->enabled = 1; return 0; } /* must be called with audio->lock held */ static int audio_disable(struct audio *audio) { int rc = 0; MM_DBG("\n"); /* Macro prints the file name and function */ if (audio->enabled) { audio->enabled = 0; audio->dec_state = MSM_AUD_DECODER_STATE_NONE; auddec_dsp_config(audio, 0); rc = wait_event_interruptible_timeout(audio->wait, audio->dec_state != MSM_AUD_DECODER_STATE_NONE, msecs_to_jiffies(MSM_AUD_DECODER_WAIT_MS)); if (rc == 0) rc = -ETIMEDOUT; else if (audio->dec_state != MSM_AUD_DECODER_STATE_CLOSE) rc = -EFAULT; else rc = 0; audio->stopped = 1; wake_up(&audio->write_wait); wake_up(&audio->read_wait); msm_adsp_disable(audio->audplay); audpp_disable(audio->dec_id, audio); if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK) { rc = audmgr_disable(&audio->audmgr); if (rc < 0) msm_adsp_dump(audio->audplay); } audio->out_needed = 0; rmt_put_resource(audio); audio->rmt_resource_released = 1; } return rc; } /* ------------------- dsp --------------------- */ static void audio_update_pcm_buf_entry(struct audio *audio, uint32_t *payload) { uint8_t index; unsigned long flags; if (audio->rflush) return; spin_lock_irqsave(&audio->dsp_lock, flags); for (index = 0; index < payload[1]; index++) { if (audio->in[audio->fill_next].addr == payload[2 + index * 2]) { MM_DBG("audio_update_pcm_buf_entry: \ in[%d] ready\n", audio->fill_next); audio->in[audio->fill_next].used = payload[3 + index * 2]; if ((++audio->fill_next) == audio->pcm_buf_count) audio->fill_next = 0; } else { MM_ERR("audio_update_pcm_buf_entry: \ expected=%x ret=%x\n", audio->in[audio->fill_next].addr, payload[1 + index * 2]); break; } } if (audio->in[audio->fill_next].used == 0) { audplay_buffer_refresh(audio); } else { MM_DBG("read cannot keep up\n"); audio->buf_refresh = 1; } wake_up(&audio->read_wait); spin_unlock_irqrestore(&audio->dsp_lock, flags); } static void audplay_dsp_event(void *data, unsigned id, size_t len, void (*getevent) (void *ptr, size_t len)) { struct audio *audio = data; uint32_t msg[28]; getevent(msg, sizeof(msg)); MM_DBG("msg_id=%x\n", id); switch (id) { case AUDPLAY_MSG_DEC_NEEDS_DATA: audplay_send_data(audio, 1); break; case AUDPLAY_MSG_BUFFER_UPDATE: audio_update_pcm_buf_entry(audio, msg); break; case ADSP_MESSAGE_ID: MM_DBG("Received ADSP event: module enable(audplaytask)\n"); break; default: MM_ERR("unexpected message from decoder \n"); break; } } static void audio_dsp_event(void *private, unsigned id, uint16_t *msg) { struct audio *audio = private; switch (id) { 
case AUDPP_MSG_STATUS_MSG:{ unsigned status = msg[1]; switch (status) { case AUDPP_DEC_STATUS_SLEEP: { uint16_t reason = msg[2]; MM_DBG("decoder status:sleep reason = \ 0x%04x\n", reason); if ((reason == AUDPP_MSG_REASON_MEM) || (reason == AUDPP_MSG_REASON_NODECODER)) { audio->dec_state = MSM_AUD_DECODER_STATE_FAILURE; wake_up(&audio->wait); } else if (reason == AUDPP_MSG_REASON_NONE) { /* decoder is in disable state */ audio->dec_state = MSM_AUD_DECODER_STATE_CLOSE; wake_up(&audio->wait); } break; } case AUDPP_DEC_STATUS_INIT: MM_DBG("decoder status: init\n"); if (audio->pcm_feedback) audpp_cmd_cfg_routing_mode(audio); else audpp_cmd_cfg_adec_params(audio); break; case AUDPP_DEC_STATUS_CFG: MM_DBG("decoder status: cfg\n"); break; case AUDPP_DEC_STATUS_PLAY: MM_DBG("decoder status: play\n"); if (audio->pcm_feedback) { audplay_config_hostpcm(audio); audplay_buffer_refresh(audio); } audio->dec_state = MSM_AUD_DECODER_STATE_SUCCESS; wake_up(&audio->wait); break; default: MM_ERR("unknown decoder status\n"); } break; } case AUDPP_MSG_CFG_MSG: if (msg[0] == AUDPP_MSG_ENA_ENA) { MM_DBG("CFG_MSG ENABLE\n"); auddec_dsp_config(audio, 1); audio->out_needed = 0; audio->running = 1; audpp_dsp_set_vol_pan(audio->dec_id, &audio->vol_pan); audpp_dsp_set_eq(audio->dec_id, audio->eq_enable, &audio->eq); audpp_avsync(audio->dec_id, 22050); } else if (msg[0] == AUDPP_MSG_ENA_DIS) { MM_DBG("CFG_MSG DISABLE\n"); audpp_avsync(audio->dec_id, 0); audio->running = 0; } else { MM_DBG("CFG_MSG %d?\n", msg[0]); } break; case AUDPP_MSG_ROUTING_ACK: MM_DBG("ROUTING_ACK mode=%d\n", msg[1]); audpp_cmd_cfg_adec_params(audio); break; case AUDPP_MSG_FLUSH_ACK: MM_DBG("FLUSH_ACK\n"); audio->wflush = 0; audio->rflush = 0; wake_up(&audio->write_wait); if (audio->pcm_feedback) audplay_buffer_refresh(audio); break; case AUDPP_MSG_PCMDMAMISSED: MM_DBG("PCMDMAMISSED\n"); audio->teos = 1; wake_up(&audio->write_wait); break; default: MM_ERR("UNKNOWN (%d)\n", id); } } static struct msm_adsp_ops audplay_adsp_ops_wma = { .event = audplay_dsp_event, }; #define audplay_send_queue0(audio, cmd, len) \ msm_adsp_write(audio->audplay, audio->queue_id, \ cmd, len) static int auddec_dsp_config(struct audio *audio, int enable) { u16 cfg_dec_cmd[AUDPP_CMD_CFG_DEC_TYPE_LEN / sizeof(unsigned short)]; memset(cfg_dec_cmd, 0, sizeof(cfg_dec_cmd)); cfg_dec_cmd[0] = AUDPP_CMD_CFG_DEC_TYPE; if (enable) cfg_dec_cmd[1 + audio->dec_id] = AUDPP_CMD_UPDATDE_CFG_DEC | AUDPP_CMD_ENA_DEC_V | AUDDEC_DEC_WMA; else cfg_dec_cmd[1 + audio->dec_id] = AUDPP_CMD_UPDATDE_CFG_DEC | AUDPP_CMD_DIS_DEC_V; return audpp_send_queue1(&cfg_dec_cmd, sizeof(cfg_dec_cmd)); } static void audpp_cmd_cfg_adec_params(struct audio *audio) { struct audpp_cmd_cfg_adec_params_wma cmd; memset(&cmd, 0, sizeof(cmd)); cmd.common.cmd_id = AUDPP_CMD_CFG_ADEC_PARAMS; cmd.common.length = AUDPP_CMD_CFG_ADEC_PARAMS_WMA_LEN; cmd.common.dec_id = audio->dec_id; cmd.common.input_sampling_frequency = audio->out_sample_rate; /* * Test done for sample with the following configuration * armdatareqthr = 1262 * channelsdecoded = 1(MONO)/2(STEREO) * wmabytespersec = Tested with 6003 Bytes per sec * wmasamplingfreq = 44100 * wmaencoderopts = 31 */ cmd.armdatareqthr = audio->wma_config.armdatareqthr; cmd.channelsdecoded = audio->wma_config.channelsdecoded; cmd.wmabytespersec = audio->wma_config.wmabytespersec; cmd.wmasamplingfreq = audio->wma_config.wmasamplingfreq; cmd.wmaencoderopts = audio->wma_config.wmaencoderopts; audpp_send_queue2(&cmd, sizeof(cmd)); } static void audpp_cmd_cfg_routing_mode(struct audio 
*audio) { struct audpp_cmd_routing_mode cmd; MM_DBG("\n"); /* Macro prints the file name and function */ memset(&cmd, 0, sizeof(cmd)); cmd.cmd_id = AUDPP_CMD_ROUTING_MODE; cmd.object_number = audio->dec_id; if (audio->pcm_feedback) cmd.routing_mode = ROUTING_MODE_FTRT; else cmd.routing_mode = ROUTING_MODE_RT; audpp_send_queue1(&cmd, sizeof(cmd)); } static void audplay_buffer_refresh(struct audio *audio) { struct audplay_cmd_buffer_refresh refresh_cmd; refresh_cmd.cmd_id = AUDPLAY_CMD_BUFFER_REFRESH; refresh_cmd.num_buffers = 1; refresh_cmd.buf0_address = audio->in[audio->fill_next].addr; refresh_cmd.buf0_length = audio->in[audio->fill_next].size; refresh_cmd.buf_read_count = 0; MM_DBG("buf0_addr=%x buf0_len=%d\n", refresh_cmd.buf0_address, refresh_cmd.buf0_length); (void)audplay_send_queue0(audio, &refresh_cmd, sizeof(refresh_cmd)); } static void audplay_config_hostpcm(struct audio *audio) { struct audplay_cmd_hpcm_buf_cfg cfg_cmd; MM_DBG("\n"); /* Macro prints the file name and function */ cfg_cmd.cmd_id = AUDPLAY_CMD_HPCM_BUF_CFG; cfg_cmd.max_buffers = audio->pcm_buf_count; cfg_cmd.byte_swap = 0; cfg_cmd.hostpcm_config = (0x8000) | (0x4000); cfg_cmd.feedback_frequency = 1; cfg_cmd.partition_number = 0; (void)audplay_send_queue0(audio, &cfg_cmd, sizeof(cfg_cmd)); } static int audplay_dsp_send_data_avail(struct audio *audio, unsigned idx, unsigned len) { struct audplay_cmd_bitstream_data_avail_nt2 cmd; cmd.cmd_id = AUDPLAY_CMD_BITSTREAM_DATA_AVAIL_NT2; if (audio->mfield) cmd.decoder_id = AUDWMA_METAFIELD_MASK | (audio->out[idx].mfield_sz >> 1); else cmd.decoder_id = audio->dec_id; cmd.buf_ptr = audio->out[idx].addr; cmd.buf_size = len/2; cmd.partition_number = 0; /* complete writes to the input buffer */ wmb(); return audplay_send_queue0(audio, &cmd, sizeof(cmd)); } static void audplay_send_data(struct audio *audio, unsigned needed) { struct buffer *frame; unsigned long flags; spin_lock_irqsave(&audio->dsp_lock, flags); if (!audio->running) goto done; if (audio->wflush) { audio->out_needed = 1; goto done; } if (needed && !audio->wflush) { /* We were called from the callback because the DSP * requested more data. Note that the DSP does want * more data, and if a buffer was in-flight, mark it * as available (since the DSP must now be done with * it). */ audio->out_needed = 1; frame = audio->out + audio->out_tail; if (frame->used == 0xffffffff) { MM_DBG("frame %d free\n", audio->out_tail); frame->used = 0; audio->out_tail ^= 1; wake_up(&audio->write_wait); } } if (audio->out_needed) { /* If the DSP currently wants data and we have a * buffer available, we will send it and reset * the needed flag. 
We'll mark the buffer as in-flight * so that it won't be recycled until the next buffer * is requested */ MM_DBG("\n"); /* Macro prints the file name and function */ frame = audio->out + audio->out_tail; if (frame->used) { BUG_ON(frame->used == 0xffffffff); MM_DBG("frame %d busy\n", audio->out_tail); audplay_dsp_send_data_avail(audio, audio->out_tail, frame->used); frame->used = 0xffffffff; audio->out_needed = 0; } } done: spin_unlock_irqrestore(&audio->dsp_lock, flags); } /* ------------------- device --------------------- */ static void audio_flush(struct audio *audio) { unsigned long flags; spin_lock_irqsave(&audio->dsp_lock, flags); audio->out[0].used = 0; audio->out[1].used = 0; audio->out_head = 0; audio->out_tail = 0; audio->reserved = 0; spin_unlock_irqrestore(&audio->dsp_lock, flags); atomic_set(&audio->out_bytes, 0); } static void audio_flush_pcm_buf(struct audio *audio) { uint8_t index; unsigned long flags; spin_lock_irqsave(&audio->dsp_lock, flags); for (index = 0; index < PCM_BUF_MAX_COUNT; index++) audio->in[index].used = 0; audio->buf_refresh = 0; audio->read_next = 0; audio->fill_next = 0; spin_unlock_irqrestore(&audio->dsp_lock, flags); } static void audio_ioport_reset(struct audio *audio) { /* Make sure read/write thread are free from * sleep and knowing that system is not able * to process io request at the moment */ wake_up(&audio->write_wait); mutex_lock(&audio->write_lock); audio_flush(audio); mutex_unlock(&audio->write_lock); wake_up(&audio->read_wait); mutex_lock(&audio->read_lock); audio_flush_pcm_buf(audio); mutex_unlock(&audio->read_lock); } static int audwma_events_pending(struct audio *audio) { unsigned long flags; int empty; spin_lock_irqsave(&audio->event_queue_lock, flags); empty = !list_empty(&audio->event_queue); spin_unlock_irqrestore(&audio->event_queue_lock, flags); return empty || audio->event_abort; } static void audwma_reset_event_queue(struct audio *audio) { unsigned long flags; struct audwma_event *drv_evt; struct list_head *ptr, *next; spin_lock_irqsave(&audio->event_queue_lock, flags); list_for_each_safe(ptr, next, &audio->event_queue) { drv_evt = list_first_entry(&audio->event_queue, struct audwma_event, list); list_del(&drv_evt->list); kfree(drv_evt); } list_for_each_safe(ptr, next, &audio->free_event_queue) { drv_evt = list_first_entry(&audio->free_event_queue, struct audwma_event, list); list_del(&drv_evt->list); kfree(drv_evt); } spin_unlock_irqrestore(&audio->event_queue_lock, flags); return; } static long audwma_process_event_req(struct audio *audio, void __user *arg) { long rc; struct msm_audio_event usr_evt; struct audwma_event *drv_evt = NULL; int timeout; unsigned long flags; if (copy_from_user(&usr_evt, arg, sizeof(struct msm_audio_event))) return -EFAULT; timeout = (int) usr_evt.timeout_ms; if (timeout > 0) { rc = wait_event_interruptible_timeout( audio->event_wait, audwma_events_pending(audio), msecs_to_jiffies(timeout)); if (rc == 0) return -ETIMEDOUT; } else { rc = wait_event_interruptible( audio->event_wait, audwma_events_pending(audio)); } if (rc < 0) return rc; if (audio->event_abort) { audio->event_abort = 0; return -ENODEV; } rc = 0; spin_lock_irqsave(&audio->event_queue_lock, flags); if (!list_empty(&audio->event_queue)) { drv_evt = list_first_entry(&audio->event_queue, struct audwma_event, list); list_del(&drv_evt->list); } if (drv_evt) { usr_evt.event_type = drv_evt->event_type; usr_evt.event_payload = drv_evt->payload; list_add_tail(&drv_evt->list, &audio->free_event_queue); } else rc = -1; 
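	/* Consumed event nodes are parked on free_event_queue so that
	 * audwma_post_event() can recycle them from atomic context instead
	 * of calling kmalloc(GFP_ATOMIC) for every event. */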
spin_unlock_irqrestore(&audio->event_queue_lock, flags); if (!rc && copy_to_user(arg, &usr_evt, sizeof(usr_evt))) rc = -EFAULT; return rc; } static int audio_enable_eq(struct audio *audio, int enable) { if (audio->eq_enable == enable && !audio->eq_needs_commit) return 0; audio->eq_enable = enable; if (audio->running) { audpp_dsp_set_eq(audio->dec_id, enable, &audio->eq); audio->eq_needs_commit = 0; } return 0; } static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct audio *audio = file->private_data; int rc = -EINVAL; unsigned long flags = 0; uint16_t enable_mask; int enable; int prev_state; unsigned long ionflag = 0; ion_phys_addr_t addr = 0; struct ion_handle *handle = NULL; int len = 0; MM_DBG("cmd = %d\n", cmd); if (cmd == AUDIO_GET_STATS) { struct msm_audio_stats stats; stats.byte_count = audpp_avsync_byte_count(audio->dec_id); stats.sample_count = audpp_avsync_sample_count(audio->dec_id); if (copy_to_user((void *)arg, &stats, sizeof(stats))) return -EFAULT; return 0; } switch (cmd) { case AUDIO_ENABLE_AUDPP: if (copy_from_user(&enable_mask, (void *) arg, sizeof(enable_mask))) { rc = -EFAULT; break; } spin_lock_irqsave(&audio->dsp_lock, flags); enable = (enable_mask & EQ_ENABLE) ? 1 : 0; audio_enable_eq(audio, enable); spin_unlock_irqrestore(&audio->dsp_lock, flags); rc = 0; break; case AUDIO_SET_VOLUME: spin_lock_irqsave(&audio->dsp_lock, flags); audio->vol_pan.volume = arg; if (audio->running) audpp_dsp_set_vol_pan(audio->dec_id, &audio->vol_pan); spin_unlock_irqrestore(&audio->dsp_lock, flags); rc = 0; break; case AUDIO_SET_PAN: spin_lock_irqsave(&audio->dsp_lock, flags); audio->vol_pan.pan = arg; if (audio->running) audpp_dsp_set_vol_pan(audio->dec_id, &audio->vol_pan); spin_unlock_irqrestore(&audio->dsp_lock, flags); rc = 0; break; case AUDIO_SET_EQ: prev_state = audio->eq_enable; audio->eq_enable = 0; if (copy_from_user(&audio->eq.num_bands, (void *) arg, sizeof(audio->eq) - (AUDPP_CMD_CFG_OBJECT_PARAMS_COMMON_LEN + 2))) { rc = -EFAULT; break; } audio->eq_enable = prev_state; audio->eq_needs_commit = 1; rc = 0; break; } if (-EINVAL != rc) return rc; if (cmd == AUDIO_GET_EVENT) { MM_DBG("AUDIO_GET_EVENT\n"); if (mutex_trylock(&audio->get_event_lock)) { rc = audwma_process_event_req(audio, (void __user *) arg); mutex_unlock(&audio->get_event_lock); } else rc = -EBUSY; return rc; } if (cmd == AUDIO_ABORT_GET_EVENT) { audio->event_abort = 1; wake_up(&audio->event_wait); return 0; } mutex_lock(&audio->lock); switch (cmd) { case AUDIO_START: MM_DBG("AUDIO_START\n"); rc = audio_enable(audio); if (!rc) { rc = wait_event_interruptible_timeout(audio->wait, audio->dec_state != MSM_AUD_DECODER_STATE_NONE, msecs_to_jiffies(MSM_AUD_DECODER_WAIT_MS)); MM_INFO("dec_state %d rc = %d\n", audio->dec_state, rc); if (audio->dec_state != MSM_AUD_DECODER_STATE_SUCCESS) rc = -ENODEV; else rc = 0; } break; case AUDIO_STOP: MM_DBG("AUDIO_STOP\n"); rc = audio_disable(audio); audio_ioport_reset(audio); audio->stopped = 0; break; case AUDIO_FLUSH: MM_DBG("AUDIO_FLUSH\n"); audio->rflush = 1; audio->wflush = 1; audio_ioport_reset(audio); if (audio->running) { audpp_flush(audio->dec_id); rc = wait_event_interruptible(audio->write_wait, !audio->wflush); if (rc < 0) { MM_ERR("AUDIO_FLUSH interrupted\n"); rc = -EINTR; } } else { audio->rflush = 0; audio->wflush = 0; } break; case AUDIO_SET_CONFIG: { struct msm_audio_config config; if (copy_from_user(&config, (void *) arg, sizeof(config))) { rc = -EFAULT; break; } if (config.channel_count == 1) { config.channel_count = 
AUDPP_CMD_PCM_INTF_MONO_V; } else if (config.channel_count == 2) { config.channel_count = AUDPP_CMD_PCM_INTF_STEREO_V; } else { rc = -EINVAL; break; } audio->mfield = config.meta_field; audio->out_sample_rate = config.sample_rate; audio->out_channel_mode = config.channel_count; rc = 0; break; } case AUDIO_GET_CONFIG: { struct msm_audio_config config; config.buffer_size = (audio->out_dma_sz >> 1); config.buffer_count = 2; config.sample_rate = audio->out_sample_rate; if (audio->out_channel_mode == AUDPP_CMD_PCM_INTF_MONO_V) config.channel_count = 1; else config.channel_count = 2; config.meta_field = 0; config.unused[0] = 0; config.unused[1] = 0; config.unused[2] = 0; if (copy_to_user((void *) arg, &config, sizeof(config))) rc = -EFAULT; else rc = 0; break; } case AUDIO_GET_WMA_CONFIG:{ if (copy_to_user((void *)arg, &audio->wma_config, sizeof(audio->wma_config))) rc = -EFAULT; else rc = 0; break; } case AUDIO_SET_WMA_CONFIG:{ struct msm_audio_wma_config usr_config; if (copy_from_user (&usr_config, (void *)arg, sizeof(usr_config))) { rc = -EFAULT; break; } audio->wma_config = usr_config; rc = 0; break; } case AUDIO_GET_PCM_CONFIG:{ struct msm_audio_pcm_config config; config.pcm_feedback = audio->pcm_feedback; config.buffer_count = PCM_BUF_MAX_COUNT; config.buffer_size = PCM_BUFSZ_MIN; if (copy_to_user((void *)arg, &config, sizeof(config))) rc = -EFAULT; else rc = 0; break; } case AUDIO_SET_PCM_CONFIG:{ struct msm_audio_pcm_config config; if (copy_from_user (&config, (void *)arg, sizeof(config))) { rc = -EFAULT; break; } if (config.pcm_feedback != audio->pcm_feedback) { MM_ERR("Not sufficient permission to" "change the playback mode\n"); rc = -EACCES; break; } if ((config.buffer_count > PCM_BUF_MAX_COUNT) || (config.buffer_count == 1)) config.buffer_count = PCM_BUF_MAX_COUNT; if (config.buffer_size < PCM_BUFSZ_MIN) config.buffer_size = PCM_BUFSZ_MIN; /* Check if pcm feedback is required */ if ((config.pcm_feedback) && (!audio->read_data)) { MM_DBG("allocate PCM buffer %d\n", config.buffer_count * config.buffer_size); handle = ion_alloc(audio->client, (config.buffer_size * config.buffer_count), SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID), 0); if (IS_ERR_OR_NULL(handle)) { MM_ERR("Unable to alloc I/P buffs\n"); audio->input_buff_handle = NULL; rc = -ENOMEM; break; } audio->input_buff_handle = handle; rc = ion_phys(audio->client , handle, &addr, &len); if (rc) { MM_ERR("Invalid phy: %x sz: %x\n", (unsigned int) addr, (unsigned int) len); ion_free(audio->client, handle); audio->input_buff_handle = NULL; rc = -ENOMEM; break; } else { MM_INFO("Got valid phy: %x sz: %x\n", (unsigned int) audio->read_phys, (unsigned int) len); } audio->read_phys = (int32_t)addr; rc = ion_handle_get_flags(audio->client, handle, &ionflag); if (rc) { MM_ERR("could not get flags\n"); ion_free(audio->client, handle); audio->input_buff_handle = NULL; rc = -ENOMEM; break; } audio->map_v_read = ion_map_kernel( audio->client, handle); if (IS_ERR(audio->map_v_read)) { MM_ERR("map of read buf failed\n"); ion_free(audio->client, handle); audio->input_buff_handle = NULL; rc = -ENOMEM; } else { uint8_t index; uint32_t offset = 0; audio->read_data = audio->map_v_read; audio->buf_refresh = 0; audio->pcm_buf_count = config.buffer_count; audio->read_next = 0; audio->fill_next = 0; for (index = 0; index < config.buffer_count; index++) { audio->in[index].data = audio->read_data + offset; audio->in[index].addr = audio->read_phys + offset; audio->in[index].size = config.buffer_size; audio->in[index].used = 0; offset += config.buffer_size; } 
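				/* One contiguous ION allocation is carved
				 * into buffer_count equal PCM buffers; the
				 * kernel and physical views advance together
				 * through the same offset. */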
MM_DBG("read buf: phy addr \ 0x%08x kernel addr 0x%08x\n", audio->read_phys, (int)audio->read_data); rc = 0; } } else { rc = 0; } break; } case AUDIO_PAUSE: MM_DBG("AUDIO_PAUSE %ld\n", arg); rc = audpp_pause(audio->dec_id, (int) arg); break; default: rc = -EINVAL; } mutex_unlock(&audio->lock); return rc; } /* Only useful in tunnel-mode */ static int audio_fsync(struct file *file, loff_t a, loff_t b, int datasync) { struct audio *audio = file->private_data; struct buffer *frame; int rc = 0; MM_DBG("\n"); /* Macro prints the file name and function */ if (!audio->running || audio->pcm_feedback) { rc = -EINVAL; goto done_nolock; } mutex_lock(&audio->write_lock); rc = wait_event_interruptible(audio->write_wait, (!audio->out[0].used && !audio->out[1].used && audio->out_needed) || audio->wflush); if (rc < 0) goto done; else if (audio->wflush) { rc = -EBUSY; goto done; } if (audio->reserved) { MM_DBG("send reserved byte\n"); frame = audio->out + audio->out_tail; ((char *) frame->data)[0] = audio->rsv_byte; ((char *) frame->data)[1] = 0; frame->used = 2; audplay_send_data(audio, 0); rc = wait_event_interruptible(audio->write_wait, (!audio->out[0].used && !audio->out[1].used && audio->out_needed) || audio->wflush); if (rc < 0) goto done; else if (audio->wflush) { rc = -EBUSY; goto done; } } /* pcm dmamiss message is sent continously * when decoder is starved so no race * condition concern */ audio->teos = 0; rc = wait_event_interruptible(audio->write_wait, audio->teos || audio->wflush); if (audio->wflush) rc = -EBUSY; done: mutex_unlock(&audio->write_lock); done_nolock: return rc; } static ssize_t audio_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { struct audio *audio = file->private_data; const char __user *start = buf; int rc = 0; if (!audio->pcm_feedback) return 0; /* PCM feedback is not enabled. Nothing to read */ mutex_lock(&audio->read_lock); MM_DBG("%d \n", count); while (count > 0) { rc = wait_event_interruptible(audio->read_wait, (audio->in[audio->read_next].used > 0) || (audio->stopped) || (audio->rflush)); if (rc < 0) break; if (audio->stopped || audio->rflush) { rc = -EBUSY; break; } if (count < audio->in[audio->read_next].used) { /* Read must happen in frame boundary. Since driver does not know frame size, read count must be greater or equal to size of PCM samples */ MM_DBG("audio_read: no partial frame done reading\n"); break; } else { MM_DBG("audio_read: read from in[%d]\n", audio->read_next); /* order reads from the output buffer */ rmb(); if (copy_to_user (buf, audio->in[audio->read_next].data, audio->in[audio->read_next].used)) { MM_ERR("invalid addr %x \n", (unsigned int)buf); rc = -EFAULT; break; } count -= audio->in[audio->read_next].used; buf += audio->in[audio->read_next].used; audio->in[audio->read_next].used = 0; if ((++audio->read_next) == audio->pcm_buf_count) audio->read_next = 0; break; /* Force to exit while loop * to prevent output thread * sleep too long if data is * not ready at this moment. 
*/ } } /* don't feed output buffer to HW decoder during flushing * buffer refresh command will be sent once flush completes * send buf refresh command here can confuse HW decoder */ if (audio->buf_refresh && !audio->rflush) { audio->buf_refresh = 0; MM_DBG("kick start pcm feedback again\n"); audplay_buffer_refresh(audio); } mutex_unlock(&audio->read_lock); if (buf > start) rc = buf - start; MM_DBG("read %d bytes\n", rc); return rc; } static int audwma_process_eos(struct audio *audio, const char __user *buf_start, unsigned short mfield_size) { int rc = 0; struct buffer *frame; char *buf_ptr; if (audio->reserved) { MM_DBG("flush reserve byte\n"); frame = audio->out + audio->out_head; buf_ptr = frame->data; rc = wait_event_interruptible(audio->write_wait, (frame->used == 0) || (audio->stopped) || (audio->wflush)); if (rc < 0) goto done; if (audio->stopped || audio->wflush) { rc = -EBUSY; goto done; } buf_ptr[0] = audio->rsv_byte; buf_ptr[1] = 0; audio->out_head ^= 1; frame->mfield_sz = 0; frame->used = 2; audio->reserved = 0; audplay_send_data(audio, 0); } frame = audio->out + audio->out_head; rc = wait_event_interruptible(audio->write_wait, (audio->out_needed && audio->out[0].used == 0 && audio->out[1].used == 0) || (audio->stopped) || (audio->wflush)); if (rc < 0) goto done; if (audio->stopped || audio->wflush) { rc = -EBUSY; goto done; } if (mfield_size > audio->out[0].size) { rc = -EINVAL; goto done; } if (copy_from_user(frame->data, buf_start, mfield_size)) { rc = -EFAULT; goto done; } frame->mfield_sz = mfield_size; audio->out_head ^= 1; frame->used = mfield_size; audplay_send_data(audio, 0); done: return rc; } static ssize_t audio_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) { struct audio *audio = file->private_data; const char __user *start = buf; struct buffer *frame; size_t xfer; char *cpy_ptr; int rc = 0, eos_condition = AUDWMA_EOS_NONE; unsigned dsize; unsigned short mfield_size = 0; MM_DBG("cnt=%d\n", count); mutex_lock(&audio->write_lock); while (count > 0) { frame = audio->out + audio->out_head; cpy_ptr = frame->data; dsize = 0; rc = wait_event_interruptible(audio->write_wait, (frame->used == 0) || (audio->stopped) || (audio->wflush)); if (rc < 0) break; if (audio->stopped || audio->wflush) { rc = -EBUSY; break; } if (audio->mfield) { if (buf == start) { /* Processing beginning of user buffer */ if (__get_user(mfield_size, (unsigned short __user *) buf)) { rc = -EFAULT; break; } else if (mfield_size > count) { rc = -EINVAL; break; } if (mfield_size > audio->out[0].size) { rc = -EINVAL; break; } MM_DBG("audio_write: mf offset_val %x\n", mfield_size); if (copy_from_user(cpy_ptr, buf, mfield_size)) { rc = -EFAULT; break; } /* Check if EOS flag is set and buffer has * contains just meta field */ if (cpy_ptr[AUDWMA_EOS_FLG_OFFSET] & AUDWMA_EOS_FLG_MASK) { MM_DBG("audio_write: EOS SET\n"); eos_condition = AUDWMA_EOS_SET; if (mfield_size == count) { buf += mfield_size; break; } else cpy_ptr[AUDWMA_EOS_FLG_OFFSET] &= ~AUDWMA_EOS_FLG_MASK; } cpy_ptr += mfield_size; count -= mfield_size; dsize += mfield_size; buf += mfield_size; } else { mfield_size = 0; MM_DBG("audio_write: continuous buffer\n"); } frame->mfield_sz = mfield_size; } if (audio->reserved) { MM_DBG("append reserved byte %x\n", audio->rsv_byte); *cpy_ptr = audio->rsv_byte; xfer = (count > ((frame->size - mfield_size) - 1)) ? (frame->size - mfield_size) - 1 : count; cpy_ptr++; dsize += 1; audio->reserved = 0; } else xfer = (count > (frame->size - mfield_size)) ? 
(frame->size - mfield_size) : count; if (copy_from_user(cpy_ptr, buf, xfer)) { rc = -EFAULT; break; } dsize += xfer; if (dsize & 1) { audio->rsv_byte = ((char *) frame->data)[dsize - 1]; MM_DBG("odd length buf reserve last byte %x\n", audio->rsv_byte); audio->reserved = 1; dsize--; } count -= xfer; buf += xfer; if (dsize > 0) { audio->out_head ^= 1; frame->used = dsize; audplay_send_data(audio, 0); } } if (eos_condition == AUDWMA_EOS_SET) rc = audwma_process_eos(audio, start, mfield_size); mutex_unlock(&audio->write_lock); if (!rc) { if (buf > start) return buf - start; } return rc; } static int audio_release(struct inode *inode, struct file *file) { struct audio *audio = file->private_data; MM_INFO("audio instance 0x%08x freeing\n", (int)audio); mutex_lock(&audio->lock); audio_disable(audio); if (audio->rmt_resource_released == 0) rmt_put_resource(audio); audio_flush(audio); audio_flush_pcm_buf(audio); msm_adsp_put(audio->audplay); audpp_adec_free(audio->dec_id); #ifdef CONFIG_HAS_EARLYSUSPEND unregister_early_suspend(&audio->suspend_ctl.node); #endif audio->event_abort = 1; wake_up(&audio->event_wait); audwma_reset_event_queue(audio); ion_unmap_kernel(audio->client, audio->output_buff_handle); ion_free(audio->client, audio->output_buff_handle); if (audio->input_buff_handle != NULL) { ion_unmap_kernel(audio->client, audio->input_buff_handle); ion_free(audio->client, audio->input_buff_handle); } ion_client_destroy(audio->client); mutex_unlock(&audio->lock); #ifdef CONFIG_DEBUG_FS if (audio->dentry) debugfs_remove(audio->dentry); #endif kfree(audio); return 0; } #ifdef CONFIG_HAS_EARLYSUSPEND static void audwma_post_event(struct audio *audio, int type, union msm_audio_event_payload payload) { struct audwma_event *e_node = NULL; unsigned long flags; spin_lock_irqsave(&audio->event_queue_lock, flags); if (!list_empty(&audio->free_event_queue)) { e_node = list_first_entry(&audio->free_event_queue, struct audwma_event, list); list_del(&e_node->list); } else { e_node = kmalloc(sizeof(struct audwma_event), GFP_ATOMIC); if (!e_node) { MM_ERR("No mem to post event %d\n", type); spin_unlock_irqrestore(&audio->event_queue_lock, flags); return; } } e_node->event_type = type; e_node->payload = payload; list_add_tail(&e_node->list, &audio->event_queue); spin_unlock_irqrestore(&audio->event_queue_lock, flags); wake_up(&audio->event_wait); } static void audwma_suspend(struct early_suspend *h) { struct audwma_suspend_ctl *ctl = container_of(h, struct audwma_suspend_ctl, node); union msm_audio_event_payload payload; MM_DBG("\n"); /* Macro prints the file name and function */ audwma_post_event(ctl->audio, AUDIO_EVENT_SUSPEND, payload); } static void audwma_resume(struct early_suspend *h) { struct audwma_suspend_ctl *ctl = container_of(h, struct audwma_suspend_ctl, node); union msm_audio_event_payload payload; MM_DBG("\n"); /* Macro prints the file name and function */ audwma_post_event(ctl->audio, AUDIO_EVENT_RESUME, payload); } #endif #ifdef CONFIG_DEBUG_FS static ssize_t audwma_debug_open(struct inode *inode, struct file *file) { file->private_data = inode->i_private; return 0; } static ssize_t audwma_debug_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { const int debug_bufmax = 4096; static char buffer[4096]; int n = 0, i; struct audio *audio = file->private_data; mutex_lock(&audio->lock); n = scnprintf(buffer, debug_bufmax, "opened %d\n", audio->opened); n += scnprintf(buffer + n, debug_bufmax - n, "enabled %d\n", audio->enabled); n += scnprintf(buffer + n, debug_bufmax - n, 
"stopped %d\n", audio->stopped); n += scnprintf(buffer + n, debug_bufmax - n, "pcm_feedback %d\n", audio->pcm_feedback); n += scnprintf(buffer + n, debug_bufmax - n, "out_buf_sz %d\n", audio->out[0].size); n += scnprintf(buffer + n, debug_bufmax - n, "pcm_buf_count %d \n", audio->pcm_buf_count); n += scnprintf(buffer + n, debug_bufmax - n, "pcm_buf_sz %d \n", audio->in[0].size); n += scnprintf(buffer + n, debug_bufmax - n, "volume %x \n", audio->vol_pan.volume); n += scnprintf(buffer + n, debug_bufmax - n, "sample rate %d \n", audio->out_sample_rate); n += scnprintf(buffer + n, debug_bufmax - n, "channel mode %d \n", audio->out_channel_mode); mutex_unlock(&audio->lock); /* Following variables are only useful for debugging when * when playback halts unexpectedly. Thus, no mutual exclusion * enforced */ n += scnprintf(buffer + n, debug_bufmax - n, "wflush %d\n", audio->wflush); n += scnprintf(buffer + n, debug_bufmax - n, "rflush %d\n", audio->rflush); n += scnprintf(buffer + n, debug_bufmax - n, "running %d \n", audio->running); n += scnprintf(buffer + n, debug_bufmax - n, "dec state %d \n", audio->dec_state); n += scnprintf(buffer + n, debug_bufmax - n, "out_needed %d \n", audio->out_needed); n += scnprintf(buffer + n, debug_bufmax - n, "out_head %d \n", audio->out_head); n += scnprintf(buffer + n, debug_bufmax - n, "out_tail %d \n", audio->out_tail); n += scnprintf(buffer + n, debug_bufmax - n, "out[0].used %d \n", audio->out[0].used); n += scnprintf(buffer + n, debug_bufmax - n, "out[1].used %d \n", audio->out[1].used); n += scnprintf(buffer + n, debug_bufmax - n, "buffer_refresh %d \n", audio->buf_refresh); n += scnprintf(buffer + n, debug_bufmax - n, "read_next %d \n", audio->read_next); n += scnprintf(buffer + n, debug_bufmax - n, "fill_next %d \n", audio->fill_next); for (i = 0; i < audio->pcm_buf_count; i++) n += scnprintf(buffer + n, debug_bufmax - n, "in[%d].size %d \n", i, audio->in[i].used); buffer[n] = 0; return simple_read_from_buffer(buf, count, ppos, buffer, n); } static const struct file_operations audwma_debug_fops = { .read = audwma_debug_read, .open = audwma_debug_open, }; #endif static int audio_open(struct inode *inode, struct file *file) { struct audio *audio = NULL; int rc, dec_attrb, decid, i; unsigned mem_sz = DMASZ_MAX; struct audwma_event *e_node = NULL; unsigned long ionflag = 0; ion_phys_addr_t addr = 0; struct ion_handle *handle = NULL; struct ion_client *client = NULL; int len = 0; #ifdef CONFIG_DEBUG_FS /* 4 bytes represents decoder number, 1 byte for terminate string */ char name[sizeof "msm_wma_" + 5]; #endif /* Allocate Mem for audio instance */ audio = kzalloc(sizeof(struct audio), GFP_KERNEL); if (!audio) { MM_ERR("no memory to allocate audio instance \n"); rc = -ENOMEM; goto done; } MM_INFO("audio instance 0x%08x created\n", (int)audio); /* Allocate the decoder */ dec_attrb = AUDDEC_DEC_WMA; if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { dec_attrb |= MSM_AUD_MODE_NONTUNNEL; audio->pcm_feedback = NON_TUNNEL_MODE_PLAYBACK; } else if ((file->f_mode & FMODE_WRITE) && !(file->f_mode & FMODE_READ)) { dec_attrb |= MSM_AUD_MODE_TUNNEL; audio->pcm_feedback = TUNNEL_MODE_PLAYBACK; } else { kfree(audio); rc = -EACCES; goto done; } decid = audpp_adec_alloc(dec_attrb, &audio->module_name, &audio->queue_id); if (decid < 0) { MM_ERR("No free decoder available, freeing instance 0x%08x\n", (int)audio); rc = -ENODEV; kfree(audio); goto done; } audio->dec_id = decid & MSM_AUD_DECODER_MASK; client = msm_ion_client_create(UINT_MAX, "Audio_WMA_Client"); 
if (IS_ERR_OR_NULL(client)) { pr_err("Unable to create ION client\n"); rc = -ENOMEM; goto client_create_error; } audio->client = client; handle = ion_alloc(client, mem_sz, SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID), 0); if (IS_ERR_OR_NULL(handle)) { MM_ERR("Unable to create allocate O/P buffers\n"); rc = -ENOMEM; goto output_buff_alloc_error; } audio->output_buff_handle = handle; rc = ion_phys(client, handle, &addr, &len); if (rc) { MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n", (unsigned int) addr, (unsigned int) len); goto output_buff_get_phys_error; } else { MM_INFO("O/P buffers:valid phy: %x sz: %x\n", (unsigned int) addr, (unsigned int) len); } audio->phys = (int32_t)addr; rc = ion_handle_get_flags(client, handle, &ionflag); if (rc) { MM_ERR("could not get flags for the handle\n"); goto output_buff_get_flags_error; } audio->map_v_write = ion_map_kernel(client, handle); if (IS_ERR(audio->map_v_write)) { MM_ERR("could not map write buffers\n"); rc = -ENOMEM; goto output_buff_map_error; } audio->data = audio->map_v_write; MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n", audio->phys, (int)audio->data); audio->out_dma_sz = mem_sz; if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK) { rc = audmgr_open(&audio->audmgr); if (rc) { MM_ERR("audmgr open failed, freeing instance \ 0x%08x\n", (int)audio); goto err; } } rc = msm_adsp_get(audio->module_name, &audio->audplay, &audplay_adsp_ops_wma, audio); if (rc) { MM_ERR("failed to get %s module, freeing instance 0x%08x\n", audio->module_name, (int)audio); if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK) audmgr_close(&audio->audmgr); goto err; } rc = rmt_get_resource(audio); if (rc) { MM_ERR("ADSP resources are not available for WMA session \ 0x%08x on decoder: %d\n", (int)audio, audio->dec_id); if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK) audmgr_close(&audio->audmgr); msm_adsp_put(audio->audplay); goto err; } audio->input_buff_handle = NULL; mutex_init(&audio->lock); mutex_init(&audio->write_lock); mutex_init(&audio->read_lock); mutex_init(&audio->get_event_lock); spin_lock_init(&audio->dsp_lock); init_waitqueue_head(&audio->write_wait); init_waitqueue_head(&audio->read_wait); INIT_LIST_HEAD(&audio->free_event_queue); INIT_LIST_HEAD(&audio->event_queue); init_waitqueue_head(&audio->wait); init_waitqueue_head(&audio->event_wait); spin_lock_init(&audio->event_queue_lock); audio->out[0].data = audio->data + 0; audio->out[0].addr = audio->phys + 0; audio->out[0].size = audio->out_dma_sz >> 1; audio->out[1].data = audio->data + audio->out[0].size; audio->out[1].addr = audio->phys + audio->out[0].size; audio->out[1].size = audio->out[0].size; audio->wma_config.armdatareqthr = 1262; audio->wma_config.channelsdecoded = 2; audio->wma_config.wmabytespersec = 6003; audio->wma_config.wmasamplingfreq = 44100; audio->wma_config.wmaencoderopts = 31; audio->out_sample_rate = 44100; audio->out_channel_mode = AUDPP_CMD_PCM_INTF_STEREO_V; audio->vol_pan.volume = 0x2000; audio_flush(audio); file->private_data = audio; audio->opened = 1; #ifdef CONFIG_DEBUG_FS snprintf(name, sizeof name, "msm_wma_%04x", audio->dec_id); audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, NULL, (void *) audio, &audwma_debug_fops); if (IS_ERR(audio->dentry)) MM_DBG("debugfs_create_file failed\n"); #endif #ifdef CONFIG_HAS_EARLYSUSPEND audio->suspend_ctl.node.level = EARLY_SUSPEND_LEVEL_DISABLE_FB; audio->suspend_ctl.node.resume = audwma_resume; audio->suspend_ctl.node.suspend = audwma_suspend; audio->suspend_ctl.audio = audio; register_early_suspend(&audio->suspend_ctl.node); 
#endif for (i = 0; i < AUDWMA_EVENT_NUM; i++) { e_node = kmalloc(sizeof(struct audwma_event), GFP_KERNEL); if (e_node) list_add_tail(&e_node->list, &audio->free_event_queue); else { MM_ERR("event pkt alloc failed\n"); break; } } done: return rc; err: ion_unmap_kernel(client, audio->output_buff_handle); output_buff_map_error: output_buff_get_phys_error: output_buff_get_flags_error: ion_free(client, audio->output_buff_handle); output_buff_alloc_error: ion_client_destroy(client); client_create_error: audpp_adec_free(audio->dec_id); kfree(audio); return rc; } static const struct file_operations audio_wma_fops = { .owner = THIS_MODULE, .open = audio_open, .release = audio_release, .read = audio_read, .write = audio_write, .unlocked_ioctl = audio_ioctl, .fsync = audio_fsync, }; struct miscdevice audio_wma_misc = { .minor = MISC_DYNAMIC_MINOR, .name = "msm_wma", .fops = &audio_wma_fops, }; static int __init audio_init(void) { return misc_register(&audio_wma_misc); } device_initcall(audio_init);
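/*
 * Illustrative user-space sketch, not part of the driver: one plausible way
 * to drive the tunnel-mode device registered above. It assumes the MSM UAPI
 * header <linux/msm_audio.h>, which defines the AUDIO_* ioctls and the
 * msm_audio_config/msm_audio_wma_config structures handled in audio_ioctl().
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/msm_audio.h>

static int play_wma_file(const char *path)
{
	/* O_WRONLY (write, no read) selects TUNNEL_MODE_PLAYBACK in audio_open() */
	int fd = open("/dev/msm_wma", O_WRONLY);
	FILE *in;
	char buf[4096];
	size_t n;
	struct msm_audio_config cfg;
	struct msm_audio_wma_config wma;

	if (fd < 0)
		return -1;

	ioctl(fd, AUDIO_GET_CONFIG, &cfg);
	cfg.sample_rate = 44100;
	cfg.channel_count = 2;
	ioctl(fd, AUDIO_SET_CONFIG, &cfg);

	ioctl(fd, AUDIO_GET_WMA_CONFIG, &wma);
	wma.wmasamplingfreq = 44100;	/* matches the driver defaults above */
	ioctl(fd, AUDIO_SET_WMA_CONFIG, &wma);

	ioctl(fd, AUDIO_START, 0);	/* audio_enable() + decoder-ready wait */

	in = fopen(path, "rb");
	while (in && (n = fread(buf, 1, sizeof(buf), in)) > 0)
		write(fd, buf, n);	/* serviced by audio_write() above */
	if (in)
		fclose(in);

	fsync(fd);			/* drains via audio_fsync(); tunnel mode only */
	ioctl(fd, AUDIO_STOP, 0);
	close(fd);
	return 0;
}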
gpl-2.0
MartynShaw/audacity
lib-src/lv2/serd/tests/serd_test.c
16
21687
/* Copyright 2011-2014 David Robillard <http://drobilla.net> Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. THIS SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <float.h> #include <math.h> #include <stdarg.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include "serd/serd.h" #define USTR(s) ((const uint8_t*)(s)) #ifdef _WIN32 # define INFINITY (DBL_MAX + DBL_MAX) # define NAN (INFINITY - INFINITY) #endif static int failure(const char* fmt, ...) { va_list args; va_start(args, fmt); fprintf(stderr, "error: "); vfprintf(stderr, fmt, args); va_end(args); return 1; } static bool test_strtod(double dbl, double max_delta) { char buf[1024]; snprintf(buf, sizeof(buf), "%lf", dbl); char* endptr = NULL; const double out = serd_strtod(buf, &endptr); const double diff = fabs(out - dbl); if (diff > max_delta) { return !failure("Parsed %lf != %lf (delta %lf)\n", dbl, out, diff); } return true; } static SerdStatus count_prefixes(void* handle, const SerdNode* name, const SerdNode* uri) { ++*(int*)handle; return SERD_SUCCESS; } typedef struct { int n_statements; const SerdNode* graph; } ReaderTest; static SerdStatus test_sink(void* handle, SerdStatementFlags flags, const SerdNode* graph, const SerdNode* subject, const SerdNode* predicate, const SerdNode* object, const SerdNode* object_datatype, const SerdNode* object_lang) { ReaderTest* rt = (ReaderTest*)handle; ++rt->n_statements; rt->graph = graph; return SERD_SUCCESS; } int main(void) { #define MAX 1000000 #define NUM_TESTS 1000 for (int i = 0; i < NUM_TESTS; ++i) { double dbl = rand() % MAX; dbl += (rand() % MAX) / (double)MAX; if (!test_strtod(dbl, 1 / (double)MAX)) { return 1; } } const double expt_test_nums[] = { 2.0E18, -5e19, +8e20, 2e+34, -5e-5, 8e0, 9e-0, 2e+0 }; const char* expt_test_strs[] = { "02e18", "-5e019", "+8e20", "2E+34", "-5E-5", "8E0", "9e-0", " 2e+0" }; for (unsigned i = 0; i < sizeof(expt_test_nums) / sizeof(double); ++i) { const double num = serd_strtod(expt_test_strs[i], NULL); const double delta = fabs(num - expt_test_nums[i]); if (delta > DBL_EPSILON) { return failure("Parsed `%s' %lf != %lf (delta %lf)\n", expt_test_strs[i], num, expt_test_nums[i], delta); } } // Test serd_node_new_decimal const double dbl_test_nums[] = { 0.0, 9.0, 10.0, .01, 2.05, -16.00001, 5.000000005, 0.0000000001, NAN, INFINITY }; const char* dbl_test_strs[] = { "0.0", "9.0", "10.0", "0.01", "2.05", "-16.00001", "5.00000001", "0.0", NULL, NULL }; for (unsigned i = 0; i < sizeof(dbl_test_nums) / sizeof(double); ++i) { SerdNode node = serd_node_new_decimal(dbl_test_nums[i], 8); const bool pass = (node.buf && dbl_test_strs[i]) ? !strcmp((const char*)node.buf, (const char*)dbl_test_strs[i]) : ((const char*)node.buf == dbl_test_strs[i]); if (!pass) { return failure("Serialised `%s' != %s\n", node.buf, dbl_test_strs[i]); } const size_t len = node.buf ? 
strlen((const char*)node.buf) : 0; if (node.n_bytes != len || node.n_chars != len) { return failure("Length %zu,%zu != %zu\n", node.n_bytes, node.n_chars, len); } serd_node_free(&node); } // Test serd_node_new_integer const long int_test_nums[] = { 0, -0, -23, 23, -12340, 1000, -1000 }; const char* int_test_strs[] = { "0", "0", "-23", "23", "-12340", "1000", "-1000" }; for (unsigned i = 0; i < sizeof(int_test_nums) / sizeof(double); ++i) { SerdNode node = serd_node_new_integer(int_test_nums[i]); if (strcmp((const char*)node.buf, (const char*)int_test_strs[i])) { return failure("Serialised `%s' != %s\n", node.buf, int_test_strs[i]); } const size_t len = strlen((const char*)node.buf); if (node.n_bytes != len || node.n_chars != len) { return failure("Length %zu,%zu != %zu\n", node.n_bytes, node.n_chars, len); } serd_node_free(&node); } // Test serd_node_new_blob for (size_t size = 0; size < 256; ++size) { uint8_t* data = (uint8_t*)malloc(size); for (size_t i = 0; i < size; ++i) { data[i] = (uint8_t)(rand() % 256); } SerdNode blob = serd_node_new_blob(data, size, size % 5); if (blob.n_bytes != blob.n_chars) { return failure("Blob %zu bytes != %zu chars\n", blob.n_bytes, blob.n_chars); } size_t out_size; uint8_t* out = (uint8_t*)serd_base64_decode( blob.buf, blob.n_bytes, &out_size); if (out_size != size) { return failure("Blob size %zu != %zu\n", out_size, size); } for (size_t i = 0; i < size; ++i) { if (out[i] != data[i]) { return failure("Corrupt blob at byte %zu\n", i); } } serd_node_free(&blob); free(out); free(data); } // Test serd_strlen const uint8_t str[] = { '"', '5', 0xE2, 0x82, 0xAC, '"', '\n', 0 }; size_t n_bytes; SerdNodeFlags flags; size_t len = serd_strlen(str, &n_bytes, &flags); if (len != 5 || n_bytes != 7 || flags != (SERD_HAS_QUOTE|SERD_HAS_NEWLINE)) { return failure("Bad serd_strlen(%s) len=%zu n_bytes=%zu flags=%u\n", str, len, n_bytes, flags); } len = serd_strlen(str, NULL, &flags); if (len != 5) { return failure("Bad serd_strlen(%s) len=%zu flags=%u\n", str, len, flags); } // Test serd_strerror const uint8_t* msg = NULL; if (strcmp((const char*)(msg = serd_strerror(SERD_SUCCESS)), "Success")) { return failure("Bad message `%s' for SERD_SUCCESS\n", msg); } for (int i = SERD_FAILURE; i <= SERD_ERR_INTERNAL; ++i) { msg = serd_strerror((SerdStatus)i); if (!strcmp((const char*)msg, "Success")) { return failure("Bad message `%s' for (SerdStatus)%d\n", msg, i); } } msg = serd_strerror((SerdStatus)-1); // Test serd_uri_to_path const uint8_t* uri = (const uint8_t*)"file:///home/user/foo.ttl"; if (strcmp((const char*)serd_uri_to_path(uri), "/home/user/foo.ttl")) { return failure("Bad path %s for %s\n", serd_uri_to_path(uri), uri); } uri = (const uint8_t*)"file://localhost/home/user/foo.ttl"; if (strcmp((const char*)serd_uri_to_path(uri), "/home/user/foo.ttl")) { return failure("Bad path %s for %s\n", serd_uri_to_path(uri), uri); } uri = (const uint8_t*)"file:illegal/file/uri"; if (serd_uri_to_path(uri)) { return failure("Converted invalid URI `%s' to path `%s'\n", uri, serd_uri_to_path(uri)); } uri = (const uint8_t*)"file:///c:/awful/system"; if (strcmp((const char*)serd_uri_to_path(uri), "c:/awful/system")) { return failure("Bad path %s for %s\n", serd_uri_to_path(uri), uri); } uri = (const uint8_t*)"file:///c:awful/system"; if (strcmp((const char*)serd_uri_to_path(uri), "/c:awful/system")) { return failure("Bad path %s for %s\n", serd_uri_to_path(uri), uri); } uri = (const uint8_t*)"file:///0/1"; if (strcmp((const char*)serd_uri_to_path(uri), "/0/1")) { return failure("Bad path 
%s for %s\n", serd_uri_to_path(uri), uri); } uri = (const uint8_t*)"C:\\Windows\\Sucks"; if (strcmp((const char*)serd_uri_to_path(uri), "C:\\Windows\\Sucks")) { return failure("Bad path %s for %s\n", serd_uri_to_path(uri), uri); } uri = (const uint8_t*)"C|/Windows/Sucks"; if (strcmp((const char*)serd_uri_to_path(uri), "C|/Windows/Sucks")) { return failure("Bad path %s for %s\n", serd_uri_to_path(uri), uri); } // Test serd_node_new_file_uri and serd_file_uri_parse SerdURI furi; const uint8_t* path_str = USTR("C:/My 100%"); SerdNode file_node = serd_node_new_file_uri(path_str, 0, &furi, true); uint8_t* hostname = NULL; uint8_t* out_path = serd_file_uri_parse(file_node.buf, &hostname); if (strcmp((const char*)file_node.buf, "file:///C:/My%20100%%")) { return failure("Bad URI %s\n", file_node.buf); } else if (hostname) { return failure("hostname `%s' shouldn't exist\n", hostname); } else if (strcmp((const char*)path_str, (const char*)out_path)) { return failure("path=>URI=>path failure %s => %s => %s\n", path_str, file_node.buf, out_path); } free(out_path); serd_node_free(&file_node); path_str = USTR("C:\\Pointless Space"); file_node = serd_node_new_file_uri(path_str, USTR("pwned"), 0, true); hostname = NULL; out_path = serd_file_uri_parse(file_node.buf, &hostname); if (strcmp((const char*)file_node.buf, "file://pwned/C:/Pointless%20Space")) { return failure("Bad URI %s\n", file_node.buf); } else if (!hostname || strcmp((const char*)hostname, "pwned")) { return failure("Bad hostname `%s'\n", hostname); } else if (strcmp((const char*)out_path, "C:/Pointless Space")) { return failure("path=>URI=>path failure %s => %s => %s\n", path_str, file_node.buf, out_path); } free(hostname); free(out_path); serd_node_free(&file_node); path_str = USTR("/foo/bar"); file_node = serd_node_new_file_uri(path_str, 0, 0, true); hostname = NULL; out_path = serd_file_uri_parse(file_node.buf, &hostname); if (strcmp((const char*)file_node.buf, "file:///foo/bar")) { return failure("Bad URI %s\n", file_node.buf); } else if (hostname) { return failure("hostname `%s' shouldn't exist\n", hostname); } else if (strcmp((const char*)path_str, (const char*)out_path)) { return failure("path=>URI=>path failure %s => %s => %s\n", path_str, file_node.buf, out_path); } free(out_path); serd_node_free(&file_node); path_str = USTR("/foo/bar"); file_node = serd_node_new_file_uri(path_str, USTR("localhost"), 0, true); out_path = serd_file_uri_parse(file_node.buf, &hostname); if (strcmp((const char*)file_node.buf, "file://localhost/foo/bar")) { return failure("Bad URI %s\n", file_node.buf); } else if (strcmp((const char*)hostname, "localhost")) { return failure("incorrect hostname `%s'\n", hostname); } else if (strcmp((const char*)path_str, (const char*)out_path)) { return failure("path=>URI=>path failure %s => %s => %s\n", path_str, file_node.buf, out_path); } free(hostname); free(out_path); serd_node_free(&file_node); path_str = USTR("a/relative path"); file_node = serd_node_new_file_uri(path_str, 0, 0, false); out_path = serd_file_uri_parse(file_node.buf, &hostname); if (strcmp((const char*)file_node.buf, "a/relative path")) { return failure("Bad URI %s\n", file_node.buf); } else if (hostname) { return failure("hostname `%s' shouldn't exist\n", hostname); } else if (strcmp((const char*)path_str, (const char*)out_path)) { return failure("path=>URI=>path failure %s => %s => %s\n", path_str, file_node.buf, out_path); } free(hostname); free(out_path); serd_node_free(&file_node); if (serd_file_uri_parse(USTR("file://invalid"), NULL)) { 
return failure("successfully parsed bogus URI <file://invalid>\n"); } out_path = serd_file_uri_parse(USTR("file://host/foo/%XYbar"), NULL); if (strcmp((const char*)out_path, "/foo/bar")) { return failure("bad tolerance of junk escape: `%s'\n", out_path); } free(out_path); out_path = serd_file_uri_parse(USTR("file://host/foo/%0Abar"), NULL); if (strcmp((const char*)out_path, "/foo/bar")) { return failure("bad tolerance of junk escape: `%s'\n", out_path); } free(out_path); // Test serd_node_equals const uint8_t replacement_char_str[] = { 0xEF, 0xBF, 0xBD, 0 }; SerdNode lhs = serd_node_from_string(SERD_LITERAL, replacement_char_str); SerdNode rhs = serd_node_from_string(SERD_LITERAL, USTR("123")); if (serd_node_equals(&lhs, &rhs)) { return failure("%s == %s\n", lhs.buf, rhs.buf); } SerdNode qnode = serd_node_from_string(SERD_CURIE, USTR("foo:bar")); if (serd_node_equals(&lhs, &qnode)) { return failure("%s == %s\n", lhs.buf, qnode.buf); } if (!serd_node_equals(&lhs, &lhs)) { return failure("%s != %s\n", lhs.buf, lhs.buf); } SerdNode null_copy = serd_node_copy(&SERD_NODE_NULL); if (!serd_node_equals(&SERD_NODE_NULL, &null_copy)) { return failure("copy of null node != null node\n"); } // Test serd_node_from_string SerdNode node = serd_node_from_string(SERD_LITERAL, (const uint8_t*)"hello\""); if (node.n_bytes != 6 || node.n_chars != 6 || node.flags != SERD_HAS_QUOTE || strcmp((const char*)node.buf, "hello\"")) { return failure("Bad node %s %zu %zu %d %d\n", node.buf, node.n_bytes, node.n_chars, node.flags, node.type); } node = serd_node_from_string(SERD_URI, NULL); if (!serd_node_equals(&node, &SERD_NODE_NULL)) { return failure("Creating node from NULL string failed\n"); } // Test serd_node_new_uri_from_string SerdURI base_uri; SerdNode base = serd_node_new_uri_from_string(USTR("http://example.org/"), NULL, &base_uri); SerdNode nil = serd_node_new_uri_from_string(NULL, &base_uri, NULL); SerdNode nil2 = serd_node_new_uri_from_string(USTR(""), &base_uri, NULL); if (nil.type != SERD_URI || strcmp((const char*)nil.buf, (const char*)base.buf) || nil2.type != SERD_URI || strcmp((const char*)nil2.buf, (const char*)base.buf)) { return failure("URI %s != base %s\n", nil.buf, base.buf); } serd_node_free(&base); serd_node_free(&nil); serd_node_free(&nil2); // Test SerdEnv SerdNode u = serd_node_from_string(SERD_URI, USTR("http://example.org/foo")); SerdNode b = serd_node_from_string(SERD_CURIE, USTR("invalid")); SerdNode c = serd_node_from_string(SERD_CURIE, USTR("eg.2:b")); SerdEnv* env = serd_env_new(NULL); serd_env_set_prefix_from_strings(env, USTR("eg.2"), USTR("http://example.org/")); if (!serd_env_set_base_uri(env, &node)) { return failure("Set base URI to %s\n", node.buf); } if (!serd_node_equals(serd_env_get_base_uri(env, NULL), &node)) { return failure("Base URI mismatch\n"); } SerdChunk prefix, suffix; if (!serd_env_expand(env, &b, &prefix, &suffix)) { return failure("Expanded invalid curie %s\n", b.buf); } SerdNode xnode = serd_env_expand_node(env, &node); if (!serd_node_equals(&xnode, &SERD_NODE_NULL)) { return failure("Expanded %s to %s\n", c.buf, xnode.buf); } SerdNode xu = serd_env_expand_node(env, &u); if (strcmp((const char*)xu.buf, "http://example.org/foo")) { return failure("Expanded %s to %s\n", c.buf, xu.buf); } serd_node_free(&xu); SerdNode badpre = serd_node_from_string(SERD_CURIE, USTR("hm:what")); SerdNode xbadpre = serd_env_expand_node(env, &badpre); if (!serd_node_equals(&xbadpre, &SERD_NODE_NULL)) { return failure("Expanded invalid curie %s\n", badpre.buf); } SerdNode xc = 
serd_env_expand_node(env, &c); if (strcmp((const char*)xc.buf, "http://example.org/b")) { return failure("Expanded %s to %s\n", c.buf, xc.buf); } serd_node_free(&xc); if (!serd_env_set_prefix(env, &SERD_NODE_NULL, &SERD_NODE_NULL)) { return failure("Set NULL prefix\n"); } const SerdNode lit = serd_node_from_string(SERD_LITERAL, USTR("hello")); if (!serd_env_set_prefix(env, &b, &lit)) { return failure("Set prefix to literal\n"); } int n_prefixes = 0; serd_env_set_prefix_from_strings(env, USTR("eg.2"), USTR("http://example.org/")); serd_env_foreach(env, count_prefixes, &n_prefixes); if (n_prefixes != 1) { return failure("Bad prefix count %d\n", n_prefixes); } SerdNode shorter_uri = serd_node_from_string(SERD_URI, USTR("urn:foo")); SerdNode prefix_name; if (serd_env_qualify(env, &shorter_uri, &prefix_name, &suffix)) { return failure("Qualified %s\n", shorter_uri.buf); } // Test SerdReader and SerdWriter const char* path = "serd_test.ttl"; FILE* fd = fopen(path, "w"); if (!fd) { return failure("Failed to open file %s\n", path); } SerdWriter* writer = serd_writer_new( SERD_TURTLE, (SerdStyle)0, env, NULL, serd_file_sink, fd); if (!writer) { return failure("Failed to create writer\n"); } serd_writer_chop_blank_prefix(writer, USTR("tmp")); serd_writer_chop_blank_prefix(writer, NULL); if (!serd_writer_set_base_uri(writer, &lit)) { return failure("Set base URI to %s\n", lit.buf); } if (!serd_writer_set_prefix(writer, &lit, &lit)) { return failure("Set prefix %s to %s\n", lit.buf, lit.buf); } if (!serd_writer_end_anon(writer, NULL)) { return failure("Ended non-existent anonymous node\n"); } if (serd_writer_get_env(writer) != env) { return failure("Writer has incorrect env\n"); } uint8_t buf[] = { 0x80, 0, 0, 0, 0 }; SerdNode s = serd_node_from_string(SERD_URI, USTR("")); SerdNode p = serd_node_from_string(SERD_URI, USTR("http://example.org/pred")); SerdNode o = serd_node_from_string(SERD_LITERAL, buf); // Write 3 invalid statements (should write nothing) const SerdNode* junk[][5] = { { &s, &p, NULL, NULL, NULL }, { &s, NULL, &o, NULL, NULL }, { NULL, &p, &o, NULL, NULL }, { &s, &p, &SERD_NODE_NULL, NULL, NULL }, { &s, &SERD_NODE_NULL, &o, NULL, NULL }, { &SERD_NODE_NULL, &p, &o, NULL, NULL }, { &s, &o, &o, NULL, NULL }, { &o, &p, &o, NULL, NULL }, { NULL, NULL, NULL, NULL, NULL } }; for (unsigned i = 0; i < sizeof(junk) / (sizeof(SerdNode*) * 5); ++i) { if (!serd_writer_write_statement( writer, 0, NULL, junk[i][0], junk[i][1], junk[i][2], junk[i][3], junk[i][4])) { return failure("Successfully wrote junk statement %d\n", i); } } const SerdNode t = serd_node_from_string(SERD_URI, USTR("urn:Type")); const SerdNode l = serd_node_from_string(SERD_LITERAL, USTR("en")); const SerdNode* good[][5] = { { &s, &p, &o, NULL, NULL }, { &s, &p, &o, &SERD_NODE_NULL, &SERD_NODE_NULL }, { &s, &p, &o, &t, NULL }, { &s, &p, &o, NULL, &l }, { &s, &p, &o, &t, &l }, { &s, &p, &o, &t, &SERD_NODE_NULL }, { &s, &p, &o, &SERD_NODE_NULL, &l }, { &s, &p, &o, NULL, &SERD_NODE_NULL }, { &s, &p, &o, &SERD_NODE_NULL, NULL }, { &s, &p, &o, &SERD_NODE_NULL, NULL } }; for (unsigned i = 0; i < sizeof(good) / (sizeof(SerdNode*) * 5); ++i) { if (serd_writer_write_statement( writer, 0, NULL, good[i][0], good[i][1], good[i][2], good[i][3], good[i][4])) { return failure("Failed to write good statement %d\n", i); } } // Write 1 statement with bad UTF-8 (should be replaced) if (serd_writer_write_statement(writer, 0, NULL, &s, &p, &o, NULL, NULL)) { return failure("Failed to write junk UTF-8\n"); } // Write 1 valid statement o = 
serd_node_from_string(SERD_LITERAL, USTR("hello")); if (serd_writer_write_statement(writer, 0, NULL, &s, &p, &o, NULL, NULL)) { return failure("Failed to write valid statement\n"); } serd_writer_free(writer); // Test chunk sink SerdChunk chunk = { NULL, 0 }; writer = serd_writer_new( SERD_TURTLE, (SerdStyle)0, env, NULL, serd_chunk_sink, &chunk); o = serd_node_from_string(SERD_URI, USTR("http://example.org/base")); if (serd_writer_set_base_uri(writer, &o)) { return failure("Failed to write to chunk sink\n"); } serd_writer_free(writer); uint8_t* out = serd_chunk_sink_finish(&chunk); if (strcmp((const char*)out, "@base <http://example.org/base> .\n")) { return failure("Incorrect chunk output:\n%s\n", chunk.buf); } free(out); // Rewind and test reader fseek(fd, 0, SEEK_SET); ReaderTest* rt = (ReaderTest*)malloc(sizeof(ReaderTest)); rt->n_statements = 0; rt->graph = NULL; SerdReader* reader = serd_reader_new( SERD_TURTLE, rt, free, NULL, NULL, test_sink, NULL); if (!reader) { return failure("Failed to create reader\n"); } if (serd_reader_get_handle(reader) != rt) { return failure("Corrupt reader handle\n"); } SerdNode g = serd_node_from_string(SERD_URI, USTR("http://example.org/")); serd_reader_set_default_graph(reader, &g); serd_reader_add_blank_prefix(reader, USTR("tmp")); serd_reader_add_blank_prefix(reader, NULL); if (!serd_reader_read_file(reader, USTR("http://notafile"))) { return failure("Apparently read an http URI\n"); } if (!serd_reader_read_file(reader, USTR("file:///better/not/exist"))) { return failure("Apprently read a non-existent file\n"); } SerdStatus st = serd_reader_read_file(reader, USTR(path)); if (st) { return failure("Error reading file (%s)\n", serd_strerror(st)); } if (rt->n_statements != 12) { return failure("Bad statement count %d\n", rt->n_statements); } else if (!rt->graph || !rt->graph->buf || strcmp((const char*)rt->graph->buf, "http://example.org/")) { return failure("Bad graph %p\n", rt->graph); } if (!serd_reader_read_string(reader, USTR("This isn't Turtle at all."))) { return failure("Parsed invalid string successfully.\n"); } serd_reader_free(reader); fclose(fd); serd_env_free(env); printf("Success\n"); return 0; }
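/*
 * Minimal sketch distilled from the tests above, using only calls already
 * exercised in main(): serialise a single statement as Turtle to stdout.
 */
#include <stdio.h>
#include "serd/serd.h"

int write_one_triple(void)
{
	SerdEnv*    env    = serd_env_new(NULL);
	SerdWriter* writer = serd_writer_new(SERD_TURTLE, (SerdStyle)0, env,
	                                     NULL, serd_file_sink, stdout);

	SerdNode s = serd_node_from_string(
		SERD_URI, (const uint8_t*)"http://example.org/s");
	SerdNode p = serd_node_from_string(
		SERD_URI, (const uint8_t*)"http://example.org/p");
	SerdNode o = serd_node_from_string(
		SERD_LITERAL, (const uint8_t*)"hello");

	/* Returns a non-zero SerdStatus on failure, as the junk-statement
	   tests above demonstrate */
	SerdStatus st = serd_writer_write_statement(
		writer, 0, NULL, &s, &p, &o, NULL, NULL);

	serd_writer_free(writer);
	serd_env_free(env);
	return (int)st;
}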
gpl-2.0
Mouseomics/R
src/library/grDevices/src/init.c
16
3402
/*
 *  R : A Computer Language for Statistical Data Analysis
 *  Copyright (C) 2004-2015 The R Core Team.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, a copy is available at
 *  https://www.R-project.org/Licenses/
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <R.h>
#include <Rinternals.h>
#include <R_ext/GraphicsEngine.h>
#include "grDevices.h"

#include <R_ext/Rdynload.h>

#ifndef _WIN32
/* This really belongs with the X11 module, but it is about devices */
static SEXP cairoProps(SEXP in)
{
    int which = asInteger(in);
    if(which == 1)
	return ScalarLogical(
#ifdef HAVE_WORKING_CAIRO
	    1
#else
	    0
#endif
	    );
    else if(which == 2)
	return ScalarLogical(
#ifdef HAVE_PANGOCAIRO
	    1
#else
	    0
#endif
	    );
    return R_NilValue;
}
#endif

#define CALLDEF(name, n)  {#name, (DL_FUNC) &name, n}

static const R_CallMethodDef CallEntries[] = {
    CALLDEF(Type1FontInUse, 2),
    CALLDEF(CIDFontInUse, 2),
    CALLDEF(R_CreateAtVector, 4),
    CALLDEF(R_GAxisPars, 3),
    CALLDEF(chull, 1),
    CALLDEF(gray, 2),
    CALLDEF(RGB2hsv, 1),
    CALLDEF(rgb, 6),
    CALLDEF(hsv, 4),
    CALLDEF(hcl, 5),
    CALLDEF(col2rgb, 2),
    CALLDEF(colors, 0),
    CALLDEF(palette, 1),
    CALLDEF(palette2, 1),
    CALLDEF(cairoVersion, 0),
    CALLDEF(bmVersion, 0),
#ifndef _WIN32
    CALLDEF(makeQuartzDefault, 0),
    CALLDEF(cairoProps, 1),
#else
    CALLDEF(bringToTop, 2),
    CALLDEF(msgWindow, 2),
#endif
    {NULL, NULL, 0}
};

#define EXTDEF(name, n)  {#name, (DL_FUNC) &name, n}

static const R_ExternalMethodDef ExtEntries[] = {
    EXTDEF(PicTeX, 6),
    EXTDEF(PostScript, 19),
    EXTDEF(XFig, 14),
    EXTDEF(PDF, 20),
    EXTDEF(devCairo, 11),
    EXTDEF(devcap, 0),
    EXTDEF(devcapture, 1),
    EXTDEF(devcontrol, 1),
    EXTDEF(devcopy, 1),
    EXTDEF(devcur, 0),
    EXTDEF(devdisplaylist, 0),
    EXTDEF(devholdflush, 1),
    EXTDEF(devnext, 1),
    EXTDEF(devoff, 1),
    EXTDEF(devprev, 1),
    EXTDEF(devset, 1),
    EXTDEF(devsize, 0),
    EXTDEF(contourLines, 4),
    EXTDEF(getSnapshot, 0),
    EXTDEF(playSnapshot, 1),
    EXTDEF(getGraphicsEvent, 1),
    EXTDEF(getGraphicsEventEnv, 1),
    EXTDEF(setGraphicsEventEnv, 2),
    EXTDEF(devAskNewPage, 1),
#ifdef _WIN32
    EXTDEF(savePlot, 4),
    EXTDEF(devga, 21),
#else
    EXTDEF(savePlot, 3),
    EXTDEF(Quartz, 11),
    EXTDEF(X11, 17),
#endif
    {NULL, NULL, 0}
};

#ifdef HAVE_AQUA
extern void setup_RdotApp(void);
extern Rboolean useaqua;
#endif

void R_init_grDevices(DllInfo *dll)
{
    initPalette();
    R_registerRoutines(dll, NULL, CallEntries, NULL, ExtEntries);
    R_useDynamicSymbols(dll, FALSE);
    R_forceSymbols(dll, TRUE);
#ifdef HAVE_AQUA
    /* R.app will run event loop, so if we are running under that
       we don't need to run one here */
    if(useaqua) setup_RdotApp();
#endif
}
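/*
 * Illustration, not from the original file: the registration macro above
 * simply builds an R_CallMethodDef initialiser, so CALLDEF(rgb, 6) expands
 * to the entry below, telling R that .Call("rgb", ...) passes six arguments
 * to the C function rgb():
 *
 *     { "rgb", (DL_FUNC) &rgb, 6 }
 */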
gpl-2.0
joisonwk/linux
drivers/watchdog/bcm47xx_wdt.c
528
6340
/*
 *  Watchdog driver for Broadcom BCM47XX
 *
 *  Copyright (C) 2008 Aleksandar Radovanovic <biblbroks@sezampro.rs>
 *  Copyright (C) 2009 Matthieu CASTET <castet.matthieu@free.fr>
 *  Copyright (C) 2012-2013 Hauke Mehrtens <hauke@hauke-m.de>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bcm47xx_wdt.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/types.h>
#include <linux/watchdog.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

#define DRV_NAME		"bcm47xx_wdt"

#define WDT_DEFAULT_TIME	30	/* seconds */
#define WDT_SOFTTIMER_MAX	255	/* seconds */
#define WDT_SOFTTIMER_THRESHOLD	60	/* seconds */

static int timeout = WDT_DEFAULT_TIME;
static bool nowayout = WATCHDOG_NOWAYOUT;

module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout, "Watchdog time in seconds. (default="
		 __MODULE_STRING(WDT_DEFAULT_TIME) ")");
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
		 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");

static inline struct bcm47xx_wdt *bcm47xx_wdt_get(struct watchdog_device *wdd)
{
	return container_of(wdd, struct bcm47xx_wdt, wdd);
}

static int bcm47xx_wdt_hard_keepalive(struct watchdog_device *wdd)
{
	struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd);

	wdt->timer_set_ms(wdt, wdd->timeout * 1000);

	return 0;
}

static int bcm47xx_wdt_hard_start(struct watchdog_device *wdd)
{
	return 0;
}

static int bcm47xx_wdt_hard_stop(struct watchdog_device *wdd)
{
	struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd);

	wdt->timer_set(wdt, 0);

	return 0;
}

static int bcm47xx_wdt_hard_set_timeout(struct watchdog_device *wdd,
					unsigned int new_time)
{
	struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd);
	u32 max_timer = wdt->max_timer_ms;

	if (new_time < 1 || new_time > max_timer / 1000) {
		pr_warn("timeout value must be 1<=x<=%d, got %d\n",
			max_timer / 1000, new_time);
		return -EINVAL;
	}

	wdd->timeout = new_time;
	return 0;
}

static struct watchdog_ops bcm47xx_wdt_hard_ops = {
	.owner		= THIS_MODULE,
	.start		= bcm47xx_wdt_hard_start,
	.stop		= bcm47xx_wdt_hard_stop,
	.ping		= bcm47xx_wdt_hard_keepalive,
	.set_timeout	= bcm47xx_wdt_hard_set_timeout,
};

static void bcm47xx_wdt_soft_timer_tick(unsigned long data)
{
	struct bcm47xx_wdt *wdt = (struct bcm47xx_wdt *)data;
	u32 next_tick = min(wdt->wdd.timeout * 1000, wdt->max_timer_ms);

	if (!atomic_dec_and_test(&wdt->soft_ticks)) {
		wdt->timer_set_ms(wdt, next_tick);
		mod_timer(&wdt->soft_timer, jiffies + HZ);
	} else {
		pr_crit("Watchdog will fire soon!!!\n");
	}
}

static int bcm47xx_wdt_soft_keepalive(struct watchdog_device *wdd)
{
	struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd);

	atomic_set(&wdt->soft_ticks, wdd->timeout);

	return 0;
}

static int bcm47xx_wdt_soft_start(struct watchdog_device *wdd)
{
	struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd);

	bcm47xx_wdt_soft_keepalive(wdd);
	bcm47xx_wdt_soft_timer_tick((unsigned long)wdt);

	return 0;
}

static int bcm47xx_wdt_soft_stop(struct watchdog_device *wdd)
{
	struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd);

	del_timer_sync(&wdt->soft_timer);
	wdt->timer_set(wdt, 0);

	return 0;
}

static int bcm47xx_wdt_soft_set_timeout(struct watchdog_device *wdd,
					unsigned int new_time)
{
	if (new_time < 1 || new_time > WDT_SOFTTIMER_MAX) {
		pr_warn("timeout value must be 1<=x<=%d, got %d\n",
			WDT_SOFTTIMER_MAX, new_time);
		return -EINVAL;
	}

	wdd->timeout = new_time;
	return 0;
}

static const struct watchdog_info bcm47xx_wdt_info = {
	.identity	= DRV_NAME,
	.options	= WDIOF_SETTIMEOUT |
			  WDIOF_KEEPALIVEPING |
			  WDIOF_MAGICCLOSE,
};

static int bcm47xx_wdt_notify_sys(struct notifier_block *this,
				  unsigned long code, void *unused)
{
	struct bcm47xx_wdt *wdt;

	wdt = container_of(this, struct bcm47xx_wdt, notifier);
	if (code == SYS_DOWN || code == SYS_HALT)
		wdt->wdd.ops->stop(&wdt->wdd);
	return NOTIFY_DONE;
}

static struct watchdog_ops bcm47xx_wdt_soft_ops = {
	.owner		= THIS_MODULE,
	.start		= bcm47xx_wdt_soft_start,
	.stop		= bcm47xx_wdt_soft_stop,
	.ping		= bcm47xx_wdt_soft_keepalive,
	.set_timeout	= bcm47xx_wdt_soft_set_timeout,
};

static int bcm47xx_wdt_probe(struct platform_device *pdev)
{
	int ret;
	bool soft;
	struct bcm47xx_wdt *wdt = dev_get_platdata(&pdev->dev);

	if (!wdt)
		return -ENXIO;

	soft = wdt->max_timer_ms < WDT_SOFTTIMER_THRESHOLD * 1000;

	if (soft) {
		wdt->wdd.ops = &bcm47xx_wdt_soft_ops;
		setup_timer(&wdt->soft_timer, bcm47xx_wdt_soft_timer_tick,
			    (unsigned long)wdt);
	} else {
		wdt->wdd.ops = &bcm47xx_wdt_hard_ops;
	}

	wdt->wdd.info = &bcm47xx_wdt_info;
	wdt->wdd.timeout = WDT_DEFAULT_TIME;
	ret = wdt->wdd.ops->set_timeout(&wdt->wdd, timeout);
	if (ret)
		goto err_timer;
	watchdog_set_nowayout(&wdt->wdd, nowayout);

	wdt->notifier.notifier_call = &bcm47xx_wdt_notify_sys;

	ret = register_reboot_notifier(&wdt->notifier);
	if (ret)
		goto err_timer;

	ret = watchdog_register_device(&wdt->wdd);
	if (ret)
		goto err_notifier;

	dev_info(&pdev->dev, "BCM47xx Watchdog Timer enabled (%d seconds%s%s)\n",
		 timeout, nowayout ? ", nowayout" : "",
		 soft ? ", Software Timer" : "");
	return 0;

err_notifier:
	unregister_reboot_notifier(&wdt->notifier);
err_timer:
	if (soft)
		del_timer_sync(&wdt->soft_timer);

	return ret;
}

static int bcm47xx_wdt_remove(struct platform_device *pdev)
{
	struct bcm47xx_wdt *wdt = dev_get_platdata(&pdev->dev);

	if (!wdt)
		return -ENXIO;

	watchdog_unregister_device(&wdt->wdd);
	unregister_reboot_notifier(&wdt->notifier);

	return 0;
}

static struct platform_driver bcm47xx_wdt_driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "bcm47xx-wdt",
	},
	.probe	= bcm47xx_wdt_probe,
	.remove	= bcm47xx_wdt_remove,
};

module_platform_driver(bcm47xx_wdt_driver);

MODULE_AUTHOR("Aleksandar Radovanovic");
MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
MODULE_DESCRIPTION("Watchdog driver for Broadcom BCM47xx");
MODULE_LICENSE("GPL");
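A minimal userspace sketch of how a watchdog registered by this driver is typically exercised through the generic /dev/watchdog character device. It uses only the standard UAPI from linux/watchdog.h (WDIOC_SETTIMEOUT, WDIOC_KEEPALIVE) plus the magic-close write that the WDIOF_MAGICCLOSE flag above advertises; the device path and the 15-second timeout are illustrative assumptions, not something taken from the driver source.

/* Hedged example, not part of the driver above. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd = open("/dev/watchdog", O_WRONLY);	/* path is an assumption */
	int timeout = 15;				/* illustrative value */
	int i;

	if (fd < 0) {
		perror("open /dev/watchdog");
		return 1;
	}

	/* Rejected with EINVAL if outside the 1..max range the driver checks */
	if (ioctl(fd, WDIOC_SETTIMEOUT, &timeout) == 0)
		printf("timeout set to %d seconds\n", timeout);

	/* Each ping reaches the driver's .ping op (hard or soft keepalive) */
	for (i = 0; i < 5; i++) {
		ioctl(fd, WDIOC_KEEPALIVE, 0);
		sleep(1);
	}

	/* Magic close: orderly stop, honoured unless nowayout is set */
	write(fd, "V", 1);
	close(fd);
	return 0;
}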
gpl-2.0
TimWSpence/linux
drivers/media/rc/st_rc.c
784
11221
/*
 * Copyright (C) 2013 STMicroelectronics Limited
 * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <media/rc-core.h>
#include <linux/pinctrl/consumer.h>

struct st_rc_device {
	struct device		*dev;
	int			irq;
	int			irq_wake;
	struct clk		*sys_clock;
	volatile void __iomem	*base;		/* Register base address */
	volatile void __iomem	*rx_base;	/* RX Register base address */
	struct rc_dev		*rdev;
	bool			overclocking;
	int			sample_mult;
	int			sample_div;
	bool			rxuhfmode;
	struct reset_control	*rstc;
};

/* Registers */
#define IRB_SAMPLE_RATE_COMM	0x64	/* sample freq divisor */
#define IRB_CLOCK_SEL		0x70	/* clock select        */
#define IRB_CLOCK_SEL_STATUS	0x74	/* clock status        */
/* IRB IR/UHF receiver registers */
#define IRB_RX_ON		0x40	/* pulse time capture  */
#define IRB_RX_SYS		0x44	/* sym period capture  */
#define IRB_RX_INT_EN		0x48	/* IRQ enable (R/W)    */
#define IRB_RX_INT_STATUS	0x4c	/* IRQ status (R/W)    */
#define IRB_RX_EN		0x50	/* Receive enable      */
#define IRB_MAX_SYM_PERIOD	0x54	/* max sym value       */
#define IRB_RX_INT_CLEAR	0x58	/* overrun status      */
#define IRB_RX_STATUS		0x6c	/* receive status      */
#define IRB_RX_NOISE_SUPPR	0x5c	/* noise suppression   */
#define IRB_RX_POLARITY_INV	0x68	/* polarity inverter   */

/**
 * IRQ set: Enable full FIFO		1 -> bit 3;
 *          Enable overrun IRQ		1 -> bit 2;
 *          Enable last symbol IRQ	1 -> bit 1;
 *          Enable RX interrupt		1 -> bit 0;
 */
#define IRB_RX_INTS		0x0f
#define IRB_RX_OVERRUN_INT	0x04
/* maximum symbol period (microsecs), timeout to detect end of symbol train */
#define MAX_SYMB_TIME		0x5000
#define IRB_SAMPLE_FREQ		10000000
#define IRB_FIFO_NOT_EMPTY	0xff00
#define IRB_OVERFLOW		0x4
#define IRB_TIMEOUT		0xffff
#define IR_ST_NAME		"st-rc"

static void st_rc_send_lirc_timeout(struct rc_dev *rdev)
{
	DEFINE_IR_RAW_EVENT(ev);
	ev.timeout = true;
	ir_raw_event_store(rdev, &ev);
}

/**
 * RX graphical example to better understand the difference between ST IR block
 * output and standard definition used by LIRC (and most of the world!)
 *
 *           mark                                     mark
 *      |-IRB_RX_ON-|                            |-IRB_RX_ON-|
 *      ___  ___  ___                            ___  ___  ___             _
 *      | |  | |  | |                            | |  | |  | |             |
 *      | |  | |  | |          space 0           | |  | |  | |   space 1   |
 * _____| |__| |__| |____________________________| |__| |__| |_____________|
 *
 *      |--------------- IRB_RX_SYS -------------|------ IRB_RX_SYS -------|
 *
 *      |------------- encoding bit 0 -----------|---- encoding bit 1 -----|
 *
 * ST hardware returns mark (IRB_RX_ON) and total symbol time (IRB_RX_SYS), so
 * to convert to standard mark/space we have to calculate
 * space = (IRB_RX_SYS - mark).
 * The mark time represents the amount of time the carrier (usually 36-40kHz)
 * is detected. The above example shows Pulse Width Modulation encoding where
 * bit 0 is represented by space > mark.
 */
static irqreturn_t st_rc_rx_interrupt(int irq, void *data)
{
	unsigned int symbol, mark = 0;
	struct st_rc_device *dev = data;
	int last_symbol = 0;
	u32 status;
	DEFINE_IR_RAW_EVENT(ev);

	if (dev->irq_wake)
		pm_wakeup_event(dev->dev, 0);

	status = readl(dev->rx_base + IRB_RX_STATUS);

	while (status & (IRB_FIFO_NOT_EMPTY | IRB_OVERFLOW)) {
		u32 int_status = readl(dev->rx_base + IRB_RX_INT_STATUS);

		if (unlikely(int_status & IRB_RX_OVERRUN_INT)) {
			/* discard the entire collection in case of errors! */
			ir_raw_event_reset(dev->rdev);
			dev_info(dev->dev, "IR RX overrun\n");
			writel(IRB_RX_OVERRUN_INT,
			       dev->rx_base + IRB_RX_INT_CLEAR);
			continue;
		}

		symbol = readl(dev->rx_base + IRB_RX_SYS);
		mark = readl(dev->rx_base + IRB_RX_ON);

		if (symbol == IRB_TIMEOUT)
			last_symbol = 1;

		/* Ignore any noise */
		if ((mark > 2) && (symbol > 1)) {
			symbol -= mark;
			if (dev->overclocking) { /* adjustments to timings */
				symbol *= dev->sample_mult;
				symbol /= dev->sample_div;
				mark *= dev->sample_mult;
				mark /= dev->sample_div;
			}

			ev.duration = US_TO_NS(mark);
			ev.pulse = true;
			ir_raw_event_store(dev->rdev, &ev);

			if (!last_symbol) {
				ev.duration = US_TO_NS(symbol);
				ev.pulse = false;
				ir_raw_event_store(dev->rdev, &ev);
			} else {
				st_rc_send_lirc_timeout(dev->rdev);
			}
		}
		last_symbol = 0;
		status = readl(dev->rx_base + IRB_RX_STATUS);
	}

	writel(IRB_RX_INTS, dev->rx_base + IRB_RX_INT_CLEAR);

	/* Empty software fifo */
	ir_raw_event_handle(dev->rdev);
	return IRQ_HANDLED;
}

static void st_rc_hardware_init(struct st_rc_device *dev)
{
	int baseclock, freqdiff;
	unsigned int rx_max_symbol_per = MAX_SYMB_TIME;
	unsigned int rx_sampling_freq_div;

	/* Enable the IP */
	if (dev->rstc)
		reset_control_deassert(dev->rstc);

	clk_prepare_enable(dev->sys_clock);
	baseclock = clk_get_rate(dev->sys_clock);

	/* IRB input pins are inverted internally from high to low. */
	writel(1, dev->rx_base + IRB_RX_POLARITY_INV);

	rx_sampling_freq_div = baseclock / IRB_SAMPLE_FREQ;
	writel(rx_sampling_freq_div, dev->base + IRB_SAMPLE_RATE_COMM);

	freqdiff = baseclock - (rx_sampling_freq_div * IRB_SAMPLE_FREQ);
	if (freqdiff) { /* over clocking, work out the adjustment factors */
		dev->overclocking = true;
		dev->sample_mult = 1000;
		dev->sample_div = baseclock / (10000 * rx_sampling_freq_div);
		rx_max_symbol_per = (rx_max_symbol_per * 1000) / dev->sample_div;
	}

	writel(rx_max_symbol_per, dev->rx_base + IRB_MAX_SYM_PERIOD);
}

static int st_rc_remove(struct platform_device *pdev)
{
	struct st_rc_device *rc_dev = platform_get_drvdata(pdev);

	clk_disable_unprepare(rc_dev->sys_clock);
	rc_unregister_device(rc_dev->rdev);
	return 0;
}

static int st_rc_open(struct rc_dev *rdev)
{
	struct st_rc_device *dev = rdev->priv;
	unsigned long flags;

	local_irq_save(flags);
	/* enable interrupts and receiver */
	writel(IRB_RX_INTS, dev->rx_base + IRB_RX_INT_EN);
	writel(0x01, dev->rx_base + IRB_RX_EN);
	local_irq_restore(flags);

	return 0;
}

static void st_rc_close(struct rc_dev *rdev)
{
	struct st_rc_device *dev = rdev->priv;

	/* disable interrupts and receiver */
	writel(0x00, dev->rx_base + IRB_RX_EN);
	writel(0x00, dev->rx_base + IRB_RX_INT_EN);
}

static int st_rc_probe(struct platform_device *pdev)
{
	int ret = -EINVAL;
	struct rc_dev *rdev;
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct st_rc_device *rc_dev;
	struct device_node *np = pdev->dev.of_node;
	const char *rx_mode;

	rc_dev = devm_kzalloc(dev, sizeof(struct st_rc_device), GFP_KERNEL);
	if (!rc_dev)
		return -ENOMEM;

	rdev = rc_allocate_device();
	if (!rdev)
		return -ENOMEM;

	if (np && !of_property_read_string(np, "rx-mode", &rx_mode)) {
		if (!strcmp(rx_mode, "uhf")) {
			rc_dev->rxuhfmode = true;
		} else if (!strcmp(rx_mode, "infrared")) {
			rc_dev->rxuhfmode = false;
		} else {
			dev_err(dev, "Unsupported rx mode [%s]\n", rx_mode);
			goto err;
		}
	} else {
		goto err;
	}

	rc_dev->sys_clock = devm_clk_get(dev, NULL);
	if (IS_ERR(rc_dev->sys_clock)) {
		dev_err(dev, "System clock not found\n");
		ret = PTR_ERR(rc_dev->sys_clock);
		goto err;
	}

	rc_dev->irq = platform_get_irq(pdev, 0);
	if (rc_dev->irq < 0) {
		ret = rc_dev->irq;
		goto err;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	rc_dev->base = devm_ioremap_resource(dev, res);
	if (IS_ERR((__force void *)rc_dev->base)) {
		ret = PTR_ERR((__force void *)rc_dev->base);
		goto err;
	}

	if (rc_dev->rxuhfmode)
		rc_dev->rx_base = rc_dev->base + 0x40;
	else
		rc_dev->rx_base = rc_dev->base;

	rc_dev->rstc = reset_control_get_optional(dev, NULL);
	if (IS_ERR(rc_dev->rstc))
		rc_dev->rstc = NULL;

	rc_dev->dev = dev;
	platform_set_drvdata(pdev, rc_dev);
	st_rc_hardware_init(rc_dev);

	rdev->driver_type = RC_DRIVER_IR_RAW;
	rdev->allowed_protocols = RC_BIT_ALL;
	/* rx sampling rate is 10Mhz */
	rdev->rx_resolution = 100;
	rdev->timeout = US_TO_NS(MAX_SYMB_TIME);
	rdev->priv = rc_dev;
	rdev->open = st_rc_open;
	rdev->close = st_rc_close;
	rdev->driver_name = IR_ST_NAME;
	rdev->map_name = RC_MAP_LIRC;
	rdev->input_name = "ST Remote Control Receiver";

	/* enable wake via this device */
	device_set_wakeup_capable(dev, true);
	device_set_wakeup_enable(dev, true);

	ret = rc_register_device(rdev);
	if (ret < 0)
		goto clkerr;

	rc_dev->rdev = rdev;
	if (devm_request_irq(dev, rc_dev->irq, st_rc_rx_interrupt,
			     IRQF_NO_SUSPEND, IR_ST_NAME, rc_dev) < 0) {
		dev_err(dev, "IRQ %d register failed\n", rc_dev->irq);
		ret = -EINVAL;
		goto rcerr;
	}

	/**
	 * for LIRC_MODE_MODE2 or LIRC_MODE_PULSE or LIRC_MODE_RAW
	 * lircd expects a long space first before a signal train to sync.
	 */
	st_rc_send_lirc_timeout(rdev);

	dev_info(dev, "setup in %s mode\n", rc_dev->rxuhfmode ? "UHF" : "IR");

	return ret;
rcerr:
	rc_unregister_device(rdev);
	rdev = NULL;
clkerr:
	clk_disable_unprepare(rc_dev->sys_clock);
err:
	rc_free_device(rdev);
	dev_err(dev, "Unable to register device (%d)\n", ret);
	return ret;
}

#ifdef CONFIG_PM
static int st_rc_suspend(struct device *dev)
{
	struct st_rc_device *rc_dev = dev_get_drvdata(dev);

	if (device_may_wakeup(dev)) {
		if (!enable_irq_wake(rc_dev->irq))
			rc_dev->irq_wake = 1;
		else
			return -EINVAL;
	} else {
		pinctrl_pm_select_sleep_state(dev);
		writel(0x00, rc_dev->rx_base + IRB_RX_EN);
		writel(0x00, rc_dev->rx_base + IRB_RX_INT_EN);
		clk_disable_unprepare(rc_dev->sys_clock);
		if (rc_dev->rstc)
			reset_control_assert(rc_dev->rstc);
	}

	return 0;
}

static int st_rc_resume(struct device *dev)
{
	struct st_rc_device *rc_dev = dev_get_drvdata(dev);
	struct rc_dev *rdev = rc_dev->rdev;

	if (rc_dev->irq_wake) {
		disable_irq_wake(rc_dev->irq);
		rc_dev->irq_wake = 0;
	} else {
		pinctrl_pm_select_default_state(dev);
		st_rc_hardware_init(rc_dev);
		if (rdev->users) {
			writel(IRB_RX_INTS, rc_dev->rx_base + IRB_RX_INT_EN);
			writel(0x01, rc_dev->rx_base + IRB_RX_EN);
		}
	}

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(st_rc_pm_ops, st_rc_suspend, st_rc_resume);

#ifdef CONFIG_OF
static struct of_device_id st_rc_match[] = {
	{ .compatible = "st,comms-irb", },
	{},
};

MODULE_DEVICE_TABLE(of, st_rc_match);
#endif

static struct platform_driver st_rc_driver = {
	.driver = {
		.name = IR_ST_NAME,
		.of_match_table = of_match_ptr(st_rc_match),
		.pm = &st_rc_pm_ops,
	},
	.probe = st_rc_probe,
	.remove = st_rc_remove,
};

module_platform_driver(st_rc_driver);

MODULE_DESCRIPTION("RC Transceiver driver for STMicroelectronics platforms")
;
MODULE_AUTHOR("STMicroelectronics (R&D) Ltd");
MODULE_LICENSE("GPL");
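A standalone sketch (not part of the driver) of the timing conversion st_rc_rx_interrupt() performs. The IP reports the mark (IRB_RX_ON) and the whole symbol period (IRB_RX_SYS), so the space LIRC expects is symbol - mark; when the system clock is not an integer multiple of the 10 MHz sampling clock, both values are rescaled by the sample_mult/sample_div factors that st_rc_hardware_init() derives. All concrete numbers below are made-up illustrations, not values from real hardware.

/* Hedged example mirroring the driver's arithmetic; assumptions noted inline. */
#include <stdio.h>

struct timing {
	unsigned int mark_us;
	unsigned int space_us;
};

static struct timing st_rc_convert(unsigned int mark, unsigned int symbol,
				   int overclocking,
				   int sample_mult, int sample_div)
{
	struct timing t;

	symbol -= mark;			/* space is what remains of the symbol */
	if (overclocking) {		/* same correction as the IRQ handler */
		symbol = symbol * sample_mult / sample_div;
		mark = mark * sample_mult / sample_div;
	}
	t.mark_us = mark;
	t.space_us = symbol;
	return t;
}

int main(void)
{
	/*
	 * Assumed baseclock of 30.6 MHz: freq divisor = 3, so
	 * sample_div = 30600000 / (10000 * 3) = 1020, sample_mult = 1000,
	 * matching how st_rc_hardware_init() would compute them.
	 */
	struct timing t = st_rc_convert(900, 2700, 1, 1000, 1020);

	printf("mark %u us, space %u us\n", t.mark_us, t.space_us);
	return 0;
}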
gpl-2.0
LorDClockaN/shooter-ics
net/ipv6/mcast.c
1040
64136
/* * Multicast support for IPv6 * Linux INET6 implementation * * Authors: * Pedro Roque <roque@di.fc.ul.pt> * * Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ /* Changes: * * yoshfuji : fix format of router-alert option * YOSHIFUJI Hideaki @USAGI: * Fixed source address for MLD message based on * <draft-ietf-magma-mld-source-05.txt>. * YOSHIFUJI Hideaki @USAGI: * - Ignore Queries for invalid addresses. * - MLD for link-local addresses. * David L Stevens <dlstevens@us.ibm.com>: * - MLDv2 support */ #include <linux/module.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/string.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/jiffies.h> #include <linux/times.h> #include <linux/net.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/route.h> #include <linux/init.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <net/mld.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv6.h> #include <net/net_namespace.h> #include <net/sock.h> #include <net/snmp.h> #include <net/ipv6.h> #include <net/protocol.h> #include <net/if_inet6.h> #include <net/ndisc.h> #include <net/addrconf.h> #include <net/ip6_route.h> #include <net/inet_common.h> #include <net/ip6_checksum.h> /* Set to 3 to get tracing... */ #define MCAST_DEBUG 2 #if MCAST_DEBUG >= 3 #define MDBG(x) printk x #else #define MDBG(x) #endif /* Ensure that we have struct in6_addr aligned on 32bit word. 
*/ static void *__mld2_query_bugs[] __attribute__((__unused__)) = { BUILD_BUG_ON_NULL(offsetof(struct mld2_query, mld2q_srcs) % 4), BUILD_BUG_ON_NULL(offsetof(struct mld2_report, mld2r_grec) % 4), BUILD_BUG_ON_NULL(offsetof(struct mld2_grec, grec_mca) % 4) }; static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT; /* Big mc list lock for all the sockets */ static DEFINE_SPINLOCK(ipv6_sk_mc_lock); static void igmp6_join_group(struct ifmcaddr6 *ma); static void igmp6_leave_group(struct ifmcaddr6 *ma); static void igmp6_timer_handler(unsigned long data); static void mld_gq_timer_expire(unsigned long data); static void mld_ifc_timer_expire(unsigned long data); static void mld_ifc_event(struct inet6_dev *idev); static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc); static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *addr); static void mld_clear_delrec(struct inet6_dev *idev); static int sf_setstate(struct ifmcaddr6 *pmc); static void sf_markstate(struct ifmcaddr6 *pmc); static void ip6_mc_clear_src(struct ifmcaddr6 *pmc); static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca, int sfmode, int sfcount, const struct in6_addr *psfsrc, int delta); static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca, int sfmode, int sfcount, const struct in6_addr *psfsrc, int delta); static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml, struct inet6_dev *idev); #define IGMP6_UNSOLICITED_IVAL (10*HZ) #define MLD_QRV_DEFAULT 2 #define MLD_V1_SEEN(idev) (dev_net((idev)->dev)->ipv6.devconf_all->force_mld_version == 1 || \ (idev)->cnf.force_mld_version == 1 || \ ((idev)->mc_v1_seen && \ time_before(jiffies, (idev)->mc_v1_seen))) #define IPV6_MLD_MAX_MSF 64 int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF; /* * socket join on multicast group */ #define for_each_pmc_rcu(np, pmc) \ for (pmc = rcu_dereference(np->ipv6_mc_list); \ pmc != NULL; \ pmc = rcu_dereference(pmc->next)) int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) { struct net_device *dev = NULL; struct ipv6_mc_socklist *mc_lst; struct ipv6_pinfo *np = inet6_sk(sk); struct net *net = sock_net(sk); int err; if (!ipv6_addr_is_multicast(addr)) return -EINVAL; rcu_read_lock(); for_each_pmc_rcu(np, mc_lst) { if ((ifindex == 0 || mc_lst->ifindex == ifindex) && ipv6_addr_equal(&mc_lst->addr, addr)) { rcu_read_unlock(); return -EADDRINUSE; } } rcu_read_unlock(); mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL); if (mc_lst == NULL) return -ENOMEM; mc_lst->next = NULL; ipv6_addr_copy(&mc_lst->addr, addr); rcu_read_lock(); if (ifindex == 0) { struct rt6_info *rt; rt = rt6_lookup(net, addr, NULL, 0, 0); if (rt) { dev = rt->rt6i_dev; dst_release(&rt->dst); } } else dev = dev_get_by_index_rcu(net, ifindex); if (dev == NULL) { rcu_read_unlock(); sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); return -ENODEV; } mc_lst->ifindex = dev->ifindex; mc_lst->sfmode = MCAST_EXCLUDE; rwlock_init(&mc_lst->sflock); mc_lst->sflist = NULL; /* * now add/increase the group membership on the device */ err = ipv6_dev_mc_inc(dev, addr); if (err) { rcu_read_unlock(); sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); return err; } spin_lock(&ipv6_sk_mc_lock); mc_lst->next = np->ipv6_mc_list; rcu_assign_pointer(np->ipv6_mc_list, mc_lst); spin_unlock(&ipv6_sk_mc_lock); rcu_read_unlock(); return 0; } /* * socket leave on multicast group */ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) { struct ipv6_pinfo *np 
= inet6_sk(sk); struct ipv6_mc_socklist *mc_lst; struct ipv6_mc_socklist __rcu **lnk; struct net *net = sock_net(sk); spin_lock(&ipv6_sk_mc_lock); for (lnk = &np->ipv6_mc_list; (mc_lst = rcu_dereference_protected(*lnk, lockdep_is_held(&ipv6_sk_mc_lock))) !=NULL ; lnk = &mc_lst->next) { if ((ifindex == 0 || mc_lst->ifindex == ifindex) && ipv6_addr_equal(&mc_lst->addr, addr)) { struct net_device *dev; *lnk = mc_lst->next; spin_unlock(&ipv6_sk_mc_lock); rcu_read_lock(); dev = dev_get_by_index_rcu(net, mc_lst->ifindex); if (dev != NULL) { struct inet6_dev *idev = __in6_dev_get(dev); (void) ip6_mc_leave_src(sk, mc_lst, idev); if (idev) __ipv6_dev_mc_dec(idev, &mc_lst->addr); } else (void) ip6_mc_leave_src(sk, mc_lst, NULL); rcu_read_unlock(); atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc); kfree_rcu(mc_lst, rcu); return 0; } } spin_unlock(&ipv6_sk_mc_lock); return -EADDRNOTAVAIL; } /* called with rcu_read_lock() */ static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net, const struct in6_addr *group, int ifindex) { struct net_device *dev = NULL; struct inet6_dev *idev = NULL; if (ifindex == 0) { struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, 0); if (rt) { dev = rt->rt6i_dev; dst_release(&rt->dst); } } else dev = dev_get_by_index_rcu(net, ifindex); if (!dev) return NULL; idev = __in6_dev_get(dev); if (!idev) return NULL; read_lock_bh(&idev->lock); if (idev->dead) { read_unlock_bh(&idev->lock); return NULL; } return idev; } void ipv6_sock_mc_close(struct sock *sk) { struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6_mc_socklist *mc_lst; struct net *net = sock_net(sk); spin_lock(&ipv6_sk_mc_lock); while ((mc_lst = rcu_dereference_protected(np->ipv6_mc_list, lockdep_is_held(&ipv6_sk_mc_lock))) != NULL) { struct net_device *dev; np->ipv6_mc_list = mc_lst->next; spin_unlock(&ipv6_sk_mc_lock); rcu_read_lock(); dev = dev_get_by_index_rcu(net, mc_lst->ifindex); if (dev) { struct inet6_dev *idev = __in6_dev_get(dev); (void) ip6_mc_leave_src(sk, mc_lst, idev); if (idev) __ipv6_dev_mc_dec(idev, &mc_lst->addr); } else (void) ip6_mc_leave_src(sk, mc_lst, NULL); rcu_read_unlock(); atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc); kfree_rcu(mc_lst, rcu); spin_lock(&ipv6_sk_mc_lock); } spin_unlock(&ipv6_sk_mc_lock); } int ip6_mc_source(int add, int omode, struct sock *sk, struct group_source_req *pgsr) { struct in6_addr *source, *group; struct ipv6_mc_socklist *pmc; struct inet6_dev *idev; struct ipv6_pinfo *inet6 = inet6_sk(sk); struct ip6_sf_socklist *psl; struct net *net = sock_net(sk); int i, j, rv; int leavegroup = 0; int pmclocked = 0; int err; source = &((struct sockaddr_in6 *)&pgsr->gsr_source)->sin6_addr; group = &((struct sockaddr_in6 *)&pgsr->gsr_group)->sin6_addr; if (!ipv6_addr_is_multicast(group)) return -EINVAL; rcu_read_lock(); idev = ip6_mc_find_dev_rcu(net, group, pgsr->gsr_interface); if (!idev) { rcu_read_unlock(); return -ENODEV; } err = -EADDRNOTAVAIL; for_each_pmc_rcu(inet6, pmc) { if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface) continue; if (ipv6_addr_equal(&pmc->addr, group)) break; } if (!pmc) { /* must have a prior join */ err = -EINVAL; goto done; } /* if a source filter was set, must be the same mode as before */ if (pmc->sflist) { if (pmc->sfmode != omode) { err = -EINVAL; goto done; } } else if (pmc->sfmode != omode) { /* allow mode switches for empty-set filters */ ip6_mc_add_src(idev, group, omode, 0, NULL, 0); ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0); pmc->sfmode = omode; } write_lock(&pmc->sflock); pmclocked = 1; psl = pmc->sflist; if 
(!add) { if (!psl) goto done; /* err = -EADDRNOTAVAIL */ rv = !0; for (i=0; i<psl->sl_count; i++) { rv = memcmp(&psl->sl_addr[i], source, sizeof(struct in6_addr)); if (rv == 0) break; } if (rv) /* source not found */ goto done; /* err = -EADDRNOTAVAIL */ /* special case - (INCLUDE, empty) == LEAVE_GROUP */ if (psl->sl_count == 1 && omode == MCAST_INCLUDE) { leavegroup = 1; goto done; } /* update the interface filter */ ip6_mc_del_src(idev, group, omode, 1, source, 1); for (j=i+1; j<psl->sl_count; j++) psl->sl_addr[j-1] = psl->sl_addr[j]; psl->sl_count--; err = 0; goto done; } /* else, add a new source to the filter */ if (psl && psl->sl_count >= sysctl_mld_max_msf) { err = -ENOBUFS; goto done; } if (!psl || psl->sl_count == psl->sl_max) { struct ip6_sf_socklist *newpsl; int count = IP6_SFBLOCK; if (psl) count += psl->sl_max; newpsl = sock_kmalloc(sk, IP6_SFLSIZE(count), GFP_ATOMIC); if (!newpsl) { err = -ENOBUFS; goto done; } newpsl->sl_max = count; newpsl->sl_count = count - IP6_SFBLOCK; if (psl) { for (i=0; i<psl->sl_count; i++) newpsl->sl_addr[i] = psl->sl_addr[i]; sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max)); } pmc->sflist = psl = newpsl; } rv = 1; /* > 0 for insert logic below if sl_count is 0 */ for (i=0; i<psl->sl_count; i++) { rv = memcmp(&psl->sl_addr[i], source, sizeof(struct in6_addr)); if (rv == 0) break; } if (rv == 0) /* address already there is an error */ goto done; for (j=psl->sl_count-1; j>=i; j--) psl->sl_addr[j+1] = psl->sl_addr[j]; psl->sl_addr[i] = *source; psl->sl_count++; err = 0; /* update the interface list */ ip6_mc_add_src(idev, group, omode, 1, source, 1); done: if (pmclocked) write_unlock(&pmc->sflock); read_unlock_bh(&idev->lock); rcu_read_unlock(); if (leavegroup) return ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group); return err; } int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf) { const struct in6_addr *group; struct ipv6_mc_socklist *pmc; struct inet6_dev *idev; struct ipv6_pinfo *inet6 = inet6_sk(sk); struct ip6_sf_socklist *newpsl, *psl; struct net *net = sock_net(sk); int leavegroup = 0; int i, err; group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr; if (!ipv6_addr_is_multicast(group)) return -EINVAL; if (gsf->gf_fmode != MCAST_INCLUDE && gsf->gf_fmode != MCAST_EXCLUDE) return -EINVAL; rcu_read_lock(); idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface); if (!idev) { rcu_read_unlock(); return -ENODEV; } err = 0; if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) { leavegroup = 1; goto done; } for_each_pmc_rcu(inet6, pmc) { if (pmc->ifindex != gsf->gf_interface) continue; if (ipv6_addr_equal(&pmc->addr, group)) break; } if (!pmc) { /* must have a prior join */ err = -EINVAL; goto done; } if (gsf->gf_numsrc) { newpsl = sock_kmalloc(sk, IP6_SFLSIZE(gsf->gf_numsrc), GFP_ATOMIC); if (!newpsl) { err = -ENOBUFS; goto done; } newpsl->sl_max = newpsl->sl_count = gsf->gf_numsrc; for (i=0; i<newpsl->sl_count; ++i) { struct sockaddr_in6 *psin6; psin6 = (struct sockaddr_in6 *)&gsf->gf_slist[i]; newpsl->sl_addr[i] = psin6->sin6_addr; } err = ip6_mc_add_src(idev, group, gsf->gf_fmode, newpsl->sl_count, newpsl->sl_addr, 0); if (err) { sock_kfree_s(sk, newpsl, IP6_SFLSIZE(newpsl->sl_max)); goto done; } } else { newpsl = NULL; (void) ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0); } write_lock(&pmc->sflock); psl = pmc->sflist; if (psl) { (void) ip6_mc_del_src(idev, group, pmc->sfmode, psl->sl_count, psl->sl_addr, 0); sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max)); } else (void) ip6_mc_del_src(idev, group, pmc->sfmode, 
0, NULL, 0); pmc->sflist = newpsl; pmc->sfmode = gsf->gf_fmode; write_unlock(&pmc->sflock); err = 0; done: read_unlock_bh(&idev->lock); rcu_read_unlock(); if (leavegroup) err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group); return err; } int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf, struct group_filter __user *optval, int __user *optlen) { int err, i, count, copycount; const struct in6_addr *group; struct ipv6_mc_socklist *pmc; struct inet6_dev *idev; struct ipv6_pinfo *inet6 = inet6_sk(sk); struct ip6_sf_socklist *psl; struct net *net = sock_net(sk); group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr; if (!ipv6_addr_is_multicast(group)) return -EINVAL; rcu_read_lock(); idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface); if (!idev) { rcu_read_unlock(); return -ENODEV; } err = -EADDRNOTAVAIL; /* * changes to the ipv6_mc_list require the socket lock and * a read lock on ip6_sk_mc_lock. We have the socket lock, * so reading the list is safe. */ for_each_pmc_rcu(inet6, pmc) { if (pmc->ifindex != gsf->gf_interface) continue; if (ipv6_addr_equal(group, &pmc->addr)) break; } if (!pmc) /* must have a prior join */ goto done; gsf->gf_fmode = pmc->sfmode; psl = pmc->sflist; count = psl ? psl->sl_count : 0; read_unlock_bh(&idev->lock); rcu_read_unlock(); copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc; gsf->gf_numsrc = count; if (put_user(GROUP_FILTER_SIZE(copycount), optlen) || copy_to_user(optval, gsf, GROUP_FILTER_SIZE(0))) { return -EFAULT; } /* changes to psl require the socket lock, a read lock on * on ipv6_sk_mc_lock and a write lock on pmc->sflock. We * have the socket lock, so reading here is safe. */ for (i=0; i<copycount; i++) { struct sockaddr_in6 *psin6; struct sockaddr_storage ss; psin6 = (struct sockaddr_in6 *)&ss; memset(&ss, 0, sizeof(ss)); psin6->sin6_family = AF_INET6; psin6->sin6_addr = psl->sl_addr[i]; if (copy_to_user(&optval->gf_slist[i], &ss, sizeof(ss))) return -EFAULT; } return 0; done: read_unlock_bh(&idev->lock); rcu_read_unlock(); return err; } int inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr, const struct in6_addr *src_addr) { struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6_mc_socklist *mc; struct ip6_sf_socklist *psl; int rv = 1; rcu_read_lock(); for_each_pmc_rcu(np, mc) { if (ipv6_addr_equal(&mc->addr, mc_addr)) break; } if (!mc) { rcu_read_unlock(); return 1; } read_lock(&mc->sflock); psl = mc->sflist; if (!psl) { rv = mc->sfmode == MCAST_EXCLUDE; } else { int i; for (i=0; i<psl->sl_count; i++) { if (ipv6_addr_equal(&psl->sl_addr[i], src_addr)) break; } if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count) rv = 0; if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count) rv = 0; } read_unlock(&mc->sflock); rcu_read_unlock(); return rv; } static void ma_put(struct ifmcaddr6 *mc) { if (atomic_dec_and_test(&mc->mca_refcnt)) { in6_dev_put(mc->idev); kfree(mc); } } static void igmp6_group_added(struct ifmcaddr6 *mc) { struct net_device *dev = mc->idev->dev; char buf[MAX_ADDR_LEN]; spin_lock_bh(&mc->mca_lock); if (!(mc->mca_flags&MAF_LOADED)) { mc->mca_flags |= MAF_LOADED; if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0) dev_mc_add(dev, buf); } spin_unlock_bh(&mc->mca_lock); if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT)) return; if (MLD_V1_SEEN(mc->idev)) { igmp6_join_group(mc); return; } /* else v2 */ mc->mca_crcount = mc->idev->mc_qrv; mld_ifc_event(mc->idev); } static void igmp6_group_dropped(struct ifmcaddr6 *mc) { struct net_device *dev = mc->idev->dev; char buf[MAX_ADDR_LEN]; 
spin_lock_bh(&mc->mca_lock); if (mc->mca_flags&MAF_LOADED) { mc->mca_flags &= ~MAF_LOADED; if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0) dev_mc_del(dev, buf); } if (mc->mca_flags & MAF_NOREPORT) goto done; spin_unlock_bh(&mc->mca_lock); if (!mc->idev->dead) igmp6_leave_group(mc); spin_lock_bh(&mc->mca_lock); if (del_timer(&mc->mca_timer)) atomic_dec(&mc->mca_refcnt); done: ip6_mc_clear_src(mc); spin_unlock_bh(&mc->mca_lock); } /* * deleted ifmcaddr6 manipulation */ static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im) { struct ifmcaddr6 *pmc; /* this is an "ifmcaddr6" for convenience; only the fields below * are actually used. In particular, the refcnt and users are not * used for management of the delete list. Using the same structure * for deleted items allows change reports to use common code with * non-deleted or query-response MCA's. */ pmc = kzalloc(sizeof(*pmc), GFP_ATOMIC); if (!pmc) return; spin_lock_bh(&im->mca_lock); spin_lock_init(&pmc->mca_lock); pmc->idev = im->idev; in6_dev_hold(idev); pmc->mca_addr = im->mca_addr; pmc->mca_crcount = idev->mc_qrv; pmc->mca_sfmode = im->mca_sfmode; if (pmc->mca_sfmode == MCAST_INCLUDE) { struct ip6_sf_list *psf; pmc->mca_tomb = im->mca_tomb; pmc->mca_sources = im->mca_sources; im->mca_tomb = im->mca_sources = NULL; for (psf=pmc->mca_sources; psf; psf=psf->sf_next) psf->sf_crcount = pmc->mca_crcount; } spin_unlock_bh(&im->mca_lock); spin_lock_bh(&idev->mc_lock); pmc->next = idev->mc_tomb; idev->mc_tomb = pmc; spin_unlock_bh(&idev->mc_lock); } static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca) { struct ifmcaddr6 *pmc, *pmc_prev; struct ip6_sf_list *psf, *psf_next; spin_lock_bh(&idev->mc_lock); pmc_prev = NULL; for (pmc=idev->mc_tomb; pmc; pmc=pmc->next) { if (ipv6_addr_equal(&pmc->mca_addr, pmca)) break; pmc_prev = pmc; } if (pmc) { if (pmc_prev) pmc_prev->next = pmc->next; else idev->mc_tomb = pmc->next; } spin_unlock_bh(&idev->mc_lock); if (pmc) { for (psf=pmc->mca_tomb; psf; psf=psf_next) { psf_next = psf->sf_next; kfree(psf); } in6_dev_put(pmc->idev); kfree(pmc); } } static void mld_clear_delrec(struct inet6_dev *idev) { struct ifmcaddr6 *pmc, *nextpmc; spin_lock_bh(&idev->mc_lock); pmc = idev->mc_tomb; idev->mc_tomb = NULL; spin_unlock_bh(&idev->mc_lock); for (; pmc; pmc = nextpmc) { nextpmc = pmc->next; ip6_mc_clear_src(pmc); in6_dev_put(pmc->idev); kfree(pmc); } /* clear dead sources, too */ read_lock_bh(&idev->lock); for (pmc=idev->mc_list; pmc; pmc=pmc->next) { struct ip6_sf_list *psf, *psf_next; spin_lock_bh(&pmc->mca_lock); psf = pmc->mca_tomb; pmc->mca_tomb = NULL; spin_unlock_bh(&pmc->mca_lock); for (; psf; psf=psf_next) { psf_next = psf->sf_next; kfree(psf); } } read_unlock_bh(&idev->lock); } /* * device multicast group inc (add if not found) */ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr) { struct ifmcaddr6 *mc; struct inet6_dev *idev; /* we need to take a reference on idev */ idev = in6_dev_get(dev); if (idev == NULL) return -EINVAL; write_lock_bh(&idev->lock); if (idev->dead) { write_unlock_bh(&idev->lock); in6_dev_put(idev); return -ENODEV; } for (mc = idev->mc_list; mc; mc = mc->next) { if (ipv6_addr_equal(&mc->mca_addr, addr)) { mc->mca_users++; write_unlock_bh(&idev->lock); ip6_mc_add_src(idev, &mc->mca_addr, MCAST_EXCLUDE, 0, NULL, 0); in6_dev_put(idev); return 0; } } /* * not found: create a new one. 
*/ mc = kzalloc(sizeof(struct ifmcaddr6), GFP_ATOMIC); if (mc == NULL) { write_unlock_bh(&idev->lock); in6_dev_put(idev); return -ENOMEM; } setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc); ipv6_addr_copy(&mc->mca_addr, addr); mc->idev = idev; /* (reference taken) */ mc->mca_users = 1; /* mca_stamp should be updated upon changes */ mc->mca_cstamp = mc->mca_tstamp = jiffies; atomic_set(&mc->mca_refcnt, 2); spin_lock_init(&mc->mca_lock); /* initial mode is (EX, empty) */ mc->mca_sfmode = MCAST_EXCLUDE; mc->mca_sfcount[MCAST_EXCLUDE] = 1; if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) || IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL) mc->mca_flags |= MAF_NOREPORT; mc->next = idev->mc_list; idev->mc_list = mc; write_unlock_bh(&idev->lock); mld_del_delrec(idev, &mc->mca_addr); igmp6_group_added(mc); ma_put(mc); return 0; } /* * device multicast group del */ int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr) { struct ifmcaddr6 *ma, **map; write_lock_bh(&idev->lock); for (map = &idev->mc_list; (ma=*map) != NULL; map = &ma->next) { if (ipv6_addr_equal(&ma->mca_addr, addr)) { if (--ma->mca_users == 0) { *map = ma->next; write_unlock_bh(&idev->lock); igmp6_group_dropped(ma); ma_put(ma); return 0; } write_unlock_bh(&idev->lock); return 0; } } write_unlock_bh(&idev->lock); return -ENOENT; } int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr) { struct inet6_dev *idev; int err; rcu_read_lock(); idev = __in6_dev_get(dev); if (!idev) err = -ENODEV; else err = __ipv6_dev_mc_dec(idev, addr); rcu_read_unlock(); return err; } /* * identify MLD packets for MLD filter exceptions */ int ipv6_is_mld(struct sk_buff *skb, int nexthdr) { struct icmp6hdr *pic; if (nexthdr != IPPROTO_ICMPV6) return 0; if (!pskb_may_pull(skb, sizeof(struct icmp6hdr))) return 0; pic = icmp6_hdr(skb); switch (pic->icmp6_type) { case ICMPV6_MGM_QUERY: case ICMPV6_MGM_REPORT: case ICMPV6_MGM_REDUCTION: case ICMPV6_MLD2_REPORT: return 1; default: break; } return 0; } /* * check if the interface/address pair is valid */ int ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group, const struct in6_addr *src_addr) { struct inet6_dev *idev; struct ifmcaddr6 *mc; int rv = 0; rcu_read_lock(); idev = __in6_dev_get(dev); if (idev) { read_lock_bh(&idev->lock); for (mc = idev->mc_list; mc; mc=mc->next) { if (ipv6_addr_equal(&mc->mca_addr, group)) break; } if (mc) { if (src_addr && !ipv6_addr_any(src_addr)) { struct ip6_sf_list *psf; spin_lock_bh(&mc->mca_lock); for (psf=mc->mca_sources;psf;psf=psf->sf_next) { if (ipv6_addr_equal(&psf->sf_addr, src_addr)) break; } if (psf) rv = psf->sf_count[MCAST_INCLUDE] || psf->sf_count[MCAST_EXCLUDE] != mc->mca_sfcount[MCAST_EXCLUDE]; else rv = mc->mca_sfcount[MCAST_EXCLUDE] !=0; spin_unlock_bh(&mc->mca_lock); } else rv = 1; /* don't filter unspecified source */ } read_unlock_bh(&idev->lock); } rcu_read_unlock(); return rv; } static void mld_gq_start_timer(struct inet6_dev *idev) { int tv = net_random() % idev->mc_maxdelay; idev->mc_gq_running = 1; if (!mod_timer(&idev->mc_gq_timer, jiffies+tv+2)) in6_dev_hold(idev); } static void mld_ifc_start_timer(struct inet6_dev *idev, int delay) { int tv = net_random() % delay; if (!mod_timer(&idev->mc_ifc_timer, jiffies+tv+2)) in6_dev_hold(idev); } /* * IGMP handling (alias multicast ICMPv6 messages) */ static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime) { unsigned long delay = resptime; /* Do not start timer for these addresses */ if 
(ipv6_addr_is_ll_all_nodes(&ma->mca_addr) || IPV6_ADDR_MC_SCOPE(&ma->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL) return; if (del_timer(&ma->mca_timer)) { atomic_dec(&ma->mca_refcnt); delay = ma->mca_timer.expires - jiffies; } if (delay >= resptime) { if (resptime) delay = net_random() % resptime; else delay = 1; } ma->mca_timer.expires = jiffies + delay; if (!mod_timer(&ma->mca_timer, jiffies + delay)) atomic_inc(&ma->mca_refcnt); ma->mca_flags |= MAF_TIMER_RUNNING; } /* mark EXCLUDE-mode sources */ static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs, const struct in6_addr *srcs) { struct ip6_sf_list *psf; int i, scount; scount = 0; for (psf=pmc->mca_sources; psf; psf=psf->sf_next) { if (scount == nsrcs) break; for (i=0; i<nsrcs; i++) { /* skip inactive filters */ if (psf->sf_count[MCAST_INCLUDE] || pmc->mca_sfcount[MCAST_EXCLUDE] != psf->sf_count[MCAST_EXCLUDE]) continue; if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) { scount++; break; } } } pmc->mca_flags &= ~MAF_GSQUERY; if (scount == nsrcs) /* all sources excluded */ return 0; return 1; } static int mld_marksources(struct ifmcaddr6 *pmc, int nsrcs, const struct in6_addr *srcs) { struct ip6_sf_list *psf; int i, scount; if (pmc->mca_sfmode == MCAST_EXCLUDE) return mld_xmarksources(pmc, nsrcs, srcs); /* mark INCLUDE-mode sources */ scount = 0; for (psf=pmc->mca_sources; psf; psf=psf->sf_next) { if (scount == nsrcs) break; for (i=0; i<nsrcs; i++) { if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) { psf->sf_gsresp = 1; scount++; break; } } } if (!scount) { pmc->mca_flags &= ~MAF_GSQUERY; return 0; } pmc->mca_flags |= MAF_GSQUERY; return 1; } /* called with rcu_read_lock() */ int igmp6_event_query(struct sk_buff *skb) { struct mld2_query *mlh2 = NULL; struct ifmcaddr6 *ma; const struct in6_addr *group; unsigned long max_delay; struct inet6_dev *idev; struct mld_msg *mld; int group_type; int mark = 0; int len; if (!pskb_may_pull(skb, sizeof(struct in6_addr))) return -EINVAL; /* compute payload length excluding extension headers */ len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr); len -= skb_network_header_len(skb); /* Drop queries with not link local source */ if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) return -EINVAL; idev = __in6_dev_get(skb->dev); if (idev == NULL) return 0; mld = (struct mld_msg *)icmp6_hdr(skb); group = &mld->mld_mca; group_type = ipv6_addr_type(group); if (group_type != IPV6_ADDR_ANY && !(group_type&IPV6_ADDR_MULTICAST)) return -EINVAL; if (len == 24) { int switchback; /* MLDv1 router present */ /* Translate milliseconds to jiffies */ max_delay = (ntohs(mld->mld_maxdelay)*HZ)/1000; switchback = (idev->mc_qrv + 1) * max_delay; idev->mc_v1_seen = jiffies + switchback; /* cancel the interface change timer */ idev->mc_ifc_count = 0; if (del_timer(&idev->mc_ifc_timer)) __in6_dev_put(idev); /* clear deleted report items */ mld_clear_delrec(idev); } else if (len >= 28) { int srcs_offset = sizeof(struct mld2_query) - sizeof(struct icmp6hdr); if (!pskb_may_pull(skb, srcs_offset)) return -EINVAL; mlh2 = (struct mld2_query *)skb_transport_header(skb); max_delay = (MLDV2_MRC(ntohs(mlh2->mld2q_mrc))*HZ)/1000; if (!max_delay) max_delay = 1; idev->mc_maxdelay = max_delay; if (mlh2->mld2q_qrv) idev->mc_qrv = mlh2->mld2q_qrv; if (group_type == IPV6_ADDR_ANY) { /* general query */ if (mlh2->mld2q_nsrcs) return -EINVAL; /* no sources allowed */ mld_gq_start_timer(idev); return 0; } /* mark sources to include, if group & source-specific */ if (mlh2->mld2q_nsrcs != 0) { if (!pskb_may_pull(skb, 
srcs_offset + ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr))) return -EINVAL; mlh2 = (struct mld2_query *)skb_transport_header(skb); mark = 1; } } else return -EINVAL; read_lock_bh(&idev->lock); if (group_type == IPV6_ADDR_ANY) { for (ma = idev->mc_list; ma; ma=ma->next) { spin_lock_bh(&ma->mca_lock); igmp6_group_queried(ma, max_delay); spin_unlock_bh(&ma->mca_lock); } } else { for (ma = idev->mc_list; ma; ma=ma->next) { if (!ipv6_addr_equal(group, &ma->mca_addr)) continue; spin_lock_bh(&ma->mca_lock); if (ma->mca_flags & MAF_TIMER_RUNNING) { /* gsquery <- gsquery && mark */ if (!mark) ma->mca_flags &= ~MAF_GSQUERY; } else { /* gsquery <- mark */ if (mark) ma->mca_flags |= MAF_GSQUERY; else ma->mca_flags &= ~MAF_GSQUERY; } if (!(ma->mca_flags & MAF_GSQUERY) || mld_marksources(ma, ntohs(mlh2->mld2q_nsrcs), mlh2->mld2q_srcs)) igmp6_group_queried(ma, max_delay); spin_unlock_bh(&ma->mca_lock); break; } } read_unlock_bh(&idev->lock); return 0; } /* called with rcu_read_lock() */ int igmp6_event_report(struct sk_buff *skb) { struct ifmcaddr6 *ma; struct inet6_dev *idev; struct mld_msg *mld; int addr_type; /* Our own report looped back. Ignore it. */ if (skb->pkt_type == PACKET_LOOPBACK) return 0; /* send our report if the MC router may not have heard this report */ if (skb->pkt_type != PACKET_MULTICAST && skb->pkt_type != PACKET_BROADCAST) return 0; if (!pskb_may_pull(skb, sizeof(*mld) - sizeof(struct icmp6hdr))) return -EINVAL; mld = (struct mld_msg *)icmp6_hdr(skb); /* Drop reports with not link local source */ addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr); if (addr_type != IPV6_ADDR_ANY && !(addr_type&IPV6_ADDR_LINKLOCAL)) return -EINVAL; idev = __in6_dev_get(skb->dev); if (idev == NULL) return -ENODEV; /* * Cancel the timer for this group */ read_lock_bh(&idev->lock); for (ma = idev->mc_list; ma; ma=ma->next) { if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) { spin_lock(&ma->mca_lock); if (del_timer(&ma->mca_timer)) atomic_dec(&ma->mca_refcnt); ma->mca_flags &= ~(MAF_LAST_REPORTER|MAF_TIMER_RUNNING); spin_unlock(&ma->mca_lock); break; } } read_unlock_bh(&idev->lock); return 0; } static int is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type, int gdeleted, int sdeleted) { switch (type) { case MLD2_MODE_IS_INCLUDE: case MLD2_MODE_IS_EXCLUDE: if (gdeleted || sdeleted) return 0; if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) { if (pmc->mca_sfmode == MCAST_INCLUDE) return 1; /* don't include if this source is excluded * in all filters */ if (psf->sf_count[MCAST_INCLUDE]) return type == MLD2_MODE_IS_INCLUDE; return pmc->mca_sfcount[MCAST_EXCLUDE] == psf->sf_count[MCAST_EXCLUDE]; } return 0; case MLD2_CHANGE_TO_INCLUDE: if (gdeleted || sdeleted) return 0; return psf->sf_count[MCAST_INCLUDE] != 0; case MLD2_CHANGE_TO_EXCLUDE: if (gdeleted || sdeleted) return 0; if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 || psf->sf_count[MCAST_INCLUDE]) return 0; return pmc->mca_sfcount[MCAST_EXCLUDE] == psf->sf_count[MCAST_EXCLUDE]; case MLD2_ALLOW_NEW_SOURCES: if (gdeleted || !psf->sf_crcount) return 0; return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted; case MLD2_BLOCK_OLD_SOURCES: if (pmc->mca_sfmode == MCAST_INCLUDE) return gdeleted || (psf->sf_crcount && sdeleted); return psf->sf_crcount && !gdeleted && !sdeleted; } return 0; } static int mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted) { struct ip6_sf_list *psf; int scount = 0; for (psf=pmc->mca_sources; psf; psf=psf->sf_next) { if (!is_in(pmc, psf, type, gdeleted, sdeleted)) continue; scount++; } return 
scount; } static struct sk_buff *mld_newpack(struct net_device *dev, int size) { struct net *net = dev_net(dev); struct sock *sk = net->ipv6.igmp_sk; struct sk_buff *skb; struct mld2_report *pmr; struct in6_addr addr_buf; const struct in6_addr *saddr; int err; u8 ra[8] = { IPPROTO_ICMPV6, 0, IPV6_TLV_ROUTERALERT, 2, 0, 0, IPV6_TLV_PADN, 0 }; /* we assume size > sizeof(ra) here */ size += LL_ALLOCATED_SPACE(dev); /* limit our allocations to order-0 page */ size = min_t(int, size, SKB_MAX_ORDER(0, 0)); skb = sock_alloc_send_skb(sk, size, 1, &err); if (!skb) return NULL; skb_reserve(skb, LL_RESERVED_SPACE(dev)); if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) { /* <draft-ietf-magma-mld-source-05.txt>: * use unspecified address as the source address * when a valid link-local address is not available. */ saddr = &in6addr_any; } else saddr = &addr_buf; ip6_nd_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0); memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra)); skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data); skb_put(skb, sizeof(*pmr)); pmr = (struct mld2_report *)skb_transport_header(skb); pmr->mld2r_type = ICMPV6_MLD2_REPORT; pmr->mld2r_resv1 = 0; pmr->mld2r_cksum = 0; pmr->mld2r_resv2 = 0; pmr->mld2r_ngrec = 0; return skb; } static void mld_sendpack(struct sk_buff *skb) { struct ipv6hdr *pip6 = ipv6_hdr(skb); struct mld2_report *pmr = (struct mld2_report *)skb_transport_header(skb); int payload_len, mldlen; struct inet6_dev *idev; struct net *net = dev_net(skb->dev); int err; struct flowi6 fl6; struct dst_entry *dst; rcu_read_lock(); idev = __in6_dev_get(skb->dev); IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len); payload_len = (skb->tail - skb->network_header) - sizeof(*pip6); mldlen = skb->tail - skb->transport_header; pip6->payload_len = htons(payload_len); pmr->mld2r_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen, IPPROTO_ICMPV6, csum_partial(skb_transport_header(skb), mldlen, 0)); dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr); if (!dst) { err = -ENOMEM; goto err_out; } icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT, &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, skb->dev->ifindex); dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0); err = 0; if (IS_ERR(dst)) { err = PTR_ERR(dst); dst = NULL; } skb_dst_set(skb, dst); if (err) goto err_out; payload_len = skb->len; err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev, dst_output); out: if (!err) { ICMP6MSGOUT_INC_STATS_BH(net, idev, ICMPV6_MLD2_REPORT); ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS); IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_OUTMCAST, payload_len); } else IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_OUTDISCARDS); rcu_read_unlock(); return; err_out: kfree_skb(skb); goto out; } static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel) { return sizeof(struct mld2_grec) + 16 * mld_scount(pmc,type,gdel,sdel); } static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc, int type, struct mld2_grec **ppgr) { struct net_device *dev = pmc->idev->dev; struct mld2_report *pmr; struct mld2_grec *pgr; if (!skb) skb = mld_newpack(dev, dev->mtu); if (!skb) return NULL; pgr = (struct mld2_grec *)skb_put(skb, sizeof(struct mld2_grec)); pgr->grec_type = type; pgr->grec_auxwords = 0; pgr->grec_nsrcs = 0; pgr->grec_mca = pmc->mca_addr; /* structure copy */ pmr = (struct mld2_report *)skb_transport_header(skb); pmr->mld2r_ngrec = htons(ntohs(pmr->mld2r_ngrec)+1); *ppgr = pgr; return skb; } #define AVAILABLE(skb) ((skb) 
? ((skb)->dev ? (skb)->dev->mtu - (skb)->len : \ skb_tailroom(skb)) : 0) static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted) { struct net_device *dev = pmc->idev->dev; struct mld2_report *pmr; struct mld2_grec *pgr = NULL; struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list; int scount, stotal, first, isquery, truncate; if (pmc->mca_flags & MAF_NOREPORT) return skb; isquery = type == MLD2_MODE_IS_INCLUDE || type == MLD2_MODE_IS_EXCLUDE; truncate = type == MLD2_MODE_IS_EXCLUDE || type == MLD2_CHANGE_TO_EXCLUDE; stotal = scount = 0; psf_list = sdeleted ? &pmc->mca_tomb : &pmc->mca_sources; if (!*psf_list) goto empty_source; pmr = skb ? (struct mld2_report *)skb_transport_header(skb) : NULL; /* EX and TO_EX get a fresh packet, if needed */ if (truncate) { if (pmr && pmr->mld2r_ngrec && AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) { if (skb) mld_sendpack(skb); skb = mld_newpack(dev, dev->mtu); } } first = 1; psf_prev = NULL; for (psf=*psf_list; psf; psf=psf_next) { struct in6_addr *psrc; psf_next = psf->sf_next; if (!is_in(pmc, psf, type, gdeleted, sdeleted)) { psf_prev = psf; continue; } /* clear marks on query responses */ if (isquery) psf->sf_gsresp = 0; if (AVAILABLE(skb) < sizeof(*psrc) + first*sizeof(struct mld2_grec)) { if (truncate && !first) break; /* truncate these */ if (pgr) pgr->grec_nsrcs = htons(scount); if (skb) mld_sendpack(skb); skb = mld_newpack(dev, dev->mtu); first = 1; scount = 0; } if (first) { skb = add_grhead(skb, pmc, type, &pgr); first = 0; } if (!skb) return NULL; psrc = (struct in6_addr *)skb_put(skb, sizeof(*psrc)); *psrc = psf->sf_addr; scount++; stotal++; if ((type == MLD2_ALLOW_NEW_SOURCES || type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount) { psf->sf_crcount--; if ((sdeleted || gdeleted) && psf->sf_crcount == 0) { if (psf_prev) psf_prev->sf_next = psf->sf_next; else *psf_list = psf->sf_next; kfree(psf); continue; } } psf_prev = psf; } empty_source: if (!stotal) { if (type == MLD2_ALLOW_NEW_SOURCES || type == MLD2_BLOCK_OLD_SOURCES) return skb; if (pmc->mca_crcount || isquery) { /* make sure we have room for group header */ if (skb && AVAILABLE(skb) < sizeof(struct mld2_grec)) { mld_sendpack(skb); skb = NULL; /* add_grhead will get a new one */ } skb = add_grhead(skb, pmc, type, &pgr); } } if (pgr) pgr->grec_nsrcs = htons(scount); if (isquery) pmc->mca_flags &= ~MAF_GSQUERY; /* clear query state */ return skb; } static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc) { struct sk_buff *skb = NULL; int type; if (!pmc) { read_lock_bh(&idev->lock); for (pmc=idev->mc_list; pmc; pmc=pmc->next) { if (pmc->mca_flags & MAF_NOREPORT) continue; spin_lock_bh(&pmc->mca_lock); if (pmc->mca_sfcount[MCAST_EXCLUDE]) type = MLD2_MODE_IS_EXCLUDE; else type = MLD2_MODE_IS_INCLUDE; skb = add_grec(skb, pmc, type, 0, 0); spin_unlock_bh(&pmc->mca_lock); } read_unlock_bh(&idev->lock); } else { spin_lock_bh(&pmc->mca_lock); if (pmc->mca_sfcount[MCAST_EXCLUDE]) type = MLD2_MODE_IS_EXCLUDE; else type = MLD2_MODE_IS_INCLUDE; skb = add_grec(skb, pmc, type, 0, 0); spin_unlock_bh(&pmc->mca_lock); } if (skb) mld_sendpack(skb); } /* * remove zero-count source records from a source filter list */ static void mld_clear_zeros(struct ip6_sf_list **ppsf) { struct ip6_sf_list *psf_prev, *psf_next, *psf; psf_prev = NULL; for (psf=*ppsf; psf; psf = psf_next) { psf_next = psf->sf_next; if (psf->sf_crcount == 0) { if (psf_prev) psf_prev->sf_next = psf->sf_next; else *ppsf = psf->sf_next; kfree(psf); } 
else psf_prev = psf; } } static void mld_send_cr(struct inet6_dev *idev) { struct ifmcaddr6 *pmc, *pmc_prev, *pmc_next; struct sk_buff *skb = NULL; int type, dtype; read_lock_bh(&idev->lock); spin_lock(&idev->mc_lock); /* deleted MCA's */ pmc_prev = NULL; for (pmc=idev->mc_tomb; pmc; pmc=pmc_next) { pmc_next = pmc->next; if (pmc->mca_sfmode == MCAST_INCLUDE) { type = MLD2_BLOCK_OLD_SOURCES; dtype = MLD2_BLOCK_OLD_SOURCES; skb = add_grec(skb, pmc, type, 1, 0); skb = add_grec(skb, pmc, dtype, 1, 1); } if (pmc->mca_crcount) { if (pmc->mca_sfmode == MCAST_EXCLUDE) { type = MLD2_CHANGE_TO_INCLUDE; skb = add_grec(skb, pmc, type, 1, 0); } pmc->mca_crcount--; if (pmc->mca_crcount == 0) { mld_clear_zeros(&pmc->mca_tomb); mld_clear_zeros(&pmc->mca_sources); } } if (pmc->mca_crcount == 0 && !pmc->mca_tomb && !pmc->mca_sources) { if (pmc_prev) pmc_prev->next = pmc_next; else idev->mc_tomb = pmc_next; in6_dev_put(pmc->idev); kfree(pmc); } else pmc_prev = pmc; } spin_unlock(&idev->mc_lock); /* change recs */ for (pmc=idev->mc_list; pmc; pmc=pmc->next) { spin_lock_bh(&pmc->mca_lock); if (pmc->mca_sfcount[MCAST_EXCLUDE]) { type = MLD2_BLOCK_OLD_SOURCES; dtype = MLD2_ALLOW_NEW_SOURCES; } else { type = MLD2_ALLOW_NEW_SOURCES; dtype = MLD2_BLOCK_OLD_SOURCES; } skb = add_grec(skb, pmc, type, 0, 0); skb = add_grec(skb, pmc, dtype, 0, 1); /* deleted sources */ /* filter mode changes */ if (pmc->mca_crcount) { if (pmc->mca_sfmode == MCAST_EXCLUDE) type = MLD2_CHANGE_TO_EXCLUDE; else type = MLD2_CHANGE_TO_INCLUDE; skb = add_grec(skb, pmc, type, 0, 0); pmc->mca_crcount--; } spin_unlock_bh(&pmc->mca_lock); } read_unlock_bh(&idev->lock); if (!skb) return; (void) mld_sendpack(skb); } static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) { struct net *net = dev_net(dev); struct sock *sk = net->ipv6.igmp_sk; struct inet6_dev *idev; struct sk_buff *skb; struct mld_msg *hdr; const struct in6_addr *snd_addr, *saddr; struct in6_addr addr_buf; int err, len, payload_len, full_len; u8 ra[8] = { IPPROTO_ICMPV6, 0, IPV6_TLV_ROUTERALERT, 2, 0, 0, IPV6_TLV_PADN, 0 }; struct flowi6 fl6; struct dst_entry *dst; if (type == ICMPV6_MGM_REDUCTION) snd_addr = &in6addr_linklocal_allrouters; else snd_addr = addr; len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr); payload_len = len + sizeof(ra); full_len = sizeof(struct ipv6hdr) + payload_len; rcu_read_lock(); IP6_UPD_PO_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_OUT, full_len); rcu_read_unlock(); skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + full_len, 1, &err); if (skb == NULL) { rcu_read_lock(); IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_OUTDISCARDS); rcu_read_unlock(); return; } skb_reserve(skb, LL_RESERVED_SPACE(dev)); if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) { /* <draft-ietf-magma-mld-source-05.txt>: * use unspecified address as the source address * when a valid link-local address is not available. 
*/ saddr = &in6addr_any; } else saddr = &addr_buf; ip6_nd_hdr(sk, skb, dev, saddr, snd_addr, NEXTHDR_HOP, payload_len); memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra)); hdr = (struct mld_msg *) skb_put(skb, sizeof(struct mld_msg)); memset(hdr, 0, sizeof(struct mld_msg)); hdr->mld_type = type; ipv6_addr_copy(&hdr->mld_mca, addr); hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len, IPPROTO_ICMPV6, csum_partial(hdr, len, 0)); rcu_read_lock(); idev = __in6_dev_get(skb->dev); dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr); if (!dst) { err = -ENOMEM; goto err_out; } icmpv6_flow_init(sk, &fl6, type, &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, skb->dev->ifindex); dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0); if (IS_ERR(dst)) { err = PTR_ERR(dst); goto err_out; } skb_dst_set(skb, dst); err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb->dev, dst_output); out: if (!err) { ICMP6MSGOUT_INC_STATS(net, idev, type); ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS); IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, full_len); } else IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS); rcu_read_unlock(); return; err_out: kfree_skb(skb); goto out; } static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode, const struct in6_addr *psfsrc) { struct ip6_sf_list *psf, *psf_prev; int rv = 0; psf_prev = NULL; for (psf=pmc->mca_sources; psf; psf=psf->sf_next) { if (ipv6_addr_equal(&psf->sf_addr, psfsrc)) break; psf_prev = psf; } if (!psf || psf->sf_count[sfmode] == 0) { /* source filter not found, or count wrong => bug */ return -ESRCH; } psf->sf_count[sfmode]--; if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) { struct inet6_dev *idev = pmc->idev; /* no more filters for this source */ if (psf_prev) psf_prev->sf_next = psf->sf_next; else pmc->mca_sources = psf->sf_next; if (psf->sf_oldin && !(pmc->mca_flags & MAF_NOREPORT) && !MLD_V1_SEEN(idev)) { psf->sf_crcount = idev->mc_qrv; psf->sf_next = pmc->mca_tomb; pmc->mca_tomb = psf; rv = 1; } else kfree(psf); } return rv; } static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca, int sfmode, int sfcount, const struct in6_addr *psfsrc, int delta) { struct ifmcaddr6 *pmc; int changerec = 0; int i, err; if (!idev) return -ENODEV; read_lock_bh(&idev->lock); for (pmc=idev->mc_list; pmc; pmc=pmc->next) { if (ipv6_addr_equal(pmca, &pmc->mca_addr)) break; } if (!pmc) { /* MCA not found?? 
bug */ read_unlock_bh(&idev->lock); return -ESRCH; } spin_lock_bh(&pmc->mca_lock); sf_markstate(pmc); if (!delta) { if (!pmc->mca_sfcount[sfmode]) { spin_unlock_bh(&pmc->mca_lock); read_unlock_bh(&idev->lock); return -EINVAL; } pmc->mca_sfcount[sfmode]--; } err = 0; for (i=0; i<sfcount; i++) { int rv = ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]); changerec |= rv > 0; if (!err && rv < 0) err = rv; } if (pmc->mca_sfmode == MCAST_EXCLUDE && pmc->mca_sfcount[MCAST_EXCLUDE] == 0 && pmc->mca_sfcount[MCAST_INCLUDE]) { struct ip6_sf_list *psf; /* filter mode change */ pmc->mca_sfmode = MCAST_INCLUDE; pmc->mca_crcount = idev->mc_qrv; idev->mc_ifc_count = pmc->mca_crcount; for (psf=pmc->mca_sources; psf; psf = psf->sf_next) psf->sf_crcount = 0; mld_ifc_event(pmc->idev); } else if (sf_setstate(pmc) || changerec) mld_ifc_event(pmc->idev); spin_unlock_bh(&pmc->mca_lock); read_unlock_bh(&idev->lock); return err; } /* * Add multicast single-source filter to the interface list */ static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode, const struct in6_addr *psfsrc, int delta) { struct ip6_sf_list *psf, *psf_prev; psf_prev = NULL; for (psf=pmc->mca_sources; psf; psf=psf->sf_next) { if (ipv6_addr_equal(&psf->sf_addr, psfsrc)) break; psf_prev = psf; } if (!psf) { psf = kzalloc(sizeof(*psf), GFP_ATOMIC); if (!psf) return -ENOBUFS; psf->sf_addr = *psfsrc; if (psf_prev) { psf_prev->sf_next = psf; } else pmc->mca_sources = psf; } psf->sf_count[sfmode]++; return 0; } static void sf_markstate(struct ifmcaddr6 *pmc) { struct ip6_sf_list *psf; int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE]; for (psf=pmc->mca_sources; psf; psf=psf->sf_next) if (pmc->mca_sfcount[MCAST_EXCLUDE]) { psf->sf_oldin = mca_xcount == psf->sf_count[MCAST_EXCLUDE] && !psf->sf_count[MCAST_INCLUDE]; } else psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0; } static int sf_setstate(struct ifmcaddr6 *pmc) { struct ip6_sf_list *psf, *dpsf; int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE]; int qrv = pmc->idev->mc_qrv; int new_in, rv; rv = 0; for (psf=pmc->mca_sources; psf; psf=psf->sf_next) { if (pmc->mca_sfcount[MCAST_EXCLUDE]) { new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] && !psf->sf_count[MCAST_INCLUDE]; } else new_in = psf->sf_count[MCAST_INCLUDE] != 0; if (new_in) { if (!psf->sf_oldin) { struct ip6_sf_list *prev = NULL; for (dpsf=pmc->mca_tomb; dpsf; dpsf=dpsf->sf_next) { if (ipv6_addr_equal(&dpsf->sf_addr, &psf->sf_addr)) break; prev = dpsf; } if (dpsf) { if (prev) prev->sf_next = dpsf->sf_next; else pmc->mca_tomb = dpsf->sf_next; kfree(dpsf); } psf->sf_crcount = qrv; rv++; } } else if (psf->sf_oldin) { psf->sf_crcount = 0; /* * add or update "delete" records if an active filter * is now inactive */ for (dpsf=pmc->mca_tomb; dpsf; dpsf=dpsf->sf_next) if (ipv6_addr_equal(&dpsf->sf_addr, &psf->sf_addr)) break; if (!dpsf) { dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC); if (!dpsf) continue; *dpsf = *psf; /* pmc->mca_lock held by callers */ dpsf->sf_next = pmc->mca_tomb; pmc->mca_tomb = dpsf; } dpsf->sf_crcount = qrv; rv++; } } return rv; } /* * Add multicast source filter list to the interface list */ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca, int sfmode, int sfcount, const struct in6_addr *psfsrc, int delta) { struct ifmcaddr6 *pmc; int isexclude; int i, err; if (!idev) return -ENODEV; read_lock_bh(&idev->lock); for (pmc=idev->mc_list; pmc; pmc=pmc->next) { if (ipv6_addr_equal(pmca, &pmc->mca_addr)) break; } if (!pmc) { /* MCA not found?? 
bug */ read_unlock_bh(&idev->lock); return -ESRCH; } spin_lock_bh(&pmc->mca_lock); sf_markstate(pmc); isexclude = pmc->mca_sfmode == MCAST_EXCLUDE; if (!delta) pmc->mca_sfcount[sfmode]++; err = 0; for (i=0; i<sfcount; i++) { err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i], delta); if (err) break; } if (err) { int j; if (!delta) pmc->mca_sfcount[sfmode]--; for (j=0; j<i; j++) ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]); } else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) { struct ip6_sf_list *psf; /* filter mode change */ if (pmc->mca_sfcount[MCAST_EXCLUDE]) pmc->mca_sfmode = MCAST_EXCLUDE; else if (pmc->mca_sfcount[MCAST_INCLUDE]) pmc->mca_sfmode = MCAST_INCLUDE; /* else no filters; keep old mode for reports */ pmc->mca_crcount = idev->mc_qrv; idev->mc_ifc_count = pmc->mca_crcount; for (psf=pmc->mca_sources; psf; psf = psf->sf_next) psf->sf_crcount = 0; mld_ifc_event(idev); } else if (sf_setstate(pmc)) mld_ifc_event(idev); spin_unlock_bh(&pmc->mca_lock); read_unlock_bh(&idev->lock); return err; } static void ip6_mc_clear_src(struct ifmcaddr6 *pmc) { struct ip6_sf_list *psf, *nextpsf; for (psf=pmc->mca_tomb; psf; psf=nextpsf) { nextpsf = psf->sf_next; kfree(psf); } pmc->mca_tomb = NULL; for (psf=pmc->mca_sources; psf; psf=nextpsf) { nextpsf = psf->sf_next; kfree(psf); } pmc->mca_sources = NULL; pmc->mca_sfmode = MCAST_EXCLUDE; pmc->mca_sfcount[MCAST_INCLUDE] = 0; pmc->mca_sfcount[MCAST_EXCLUDE] = 1; } static void igmp6_join_group(struct ifmcaddr6 *ma) { unsigned long delay; if (ma->mca_flags & MAF_NOREPORT) return; igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT); delay = net_random() % IGMP6_UNSOLICITED_IVAL; spin_lock_bh(&ma->mca_lock); if (del_timer(&ma->mca_timer)) { atomic_dec(&ma->mca_refcnt); delay = ma->mca_timer.expires - jiffies; } if (!mod_timer(&ma->mca_timer, jiffies + delay)) atomic_inc(&ma->mca_refcnt); ma->mca_flags |= MAF_TIMER_RUNNING | MAF_LAST_REPORTER; spin_unlock_bh(&ma->mca_lock); } static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml, struct inet6_dev *idev) { int err; /* callers have the socket lock and a write lock on ipv6_sk_mc_lock, * so no other readers or writers of iml or its sflist */ if (!iml->sflist) { /* any-source empty exclude case */ return ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0); } err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, iml->sflist->sl_count, iml->sflist->sl_addr, 0); sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max)); iml->sflist = NULL; return err; } static void igmp6_leave_group(struct ifmcaddr6 *ma) { if (MLD_V1_SEEN(ma->idev)) { if (ma->mca_flags & MAF_LAST_REPORTER) igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REDUCTION); } else { mld_add_delrec(ma->idev, ma); mld_ifc_event(ma->idev); } } static void mld_gq_timer_expire(unsigned long data) { struct inet6_dev *idev = (struct inet6_dev *)data; idev->mc_gq_running = 0; mld_send_report(idev, NULL); __in6_dev_put(idev); } static void mld_ifc_timer_expire(unsigned long data) { struct inet6_dev *idev = (struct inet6_dev *)data; mld_send_cr(idev); if (idev->mc_ifc_count) { idev->mc_ifc_count--; if (idev->mc_ifc_count) mld_ifc_start_timer(idev, idev->mc_maxdelay); } __in6_dev_put(idev); } static void mld_ifc_event(struct inet6_dev *idev) { if (MLD_V1_SEEN(idev)) return; idev->mc_ifc_count = idev->mc_qrv; mld_ifc_start_timer(idev, 1); } static void igmp6_timer_handler(unsigned long data) { struct ifmcaddr6 *ma = (struct ifmcaddr6 *) data; if (MLD_V1_SEEN(ma->idev)) igmp6_send(&ma->mca_addr, ma->idev->dev, 
ICMPV6_MGM_REPORT); else mld_send_report(ma->idev, ma); spin_lock(&ma->mca_lock); ma->mca_flags |= MAF_LAST_REPORTER; ma->mca_flags &= ~MAF_TIMER_RUNNING; spin_unlock(&ma->mca_lock); ma_put(ma); } /* Device changing type */ void ipv6_mc_unmap(struct inet6_dev *idev) { struct ifmcaddr6 *i; /* Install multicast list, except for all-nodes (already installed) */ read_lock_bh(&idev->lock); for (i = idev->mc_list; i; i = i->next) igmp6_group_dropped(i); read_unlock_bh(&idev->lock); } void ipv6_mc_remap(struct inet6_dev *idev) { ipv6_mc_up(idev); } /* Device going down */ void ipv6_mc_down(struct inet6_dev *idev) { struct ifmcaddr6 *i; /* Withdraw multicast list */ read_lock_bh(&idev->lock); idev->mc_ifc_count = 0; if (del_timer(&idev->mc_ifc_timer)) __in6_dev_put(idev); idev->mc_gq_running = 0; if (del_timer(&idev->mc_gq_timer)) __in6_dev_put(idev); for (i = idev->mc_list; i; i=i->next) igmp6_group_dropped(i); read_unlock_bh(&idev->lock); mld_clear_delrec(idev); } /* Device going up */ void ipv6_mc_up(struct inet6_dev *idev) { struct ifmcaddr6 *i; /* Install multicast list, except for all-nodes (already installed) */ read_lock_bh(&idev->lock); for (i = idev->mc_list; i; i=i->next) igmp6_group_added(i); read_unlock_bh(&idev->lock); } /* IPv6 device initialization. */ void ipv6_mc_init_dev(struct inet6_dev *idev) { write_lock_bh(&idev->lock); spin_lock_init(&idev->mc_lock); idev->mc_gq_running = 0; setup_timer(&idev->mc_gq_timer, mld_gq_timer_expire, (unsigned long)idev); idev->mc_tomb = NULL; idev->mc_ifc_count = 0; setup_timer(&idev->mc_ifc_timer, mld_ifc_timer_expire, (unsigned long)idev); idev->mc_qrv = MLD_QRV_DEFAULT; idev->mc_maxdelay = IGMP6_UNSOLICITED_IVAL; idev->mc_v1_seen = 0; write_unlock_bh(&idev->lock); } /* * Device is about to be destroyed: clean up. */ void ipv6_mc_destroy_dev(struct inet6_dev *idev) { struct ifmcaddr6 *i; /* Deactivate timers */ ipv6_mc_down(idev); /* Delete all-nodes address. */ /* We cannot call ipv6_dev_mc_dec() directly, our caller in * addrconf.c has NULL'd out dev->ip6_ptr so in6_dev_get() will * fail. 
*/ __ipv6_dev_mc_dec(idev, &in6addr_linklocal_allnodes); if (idev->cnf.forwarding) __ipv6_dev_mc_dec(idev, &in6addr_linklocal_allrouters); write_lock_bh(&idev->lock); while ((i = idev->mc_list) != NULL) { idev->mc_list = i->next; write_unlock_bh(&idev->lock); igmp6_group_dropped(i); ma_put(i); write_lock_bh(&idev->lock); } write_unlock_bh(&idev->lock); } #ifdef CONFIG_PROC_FS struct igmp6_mc_iter_state { struct seq_net_private p; struct net_device *dev; struct inet6_dev *idev; }; #define igmp6_mc_seq_private(seq) ((struct igmp6_mc_iter_state *)(seq)->private) static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq) { struct ifmcaddr6 *im = NULL; struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq); struct net *net = seq_file_net(seq); state->idev = NULL; for_each_netdev_rcu(net, state->dev) { struct inet6_dev *idev; idev = __in6_dev_get(state->dev); if (!idev) continue; read_lock_bh(&idev->lock); im = idev->mc_list; if (im) { state->idev = idev; break; } read_unlock_bh(&idev->lock); } return im; } static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr6 *im) { struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq); im = im->next; while (!im) { if (likely(state->idev != NULL)) read_unlock_bh(&state->idev->lock); state->dev = next_net_device_rcu(state->dev); if (!state->dev) { state->idev = NULL; break; } state->idev = __in6_dev_get(state->dev); if (!state->idev) continue; read_lock_bh(&state->idev->lock); im = state->idev->mc_list; } return im; } static struct ifmcaddr6 *igmp6_mc_get_idx(struct seq_file *seq, loff_t pos) { struct ifmcaddr6 *im = igmp6_mc_get_first(seq); if (im) while (pos && (im = igmp6_mc_get_next(seq, im)) != NULL) --pos; return pos ? NULL : im; } static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { rcu_read_lock(); return igmp6_mc_get_idx(seq, *pos); } static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct ifmcaddr6 *im = igmp6_mc_get_next(seq, v); ++*pos; return im; } static void igmp6_mc_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq); if (likely(state->idev != NULL)) { read_unlock_bh(&state->idev->lock); state->idev = NULL; } state->dev = NULL; rcu_read_unlock(); } static int igmp6_mc_seq_show(struct seq_file *seq, void *v) { struct ifmcaddr6 *im = (struct ifmcaddr6 *)v; struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq); seq_printf(seq, "%-4d %-15s %pi6 %5d %08X %ld\n", state->dev->ifindex, state->dev->name, &im->mca_addr, im->mca_users, im->mca_flags, (im->mca_flags&MAF_TIMER_RUNNING) ? 
jiffies_to_clock_t(im->mca_timer.expires-jiffies) : 0); return 0; } static const struct seq_operations igmp6_mc_seq_ops = { .start = igmp6_mc_seq_start, .next = igmp6_mc_seq_next, .stop = igmp6_mc_seq_stop, .show = igmp6_mc_seq_show, }; static int igmp6_mc_seq_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &igmp6_mc_seq_ops, sizeof(struct igmp6_mc_iter_state)); } static const struct file_operations igmp6_mc_seq_fops = { .owner = THIS_MODULE, .open = igmp6_mc_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; struct igmp6_mcf_iter_state { struct seq_net_private p; struct net_device *dev; struct inet6_dev *idev; struct ifmcaddr6 *im; }; #define igmp6_mcf_seq_private(seq) ((struct igmp6_mcf_iter_state *)(seq)->private) static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq) { struct ip6_sf_list *psf = NULL; struct ifmcaddr6 *im = NULL; struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq); struct net *net = seq_file_net(seq); state->idev = NULL; state->im = NULL; for_each_netdev_rcu(net, state->dev) { struct inet6_dev *idev; idev = __in6_dev_get(state->dev); if (unlikely(idev == NULL)) continue; read_lock_bh(&idev->lock); im = idev->mc_list; if (likely(im != NULL)) { spin_lock_bh(&im->mca_lock); psf = im->mca_sources; if (likely(psf != NULL)) { state->im = im; state->idev = idev; break; } spin_unlock_bh(&im->mca_lock); } read_unlock_bh(&idev->lock); } return psf; } static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_sf_list *psf) { struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq); psf = psf->sf_next; while (!psf) { spin_unlock_bh(&state->im->mca_lock); state->im = state->im->next; while (!state->im) { if (likely(state->idev != NULL)) read_unlock_bh(&state->idev->lock); state->dev = next_net_device_rcu(state->dev); if (!state->dev) { state->idev = NULL; goto out; } state->idev = __in6_dev_get(state->dev); if (!state->idev) continue; read_lock_bh(&state->idev->lock); state->im = state->idev->mc_list; } if (!state->im) break; spin_lock_bh(&state->im->mca_lock); psf = state->im->mca_sources; } out: return psf; } static struct ip6_sf_list *igmp6_mcf_get_idx(struct seq_file *seq, loff_t pos) { struct ip6_sf_list *psf = igmp6_mcf_get_first(seq); if (psf) while (pos && (psf = igmp6_mcf_get_next(seq, psf)) != NULL) --pos; return pos ? NULL : psf; } static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { rcu_read_lock(); return *pos ? 
igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; } static void *igmp6_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct ip6_sf_list *psf; if (v == SEQ_START_TOKEN) psf = igmp6_mcf_get_first(seq); else psf = igmp6_mcf_get_next(seq, v); ++*pos; return psf; } static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq); if (likely(state->im != NULL)) { spin_unlock_bh(&state->im->mca_lock); state->im = NULL; } if (likely(state->idev != NULL)) { read_unlock_bh(&state->idev->lock); state->idev = NULL; } state->dev = NULL; rcu_read_unlock(); } static int igmp6_mcf_seq_show(struct seq_file *seq, void *v) { struct ip6_sf_list *psf = (struct ip6_sf_list *)v; struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq); if (v == SEQ_START_TOKEN) { seq_printf(seq, "%3s %6s " "%32s %32s %6s %6s\n", "Idx", "Device", "Multicast Address", "Source Address", "INC", "EXC"); } else { seq_printf(seq, "%3d %6.6s %pi6 %pi6 %6lu %6lu\n", state->dev->ifindex, state->dev->name, &state->im->mca_addr, &psf->sf_addr, psf->sf_count[MCAST_INCLUDE], psf->sf_count[MCAST_EXCLUDE]); } return 0; } static const struct seq_operations igmp6_mcf_seq_ops = { .start = igmp6_mcf_seq_start, .next = igmp6_mcf_seq_next, .stop = igmp6_mcf_seq_stop, .show = igmp6_mcf_seq_show, }; static int igmp6_mcf_seq_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &igmp6_mcf_seq_ops, sizeof(struct igmp6_mcf_iter_state)); } static const struct file_operations igmp6_mcf_seq_fops = { .owner = THIS_MODULE, .open = igmp6_mcf_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; static int __net_init igmp6_proc_init(struct net *net) { int err; err = -ENOMEM; if (!proc_net_fops_create(net, "igmp6", S_IRUGO, &igmp6_mc_seq_fops)) goto out; if (!proc_net_fops_create(net, "mcfilter6", S_IRUGO, &igmp6_mcf_seq_fops)) goto out_proc_net_igmp6; err = 0; out: return err; out_proc_net_igmp6: proc_net_remove(net, "igmp6"); goto out; } static void __net_exit igmp6_proc_exit(struct net *net) { proc_net_remove(net, "mcfilter6"); proc_net_remove(net, "igmp6"); } #else static inline int igmp6_proc_init(struct net *net) { return 0; } static inline void igmp6_proc_exit(struct net *net) { } #endif static int __net_init igmp6_net_init(struct net *net) { int err; err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, net); if (err < 0) { printk(KERN_ERR "Failed to initialize the IGMP6 control socket (err %d).\n", err); goto out; } inet6_sk(net->ipv6.igmp_sk)->hop_limit = 1; err = igmp6_proc_init(net); if (err) goto out_sock_create; out: return err; out_sock_create: inet_ctl_sock_destroy(net->ipv6.igmp_sk); goto out; } static void __net_exit igmp6_net_exit(struct net *net) { inet_ctl_sock_destroy(net->ipv6.igmp_sk); igmp6_proc_exit(net); } static struct pernet_operations igmp6_net_ops = { .init = igmp6_net_init, .exit = igmp6_net_exit, }; int __init igmp6_init(void) { return register_pernet_subsys(&igmp6_net_ops); } void igmp6_cleanup(void) { unregister_pernet_subsys(&igmp6_net_ops); }
gpl-2.0
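The ip6_mc_add1_src()/ip6_mc_del1_src() pair above maintains one reference count per filter mode (MCAST_INCLUDE/MCAST_EXCLUDE) for every source address, and frees a source only when both counts hit zero. The userspace sketch below models just that bookkeeping; it is illustrative only, and omits the kernel's locking and the "tomb" list kept for MLDv2 retransmission.

#include <stdlib.h>
#include <string.h>

enum { MODE_INCLUDE = 0, MODE_EXCLUDE = 1 };

struct src_filter {
	struct src_filter *next;
	char addr[16];		/* stand-in for struct in6_addr */
	int count[2];		/* per-mode reference counts */
};

/* Mirror of the ip6_mc_add1_src() idea: find-or-create, bump one mode. */
static int add_src(struct src_filter **head, const char *addr, int mode)
{
	struct src_filter *f;

	for (f = *head; f; f = f->next)
		if (!memcmp(f->addr, addr, 16))
			break;
	if (!f) {
		f = calloc(1, sizeof(*f));
		if (!f)
			return -1;
		memcpy(f->addr, addr, 16);
		f->next = *head;
		*head = f;
	}
	f->count[mode]++;
	return 0;
}

/* Mirror of ip6_mc_del1_src(): drop one mode, unlink when fully unused. */
static int del_src(struct src_filter **head, const char *addr, int mode)
{
	struct src_filter *f, **prev = head;

	for (f = *head; f; prev = &f->next, f = f->next)
		if (!memcmp(f->addr, addr, 16))
			break;
	if (!f || f->count[mode] == 0)
		return -1;	/* like the kernel's -ESRCH case */
	if (--f->count[mode] == 0 &&
	    f->count[MODE_INCLUDE] == 0 && f->count[MODE_EXCLUDE] == 0) {
		*prev = f->next;
		free(f);
	}
	return 0;
}

int main(void)
{
	struct src_filter *list = NULL;
	char src[16] = { 0x20, 0x01 };	/* toy address bytes */

	add_src(&list, src, MODE_INCLUDE);
	add_src(&list, src, MODE_EXCLUDE);
	del_src(&list, src, MODE_INCLUDE);	/* node survives: EXCLUDE ref left */
	del_src(&list, src, MODE_EXCLUDE);	/* now freed and unlinked */
	return list != NULL;	/* expect empty list, exit 0 */
}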
Distrotech/linux
drivers/ssb/pci.c
1552
37860
/* * Sonics Silicon Backplane PCI-Hostbus related functions. * * Copyright (C) 2005-2006 Michael Buesch <m@bues.ch> * Copyright (C) 2005 Martin Langer <martin-langer@gmx.de> * Copyright (C) 2005 Stefano Brivio <st3@riseup.net> * Copyright (C) 2005 Danny van Dyk <kugelfang@gentoo.org> * Copyright (C) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch> * * Derived from the Broadcom 4400 device driver. * Copyright (C) 2002 David S. Miller (davem@redhat.com) * Fixed by Pekka Pietikainen (pp@ee.oulu.fi) * Copyright (C) 2006 Broadcom Corporation. * * Licensed under the GNU/GPL. See COPYING for details. */ #include <linux/ssb/ssb.h> #include <linux/ssb/ssb_regs.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/delay.h> #include "ssb_private.h" /* Define the following to 1 to enable a printk on each coreswitch. */ #define SSB_VERBOSE_PCICORESWITCH_DEBUG 0 /* Lowlevel coreswitching */ int ssb_pci_switch_coreidx(struct ssb_bus *bus, u8 coreidx) { int err; int attempts = 0; u32 cur_core; while (1) { err = pci_write_config_dword(bus->host_pci, SSB_BAR0_WIN, (coreidx * SSB_CORE_SIZE) + SSB_ENUM_BASE); if (err) goto error; err = pci_read_config_dword(bus->host_pci, SSB_BAR0_WIN, &cur_core); if (err) goto error; cur_core = (cur_core - SSB_ENUM_BASE) / SSB_CORE_SIZE; if (cur_core == coreidx) break; if (attempts++ > SSB_BAR0_MAX_RETRIES) goto error; udelay(10); } return 0; error: ssb_err("Failed to switch to core %u\n", coreidx); return -ENODEV; } int ssb_pci_switch_core(struct ssb_bus *bus, struct ssb_device *dev) { int err; unsigned long flags; #if SSB_VERBOSE_PCICORESWITCH_DEBUG ssb_info("Switching to %s core, index %d\n", ssb_core_name(dev->id.coreid), dev->core_index); #endif spin_lock_irqsave(&bus->bar_lock, flags); err = ssb_pci_switch_coreidx(bus, dev->core_index); if (!err) bus->mapped_device = dev; spin_unlock_irqrestore(&bus->bar_lock, flags); return err; } /* Enable/disable the on board crystal oscillator and/or PLL. */ int ssb_pci_xtal(struct ssb_bus *bus, u32 what, int turn_on) { int err; u32 in, out, outenable; u16 pci_status; if (bus->bustype != SSB_BUSTYPE_PCI) return 0; err = pci_read_config_dword(bus->host_pci, SSB_GPIO_IN, &in); if (err) goto err_pci; err = pci_read_config_dword(bus->host_pci, SSB_GPIO_OUT, &out); if (err) goto err_pci; err = pci_read_config_dword(bus->host_pci, SSB_GPIO_OUT_ENABLE, &outenable); if (err) goto err_pci; outenable |= what; if (turn_on) { /* Avoid glitching the clock if GPRS is already using it. * We can't actually read the state of the PLLPD so we infer it * by the value of XTAL_PU which *is* readable via gpioin. 
*/ if (!(in & SSB_GPIO_XTAL)) { if (what & SSB_GPIO_XTAL) { /* Turn the crystal on */ out |= SSB_GPIO_XTAL; if (what & SSB_GPIO_PLL) out |= SSB_GPIO_PLL; err = pci_write_config_dword(bus->host_pci, SSB_GPIO_OUT, out); if (err) goto err_pci; err = pci_write_config_dword(bus->host_pci, SSB_GPIO_OUT_ENABLE, outenable); if (err) goto err_pci; msleep(1); } if (what & SSB_GPIO_PLL) { /* Turn the PLL on */ out &= ~SSB_GPIO_PLL; err = pci_write_config_dword(bus->host_pci, SSB_GPIO_OUT, out); if (err) goto err_pci; msleep(5); } } err = pci_read_config_word(bus->host_pci, PCI_STATUS, &pci_status); if (err) goto err_pci; pci_status &= ~PCI_STATUS_SIG_TARGET_ABORT; err = pci_write_config_word(bus->host_pci, PCI_STATUS, pci_status); if (err) goto err_pci; } else { if (what & SSB_GPIO_XTAL) { /* Turn the crystal off */ out &= ~SSB_GPIO_XTAL; } if (what & SSB_GPIO_PLL) { /* Turn the PLL off */ out |= SSB_GPIO_PLL; } err = pci_write_config_dword(bus->host_pci, SSB_GPIO_OUT, out); if (err) goto err_pci; err = pci_write_config_dword(bus->host_pci, SSB_GPIO_OUT_ENABLE, outenable); if (err) goto err_pci; } out: return err; err_pci: printk(KERN_ERR PFX "Error: ssb_pci_xtal() could not access PCI config space!\n"); err = -EBUSY; goto out; } /* Get the word-offset for a SSB_SPROM_XXX define. */ #define SPOFF(offset) ((offset) / sizeof(u16)) /* Helper to extract some _offset, which is one of the SSB_SPROM_XXX defines. */ #define SPEX16(_outvar, _offset, _mask, _shift) \ out->_outvar = ((in[SPOFF(_offset)] & (_mask)) >> (_shift)) #define SPEX32(_outvar, _offset, _mask, _shift) \ out->_outvar = ((((u32)in[SPOFF((_offset)+2)] << 16 | \ in[SPOFF(_offset)]) & (_mask)) >> (_shift)) #define SPEX(_outvar, _offset, _mask, _shift) \ SPEX16(_outvar, _offset, _mask, _shift) #define SPEX_ARRAY8(_field, _offset, _mask, _shift) \ do { \ SPEX(_field[0], _offset + 0, _mask, _shift); \ SPEX(_field[1], _offset + 2, _mask, _shift); \ SPEX(_field[2], _offset + 4, _mask, _shift); \ SPEX(_field[3], _offset + 6, _mask, _shift); \ SPEX(_field[4], _offset + 8, _mask, _shift); \ SPEX(_field[5], _offset + 10, _mask, _shift); \ SPEX(_field[6], _offset + 12, _mask, _shift); \ SPEX(_field[7], _offset + 14, _mask, _shift); \ } while (0) static inline u8 ssb_crc8(u8 crc, u8 data) { /* Polynomial: x^8 + x^7 + x^6 + x^4 + x^2 + 1 */ static const u8 t[] = { 0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B, 0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21, 0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF, 0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5, 0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14, 0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E, 0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80, 0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA, 0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95, 0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF, 0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01, 0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B, 0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA, 0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0, 0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E, 0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34, 0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0, 0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A, 0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54, 0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E, 0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF, 0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5, 0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B, 0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61, 0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E, 0x1F, 0xE8, 
0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74, 0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA, 0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0, 0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41, 0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B, 0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5, 0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F, }; return t[crc ^ data]; } static void sprom_get_mac(char *mac, const u16 *in) { int i; for (i = 0; i < 3; i++) { *mac++ = in[i] >> 8; *mac++ = in[i]; } } static u8 ssb_sprom_crc(const u16 *sprom, u16 size) { int word; u8 crc = 0xFF; for (word = 0; word < size - 1; word++) { crc = ssb_crc8(crc, sprom[word] & 0x00FF); crc = ssb_crc8(crc, (sprom[word] & 0xFF00) >> 8); } crc = ssb_crc8(crc, sprom[size - 1] & 0x00FF); crc ^= 0xFF; return crc; } static int sprom_check_crc(const u16 *sprom, size_t size) { u8 crc; u8 expected_crc; u16 tmp; crc = ssb_sprom_crc(sprom, size); tmp = sprom[size - 1] & SSB_SPROM_REVISION_CRC; expected_crc = tmp >> SSB_SPROM_REVISION_CRC_SHIFT; if (crc != expected_crc) return -EPROTO; return 0; } static int sprom_do_read(struct ssb_bus *bus, u16 *sprom) { int i; for (i = 0; i < bus->sprom_size; i++) sprom[i] = ioread16(bus->mmio + bus->sprom_offset + (i * 2)); return 0; } static int sprom_do_write(struct ssb_bus *bus, const u16 *sprom) { struct pci_dev *pdev = bus->host_pci; int i, err; u32 spromctl; u16 size = bus->sprom_size; ssb_notice("Writing SPROM. Do NOT turn off the power! Please stand by...\n"); err = pci_read_config_dword(pdev, SSB_SPROMCTL, &spromctl); if (err) goto err_ctlreg; spromctl |= SSB_SPROMCTL_WE; err = pci_write_config_dword(pdev, SSB_SPROMCTL, spromctl); if (err) goto err_ctlreg; ssb_notice("[ 0%%"); msleep(500); for (i = 0; i < size; i++) { if (i == size / 4) ssb_cont("25%%"); else if (i == size / 2) ssb_cont("50%%"); else if (i == (size * 3) / 4) ssb_cont("75%%"); else if (i % 2) ssb_cont("."); writew(sprom[i], bus->mmio + bus->sprom_offset + (i * 2)); mmiowb(); msleep(20); } err = pci_read_config_dword(pdev, SSB_SPROMCTL, &spromctl); if (err) goto err_ctlreg; spromctl &= ~SSB_SPROMCTL_WE; err = pci_write_config_dword(pdev, SSB_SPROMCTL, spromctl); if (err) goto err_ctlreg; msleep(500); ssb_cont("100%% ]\n"); ssb_notice("SPROM written\n"); return 0; err_ctlreg: ssb_err("Could not access SPROM control register.\n"); return err; } static s8 sprom_extract_antgain(u8 sprom_revision, const u16 *in, u16 offset, u16 mask, u16 shift) { u16 v; u8 gain; v = in[SPOFF(offset)]; gain = (v & mask) >> shift; if (gain == 0xFF) gain = 2; /* If unset use 2dBm */ if (sprom_revision == 1) { /* Convert to Q5.2 */ gain <<= 2; } else { /* Q5.2 Fractional part is stored in 0xC0 */ gain = ((gain & 0xC0) >> 6) | ((gain & 0x3F) << 2); } return (s8)gain; } static void sprom_extract_r23(struct ssb_sprom *out, const u16 *in) { SPEX(boardflags_hi, SSB_SPROM2_BFLHI, 0xFFFF, 0); SPEX(opo, SSB_SPROM2_OPO, SSB_SPROM2_OPO_VALUE, 0); SPEX(pa1lob0, SSB_SPROM2_PA1LOB0, 0xFFFF, 0); SPEX(pa1lob1, SSB_SPROM2_PA1LOB1, 0xFFFF, 0); SPEX(pa1lob2, SSB_SPROM2_PA1LOB2, 0xFFFF, 0); SPEX(pa1hib0, SSB_SPROM2_PA1HIB0, 0xFFFF, 0); SPEX(pa1hib1, SSB_SPROM2_PA1HIB1, 0xFFFF, 0); SPEX(pa1hib2, SSB_SPROM2_PA1HIB2, 0xFFFF, 0); SPEX(maxpwr_ah, SSB_SPROM2_MAXP_A, SSB_SPROM2_MAXP_A_HI, 0); SPEX(maxpwr_al, SSB_SPROM2_MAXP_A, SSB_SPROM2_MAXP_A_LO, SSB_SPROM2_MAXP_A_LO_SHIFT); } static void sprom_extract_r123(struct ssb_sprom *out, const u16 *in) { u16 loc[3]; if (out->revision == 3) /* rev 3 moved MAC */ loc[0] = SSB_SPROM3_IL0MAC; else { loc[0] = SSB_SPROM1_IL0MAC; loc[1] = 
SSB_SPROM1_ET0MAC; loc[2] = SSB_SPROM1_ET1MAC; } sprom_get_mac(out->il0mac, &in[SPOFF(loc[0])]); if (out->revision < 3) { /* only rev 1-2 have et0, et1 */ sprom_get_mac(out->et0mac, &in[SPOFF(loc[1])]); sprom_get_mac(out->et1mac, &in[SPOFF(loc[2])]); } SPEX(et0phyaddr, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET0A, 0); SPEX(et1phyaddr, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET1A, SSB_SPROM1_ETHPHY_ET1A_SHIFT); SPEX(et0mdcport, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET0M, 14); SPEX(et1mdcport, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET1M, 15); SPEX(board_rev, SSB_SPROM1_BINF, SSB_SPROM1_BINF_BREV, 0); SPEX(board_type, SSB_SPROM1_SPID, 0xFFFF, 0); if (out->revision == 1) SPEX(country_code, SSB_SPROM1_BINF, SSB_SPROM1_BINF_CCODE, SSB_SPROM1_BINF_CCODE_SHIFT); SPEX(ant_available_a, SSB_SPROM1_BINF, SSB_SPROM1_BINF_ANTA, SSB_SPROM1_BINF_ANTA_SHIFT); SPEX(ant_available_bg, SSB_SPROM1_BINF, SSB_SPROM1_BINF_ANTBG, SSB_SPROM1_BINF_ANTBG_SHIFT); SPEX(pa0b0, SSB_SPROM1_PA0B0, 0xFFFF, 0); SPEX(pa0b1, SSB_SPROM1_PA0B1, 0xFFFF, 0); SPEX(pa0b2, SSB_SPROM1_PA0B2, 0xFFFF, 0); SPEX(pa1b0, SSB_SPROM1_PA1B0, 0xFFFF, 0); SPEX(pa1b1, SSB_SPROM1_PA1B1, 0xFFFF, 0); SPEX(pa1b2, SSB_SPROM1_PA1B2, 0xFFFF, 0); SPEX(gpio0, SSB_SPROM1_GPIOA, SSB_SPROM1_GPIOA_P0, 0); SPEX(gpio1, SSB_SPROM1_GPIOA, SSB_SPROM1_GPIOA_P1, SSB_SPROM1_GPIOA_P1_SHIFT); SPEX(gpio2, SSB_SPROM1_GPIOB, SSB_SPROM1_GPIOB_P2, 0); SPEX(gpio3, SSB_SPROM1_GPIOB, SSB_SPROM1_GPIOB_P3, SSB_SPROM1_GPIOB_P3_SHIFT); SPEX(maxpwr_a, SSB_SPROM1_MAXPWR, SSB_SPROM1_MAXPWR_A, SSB_SPROM1_MAXPWR_A_SHIFT); SPEX(maxpwr_bg, SSB_SPROM1_MAXPWR, SSB_SPROM1_MAXPWR_BG, 0); SPEX(itssi_a, SSB_SPROM1_ITSSI, SSB_SPROM1_ITSSI_A, SSB_SPROM1_ITSSI_A_SHIFT); SPEX(itssi_bg, SSB_SPROM1_ITSSI, SSB_SPROM1_ITSSI_BG, 0); SPEX(boardflags_lo, SSB_SPROM1_BFLLO, 0xFFFF, 0); SPEX(alpha2[0], SSB_SPROM1_CCODE, 0xff00, 8); SPEX(alpha2[1], SSB_SPROM1_CCODE, 0x00ff, 0); /* Extract the antenna gain values. 
*/ out->antenna_gain.a0 = sprom_extract_antgain(out->revision, in, SSB_SPROM1_AGAIN, SSB_SPROM1_AGAIN_BG, SSB_SPROM1_AGAIN_BG_SHIFT); out->antenna_gain.a1 = sprom_extract_antgain(out->revision, in, SSB_SPROM1_AGAIN, SSB_SPROM1_AGAIN_A, SSB_SPROM1_AGAIN_A_SHIFT); if (out->revision >= 2) sprom_extract_r23(out, in); } /* Revs 4 5 and 8 have partially shared layout */ static void sprom_extract_r458(struct ssb_sprom *out, const u16 *in) { SPEX(txpid2g[0], SSB_SPROM4_TXPID2G01, SSB_SPROM4_TXPID2G0, SSB_SPROM4_TXPID2G0_SHIFT); SPEX(txpid2g[1], SSB_SPROM4_TXPID2G01, SSB_SPROM4_TXPID2G1, SSB_SPROM4_TXPID2G1_SHIFT); SPEX(txpid2g[2], SSB_SPROM4_TXPID2G23, SSB_SPROM4_TXPID2G2, SSB_SPROM4_TXPID2G2_SHIFT); SPEX(txpid2g[3], SSB_SPROM4_TXPID2G23, SSB_SPROM4_TXPID2G3, SSB_SPROM4_TXPID2G3_SHIFT); SPEX(txpid5gl[0], SSB_SPROM4_TXPID5GL01, SSB_SPROM4_TXPID5GL0, SSB_SPROM4_TXPID5GL0_SHIFT); SPEX(txpid5gl[1], SSB_SPROM4_TXPID5GL01, SSB_SPROM4_TXPID5GL1, SSB_SPROM4_TXPID5GL1_SHIFT); SPEX(txpid5gl[2], SSB_SPROM4_TXPID5GL23, SSB_SPROM4_TXPID5GL2, SSB_SPROM4_TXPID5GL2_SHIFT); SPEX(txpid5gl[3], SSB_SPROM4_TXPID5GL23, SSB_SPROM4_TXPID5GL3, SSB_SPROM4_TXPID5GL3_SHIFT); SPEX(txpid5g[0], SSB_SPROM4_TXPID5G01, SSB_SPROM4_TXPID5G0, SSB_SPROM4_TXPID5G0_SHIFT); SPEX(txpid5g[1], SSB_SPROM4_TXPID5G01, SSB_SPROM4_TXPID5G1, SSB_SPROM4_TXPID5G1_SHIFT); SPEX(txpid5g[2], SSB_SPROM4_TXPID5G23, SSB_SPROM4_TXPID5G2, SSB_SPROM4_TXPID5G2_SHIFT); SPEX(txpid5g[3], SSB_SPROM4_TXPID5G23, SSB_SPROM4_TXPID5G3, SSB_SPROM4_TXPID5G3_SHIFT); SPEX(txpid5gh[0], SSB_SPROM4_TXPID5GH01, SSB_SPROM4_TXPID5GH0, SSB_SPROM4_TXPID5GH0_SHIFT); SPEX(txpid5gh[1], SSB_SPROM4_TXPID5GH01, SSB_SPROM4_TXPID5GH1, SSB_SPROM4_TXPID5GH1_SHIFT); SPEX(txpid5gh[2], SSB_SPROM4_TXPID5GH23, SSB_SPROM4_TXPID5GH2, SSB_SPROM4_TXPID5GH2_SHIFT); SPEX(txpid5gh[3], SSB_SPROM4_TXPID5GH23, SSB_SPROM4_TXPID5GH3, SSB_SPROM4_TXPID5GH3_SHIFT); } static void sprom_extract_r45(struct ssb_sprom *out, const u16 *in) { static const u16 pwr_info_offset[] = { SSB_SPROM4_PWR_INFO_CORE0, SSB_SPROM4_PWR_INFO_CORE1, SSB_SPROM4_PWR_INFO_CORE2, SSB_SPROM4_PWR_INFO_CORE3 }; u16 il0mac_offset; int i; BUILD_BUG_ON(ARRAY_SIZE(pwr_info_offset) != ARRAY_SIZE(out->core_pwr_info)); if (out->revision == 4) il0mac_offset = SSB_SPROM4_IL0MAC; else il0mac_offset = SSB_SPROM5_IL0MAC; sprom_get_mac(out->il0mac, &in[SPOFF(il0mac_offset)]); SPEX(et0phyaddr, SSB_SPROM4_ETHPHY, SSB_SPROM4_ETHPHY_ET0A, 0); SPEX(et1phyaddr, SSB_SPROM4_ETHPHY, SSB_SPROM4_ETHPHY_ET1A, SSB_SPROM4_ETHPHY_ET1A_SHIFT); SPEX(board_rev, SSB_SPROM4_BOARDREV, 0xFFFF, 0); SPEX(board_type, SSB_SPROM1_SPID, 0xFFFF, 0); if (out->revision == 4) { SPEX(alpha2[0], SSB_SPROM4_CCODE, 0xff00, 8); SPEX(alpha2[1], SSB_SPROM4_CCODE, 0x00ff, 0); SPEX(boardflags_lo, SSB_SPROM4_BFLLO, 0xFFFF, 0); SPEX(boardflags_hi, SSB_SPROM4_BFLHI, 0xFFFF, 0); SPEX(boardflags2_lo, SSB_SPROM4_BFL2LO, 0xFFFF, 0); SPEX(boardflags2_hi, SSB_SPROM4_BFL2HI, 0xFFFF, 0); } else { SPEX(alpha2[0], SSB_SPROM5_CCODE, 0xff00, 8); SPEX(alpha2[1], SSB_SPROM5_CCODE, 0x00ff, 0); SPEX(boardflags_lo, SSB_SPROM5_BFLLO, 0xFFFF, 0); SPEX(boardflags_hi, SSB_SPROM5_BFLHI, 0xFFFF, 0); SPEX(boardflags2_lo, SSB_SPROM5_BFL2LO, 0xFFFF, 0); SPEX(boardflags2_hi, SSB_SPROM5_BFL2HI, 0xFFFF, 0); } SPEX(ant_available_a, SSB_SPROM4_ANTAVAIL, SSB_SPROM4_ANTAVAIL_A, SSB_SPROM4_ANTAVAIL_A_SHIFT); SPEX(ant_available_bg, SSB_SPROM4_ANTAVAIL, SSB_SPROM4_ANTAVAIL_BG, SSB_SPROM4_ANTAVAIL_BG_SHIFT); SPEX(maxpwr_bg, SSB_SPROM4_MAXP_BG, SSB_SPROM4_MAXP_BG_MASK, 0); SPEX(itssi_bg, SSB_SPROM4_MAXP_BG, SSB_SPROM4_ITSSI_BG, 
SSB_SPROM4_ITSSI_BG_SHIFT); SPEX(maxpwr_a, SSB_SPROM4_MAXP_A, SSB_SPROM4_MAXP_A_MASK, 0); SPEX(itssi_a, SSB_SPROM4_MAXP_A, SSB_SPROM4_ITSSI_A, SSB_SPROM4_ITSSI_A_SHIFT); if (out->revision == 4) { SPEX(gpio0, SSB_SPROM4_GPIOA, SSB_SPROM4_GPIOA_P0, 0); SPEX(gpio1, SSB_SPROM4_GPIOA, SSB_SPROM4_GPIOA_P1, SSB_SPROM4_GPIOA_P1_SHIFT); SPEX(gpio2, SSB_SPROM4_GPIOB, SSB_SPROM4_GPIOB_P2, 0); SPEX(gpio3, SSB_SPROM4_GPIOB, SSB_SPROM4_GPIOB_P3, SSB_SPROM4_GPIOB_P3_SHIFT); } else { SPEX(gpio0, SSB_SPROM5_GPIOA, SSB_SPROM5_GPIOA_P0, 0); SPEX(gpio1, SSB_SPROM5_GPIOA, SSB_SPROM5_GPIOA_P1, SSB_SPROM5_GPIOA_P1_SHIFT); SPEX(gpio2, SSB_SPROM5_GPIOB, SSB_SPROM5_GPIOB_P2, 0); SPEX(gpio3, SSB_SPROM5_GPIOB, SSB_SPROM5_GPIOB_P3, SSB_SPROM5_GPIOB_P3_SHIFT); } /* Extract the antenna gain values. */ out->antenna_gain.a0 = sprom_extract_antgain(out->revision, in, SSB_SPROM4_AGAIN01, SSB_SPROM4_AGAIN0, SSB_SPROM4_AGAIN0_SHIFT); out->antenna_gain.a1 = sprom_extract_antgain(out->revision, in, SSB_SPROM4_AGAIN01, SSB_SPROM4_AGAIN1, SSB_SPROM4_AGAIN1_SHIFT); out->antenna_gain.a2 = sprom_extract_antgain(out->revision, in, SSB_SPROM4_AGAIN23, SSB_SPROM4_AGAIN2, SSB_SPROM4_AGAIN2_SHIFT); out->antenna_gain.a3 = sprom_extract_antgain(out->revision, in, SSB_SPROM4_AGAIN23, SSB_SPROM4_AGAIN3, SSB_SPROM4_AGAIN3_SHIFT); /* Extract cores power info info */ for (i = 0; i < ARRAY_SIZE(pwr_info_offset); i++) { u16 o = pwr_info_offset[i]; SPEX(core_pwr_info[i].itssi_2g, o + SSB_SPROM4_2G_MAXP_ITSSI, SSB_SPROM4_2G_ITSSI, SSB_SPROM4_2G_ITSSI_SHIFT); SPEX(core_pwr_info[i].maxpwr_2g, o + SSB_SPROM4_2G_MAXP_ITSSI, SSB_SPROM4_2G_MAXP, 0); SPEX(core_pwr_info[i].pa_2g[0], o + SSB_SPROM4_2G_PA_0, ~0, 0); SPEX(core_pwr_info[i].pa_2g[1], o + SSB_SPROM4_2G_PA_1, ~0, 0); SPEX(core_pwr_info[i].pa_2g[2], o + SSB_SPROM4_2G_PA_2, ~0, 0); SPEX(core_pwr_info[i].pa_2g[3], o + SSB_SPROM4_2G_PA_3, ~0, 0); SPEX(core_pwr_info[i].itssi_5g, o + SSB_SPROM4_5G_MAXP_ITSSI, SSB_SPROM4_5G_ITSSI, SSB_SPROM4_5G_ITSSI_SHIFT); SPEX(core_pwr_info[i].maxpwr_5g, o + SSB_SPROM4_5G_MAXP_ITSSI, SSB_SPROM4_5G_MAXP, 0); SPEX(core_pwr_info[i].maxpwr_5gh, o + SSB_SPROM4_5GHL_MAXP, SSB_SPROM4_5GH_MAXP, 0); SPEX(core_pwr_info[i].maxpwr_5gl, o + SSB_SPROM4_5GHL_MAXP, SSB_SPROM4_5GL_MAXP, SSB_SPROM4_5GL_MAXP_SHIFT); SPEX(core_pwr_info[i].pa_5gl[0], o + SSB_SPROM4_5GL_PA_0, ~0, 0); SPEX(core_pwr_info[i].pa_5gl[1], o + SSB_SPROM4_5GL_PA_1, ~0, 0); SPEX(core_pwr_info[i].pa_5gl[2], o + SSB_SPROM4_5GL_PA_2, ~0, 0); SPEX(core_pwr_info[i].pa_5gl[3], o + SSB_SPROM4_5GL_PA_3, ~0, 0); SPEX(core_pwr_info[i].pa_5g[0], o + SSB_SPROM4_5G_PA_0, ~0, 0); SPEX(core_pwr_info[i].pa_5g[1], o + SSB_SPROM4_5G_PA_1, ~0, 0); SPEX(core_pwr_info[i].pa_5g[2], o + SSB_SPROM4_5G_PA_2, ~0, 0); SPEX(core_pwr_info[i].pa_5g[3], o + SSB_SPROM4_5G_PA_3, ~0, 0); SPEX(core_pwr_info[i].pa_5gh[0], o + SSB_SPROM4_5GH_PA_0, ~0, 0); SPEX(core_pwr_info[i].pa_5gh[1], o + SSB_SPROM4_5GH_PA_1, ~0, 0); SPEX(core_pwr_info[i].pa_5gh[2], o + SSB_SPROM4_5GH_PA_2, ~0, 0); SPEX(core_pwr_info[i].pa_5gh[3], o + SSB_SPROM4_5GH_PA_3, ~0, 0); } sprom_extract_r458(out, in); /* TODO - get remaining rev 4 stuff needed */ } static void sprom_extract_r8(struct ssb_sprom *out, const u16 *in) { int i; u16 o; u16 pwr_info_offset[] = { SSB_SROM8_PWR_INFO_CORE0, SSB_SROM8_PWR_INFO_CORE1, SSB_SROM8_PWR_INFO_CORE2, SSB_SROM8_PWR_INFO_CORE3 }; BUILD_BUG_ON(ARRAY_SIZE(pwr_info_offset) != ARRAY_SIZE(out->core_pwr_info)); /* extract the MAC address */ sprom_get_mac(out->il0mac, &in[SPOFF(SSB_SPROM8_IL0MAC)]); SPEX(board_rev, SSB_SPROM8_BOARDREV, 0xFFFF, 0); 
SPEX(board_type, SSB_SPROM1_SPID, 0xFFFF, 0); SPEX(alpha2[0], SSB_SPROM8_CCODE, 0xff00, 8); SPEX(alpha2[1], SSB_SPROM8_CCODE, 0x00ff, 0); SPEX(boardflags_lo, SSB_SPROM8_BFLLO, 0xFFFF, 0); SPEX(boardflags_hi, SSB_SPROM8_BFLHI, 0xFFFF, 0); SPEX(boardflags2_lo, SSB_SPROM8_BFL2LO, 0xFFFF, 0); SPEX(boardflags2_hi, SSB_SPROM8_BFL2HI, 0xFFFF, 0); SPEX(ant_available_a, SSB_SPROM8_ANTAVAIL, SSB_SPROM8_ANTAVAIL_A, SSB_SPROM8_ANTAVAIL_A_SHIFT); SPEX(ant_available_bg, SSB_SPROM8_ANTAVAIL, SSB_SPROM8_ANTAVAIL_BG, SSB_SPROM8_ANTAVAIL_BG_SHIFT); SPEX(maxpwr_bg, SSB_SPROM8_MAXP_BG, SSB_SPROM8_MAXP_BG_MASK, 0); SPEX(itssi_bg, SSB_SPROM8_MAXP_BG, SSB_SPROM8_ITSSI_BG, SSB_SPROM8_ITSSI_BG_SHIFT); SPEX(maxpwr_a, SSB_SPROM8_MAXP_A, SSB_SPROM8_MAXP_A_MASK, 0); SPEX(itssi_a, SSB_SPROM8_MAXP_A, SSB_SPROM8_ITSSI_A, SSB_SPROM8_ITSSI_A_SHIFT); SPEX(maxpwr_ah, SSB_SPROM8_MAXP_AHL, SSB_SPROM8_MAXP_AH_MASK, 0); SPEX(maxpwr_al, SSB_SPROM8_MAXP_AHL, SSB_SPROM8_MAXP_AL_MASK, SSB_SPROM8_MAXP_AL_SHIFT); SPEX(gpio0, SSB_SPROM8_GPIOA, SSB_SPROM8_GPIOA_P0, 0); SPEX(gpio1, SSB_SPROM8_GPIOA, SSB_SPROM8_GPIOA_P1, SSB_SPROM8_GPIOA_P1_SHIFT); SPEX(gpio2, SSB_SPROM8_GPIOB, SSB_SPROM8_GPIOB_P2, 0); SPEX(gpio3, SSB_SPROM8_GPIOB, SSB_SPROM8_GPIOB_P3, SSB_SPROM8_GPIOB_P3_SHIFT); SPEX(tri2g, SSB_SPROM8_TRI25G, SSB_SPROM8_TRI2G, 0); SPEX(tri5g, SSB_SPROM8_TRI25G, SSB_SPROM8_TRI5G, SSB_SPROM8_TRI5G_SHIFT); SPEX(tri5gl, SSB_SPROM8_TRI5GHL, SSB_SPROM8_TRI5GL, 0); SPEX(tri5gh, SSB_SPROM8_TRI5GHL, SSB_SPROM8_TRI5GH, SSB_SPROM8_TRI5GH_SHIFT); SPEX(rxpo2g, SSB_SPROM8_RXPO, SSB_SPROM8_RXPO2G, 0); SPEX(rxpo5g, SSB_SPROM8_RXPO, SSB_SPROM8_RXPO5G, SSB_SPROM8_RXPO5G_SHIFT); SPEX(rssismf2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_RSSISMF2G, 0); SPEX(rssismc2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_RSSISMC2G, SSB_SPROM8_RSSISMC2G_SHIFT); SPEX(rssisav2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_RSSISAV2G, SSB_SPROM8_RSSISAV2G_SHIFT); SPEX(bxa2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_BXA2G, SSB_SPROM8_BXA2G_SHIFT); SPEX(rssismf5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_RSSISMF5G, 0); SPEX(rssismc5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_RSSISMC5G, SSB_SPROM8_RSSISMC5G_SHIFT); SPEX(rssisav5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_RSSISAV5G, SSB_SPROM8_RSSISAV5G_SHIFT); SPEX(bxa5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_BXA5G, SSB_SPROM8_BXA5G_SHIFT); SPEX(pa0b0, SSB_SPROM8_PA0B0, 0xFFFF, 0); SPEX(pa0b1, SSB_SPROM8_PA0B1, 0xFFFF, 0); SPEX(pa0b2, SSB_SPROM8_PA0B2, 0xFFFF, 0); SPEX(pa1b0, SSB_SPROM8_PA1B0, 0xFFFF, 0); SPEX(pa1b1, SSB_SPROM8_PA1B1, 0xFFFF, 0); SPEX(pa1b2, SSB_SPROM8_PA1B2, 0xFFFF, 0); SPEX(pa1lob0, SSB_SPROM8_PA1LOB0, 0xFFFF, 0); SPEX(pa1lob1, SSB_SPROM8_PA1LOB1, 0xFFFF, 0); SPEX(pa1lob2, SSB_SPROM8_PA1LOB2, 0xFFFF, 0); SPEX(pa1hib0, SSB_SPROM8_PA1HIB0, 0xFFFF, 0); SPEX(pa1hib1, SSB_SPROM8_PA1HIB1, 0xFFFF, 0); SPEX(pa1hib2, SSB_SPROM8_PA1HIB2, 0xFFFF, 0); SPEX(cck2gpo, SSB_SPROM8_CCK2GPO, 0xFFFF, 0); SPEX32(ofdm2gpo, SSB_SPROM8_OFDM2GPO, 0xFFFFFFFF, 0); SPEX32(ofdm5glpo, SSB_SPROM8_OFDM5GLPO, 0xFFFFFFFF, 0); SPEX32(ofdm5gpo, SSB_SPROM8_OFDM5GPO, 0xFFFFFFFF, 0); SPEX32(ofdm5ghpo, SSB_SPROM8_OFDM5GHPO, 0xFFFFFFFF, 0); /* Extract the antenna gain values. 
*/ out->antenna_gain.a0 = sprom_extract_antgain(out->revision, in, SSB_SPROM8_AGAIN01, SSB_SPROM8_AGAIN0, SSB_SPROM8_AGAIN0_SHIFT); out->antenna_gain.a1 = sprom_extract_antgain(out->revision, in, SSB_SPROM8_AGAIN01, SSB_SPROM8_AGAIN1, SSB_SPROM8_AGAIN1_SHIFT); out->antenna_gain.a2 = sprom_extract_antgain(out->revision, in, SSB_SPROM8_AGAIN23, SSB_SPROM8_AGAIN2, SSB_SPROM8_AGAIN2_SHIFT); out->antenna_gain.a3 = sprom_extract_antgain(out->revision, in, SSB_SPROM8_AGAIN23, SSB_SPROM8_AGAIN3, SSB_SPROM8_AGAIN3_SHIFT); /* Extract cores power info info */ for (i = 0; i < ARRAY_SIZE(pwr_info_offset); i++) { o = pwr_info_offset[i]; SPEX(core_pwr_info[i].itssi_2g, o + SSB_SROM8_2G_MAXP_ITSSI, SSB_SPROM8_2G_ITSSI, SSB_SPROM8_2G_ITSSI_SHIFT); SPEX(core_pwr_info[i].maxpwr_2g, o + SSB_SROM8_2G_MAXP_ITSSI, SSB_SPROM8_2G_MAXP, 0); SPEX(core_pwr_info[i].pa_2g[0], o + SSB_SROM8_2G_PA_0, ~0, 0); SPEX(core_pwr_info[i].pa_2g[1], o + SSB_SROM8_2G_PA_1, ~0, 0); SPEX(core_pwr_info[i].pa_2g[2], o + SSB_SROM8_2G_PA_2, ~0, 0); SPEX(core_pwr_info[i].itssi_5g, o + SSB_SROM8_5G_MAXP_ITSSI, SSB_SPROM8_5G_ITSSI, SSB_SPROM8_5G_ITSSI_SHIFT); SPEX(core_pwr_info[i].maxpwr_5g, o + SSB_SROM8_5G_MAXP_ITSSI, SSB_SPROM8_5G_MAXP, 0); SPEX(core_pwr_info[i].maxpwr_5gh, o + SSB_SPROM8_5GHL_MAXP, SSB_SPROM8_5GH_MAXP, 0); SPEX(core_pwr_info[i].maxpwr_5gl, o + SSB_SPROM8_5GHL_MAXP, SSB_SPROM8_5GL_MAXP, SSB_SPROM8_5GL_MAXP_SHIFT); SPEX(core_pwr_info[i].pa_5gl[0], o + SSB_SROM8_5GL_PA_0, ~0, 0); SPEX(core_pwr_info[i].pa_5gl[1], o + SSB_SROM8_5GL_PA_1, ~0, 0); SPEX(core_pwr_info[i].pa_5gl[2], o + SSB_SROM8_5GL_PA_2, ~0, 0); SPEX(core_pwr_info[i].pa_5g[0], o + SSB_SROM8_5G_PA_0, ~0, 0); SPEX(core_pwr_info[i].pa_5g[1], o + SSB_SROM8_5G_PA_1, ~0, 0); SPEX(core_pwr_info[i].pa_5g[2], o + SSB_SROM8_5G_PA_2, ~0, 0); SPEX(core_pwr_info[i].pa_5gh[0], o + SSB_SROM8_5GH_PA_0, ~0, 0); SPEX(core_pwr_info[i].pa_5gh[1], o + SSB_SROM8_5GH_PA_1, ~0, 0); SPEX(core_pwr_info[i].pa_5gh[2], o + SSB_SROM8_5GH_PA_2, ~0, 0); } /* Extract FEM info */ SPEX(fem.ghz2.tssipos, SSB_SPROM8_FEM2G, SSB_SROM8_FEM_TSSIPOS, SSB_SROM8_FEM_TSSIPOS_SHIFT); SPEX(fem.ghz2.extpa_gain, SSB_SPROM8_FEM2G, SSB_SROM8_FEM_EXTPA_GAIN, SSB_SROM8_FEM_EXTPA_GAIN_SHIFT); SPEX(fem.ghz2.pdet_range, SSB_SPROM8_FEM2G, SSB_SROM8_FEM_PDET_RANGE, SSB_SROM8_FEM_PDET_RANGE_SHIFT); SPEX(fem.ghz2.tr_iso, SSB_SPROM8_FEM2G, SSB_SROM8_FEM_TR_ISO, SSB_SROM8_FEM_TR_ISO_SHIFT); SPEX(fem.ghz2.antswlut, SSB_SPROM8_FEM2G, SSB_SROM8_FEM_ANTSWLUT, SSB_SROM8_FEM_ANTSWLUT_SHIFT); SPEX(fem.ghz5.tssipos, SSB_SPROM8_FEM5G, SSB_SROM8_FEM_TSSIPOS, SSB_SROM8_FEM_TSSIPOS_SHIFT); SPEX(fem.ghz5.extpa_gain, SSB_SPROM8_FEM5G, SSB_SROM8_FEM_EXTPA_GAIN, SSB_SROM8_FEM_EXTPA_GAIN_SHIFT); SPEX(fem.ghz5.pdet_range, SSB_SPROM8_FEM5G, SSB_SROM8_FEM_PDET_RANGE, SSB_SROM8_FEM_PDET_RANGE_SHIFT); SPEX(fem.ghz5.tr_iso, SSB_SPROM8_FEM5G, SSB_SROM8_FEM_TR_ISO, SSB_SROM8_FEM_TR_ISO_SHIFT); SPEX(fem.ghz5.antswlut, SSB_SPROM8_FEM5G, SSB_SROM8_FEM_ANTSWLUT, SSB_SROM8_FEM_ANTSWLUT_SHIFT); SPEX(leddc_on_time, SSB_SPROM8_LEDDC, SSB_SPROM8_LEDDC_ON, SSB_SPROM8_LEDDC_ON_SHIFT); SPEX(leddc_off_time, SSB_SPROM8_LEDDC, SSB_SPROM8_LEDDC_OFF, SSB_SPROM8_LEDDC_OFF_SHIFT); SPEX(txchain, SSB_SPROM8_TXRXC, SSB_SPROM8_TXRXC_TXCHAIN, SSB_SPROM8_TXRXC_TXCHAIN_SHIFT); SPEX(rxchain, SSB_SPROM8_TXRXC, SSB_SPROM8_TXRXC_RXCHAIN, SSB_SPROM8_TXRXC_RXCHAIN_SHIFT); SPEX(antswitch, SSB_SPROM8_TXRXC, SSB_SPROM8_TXRXC_SWITCH, SSB_SPROM8_TXRXC_SWITCH_SHIFT); SPEX(opo, SSB_SPROM8_OFDM2GPO, 0x00ff, 0); SPEX_ARRAY8(mcs2gpo, SSB_SPROM8_2G_MCSPO, ~0, 0); SPEX_ARRAY8(mcs5gpo, 
SSB_SPROM8_5G_MCSPO, ~0, 0); SPEX_ARRAY8(mcs5glpo, SSB_SPROM8_5GL_MCSPO, ~0, 0); SPEX_ARRAY8(mcs5ghpo, SSB_SPROM8_5GH_MCSPO, ~0, 0); SPEX(rawtempsense, SSB_SPROM8_RAWTS, SSB_SPROM8_RAWTS_RAWTEMP, SSB_SPROM8_RAWTS_RAWTEMP_SHIFT); SPEX(measpower, SSB_SPROM8_RAWTS, SSB_SPROM8_RAWTS_MEASPOWER, SSB_SPROM8_RAWTS_MEASPOWER_SHIFT); SPEX(tempsense_slope, SSB_SPROM8_OPT_CORRX, SSB_SPROM8_OPT_CORRX_TEMP_SLOPE, SSB_SPROM8_OPT_CORRX_TEMP_SLOPE_SHIFT); SPEX(tempcorrx, SSB_SPROM8_OPT_CORRX, SSB_SPROM8_OPT_CORRX_TEMPCORRX, SSB_SPROM8_OPT_CORRX_TEMPCORRX_SHIFT); SPEX(tempsense_option, SSB_SPROM8_OPT_CORRX, SSB_SPROM8_OPT_CORRX_TEMP_OPTION, SSB_SPROM8_OPT_CORRX_TEMP_OPTION_SHIFT); SPEX(freqoffset_corr, SSB_SPROM8_HWIQ_IQSWP, SSB_SPROM8_HWIQ_IQSWP_FREQ_CORR, SSB_SPROM8_HWIQ_IQSWP_FREQ_CORR_SHIFT); SPEX(iqcal_swp_dis, SSB_SPROM8_HWIQ_IQSWP, SSB_SPROM8_HWIQ_IQSWP_IQCAL_SWP, SSB_SPROM8_HWIQ_IQSWP_IQCAL_SWP_SHIFT); SPEX(hw_iqcal_en, SSB_SPROM8_HWIQ_IQSWP, SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL, SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL_SHIFT); SPEX(bw40po, SSB_SPROM8_BW40PO, ~0, 0); SPEX(cddpo, SSB_SPROM8_CDDPO, ~0, 0); SPEX(stbcpo, SSB_SPROM8_STBCPO, ~0, 0); SPEX(bwduppo, SSB_SPROM8_BWDUPPO, ~0, 0); SPEX(tempthresh, SSB_SPROM8_THERMAL, SSB_SPROM8_THERMAL_TRESH, SSB_SPROM8_THERMAL_TRESH_SHIFT); SPEX(tempoffset, SSB_SPROM8_THERMAL, SSB_SPROM8_THERMAL_OFFSET, SSB_SPROM8_THERMAL_OFFSET_SHIFT); SPEX(phycal_tempdelta, SSB_SPROM8_TEMPDELTA, SSB_SPROM8_TEMPDELTA_PHYCAL, SSB_SPROM8_TEMPDELTA_PHYCAL_SHIFT); SPEX(temps_period, SSB_SPROM8_TEMPDELTA, SSB_SPROM8_TEMPDELTA_PERIOD, SSB_SPROM8_TEMPDELTA_PERIOD_SHIFT); SPEX(temps_hysteresis, SSB_SPROM8_TEMPDELTA, SSB_SPROM8_TEMPDELTA_HYSTERESIS, SSB_SPROM8_TEMPDELTA_HYSTERESIS_SHIFT); sprom_extract_r458(out, in); /* TODO - get remaining rev 8 stuff needed */ } static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out, const u16 *in, u16 size) { memset(out, 0, sizeof(*out)); out->revision = in[size - 1] & 0x00FF; ssb_dbg("SPROM revision %d detected\n", out->revision); memset(out->et0mac, 0xFF, 6); /* preset et0 and et1 mac */ memset(out->et1mac, 0xFF, 6); if ((bus->chip_id & 0xFF00) == 0x4400) { /* Workaround: The BCM44XX chip has a stupid revision * number stored in the SPROM. * Always extract r1. */ out->revision = 1; ssb_dbg("SPROM treated as revision %d\n", out->revision); } switch (out->revision) { case 1: case 2: case 3: sprom_extract_r123(out, in); break; case 4: case 5: sprom_extract_r45(out, in); break; case 8: sprom_extract_r8(out, in); break; default: ssb_warn("Unsupported SPROM revision %d detected. Will extract v1\n", out->revision); out->revision = 1; sprom_extract_r123(out, in); } if (out->boardflags_lo == 0xFFFF) out->boardflags_lo = 0; /* per specs */ if (out->boardflags_hi == 0xFFFF) out->boardflags_hi = 0; /* per specs */ return 0; } static int ssb_pci_sprom_get(struct ssb_bus *bus, struct ssb_sprom *sprom) { int err; u16 *buf; if (!ssb_is_sprom_available(bus)) { ssb_err("No SPROM available!\n"); return -ENODEV; } if (bus->chipco.dev) { /* can be unavailable! 
*/ /* * get SPROM offset: SSB_SPROM_BASE1 except for * chipcommon rev >= 31 or chip ID is 0x4312 and * chipcommon status & 3 == 2 */ if (bus->chipco.dev->id.revision >= 31) bus->sprom_offset = SSB_SPROM_BASE31; else if (bus->chip_id == 0x4312 && (bus->chipco.status & 0x03) == 2) bus->sprom_offset = SSB_SPROM_BASE31; else bus->sprom_offset = SSB_SPROM_BASE1; } else { bus->sprom_offset = SSB_SPROM_BASE1; } ssb_dbg("SPROM offset is 0x%x\n", bus->sprom_offset); buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL); if (!buf) return -ENOMEM; bus->sprom_size = SSB_SPROMSIZE_WORDS_R123; sprom_do_read(bus, buf); err = sprom_check_crc(buf, bus->sprom_size); if (err) { /* try for a 440 byte SPROM - revision 4 and higher */ kfree(buf); buf = kcalloc(SSB_SPROMSIZE_WORDS_R4, sizeof(u16), GFP_KERNEL); if (!buf) return -ENOMEM; bus->sprom_size = SSB_SPROMSIZE_WORDS_R4; sprom_do_read(bus, buf); err = sprom_check_crc(buf, bus->sprom_size); if (err) { /* All CRC attempts failed. * Maybe there is no SPROM on the device? * Now we ask the arch code if there is some sprom * available for this device in some other storage */ err = ssb_fill_sprom_with_fallback(bus, sprom); if (err) { ssb_warn("WARNING: Using fallback SPROM failed (err %d)\n", err); } else { ssb_dbg("Using SPROM revision %d provided by platform\n", sprom->revision); err = 0; goto out_free; } ssb_warn("WARNING: Invalid SPROM CRC (corrupt SPROM)\n"); } } err = sprom_extract(bus, sprom, buf, bus->sprom_size); out_free: kfree(buf); return err; } static void ssb_pci_get_boardinfo(struct ssb_bus *bus, struct ssb_boardinfo *bi) { bi->vendor = bus->host_pci->subsystem_vendor; bi->type = bus->host_pci->subsystem_device; } int ssb_pci_get_invariants(struct ssb_bus *bus, struct ssb_init_invariants *iv) { int err; err = ssb_pci_sprom_get(bus, &iv->sprom); if (err) goto out; ssb_pci_get_boardinfo(bus, &iv->boardinfo); out: return err; } #ifdef CONFIG_SSB_DEBUG static int ssb_pci_assert_buspower(struct ssb_bus *bus) { if (likely(bus->powered_up)) return 0; printk(KERN_ERR PFX "FATAL ERROR: Bus powered down " "while accessing PCI MMIO space\n"); if (bus->power_warn_count <= 10) { bus->power_warn_count++; dump_stack(); } return -ENODEV; } #else /* DEBUG */ static inline int ssb_pci_assert_buspower(struct ssb_bus *bus) { return 0; } #endif /* DEBUG */ static u8 ssb_pci_read8(struct ssb_device *dev, u16 offset) { struct ssb_bus *bus = dev->bus; if (unlikely(ssb_pci_assert_buspower(bus))) return 0xFF; if (unlikely(bus->mapped_device != dev)) { if (unlikely(ssb_pci_switch_core(bus, dev))) return 0xFF; } return ioread8(bus->mmio + offset); } static u16 ssb_pci_read16(struct ssb_device *dev, u16 offset) { struct ssb_bus *bus = dev->bus; if (unlikely(ssb_pci_assert_buspower(bus))) return 0xFFFF; if (unlikely(bus->mapped_device != dev)) { if (unlikely(ssb_pci_switch_core(bus, dev))) return 0xFFFF; } return ioread16(bus->mmio + offset); } static u32 ssb_pci_read32(struct ssb_device *dev, u16 offset) { struct ssb_bus *bus = dev->bus; if (unlikely(ssb_pci_assert_buspower(bus))) return 0xFFFFFFFF; if (unlikely(bus->mapped_device != dev)) { if (unlikely(ssb_pci_switch_core(bus, dev))) return 0xFFFFFFFF; } return ioread32(bus->mmio + offset); } #ifdef CONFIG_SSB_BLOCKIO static void ssb_pci_block_read(struct ssb_device *dev, void *buffer, size_t count, u16 offset, u8 reg_width) { struct ssb_bus *bus = dev->bus; void __iomem *addr = bus->mmio + offset; if (unlikely(ssb_pci_assert_buspower(bus))) goto error; if (unlikely(bus->mapped_device != dev)) { if 
(unlikely(ssb_pci_switch_core(bus, dev))) goto error; } switch (reg_width) { case sizeof(u8): ioread8_rep(addr, buffer, count); break; case sizeof(u16): SSB_WARN_ON(count & 1); ioread16_rep(addr, buffer, count >> 1); break; case sizeof(u32): SSB_WARN_ON(count & 3); ioread32_rep(addr, buffer, count >> 2); break; default: SSB_WARN_ON(1); } return; error: memset(buffer, 0xFF, count); } #endif /* CONFIG_SSB_BLOCKIO */ static void ssb_pci_write8(struct ssb_device *dev, u16 offset, u8 value) { struct ssb_bus *bus = dev->bus; if (unlikely(ssb_pci_assert_buspower(bus))) return; if (unlikely(bus->mapped_device != dev)) { if (unlikely(ssb_pci_switch_core(bus, dev))) return; } iowrite8(value, bus->mmio + offset); } static void ssb_pci_write16(struct ssb_device *dev, u16 offset, u16 value) { struct ssb_bus *bus = dev->bus; if (unlikely(ssb_pci_assert_buspower(bus))) return; if (unlikely(bus->mapped_device != dev)) { if (unlikely(ssb_pci_switch_core(bus, dev))) return; } iowrite16(value, bus->mmio + offset); } static void ssb_pci_write32(struct ssb_device *dev, u16 offset, u32 value) { struct ssb_bus *bus = dev->bus; if (unlikely(ssb_pci_assert_buspower(bus))) return; if (unlikely(bus->mapped_device != dev)) { if (unlikely(ssb_pci_switch_core(bus, dev))) return; } iowrite32(value, bus->mmio + offset); } #ifdef CONFIG_SSB_BLOCKIO static void ssb_pci_block_write(struct ssb_device *dev, const void *buffer, size_t count, u16 offset, u8 reg_width) { struct ssb_bus *bus = dev->bus; void __iomem *addr = bus->mmio + offset; if (unlikely(ssb_pci_assert_buspower(bus))) return; if (unlikely(bus->mapped_device != dev)) { if (unlikely(ssb_pci_switch_core(bus, dev))) return; } switch (reg_width) { case sizeof(u8): iowrite8_rep(addr, buffer, count); break; case sizeof(u16): SSB_WARN_ON(count & 1); iowrite16_rep(addr, buffer, count >> 1); break; case sizeof(u32): SSB_WARN_ON(count & 3); iowrite32_rep(addr, buffer, count >> 2); break; default: SSB_WARN_ON(1); } } #endif /* CONFIG_SSB_BLOCKIO */ /* Not "static", as it's used in main.c */ const struct ssb_bus_ops ssb_pci_ops = { .read8 = ssb_pci_read8, .read16 = ssb_pci_read16, .read32 = ssb_pci_read32, .write8 = ssb_pci_write8, .write16 = ssb_pci_write16, .write32 = ssb_pci_write32, #ifdef CONFIG_SSB_BLOCKIO .block_read = ssb_pci_block_read, .block_write = ssb_pci_block_write, #endif }; static ssize_t ssb_pci_attr_sprom_show(struct device *pcidev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = container_of(pcidev, struct pci_dev, dev); struct ssb_bus *bus; bus = ssb_pci_dev_to_bus(pdev); if (!bus) return -ENODEV; return ssb_attr_sprom_show(bus, buf, sprom_do_read); } static ssize_t ssb_pci_attr_sprom_store(struct device *pcidev, struct device_attribute *attr, const char *buf, size_t count) { struct pci_dev *pdev = container_of(pcidev, struct pci_dev, dev); struct ssb_bus *bus; bus = ssb_pci_dev_to_bus(pdev); if (!bus) return -ENODEV; return ssb_attr_sprom_store(bus, buf, count, sprom_check_crc, sprom_do_write); } static DEVICE_ATTR(ssb_sprom, 0600, ssb_pci_attr_sprom_show, ssb_pci_attr_sprom_store); void ssb_pci_exit(struct ssb_bus *bus) { struct pci_dev *pdev; if (bus->bustype != SSB_BUSTYPE_PCI) return; pdev = bus->host_pci; device_remove_file(&pdev->dev, &dev_attr_ssb_sprom); } int ssb_pci_init(struct ssb_bus *bus) { struct pci_dev *pdev; int err; if (bus->bustype != SSB_BUSTYPE_PCI) return 0; pdev = bus->host_pci; mutex_init(&bus->sprom_mutex); err = device_create_file(&pdev->dev, &dev_attr_ssb_sprom); if (err) goto out; out: return err; }
gpl-2.0
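The ssb_crc8() lookup table above is a table-driven form of an LSB-first CRC-8 for the stated polynomial x^8 + x^7 + x^6 + x^4 + x^2 + 1, whose bit-reversed representation is 0xAB. The userspace sketch below is a hypothetical equivalent: the bitwise routine reproduces the table entries (e.g. t[0x01] == 0xF7), and sprom_crc() mirrors how ssb_sprom_crc() feeds the image in, skipping the high byte of the last word where the CRC itself is stored.

#include <stdint.h>
#include <stdio.h>

/* Bitwise form of the driver's t[crc ^ data] lookup (reflected poly 0xAB). */
static uint8_t crc8_bitwise(uint8_t crc, uint8_t data)
{
	int i;

	crc ^= data;
	for (i = 0; i < 8; i++)
		crc = (crc & 1) ? (crc >> 1) ^ 0xAB : crc >> 1;
	return crc;
}

/* Same byte order as ssb_sprom_crc(): low byte then high byte per word,
 * low byte only for the final word, inverted at the end. */
static uint8_t sprom_crc(const uint16_t *sprom, size_t size)
{
	uint8_t crc = 0xFF;
	size_t word;

	for (word = 0; word < size - 1; word++) {
		crc = crc8_bitwise(crc, sprom[word] & 0xFF);
		crc = crc8_bitwise(crc, sprom[word] >> 8);
	}
	crc = crc8_bitwise(crc, sprom[size - 1] & 0xFF);
	return crc ^ 0xFF;
}

int main(void)
{
	uint16_t toy[4] = { 0x1234, 0x5678, 0x9ABC, 0x0000 };

	/* matches t[0x01] == 0xF7 in the driver's lookup table */
	printf("one-step CRC: %02X\n", (unsigned)crc8_bitwise(0x00, 0x01));
	printf("crc over toy image: %02X\n", (unsigned)sprom_crc(toy, 4));
	return 0;
}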
archos-sa/archos-gpl-gen9-kernel
lib/find_last_bit.c
2064
1108
/* find_last_bit.c: fallback find last bit implementation * * Copyright (C) 2008 IBM Corporation * Written by Rusty Russell <rusty@rustcorp.com.au> * (Inspired by David Howell's find_next_bit implementation) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/bitops.h> #include <linux/module.h> #include <asm/types.h> #include <asm/byteorder.h> unsigned long find_last_bit(const unsigned long *addr, unsigned long size) { unsigned long words; unsigned long tmp; /* Start at final word. */ words = size / BITS_PER_LONG; /* Partial final word? */ if (size & (BITS_PER_LONG-1)) { tmp = (addr[words] & (~0UL >> (BITS_PER_LONG - (size & (BITS_PER_LONG-1))))); if (tmp) goto found; } while (words) { tmp = addr[--words]; if (tmp) { found: return words * BITS_PER_LONG + __fls(tmp); } } /* Not found */ return size; } EXPORT_SYMBOL(find_last_bit);
gpl-2.0
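As a sanity check on the semantics of find_last_bit() above — index of the highest set bit below size, or size itself when none is set — here is a hedged userspace re-creation of the same logic with a few assertions; __builtin_clzl (a GCC/Clang builtin) stands in for the kernel's __fls().

#include <assert.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

static unsigned long demo_find_last_bit(const unsigned long *addr,
					unsigned long size)
{
	unsigned long words = size / BITS_PER_LONG;
	unsigned long tmp;

	/* Mask off bits at or above `size` in a partial final word. */
	if (size & (BITS_PER_LONG - 1)) {
		tmp = addr[words] & (~0UL >> (BITS_PER_LONG -
					      (size & (BITS_PER_LONG - 1))));
		if (tmp)
			goto found;
	}
	while (words) {
		tmp = addr[--words];
		if (tmp) {
found:
			return words * BITS_PER_LONG +
			       (BITS_PER_LONG - 1 - __builtin_clzl(tmp));
		}
	}
	return size;	/* no bit set below `size` */
}

int main(void)
{
	unsigned long map[2] = { (1UL << 3) | (1UL << 0), 1UL << 6 };

	assert(demo_find_last_bit(map, 2 * BITS_PER_LONG) ==
	       (unsigned long)BITS_PER_LONG + 6);	/* highest bit overall */
	assert(demo_find_last_bit(map, BITS_PER_LONG) == 3);	/* word 0 only */
	assert(demo_find_last_bit(map, 3) == 0);	/* bits 0..2 visible */
	return 0;
}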
tshansen/arcadekernel
net/sched/sch_red.c
3088
8951
/* * net/sched/sch_red.c Random Early Detection queue. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> * * Changes: * J Hadi Salim 980914: computation fixes * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly. * J Hadi Salim 980816: ECN support */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <net/pkt_sched.h> #include <net/inet_ecn.h> #include <net/red.h> /* Parameters, settable by user: ----------------------------- limit - bytes (must be > qth_max + burst) Hard limit on queue length, should be chosen >qth_max to allow packet bursts. This parameter does not affect the algorithms behaviour and can be chosen arbitrarily high (well, less than ram size) Really, this limit will never be reached if RED works correctly. */ struct red_sched_data { u32 limit; /* HARD maximal queue length */ unsigned char flags; struct timer_list adapt_timer; struct red_parms parms; struct red_vars vars; struct red_stats stats; struct Qdisc *qdisc; }; static inline int red_use_ecn(struct red_sched_data *q) { return q->flags & TC_RED_ECN; } static inline int red_use_harddrop(struct red_sched_data *q) { return q->flags & TC_RED_HARDDROP; } static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch) { struct red_sched_data *q = qdisc_priv(sch); struct Qdisc *child = q->qdisc; int ret; q->vars.qavg = red_calc_qavg(&q->parms, &q->vars, child->qstats.backlog); if (red_is_idling(&q->vars)) red_end_of_idle_period(&q->vars); switch (red_action(&q->parms, &q->vars, q->vars.qavg)) { case RED_DONT_MARK: break; case RED_PROB_MARK: sch->qstats.overlimits++; if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) { q->stats.prob_drop++; goto congestion_drop; } q->stats.prob_mark++; break; case RED_HARD_MARK: sch->qstats.overlimits++; if (red_use_harddrop(q) || !red_use_ecn(q) || !INET_ECN_set_ce(skb)) { q->stats.forced_drop++; goto congestion_drop; } q->stats.forced_mark++; break; } ret = qdisc_enqueue(skb, child); if (likely(ret == NET_XMIT_SUCCESS)) { sch->q.qlen++; } else if (net_xmit_drop_count(ret)) { q->stats.pdrop++; sch->qstats.drops++; } return ret; congestion_drop: qdisc_drop(skb, sch); return NET_XMIT_CN; } static struct sk_buff *red_dequeue(struct Qdisc *sch) { struct sk_buff *skb; struct red_sched_data *q = qdisc_priv(sch); struct Qdisc *child = q->qdisc; skb = child->dequeue(child); if (skb) { qdisc_bstats_update(sch, skb); sch->q.qlen--; } else { if (!red_is_idling(&q->vars)) red_start_of_idle_period(&q->vars); } return skb; } static struct sk_buff *red_peek(struct Qdisc *sch) { struct red_sched_data *q = qdisc_priv(sch); struct Qdisc *child = q->qdisc; return child->ops->peek(child); } static unsigned int red_drop(struct Qdisc *sch) { struct red_sched_data *q = qdisc_priv(sch); struct Qdisc *child = q->qdisc; unsigned int len; if (child->ops->drop && (len = child->ops->drop(child)) > 0) { q->stats.other++; sch->qstats.drops++; sch->q.qlen--; return len; } if (!red_is_idling(&q->vars)) red_start_of_idle_period(&q->vars); return 0; } static void red_reset(struct Qdisc *sch) { struct red_sched_data *q = qdisc_priv(sch); qdisc_reset(q->qdisc); sch->q.qlen = 0; red_restart(&q->vars); } static void red_destroy(struct Qdisc *sch) { struct red_sched_data *q = 
qdisc_priv(sch); del_timer_sync(&q->adapt_timer); qdisc_destroy(q->qdisc); } static const struct nla_policy red_policy[TCA_RED_MAX + 1] = { [TCA_RED_PARMS] = { .len = sizeof(struct tc_red_qopt) }, [TCA_RED_STAB] = { .len = RED_STAB_SIZE }, [TCA_RED_MAX_P] = { .type = NLA_U32 }, }; static int red_change(struct Qdisc *sch, struct nlattr *opt) { struct red_sched_data *q = qdisc_priv(sch); struct nlattr *tb[TCA_RED_MAX + 1]; struct tc_red_qopt *ctl; struct Qdisc *child = NULL; int err; u32 max_P; if (opt == NULL) return -EINVAL; err = nla_parse_nested(tb, TCA_RED_MAX, opt, red_policy); if (err < 0) return err; if (tb[TCA_RED_PARMS] == NULL || tb[TCA_RED_STAB] == NULL) return -EINVAL; max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0; ctl = nla_data(tb[TCA_RED_PARMS]); if (ctl->limit > 0) { child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit); if (IS_ERR(child)) return PTR_ERR(child); } sch_tree_lock(sch); q->flags = ctl->flags; q->limit = ctl->limit; if (child) { qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen); qdisc_destroy(q->qdisc); q->qdisc = child; } red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog, ctl->Scell_log, nla_data(tb[TCA_RED_STAB]), max_P); red_set_vars(&q->vars); del_timer(&q->adapt_timer); if (ctl->flags & TC_RED_ADAPTATIVE) mod_timer(&q->adapt_timer, jiffies + HZ/2); if (!q->qdisc->q.qlen) red_start_of_idle_period(&q->vars); sch_tree_unlock(sch); return 0; } static inline void red_adaptative_timer(unsigned long arg) { struct Qdisc *sch = (struct Qdisc *)arg; struct red_sched_data *q = qdisc_priv(sch); spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch)); spin_lock(root_lock); red_adaptative_algo(&q->parms, &q->vars); mod_timer(&q->adapt_timer, jiffies + HZ/2); spin_unlock(root_lock); } static int red_init(struct Qdisc *sch, struct nlattr *opt) { struct red_sched_data *q = qdisc_priv(sch); q->qdisc = &noop_qdisc; setup_timer(&q->adapt_timer, red_adaptative_timer, (unsigned long)sch); return red_change(sch, opt); } static int red_dump(struct Qdisc *sch, struct sk_buff *skb) { struct red_sched_data *q = qdisc_priv(sch); struct nlattr *opts = NULL; struct tc_red_qopt opt = { .limit = q->limit, .flags = q->flags, .qth_min = q->parms.qth_min >> q->parms.Wlog, .qth_max = q->parms.qth_max >> q->parms.Wlog, .Wlog = q->parms.Wlog, .Plog = q->parms.Plog, .Scell_log = q->parms.Scell_log, }; sch->qstats.backlog = q->qdisc->qstats.backlog; opts = nla_nest_start(skb, TCA_OPTIONS); if (opts == NULL) goto nla_put_failure; if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) || nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P)) goto nla_put_failure; return nla_nest_end(skb, opts); nla_put_failure: nla_nest_cancel(skb, opts); return -EMSGSIZE; } static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d) { struct red_sched_data *q = qdisc_priv(sch); struct tc_red_xstats st = { .early = q->stats.prob_drop + q->stats.forced_drop, .pdrop = q->stats.pdrop, .other = q->stats.other, .marked = q->stats.prob_mark + q->stats.forced_mark, }; return gnet_stats_copy_app(d, &st, sizeof(st)); } static int red_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, struct tcmsg *tcm) { struct red_sched_data *q = qdisc_priv(sch); tcm->tcm_handle |= TC_H_MIN(1); tcm->tcm_info = q->qdisc->handle; return 0; } static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, struct Qdisc **old) { struct red_sched_data *q = qdisc_priv(sch); if (new == NULL) new = &noop_qdisc; sch_tree_lock(sch); *old = q->qdisc; q->qdisc = new; 
qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); qdisc_reset(*old); sch_tree_unlock(sch); return 0; } static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg) { struct red_sched_data *q = qdisc_priv(sch); return q->qdisc; } static unsigned long red_get(struct Qdisc *sch, u32 classid) { return 1; } static void red_put(struct Qdisc *sch, unsigned long arg) { } static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker) { if (!walker->stop) { if (walker->count >= walker->skip) if (walker->fn(sch, 1, walker) < 0) { walker->stop = 1; return; } walker->count++; } } static const struct Qdisc_class_ops red_class_ops = { .graft = red_graft, .leaf = red_leaf, .get = red_get, .put = red_put, .walk = red_walk, .dump = red_dump_class, }; static struct Qdisc_ops red_qdisc_ops __read_mostly = { .id = "red", .priv_size = sizeof(struct red_sched_data), .cl_ops = &red_class_ops, .enqueue = red_enqueue, .dequeue = red_dequeue, .peek = red_peek, .drop = red_drop, .init = red_init, .reset = red_reset, .destroy = red_destroy, .change = red_change, .dump = red_dump, .dump_stats = red_dump_stats, .owner = THIS_MODULE, }; static int __init red_module_init(void) { return register_qdisc(&red_qdisc_ops); } static void __exit red_module_exit(void) { unregister_qdisc(&red_qdisc_ops); } module_init(red_module_init) module_exit(red_module_exit) MODULE_LICENSE("GPL");
gpl-2.0
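The enqueue path in sch_red.c above reduces to an exponentially weighted moving average of the backlog compared against two thresholds. What follows is a minimal, self-contained userspace sketch of that bookkeeping; the qth_min/qth_max/Wlog values are illustrative, and the kernel's fixed-point qavg scaling and max_P-derived drop probability are deliberately simplified away.

#include <stdio.h>

/* Illustrative thresholds; the kernel keeps qavg in fixed point
 * (scaled by 2^Wlog) inside struct red_parms, which this sketch
 * simplifies. */
#define QTH_MIN 5
#define QTH_MAX 15
#define WLOG    2	/* EWMA weight = 1/2^WLOG */

enum red_verdict { DONT_MARK, PROB_MARK, HARD_MARK };

/* qavg <- qavg + (backlog - qavg) / 2^Wlog, then compare against the
 * thresholds, mirroring what red_calc_qavg()/red_action() encapsulate. */
static enum red_verdict red_step(long *qavg, long backlog)
{
	*qavg += (backlog - *qavg) >> WLOG;
	if (*qavg < QTH_MIN)
		return DONT_MARK;
	if (*qavg >= QTH_MAX)
		return HARD_MARK;
	return PROB_MARK;	/* kernel marks/drops probabilistically here */
}

int main(void)
{
	long qavg = 0;
	const long backlog[] = { 2, 8, 20, 40, 40, 10, 0 };
	size_t i;

	for (i = 0; i < sizeof(backlog) / sizeof(backlog[0]); i++) {
		enum red_verdict v = red_step(&qavg, backlog[i]);
		printf("backlog=%2ld qavg=%2ld verdict=%d\n",
		       backlog[i], qavg, v);
	}
	return 0;
}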
MoKee/android_kernel_samsung_tuna
sound/soc/fsl/mpc5200_psc_ac97.c
3088
9170
/* * linux/sound/mpc5200-ac97.c -- AC97 support for the Freescale MPC52xx chip. * * Copyright (C) 2009 Jon Smirl, Digispeaker * Author: Jon Smirl <jonsmirl@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <linux/delay.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <asm/time.h> #include <asm/delay.h> #include <asm/mpc52xx.h> #include <asm/mpc52xx_psc.h> #include "mpc5200_dma.h" #include "mpc5200_psc_ac97.h" #define DRV_NAME "mpc5200-psc-ac97" /* ALSA only supports a single AC97 device so static is recommend here */ static struct psc_dma *psc_dma; static unsigned short psc_ac97_read(struct snd_ac97 *ac97, unsigned short reg) { int status; unsigned int val; mutex_lock(&psc_dma->mutex); /* Wait for command send status zero = ready */ status = spin_event_timeout(!(in_be16(&psc_dma->psc_regs->sr_csr.status) & MPC52xx_PSC_SR_CMDSEND), 100, 0); if (status == 0) { pr_err("timeout on ac97 bus (rdy)\n"); mutex_unlock(&psc_dma->mutex); return -ENODEV; } /* Force clear the data valid bit */ in_be32(&psc_dma->psc_regs->ac97_data); /* Send the read */ out_be32(&psc_dma->psc_regs->ac97_cmd, (1<<31) | ((reg & 0x7f) << 24)); /* Wait for the answer */ status = spin_event_timeout((in_be16(&psc_dma->psc_regs->sr_csr.status) & MPC52xx_PSC_SR_DATA_VAL), 100, 0); if (status == 0) { pr_err("timeout on ac97 read (val) %x\n", in_be16(&psc_dma->psc_regs->sr_csr.status)); mutex_unlock(&psc_dma->mutex); return -ENODEV; } /* Get the data */ val = in_be32(&psc_dma->psc_regs->ac97_data); if (((val >> 24) & 0x7f) != reg) { pr_err("reg echo error on ac97 read\n"); mutex_unlock(&psc_dma->mutex); return -ENODEV; } val = (val >> 8) & 0xffff; mutex_unlock(&psc_dma->mutex); return (unsigned short) val; } static void psc_ac97_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short val) { int status; mutex_lock(&psc_dma->mutex); /* Wait for command status zero = ready */ status = spin_event_timeout(!(in_be16(&psc_dma->psc_regs->sr_csr.status) & MPC52xx_PSC_SR_CMDSEND), 100, 0); if (status == 0) { pr_err("timeout on ac97 bus (write)\n"); goto out; } /* Write data */ out_be32(&psc_dma->psc_regs->ac97_cmd, ((reg & 0x7f) << 24) | (val << 8)); out: mutex_unlock(&psc_dma->mutex); } static void psc_ac97_warm_reset(struct snd_ac97 *ac97) { struct mpc52xx_psc __iomem *regs = psc_dma->psc_regs; mutex_lock(&psc_dma->mutex); out_be32(&regs->sicr, psc_dma->sicr | MPC52xx_PSC_SICR_AWR); udelay(3); out_be32(&regs->sicr, psc_dma->sicr); mutex_unlock(&psc_dma->mutex); } static void psc_ac97_cold_reset(struct snd_ac97 *ac97) { struct mpc52xx_psc __iomem *regs = psc_dma->psc_regs; mutex_lock(&psc_dma->mutex); dev_dbg(psc_dma->dev, "cold reset\n"); mpc5200_psc_ac97_gpio_reset(psc_dma->id); /* Notify the PSC that a reset has occurred */ out_be32(&regs->sicr, psc_dma->sicr | MPC52xx_PSC_SICR_ACRB); /* Re-enable RX and TX */ out_8(&regs->command, MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE); mutex_unlock(&psc_dma->mutex); msleep(1); psc_ac97_warm_reset(ac97); } struct snd_ac97_bus_ops soc_ac97_ops = { .read = psc_ac97_read, .write = psc_ac97_write, .reset = psc_ac97_cold_reset, .warm_reset = psc_ac97_warm_reset, }; EXPORT_SYMBOL_GPL(soc_ac97_ops); static int psc_ac97_hw_analog_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct 
snd_soc_dai *cpu_dai) { struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(cpu_dai); struct psc_dma_stream *s = to_psc_dma_stream(substream, psc_dma); dev_dbg(psc_dma->dev, "%s(substream=%p) p_size=%i p_bytes=%i" " periods=%i buffer_size=%i buffer_bytes=%i channels=%i" " rate=%i format=%i\n", __func__, substream, params_period_size(params), params_period_bytes(params), params_periods(params), params_buffer_size(params), params_buffer_bytes(params), params_channels(params), params_rate(params), params_format(params)); /* Determine the set of enable bits to turn on */ s->ac97_slot_bits = (params_channels(params) == 1) ? 0x100 : 0x300; if (substream->pstr->stream != SNDRV_PCM_STREAM_CAPTURE) s->ac97_slot_bits <<= 16; return 0; } static int psc_ac97_hw_digital_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *cpu_dai) { struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(cpu_dai); dev_dbg(psc_dma->dev, "%s(substream=%p)\n", __func__, substream); if (params_channels(params) == 1) out_be32(&psc_dma->psc_regs->ac97_slots, 0x01000000); else out_be32(&psc_dma->psc_regs->ac97_slots, 0x03000000); return 0; } static int psc_ac97_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(dai); struct psc_dma_stream *s = to_psc_dma_stream(substream, psc_dma); switch (cmd) { case SNDRV_PCM_TRIGGER_START: dev_dbg(psc_dma->dev, "AC97 START: stream=%i\n", substream->pstr->stream); /* Set the slot enable bits */ psc_dma->slots |= s->ac97_slot_bits; out_be32(&psc_dma->psc_regs->ac97_slots, psc_dma->slots); break; case SNDRV_PCM_TRIGGER_STOP: dev_dbg(psc_dma->dev, "AC97 STOP: stream=%i\n", substream->pstr->stream); /* Clear the slot enable bits */ psc_dma->slots &= ~(s->ac97_slot_bits); out_be32(&psc_dma->psc_regs->ac97_slots, psc_dma->slots); break; } return 0; } static int psc_ac97_probe(struct snd_soc_dai *cpu_dai) { struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(cpu_dai); struct mpc52xx_psc __iomem *regs = psc_dma->psc_regs; /* Go */ out_8(&regs->command, MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE); return 0; } /* --------------------------------------------------------------------- * ALSA SoC Bindings * * - Digital Audio Interface (DAI) template * - create/destroy dai hooks */ /** * psc_ac97_dai_template: template CPU Digital Audio Interface */ static struct snd_soc_dai_ops psc_ac97_analog_ops = { .hw_params = psc_ac97_hw_analog_params, .trigger = psc_ac97_trigger, }; static struct snd_soc_dai_ops psc_ac97_digital_ops = { .hw_params = psc_ac97_hw_digital_params, }; static struct snd_soc_dai_driver psc_ac97_dai[] = { { .ac97_control = 1, .probe = psc_ac97_probe, .playback = { .channels_min = 1, .channels_max = 6, .rates = SNDRV_PCM_RATE_8000_48000, .formats = SNDRV_PCM_FMTBIT_S32_BE, }, .capture = { .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_48000, .formats = SNDRV_PCM_FMTBIT_S32_BE, }, .ops = &psc_ac97_analog_ops, }, { .ac97_control = 1, .playback = { .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_32000 | \ SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000, .formats = SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_BE, }, .ops = &psc_ac97_digital_ops, } }; /* --------------------------------------------------------------------- * OF platform bus binding code: * - Probe/remove operations * - OF device match table */ static int __devinit psc_ac97_of_probe(struct platform_device *op) { int rc; struct snd_ac97 ac97; struct mpc52xx_psc __iomem *regs; rc = 
snd_soc_register_dais(&op->dev, psc_ac97_dai, ARRAY_SIZE(psc_ac97_dai)); if (rc != 0) { dev_err(&op->dev, "Failed to register DAI\n"); return rc; } psc_dma = dev_get_drvdata(&op->dev); regs = psc_dma->psc_regs; ac97.private_data = psc_dma; psc_dma->imr = 0; out_be16(&psc_dma->psc_regs->isr_imr.imr, psc_dma->imr); /* Configure the serial interface mode to AC97 */ psc_dma->sicr = MPC52xx_PSC_SICR_SIM_AC97 | MPC52xx_PSC_SICR_ENAC97; out_be32(&regs->sicr, psc_dma->sicr); /* No slots active */ out_be32(&regs->ac97_slots, 0x00000000); return 0; } static int __devexit psc_ac97_of_remove(struct platform_device *op) { snd_soc_unregister_dais(&op->dev, ARRAY_SIZE(psc_ac97_dai)); return 0; } /* Match table for of_platform binding */ static struct of_device_id psc_ac97_match[] __devinitdata = { { .compatible = "fsl,mpc5200-psc-ac97", }, { .compatible = "fsl,mpc5200b-psc-ac97", }, {} }; MODULE_DEVICE_TABLE(of, psc_ac97_match); static struct platform_driver psc_ac97_driver = { .probe = psc_ac97_of_probe, .remove = __devexit_p(psc_ac97_of_remove), .driver = { .name = "mpc5200-psc-ac97", .owner = THIS_MODULE, .of_match_table = psc_ac97_match, }, }; /* --------------------------------------------------------------------- * Module setup and teardown; simply register the of_platform driver * for the PSC in AC97 mode. */ static int __init psc_ac97_init(void) { return platform_driver_register(&psc_ac97_driver); } module_init(psc_ac97_init); static void __exit psc_ac97_exit(void) { platform_driver_unregister(&psc_ac97_driver); } module_exit(psc_ac97_exit); MODULE_AUTHOR("Jon Smirl <jonsmirl@gmail.com>"); MODULE_DESCRIPTION("mpc5200 AC97 module"); MODULE_LICENSE("GPL");
gpl-2.0
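Both psc_ac97_read() and psc_ac97_write() above lean on the same poll-with-timeout idiom (spin_event_timeout() on a status bit, bailing out with an error on expiry). Below is a hedged userspace approximation of that idiom; poll_until() and fake_ready() are made-up names, and the kernel helper's extra delay parameter is omitted.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Spin until cond() is true or roughly timeout_us elapses, returning
 * false on timeout -- the same shape as the driver's guards around the
 * CMDSEND/DATA_VAL status bits. */
static bool poll_until(bool (*cond)(void), long timeout_us)
{
	struct timespec start, now;
	long elapsed;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (cond())
			return true;
		clock_gettime(CLOCK_MONOTONIC, &now);
		elapsed = (now.tv_sec - start.tv_sec) * 1000000L +
			  (now.tv_nsec - start.tv_nsec) / 1000L;
		if (elapsed > timeout_us)
			return false;
	}
}

static int calls;
static bool fake_ready(void)	/* stand-in for reading a hardware status bit */
{
	return ++calls > 3;
}

int main(void)
{
	printf("ready: %s\n", poll_until(fake_ready, 100) ? "yes" : "timeout");
	return 0;
}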
Excito/kernel
sound/soc/fsl/mpc5200_psc_i2s.c
3088
6900
/* * Freescale MPC5200 PSC in I2S mode * ALSA SoC Digital Audio Interface (DAI) driver * * Copyright (C) 2008 Secret Lab Technologies Ltd. * Copyright (C) 2009 Jon Smirl, Digispeaker */ #include <linux/module.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <asm/mpc52xx_psc.h> #include "mpc5200_dma.h" /** * PSC_I2S_RATES: sample rates supported by the I2S * * This driver currently only supports the PSC running in I2S slave mode, * which means the codec determines the sample rate. Therefore, we tell * ALSA that we support all rates and let the codec driver decide what rates * are really supported. */ #define PSC_I2S_RATES (SNDRV_PCM_RATE_5512 | SNDRV_PCM_RATE_8000_192000 | \ SNDRV_PCM_RATE_CONTINUOUS) /** * PSC_I2S_FORMATS: audio formats supported by the PSC I2S mode */ #define PSC_I2S_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_BE | \ SNDRV_PCM_FMTBIT_S24_BE | SNDRV_PCM_FMTBIT_S32_BE) static int psc_i2s_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai); u32 mode; dev_dbg(psc_dma->dev, "%s(substream=%p) p_size=%i p_bytes=%i" " periods=%i buffer_size=%i buffer_bytes=%i\n", __func__, substream, params_period_size(params), params_period_bytes(params), params_periods(params), params_buffer_size(params), params_buffer_bytes(params)); switch (params_format(params)) { case SNDRV_PCM_FORMAT_S8: mode = MPC52xx_PSC_SICR_SIM_CODEC_8; break; case SNDRV_PCM_FORMAT_S16_BE: mode = MPC52xx_PSC_SICR_SIM_CODEC_16; break; case SNDRV_PCM_FORMAT_S24_BE: mode = MPC52xx_PSC_SICR_SIM_CODEC_24; break; case SNDRV_PCM_FORMAT_S32_BE: mode = MPC52xx_PSC_SICR_SIM_CODEC_32; break; default: dev_dbg(psc_dma->dev, "invalid format\n"); return -EINVAL; } out_be32(&psc_dma->psc_regs->sicr, psc_dma->sicr | mode); return 0; } /** * psc_i2s_set_sysclk: set the clock frequency and direction * * This function is called by the machine driver to tell us what the clock * frequency and direction are. * * Currently, we only support operating as a clock slave (SND_SOC_CLOCK_IN), * and we don't care about the frequency. Return an error if the direction * is not SND_SOC_CLOCK_IN. * * @clk_id: reserved, should be zero * @freq: the frequency of the given clock ID, currently ignored * @dir: SND_SOC_CLOCK_IN (clock slave) or SND_SOC_CLOCK_OUT (clock master) */ static int psc_i2s_set_sysclk(struct snd_soc_dai *cpu_dai, int clk_id, unsigned int freq, int dir) { struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(cpu_dai); dev_dbg(psc_dma->dev, "psc_i2s_set_sysclk(cpu_dai=%p, dir=%i)\n", cpu_dai, dir); return (dir == SND_SOC_CLOCK_IN) ? 0 : -EINVAL; } /** * psc_i2s_set_fmt: set the serial format. * * This function is called by the machine driver to tell us what serial * format to use. * * This driver only supports I2S mode. Return an error if the format is * not SND_SOC_DAIFMT_I2S. * * @format: one of SND_SOC_DAIFMT_xxx */ static int psc_i2s_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int format) { struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(cpu_dai); dev_dbg(psc_dma->dev, "psc_i2s_set_fmt(cpu_dai=%p, format=%i)\n", cpu_dai, format); return (format == SND_SOC_DAIFMT_I2S) ? 
0 : -EINVAL; } /* --------------------------------------------------------------------- * ALSA SoC Bindings * * - Digital Audio Interface (DAI) template * - create/destroy dai hooks */ /** * psc_i2s_dai_template: template CPU Digital Audio Interface */ static struct snd_soc_dai_ops psc_i2s_dai_ops = { .hw_params = psc_i2s_hw_params, .set_sysclk = psc_i2s_set_sysclk, .set_fmt = psc_i2s_set_fmt, }; static struct snd_soc_dai_driver psc_i2s_dai[] = {{ .playback = { .channels_min = 2, .channels_max = 2, .rates = PSC_I2S_RATES, .formats = PSC_I2S_FORMATS, }, .capture = { .channels_min = 2, .channels_max = 2, .rates = PSC_I2S_RATES, .formats = PSC_I2S_FORMATS, }, .ops = &psc_i2s_dai_ops, } }; /* --------------------------------------------------------------------- * OF platform bus binding code: * - Probe/remove operations * - OF device match table */ static int __devinit psc_i2s_of_probe(struct platform_device *op) { int rc; struct psc_dma *psc_dma; struct mpc52xx_psc __iomem *regs; rc = snd_soc_register_dais(&op->dev, psc_i2s_dai, ARRAY_SIZE(psc_i2s_dai)); if (rc != 0) { pr_err("Failed to register DAI\n"); return rc; } psc_dma = dev_get_drvdata(&op->dev); regs = psc_dma->psc_regs; /* Configure the serial interface mode; defaulting to CODEC8 mode */ psc_dma->sicr = MPC52xx_PSC_SICR_DTS1 | MPC52xx_PSC_SICR_I2S | MPC52xx_PSC_SICR_CLKPOL; out_be32(&psc_dma->psc_regs->sicr, psc_dma->sicr | MPC52xx_PSC_SICR_SIM_CODEC_8); /* Check for the codec handle. If it is not present then we * are done */ if (!of_get_property(op->dev.of_node, "codec-handle", NULL)) return 0; /* Due to errata in the dma mode; need to line up enabling * the transmitter with a transition on the frame sync * line */ /* first make sure it is low */ while ((in_8(&regs->ipcr_acr.ipcr) & 0x80) != 0) ; /* then wait for the transition to high */ while ((in_8(&regs->ipcr_acr.ipcr) & 0x80) == 0) ; /* Finally, enable the PSC. * Receiver must always be enabled; even when we only want * transmit. (see 15.3.2.3 of MPC5200B User's Guide) */ /* Go */ out_8(&psc_dma->psc_regs->command, MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE); return 0; } static int __devexit psc_i2s_of_remove(struct platform_device *op) { snd_soc_unregister_dais(&op->dev, ARRAY_SIZE(psc_i2s_dai)); return 0; } /* Match table for of_platform binding */ static struct of_device_id psc_i2s_match[] __devinitdata = { { .compatible = "fsl,mpc5200-psc-i2s", }, { .compatible = "fsl,mpc5200b-psc-i2s", }, {} }; MODULE_DEVICE_TABLE(of, psc_i2s_match); static struct platform_driver psc_i2s_driver = { .probe = psc_i2s_of_probe, .remove = __devexit_p(psc_i2s_of_remove), .driver = { .name = "mpc5200-psc-i2s", .owner = THIS_MODULE, .of_match_table = psc_i2s_match, }, }; /* --------------------------------------------------------------------- * Module setup and teardown; simply register the of_platform driver * for the PSC in I2S mode. */ static int __init psc_i2s_init(void) { return platform_driver_register(&psc_i2s_driver); } module_init(psc_i2s_init); static void __exit psc_i2s_exit(void) { platform_driver_unregister(&psc_i2s_driver); } module_exit(psc_i2s_exit); MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>"); MODULE_DESCRIPTION("Freescale MPC5200 PSC in I2S mode ASoC Driver"); MODULE_LICENSE("GPL");
gpl-2.0
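psc_i2s_hw_params() above is essentially a table from ALSA sample formats to SICR codec-mode bits, with -EINVAL for anything unsupported. The sketch below mirrors only that switch shape in freestanding C; the enum names and bit values are placeholders, not the real SNDRV_PCM_FORMAT_* or MPC52xx_PSC_SICR_SIM_CODEC_* constants from the kernel headers.

#include <stdio.h>

/* Hypothetical stand-ins for the format and register constants. */
enum pcm_format { FMT_S8, FMT_S16_BE, FMT_S24_BE, FMT_S32_BE };

static int format_to_sicr_mode(enum pcm_format fmt, unsigned int *mode)
{
	switch (fmt) {
	case FMT_S8:     *mode = 0x0u << 24; break;	/* CODEC_8  */
	case FMT_S16_BE: *mode = 0x1u << 24; break;	/* CODEC_16 */
	case FMT_S24_BE: *mode = 0x2u << 24; break;	/* CODEC_24 */
	case FMT_S32_BE: *mode = 0x3u << 24; break;	/* CODEC_32 */
	default:         return -1;			/* -EINVAL in the driver */
	}
	return 0;
}

int main(void)
{
	unsigned int mode;

	if (format_to_sicr_mode(FMT_S16_BE, &mode) == 0)
		printf("sicr mode bits: 0x%08x\n", mode);
	return 0;
}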
Danile71/u8850_kernel
arch/cris/kernel/module.c
3344
3221
/* Kernel module help for i386.
   Copyright (C) 2001 Rusty Russell.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt , ...)
#endif

#ifdef CONFIG_ETRAX_KMALLOCED_MODULES
#define MALLOC_MODULE(size) kmalloc(size, GFP_KERNEL)
#define FREE_MODULE(region) kfree(region)
#else
#define MALLOC_MODULE(size) vmalloc_exec(size)
#define FREE_MODULE(region) vfree(region)
#endif

void *module_alloc(unsigned long size)
{
	if (size == 0)
		return NULL;
	return MALLOC_MODULE(size);
}

/* Free memory returned from module_alloc */
void module_free(struct module *mod, void *module_region)
{
	FREE_MODULE(module_region);
}

/* We don't need anything special. */
int module_frob_arch_sections(Elf_Ehdr *hdr,
			      Elf_Shdr *sechdrs,
			      char *secstrings,
			      struct module *mod)
{
	return 0;
}

int apply_relocate(Elf32_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
	return -ENOEXEC;
}

int apply_relocate_add(Elf32_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;

	DEBUGP("Applying add relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
		/* This is where to make the change */
		uint32_t *loc =
			((void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			 + rela[i].r_offset);
		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved. */
		Elf32_Sym *sym = ((Elf32_Sym *)sechdrs[symindex].sh_addr
				  + ELF32_R_SYM(rela[i].r_info));

		switch (ELF32_R_TYPE(rela[i].r_info)) {
		case R_CRIS_32:
			*loc = sym->st_value + rela[i].r_addend;
			break;
		case R_CRIS_32_PCREL:
			*loc = sym->st_value - (unsigned)loc + rela[i].r_addend - 4;
			break;
		default:
			printk(KERN_ERR "module %s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rela[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}

int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	return 0;
}

void module_arch_cleanup(struct module *mod)
{
}
gpl-2.0
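The two relocation types handled by apply_relocate_add() above compute S + A for R_CRIS_32 and S - P + A - 4 for R_CRIS_32_PCREL, where S is the symbol value, A the addend, and P the address of the patched slot. A compact userspace rendering of those two formulas:

#include <stdint.h>
#include <stdio.h>

/* Absolute relocation: slot gets S + A. */
static void reloc_cris_32(uint32_t *loc, uint32_t S, int32_t A)
{
	*loc = S + A;
}

/* PC-relative relocation: slot gets S - P + A - 4, with P taken as
 * the address of the slot itself, as in the driver. */
static void reloc_cris_32_pcrel(uint32_t *loc, uint32_t S, int32_t A)
{
	*loc = S - (uint32_t)(uintptr_t)loc + A - 4;
}

int main(void)
{
	uint32_t slot = 0;

	reloc_cris_32(&slot, 0x1000, 8);
	printf("abs reloc:    0x%x\n", slot);
	reloc_cris_32_pcrel(&slot, 0x1000, 0);
	printf("pc-rel reloc: 0x%x\n", slot);
	return 0;
}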
ArolWright/android_kernel_motorola_msm8916
arch/sparc/kernel/visemul.c
4368
20085
/* visemul.c: Emulation of VIS instructions. * * Copyright (C) 2006 David S. Miller (davem@davemloft.net) */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/thread_info.h> #include <linux/perf_event.h> #include <asm/ptrace.h> #include <asm/pstate.h> #include <asm/fpumacro.h> #include <asm/uaccess.h> #include <asm/cacheflush.h> /* OPF field of various VIS instructions. */ /* 000111011 - four 16-bit packs */ #define FPACK16_OPF 0x03b /* 000111010 - two 32-bit packs */ #define FPACK32_OPF 0x03a /* 000111101 - four 16-bit packs */ #define FPACKFIX_OPF 0x03d /* 001001101 - four 16-bit expands */ #define FEXPAND_OPF 0x04d /* 001001011 - two 32-bit merges */ #define FPMERGE_OPF 0x04b /* 000110001 - 8-by-16-bit partitoned product */ #define FMUL8x16_OPF 0x031 /* 000110011 - 8-by-16-bit upper alpha partitioned product */ #define FMUL8x16AU_OPF 0x033 /* 000110101 - 8-by-16-bit lower alpha partitioned product */ #define FMUL8x16AL_OPF 0x035 /* 000110110 - upper 8-by-16-bit partitioned product */ #define FMUL8SUx16_OPF 0x036 /* 000110111 - lower 8-by-16-bit partitioned product */ #define FMUL8ULx16_OPF 0x037 /* 000111000 - upper 8-by-16-bit partitioned product */ #define FMULD8SUx16_OPF 0x038 /* 000111001 - lower unsigned 8-by-16-bit partitioned product */ #define FMULD8ULx16_OPF 0x039 /* 000101000 - four 16-bit compare; set rd if src1 > src2 */ #define FCMPGT16_OPF 0x028 /* 000101100 - two 32-bit compare; set rd if src1 > src2 */ #define FCMPGT32_OPF 0x02c /* 000100000 - four 16-bit compare; set rd if src1 <= src2 */ #define FCMPLE16_OPF 0x020 /* 000100100 - two 32-bit compare; set rd if src1 <= src2 */ #define FCMPLE32_OPF 0x024 /* 000100010 - four 16-bit compare; set rd if src1 != src2 */ #define FCMPNE16_OPF 0x022 /* 000100110 - two 32-bit compare; set rd if src1 != src2 */ #define FCMPNE32_OPF 0x026 /* 000101010 - four 16-bit compare; set rd if src1 == src2 */ #define FCMPEQ16_OPF 0x02a /* 000101110 - two 32-bit compare; set rd if src1 == src2 */ #define FCMPEQ32_OPF 0x02e /* 000000000 - Eight 8-bit edge boundary processing */ #define EDGE8_OPF 0x000 /* 000000001 - Eight 8-bit edge boundary processing, no CC */ #define EDGE8N_OPF 0x001 /* 000000010 - Eight 8-bit edge boundary processing, little-endian */ #define EDGE8L_OPF 0x002 /* 000000011 - Eight 8-bit edge boundary processing, little-endian, no CC */ #define EDGE8LN_OPF 0x003 /* 000000100 - Four 16-bit edge boundary processing */ #define EDGE16_OPF 0x004 /* 000000101 - Four 16-bit edge boundary processing, no CC */ #define EDGE16N_OPF 0x005 /* 000000110 - Four 16-bit edge boundary processing, little-endian */ #define EDGE16L_OPF 0x006 /* 000000111 - Four 16-bit edge boundary processing, little-endian, no CC */ #define EDGE16LN_OPF 0x007 /* 000001000 - Two 32-bit edge boundary processing */ #define EDGE32_OPF 0x008 /* 000001001 - Two 32-bit edge boundary processing, no CC */ #define EDGE32N_OPF 0x009 /* 000001010 - Two 32-bit edge boundary processing, little-endian */ #define EDGE32L_OPF 0x00a /* 000001011 - Two 32-bit edge boundary processing, little-endian, no CC */ #define EDGE32LN_OPF 0x00b /* 000111110 - distance between 8 8-bit components */ #define PDIST_OPF 0x03e /* 000010000 - convert 8-bit 3-D address to blocked byte address */ #define ARRAY8_OPF 0x010 /* 000010010 - convert 16-bit 3-D address to blocked byte address */ #define ARRAY16_OPF 0x012 /* 000010100 - convert 32-bit 3-D address to blocked byte address */ #define ARRAY32_OPF 0x014 /* 000011001 - Set the GSR.MASK field in preparation for a BSHUFFLE */ 
#define BMASK_OPF 0x019 /* 001001100 - Permute bytes as specified by GSR.MASK */ #define BSHUFFLE_OPF 0x04c #define VIS_OPF_SHIFT 5 #define VIS_OPF_MASK (0x1ff << VIS_OPF_SHIFT) #define RS1(INSN) (((INSN) >> 14) & 0x1f) #define RS2(INSN) (((INSN) >> 0) & 0x1f) #define RD(INSN) (((INSN) >> 25) & 0x1f) static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2, unsigned int rd, int from_kernel) { if (rs2 >= 16 || rs1 >= 16 || rd >= 16) { if (from_kernel != 0) __asm__ __volatile__("flushw"); else flushw_user(); } } static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) { unsigned long value, fp; if (reg < 16) return (!reg ? 0 : regs->u_regs[reg]); fp = regs->u_regs[UREG_FP]; if (regs->tstate & TSTATE_PRIV) { struct reg_window *win; win = (struct reg_window *)(fp + STACK_BIAS); value = win->locals[reg - 16]; } else if (!test_thread_64bit_stack(fp)) { struct reg_window32 __user *win32; win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp)); get_user(value, &win32->locals[reg - 16]); } else { struct reg_window __user *win; win = (struct reg_window __user *)(fp + STACK_BIAS); get_user(value, &win->locals[reg - 16]); } return value; } static inline unsigned long __user *__fetch_reg_addr_user(unsigned int reg, struct pt_regs *regs) { unsigned long fp = regs->u_regs[UREG_FP]; BUG_ON(reg < 16); BUG_ON(regs->tstate & TSTATE_PRIV); if (!test_thread_64bit_stack(fp)) { struct reg_window32 __user *win32; win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp)); return (unsigned long __user *)&win32->locals[reg - 16]; } else { struct reg_window __user *win; win = (struct reg_window __user *)(fp + STACK_BIAS); return &win->locals[reg - 16]; } } static inline unsigned long *__fetch_reg_addr_kern(unsigned int reg, struct pt_regs *regs) { BUG_ON(reg >= 16); BUG_ON(regs->tstate & TSTATE_PRIV); return &regs->u_regs[reg]; } static void store_reg(struct pt_regs *regs, unsigned long val, unsigned long rd) { if (rd < 16) { unsigned long *rd_kern = __fetch_reg_addr_kern(rd, regs); *rd_kern = val; } else { unsigned long __user *rd_user = __fetch_reg_addr_user(rd, regs); if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) __put_user((u32)val, (u32 __user *)rd_user); else __put_user(val, rd_user); } } static inline unsigned long fpd_regval(struct fpustate *f, unsigned int insn_regnum) { insn_regnum = (((insn_regnum & 1) << 5) | (insn_regnum & 0x1e)); return *(unsigned long *) &f->regs[insn_regnum]; } static inline unsigned long *fpd_regaddr(struct fpustate *f, unsigned int insn_regnum) { insn_regnum = (((insn_regnum & 1) << 5) | (insn_regnum & 0x1e)); return (unsigned long *) &f->regs[insn_regnum]; } static inline unsigned int fps_regval(struct fpustate *f, unsigned int insn_regnum) { return f->regs[insn_regnum]; } static inline unsigned int *fps_regaddr(struct fpustate *f, unsigned int insn_regnum) { return &f->regs[insn_regnum]; } struct edge_tab { u16 left, right; }; static struct edge_tab edge8_tab[8] = { { 0xff, 0x80 }, { 0x7f, 0xc0 }, { 0x3f, 0xe0 }, { 0x1f, 0xf0 }, { 0x0f, 0xf8 }, { 0x07, 0xfc }, { 0x03, 0xfe }, { 0x01, 0xff }, }; static struct edge_tab edge8_tab_l[8] = { { 0xff, 0x01 }, { 0xfe, 0x03 }, { 0xfc, 0x07 }, { 0xf8, 0x0f }, { 0xf0, 0x1f }, { 0xe0, 0x3f }, { 0xc0, 0x7f }, { 0x80, 0xff }, }; static struct edge_tab edge16_tab[4] = { { 0xf, 0x8 }, { 0x7, 0xc }, { 0x3, 0xe }, { 0x1, 0xf }, }; static struct edge_tab edge16_tab_l[4] = { { 0xf, 0x1 }, { 0xe, 0x3 }, { 0xc, 0x7 }, { 0x8, 0xf }, }; static struct edge_tab edge32_tab[2] = { { 0x3, 0x2 }, { 0x1, 0x3 
}, }; static struct edge_tab edge32_tab_l[2] = { { 0x3, 0x1 }, { 0x2, 0x3 }, }; static void edge(struct pt_regs *regs, unsigned int insn, unsigned int opf) { unsigned long orig_rs1, rs1, orig_rs2, rs2, rd_val; u16 left, right; maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0); orig_rs1 = rs1 = fetch_reg(RS1(insn), regs); orig_rs2 = rs2 = fetch_reg(RS2(insn), regs); if (test_thread_flag(TIF_32BIT)) { rs1 = rs1 & 0xffffffff; rs2 = rs2 & 0xffffffff; } switch (opf) { default: case EDGE8_OPF: case EDGE8N_OPF: left = edge8_tab[rs1 & 0x7].left; right = edge8_tab[rs2 & 0x7].right; break; case EDGE8L_OPF: case EDGE8LN_OPF: left = edge8_tab_l[rs1 & 0x7].left; right = edge8_tab_l[rs2 & 0x7].right; break; case EDGE16_OPF: case EDGE16N_OPF: left = edge16_tab[(rs1 >> 1) & 0x3].left; right = edge16_tab[(rs2 >> 1) & 0x3].right; break; case EDGE16L_OPF: case EDGE16LN_OPF: left = edge16_tab_l[(rs1 >> 1) & 0x3].left; right = edge16_tab_l[(rs2 >> 1) & 0x3].right; break; case EDGE32_OPF: case EDGE32N_OPF: left = edge32_tab[(rs1 >> 2) & 0x1].left; right = edge32_tab[(rs2 >> 2) & 0x1].right; break; case EDGE32L_OPF: case EDGE32LN_OPF: left = edge32_tab_l[(rs1 >> 2) & 0x1].left; right = edge32_tab_l[(rs2 >> 2) & 0x1].right; break; } if ((rs1 & ~0x7UL) == (rs2 & ~0x7UL)) rd_val = right & left; else rd_val = left; store_reg(regs, rd_val, RD(insn)); switch (opf) { case EDGE8_OPF: case EDGE8L_OPF: case EDGE16_OPF: case EDGE16L_OPF: case EDGE32_OPF: case EDGE32L_OPF: { unsigned long ccr, tstate; __asm__ __volatile__("subcc %1, %2, %%g0\n\t" "rd %%ccr, %0" : "=r" (ccr) : "r" (orig_rs1), "r" (orig_rs2) : "cc"); tstate = regs->tstate & ~(TSTATE_XCC | TSTATE_ICC); regs->tstate = tstate | (ccr << 32UL); } } } static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf) { unsigned long rs1, rs2, rd_val; unsigned int bits, bits_mask; maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0); rs1 = fetch_reg(RS1(insn), regs); rs2 = fetch_reg(RS2(insn), regs); bits = (rs2 > 5 ? 
5 : rs2); bits_mask = (1UL << bits) - 1UL; rd_val = ((((rs1 >> 11) & 0x3) << 0) | (((rs1 >> 33) & 0x3) << 2) | (((rs1 >> 55) & 0x1) << 4) | (((rs1 >> 13) & 0xf) << 5) | (((rs1 >> 35) & 0xf) << 9) | (((rs1 >> 56) & 0xf) << 13) | (((rs1 >> 17) & bits_mask) << 17) | (((rs1 >> 39) & bits_mask) << (17 + bits)) | (((rs1 >> 60) & 0xf) << (17 + (2*bits)))); switch (opf) { case ARRAY16_OPF: rd_val <<= 1; break; case ARRAY32_OPF: rd_val <<= 2; } store_reg(regs, rd_val, RD(insn)); } static void bmask(struct pt_regs *regs, unsigned int insn) { unsigned long rs1, rs2, rd_val, gsr; maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0); rs1 = fetch_reg(RS1(insn), regs); rs2 = fetch_reg(RS2(insn), regs); rd_val = rs1 + rs2; store_reg(regs, rd_val, RD(insn)); gsr = current_thread_info()->gsr[0] & 0xffffffff; gsr |= rd_val << 32UL; current_thread_info()->gsr[0] = gsr; } static void bshuffle(struct pt_regs *regs, unsigned int insn) { struct fpustate *f = FPUSTATE; unsigned long rs1, rs2, rd_val; unsigned long bmask, i; bmask = current_thread_info()->gsr[0] >> 32UL; rs1 = fpd_regval(f, RS1(insn)); rs2 = fpd_regval(f, RS2(insn)); rd_val = 0UL; for (i = 0; i < 8; i++) { unsigned long which = (bmask >> (i * 4)) & 0xf; unsigned long byte; if (which < 8) byte = (rs1 >> (which * 8)) & 0xff; else byte = (rs2 >> ((which-8)*8)) & 0xff; rd_val |= (byte << (i * 8)); } *fpd_regaddr(f, RD(insn)) = rd_val; } static void pdist(struct pt_regs *regs, unsigned int insn) { struct fpustate *f = FPUSTATE; unsigned long rs1, rs2, *rd, rd_val; unsigned long i; rs1 = fpd_regval(f, RS1(insn)); rs2 = fpd_regval(f, RS2(insn)); rd = fpd_regaddr(f, RD(insn)); rd_val = *rd; for (i = 0; i < 8; i++) { s16 s1, s2; s1 = (rs1 >> (56 - (i * 8))) & 0xff; s2 = (rs2 >> (56 - (i * 8))) & 0xff; /* Absolute value of difference. */ s1 -= s2; if (s1 < 0) s1 = ~s1 + 1; rd_val += s1; } *rd = rd_val; } static void pformat(struct pt_regs *regs, unsigned int insn, unsigned int opf) { struct fpustate *f = FPUSTATE; unsigned long rs1, rs2, gsr, scale, rd_val; gsr = current_thread_info()->gsr[0]; scale = (gsr >> 3) & (opf == FPACK16_OPF ? 0xf : 0x1f); switch (opf) { case FPACK16_OPF: { unsigned long byte; rs2 = fpd_regval(f, RS2(insn)); rd_val = 0; for (byte = 0; byte < 4; byte++) { unsigned int val; s16 src = (rs2 >> (byte * 16UL)) & 0xffffUL; int scaled = src << scale; int from_fixed = scaled >> 7; val = ((from_fixed < 0) ? 0 : (from_fixed > 255) ? 255 : from_fixed); rd_val |= (val << (8 * byte)); } *fps_regaddr(f, RD(insn)) = rd_val; break; } case FPACK32_OPF: { unsigned long word; rs1 = fpd_regval(f, RS1(insn)); rs2 = fpd_regval(f, RS2(insn)); rd_val = (rs1 << 8) & ~(0x000000ff000000ffUL); for (word = 0; word < 2; word++) { unsigned long val; s32 src = (rs2 >> (word * 32UL)); s64 scaled = src << scale; s64 from_fixed = scaled >> 23; val = ((from_fixed < 0) ? 0 : (from_fixed > 255) ? 255 : from_fixed); rd_val |= (val << (32 * word)); } *fpd_regaddr(f, RD(insn)) = rd_val; break; } case FPACKFIX_OPF: { unsigned long word; rs2 = fpd_regval(f, RS2(insn)); rd_val = 0; for (word = 0; word < 2; word++) { long val; s32 src = (rs2 >> (word * 32UL)); s64 scaled = src << scale; s64 from_fixed = scaled >> 16; val = ((from_fixed < -32768) ? -32768 : (from_fixed > 32767) ? 
32767 : from_fixed); rd_val |= ((val & 0xffff) << (word * 16)); } *fps_regaddr(f, RD(insn)) = rd_val; break; } case FEXPAND_OPF: { unsigned long byte; rs2 = fps_regval(f, RS2(insn)); rd_val = 0; for (byte = 0; byte < 4; byte++) { unsigned long val; u8 src = (rs2 >> (byte * 8)) & 0xff; val = src << 4; rd_val |= (val << (byte * 16)); } *fpd_regaddr(f, RD(insn)) = rd_val; break; } case FPMERGE_OPF: { rs1 = fps_regval(f, RS1(insn)); rs2 = fps_regval(f, RS2(insn)); rd_val = (((rs2 & 0x000000ff) << 0) | ((rs1 & 0x000000ff) << 8) | ((rs2 & 0x0000ff00) << 8) | ((rs1 & 0x0000ff00) << 16) | ((rs2 & 0x00ff0000) << 16) | ((rs1 & 0x00ff0000) << 24) | ((rs2 & 0xff000000) << 24) | ((rs1 & 0xff000000) << 32)); *fpd_regaddr(f, RD(insn)) = rd_val; break; } } } static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf) { struct fpustate *f = FPUSTATE; unsigned long rs1, rs2, rd_val; switch (opf) { case FMUL8x16_OPF: { unsigned long byte; rs1 = fps_regval(f, RS1(insn)); rs2 = fpd_regval(f, RS2(insn)); rd_val = 0; for (byte = 0; byte < 4; byte++) { u16 src1 = (rs1 >> (byte * 8)) & 0x00ff; s16 src2 = (rs2 >> (byte * 16)) & 0xffff; u32 prod = src1 * src2; u16 scaled = ((prod & 0x00ffff00) >> 8); /* Round up. */ if (prod & 0x80) scaled++; rd_val |= ((scaled & 0xffffUL) << (byte * 16UL)); } *fpd_regaddr(f, RD(insn)) = rd_val; break; } case FMUL8x16AU_OPF: case FMUL8x16AL_OPF: { unsigned long byte; s16 src2; rs1 = fps_regval(f, RS1(insn)); rs2 = fps_regval(f, RS2(insn)); rd_val = 0; src2 = rs2 >> (opf == FMUL8x16AU_OPF ? 16 : 0); for (byte = 0; byte < 4; byte++) { u16 src1 = (rs1 >> (byte * 8)) & 0x00ff; u32 prod = src1 * src2; u16 scaled = ((prod & 0x00ffff00) >> 8); /* Round up. */ if (prod & 0x80) scaled++; rd_val |= ((scaled & 0xffffUL) << (byte * 16UL)); } *fpd_regaddr(f, RD(insn)) = rd_val; break; } case FMUL8SUx16_OPF: case FMUL8ULx16_OPF: { unsigned long byte, ushift; rs1 = fpd_regval(f, RS1(insn)); rs2 = fpd_regval(f, RS2(insn)); rd_val = 0; ushift = (opf == FMUL8SUx16_OPF) ? 8 : 0; for (byte = 0; byte < 4; byte++) { u16 src1; s16 src2; u32 prod; u16 scaled; src1 = ((rs1 >> ((16 * byte) + ushift)) & 0x00ff); src2 = ((rs2 >> (16 * byte)) & 0xffff); prod = src1 * src2; scaled = ((prod & 0x00ffff00) >> 8); /* Round up. */ if (prod & 0x80) scaled++; rd_val |= ((scaled & 0xffffUL) << (byte * 16UL)); } *fpd_regaddr(f, RD(insn)) = rd_val; break; } case FMULD8SUx16_OPF: case FMULD8ULx16_OPF: { unsigned long byte, ushift; rs1 = fps_regval(f, RS1(insn)); rs2 = fps_regval(f, RS2(insn)); rd_val = 0; ushift = (opf == FMULD8SUx16_OPF) ? 8 : 0; for (byte = 0; byte < 2; byte++) { u16 src1; s16 src2; u32 prod; u16 scaled; src1 = ((rs1 >> ((16 * byte) + ushift)) & 0x00ff); src2 = ((rs2 >> (16 * byte)) & 0xffff); prod = src1 * src2; scaled = ((prod & 0x00ffff00) >> 8); /* Round up. 
*/ if (prod & 0x80) scaled++; rd_val |= ((scaled & 0xffffUL) << ((byte * 32UL) + 7UL)); } *fpd_regaddr(f, RD(insn)) = rd_val; break; } } } static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf) { struct fpustate *f = FPUSTATE; unsigned long rs1, rs2, rd_val, i; rs1 = fpd_regval(f, RS1(insn)); rs2 = fpd_regval(f, RS2(insn)); rd_val = 0; switch (opf) { case FCMPGT16_OPF: for (i = 0; i < 4; i++) { s16 a = (rs1 >> (i * 16)) & 0xffff; s16 b = (rs2 >> (i * 16)) & 0xffff; if (a > b) rd_val |= 8 >> i; } break; case FCMPGT32_OPF: for (i = 0; i < 2; i++) { s32 a = (rs1 >> (i * 32)) & 0xffffffff; s32 b = (rs2 >> (i * 32)) & 0xffffffff; if (a > b) rd_val |= 2 >> i; } break; case FCMPLE16_OPF: for (i = 0; i < 4; i++) { s16 a = (rs1 >> (i * 16)) & 0xffff; s16 b = (rs2 >> (i * 16)) & 0xffff; if (a <= b) rd_val |= 8 >> i; } break; case FCMPLE32_OPF: for (i = 0; i < 2; i++) { s32 a = (rs1 >> (i * 32)) & 0xffffffff; s32 b = (rs2 >> (i * 32)) & 0xffffffff; if (a <= b) rd_val |= 2 >> i; } break; case FCMPNE16_OPF: for (i = 0; i < 4; i++) { s16 a = (rs1 >> (i * 16)) & 0xffff; s16 b = (rs2 >> (i * 16)) & 0xffff; if (a != b) rd_val |= 8 >> i; } break; case FCMPNE32_OPF: for (i = 0; i < 2; i++) { s32 a = (rs1 >> (i * 32)) & 0xffffffff; s32 b = (rs2 >> (i * 32)) & 0xffffffff; if (a != b) rd_val |= 2 >> i; } break; case FCMPEQ16_OPF: for (i = 0; i < 4; i++) { s16 a = (rs1 >> (i * 16)) & 0xffff; s16 b = (rs2 >> (i * 16)) & 0xffff; if (a == b) rd_val |= 8 >> i; } break; case FCMPEQ32_OPF: for (i = 0; i < 2; i++) { s32 a = (rs1 >> (i * 32)) & 0xffffffff; s32 b = (rs2 >> (i * 32)) & 0xffffffff; if (a == b) rd_val |= 2 >> i; } break; } maybe_flush_windows(0, 0, RD(insn), 0); store_reg(regs, rd_val, RD(insn)); } /* Emulate the VIS instructions which are not implemented in * hardware on Niagara. */ int vis_emul(struct pt_regs *regs, unsigned int insn) { unsigned long pc = regs->tpc; unsigned int opf; BUG_ON(regs->tstate & TSTATE_PRIV); perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; if (get_user(insn, (u32 __user *) pc)) return -EFAULT; save_and_clear_fpu(); opf = (insn & VIS_OPF_MASK) >> VIS_OPF_SHIFT; switch (opf) { default: return -EINVAL; /* Pixel Formatting Instructions. */ case FPACK16_OPF: case FPACK32_OPF: case FPACKFIX_OPF: case FEXPAND_OPF: case FPMERGE_OPF: pformat(regs, insn, opf); break; /* Partitioned Multiply Instructions */ case FMUL8x16_OPF: case FMUL8x16AU_OPF: case FMUL8x16AL_OPF: case FMUL8SUx16_OPF: case FMUL8ULx16_OPF: case FMULD8SUx16_OPF: case FMULD8ULx16_OPF: pmul(regs, insn, opf); break; /* Pixel Compare Instructions */ case FCMPGT16_OPF: case FCMPGT32_OPF: case FCMPLE16_OPF: case FCMPLE32_OPF: case FCMPNE16_OPF: case FCMPNE32_OPF: case FCMPEQ16_OPF: case FCMPEQ32_OPF: pcmp(regs, insn, opf); break; /* Edge Handling Instructions */ case EDGE8_OPF: case EDGE8N_OPF: case EDGE8L_OPF: case EDGE8LN_OPF: case EDGE16_OPF: case EDGE16N_OPF: case EDGE16L_OPF: case EDGE16LN_OPF: case EDGE32_OPF: case EDGE32N_OPF: case EDGE32L_OPF: case EDGE32LN_OPF: edge(regs, insn, opf); break; /* Pixel Component Distance */ case PDIST_OPF: pdist(regs, insn); break; /* Three-Dimensional Array Addressing Instructions */ case ARRAY8_OPF: case ARRAY16_OPF: case ARRAY32_OPF: array(regs, insn, opf); break; /* Byte Mask and Shuffle Instructions */ case BMASK_OPF: bmask(regs, insn); break; case BSHUFFLE_OPF: bshuffle(regs, insn); break; } regs->tpc = regs->tnpc; regs->tnpc += 4; return 0; }
gpl-2.0
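Of the VIS instructions emulated above, pdist() is the easiest to check by hand: it accumulates the sum of absolute differences of eight byte lanes into the destination register. A portable sketch of that loop, using the same shifts and masks but no FPU state:

#include <stdint.h>
#include <stdio.h>

/* Sum of absolute differences of the eight byte lanes of rs1 and rs2,
 * accumulated onto rd, mirroring the pdist() loop above. */
static uint64_t pdist64(uint64_t rs1, uint64_t rs2, uint64_t rd)
{
	int i;

	for (i = 0; i < 8; i++) {
		int16_t a = (rs1 >> (56 - i * 8)) & 0xff;
		int16_t b = (rs2 >> (56 - i * 8)) & 0xff;
		int16_t d = a - b;

		rd += (d < 0) ? -d : d;
	}
	return rd;
}

int main(void)
{
	/* |1-8| + |2-7| + ... + |8-1| = 32 */
	printf("pdist = %llu\n",
	       (unsigned long long)pdist64(0x0102030405060708ULL,
					   0x0807060504030201ULL, 0));
	return 0;
}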
peterzhu0503/kernel_rk3026
net/irda/irnetlink.c
9232
3294
/*
 * IrDA netlink layer, for stack configuration.
 *
 * Copyright (c) 2007 Samuel Ortiz <samuel@sortiz.org>
 *
 * Partly based on the 802.11 netlink implementation
 * (see net/wireless/nl80211.c) which is:
 * Copyright 2006 Johannes Berg <johannes@sipsolutions.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/socket.h>
#include <linux/irda.h>
#include <linux/gfp.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/irda/irda.h>
#include <net/irda/irlap.h>
#include <net/genetlink.h>

static struct genl_family irda_nl_family = {
	.id = GENL_ID_GENERATE,
	.name = IRDA_NL_NAME,
	.hdrsize = 0,
	.version = IRDA_NL_VERSION,
	.maxattr = IRDA_NL_CMD_MAX,
};

static struct net_device *ifname_to_netdev(struct net *net,
					   struct genl_info *info)
{
	char *ifname;

	if (!info->attrs[IRDA_NL_ATTR_IFNAME])
		return NULL;

	ifname = nla_data(info->attrs[IRDA_NL_ATTR_IFNAME]);

	IRDA_DEBUG(5, "%s(): Looking for %s\n", __func__, ifname);

	return dev_get_by_name(net, ifname);
}

static int irda_nl_set_mode(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct irlap_cb *irlap;
	u32 mode;

	if (!info->attrs[IRDA_NL_ATTR_MODE])
		return -EINVAL;

	mode = nla_get_u32(info->attrs[IRDA_NL_ATTR_MODE]);

	IRDA_DEBUG(5, "%s(): Switching to mode: %d\n", __func__, mode);

	dev = ifname_to_netdev(&init_net, info);
	if (!dev)
		return -ENODEV;

	irlap = (struct irlap_cb *)dev->atalk_ptr;
	if (!irlap) {
		dev_put(dev);
		return -ENODEV;
	}

	irlap->mode = mode;

	dev_put(dev);

	return 0;
}

static int irda_nl_get_mode(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct irlap_cb *irlap;
	struct sk_buff *msg;
	void *hdr;
	int ret = -ENOBUFS;

	dev = ifname_to_netdev(&init_net, info);
	if (!dev)
		return -ENODEV;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		dev_put(dev);
		return -ENOMEM;
	}

	irlap = (struct irlap_cb *)dev->atalk_ptr;
	if (!irlap) {
		ret = -ENODEV;
		goto err_out;
	}

	hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
			  &irda_nl_family, 0, IRDA_NL_CMD_GET_MODE);
	if (hdr == NULL) {
		ret = -EMSGSIZE;
		goto err_out;
	}

	if (nla_put_string(msg, IRDA_NL_ATTR_IFNAME, dev->name))
		goto err_out;

	if (nla_put_u32(msg, IRDA_NL_ATTR_MODE, irlap->mode))
		goto err_out;

	genlmsg_end(msg, hdr);

	return genlmsg_reply(msg, info);

 err_out:
	nlmsg_free(msg);
	dev_put(dev);

	return ret;
}

static const struct nla_policy irda_nl_policy[IRDA_NL_ATTR_MAX + 1] = {
	[IRDA_NL_ATTR_IFNAME] = { .type = NLA_NUL_STRING,
				  .len = IFNAMSIZ-1 },
	[IRDA_NL_ATTR_MODE] = { .type = NLA_U32 },
};

static struct genl_ops irda_nl_ops[] = {
	{
		.cmd = IRDA_NL_CMD_SET_MODE,
		.doit = irda_nl_set_mode,
		.policy = irda_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = IRDA_NL_CMD_GET_MODE,
		.doit = irda_nl_get_mode,
		.policy = irda_nl_policy,
		/* can be retrieved by unprivileged users */
	},
};

int irda_nl_register(void)
{
	return genl_register_family_with_ops(&irda_nl_family,
					     irda_nl_ops,
					     ARRAY_SIZE(irda_nl_ops));
}

void irda_nl_unregister(void)
{
	genl_unregister_family(&irda_nl_family);
}
gpl-2.0
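irnetlink.c registers an array of genl_ops, each binding a command number to a doit() handler plus an attribute policy. The sketch below imitates only that dispatch-table shape in plain C; the command numbers and handler names are invented, and real generic-netlink registration goes through genl_register_family_with_ops() as in the file above.

#include <stdio.h>

#define CMD_SET_MODE 1	/* hypothetical command numbers */
#define CMD_GET_MODE 2

struct op {
	int cmd;
	int (*doit)(const char *arg);
};

static int set_mode(const char *arg)
{
	printf("set mode %s\n", arg);
	return 0;
}

static int get_mode(const char *arg)
{
	(void)arg;
	printf("get mode\n");
	return 0;
}

/* Command table, one entry per command, like the genl_ops array. */
static const struct op ops[] = {
	{ CMD_SET_MODE, set_mode },
	{ CMD_GET_MODE, get_mode },
};

static int dispatch(int cmd, const char *arg)
{
	size_t i;

	for (i = 0; i < sizeof(ops) / sizeof(ops[0]); i++)
		if (ops[i].cmd == cmd)
			return ops[i].doit(arg);
	return -1;	/* unknown command, akin to -EOPNOTSUPP */
}

int main(void)
{
	dispatch(CMD_SET_MODE, "9600");
	return dispatch(CMD_GET_MODE, NULL) ? 1 : 0;
}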
adhi1419/MSM7627A
sound/pci/ice1712/juli.c
9232
20286
/* * ALSA driver for ICEnsemble VT1724 (Envy24HT) * * Lowlevel functions for ESI Juli@ cards * * Copyright (c) 2004 Jaroslav Kysela <perex@perex.cz> * 2008 Pavel Hofman <dustin@seznam.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <asm/io.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/tlv.h> #include "ice1712.h" #include "envy24ht.h" #include "juli.h" struct juli_spec { struct ak4114 *ak4114; unsigned int analog:1; }; /* * chip addresses on I2C bus */ #define AK4114_ADDR 0x20 /* S/PDIF receiver */ #define AK4358_ADDR 0x22 /* DAC */ /* * Juli does not use the standard ICE1724 clock scheme. Juli's ice1724 chip is * supplied by external clock provided by Xilinx array and MK73-1 PLL frequency * multiplier. Actual frequency is set by ice1724 GPIOs hooked to the Xilinx. * * The clock circuitry is supplied by the two ice1724 crystals. This * arrangement allows to generate independent clock signal for AK4114's input * rate detection circuit. As a result, Juli, unlike most other * ice1724+ak4114-based cards, detects spdif input rate correctly. * This fact is applied in the driver, allowing to modify PCM stream rate * parameter according to the actual input rate. * * Juli uses the remaining three stereo-channels of its DAC to optionally * monitor analog input, digital input, and digital output. The corresponding * I2S signals are routed by Xilinx, controlled by GPIOs. * * The master mute is implemented using output muting transistors (GPIO) in * combination with smuting the DAC. * * The card itself has no HW master volume control, implemented using the * vmaster control. 
* * TODO: * researching and fixing the input monitors */ /* * GPIO pins */ #define GPIO_FREQ_MASK (3<<0) #define GPIO_FREQ_32KHZ (0<<0) #define GPIO_FREQ_44KHZ (1<<0) #define GPIO_FREQ_48KHZ (2<<0) #define GPIO_MULTI_MASK (3<<2) #define GPIO_MULTI_4X (0<<2) #define GPIO_MULTI_2X (1<<2) #define GPIO_MULTI_1X (2<<2) /* also external */ #define GPIO_MULTI_HALF (3<<2) #define GPIO_INTERNAL_CLOCK (1<<4) /* 0 = external, 1 = internal */ #define GPIO_CLOCK_MASK (1<<4) #define GPIO_ANALOG_PRESENT (1<<5) /* RO only: 0 = present */ #define GPIO_RXMCLK_SEL (1<<7) /* must be 0 */ #define GPIO_AK5385A_CKS0 (1<<8) #define GPIO_AK5385A_DFS1 (1<<9) #define GPIO_AK5385A_DFS0 (1<<10) #define GPIO_DIGOUT_MONITOR (1<<11) /* 1 = active */ #define GPIO_DIGIN_MONITOR (1<<12) /* 1 = active */ #define GPIO_ANAIN_MONITOR (1<<13) /* 1 = active */ #define GPIO_AK5385A_CKS1 (1<<14) /* must be 0 */ #define GPIO_MUTE_CONTROL (1<<15) /* output mute, 1 = muted */ #define GPIO_RATE_MASK (GPIO_FREQ_MASK | GPIO_MULTI_MASK | \ GPIO_CLOCK_MASK) #define GPIO_AK5385A_MASK (GPIO_AK5385A_CKS0 | GPIO_AK5385A_DFS0 | \ GPIO_AK5385A_DFS1 | GPIO_AK5385A_CKS1) #define JULI_PCM_RATE (SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 | \ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 | \ SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | \ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000) #define GPIO_RATE_16000 (GPIO_FREQ_32KHZ | GPIO_MULTI_HALF | \ GPIO_INTERNAL_CLOCK) #define GPIO_RATE_22050 (GPIO_FREQ_44KHZ | GPIO_MULTI_HALF | \ GPIO_INTERNAL_CLOCK) #define GPIO_RATE_24000 (GPIO_FREQ_48KHZ | GPIO_MULTI_HALF | \ GPIO_INTERNAL_CLOCK) #define GPIO_RATE_32000 (GPIO_FREQ_32KHZ | GPIO_MULTI_1X | \ GPIO_INTERNAL_CLOCK) #define GPIO_RATE_44100 (GPIO_FREQ_44KHZ | GPIO_MULTI_1X | \ GPIO_INTERNAL_CLOCK) #define GPIO_RATE_48000 (GPIO_FREQ_48KHZ | GPIO_MULTI_1X | \ GPIO_INTERNAL_CLOCK) #define GPIO_RATE_64000 (GPIO_FREQ_32KHZ | GPIO_MULTI_2X | \ GPIO_INTERNAL_CLOCK) #define GPIO_RATE_88200 (GPIO_FREQ_44KHZ | GPIO_MULTI_2X | \ GPIO_INTERNAL_CLOCK) #define GPIO_RATE_96000 (GPIO_FREQ_48KHZ | GPIO_MULTI_2X | \ GPIO_INTERNAL_CLOCK) #define GPIO_RATE_176400 (GPIO_FREQ_44KHZ | GPIO_MULTI_4X | \ GPIO_INTERNAL_CLOCK) #define GPIO_RATE_192000 (GPIO_FREQ_48KHZ | GPIO_MULTI_4X | \ GPIO_INTERNAL_CLOCK) /* * Initial setup of the conversion array GPIO <-> rate */ static unsigned int juli_rates[] = { 16000, 22050, 24000, 32000, 44100, 48000, 64000, 88200, 96000, 176400, 192000, }; static unsigned int gpio_vals[] = { GPIO_RATE_16000, GPIO_RATE_22050, GPIO_RATE_24000, GPIO_RATE_32000, GPIO_RATE_44100, GPIO_RATE_48000, GPIO_RATE_64000, GPIO_RATE_88200, GPIO_RATE_96000, GPIO_RATE_176400, GPIO_RATE_192000, }; static struct snd_pcm_hw_constraint_list juli_rates_info = { .count = ARRAY_SIZE(juli_rates), .list = juli_rates, .mask = 0, }; static int get_gpio_val(int rate) { int i; for (i = 0; i < ARRAY_SIZE(juli_rates); i++) if (juli_rates[i] == rate) return gpio_vals[i]; return 0; } static void juli_ak4114_write(void *private_data, unsigned char reg, unsigned char val) { snd_vt1724_write_i2c((struct snd_ice1712 *)private_data, AK4114_ADDR, reg, val); } static unsigned char juli_ak4114_read(void *private_data, unsigned char reg) { return snd_vt1724_read_i2c((struct snd_ice1712 *)private_data, AK4114_ADDR, reg); } /* * If SPDIF capture and slaved to SPDIF-IN, setting runtime rate * to the external rate */ static void juli_spdif_in_open(struct snd_ice1712 *ice, struct snd_pcm_substream *substream) { struct juli_spec *spec = ice->spec; struct 
snd_pcm_runtime *runtime = substream->runtime; int rate; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK || !ice->is_spdif_master(ice)) return; rate = snd_ak4114_external_rate(spec->ak4114); if (rate >= runtime->hw.rate_min && rate <= runtime->hw.rate_max) { runtime->hw.rate_min = rate; runtime->hw.rate_max = rate; } } /* * AK4358 section */ static void juli_akm_lock(struct snd_akm4xxx *ak, int chip) { } static void juli_akm_unlock(struct snd_akm4xxx *ak, int chip) { } static void juli_akm_write(struct snd_akm4xxx *ak, int chip, unsigned char addr, unsigned char data) { struct snd_ice1712 *ice = ak->private_data[0]; if (snd_BUG_ON(chip)) return; snd_vt1724_write_i2c(ice, AK4358_ADDR, addr, data); } /* * change the rate of envy24HT, AK4358, AK5385 */ static void juli_akm_set_rate_val(struct snd_akm4xxx *ak, unsigned int rate) { unsigned char old, tmp, ak4358_dfs; unsigned int ak5385_pins, old_gpio, new_gpio; struct snd_ice1712 *ice = ak->private_data[0]; struct juli_spec *spec = ice->spec; if (rate == 0) /* no hint - S/PDIF input is master or the new spdif input rate undetected, simply return */ return; /* adjust DFS on codecs */ if (rate > 96000) { ak4358_dfs = 2; ak5385_pins = GPIO_AK5385A_DFS1 | GPIO_AK5385A_CKS0; } else if (rate > 48000) { ak4358_dfs = 1; ak5385_pins = GPIO_AK5385A_DFS0; } else { ak4358_dfs = 0; ak5385_pins = 0; } /* AK5385 first, since it requires cold reset affecting both codecs */ old_gpio = ice->gpio.get_data(ice); new_gpio = (old_gpio & ~GPIO_AK5385A_MASK) | ak5385_pins; /* printk(KERN_DEBUG "JULI - ak5385 set_rate_val: new gpio 0x%x\n", new_gpio); */ ice->gpio.set_data(ice, new_gpio); /* cold reset */ old = inb(ICEMT1724(ice, AC97_CMD)); outb(old | VT1724_AC97_COLD, ICEMT1724(ice, AC97_CMD)); udelay(1); outb(old & ~VT1724_AC97_COLD, ICEMT1724(ice, AC97_CMD)); /* AK4358 */ /* set new value, reset DFS */ tmp = snd_akm4xxx_get(ak, 0, 2); snd_akm4xxx_reset(ak, 1); tmp = snd_akm4xxx_get(ak, 0, 2); tmp &= ~(0x03 << 4); tmp |= ak4358_dfs << 4; snd_akm4xxx_set(ak, 0, 2, tmp); snd_akm4xxx_reset(ak, 0); /* reinit ak4114 */ snd_ak4114_reinit(spec->ak4114); } #define AK_DAC(xname, xch) { .name = xname, .num_channels = xch } #define PCM_VOLUME "PCM Playback Volume" #define MONITOR_AN_IN_VOLUME "Monitor Analog In Volume" #define MONITOR_DIG_IN_VOLUME "Monitor Digital In Volume" #define MONITOR_DIG_OUT_VOLUME "Monitor Digital Out Volume" static const struct snd_akm4xxx_dac_channel juli_dac[] = { AK_DAC(PCM_VOLUME, 2), AK_DAC(MONITOR_AN_IN_VOLUME, 2), AK_DAC(MONITOR_DIG_OUT_VOLUME, 2), AK_DAC(MONITOR_DIG_IN_VOLUME, 2), }; static struct snd_akm4xxx akm_juli_dac __devinitdata = { .type = SND_AK4358, .num_dacs = 8, /* DAC1 - analog out DAC2 - analog in monitor DAC3 - digital out monitor DAC4 - digital in monitor */ .ops = { .lock = juli_akm_lock, .unlock = juli_akm_unlock, .write = juli_akm_write, .set_rate_val = juli_akm_set_rate_val }, .dac_info = juli_dac, }; #define juli_mute_info snd_ctl_boolean_mono_info static int juli_mute_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); unsigned int val; val = ice->gpio.get_data(ice) & (unsigned int) kcontrol->private_value; if (kcontrol->private_value == GPIO_MUTE_CONTROL) /* val 0 = signal on */ ucontrol->value.integer.value[0] = (val) ? 0 : 1; else /* val 1 = signal on */ ucontrol->value.integer.value[0] = (val) ? 
1 : 0; return 0; } static int juli_mute_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); unsigned int old_gpio, new_gpio; old_gpio = ice->gpio.get_data(ice); if (ucontrol->value.integer.value[0]) { /* unmute */ if (kcontrol->private_value == GPIO_MUTE_CONTROL) { /* 0 = signal on */ new_gpio = old_gpio & ~GPIO_MUTE_CONTROL; /* un-smuting DAC */ snd_akm4xxx_write(ice->akm, 0, 0x01, 0x01); } else /* 1 = signal on */ new_gpio = old_gpio | (unsigned int) kcontrol->private_value; } else { /* mute */ if (kcontrol->private_value == GPIO_MUTE_CONTROL) { /* 1 = signal off */ new_gpio = old_gpio | GPIO_MUTE_CONTROL; /* smuting DAC */ snd_akm4xxx_write(ice->akm, 0, 0x01, 0x03); } else /* 0 = signal off */ new_gpio = old_gpio & ~((unsigned int) kcontrol->private_value); } /* printk(KERN_DEBUG "JULI - mute/unmute: control_value: 0x%x, old_gpio: 0x%x, " "new_gpio 0x%x\n", (unsigned int)ucontrol->value.integer.value[0], old_gpio, new_gpio); */ if (old_gpio != new_gpio) { ice->gpio.set_data(ice, new_gpio); return 1; } /* no change */ return 0; } static struct snd_kcontrol_new juli_mute_controls[] __devinitdata = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Master Playback Switch", .info = juli_mute_info, .get = juli_mute_get, .put = juli_mute_put, .private_value = GPIO_MUTE_CONTROL, }, /* Although the following functionality respects the succint NDA'd * documentation from the card manufacturer, and the same way of * operation is coded in OSS Juli driver, only Digital Out monitor * seems to work. Surprisingly, Analog input monitor outputs Digital * output data. The two are independent, as enabling both doubles * volume of the monitor sound. * * Checking traces on the board suggests the functionality described * by the manufacturer is correct - I2S from ADC and AK4114 * go to ICE as well as to Xilinx, I2S inputs of DAC2,3,4 (the monitor * inputs) are fed from Xilinx. * * I even checked traces on board and coded a support in driver for * an alternative possibility - the unused I2S ICE output channels * switched to HW-IN/SPDIF-IN and providing the monitoring signal to * the DAC - to no avail. The I2S outputs seem to be unconnected. * * The windows driver supports the monitoring correctly. */ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Monitor Analog In Switch", .info = juli_mute_info, .get = juli_mute_get, .put = juli_mute_put, .private_value = GPIO_ANAIN_MONITOR, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Monitor Digital Out Switch", .info = juli_mute_info, .get = juli_mute_get, .put = juli_mute_put, .private_value = GPIO_DIGOUT_MONITOR, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Monitor Digital In Switch", .info = juli_mute_info, .get = juli_mute_get, .put = juli_mute_put, .private_value = GPIO_DIGIN_MONITOR, }, }; static char *slave_vols[] __devinitdata = { PCM_VOLUME, MONITOR_AN_IN_VOLUME, MONITOR_DIG_IN_VOLUME, MONITOR_DIG_OUT_VOLUME, NULL }; static __devinitdata DECLARE_TLV_DB_SCALE(juli_master_db_scale, -6350, 50, 1); static struct snd_kcontrol __devinit *ctl_find(struct snd_card *card, const char *name) { struct snd_ctl_elem_id sid; memset(&sid, 0, sizeof(sid)); /* FIXME: strcpy is bad. 
*/ strcpy(sid.name, name); sid.iface = SNDRV_CTL_ELEM_IFACE_MIXER; return snd_ctl_find_id(card, &sid); } static void __devinit add_slaves(struct snd_card *card, struct snd_kcontrol *master, char **list) { for (; *list; list++) { struct snd_kcontrol *slave = ctl_find(card, *list); /* printk(KERN_DEBUG "add_slaves - %s\n", *list); */ if (slave) { /* printk(KERN_DEBUG "slave %s found\n", *list); */ snd_ctl_add_slave(master, slave); } } } static int __devinit juli_add_controls(struct snd_ice1712 *ice) { struct juli_spec *spec = ice->spec; int err; unsigned int i; struct snd_kcontrol *vmaster; err = snd_ice1712_akm4xxx_build_controls(ice); if (err < 0) return err; for (i = 0; i < ARRAY_SIZE(juli_mute_controls); i++) { err = snd_ctl_add(ice->card, snd_ctl_new1(&juli_mute_controls[i], ice)); if (err < 0) return err; } /* Create virtual master control */ vmaster = snd_ctl_make_virtual_master("Master Playback Volume", juli_master_db_scale); if (!vmaster) return -ENOMEM; add_slaves(ice->card, vmaster, slave_vols); err = snd_ctl_add(ice->card, vmaster); if (err < 0) return err; /* only capture SPDIF over AK4114 */ err = snd_ak4114_build(spec->ak4114, NULL, ice->pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream); if (err < 0) return err; return 0; } /* * suspend/resume * */ #ifdef CONFIG_PM static int juli_resume(struct snd_ice1712 *ice) { struct snd_akm4xxx *ak = ice->akm; struct juli_spec *spec = ice->spec; /* akm4358 un-reset, un-mute */ snd_akm4xxx_reset(ak, 0); /* reinit ak4114 */ snd_ak4114_reinit(spec->ak4114); return 0; } static int juli_suspend(struct snd_ice1712 *ice) { struct snd_akm4xxx *ak = ice->akm; /* akm4358 reset and soft-mute */ snd_akm4xxx_reset(ak, 1); return 0; } #endif /* * initialize the chip */ static inline int juli_is_spdif_master(struct snd_ice1712 *ice) { return (ice->gpio.get_data(ice) & GPIO_INTERNAL_CLOCK) ? 
0 : 1; } static unsigned int juli_get_rate(struct snd_ice1712 *ice) { int i; unsigned char result; result = ice->gpio.get_data(ice) & GPIO_RATE_MASK; for (i = 0; i < ARRAY_SIZE(gpio_vals); i++) if (gpio_vals[i] == result) return juli_rates[i]; return 0; } /* setting new rate */ static void juli_set_rate(struct snd_ice1712 *ice, unsigned int rate) { unsigned int old, new; unsigned char val; old = ice->gpio.get_data(ice); new = (old & ~GPIO_RATE_MASK) | get_gpio_val(rate); /* printk(KERN_DEBUG "JULI - set_rate: old %x, new %x\n", old & GPIO_RATE_MASK, new & GPIO_RATE_MASK); */ ice->gpio.set_data(ice, new); /* switching to external clock - supplied by external circuits */ val = inb(ICEMT1724(ice, RATE)); outb(val | VT1724_SPDIF_MASTER, ICEMT1724(ice, RATE)); } static inline unsigned char juli_set_mclk(struct snd_ice1712 *ice, unsigned int rate) { /* no change in master clock */ return 0; } /* setting clock to external - SPDIF */ static int juli_set_spdif_clock(struct snd_ice1712 *ice, int type) { unsigned int old; old = ice->gpio.get_data(ice); /* external clock (= 0), multiply 1x, 48kHz */ ice->gpio.set_data(ice, (old & ~GPIO_RATE_MASK) | GPIO_MULTI_1X | GPIO_FREQ_48KHZ); return 0; } /* Called when ak4114 detects change in the input SPDIF stream */ static void juli_ak4114_change(struct ak4114 *ak4114, unsigned char c0, unsigned char c1) { struct snd_ice1712 *ice = ak4114->change_callback_private; int rate; if (ice->is_spdif_master(ice) && c1) { /* only for SPDIF master mode, rate was changed */ rate = snd_ak4114_external_rate(ak4114); /* printk(KERN_DEBUG "ak4114 - input rate changed to %d\n", rate); */ juli_akm_set_rate_val(ice->akm, rate); } } static int __devinit juli_init(struct snd_ice1712 *ice) { static const unsigned char ak4114_init_vals[] = { /* AK4117_REG_PWRDN */ AK4114_RST | AK4114_PWN | AK4114_OCKS0 | AK4114_OCKS1, /* AK4114_REQ_FORMAT */ AK4114_DIF_I24I2S, /* AK4114_REG_IO0 */ AK4114_TX1E, /* AK4114_REG_IO1 */ AK4114_EFH_1024 | AK4114_DIT | AK4114_IPS(1), /* AK4114_REG_INT0_MASK */ 0, /* AK4114_REG_INT1_MASK */ 0 }; static const unsigned char ak4114_init_txcsb[] = { 0x41, 0x02, 0x2c, 0x00, 0x00 }; int err; struct juli_spec *spec; struct snd_akm4xxx *ak; spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) return -ENOMEM; ice->spec = spec; err = snd_ak4114_create(ice->card, juli_ak4114_read, juli_ak4114_write, ak4114_init_vals, ak4114_init_txcsb, ice, &spec->ak4114); if (err < 0) return err; /* callback for codecs rate setting */ spec->ak4114->change_callback = juli_ak4114_change; spec->ak4114->change_callback_private = ice; /* AK4114 in Juli can detect external rate correctly */ spec->ak4114->check_flags = 0; #if 0 /* * it seems that the analog doughter board detection does not work reliably, so * force the analog flag; it should be very rare (if ever) to come at Juli@ * used without the analog daughter board */ spec->analog = (ice->gpio.get_data(ice) & GPIO_ANALOG_PRESENT) ? 
		0 : 1;
#else
	spec->analog = 1;
#endif

	if (spec->analog) {
		printk(KERN_INFO "juli@: analog I/O detected\n");
		ice->num_total_dacs = 2;
		ice->num_total_adcs = 2;

		ice->akm = kzalloc(sizeof(struct snd_akm4xxx), GFP_KERNEL);
		ak = ice->akm;
		if (!ak)
			return -ENOMEM;
		ice->akm_codecs = 1;
		err = snd_ice1712_akm4xxx_init(ak, &akm_juli_dac, NULL, ice);
		if (err < 0)
			return err;
	}

	/* juli is clocked by Xilinx array */
	ice->hw_rates = &juli_rates_info;
	ice->is_spdif_master = juli_is_spdif_master;
	ice->get_rate = juli_get_rate;
	ice->set_rate = juli_set_rate;
	ice->set_mclk = juli_set_mclk;
	ice->set_spdif_clock = juli_set_spdif_clock;
	ice->spdif.ops.open = juli_spdif_in_open;

#ifdef CONFIG_PM
	ice->pm_resume = juli_resume;
	ice->pm_suspend = juli_suspend;
	ice->pm_suspend_enabled = 1;
#endif

	return 0;
}

/*
 * Juli@ boards don't provide the EEPROM data except for the vendor IDs,
 * hence the driver needs to set it up properly.
 */
static unsigned char juli_eeprom[] __devinitdata = {
	[ICE_EEP2_SYSCONF]     = 0x2b,	/* clock 512, mpu401, 1xADC, 1xDACs,
					   SPDIF in */
	[ICE_EEP2_ACLINK]      = 0x80,	/* I2S */
	[ICE_EEP2_I2S]         = 0xf8,	/* vol, 96k, 24bit, 192k */
	[ICE_EEP2_SPDIF]       = 0xc3,	/* out-en, out-int, spdif-in */
	[ICE_EEP2_GPIO_DIR]    = 0x9f,	/* 5, 6: inputs; 7, 4-0: outputs */
	[ICE_EEP2_GPIO_DIR1]   = 0xff,
	[ICE_EEP2_GPIO_DIR2]   = 0x7f,
	[ICE_EEP2_GPIO_MASK]   = 0x60,	/* 5, 6: locked; 7, 4-0: writable */
	[ICE_EEP2_GPIO_MASK1]  = 0x00,	/* 0-7 writable */
	[ICE_EEP2_GPIO_MASK2]  = 0x7f,
	[ICE_EEP2_GPIO_STATE]  = GPIO_FREQ_48KHZ | GPIO_MULTI_1X |
				 GPIO_INTERNAL_CLOCK,	/* internal clock,
							   multiple 1x, 48kHz */
	[ICE_EEP2_GPIO_STATE1] = 0x00,	/* unmuted */
	[ICE_EEP2_GPIO_STATE2] = 0x00,
};

/* entry point */
struct snd_ice1712_card_info snd_vt1724_juli_cards[] __devinitdata = {
	{
		.subvendor = VT1724_SUBDEVICE_JULI,
		.name = "ESI Juli@",
		.model = "juli",
		.chip_init = juli_init,
		.build_controls = juli_add_controls,
		.eeprom_size = sizeof(juli_eeprom),
		.eeprom_data = juli_eeprom,
	},
	{ } /* terminator */
};
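The rate handling above reduces to a parallel-table lookup: the clock GPIO bits select an entry in gpio_vals[], and the same index in juli_rates[] gives the sample rate (juli_set_rate() runs the mapping in the other direction). The following is a minimal, standalone C sketch of that pattern; the mask and table values are illustrative placeholders, not the real Juli@ GPIO bit assignments.

#include <stdio.h>

/* Placeholder GPIO encodings and the rates they select; the real
 * tables live in juli.c and use different values. */
static const unsigned char gpio_vals[] = { 0x00, 0x01, 0x02, 0x03 };
static const unsigned int  rates[]     = { 44100, 48000, 88200, 96000 };
#define N_RATES (sizeof(gpio_vals) / sizeof(gpio_vals[0]))
#define RATE_MASK 0x0f	/* hypothetical stand-in for GPIO_RATE_MASK */

/* gpio bits -> rate, mirroring juli_get_rate(): mask, then scan */
static unsigned int gpio_to_rate(unsigned char gpio)
{
	unsigned int i;
	unsigned char bits = gpio & RATE_MASK;

	for (i = 0; i < N_RATES; i++)
		if (gpio_vals[i] == bits)
			return rates[i];
	return 0;	/* unknown encoding */
}

/* rate -> gpio bits, the inverse direction used by juli_set_rate() */
static unsigned char rate_to_gpio(unsigned int rate)
{
	unsigned int i;

	for (i = 0; i < N_RATES; i++)
		if (rates[i] == rate)
			return gpio_vals[i];
	return 0;
}

int main(void)
{
	printf("0x02 -> %u Hz\n", gpio_to_rate(0x02));
	printf("48000 Hz -> 0x%02x\n", rate_to_gpio(48000));
	return 0;
}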
gpl-2.0
CaptainThrowback/kernel_htc_hima
kernel/irq/manage.c
17
29256
/* * linux/kernel/irq/manage.c * * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar * Copyright (C) 2005-2006 Thomas Gleixner * * This file contains driver APIs to the irq subsystem. */ #define pr_fmt(fmt) "genirq: " fmt #include <linux/irq.h> #include <linux/kthread.h> #include <linux/module.h> #include <linux/random.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/sched/rt.h> #include <linux/task_work.h> #include "internals.h" #ifdef CONFIG_IRQ_FORCED_THREADING __read_mostly bool force_irqthreads; static int __init setup_forced_irqthreads(char *arg) { force_irqthreads = true; return 0; } early_param("threadirqs", setup_forced_irqthreads); #endif void synchronize_irq(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); bool inprogress; if (!desc) return; do { unsigned long flags; while (irqd_irq_inprogress(&desc->irq_data)) cpu_relax(); raw_spin_lock_irqsave(&desc->lock, flags); inprogress = irqd_irq_inprogress(&desc->irq_data); raw_spin_unlock_irqrestore(&desc->lock, flags); } while (inprogress); wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active)); } EXPORT_SYMBOL(synchronize_irq); #ifdef CONFIG_SMP cpumask_var_t irq_default_affinity; int irq_can_set_affinity(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); if (!desc || !irqd_can_balance(&desc->irq_data) || !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) return 0; return 1; } void irq_set_thread_affinity(struct irq_desc *desc) { struct irqaction *action = desc->action; while (action) { if (action->thread) set_bit(IRQTF_AFFINITY, &action->thread_flags); action = action->next; } } #ifdef CONFIG_GENERIC_PENDING_IRQ static inline bool irq_can_move_pcntxt(struct irq_data *data) { return irqd_can_move_in_process_context(data); } static inline bool irq_move_pending(struct irq_data *data) { return irqd_is_setaffinity_pending(data); } static inline void irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { cpumask_copy(desc->pending_mask, mask); } static inline void irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { cpumask_copy(mask, desc->pending_mask); } #else static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; } static inline bool irq_move_pending(struct irq_data *data) { return false; } static inline void irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { } static inline void irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } #endif int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) { struct irq_desc *desc = irq_data_to_desc(data); struct irq_chip *chip = irq_data_get_irq_chip(data); int ret; ret = chip->irq_set_affinity(data, mask, force); switch (ret) { case IRQ_SET_MASK_OK: cpumask_copy(data->affinity, mask); case IRQ_SET_MASK_OK_NOCOPY: irq_set_thread_affinity(desc); ret = 0; } return ret; } int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, bool force) { struct irq_chip *chip = irq_data_get_irq_chip(data); struct irq_desc *desc = irq_data_to_desc(data); int ret = 0; if (!chip || !chip->irq_set_affinity) return -EINVAL; if (irq_can_move_pcntxt(data)) { ret = irq_do_set_affinity(data, mask, force); } else { irqd_set_move_pending(data); irq_copy_pending(desc, mask); } if (!list_empty(&desc->affinity_notify)) schedule_work(&desc->affinity_work); irqd_set(data, IRQD_AFFINITY_SET); return ret; } int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force) { 
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_affinity);

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);

void irq_affinity_notify(struct work_struct *work)
{
	struct irq_desc *desc = container_of(work, struct irq_desc, affinity_work);
	cpumask_var_t cpumask;
	unsigned long flags;
	struct irq_affinity_notify *notify;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	list_for_each_entry(notify, &desc->affinity_notify, list) {
		if (!kref_get_unless_zero(&notify->kref))
			continue;
		notify->notify(notify, cpumask);
		kref_put(&notify->kref, notify->release);
	}

	free_cpumask_var(cpumask);
}

int irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)
		return -EINVAL;

	if (!notify) {
		WARN(1, "%s called with NULL notifier - use irq_release_affinity_notifier function instead.\n",
		     __func__);
		return -EINVAL;
	}

	notify->irq = irq;
	kref_init(&notify->kref);
	INIT_LIST_HEAD(&notify->list);
	raw_spin_lock_irqsave(&desc->lock, flags);
	list_add(&notify->list, &desc->affinity_notify);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);

int irq_release_affinity_notifier(struct irq_affinity_notify *notify)
{
	struct irq_desc *desc;
	unsigned long flags;

	if (!notify)
		return -EINVAL;

	desc = irq_to_desc(notify->irq);
	raw_spin_lock_irqsave(&desc->lock, flags);
	list_del(&notify->list);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	kref_put(&notify->kref, notify->release);

	return 0;
}
EXPORT_SYMBOL(irq_release_affinity_notifier);

#ifndef CONFIG_AUTO_IRQ_AFFINITY
static int setup_affinity(unsigned int irq, struct irq_desc *desc,
			  struct cpumask *mask)
{
	struct cpumask *set = irq_default_affinity;
	int node = desc->irq_data.node;

	if (!irq_can_set_affinity(irq))
		return 0;

	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_data.affinity,
				       cpu_online_mask))
			set = desc->irq_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		if (cpumask_intersects(mask, nodemask))
			cpumask_and(mask, mask, nodemask);
	}
	irq_do_set_affinity(&desc->irq_data, mask, false);
	return 0;
}
#else
static inline int setup_affinity(unsigned int irq, struct irq_desc *d,
				 struct cpumask *mask)
{
	return irq_select_affinity(irq);
}
#endif

int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(irq, desc, mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask
*mask) { return 0; } #endif void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) { if (suspend) { if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND)) return; desc->istate |= IRQS_SUSPENDED; } if (!desc->depth++) irq_disable(desc); } static int __disable_irq_nosync(unsigned int irq) { unsigned long flags; struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); if (!desc) return -EINVAL; __disable_irq(desc, irq, false); irq_put_desc_busunlock(desc, flags); return 0; } void disable_irq_nosync(unsigned int irq) { __disable_irq_nosync(irq); } EXPORT_SYMBOL(disable_irq_nosync); void disable_irq(unsigned int irq) { if (!__disable_irq_nosync(irq)) synchronize_irq(irq); } EXPORT_SYMBOL(disable_irq); void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) { if (resume) { if (!(desc->istate & IRQS_SUSPENDED)) { if (!desc->action) return; if (!(desc->action->flags & IRQF_FORCE_RESUME)) return; desc->depth++; } desc->istate &= ~IRQS_SUSPENDED; } switch (desc->depth) { case 0: err_out: WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d, desc->istate=0x%x, desc->depth=%d\n", irq, desc->istate, desc->depth); break; case 1: { if (desc->istate & IRQS_SUSPENDED) goto err_out; irq_settings_set_noprobe(desc); irq_enable(desc); check_irq_resend(desc, irq); } default: desc->depth--; } } void enable_irq(unsigned int irq) { unsigned long flags; struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); if (!desc) return; if (WARN(!desc->irq_data.chip, KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) goto out; __enable_irq(desc, irq, false); out: irq_put_desc_busunlock(desc, flags); } EXPORT_SYMBOL(enable_irq); static int set_irq_wake_real(unsigned int irq, unsigned int on) { struct irq_desc *desc = irq_to_desc(irq); int ret = -ENXIO; if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE) return 0; if (desc->irq_data.chip->irq_set_wake) ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on); return ret; } int irq_set_irq_wake(unsigned int irq, unsigned int on) { unsigned long flags; struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); int ret = 0; if (!desc) return -EINVAL; if (on) { if (desc->wake_depth++ == 0) { ret = set_irq_wake_real(irq, on); if (ret) desc->wake_depth = 0; else irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); } } else { if (desc->wake_depth == 0) { WARN(1, "Unbalanced IRQ %d wake disable\n", irq); } else if (--desc->wake_depth == 0) { ret = set_irq_wake_real(irq, on); if (ret) desc->wake_depth = 1; else irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); } } irq_put_desc_busunlock(desc, flags); return ret; } EXPORT_SYMBOL(irq_set_irq_wake); int irq_read_line(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); int val; if (!desc || !desc->irq_data.chip->irq_read_line) return -EINVAL; chip_bus_lock(desc); raw_spin_lock(&desc->lock); val = desc->irq_data.chip->irq_read_line(&desc->irq_data); raw_spin_unlock(&desc->lock); chip_bus_sync_unlock(desc); return val; } EXPORT_SYMBOL_GPL(irq_read_line); int can_request_irq(unsigned int irq, unsigned long irqflags) { unsigned long flags; struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); int canrequest = 0; if (!desc) return 0; if (irq_settings_can_request(desc)) { if (!desc->action || irqflags & desc->action->flags & IRQF_SHARED) canrequest = 1; } irq_put_desc_unlock(desc, flags); return canrequest; } int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, unsigned long flags) 
{ struct irq_chip *chip = desc->irq_data.chip; int ret, unmask = 0; if (!chip || !chip->irq_set_type) { pr_debug("No set_type function for IRQ %d (%s)\n", irq, chip ? (chip->name ? : "unknown") : "unknown"); return 0; } flags &= IRQ_TYPE_SENSE_MASK; if (chip->flags & IRQCHIP_SET_TYPE_MASKED) { if (!irqd_irq_masked(&desc->irq_data)) mask_irq(desc); if (!irqd_irq_disabled(&desc->irq_data)) unmask = 1; } ret = chip->irq_set_type(&desc->irq_data, flags); switch (ret) { case IRQ_SET_MASK_OK: irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); irqd_set(&desc->irq_data, flags); case IRQ_SET_MASK_OK_NOCOPY: flags = irqd_get_trigger_type(&desc->irq_data); irq_settings_set_trigger_mask(desc, flags); irqd_clear(&desc->irq_data, IRQD_LEVEL); irq_settings_clr_level(desc); if (flags & IRQ_TYPE_LEVEL_MASK) { irq_settings_set_level(desc); irqd_set(&desc->irq_data, IRQD_LEVEL); } ret = 0; break; default: pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n", flags, irq, chip->irq_set_type); } if (unmask) unmask_irq(desc); return ret; } #ifdef CONFIG_HARDIRQS_SW_RESEND int irq_set_parent(int irq, int parent_irq) { unsigned long flags; struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); if (!desc) return -EINVAL; desc->parent_irq = parent_irq; irq_put_desc_unlock(desc, flags); return 0; } #endif static irqreturn_t irq_default_primary_handler(int irq, void *dev_id) { return IRQ_WAKE_THREAD; } static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id) { WARN(1, "Primary handler called for nested irq %d\n", irq); return IRQ_NONE; } static int irq_wait_for_interrupt(struct irqaction *action) { set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags)) { __set_current_state(TASK_RUNNING); return 0; } schedule(); set_current_state(TASK_INTERRUPTIBLE); } __set_current_state(TASK_RUNNING); return -1; } static void irq_finalize_oneshot(struct irq_desc *desc, struct irqaction *action) { if (!(desc->istate & IRQS_ONESHOT)) return; again: chip_bus_lock(desc); raw_spin_lock_irq(&desc->lock); if (unlikely(irqd_irq_inprogress(&desc->irq_data))) { raw_spin_unlock_irq(&desc->lock); chip_bus_sync_unlock(desc); cpu_relax(); goto again; } if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags)) goto out_unlock; desc->threads_oneshot &= ~action->thread_mask; if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && irqd_irq_masked(&desc->irq_data)) unmask_irq(desc); out_unlock: raw_spin_unlock_irq(&desc->lock); chip_bus_sync_unlock(desc); } #ifdef CONFIG_SMP static void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { cpumask_var_t mask; bool valid = true; if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags)) return; if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { set_bit(IRQTF_AFFINITY, &action->thread_flags); return; } raw_spin_lock_irq(&desc->lock); if (desc->irq_data.affinity) cpumask_copy(mask, desc->irq_data.affinity); else valid = false; raw_spin_unlock_irq(&desc->lock); if (valid) set_cpus_allowed_ptr(current, mask); free_cpumask_var(mask); } #else static inline void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } #endif static irqreturn_t irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) { irqreturn_t ret; local_bh_disable(); ret = action->thread_fn(action->irq, action->dev_id); irq_finalize_oneshot(desc, action); local_bh_enable(); return ret; } static irqreturn_t irq_thread_fn(struct irq_desc *desc, struct irqaction *action) { 
irqreturn_t ret; ret = action->thread_fn(action->irq, action->dev_id); irq_finalize_oneshot(desc, action); return ret; } static void wake_threads_waitq(struct irq_desc *desc) { if (atomic_dec_and_test(&desc->threads_active)) wake_up(&desc->wait_for_threads); } static void irq_thread_dtor(struct callback_head *unused) { struct task_struct *tsk = current; struct irq_desc *desc; struct irqaction *action; if (WARN_ON_ONCE(!(current->flags & PF_EXITING))) return; action = kthread_data(tsk); pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n", tsk->comm, tsk->pid, action->irq); desc = irq_to_desc(action->irq); if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags)) wake_threads_waitq(desc); irq_finalize_oneshot(desc, action); } static int irq_thread(void *data) { struct callback_head on_exit_work; static const struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, }; struct irqaction *action = data; struct irq_desc *desc = irq_to_desc(action->irq); irqreturn_t (*handler_fn)(struct irq_desc *desc, struct irqaction *action); if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD, &action->thread_flags)) handler_fn = irq_forced_thread_fn; else handler_fn = irq_thread_fn; sched_setscheduler(current, SCHED_FIFO, &param); init_task_work(&on_exit_work, irq_thread_dtor); task_work_add(current, &on_exit_work, false); irq_thread_check_affinity(desc, action); while (!irq_wait_for_interrupt(action)) { irqreturn_t action_ret; irq_thread_check_affinity(desc, action); action_ret = handler_fn(desc, action); if (action_ret == IRQ_HANDLED) atomic_inc(&desc->threads_handled); wake_threads_waitq(desc); } task_work_cancel(current, irq_thread_dtor); return 0; } static void irq_setup_forced_threading(struct irqaction *new) { if (!force_irqthreads) return; if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)) return; new->flags |= IRQF_ONESHOT; if (!new->thread_fn) { set_bit(IRQTF_FORCED_THREAD, &new->thread_flags); new->thread_fn = new->handler; new->handler = irq_default_primary_handler; } } static int __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) { struct irqaction *old, **old_ptr; unsigned long flags, thread_mask = 0; int ret, nested, shared = 0; cpumask_var_t mask; if (!desc) return -EINVAL; if (desc->irq_data.chip == &no_irq_chip) return -ENOSYS; if (!try_module_get(desc->owner)) return -ENODEV; nested = irq_settings_is_nested_thread(desc); if (nested) { if (!new->thread_fn) { ret = -EINVAL; goto out_mput; } new->handler = irq_nested_primary_handler; } else { if (irq_settings_can_thread(desc)) irq_setup_forced_threading(new); } if (new->thread_fn && !nested) { struct task_struct *t; t = kthread_create(irq_thread, new, "irq/%d-%s", irq, new->name); if (IS_ERR(t)) { ret = PTR_ERR(t); goto out_mput; } get_task_struct(t); new->thread = t; set_bit(IRQTF_AFFINITY, &new->thread_flags); } if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { ret = -ENOMEM; goto out_thread; } /* * Drivers are often written to work w/o knowledge about the * underlying irq chip implementation, so a request for a * threaded irq without a primary hard irq context handler * requires the ONESHOT flag to be set. Some irq chips like * MSI based interrupts are per se one shot safe. Check the * chip flags, so we can avoid the unmask dance at the end of * the threaded handler for those. 
*/ if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE) new->flags &= ~IRQF_ONESHOT; raw_spin_lock_irqsave(&desc->lock, flags); old_ptr = &desc->action; old = *old_ptr; if (old) { if (!((old->flags & new->flags) & IRQF_SHARED) || ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) || ((old->flags ^ new->flags) & IRQF_ONESHOT)) goto mismatch; if ((old->flags & IRQF_PERCPU) != (new->flags & IRQF_PERCPU)) goto mismatch; do { thread_mask |= old->thread_mask; old_ptr = &old->next; old = *old_ptr; } while (old); shared = 1; } if (new->flags & IRQF_ONESHOT) { if (thread_mask == ~0UL) { ret = -EBUSY; goto out_mask; } new->thread_mask = 1 << ffz(thread_mask); } else if (new->handler == irq_default_primary_handler && !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) { pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n", irq); ret = -EINVAL; goto out_mask; } if (!shared) { init_waitqueue_head(&desc->wait_for_threads); if (new->flags & IRQF_TRIGGER_MASK) { ret = __irq_set_trigger(desc, irq, new->flags & IRQF_TRIGGER_MASK); if (ret) goto out_mask; } desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ IRQS_ONESHOT | IRQS_WAITING); irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); if (new->flags & IRQF_PERCPU) { irqd_set(&desc->irq_data, IRQD_PER_CPU); irq_settings_set_per_cpu(desc); } if (new->flags & IRQF_ONESHOT) desc->istate |= IRQS_ONESHOT; if (irq_settings_can_autoenable(desc)) irq_startup(desc, true); else desc->depth = 1; if (new->flags & IRQF_NOBALANCING) { irq_settings_set_no_balancing(desc); irqd_set(&desc->irq_data, IRQD_NO_BALANCING); } setup_affinity(irq, desc, mask); } else if (new->flags & IRQF_TRIGGER_MASK) { unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK; unsigned int omsk = irq_settings_get_trigger_mask(desc); if (nmsk != omsk) pr_warning("irq %d uses trigger mode %u; requested %u\n", irq, nmsk, omsk); } new->irq = irq; *old_ptr = new; desc->irq_count = 0; desc->irqs_unhandled = 0; if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { desc->istate &= ~IRQS_SPURIOUS_DISABLED; __enable_irq(desc, irq, false); } raw_spin_unlock_irqrestore(&desc->lock, flags); if (new->thread) wake_up_process(new->thread); register_irq_proc(irq, desc); new->dir = NULL; register_handler_proc(irq, new); free_cpumask_var(mask); return 0; mismatch: if (!(new->flags & IRQF_PROBE_SHARED)) { pr_err("Flags mismatch irq %d. %08x (%s) vs. 
%08x (%s)\n", irq, new->flags, new->name, old->flags, old->name); #ifdef CONFIG_DEBUG_SHIRQ dump_stack(); #endif } ret = -EBUSY; out_mask: raw_spin_unlock_irqrestore(&desc->lock, flags); free_cpumask_var(mask); out_thread: if (new->thread) { struct task_struct *t = new->thread; new->thread = NULL; kthread_stop(t); put_task_struct(t); } out_mput: module_put(desc->owner); return ret; } int setup_irq(unsigned int irq, struct irqaction *act) { int retval; struct irq_desc *desc = irq_to_desc(irq); if (WARN_ON(irq_settings_is_per_cpu_devid(desc))) return -EINVAL; chip_bus_lock(desc); retval = __setup_irq(irq, desc, act); chip_bus_sync_unlock(desc); return retval; } EXPORT_SYMBOL_GPL(setup_irq); static struct irqaction *__free_irq(unsigned int irq, void *dev_id) { struct irq_desc *desc = irq_to_desc(irq); struct irqaction *action, **action_ptr; unsigned long flags; WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); if (!desc) return NULL; raw_spin_lock_irqsave(&desc->lock, flags); action_ptr = &desc->action; for (;;) { action = *action_ptr; if (!action) { WARN(1, "Trying to free already-free IRQ %d\n", irq); raw_spin_unlock_irqrestore(&desc->lock, flags); return NULL; } if (action->dev_id == dev_id) break; action_ptr = &action->next; } *action_ptr = action->next; if (!desc->action) { irq_shutdown(desc); if (desc->irq_data.chip->irq_mask) desc->irq_data.chip->irq_mask(&desc->irq_data); else if (desc->irq_data.chip->irq_mask_ack) desc->irq_data.chip->irq_mask_ack(&desc->irq_data); } #ifdef CONFIG_SMP if (WARN_ON_ONCE(desc->affinity_hint)) desc->affinity_hint = NULL; #endif raw_spin_unlock_irqrestore(&desc->lock, flags); unregister_handler_proc(irq, action); synchronize_irq(irq); #ifdef CONFIG_DEBUG_SHIRQ if (action->flags & IRQF_SHARED) { local_irq_save(flags); action->handler(irq, dev_id); local_irq_restore(flags); } #endif if (action->thread) { kthread_stop(action->thread); put_task_struct(action->thread); } module_put(desc->owner); return action; } void remove_irq(unsigned int irq, struct irqaction *act) { struct irq_desc *desc = irq_to_desc(irq); if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc))) __free_irq(irq, act->dev_id); } EXPORT_SYMBOL_GPL(remove_irq); void free_irq(unsigned int irq, void *dev_id) { struct irq_desc *desc = irq_to_desc(irq); #ifdef CONFIG_SMP struct irq_affinity_notify *notify; #endif if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) return; #ifdef CONFIG_SMP WARN_ON(!list_empty(&desc->affinity_notify)); list_for_each_entry(notify, &desc->affinity_notify, list) kref_put(&notify->kref, notify->release); #endif chip_bus_lock(desc); kfree(__free_irq(irq, dev_id)); chip_bus_sync_unlock(desc); } EXPORT_SYMBOL(free_irq); int request_threaded_irq(unsigned int irq, irq_handler_t handler, irq_handler_t thread_fn, unsigned long irqflags, const char *devname, void *dev_id) { struct irqaction *action; struct irq_desc *desc; int retval; if ((irqflags & IRQF_SHARED) && !dev_id) return -EINVAL; desc = irq_to_desc(irq); if (!desc) return -EINVAL; if (!irq_settings_can_request(desc) || WARN_ON(irq_settings_is_per_cpu_devid(desc))) return -EINVAL; if (!handler) { if (!thread_fn) return -EINVAL; handler = irq_default_primary_handler; } action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); if (!action) return -ENOMEM; action->handler = handler; action->thread_fn = thread_fn; action->flags = irqflags; action->name = devname; action->dev_id = dev_id; chip_bus_lock(desc); retval = __setup_irq(irq, desc, action); chip_bus_sync_unlock(desc); if (retval) 
kfree(action); #ifdef CONFIG_DEBUG_SHIRQ_FIXME if (!retval && (irqflags & IRQF_SHARED)) { unsigned long flags; disable_irq(irq); local_irq_save(flags); handler(irq, dev_id); local_irq_restore(flags); enable_irq(irq); } #endif return retval; } EXPORT_SYMBOL(request_threaded_irq); int request_any_context_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, const char *name, void *dev_id) { struct irq_desc *desc = irq_to_desc(irq); int ret; if (!desc) return -EINVAL; if (irq_settings_is_nested_thread(desc)) { ret = request_threaded_irq(irq, NULL, handler, flags, name, dev_id); return !ret ? IRQC_IS_NESTED : ret; } ret = request_irq(irq, handler, flags, name, dev_id); return !ret ? IRQC_IS_HARDIRQ : ret; } EXPORT_SYMBOL_GPL(request_any_context_irq); void irq_set_pending(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); unsigned long flags; if (desc) { raw_spin_lock_irqsave(&desc->lock, flags); desc->istate |= IRQS_PENDING; raw_spin_unlock_irqrestore(&desc->lock, flags); } } EXPORT_SYMBOL_GPL(irq_set_pending); void enable_percpu_irq(unsigned int irq, unsigned int type) { unsigned int cpu = smp_processor_id(); unsigned long flags; struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); if (!desc) return; type &= IRQ_TYPE_SENSE_MASK; if (type != IRQ_TYPE_NONE) { int ret; ret = __irq_set_trigger(desc, irq, type); if (ret) { WARN(1, "failed to set type for IRQ%d\n", irq); goto out; } } irq_percpu_enable(desc, cpu); out: irq_put_desc_unlock(desc, flags); } EXPORT_SYMBOL_GPL(enable_percpu_irq); void disable_percpu_irq(unsigned int irq) { unsigned int cpu = smp_processor_id(); unsigned long flags; struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); if (!desc) return; irq_percpu_disable(desc, cpu); irq_put_desc_unlock(desc, flags); } EXPORT_SYMBOL_GPL(disable_percpu_irq); static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id) { struct irq_desc *desc = irq_to_desc(irq); struct irqaction *action; unsigned long flags; WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); if (!desc) return NULL; raw_spin_lock_irqsave(&desc->lock, flags); action = desc->action; if (!action || action->percpu_dev_id != dev_id) { WARN(1, "Trying to free already-free IRQ %d\n", irq); goto bad; } if (!cpumask_empty(desc->percpu_enabled)) { WARN(1, "percpu IRQ %d still enabled on CPU%d!\n", irq, cpumask_first(desc->percpu_enabled)); goto bad; } desc->action = NULL; raw_spin_unlock_irqrestore(&desc->lock, flags); unregister_handler_proc(irq, action); module_put(desc->owner); return action; bad: raw_spin_unlock_irqrestore(&desc->lock, flags); return NULL; } void remove_percpu_irq(unsigned int irq, struct irqaction *act) { struct irq_desc *desc = irq_to_desc(irq); if (desc && irq_settings_is_per_cpu_devid(desc)) __free_percpu_irq(irq, act->percpu_dev_id); } void free_percpu_irq(unsigned int irq, void __percpu *dev_id) { struct irq_desc *desc = irq_to_desc(irq); if (!desc || !irq_settings_is_per_cpu_devid(desc)) return; chip_bus_lock(desc); kfree(__free_percpu_irq(irq, dev_id)); chip_bus_sync_unlock(desc); } int setup_percpu_irq(unsigned int irq, struct irqaction *act) { struct irq_desc *desc = irq_to_desc(irq); int retval; if (!desc || !irq_settings_is_per_cpu_devid(desc)) return -EINVAL; chip_bus_lock(desc); retval = __setup_irq(irq, desc, act); chip_bus_sync_unlock(desc); return retval; } int request_percpu_irq(unsigned int irq, irq_handler_t handler, const char *devname, void __percpu *dev_id) { struct 
irqaction *action; struct irq_desc *desc; int retval; if (!dev_id) return -EINVAL; desc = irq_to_desc(irq); if (!desc || !irq_settings_can_request(desc) || !irq_settings_is_per_cpu_devid(desc)) return -EINVAL; action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); if (!action) return -ENOMEM; action->handler = handler; action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND; action->name = devname; action->percpu_dev_id = dev_id; chip_bus_lock(desc); retval = __setup_irq(irq, desc, action); chip_bus_sync_unlock(desc); if (retval) kfree(action); return retval; }
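As a usage illustration for the API implemented above: a driver that passes handler == NULL to request_threaded_irq() gets irq_default_primary_handler() installed for hard-irq context, and __setup_irq() then insists on IRQF_ONESHOT (unless the chip is IRQCHIP_ONESHOT_SAFE), keeping the line masked until the thread function finishes. Below is a hedged sketch of such a request; the foo_* names and device structure are hypothetical, only the request_threaded_irq()/free_irq() calls come from this file.

#include <linux/interrupt.h>

/* Hypothetical device context for the example. */
struct foo_device {
	int irq;
};

/* Runs in the IRQ thread; may sleep, so slow bus traffic is fine here. */
static irqreturn_t foo_irq_thread(int irq, void *dev_id)
{
	struct foo_device *foo = dev_id;

	/* ... service the (hypothetical) device ... */
	(void)foo;
	return IRQ_HANDLED;
}

static int foo_request_irq(struct foo_device *foo)
{
	/*
	 * handler == NULL installs irq_default_primary_handler(), which
	 * only returns IRQ_WAKE_THREAD; IRQF_ONESHOT keeps the line
	 * masked until foo_irq_thread() completes, so a level-triggered
	 * interrupt cannot re-fire in the meantime.
	 */
	return request_threaded_irq(foo->irq, NULL, foo_irq_thread,
				    IRQF_ONESHOT, "foo", foo);
}

static void foo_free_irq(struct foo_device *foo)
{
	free_irq(foo->irq, foo);	/* also stops the irq thread */
}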
gpl-2.0
vgstef/MuseScore
mscore/mediadialog.cpp
17
7722
//=============================================================================
//  MuseScore
//  Linux Music Score Editor
//  $Id:$
//
//  Copyright (C) 2011 Werner Schweer and others
//
//  This program is free software; you can redistribute it and/or modify
//  it under the terms of the GNU General Public License version 2.
//
//  This program is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
//  GNU General Public License for more details.
//
//  You should have received a copy of the GNU General Public License
//  along with this program; if not, write to the Free Software
//  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
//=============================================================================

#include "mediadialog.h"
#include "icons.h"
#include "musescore.h"
#include "libmscore/score.h"
#include "libmscore/audio.h"
#include "scoreview.h"
#include "omr/omr.h"
#include "libmscore/tempo.h"

namespace Ms {

//---------------------------------------------------------
//   MediaDialog
//---------------------------------------------------------

MediaDialog::MediaDialog(QWidget* /*parent*/)
   : QDialog()
      {
      setupUi(this);
      setWindowFlags(this->windowFlags() & ~Qt::WindowContextHelpButtonHint);
      setWindowTitle(tr("MuseScore: Additional Media"));
      scanFileButton->setIcon(*icons[int(Icons::fileOpen_ICON)]);
      audioFileButton->setIcon(*icons[int(Icons::fileOpen_ICON)]);

      connect(addScan,         SIGNAL(clicked()), SLOT(addScanPressed()));
      connect(removeScan,      SIGNAL(clicked()), SLOT(removeScanPressed()));
      connect(addAudio,        SIGNAL(clicked()), SLOT(addAudioPressed()));
      connect(removeAudio,     SIGNAL(clicked()), SLOT(removeAudioPressed()));
      connect(scanFileButton,  SIGNAL(clicked()), SLOT(scanFileButtonPressed()));
      connect(audioFileButton, SIGNAL(clicked()), SLOT(audioFileButtonPressed()));
      }

//---------------------------------------------------------
//   setScore
//---------------------------------------------------------

void MediaDialog::setScore(Score* s)
      {
      score = s;
      Omr* omr = score->omr();
      if (omr) {
            scanFile->setText(omr->path());
            addScan->setEnabled(false);
            removeScan->setEnabled(true);
            scanFileButton->setEnabled(false);
            }
      else {
            scanFile->setText(QString());
            addScan->setEnabled(true);
            removeScan->setEnabled(false);
            scanFileButton->setEnabled(true);
            }
      Audio* audio = score->audio();
      if (audio) {
            audioFile->setText(audio->path());
            addAudio->setEnabled(false);
            removeAudio->setEnabled(true);
            audioFileButton->setEnabled(false);
            }
      else {
            audioFile->setText(QString());
            addAudio->setEnabled(true);
            removeAudio->setEnabled(false);
            audioFileButton->setEnabled(true);
            }
      }

//---------------------------------------------------------
//   addScanPressed
//---------------------------------------------------------

void MediaDialog::addScanPressed()
      {
      QString path = scanFile->text();
      if (score->omr() || path.isEmpty())
            return;
      Omr* omr = new Omr(path, score);
      if (!omr->readPdf()) {
            qDebug("read omr failed");
            delete omr;
            return;
            }
      score->setOmr(omr);
      mscore->currentScoreView()->showOmr(true);
      }

//---------------------------------------------------------
//   removeScanPressed
//---------------------------------------------------------

void MediaDialog::removeScanPressed()
      {
      mscore->currentScoreView()->showOmr(false);
      score->removeOmr();
      scanFile->setText(QString());
      addScan->setEnabled(true);
      removeScan->setEnabled(false);
      scanFileButton->setEnabled(true);
      }
//--------------------------------------------------------- // addAudioPressed //--------------------------------------------------------- void MediaDialog::addAudioPressed() { QString path = audioFile->text(); if (score->audio() || path.isEmpty()) return; QFile f(path); if (!f.open(QIODevice::ReadOnly)) return; QByteArray ba = f.readAll(); f.close(); Audio* audio = new Audio; audio->setPath(path); audio->setData(ba); score->setAudio(audio); mscore->updatePlayMode(); #if 0 QString wavPath = QDir::tempPath() + "/score.wav"; mscore->saveAs(score, true, wavPath, "wav"); QString program = "D:/HACK/sonic-annotator/bologna.bat"; QStringList arguments; arguments << QDir::toNativeSeparators(path)<< QDir::toNativeSeparators(wavPath); QProcess myProcess(this); myProcess.start(program, arguments); myProcess.waitForFinished(); qDebug() << myProcess.readAll(); #endif QFileInfo fi(path); QFile syncFile(fi.absolutePath() + "/" + fi.baseName() + ".txt"); TempoMap* tmo = score->tempomap(); if (!syncFile.open(QIODevice::ReadOnly)) return; qreal t = 0; int tick = 0; qreal lastTempo = tmo->tempo(0); TempoMap* tmn = new TempoMap(); tmn->setTempo(0, lastTempo); int resolution = 25; while (!syncFile.atEnd()) { for (int i = 0; !syncFile.atEnd() && i < resolution-1; i++) syncFile.readLine(); if (syncFile.atEnd()) break; QByteArray line = syncFile.readLine(); QString s(line); QStringList sl = s.split(":"); qreal tScore = sl[0].trimmed().toDouble(); qreal tPerformance = sl[1].trimmed().toDouble(); // timestamp of last int scoreTick = tmo->time2tick(tScore); qreal deltaError = tmo->tick2time(scoreTick) - tScore; int dt = scoreTick - tick; qreal deltaTime = tPerformance - t; if (deltaTime > 0) { qreal tempo = dt / (480 * deltaTime); if(tempo != lastTempo) { qDebug() << tempo; tmn->setTempo(tick, tempo); lastTempo = tempo; } } t = tPerformance - deltaError; tick = scoreTick; } score->setTempomap(tmn); syncFile.close(); QMessageBox::information(0, "Done", "Done"); } //--------------------------------------------------------- // removeAudioPressed //--------------------------------------------------------- void MediaDialog::removeAudioPressed() { score->removeAudio(); audioFile->setText(QString()); addAudio->setEnabled(true); removeAudio->setEnabled(false); audioFileButton->setEnabled(true); } //--------------------------------------------------------- // scanFileButtonPressed //--------------------------------------------------------- void MediaDialog::scanFileButtonPressed() { QString s = mscore->getScanFile(QString()); if (!s.isNull()) scanFile->setText(s); } //--------------------------------------------------------- // audioFileButtonPressed //--------------------------------------------------------- void MediaDialog::audioFileButtonPressed() { QString s = mscore->getAudioFile(QString()); if (!s.isNull()) audioFile->setText(s); } }
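The tempo-map rebuild in addAudioPressed() boils down to one formula: between two alignment points read from the sync file ("scoreTime : performanceTime" pairs, sampled every 25 lines), the new tempo is the tick distance divided by 480 ticks per beat times the elapsed performance time, i.e. beats per second. Here is a small self-contained C sketch of that computation; the 480 division comes from the code above, while the alignment values are made up for illustration.

#include <stdio.h>

#define DIVISION 480	/* ticks per beat, as hard-coded above */

/* One "scoreTime : performanceTime" alignment point from the sync file. */
struct sync_point {
	int tick;	/* score position in ticks */
	double time;	/* matching time in the recorded performance, seconds */
};

/* Tempo between two alignment points, in beats per second, mirroring
 * tempo = dt / (480 * deltaTime) from MediaDialog::addAudioPressed(). */
static double segment_tempo(struct sync_point a, struct sync_point b)
{
	int dt = b.tick - a.tick;
	double dtime = b.time - a.time;

	return dtime > 0 ? (double)dt / (DIVISION * dtime) : 0.0;
}

int main(void)
{
	/* Made-up example: one beat of score covered by 0.6 s of audio. */
	struct sync_point a = { 0, 0.0 };
	struct sync_point b = { 480, 0.6 };

	printf("tempo = %.3f beats/s\n", segment_tempo(a, b)); /* 1.667 */
	return 0;
}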
gpl-2.0
RytoEX/obs-studio
plugins/win-dshow/tiny-nv12-scale.c
17
5264
#include <string.h> #include "tiny-nv12-scale.h" /* TODO: optimize this stuff later, or replace with something better. it's * kind of garbage. although normally it shouldn't be called that often. plus * it's nearest neighbor so not really a huge deal. at the very least it * should be sse2 at some point. */ void nv12_scale_init(nv12_scale_t *s, enum target_format format, int dst_cx, int dst_cy, int src_cx, int src_cy) { s->format = format; s->src_cx = src_cx; s->src_cy = src_cy; s->dst_cx = dst_cx; s->dst_cy = dst_cy; } static void nv12_scale_nearest(nv12_scale_t *s, uint8_t *dst_start, const uint8_t *src) { register uint8_t *dst = dst_start; const int src_cx = s->src_cx; const int src_cy = s->src_cy; const int dst_cx = s->dst_cx; const int dst_cy = s->dst_cy; /* lum */ for (int y = 0; y < dst_cy; y++) { const int src_line = y * src_cy / dst_cy * s->src_cx; for (int x = 0; x < dst_cx; x++) { const int src_x = x * src_cx / dst_cx; *(dst++) = src[src_line + src_x]; } } src += src_cx * src_cy; /* uv */ const int dst_cx_d2 = dst_cx / 2; const int dst_cy_d2 = dst_cy / 2; for (int y = 0; y < dst_cy_d2; y++) { const int src_line = y * src_cy / dst_cy * src_cx; for (int x = 0; x < dst_cx_d2; x++) { const int src_x = x * src_cx / dst_cx * 2; const int pos = src_line + src_x; *(dst++) = src[pos]; *(dst++) = src[pos + 1]; } } } static void nv12_scale_nearest_to_i420(nv12_scale_t *s, uint8_t *dst_start, const uint8_t *src) { register uint8_t *dst = dst_start; const int src_cx = s->src_cx; const int src_cy = s->src_cy; const int dst_cx = s->dst_cx; const int dst_cy = s->dst_cy; const int size = src_cx * src_cy; /* lum */ for (int y = 0; y < dst_cy; y++) { const int src_line = y * src_cy / dst_cy * s->src_cx; for (int x = 0; x < dst_cx; x++) { const int src_x = x * src_cx / dst_cx; *(dst++) = src[src_line + src_x]; } } src += size; /* uv */ const int dst_cx_d2 = dst_cx / 2; const int dst_cy_d2 = dst_cy / 2; register uint8_t *dst2 = dst + dst_cx * dst_cy / 4; for (int y = 0; y < dst_cy_d2; y++) { const int src_line = y * src_cy / dst_cy * src_cx; for (int x = 0; x < dst_cx_d2; x++) { const int src_x = x * src_cx / dst_cx * 2; const int pos = src_line + src_x; *(dst++) = src[pos]; *(dst2++) = src[pos + 1]; } } } static void nv12_convert_to_i420(nv12_scale_t *s, uint8_t *dst_start, const uint8_t *src_start) { const int size = s->src_cx * s->src_cy; const int size_d4 = size / 4; memcpy(dst_start, src_start, size); register uint8_t *dst1 = dst_start + size; register uint8_t *dst2 = dst1 + size_d4; register uint8_t *dst_end = dst2 + size_d4; register const uint8_t *src = src_start + size; while (dst2 < dst_end) { *(dst1++) = *(src++); *(dst2++) = *(src++); } } static void nv12_scale_nearest_to_yuy2(nv12_scale_t *s, uint8_t *dst_start, const uint8_t *src) { register uint8_t *dst = dst_start; const int src_cx = s->src_cx; const int src_cy = s->src_cy; const int dst_cx = s->dst_cx; const int dst_cy = s->dst_cy; const int src_cx_d2 = src_cx / 2; const int src_cy_d2 = src_cy / 2; const int dst_cx_d2 = dst_cx / 2; const int dst_cy_d2 = dst_cy / 2; const int size = src_cx * src_cy; const uint8_t *src_uv = src + size; register int uv_flip = 0; for (int y = 0; y < dst_cy; y++) { const int src_line = y * src_cy / dst_cy * s->src_cx; const int src_line_d2 = y / 2 * src_cy_d2 / dst_cy_d2 * s->src_cx; for (int x = 0; x < dst_cx; x++) { const int src_x = x * src_cx / dst_cx; const int src_x_d2 = x / 2 * src_cx_d2 / dst_cx_d2; const int pos = src_line + src_x; const int pos_uv = src_line_d2 + src_x_d2 * 2 + uv_flip; *(dst++) 
= src[pos]; *(dst++) = src_uv[pos_uv]; uv_flip ^= 1; } } } static void nv12_convert_to_yuy2(nv12_scale_t *s, uint8_t *dst_start, const uint8_t *src_start) { const int size = s->src_cx * s->src_cy; const int size_dst_line = s->src_cx * 4; register const uint8_t *src_y = src_start; register const uint8_t *src_uv = src_y + size; register uint8_t *dst = dst_start; register uint8_t *dst_end = dst + size * 2; while (dst < dst_end) { register uint8_t *dst_line_end = dst + size_dst_line; const uint8_t *src_uv_start = src_uv; while (dst < dst_line_end) { *(dst++) = *(src_y++); *(dst++) = *(src_uv++); *(dst++) = *(src_y++); *(dst++) = *(src_uv++); } dst_line_end = dst + size_dst_line; src_uv = src_uv_start; while (dst < dst_line_end) { *(dst++) = *(src_y++); *(dst++) = *(src_uv++); *(dst++) = *(src_y++); *(dst++) = *(src_uv++); } } } void nv12_do_scale(nv12_scale_t *s, uint8_t *dst, const uint8_t *src) { if (s->src_cx == s->dst_cx && s->src_cy == s->dst_cy) { if (s->format == TARGET_FORMAT_I420) nv12_convert_to_i420(s, dst, src); else if (s->format == TARGET_FORMAT_YUY2) nv12_convert_to_yuy2(s, dst, src); else memcpy(dst, src, s->src_cx * s->src_cy * 3 / 2); } else { if (s->format == TARGET_FORMAT_I420) nv12_scale_nearest_to_i420(s, dst, src); else if (s->format == TARGET_FORMAT_YUY2) nv12_scale_nearest_to_yuy2(s, dst, src); else nv12_scale_nearest(s, dst, src); } }
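A hedged usage sketch for the converter above: nv12_scale_init() records the geometry and target format, and nv12_do_scale() then scales/converts one frame. NV12 and I420 frames occupy cx * cy * 3 / 2 bytes (YUY2 would need cx * cy * 2). The nv12_scale_t type and the TARGET_FORMAT_* constants are assumed to come from tiny-nv12-scale.h, as used in this file.

#include <stdint.h>
#include <stdlib.h>
#include "tiny-nv12-scale.h"

int main(void)
{
	const int src_cx = 1280, src_cy = 720;
	const int dst_cx = 640,  dst_cy = 360;

	/* NV12 and I420 frames occupy width * height * 3 / 2 bytes. */
	uint8_t *src = malloc(src_cx * src_cy * 3 / 2);
	uint8_t *dst = malloc(dst_cx * dst_cy * 3 / 2);
	if (!src || !dst)
		return 1;

	/* ... fill src with one NV12 frame from the capture device ... */

	nv12_scale_t scale;
	nv12_scale_init(&scale, TARGET_FORMAT_I420,
			dst_cx, dst_cy, src_cx, src_cy);

	/* Nearest-neighbor downscale plus NV12 -> I420 plane split. */
	nv12_do_scale(&scale, dst, src);

	free(src);
	free(dst);
	return 0;
}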
gpl-2.0
tanzilli/ariag25-linux-2.6.39
fs/jbd/journal.c
17
56641
/* * linux/fs/jbd/journal.c * * Written by Stephen C. Tweedie <sct@redhat.com>, 1998 * * Copyright 1998 Red Hat corp --- All Rights Reserved * * This file is part of the Linux kernel and is made available under * the terms of the GNU General Public License, version 2, or at your * option, any later version, incorporated herein by reference. * * Generic filesystem journal-writing code; part of the ext2fs * journaling system. * * This file manages journals: areas of disk reserved for logging * transactional updates. This includes the kernel journaling thread * which is responsible for scheduling updates to the log. * * We do not actually manage the physical storage of the journal in this * file: that is left to a per-journal policy function, which allows us * to store the journal within a filesystem-specified area for ext2 * journaling (ext2 can use a reserved inode for storing the log). */ #include <linux/module.h> #include <linux/time.h> #include <linux/fs.h> #include <linux/jbd.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/freezer.h> #include <linux/pagemap.h> #include <linux/kthread.h> #include <linux/poison.h> #include <linux/proc_fs.h> #include <linux/debugfs.h> #include <linux/ratelimit.h> #include <asm/uaccess.h> #include <asm/page.h> EXPORT_SYMBOL(journal_start); EXPORT_SYMBOL(journal_restart); EXPORT_SYMBOL(journal_extend); EXPORT_SYMBOL(journal_stop); EXPORT_SYMBOL(journal_lock_updates); EXPORT_SYMBOL(journal_unlock_updates); EXPORT_SYMBOL(journal_get_write_access); EXPORT_SYMBOL(journal_get_create_access); EXPORT_SYMBOL(journal_get_undo_access); EXPORT_SYMBOL(journal_dirty_data); EXPORT_SYMBOL(journal_dirty_metadata); EXPORT_SYMBOL(journal_release_buffer); EXPORT_SYMBOL(journal_forget); #if 0 EXPORT_SYMBOL(journal_sync_buffer); #endif EXPORT_SYMBOL(journal_flush); EXPORT_SYMBOL(journal_revoke); EXPORT_SYMBOL(journal_init_dev); EXPORT_SYMBOL(journal_init_inode); EXPORT_SYMBOL(journal_update_format); EXPORT_SYMBOL(journal_check_used_features); EXPORT_SYMBOL(journal_check_available_features); EXPORT_SYMBOL(journal_set_features); EXPORT_SYMBOL(journal_create); EXPORT_SYMBOL(journal_load); EXPORT_SYMBOL(journal_destroy); EXPORT_SYMBOL(journal_abort); EXPORT_SYMBOL(journal_errno); EXPORT_SYMBOL(journal_ack_err); EXPORT_SYMBOL(journal_clear_err); EXPORT_SYMBOL(log_wait_commit); EXPORT_SYMBOL(log_start_commit); EXPORT_SYMBOL(journal_start_commit); EXPORT_SYMBOL(journal_force_commit_nested); EXPORT_SYMBOL(journal_wipe); EXPORT_SYMBOL(journal_blocks_per_page); EXPORT_SYMBOL(journal_invalidatepage); EXPORT_SYMBOL(journal_try_to_free_buffers); EXPORT_SYMBOL(journal_force_commit); static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *); static void __journal_abort_soft (journal_t *journal, int errno); static const char *journal_dev_name(journal_t *journal, char *buffer); /* * Helper function used to manage commit timeouts */ static void commit_timeout(unsigned long __data) { struct task_struct * p = (struct task_struct *) __data; wake_up_process(p); } /* * kjournald: The main thread function used to manage a logging device * journal. * * This kernel thread is responsible for two things: * * 1) COMMIT: Every so often we need to commit the current state of the * filesystem to disk. The journal thread is responsible for writing * all of the metadata buffers to disk. 
* * 2) CHECKPOINT: We cannot reuse a used section of the log file until all * of the data in that part of the log has been rewritten elsewhere on * the disk. Flushing these old buffers to reclaim space in the log is * known as checkpointing, and this thread is responsible for that job. */ static int kjournald(void *arg) { journal_t *journal = arg; transaction_t *transaction; /* * Set up an interval timer which can be used to trigger a commit wakeup * after the commit interval expires */ setup_timer(&journal->j_commit_timer, commit_timeout, (unsigned long)current); /* Record that the journal thread is running */ journal->j_task = current; wake_up(&journal->j_wait_done_commit); printk(KERN_INFO "kjournald starting. Commit interval %ld seconds\n", journal->j_commit_interval / HZ); /* * And now, wait forever for commit wakeup events. */ spin_lock(&journal->j_state_lock); loop: if (journal->j_flags & JFS_UNMOUNT) goto end_loop; jbd_debug(1, "commit_sequence=%d, commit_request=%d\n", journal->j_commit_sequence, journal->j_commit_request); if (journal->j_commit_sequence != journal->j_commit_request) { jbd_debug(1, "OK, requests differ\n"); spin_unlock(&journal->j_state_lock); del_timer_sync(&journal->j_commit_timer); journal_commit_transaction(journal); spin_lock(&journal->j_state_lock); goto loop; } wake_up(&journal->j_wait_done_commit); if (freezing(current)) { /* * The simpler the better. Flushing journal isn't a * good idea, because that depends on threads that may * be already stopped. */ jbd_debug(1, "Now suspending kjournald\n"); spin_unlock(&journal->j_state_lock); refrigerator(); spin_lock(&journal->j_state_lock); } else { /* * We assume on resume that commits are already there, * so we don't sleep */ DEFINE_WAIT(wait); int should_sleep = 1; prepare_to_wait(&journal->j_wait_commit, &wait, TASK_INTERRUPTIBLE); if (journal->j_commit_sequence != journal->j_commit_request) should_sleep = 0; transaction = journal->j_running_transaction; if (transaction && time_after_eq(jiffies, transaction->t_expires)) should_sleep = 0; if (journal->j_flags & JFS_UNMOUNT) should_sleep = 0; if (should_sleep) { spin_unlock(&journal->j_state_lock); schedule(); spin_lock(&journal->j_state_lock); } finish_wait(&journal->j_wait_commit, &wait); } jbd_debug(1, "kjournald wakes\n"); /* * Were we woken up by a commit wakeup event? */ transaction = journal->j_running_transaction; if (transaction && time_after_eq(jiffies, transaction->t_expires)) { journal->j_commit_request = transaction->t_tid; jbd_debug(1, "woke because of timeout\n"); } goto loop; end_loop: spin_unlock(&journal->j_state_lock); del_timer_sync(&journal->j_commit_timer); journal->j_task = NULL; wake_up(&journal->j_wait_done_commit); jbd_debug(1, "Journal thread exiting.\n"); return 0; } static int journal_start_thread(journal_t *journal) { struct task_struct *t; t = kthread_run(kjournald, journal, "kjournald"); if (IS_ERR(t)) return PTR_ERR(t); wait_event(journal->j_wait_done_commit, journal->j_task != NULL); return 0; } static void journal_kill_thread(journal_t *journal) { spin_lock(&journal->j_state_lock); journal->j_flags |= JFS_UNMOUNT; while (journal->j_task) { wake_up(&journal->j_wait_commit); spin_unlock(&journal->j_state_lock); wait_event(journal->j_wait_done_commit, journal->j_task == NULL); spin_lock(&journal->j_state_lock); } spin_unlock(&journal->j_state_lock); } /* * journal_write_metadata_buffer: write a metadata buffer to the journal. * * Writes a metadata buffer to a given disk block. 
The actual IO is not
 * performed but a new buffer_head is constructed which labels the data
 * to be written with the correct destination disk block.
 *
 * Any magic-number escaping which needs to be done will cause a
 * copy-out here.  If the buffer happens to start with the
 * JFS_MAGIC_NUMBER, then we can't write it to the log directly: the
 * magic number is only written to the log for descriptor blocks.  In
 * this case, we copy the data and replace the first word with 0, and we
 * return a result code which indicates that this buffer needs to be
 * marked as an escaped buffer in the corresponding log descriptor
 * block.  The missing word can then be restored when the block is read
 * during recovery.
 *
 * If the source buffer has already been modified by a new transaction
 * since we took the last commit snapshot, we use the frozen copy of
 * that data for IO.  If we end up using the existing buffer_head's data
 * for the write, then we *have* to lock the buffer to prevent anyone
 * else from using and possibly modifying it while the IO is in
 * progress.
 *
 * The function returns a pointer to the buffer_heads to be used for IO.
 *
 * We assume that the journal has already been locked in this function.
 *
 * Return value:
 *  <0: Error
 * >=0: Finished OK
 *
 * On success:
 *  Bit 0 set == escape performed on the data
 *  Bit 1 set == buffer copy-out performed (kfree the data after IO)
 */
int journal_write_metadata_buffer(transaction_t *transaction,
				  struct journal_head *jh_in,
				  struct journal_head **jh_out,
				  unsigned int blocknr)
{
	int need_copy_out = 0;
	int done_copy_out = 0;
	int do_escape = 0;
	char *mapped_data;
	struct buffer_head *new_bh;
	struct journal_head *new_jh;
	struct page *new_page;
	unsigned int new_offset;
	struct buffer_head *bh_in = jh2bh(jh_in);
	journal_t *journal = transaction->t_journal;

	/*
	 * The buffer really shouldn't be locked: only the current committing
	 * transaction is allowed to write it, so nobody else is allowed
	 * to do any IO.
	 *
	 * akpm: except if we're journalling data, and write() output is
	 * also part of a shared mapping, and another thread has
	 * decided to launch a writepage() against this buffer.
	 */
	J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in));

	new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL);
	/* keep subsequent assertions sane */
	new_bh->b_state = 0;
	init_buffer(new_bh, NULL, NULL);
	atomic_set(&new_bh->b_count, 1);
	new_jh = journal_add_journal_head(new_bh);	/* This sleeps */

	/*
	 * If a new transaction has already done a buffer copy-out, then
	 * we use that version of the data for the commit.
	 */
	jbd_lock_bh_state(bh_in);
repeat:
	if (jh_in->b_frozen_data) {
		done_copy_out = 1;
		new_page = virt_to_page(jh_in->b_frozen_data);
		new_offset = offset_in_page(jh_in->b_frozen_data);
	} else {
		new_page = jh2bh(jh_in)->b_page;
		new_offset = offset_in_page(jh2bh(jh_in)->b_data);
	}

	mapped_data = kmap_atomic(new_page, KM_USER0);
	/*
	 * Check for escaping
	 */
	if (*((__be32 *)(mapped_data + new_offset)) ==
				cpu_to_be32(JFS_MAGIC_NUMBER)) {
		need_copy_out = 1;
		do_escape = 1;
	}
	kunmap_atomic(mapped_data, KM_USER0);

	/*
	 * Do we need to do a data copy?
*/ if (need_copy_out && !done_copy_out) { char *tmp; jbd_unlock_bh_state(bh_in); tmp = jbd_alloc(bh_in->b_size, GFP_NOFS); jbd_lock_bh_state(bh_in); if (jh_in->b_frozen_data) { jbd_free(tmp, bh_in->b_size); goto repeat; } jh_in->b_frozen_data = tmp; mapped_data = kmap_atomic(new_page, KM_USER0); memcpy(tmp, mapped_data + new_offset, jh2bh(jh_in)->b_size); kunmap_atomic(mapped_data, KM_USER0); new_page = virt_to_page(tmp); new_offset = offset_in_page(tmp); done_copy_out = 1; } /* * Did we need to do an escaping? Now we've done all the * copying, we can finally do so. */ if (do_escape) { mapped_data = kmap_atomic(new_page, KM_USER0); *((unsigned int *)(mapped_data + new_offset)) = 0; kunmap_atomic(mapped_data, KM_USER0); } set_bh_page(new_bh, new_page, new_offset); new_jh->b_transaction = NULL; new_bh->b_size = jh2bh(jh_in)->b_size; new_bh->b_bdev = transaction->t_journal->j_dev; new_bh->b_blocknr = blocknr; set_buffer_mapped(new_bh); set_buffer_dirty(new_bh); *jh_out = new_jh; /* * The to-be-written buffer needs to get moved to the io queue, * and the original buffer whose contents we are shadowing or * copying is moved to the transaction's shadow queue. */ JBUFFER_TRACE(jh_in, "file as BJ_Shadow"); spin_lock(&journal->j_list_lock); __journal_file_buffer(jh_in, transaction, BJ_Shadow); spin_unlock(&journal->j_list_lock); jbd_unlock_bh_state(bh_in); JBUFFER_TRACE(new_jh, "file as BJ_IO"); journal_file_buffer(new_jh, transaction, BJ_IO); return do_escape | (done_copy_out << 1); } /* * Allocation code for the journal file. Manage the space left in the * journal, so that we can begin checkpointing when appropriate. */ /* * __log_space_left: Return the number of free blocks left in the journal. * * Called with the journal already locked. * * Called under j_state_lock */ int __log_space_left(journal_t *journal) { int left = journal->j_free; assert_spin_locked(&journal->j_state_lock); /* * Be pessimistic here about the number of those free blocks which * might be required for log descriptor control blocks. */ #define MIN_LOG_RESERVED_BLOCKS 32 /* Allow for rounding errors */ left -= MIN_LOG_RESERVED_BLOCKS; if (left <= 0) return 0; left -= (left >> 3); return left; } /* * Called under j_state_lock. Returns true if a transaction commit was started. */ int __log_start_commit(journal_t *journal, tid_t target) { /* * Are we already doing a recent enough commit? */ if (!tid_geq(journal->j_commit_request, target)) { /* * We want a new commit: OK, mark the request and wakeup the * commit thread. We do _not_ do the commit ourselves. */ journal->j_commit_request = target; jbd_debug(1, "JBD: requesting commit %d/%d\n", journal->j_commit_request, journal->j_commit_sequence); wake_up(&journal->j_wait_commit); return 1; } return 0; } int log_start_commit(journal_t *journal, tid_t tid) { int ret; spin_lock(&journal->j_state_lock); ret = __log_start_commit(journal, tid); spin_unlock(&journal->j_state_lock); return ret; } /* * Force and wait upon a commit if the calling process is not within * transaction. This is used for forcing out undo-protected data which contains * bitmaps, when the fs is running out of space. * * We can only force the running transaction if we don't have an active handle; * otherwise, we will deadlock. * * Returns true if a transaction was started. 
 */
int journal_force_commit_nested(journal_t *journal)
{
	transaction_t *transaction = NULL;
	tid_t tid;

	spin_lock(&journal->j_state_lock);
	if (journal->j_running_transaction && !current->journal_info) {
		transaction = journal->j_running_transaction;
		__log_start_commit(journal, transaction->t_tid);
	} else if (journal->j_committing_transaction)
		transaction = journal->j_committing_transaction;

	if (!transaction) {
		spin_unlock(&journal->j_state_lock);
		return 0;	/* Nothing to retry */
	}

	tid = transaction->t_tid;
	spin_unlock(&journal->j_state_lock);
	log_wait_commit(journal, tid);
	return 1;
}

/*
 * Start a commit of the current running transaction (if any).  Returns true
 * if a transaction is going to be committed (or is currently already
 * committing), and fills its tid in at *ptid
 */
int journal_start_commit(journal_t *journal, tid_t *ptid)
{
	int ret = 0;

	spin_lock(&journal->j_state_lock);
	if (journal->j_running_transaction) {
		tid_t tid = journal->j_running_transaction->t_tid;

		__log_start_commit(journal, tid);
		/* There's a running transaction and we've just made sure
		 * its commit has been scheduled. */
		if (ptid)
			*ptid = tid;
		ret = 1;
	} else if (journal->j_committing_transaction) {
		/*
		 * If ext3_write_super() recently started a commit, then we
		 * have to wait for completion of that transaction
		 */
		if (ptid)
			*ptid = journal->j_committing_transaction->t_tid;
		ret = 1;
	}
	spin_unlock(&journal->j_state_lock);
	return ret;
}

/*
 * Wait for a specified commit to complete.
 * The caller may not hold the journal lock.
 */
int log_wait_commit(journal_t *journal, tid_t tid)
{
	int err = 0;

#ifdef CONFIG_JBD_DEBUG
	spin_lock(&journal->j_state_lock);
	if (!tid_geq(journal->j_commit_request, tid)) {
		printk(KERN_EMERG
		       "%s: error: j_commit_request=%d, tid=%d\n",
		       __func__, journal->j_commit_request, tid);
	}
	spin_unlock(&journal->j_state_lock);
#endif
	spin_lock(&journal->j_state_lock);
	while (tid_gt(tid, journal->j_commit_sequence)) {
		jbd_debug(1, "JBD: want %d, j_commit_sequence=%d\n",
				  tid, journal->j_commit_sequence);
		wake_up(&journal->j_wait_commit);
		spin_unlock(&journal->j_state_lock);
		wait_event(journal->j_wait_done_commit,
				!tid_gt(tid, journal->j_commit_sequence));
		spin_lock(&journal->j_state_lock);
	}
	spin_unlock(&journal->j_state_lock);

	if (unlikely(is_journal_aborted(journal))) {
		printk(KERN_EMERG "journal commit I/O error\n");
		err = -EIO;
	}
	return err;
}

/*
 * Return 1 if a given transaction has not yet sent a barrier request
 * connected with a transaction commit.  If 0 is returned, the transaction
 * may or may not have sent the barrier.  Used to avoid sending the
 * barrier twice in common cases.
 */
int journal_trans_will_send_data_barrier(journal_t *journal, tid_t tid)
{
	int ret = 0;
	transaction_t *commit_trans;

	if (!(journal->j_flags & JFS_BARRIER))
		return 0;
	spin_lock(&journal->j_state_lock);
	/* Transaction already committed? */
	if (tid_geq(journal->j_commit_sequence, tid))
		goto out;
	/*
	 * Transaction is being committed and we already proceeded to
	 * writing commit record?
*/ commit_trans = journal->j_committing_transaction; if (commit_trans && commit_trans->t_tid == tid && commit_trans->t_state >= T_COMMIT_RECORD) goto out; ret = 1; out: spin_unlock(&journal->j_state_lock); return ret; } EXPORT_SYMBOL(journal_trans_will_send_data_barrier); /* * Log buffer allocation routines: */ int journal_next_log_block(journal_t *journal, unsigned int *retp) { unsigned int blocknr; spin_lock(&journal->j_state_lock); J_ASSERT(journal->j_free > 1); blocknr = journal->j_head; journal->j_head++; journal->j_free--; if (journal->j_head == journal->j_last) journal->j_head = journal->j_first; spin_unlock(&journal->j_state_lock); return journal_bmap(journal, blocknr, retp); } /* * Conversion of logical to physical block numbers for the journal * * On external journals the journal blocks are identity-mapped, so * this is a no-op. If needed, we can use j_blk_offset - everything is * ready. */ int journal_bmap(journal_t *journal, unsigned int blocknr, unsigned int *retp) { int err = 0; unsigned int ret; if (journal->j_inode) { ret = bmap(journal->j_inode, blocknr); if (ret) *retp = ret; else { char b[BDEVNAME_SIZE]; printk(KERN_ALERT "%s: journal block not found " "at offset %u on %s\n", __func__, blocknr, bdevname(journal->j_dev, b)); err = -EIO; __journal_abort_soft(journal, err); } } else { *retp = blocknr; /* +journal->j_blk_offset */ } return err; } /* * We play buffer_head aliasing tricks to write data/metadata blocks to * the journal without copying their contents, but for journal * descriptor blocks we do need to generate bona fide buffers. * * After the caller of journal_get_descriptor_buffer() has finished modifying * the buffer's contents they really should run flush_dcache_page(bh->b_page). * But we don't bother doing that, so there will be coherency problems with * mmaps of blockdevs which hold live JBD-controlled filesystems. */ struct journal_head *journal_get_descriptor_buffer(journal_t *journal) { struct buffer_head *bh; unsigned int blocknr; int err; err = journal_next_log_block(journal, &blocknr); if (err) return NULL; bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize); if (!bh) return NULL; lock_buffer(bh); memset(bh->b_data, 0, journal->j_blocksize); set_buffer_uptodate(bh); unlock_buffer(bh); BUFFER_TRACE(bh, "return this buffer"); return journal_add_journal_head(bh); } /* * Management for journal control blocks: functions to create and * destroy journal_t structures, and to initialise and read existing * journal blocks from disk. */ /* First: create and setup a journal_t object in memory. We initialise * very few fields yet: that has to wait until we have created the * journal structures from from scratch, or loaded them from disk. */ static journal_t * journal_init_common (void) { journal_t *journal; int err; journal = kzalloc(sizeof(*journal), GFP_KERNEL); if (!journal) goto fail; init_waitqueue_head(&journal->j_wait_transaction_locked); init_waitqueue_head(&journal->j_wait_logspace); init_waitqueue_head(&journal->j_wait_done_commit); init_waitqueue_head(&journal->j_wait_checkpoint); init_waitqueue_head(&journal->j_wait_commit); init_waitqueue_head(&journal->j_wait_updates); mutex_init(&journal->j_barrier); mutex_init(&journal->j_checkpoint_mutex); spin_lock_init(&journal->j_revoke_lock); spin_lock_init(&journal->j_list_lock); spin_lock_init(&journal->j_state_lock); journal->j_commit_interval = (HZ * JBD_DEFAULT_MAX_COMMIT_AGE); /* The journal is marked for error until we succeed with recovery! 
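 * (journal_create() and journal_load() below clear JFS_ABORT again
 * once the journal is known to be usable.)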
 */
	journal->j_flags = JFS_ABORT;

	/* Set up a default-sized revoke table for the new mount. */
	err = journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH);
	if (err) {
		kfree(journal);
		goto fail;
	}
	return journal;
fail:
	return NULL;
}

/* journal_init_dev and journal_init_inode:
 *
 * Create a journal structure assigned some fixed set of disk blocks to
 * the journal.  We don't actually touch those disk blocks yet, but we
 * need to set up all of the mapping information to tell the journaling
 * system where the journal blocks are.
 *
 */

/**
 *  journal_t * journal_init_dev() - creates and initialises a journal structure
 *  @bdev: Block device on which to create the journal
 *  @fs_dev: Device which holds the journalled filesystem for this journal.
 *  @start: Block nr at which the journal starts.
 *  @len: Length of the journal in blocks.
 *  @blocksize: blocksize of journalling device
 *
 *  Returns: a newly created journal_t *
 *
 *  journal_init_dev creates a journal which maps a fixed contiguous
 *  range of blocks on an arbitrary block device.
 *
 */
journal_t * journal_init_dev(struct block_device *bdev,
			struct block_device *fs_dev,
			int start, int len, int blocksize)
{
	journal_t *journal = journal_init_common();
	struct buffer_head *bh;
	int n;

	if (!journal)
		return NULL;

	/* journal descriptor can store up to n blocks -bzzz */
	journal->j_blocksize = blocksize;
	n = journal->j_blocksize / sizeof(journal_block_tag_t);
	journal->j_wbufsize = n;
	journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
	if (!journal->j_wbuf) {
		printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
			__func__);
		goto out_err;
	}
	journal->j_dev = bdev;
	journal->j_fs_dev = fs_dev;
	journal->j_blk_offset = start;
	journal->j_maxlen = len;

	bh = __getblk(journal->j_dev, start, journal->j_blocksize);
	if (!bh) {
		printk(KERN_ERR
		       "%s: Cannot get buffer for journal superblock\n",
		       __func__);
		goto out_err;
	}
	journal->j_sb_buffer = bh;
	journal->j_superblock = (journal_superblock_t *)bh->b_data;

	return journal;
out_err:
	kfree(journal->j_wbuf);
	kfree(journal);
	return NULL;
}

/**
 *  journal_t * journal_init_inode () - creates a journal which maps to an inode.
 *  @inode: An inode to create the journal in
 *
 * journal_init_inode creates a journal which maps an on-disk inode as
 * the journal.  The inode must exist already, must support bmap() and
 * must have all data blocks preallocated.
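 *
 * Illustrative use (hypothetical caller and label):
 *
 *	journal_t *journal = journal_init_inode(journal_inode);
 *	if (!journal)
 *		goto out_no_journal;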
*/ journal_t * journal_init_inode (struct inode *inode) { struct buffer_head *bh; journal_t *journal = journal_init_common(); int err; int n; unsigned int blocknr; if (!journal) return NULL; journal->j_dev = journal->j_fs_dev = inode->i_sb->s_bdev; journal->j_inode = inode; jbd_debug(1, "journal %p: inode %s/%ld, size %Ld, bits %d, blksize %ld\n", journal, inode->i_sb->s_id, inode->i_ino, (long long) inode->i_size, inode->i_sb->s_blocksize_bits, inode->i_sb->s_blocksize); journal->j_maxlen = inode->i_size >> inode->i_sb->s_blocksize_bits; journal->j_blocksize = inode->i_sb->s_blocksize; /* journal descriptor can store up to n blocks -bzzz */ n = journal->j_blocksize / sizeof(journal_block_tag_t); journal->j_wbufsize = n; journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL); if (!journal->j_wbuf) { printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n", __func__); goto out_err; } err = journal_bmap(journal, 0, &blocknr); /* If that failed, give up */ if (err) { printk(KERN_ERR "%s: Cannot locate journal superblock\n", __func__); goto out_err; } bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize); if (!bh) { printk(KERN_ERR "%s: Cannot get buffer for journal superblock\n", __func__); goto out_err; } journal->j_sb_buffer = bh; journal->j_superblock = (journal_superblock_t *)bh->b_data; return journal; out_err: kfree(journal->j_wbuf); kfree(journal); return NULL; } /* * If the journal init or create aborts, we need to mark the journal * superblock as being NULL to prevent the journal destroy from writing * back a bogus superblock. */ static void journal_fail_superblock (journal_t *journal) { struct buffer_head *bh = journal->j_sb_buffer; brelse(bh); journal->j_sb_buffer = NULL; } /* * Given a journal_t structure, initialise the various fields for * startup of a new journaling session. We use this both when creating * a journal, and after recovering an old journal to reset it for * subsequent use. */ static int journal_reset(journal_t *journal) { journal_superblock_t *sb = journal->j_superblock; unsigned int first, last; first = be32_to_cpu(sb->s_first); last = be32_to_cpu(sb->s_maxlen); if (first + JFS_MIN_JOURNAL_BLOCKS > last + 1) { printk(KERN_ERR "JBD: Journal too short (blocks %u-%u).\n", first, last); journal_fail_superblock(journal); return -EINVAL; } journal->j_first = first; journal->j_last = last; journal->j_head = first; journal->j_tail = first; journal->j_free = last - first; journal->j_tail_sequence = journal->j_transaction_sequence; journal->j_commit_sequence = journal->j_transaction_sequence - 1; journal->j_commit_request = journal->j_commit_sequence; journal->j_max_transaction_buffers = journal->j_maxlen / 4; /* Add the dynamic fields and write it to disk. */ journal_update_superblock(journal, 1); return journal_start_thread(journal); } /** * int journal_create() - Initialise the new journal file * @journal: Journal to create. This structure must have been initialised * * Given a journal_t structure which tells us which disk blocks we can * use, create a new journal superblock and initialise all of the * journal fields from scratch. **/ int journal_create(journal_t *journal) { unsigned int blocknr; struct buffer_head *bh; journal_superblock_t *sb; int i, err; if (journal->j_maxlen < JFS_MIN_JOURNAL_BLOCKS) { printk (KERN_ERR "Journal length (%d blocks) too short.\n", journal->j_maxlen); journal_fail_superblock(journal); return -EINVAL; } if (journal->j_inode == NULL) { /* * We don't know what block to start at! 
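 * (Only inode-backed journals can be created from scratch here;
 * creating a journal on an external device is not supported by this
 * code path.)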
*/ printk(KERN_EMERG "%s: creation of journal on external device!\n", __func__); BUG(); } /* Zero out the entire journal on disk. We cannot afford to have any blocks on disk beginning with JFS_MAGIC_NUMBER. */ jbd_debug(1, "JBD: Zeroing out journal blocks...\n"); for (i = 0; i < journal->j_maxlen; i++) { err = journal_bmap(journal, i, &blocknr); if (err) return err; bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize); if (unlikely(!bh)) return -ENOMEM; lock_buffer(bh); memset (bh->b_data, 0, journal->j_blocksize); BUFFER_TRACE(bh, "marking dirty"); mark_buffer_dirty(bh); BUFFER_TRACE(bh, "marking uptodate"); set_buffer_uptodate(bh); unlock_buffer(bh); __brelse(bh); } sync_blockdev(journal->j_dev); jbd_debug(1, "JBD: journal cleared.\n"); /* OK, fill in the initial static fields in the new superblock */ sb = journal->j_superblock; sb->s_header.h_magic = cpu_to_be32(JFS_MAGIC_NUMBER); sb->s_header.h_blocktype = cpu_to_be32(JFS_SUPERBLOCK_V2); sb->s_blocksize = cpu_to_be32(journal->j_blocksize); sb->s_maxlen = cpu_to_be32(journal->j_maxlen); sb->s_first = cpu_to_be32(1); journal->j_transaction_sequence = 1; journal->j_flags &= ~JFS_ABORT; journal->j_format_version = 2; return journal_reset(journal); } /** * void journal_update_superblock() - Update journal sb on disk. * @journal: The journal to update. * @wait: Set to '0' if you don't want to wait for IO completion. * * Update a journal's dynamic superblock fields and write it to disk, * optionally waiting for the IO to complete. */ void journal_update_superblock(journal_t *journal, int wait) { journal_superblock_t *sb = journal->j_superblock; struct buffer_head *bh = journal->j_sb_buffer; /* * As a special case, if the on-disk copy is already marked as needing * no recovery (s_start == 0) and there are no outstanding transactions * in the filesystem, then we can safely defer the superblock update * until the next commit by setting JFS_FLUSHED. This avoids * attempting a write to a potential-readonly device. */ if (sb->s_start == 0 && journal->j_tail_sequence == journal->j_transaction_sequence) { jbd_debug(1,"JBD: Skipping superblock update on recovered sb " "(start %u, seq %d, errno %d)\n", journal->j_tail, journal->j_tail_sequence, journal->j_errno); goto out; } if (buffer_write_io_error(bh)) { char b[BDEVNAME_SIZE]; /* * Oh, dear. A previous attempt to write the journal * superblock failed. This could happen because the * USB device was yanked out. Or it could happen to * be a transient write error and maybe the block will * be remapped. Nothing we can do but to retry the * write and hope for the best. 
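 * (Clearing the error bit below allows the retry to proceed; if the
 * write fails again, the wait branch in this function detects it and
 * reports the new failure.)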
*/ printk(KERN_ERR "JBD: previous I/O error detected " "for journal superblock update for %s.\n", journal_dev_name(journal, b)); clear_buffer_write_io_error(bh); set_buffer_uptodate(bh); } spin_lock(&journal->j_state_lock); jbd_debug(1,"JBD: updating superblock (start %u, seq %d, errno %d)\n", journal->j_tail, journal->j_tail_sequence, journal->j_errno); sb->s_sequence = cpu_to_be32(journal->j_tail_sequence); sb->s_start = cpu_to_be32(journal->j_tail); sb->s_errno = cpu_to_be32(journal->j_errno); spin_unlock(&journal->j_state_lock); BUFFER_TRACE(bh, "marking dirty"); mark_buffer_dirty(bh); if (wait) { sync_dirty_buffer(bh); if (buffer_write_io_error(bh)) { char b[BDEVNAME_SIZE]; printk(KERN_ERR "JBD: I/O error detected " "when updating journal superblock for %s.\n", journal_dev_name(journal, b)); clear_buffer_write_io_error(bh); set_buffer_uptodate(bh); } } else write_dirty_buffer(bh, WRITE); out: /* If we have just flushed the log (by marking s_start==0), then * any future commit will have to be careful to update the * superblock again to re-record the true start of the log. */ spin_lock(&journal->j_state_lock); if (sb->s_start) journal->j_flags &= ~JFS_FLUSHED; else journal->j_flags |= JFS_FLUSHED; spin_unlock(&journal->j_state_lock); } /* * Read the superblock for a given journal, performing initial * validation of the format. */ static int journal_get_superblock(journal_t *journal) { struct buffer_head *bh; journal_superblock_t *sb; int err = -EIO; bh = journal->j_sb_buffer; J_ASSERT(bh != NULL); if (!buffer_uptodate(bh)) { ll_rw_block(READ, 1, &bh); wait_on_buffer(bh); if (!buffer_uptodate(bh)) { printk (KERN_ERR "JBD: IO error reading journal superblock\n"); goto out; } } sb = journal->j_superblock; err = -EINVAL; if (sb->s_header.h_magic != cpu_to_be32(JFS_MAGIC_NUMBER) || sb->s_blocksize != cpu_to_be32(journal->j_blocksize)) { printk(KERN_WARNING "JBD: no valid journal superblock found\n"); goto out; } switch(be32_to_cpu(sb->s_header.h_blocktype)) { case JFS_SUPERBLOCK_V1: journal->j_format_version = 1; break; case JFS_SUPERBLOCK_V2: journal->j_format_version = 2; break; default: printk(KERN_WARNING "JBD: unrecognised superblock format ID\n"); goto out; } if (be32_to_cpu(sb->s_maxlen) < journal->j_maxlen) journal->j_maxlen = be32_to_cpu(sb->s_maxlen); else if (be32_to_cpu(sb->s_maxlen) > journal->j_maxlen) { printk (KERN_WARNING "JBD: journal file too short\n"); goto out; } return 0; out: journal_fail_superblock(journal); return err; } /* * Load the on-disk journal superblock and read the key fields into the * journal_t. */ static int load_superblock(journal_t *journal) { int err; journal_superblock_t *sb; err = journal_get_superblock(journal); if (err) return err; sb = journal->j_superblock; journal->j_tail_sequence = be32_to_cpu(sb->s_sequence); journal->j_tail = be32_to_cpu(sb->s_start); journal->j_first = be32_to_cpu(sb->s_first); journal->j_last = be32_to_cpu(sb->s_maxlen); journal->j_errno = be32_to_cpu(sb->s_errno); return 0; } /** * int journal_load() - Read journal from disk. * @journal: Journal to act on. * * Given a journal_t structure which tells us which disk blocks contain * a journal, read the journal from disk to initialise the in-memory * structures. */ int journal_load(journal_t *journal) { int err; journal_superblock_t *sb; err = load_superblock(journal); if (err) return err; sb = journal->j_superblock; /* If this is a V2 superblock, then we have to check the * features flags on it. 
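 * (V1 superblocks predate the feature-flag fields, so for them there
 * is nothing to check.)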
*/ if (journal->j_format_version >= 2) { if ((sb->s_feature_ro_compat & ~cpu_to_be32(JFS_KNOWN_ROCOMPAT_FEATURES)) || (sb->s_feature_incompat & ~cpu_to_be32(JFS_KNOWN_INCOMPAT_FEATURES))) { printk (KERN_WARNING "JBD: Unrecognised features on journal\n"); return -EINVAL; } } /* Let the recovery code check whether it needs to recover any * data from the journal. */ if (journal_recover(journal)) goto recovery_error; /* OK, we've finished with the dynamic journal bits: * reinitialise the dynamic contents of the superblock in memory * and reset them on disk. */ if (journal_reset(journal)) goto recovery_error; journal->j_flags &= ~JFS_ABORT; journal->j_flags |= JFS_LOADED; return 0; recovery_error: printk (KERN_WARNING "JBD: recovery failed\n"); return -EIO; } /** * void journal_destroy() - Release a journal_t structure. * @journal: Journal to act on. * * Release a journal_t structure once it is no longer in use by the * journaled object. * Return <0 if we couldn't clean up the journal. */ int journal_destroy(journal_t *journal) { int err = 0; /* Wait for the commit thread to wake up and die. */ journal_kill_thread(journal); /* Force a final log commit */ if (journal->j_running_transaction) journal_commit_transaction(journal); /* Force any old transactions to disk */ /* Totally anal locking here... */ spin_lock(&journal->j_list_lock); while (journal->j_checkpoint_transactions != NULL) { spin_unlock(&journal->j_list_lock); log_do_checkpoint(journal); spin_lock(&journal->j_list_lock); } J_ASSERT(journal->j_running_transaction == NULL); J_ASSERT(journal->j_committing_transaction == NULL); J_ASSERT(journal->j_checkpoint_transactions == NULL); spin_unlock(&journal->j_list_lock); if (journal->j_sb_buffer) { if (!is_journal_aborted(journal)) { /* We can now mark the journal as empty. */ journal->j_tail = 0; journal->j_tail_sequence = ++journal->j_transaction_sequence; journal_update_superblock(journal, 1); } else { err = -EIO; } brelse(journal->j_sb_buffer); } if (journal->j_inode) iput(journal->j_inode); if (journal->j_revoke) journal_destroy_revoke(journal); kfree(journal->j_wbuf); kfree(journal); return err; } /** *int journal_check_used_features () - Check if features specified are used. * @journal: Journal to check. * @compat: bitmask of compatible features * @ro: bitmask of features that force read-only mount * @incompat: bitmask of incompatible features * * Check whether the journal uses all of a given set of * features. Return true (non-zero) if it does. **/ int journal_check_used_features (journal_t *journal, unsigned long compat, unsigned long ro, unsigned long incompat) { journal_superblock_t *sb; if (!compat && !ro && !incompat) return 1; if (journal->j_format_version == 1) return 0; sb = journal->j_superblock; if (((be32_to_cpu(sb->s_feature_compat) & compat) == compat) && ((be32_to_cpu(sb->s_feature_ro_compat) & ro) == ro) && ((be32_to_cpu(sb->s_feature_incompat) & incompat) == incompat)) return 1; return 0; } /** * int journal_check_available_features() - Check feature set in journalling layer * @journal: Journal to check. * @compat: bitmask of compatible features * @ro: bitmask of features that force read-only mount * @incompat: bitmask of incompatible features * * Check whether the journaling code supports the use of * all of a given set of features on this journal. Return true * (non-zero) if it can. 
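 *
 * Illustrative use (hypothetical caller; -EINVAL is an arbitrary
 * choice): journal_set_features() performs this availability check
 * itself and returns 0 on failure, so a caller can simply do
 *
 *	if (!journal_set_features(journal, compat, ro, incompat))
 *		return -EINVAL;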
*/ int journal_check_available_features (journal_t *journal, unsigned long compat, unsigned long ro, unsigned long incompat) { if (!compat && !ro && !incompat) return 1; /* We can support any known requested features iff the * superblock is in version 2. Otherwise we fail to support any * extended sb features. */ if (journal->j_format_version != 2) return 0; if ((compat & JFS_KNOWN_COMPAT_FEATURES) == compat && (ro & JFS_KNOWN_ROCOMPAT_FEATURES) == ro && (incompat & JFS_KNOWN_INCOMPAT_FEATURES) == incompat) return 1; return 0; } /** * int journal_set_features () - Mark a given journal feature in the superblock * @journal: Journal to act on. * @compat: bitmask of compatible features * @ro: bitmask of features that force read-only mount * @incompat: bitmask of incompatible features * * Mark a given journal feature as present on the * superblock. Returns true if the requested features could be set. * */ int journal_set_features (journal_t *journal, unsigned long compat, unsigned long ro, unsigned long incompat) { journal_superblock_t *sb; if (journal_check_used_features(journal, compat, ro, incompat)) return 1; if (!journal_check_available_features(journal, compat, ro, incompat)) return 0; jbd_debug(1, "Setting new features 0x%lx/0x%lx/0x%lx\n", compat, ro, incompat); sb = journal->j_superblock; sb->s_feature_compat |= cpu_to_be32(compat); sb->s_feature_ro_compat |= cpu_to_be32(ro); sb->s_feature_incompat |= cpu_to_be32(incompat); return 1; } /** * int journal_update_format () - Update on-disk journal structure. * @journal: Journal to act on. * * Given an initialised but unloaded journal struct, poke about in the * on-disk structure to update it to the most recent supported version. */ int journal_update_format (journal_t *journal) { journal_superblock_t *sb; int err; err = journal_get_superblock(journal); if (err) return err; sb = journal->j_superblock; switch (be32_to_cpu(sb->s_header.h_blocktype)) { case JFS_SUPERBLOCK_V2: return 0; case JFS_SUPERBLOCK_V1: return journal_convert_superblock_v1(journal, sb); default: break; } return -EINVAL; } static int journal_convert_superblock_v1(journal_t *journal, journal_superblock_t *sb) { int offset, blocksize; struct buffer_head *bh; printk(KERN_WARNING "JBD: Converting superblock from version 1 to 2.\n"); /* Pre-initialise new fields to zero */ offset = ((char *) &(sb->s_feature_compat)) - ((char *) sb); blocksize = be32_to_cpu(sb->s_blocksize); memset(&sb->s_feature_compat, 0, blocksize-offset); sb->s_nr_users = cpu_to_be32(1); sb->s_header.h_blocktype = cpu_to_be32(JFS_SUPERBLOCK_V2); journal->j_format_version = 2; bh = journal->j_sb_buffer; BUFFER_TRACE(bh, "marking dirty"); mark_buffer_dirty(bh); sync_dirty_buffer(bh); return 0; } /** * int journal_flush () - Flush journal * @journal: Journal to act on. * * Flush all data for a given journal to disk and empty the journal. * Filesystems can use this when remounting readonly to ensure that * recovery does not need to happen on remount. */ int journal_flush(journal_t *journal) { int err = 0; transaction_t *transaction = NULL; unsigned int old_tail; spin_lock(&journal->j_state_lock); /* Force everything buffered to the log... */ if (journal->j_running_transaction) { transaction = journal->j_running_transaction; __log_start_commit(journal, transaction->t_tid); } else if (journal->j_committing_transaction) transaction = journal->j_committing_transaction; /* Wait for the log commit to complete... 
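	 * (j_state_lock is dropped before sleeping in log_wait_commit()
	 * below.)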
 */
	if (transaction) {
		tid_t tid = transaction->t_tid;

		spin_unlock(&journal->j_state_lock);
		log_wait_commit(journal, tid);
	} else {
		spin_unlock(&journal->j_state_lock);
	}

	/* ...and flush everything in the log out to disk. */
	spin_lock(&journal->j_list_lock);
	while (!err && journal->j_checkpoint_transactions != NULL) {
		spin_unlock(&journal->j_list_lock);
		mutex_lock(&journal->j_checkpoint_mutex);
		err = log_do_checkpoint(journal);
		mutex_unlock(&journal->j_checkpoint_mutex);
		spin_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);

	if (is_journal_aborted(journal))
		return -EIO;

	cleanup_journal_tail(journal);

	/* Finally, mark the journal as really needing no recovery.
	 * This sets s_start==0 in the underlying superblock, which is
	 * the magic code for a fully-recovered superblock.  Any future
	 * commits of data to the journal will restore the current
	 * s_start value. */
	spin_lock(&journal->j_state_lock);
	old_tail = journal->j_tail;
	journal->j_tail = 0;
	spin_unlock(&journal->j_state_lock);
	journal_update_superblock(journal, 1);
	spin_lock(&journal->j_state_lock);
	journal->j_tail = old_tail;

	J_ASSERT(!journal->j_running_transaction);
	J_ASSERT(!journal->j_committing_transaction);
	J_ASSERT(!journal->j_checkpoint_transactions);
	J_ASSERT(journal->j_head == journal->j_tail);
	J_ASSERT(journal->j_tail_sequence == journal->j_transaction_sequence);
	spin_unlock(&journal->j_state_lock);
	return 0;
}

/**
 * int journal_wipe() - Wipe journal contents
 * @journal: Journal to act on.
 * @write: flag (see below)
 *
 * Wipe out all of the contents of a journal, safely.  This will produce
 * a warning if the journal contains any valid recovery information.
 * Must be called between journal_init_*() and journal_load().
 *
 * If 'write' is non-zero, then we wipe out the journal on disk; otherwise
 * we merely suppress recovery.
 */
int journal_wipe(journal_t *journal, int write)
{
	int err = 0;

	J_ASSERT (!(journal->j_flags & JFS_LOADED));

	err = load_superblock(journal);
	if (err)
		return err;

	if (!journal->j_tail)
		goto no_recovery;

	printk (KERN_WARNING "JBD: %s recovery information on journal\n",
		write ? "Clearing" : "Ignoring");

	err = journal_skip_recovery(journal);
	if (write)
		journal_update_superblock(journal, 1);

 no_recovery:
	return err;
}

/*
 * journal_dev_name: format a character string to describe on what
 * device this journal is present.
 */
static const char *journal_dev_name(journal_t *journal, char *buffer)
{
	struct block_device *bdev;

	if (journal->j_inode)
		bdev = journal->j_inode->i_sb->s_bdev;
	else
		bdev = journal->j_dev;

	return bdevname(bdev, buffer);
}

/*
 * Journal abort has very specific semantics, which are described in
 * the documentation for journal_abort() below.
 *
 * Two internal functions, which provide abort to the jbd layer
 * itself, are here.
 */

/*
 * Quick version for internal journal use (doesn't lock the journal).
 * Aborts hard --- we mark the abort as occurred, but do _nothing_ else,
 * and don't attempt to make any other journal updates.
 */
static void __journal_abort_hard(journal_t *journal)
{
	transaction_t *transaction;
	char b[BDEVNAME_SIZE];

	if (journal->j_flags & JFS_ABORT)
		return;

	printk(KERN_ERR "Aborting journal on device %s.\n",
		journal_dev_name(journal, b));

	spin_lock(&journal->j_state_lock);
	journal->j_flags |= JFS_ABORT;
	transaction = journal->j_running_transaction;
	if (transaction)
		__log_start_commit(journal, transaction->t_tid);
	spin_unlock(&journal->j_state_lock);
}

/* Soft abort: record the abort error status in the journal superblock,
 * but don't do any other IO.
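 * (Note: when errno is non-zero the superblock is in fact written out
 * afterwards, via journal_update_superblock() in the body below.)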
 */
static void __journal_abort_soft (journal_t *journal, int errno)
{
	if (journal->j_flags & JFS_ABORT)
		return;

	if (!journal->j_errno)
		journal->j_errno = errno;

	__journal_abort_hard(journal);

	if (errno)
		journal_update_superblock(journal, 1);
}

/**
 * void journal_abort () - Shutdown the journal immediately.
 * @journal: the journal to shutdown.
 * @errno: an error number to record in the journal indicating
 * the reason for the shutdown.
 *
 * Perform a complete, immediate shutdown of the ENTIRE
 * journal (not of a single transaction).  This operation cannot be
 * undone without closing and reopening the journal.
 *
 * The journal_abort function is intended to support higher level error
 * recovery mechanisms such as the ext2/ext3 remount-readonly error
 * mode.
 *
 * Journal abort has very specific semantics.  Any existing dirty,
 * unjournaled buffers in the main filesystem will still be written to
 * disk by bdflush, but the journaling mechanism will be suspended
 * immediately and no further transaction commits will be honoured.
 *
 * Any dirty, journaled buffers will be written back to disk without
 * hitting the journal.  Atomicity cannot be guaranteed on an aborted
 * filesystem, but we _do_ attempt to leave as much data as possible
 * behind for fsck to use for cleanup.
 *
 * Any attempt to get a new transaction handle on a journal which is in
 * ABORT state will just result in an -EROFS error return.  A
 * journal_stop on an existing handle will return -EIO if we have
 * entered abort state during the update.
 *
 * Recursive transactions are not disturbed by journal abort until the
 * final journal_stop, which will receive the -EIO error.
 *
 * Finally, the journal_abort call allows the caller to supply an errno
 * which will be recorded (if possible) in the journal superblock.  This
 * allows a client to record failure conditions in the middle of a
 * transaction without having to complete the transaction to record the
 * failure to disk.  ext3_error, for example, now uses this
 * functionality.
 *
 * Errors which originate from within the journaling layer will NOT
 * supply an errno; a null errno implies that absolutely no further
 * writes are done to the journal (unless there are any already in
 * progress).
 *
 */
void journal_abort(journal_t *journal, int errno)
{
	__journal_abort_soft(journal, errno);
}

/**
 * int journal_errno () - returns the journal's error state.
 * @journal: journal to examine.
 *
 * This is the errno number set with journal_abort(), the last
 * time the journal was mounted - if the journal was stopped
 * without calling abort this will be 0.
 *
 * If the journal has been aborted during this mount, -EROFS will
 * be returned.
 */
int journal_errno(journal_t *journal)
{
	int err;

	spin_lock(&journal->j_state_lock);
	if (journal->j_flags & JFS_ABORT)
		err = -EROFS;
	else
		err = journal->j_errno;
	spin_unlock(&journal->j_state_lock);
	return err;
}

/**
 * int journal_clear_err () - clears the journal's error state
 * @journal: journal to act on.
 *
 * An error must be cleared or Acked to take a FS out of readonly
 * mode.
 */
int journal_clear_err(journal_t *journal)
{
	int err = 0;

	spin_lock(&journal->j_state_lock);
	if (journal->j_flags & JFS_ABORT)
		err = -EROFS;
	else
		journal->j_errno = 0;
	spin_unlock(&journal->j_state_lock);
	return err;
}

/**
 * void journal_ack_err() - Ack journal err.
 * @journal: journal to act on.
 *
 * An error must be cleared or Acked to take a FS out of readonly
 * mode.
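 *
 * Illustrative recovery sequence (hypothetical caller, e.g. a remount
 * path; err is a local variable of the caller):
 *
 *	err = journal_clear_err(journal);
 *	if (err == -EROFS)
 *		journal_ack_err(journal);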
*/ void journal_ack_err(journal_t *journal) { spin_lock(&journal->j_state_lock); if (journal->j_errno) journal->j_flags |= JFS_ACK_ERR; spin_unlock(&journal->j_state_lock); } int journal_blocks_per_page(struct inode *inode) { return 1 << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); } /* * Journal_head storage management */ static struct kmem_cache *journal_head_cache; #ifdef CONFIG_JBD_DEBUG static atomic_t nr_journal_heads = ATOMIC_INIT(0); #endif static int journal_init_journal_head_cache(void) { int retval; J_ASSERT(journal_head_cache == NULL); journal_head_cache = kmem_cache_create("journal_head", sizeof(struct journal_head), 0, /* offset */ SLAB_TEMPORARY, /* flags */ NULL); /* ctor */ retval = 0; if (!journal_head_cache) { retval = -ENOMEM; printk(KERN_EMERG "JBD: no memory for journal_head cache\n"); } return retval; } static void journal_destroy_journal_head_cache(void) { if (journal_head_cache) { kmem_cache_destroy(journal_head_cache); journal_head_cache = NULL; } } /* * journal_head splicing and dicing */ static struct journal_head *journal_alloc_journal_head(void) { struct journal_head *ret; #ifdef CONFIG_JBD_DEBUG atomic_inc(&nr_journal_heads); #endif ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS); if (ret == NULL) { jbd_debug(1, "out of memory for journal_head\n"); printk_ratelimited(KERN_NOTICE "ENOMEM in %s, retrying.\n", __func__); while (ret == NULL) { yield(); ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS); } } return ret; } static void journal_free_journal_head(struct journal_head *jh) { #ifdef CONFIG_JBD_DEBUG atomic_dec(&nr_journal_heads); memset(jh, JBD_POISON_FREE, sizeof(*jh)); #endif kmem_cache_free(journal_head_cache, jh); } /* * A journal_head is attached to a buffer_head whenever JBD has an * interest in the buffer. * * Whenever a buffer has an attached journal_head, its ->b_state:BH_JBD bit * is set. This bit is tested in core kernel code where we need to take * JBD-specific actions. Testing the zeroness of ->b_private is not reliable * there. * * When a buffer has its BH_JBD bit set, its ->b_count is elevated by one. * * When a buffer has its BH_JBD bit set it is immune from being released by * core kernel code, mainly via ->b_count. * * A journal_head may be detached from its buffer_head when the journal_head's * b_transaction, b_cp_transaction and b_next_transaction pointers are NULL. * Various places in JBD call journal_remove_journal_head() to indicate that the * journal_head can be dropped if needed. * * Various places in the kernel want to attach a journal_head to a buffer_head * _before_ attaching the journal_head to a transaction. To protect the * journal_head in this situation, journal_add_journal_head elevates the * journal_head's b_jcount refcount by one. The caller must call * journal_put_journal_head() to undo this. * * So the typical usage would be: * * (Attach a journal_head if needed. Increments b_jcount) * struct journal_head *jh = journal_add_journal_head(bh); * ... * jh->b_transaction = xxx; * journal_put_journal_head(jh); * * Now, the journal_head's b_jcount is zero, but it is safe from being released * because it has a non-zero b_transaction. */ /* * Give a buffer_head a journal_head. * * Doesn't need the journal lock. * May sleep. 
*/ struct journal_head *journal_add_journal_head(struct buffer_head *bh) { struct journal_head *jh; struct journal_head *new_jh = NULL; repeat: if (!buffer_jbd(bh)) { new_jh = journal_alloc_journal_head(); memset(new_jh, 0, sizeof(*new_jh)); } jbd_lock_bh_journal_head(bh); if (buffer_jbd(bh)) { jh = bh2jh(bh); } else { J_ASSERT_BH(bh, (atomic_read(&bh->b_count) > 0) || (bh->b_page && bh->b_page->mapping)); if (!new_jh) { jbd_unlock_bh_journal_head(bh); goto repeat; } jh = new_jh; new_jh = NULL; /* We consumed it */ set_buffer_jbd(bh); bh->b_private = jh; jh->b_bh = bh; get_bh(bh); BUFFER_TRACE(bh, "added journal_head"); } jh->b_jcount++; jbd_unlock_bh_journal_head(bh); if (new_jh) journal_free_journal_head(new_jh); return bh->b_private; } /* * Grab a ref against this buffer_head's journal_head. If it ended up not * having a journal_head, return NULL */ struct journal_head *journal_grab_journal_head(struct buffer_head *bh) { struct journal_head *jh = NULL; jbd_lock_bh_journal_head(bh); if (buffer_jbd(bh)) { jh = bh2jh(bh); jh->b_jcount++; } jbd_unlock_bh_journal_head(bh); return jh; } static void __journal_remove_journal_head(struct buffer_head *bh) { struct journal_head *jh = bh2jh(bh); J_ASSERT_JH(jh, jh->b_jcount >= 0); get_bh(bh); if (jh->b_jcount == 0) { if (jh->b_transaction == NULL && jh->b_next_transaction == NULL && jh->b_cp_transaction == NULL) { J_ASSERT_JH(jh, jh->b_jlist == BJ_None); J_ASSERT_BH(bh, buffer_jbd(bh)); J_ASSERT_BH(bh, jh2bh(jh) == bh); BUFFER_TRACE(bh, "remove journal_head"); if (jh->b_frozen_data) { printk(KERN_WARNING "%s: freeing " "b_frozen_data\n", __func__); jbd_free(jh->b_frozen_data, bh->b_size); } if (jh->b_committed_data) { printk(KERN_WARNING "%s: freeing " "b_committed_data\n", __func__); jbd_free(jh->b_committed_data, bh->b_size); } bh->b_private = NULL; jh->b_bh = NULL; /* debug, really */ clear_buffer_jbd(bh); __brelse(bh); journal_free_journal_head(jh); } else { BUFFER_TRACE(bh, "journal_head was locked"); } } } /* * journal_remove_journal_head(): if the buffer isn't attached to a transaction * and has a zero b_jcount then remove and release its journal_head. If we did * see that the buffer is not used by any transaction we also "logically" * decrement ->b_count. * * We in fact take an additional increment on ->b_count as a convenience, * because the caller usually wants to do additional things with the bh * after calling here. * The caller of journal_remove_journal_head() *must* run __brelse(bh) at some * time. Once the caller has run __brelse(), the buffer is eligible for * reaping by try_to_free_buffers(). */ void journal_remove_journal_head(struct buffer_head *bh) { jbd_lock_bh_journal_head(bh); __journal_remove_journal_head(bh); jbd_unlock_bh_journal_head(bh); } /* * Drop a reference on the passed journal_head. If it fell to zero then try to * release the journal_head from the buffer_head. 
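 *
 * Illustrative pairing (hypothetical caller):
 *
 *	struct journal_head *jh = journal_grab_journal_head(bh);
 *	if (jh) {
 *		... use jh under the appropriate locks ...
 *		journal_put_journal_head(jh);
 *	}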
*/ void journal_put_journal_head(struct journal_head *jh) { struct buffer_head *bh = jh2bh(jh); jbd_lock_bh_journal_head(bh); J_ASSERT_JH(jh, jh->b_jcount > 0); --jh->b_jcount; if (!jh->b_jcount && !jh->b_transaction) { __journal_remove_journal_head(bh); __brelse(bh); } jbd_unlock_bh_journal_head(bh); } /* * debugfs tunables */ #ifdef CONFIG_JBD_DEBUG u8 journal_enable_debug __read_mostly; EXPORT_SYMBOL(journal_enable_debug); static struct dentry *jbd_debugfs_dir; static struct dentry *jbd_debug; static void __init jbd_create_debugfs_entry(void) { jbd_debugfs_dir = debugfs_create_dir("jbd", NULL); if (jbd_debugfs_dir) jbd_debug = debugfs_create_u8("jbd-debug", S_IRUGO | S_IWUSR, jbd_debugfs_dir, &journal_enable_debug); } static void __exit jbd_remove_debugfs_entry(void) { debugfs_remove(jbd_debug); debugfs_remove(jbd_debugfs_dir); } #else static inline void jbd_create_debugfs_entry(void) { } static inline void jbd_remove_debugfs_entry(void) { } #endif struct kmem_cache *jbd_handle_cache; static int __init journal_init_handle_cache(void) { jbd_handle_cache = kmem_cache_create("journal_handle", sizeof(handle_t), 0, /* offset */ SLAB_TEMPORARY, /* flags */ NULL); /* ctor */ if (jbd_handle_cache == NULL) { printk(KERN_EMERG "JBD: failed to create handle cache\n"); return -ENOMEM; } return 0; } static void journal_destroy_handle_cache(void) { if (jbd_handle_cache) kmem_cache_destroy(jbd_handle_cache); } /* * Module startup and shutdown */ static int __init journal_init_caches(void) { int ret; ret = journal_init_revoke_caches(); if (ret == 0) ret = journal_init_journal_head_cache(); if (ret == 0) ret = journal_init_handle_cache(); return ret; } static void journal_destroy_caches(void) { journal_destroy_revoke_caches(); journal_destroy_journal_head_cache(); journal_destroy_handle_cache(); } static int __init journal_init(void) { int ret; BUILD_BUG_ON(sizeof(struct journal_superblock_s) != 1024); ret = journal_init_caches(); if (ret != 0) journal_destroy_caches(); jbd_create_debugfs_entry(); return ret; } static void __exit journal_exit(void) { #ifdef CONFIG_JBD_DEBUG int n = atomic_read(&nr_journal_heads); if (n) printk(KERN_EMERG "JBD: leaked %d journal_heads!\n", n); #endif jbd_remove_debugfs_entry(); journal_destroy_caches(); } MODULE_LICENSE("GPL"); module_init(journal_init); module_exit(journal_exit);
gpl-2.0
pascalorama/snasm68kdb
src/dos/drive_cache.cpp
17
22081
/* * Copyright (C) 2002-2010 The DOSBox Team * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* $Id: drive_cache.cpp,v 1.59 2009-04-16 12:28:30 qbix79 Exp $ */ #include "drives.h" #include "dos_inc.h" #include "support.h" #include "cross.h" // STL stuff #include <vector> #include <iterator> #include <algorithm> #if defined (WIN32) /* Win 32 */ #define WIN32_LEAN_AND_MEAN // Exclude rarely-used stuff from #include <windows.h> #endif #if defined (OS2) #define INCL_DOSERRORS #define INCL_DOSFILEMGR #include <os2.h> #endif int fileInfoCounter = 0; bool SortByName(DOS_Drive_Cache::CFileInfo* const &a, DOS_Drive_Cache::CFileInfo* const &b) { return strcmp(a->shortname,b->shortname)<0; } bool SortByNameRev(DOS_Drive_Cache::CFileInfo* const &a, DOS_Drive_Cache::CFileInfo* const &b) { return strcmp(a->shortname,b->shortname)>0; } bool SortByDirName(DOS_Drive_Cache::CFileInfo* const &a, DOS_Drive_Cache::CFileInfo* const &b) { // Directories first... if (a->isDir!=b->isDir) return (a->isDir>b->isDir); return strcmp(a->shortname,b->shortname)<0; } bool SortByDirNameRev(DOS_Drive_Cache::CFileInfo* const &a, DOS_Drive_Cache::CFileInfo* const &b) { // Directories first... if (a->isDir!=b->isDir) return (a->isDir>b->isDir); return strcmp(a->shortname,b->shortname)>0; } DOS_Drive_Cache::DOS_Drive_Cache(void) { dirBase = new CFileInfo; save_dir = 0; srchNr = 0; label[0] = 0; nextFreeFindFirst = 0; for (Bit32u i=0; i<MAX_OPENDIRS; i++) { dirSearch[i] = 0; free[i] = true; dirFindFirst[i] = 0; }; SetDirSort(DIRALPHABETICAL); updatelabel = true; } DOS_Drive_Cache::DOS_Drive_Cache(const char* path) { dirBase = new CFileInfo; save_dir = 0; srchNr = 0; label[0] = 0; nextFreeFindFirst = 0; for (Bit32u i=0; i<MAX_OPENDIRS; i++) { dirSearch[i] = 0; free[i] = true; dirFindFirst[i] = 0; }; SetDirSort(DIRALPHABETICAL); SetBaseDir(path); updatelabel = true; } DOS_Drive_Cache::~DOS_Drive_Cache(void) { Clear(); for (Bit32u i=0; i<MAX_OPENDIRS; i++) { delete dirFindFirst[i]; dirFindFirst[i]=0; }; } void DOS_Drive_Cache::Clear(void) { delete dirBase; dirBase = 0; nextFreeFindFirst = 0; for (Bit32u i=0; i<MAX_OPENDIRS; i++) dirSearch[i] = 0; } void DOS_Drive_Cache::EmptyCache(void) { // Empty Cache and reinit Clear(); dirBase = new CFileInfo; save_dir = 0; srchNr = 0; for (Bit32u i=0; i<MAX_OPENDIRS; i++) free[i] = true; SetBaseDir(basePath); } void DOS_Drive_Cache::SetLabel(const char* vname,bool cdrom,bool allowupdate) { /* allowupdate defaults to true. if mount sets a label then allowupdate is * false and will this function return at once after the first call. * The label will be set at the first call. 
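 *
 * Illustrative call order (hypothetical labels):
 *
 *	SetLabel("DOSBOX", false, false);   first call: label set, updates locked
 *	SetLabel("LATER",  false, true);    later calls: return at once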
*/ if(!this->updatelabel) return; this->updatelabel = allowupdate; Set_Label(vname,label,cdrom); LOG(LOG_DOSMISC,LOG_NORMAL)("DIRCACHE: Set volume label to %s",label); } Bit16u DOS_Drive_Cache::GetFreeID(CFileInfo* dir) { for (Bit16u i=0; i<MAX_OPENDIRS; i++) if (free[i] || (dir==dirSearch[i])) return i; LOG(LOG_FILES,LOG_NORMAL)("DIRCACHE: Too many open directories!"); return 0; } void DOS_Drive_Cache::SetBaseDir(const char* baseDir) { Bit16u id; strcpy(basePath,baseDir); if (OpenDir(baseDir,id)) { char* result = 0; ReadDir(id,result); }; // Get Volume Label #if defined (WIN32) || defined (OS2) bool cdrom = false; char labellocal[256]={ 0 }; char drive[4] = "C:\\"; drive[0] = basePath[0]; #if defined (WIN32) if (GetVolumeInformation(drive,labellocal,256,NULL,NULL,NULL,NULL,0)) { UINT test = GetDriveType(drive); if(test == DRIVE_CDROM) cdrom = true; #else // OS2 //TODO determine wether cdrom or not! FSINFO fsinfo; ULONG drivenumber = drive[0]; if (drivenumber > 26) { // drive letter was lowercase drivenumber = drive[0] - 'a' + 1; } APIRET rc = DosQueryFSInfo(drivenumber, FSIL_VOLSER, &fsinfo, sizeof(FSINFO)); if (rc == NO_ERROR) { #endif /* Set label and allow being updated */ SetLabel(labellocal,cdrom,true); } #endif } void DOS_Drive_Cache::ExpandName(char* path) { strcpy(path,GetExpandName(path)); } char* DOS_Drive_Cache::GetExpandName(const char* path) { static char work [CROSS_LEN] = { 0 }; char dir [CROSS_LEN]; work[0] = 0; strcpy (dir,path); const char* pos = strrchr(path,CROSS_FILESPLIT); if (pos) dir[pos-path+1] = 0; CFileInfo* dirInfo = FindDirInfo(dir, work); if (pos) { // Last Entry = File strcpy(dir,pos+1); GetLongName(dirInfo, dir); strcat(work,dir); } if (*work) { size_t len = strlen(work); #if defined (WIN32) if((work[len-1] == CROSS_FILESPLIT ) && (len >= 2) && (work[len-2] != ':')) { #else if((len > 1) && (work[len-1] == CROSS_FILESPLIT )) { #endif work[len-1] = 0; // Remove trailing slashes except when in root } } return work; } void DOS_Drive_Cache::AddEntry(const char* path, bool checkExists) { // Get Last part... char file [CROSS_LEN]; char expand [CROSS_LEN]; CFileInfo* dir = FindDirInfo(path,expand); const char* pos = strrchr(path,CROSS_FILESPLIT); if (pos) { strcpy(file,pos+1); // Check if file already exists, then don't add new entry... if (checkExists) { if (GetLongName(dir,file)>=0) return; } CreateEntry(dir,file,false); Bits index = GetLongName(dir,file); if (index>=0) { Bit32u i; // Check if there are any open search dir that are affected by this... if (dir) for (i=0; i<MAX_OPENDIRS; i++) { if ((dirSearch[i]==dir) && ((Bit32u)index<=dirSearch[i]->nextEntry)) dirSearch[i]->nextEntry++; } } // LOG_DEBUG("DIR: Added Entry %s",path); } else { // LOG_DEBUG("DIR: Error: Failed to add %s",path); } } void DOS_Drive_Cache::DeleteEntry(const char* path, bool ignoreLastDir) { CacheOut(path,ignoreLastDir); if (dirSearch[srchNr] && (dirSearch[srchNr]->nextEntry>0)) dirSearch[srchNr]->nextEntry--; if (!ignoreLastDir) { // Check if there are any open search dir that are affected by this... 
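		// (Mirror of the bookkeeping in AddEntry above, but in the
		// other direction: any open search positioned past the
		// removed entry steps back so it does not skip a file.)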
Bit32u i; char expand [CROSS_LEN]; CFileInfo* dir = FindDirInfo(path,expand); if (dir) for (i=0; i<MAX_OPENDIRS; i++) { if ((dirSearch[i]==dir) && (dirSearch[i]->nextEntry>0)) dirSearch[i]->nextEntry--; } } } void DOS_Drive_Cache::CacheOut(const char* path, bool ignoreLastDir) { char expand[CROSS_LEN] = { 0 }; CFileInfo* dir; if (ignoreLastDir) { char tmp[CROSS_LEN] = { 0 }; Bit32s len=0; const char* pos = strrchr(path,CROSS_FILESPLIT); if (pos) len = (Bit32s)(pos - path); if (len>0) { safe_strncpy(tmp,path,len+1); } else { strcpy(tmp,path); } dir = FindDirInfo(tmp,expand); } else { dir = FindDirInfo(path,expand); } // LOG_DEBUG("DIR: Caching out %s : dir %s",expand,dir->orgname); // delete file objects... for(Bit32u i=0; i<dir->fileList.size(); i++) { if (dirSearch[srchNr]==dir->fileList[i]) dirSearch[srchNr] = 0; delete dir->fileList[i]; dir->fileList[i] = 0; } // clear lists dir->fileList.clear(); dir->longNameList.clear(); save_dir = 0; } bool DOS_Drive_Cache::IsCachedIn(CFileInfo* curDir) { return (curDir->fileList.size()>0); } bool DOS_Drive_Cache::GetShortName(const char* fullname, char* shortname) { // Get Dir Info char expand[CROSS_LEN] = {0}; CFileInfo* curDir = FindDirInfo(fullname,expand); std::vector<CFileInfo*>::size_type filelist_size = curDir->longNameList.size(); if (GCC_UNLIKELY(filelist_size<=0)) return false; Bits low = 0; Bits high = (Bits)(filelist_size-1); Bits mid, res; while (low<=high) { mid = (low+high)/2; res = strcmp(fullname,curDir->longNameList[mid]->orgname); if (res>0) low = mid+1; else if (res<0) high = mid-1; else { strcpy(shortname,curDir->longNameList[mid]->shortname); return true; }; } return false; } int DOS_Drive_Cache::CompareShortname(const char* compareName, const char* shortName) { char const* cpos = strchr(shortName,'~'); if (cpos) { /* the following code is replaced as it's not safe when char* is 64 bits */ /* Bits compareCount1 = (int)cpos - (int)shortName; char* endPos = strchr(cpos,'.'); Bitu numberSize = endPos ? int(endPos)-int(cpos) : strlen(cpos); char* lpos = strchr(compareName,'.'); Bits compareCount2 = lpos ? int(lpos)-int(compareName) : strlen(compareName); if (compareCount2>8) compareCount2 = 8; compareCount2 -= numberSize; if (compareCount2>compareCount1) compareCount1 = compareCount2; */ size_t compareCount1 = strcspn(shortName,"~"); size_t numberSize = strcspn(cpos,"."); size_t compareCount2 = strcspn(compareName,"."); if(compareCount2 > 8) compareCount2 = 8; /* We want * compareCount2 -= numberSize; * if (compareCount2>compareCount1) compareCount1 = compareCount2; * but to prevent negative numbers: */ if(compareCount2 > compareCount1 + numberSize) compareCount1 = compareCount2 - numberSize; return strncmp(compareName,shortName,compareCount1); } return strcmp(compareName,shortName); } Bitu DOS_Drive_Cache::CreateShortNameID(CFileInfo* curDir, const char* name) { std::vector<CFileInfo*>::size_type filelist_size = curDir->longNameList.size(); if (GCC_UNLIKELY(filelist_size<=0)) return 1; // shortener IDs start with 1 Bitu foundNr = 0; Bits low = 0; Bits high = (Bits)(filelist_size-1); Bits mid, res; while (low<=high) { mid = (low+high)/2; res = CompareShortname(name,curDir->longNameList[mid]->shortname); if (res>0) low = mid+1; else if (res<0) high = mid-1; else { // any more same x chars in next entries ? 
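			// (The binary search above landed on one entry with a
			// matching prefix; walk forward over its equal
			// neighbours so the highest shortNr in use is found.)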
do { foundNr = curDir->longNameList[mid]->shortNr; mid++; } while((Bitu)mid<curDir->longNameList.size() && (CompareShortname(name,curDir->longNameList[mid]->shortname)==0)); break; }; } return foundNr+1; } bool DOS_Drive_Cache::RemoveTrailingDot(char* shortname) { // remove trailing '.' if no extension is available (Linux compatibility) size_t len = strlen(shortname); if (len && (shortname[len-1]=='.')) { if (len==1) return false; if ((len==2) && (shortname[0]=='.')) return false; shortname[len-1] = 0; return true; } return false; } Bits DOS_Drive_Cache::GetLongName(CFileInfo* curDir, char* shortName) { std::vector<CFileInfo*>::size_type filelist_size = curDir->fileList.size(); if (GCC_UNLIKELY(filelist_size<=0)) return -1; // Remove dot, if no extension... RemoveTrailingDot(shortName); // Search long name and return array number of element Bits low = 0; Bits high = (Bits)(filelist_size-1); Bits mid,res; while (low<=high) { mid = (low+high)/2; res = strcmp(shortName,curDir->fileList[mid]->shortname); if (res>0) low = mid+1; else if (res<0) high = mid-1; else { // Found strcpy(shortName,curDir->fileList[mid]->orgname); return mid; }; } // not available return -1; } bool DOS_Drive_Cache::RemoveSpaces(char* str) { // Removes all spaces char* curpos = str; char* chkpos = str; while (*chkpos!=0) { if (*chkpos==' ') chkpos++; else *curpos++ = *chkpos++; } *curpos = 0; return (curpos!=chkpos); } void DOS_Drive_Cache::CreateShortName(CFileInfo* curDir, CFileInfo* info) { Bits len = 0; bool createShort = false; char tmpNameBuffer[CROSS_LEN]; char* tmpName = tmpNameBuffer; // Remove Spaces strcpy(tmpName,info->orgname); upcase(tmpName); createShort = RemoveSpaces(tmpName); // Get Length of filename char* pos = strchr(tmpName,'.'); if (pos) { // ignore preceding '.' if extension is longer than "3" if (strlen(pos)>4) { while (*tmpName=='.') tmpName++; createShort = true; } pos = strchr(tmpName,'.'); if (pos) len = (Bits)(pos - tmpName); else len = (Bits)strlen(tmpName); } else { len = (Bits)strlen(tmpName); } // Should shortname version be created ? createShort = createShort || (len>8); if (!createShort) { char buffer[CROSS_LEN]; strcpy(buffer,tmpName); createShort = (GetLongName(curDir,buffer)>=0); } if (createShort) { // Create number char buffer[8]; info->shortNr = CreateShortNameID(curDir,tmpName); sprintf(buffer,"%d",info->shortNr); // Copy first letters Bits tocopy = 0; size_t buflen = strlen(buffer); if (len+buflen+1>8) tocopy = (Bits)(8 - buflen - 1); else tocopy = len; safe_strncpy(info->shortname,tmpName,tocopy+1); // Copy number strcat(info->shortname,"~"); strcat(info->shortname,buffer); // Add (and cut) Extension, if available if (pos) { // Step to last extension... pos = strrchr(tmpName, '.'); // add extension strncat(info->shortname,pos,4); info->shortname[DOS_NAMELENGTH] = 0; } // keep list sorted for CreateShortNameID to work correctly if (curDir->longNameList.size()>0) { if (!(strcmp(info->shortname,curDir->longNameList.back()->shortname)<0)) { // append at end of list curDir->longNameList.push_back(info); } else { // look for position where to insert this element bool found=false; std::vector<CFileInfo*>::iterator it; for (it=curDir->longNameList.begin(); it!=curDir->longNameList.end(); ++it) { if (strcmp(info->shortname,(*it)->shortname)<0) { found = true; break; } } // Put it in longname list... 
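			// (Insert before the first lexically larger shortname,
			// or append if none was found.)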
if (found) curDir->longNameList.insert(it,info); else curDir->longNameList.push_back(info); } } else { // empty file list, append curDir->longNameList.push_back(info); } } else { strcpy(info->shortname,tmpName); } RemoveTrailingDot(info->shortname); } DOS_Drive_Cache::CFileInfo* DOS_Drive_Cache::FindDirInfo(const char* path, char* expandedPath) { // statics static char split[2] = { CROSS_FILESPLIT,0 }; char dir [CROSS_LEN]; char work [CROSS_LEN]; const char* start = path; const char* pos; CFileInfo* curDir = dirBase; Bit16u id; if (save_dir && (strcmp(path,save_path)==0)) { strcpy(expandedPath,save_expanded); return save_dir; }; // LOG_DEBUG("DIR: Find %s",path); // Remove base dir path start += strlen(basePath); strcpy(expandedPath,basePath); // hehe, baseDir should be cached in... if (!IsCachedIn(curDir)) { strcpy(work,basePath); if (OpenDir(curDir,work,id)) { char buffer[CROSS_LEN]; char* result = 0; strcpy(buffer,dirPath); ReadDir(id,result); strcpy(dirPath,buffer); free[id] = true; }; }; do { // bool errorcheck = false; pos = strchr(start,CROSS_FILESPLIT); if (pos) { safe_strncpy(dir,start,pos-start+1); /*errorcheck = true;*/ } else { strcpy(dir,start); }; // Path found Bits nextDir = GetLongName(curDir,dir); strcat(expandedPath,dir); // Error check /* if ((errorcheck) && (nextDir<0)) { LOG_DEBUG("DIR: Error: %s not found.",expandedPath); }; */ // Follow Directory if ((nextDir>=0) && curDir->fileList[nextDir]->isDir) { curDir = curDir->fileList[nextDir]; strcpy (curDir->orgname,dir); if (!IsCachedIn(curDir)) { if (OpenDir(curDir,expandedPath,id)) { char buffer[CROSS_LEN]; char* result = 0; strcpy(buffer,dirPath); ReadDir(id,result); strcpy(dirPath,buffer); free[id] = true; }; } }; if (pos) { strcat(expandedPath,split); start = pos+1; } } while (pos); // Save last result for faster access next time strcpy(save_path,path); strcpy(save_expanded,expandedPath); save_dir = curDir; return curDir; } bool DOS_Drive_Cache::OpenDir(const char* path, Bit16u& id) { char expand[CROSS_LEN] = {0}; CFileInfo* dir = FindDirInfo(path,expand); if (OpenDir(dir,expand,id)) { dirSearch[id]->nextEntry = 0; return true; } return false; } bool DOS_Drive_Cache::OpenDir(CFileInfo* dir, const char* expand, Bit16u& id) { id = GetFreeID(dir); dirSearch[id] = dir; char expandcopy [CROSS_LEN]; strcpy(expandcopy,expand); // Add "/" char end[2]={CROSS_FILESPLIT,0}; if (expandcopy[strlen(expandcopy)-1]!=CROSS_FILESPLIT) strcat(expandcopy,end); // open dir if (dirSearch[id]) { // open dir dir_information* dirp = open_directory(expandcopy); if (dirp) { // Reset it.. close_directory(dirp); strcpy(dirPath,expandcopy); free[id] = false; return true; } }; return false; } void DOS_Drive_Cache::CreateEntry(CFileInfo* dir, const char* name, bool is_directory) { CFileInfo* info = new CFileInfo; strcpy(info->orgname, name); info->shortNr = 0; info->isDir = is_directory; // Check for long filenames... 
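	// (CreateShortName fills info->shortname, generating an 8.3
	// "NAME~N" alias when the original name does not fit.)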
CreateShortName(dir, info); bool found = false; // keep list sorted (so GetLongName works correctly, used by CreateShortName in this routine) if (dir->fileList.size()>0) { if (!(strcmp(info->shortname,dir->fileList.back()->shortname)<0)) { // append at end of list dir->fileList.push_back(info); } else { // look for position where to insert this element std::vector<CFileInfo*>::iterator it; for (it=dir->fileList.begin(); it!=dir->fileList.end(); ++it) { if (strcmp(info->shortname,(*it)->shortname)<0) { found = true; break; } } // Put file in lists if (found) dir->fileList.insert(it,info); else dir->fileList.push_back(info); } } else { // empty file list, append dir->fileList.push_back(info); } } void DOS_Drive_Cache::CopyEntry(CFileInfo* dir, CFileInfo* from) { CFileInfo* info = new CFileInfo; // just copy things into new fileinfo strcpy(info->orgname, from->orgname); strcpy(info->shortname, from->shortname); info->shortNr = from->shortNr; info->isDir = from->isDir; dir->fileList.push_back(info); } bool DOS_Drive_Cache::ReadDir(Bit16u id, char* &result) { // shouldnt happen... if (id>MAX_OPENDIRS) return false; if (!IsCachedIn(dirSearch[id])) { // Try to open directory dir_information* dirp = open_directory(dirPath); if (!dirp) { free[id] = true; return false; } // Read complete directory char dir_name[CROSS_LEN]; bool is_directory; if (read_directory_first(dirp, dir_name, is_directory)) { CreateEntry(dirSearch[id], dir_name, is_directory); while (read_directory_next(dirp, dir_name, is_directory)) { CreateEntry(dirSearch[id], dir_name, is_directory); } } // close dir close_directory(dirp); // Info /* if (!dirp) { LOG_DEBUG("DIR: Error Caching in %s",dirPath); return false; } else { char buffer[128]; sprintf(buffer,"DIR: Caching in %s (%d Files)",dirPath,dirSearch[srchNr]->fileList.size()); LOG_DEBUG(buffer); };*/ }; if (SetResult(dirSearch[id], result, dirSearch[id]->nextEntry)) return true; free[id] = true; return false; } bool DOS_Drive_Cache::SetResult(CFileInfo* dir, char* &result, Bitu entryNr) { static char res[CROSS_LEN] = { 0 }; result = res; if (entryNr>=dir->fileList.size()) return false; CFileInfo* info = dir->fileList[entryNr]; // copy filename, short version strcpy(res,info->shortname); // Set to next Entry dir->nextEntry = entryNr+1; return true; } // FindFirst / FindNext bool DOS_Drive_Cache::FindFirst(char* path, Bit16u& id) { Bit16u dirID; // Cache directory in if (!OpenDir(path,dirID)) return false; //Find a free slot. //If the next one isn't free, move on to the next, if none is free => reset and assume the worst Bit16u local_findcounter = 0; while ( local_findcounter < MAX_OPENDIRS ) { if (dirFindFirst[this->nextFreeFindFirst] == 0) break; if (++this->nextFreeFindFirst >= MAX_OPENDIRS) this->nextFreeFindFirst = 0; //Wrap around local_findcounter++; } Bit16u dirFindFirstID = this->nextFreeFindFirst++; if (this->nextFreeFindFirst >= MAX_OPENDIRS) this->nextFreeFindFirst = 0; //Increase and wrap around for the next search. if (local_findcounter == MAX_OPENDIRS) { //Here is the reset from above. // no free slot found... LOG(LOG_MISC,LOG_ERROR)("DIRCACHE: FindFirst/Next: All slots full. Resetting"); // Clear the internal list then. 
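	// (All MAX_OPENDIRS slots are discarded; slot 0 is reused for
	// this search and the free-slot scan restarts at slot 1.)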
		dirFindFirstID = 0;
		this->nextFreeFindFirst = 1; //the next free one after this search
		for(Bitu n=0; n<MAX_OPENDIRS;n++) {
			// Clear and reuse slot
			delete dirFindFirst[n];
			dirFindFirst[n]=0;
		}
	}

	dirFindFirst[dirFindFirstID] = new CFileInfo();
	dirFindFirst[dirFindFirstID]->nextEntry = 0;

	// Copy entries to use with FindNext
	for (Bitu i=0; i<dirSearch[dirID]->fileList.size(); i++) {
		CopyEntry(dirFindFirst[dirFindFirstID],dirSearch[dirID]->fileList[i]);
	}

	// Now re-sort the fileList accordingly to output
	switch (sortDirType) {
		case ALPHABETICAL		: break;
//		case ALPHABETICAL		: std::sort(dirFindFirst[dirFindFirstID]->fileList.begin(), dirFindFirst[dirFindFirstID]->fileList.end(), SortByName); break;
		case DIRALPHABETICAL	: std::sort(dirFindFirst[dirFindFirstID]->fileList.begin(), dirFindFirst[dirFindFirstID]->fileList.end(), SortByDirName); break;
		case ALPHABETICALREV	: std::sort(dirFindFirst[dirFindFirstID]->fileList.begin(), dirFindFirst[dirFindFirstID]->fileList.end(), SortByNameRev); break;
		case DIRALPHABETICALREV	: std::sort(dirFindFirst[dirFindFirstID]->fileList.begin(), dirFindFirst[dirFindFirstID]->fileList.end(), SortByDirNameRev); break;
		case NOSORT				: break;
	}

//	LOG(LOG_MISC,LOG_ERROR)("DIRCACHE: FindFirst : %s (ID:%02X)",path,dirFindFirstID);
	id = dirFindFirstID;
	return true;
}

bool DOS_Drive_Cache::FindNext(Bit16u id, char* &result) {
	// out of range ?
	if ((id>=MAX_OPENDIRS) || !dirFindFirst[id]) {
		LOG(LOG_MISC,LOG_ERROR)("DIRCACHE: FindFirst/Next failure : ID out of range: %04X",id);
		return false;
	}
	if (!SetResult(dirFindFirst[id], result, dirFindFirst[id]->nextEntry)) {
		// free slot
		delete dirFindFirst[id];
		dirFindFirst[id] = 0;
		return false;
	}
	return true;
}
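/*
 * Illustrative sketch, not part of drive_cache.cpp: FindFirst above claims
 * a slot by scanning a fixed table round-robin and wiping the whole table
 * when every slot is taken. The same pattern in isolation (slot count and
 * names are made up for the example):
 */
#include <stddef.h>

#define SLOT_COUNT 8

static size_t next_free; /* persists across calls, like nextFreeFindFirst */

size_t claim_slot(void *slots[SLOT_COUNT])
{
    size_t scanned = 0;
    while (scanned < SLOT_COUNT) {
        if (slots[next_free] == NULL)
            break;                                /* found a free slot */
        next_free = (next_free + 1) % SLOT_COUNT; /* wrap around */
        scanned++;
    }
    if (scanned == SLOT_COUNT) {                  /* table full: reset */
        for (size_t n = 0; n < SLOT_COUNT; n++)
            slots[n] = NULL;   /* the real code deletes each entry here */
        next_free = 0;
    }
    size_t id = next_free;
    next_free = (next_free + 1) % SLOT_COUNT;     /* start of next scan */
    return id;
}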
gpl-2.0
botioni/aml_linux_kernel
arch/arm/mach-meson/board-8726m-refb09.c
17
75733
/* * * arch/arm/mach-meson/meson.c * * Copyright (C) 2010 AMLOGIC, INC. * * License terms: GNU General Public License (GPL) version 2 * Platform machine definition. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/dma-mapping.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> #include <linux/device.h> #include <linux/spi/flash.h> #include <mach/hardware.h> #include <mach/platform.h> #include <mach/memory.h> #include <mach/clock.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/setup.h> #include <mach/lm.h> #include <asm/memory.h> #include <asm/mach/map.h> #include <mach/am_eth_pinmux.h> #include <mach/nand.h> #include <linux/i2c.h> #include <linux/i2c-aml.h> #include <mach/power_gate.h> #include <linux/aml_bl.h> #include <linux/reboot.h> #include <linux/syscalls.h> #ifdef CONFIG_AM_UART_WITH_S_CORE #include <linux/uart-aml.h> #endif #include <mach/card_io.h> #include <mach/pinmux.h> #include <mach/gpio.h> #include <linux/delay.h> #include <mach/clk_set.h> #include "board-8726m-refb09.h" #if defined(CONFIG_TOUCHSCREEN_ADS7846) #include <linux/spi/spi.h> #include <linux/spi/spi_gpio.h> #include <linux/spi/ads7846.h> #endif #ifdef CONFIG_ANDROID_PMEM #include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/android_pmem.h> #endif #ifdef CONFIG_SENSORS_MXC622X #include <linux/mxc622x.h> #endif #ifdef CONFIG_SENSORS_MMC31XX #include <linux/mmc31xx.h> #endif #ifdef CONFIG_SN7325 #include <linux/sn7325.h> #endif #ifdef CONFIG_AMLOGIC_PM #include <linux/power_supply.h> #include <linux/aml_power.h> #endif #ifdef CONFIG_USB_ANDROID #include <linux/usb/android_composite.h> #endif #ifdef CONFIG_SUSPEND #include <mach/pm.h> #endif #ifdef CONFIG_SND_AML_M1_MID_WM8900 #include <sound/wm8900.h> #endif #ifdef CONFIG_VIDEO_AMLOGIC_CAPTURE #include <media/amlogic/aml_camera.h> #endif #ifdef CONFIG_EFUSE #include <linux/efuse.h> #endif #if defined(CONFIG_JPEGLOGO) static struct resource jpeglogo_resources[] = { [0] = { .start = CONFIG_JPEGLOGO_ADDR, .end = CONFIG_JPEGLOGO_ADDR + CONFIG_JPEGLOGO_SIZE - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = CODEC_ADDR_START, .end = CODEC_ADDR_END, .flags = IORESOURCE_MEM, }, }; static struct platform_device jpeglogo_device = { .name = "jpeglogo-dev", .id = 0, .num_resources = ARRAY_SIZE(jpeglogo_resources), .resource = jpeglogo_resources, }; #endif #if defined(CONFIG_KEYPADS_AM)||defined(CONFIG_KEYPADS_AM_MODULE) static struct resource intput_resources[] = { { .start = 0x0, .end = 0x0, .name="8726", .flags = IORESOURCE_IO, }, }; static struct platform_device input_device = { .name = "m1-kp", .id = 0, .num_resources = ARRAY_SIZE(intput_resources), .resource = intput_resources, }; #endif #ifdef CONFIG_SARADC_AM #include <linux/saradc.h> static struct platform_device saradc_device = { .name = "saradc", .id = 0, .dev = { .platform_data = NULL, }, }; #endif #ifdef CONFIG_ADC_TOUCHSCREEN_AM #include <linux/adc_ts.h> #define XLCD 800 #define YLCD 600 #define SWAP_XY 0 #define XPOL 0 #define YPOL 0 #define XMIN 30 #define XMAX 980 #define YMIN 70 #define YMAX 980 int adcts_convert(int x, int y) { #if (SWAP_XY == 1) swap(x, y); #endif if (x < XMIN) x = XMIN; if (x > XMAX) x = XMAX; if (y < YMIN) y = YMIN; if (y > YMAX) y = YMAX; #if (XPOL == 1) x = XMAX + XMIN - x; #endif #if (YPOL == 1) y = 
YMAX + YMIN - y;
#endif
    x = (x- XMIN) * XLCD / (XMAX - XMIN);
    y = (y- YMIN) * YLCD / (YMAX - YMIN);
    return (x << 16) | y;
}

static struct adc_ts_platform_data adc_ts_pdata = {
    .irq = -1, //INT_SAR_ADC
    .x_plate_ohms = 400,
    .poll_delay = 1,
    .poll_period = 5,
    .abs_xmin = 0,
    .abs_xmax = XLCD,
    .abs_ymin = 0,
    .abs_ymax = YLCD,
    .convert = adcts_convert,
};

static struct platform_device adc_ts_device = {
    .name = "adc_ts",
    .id = 0,
    .dev = {
        .platform_data = &adc_ts_pdata,
    },
};
#endif

#if defined(CONFIG_ADC_KEYPADS_AM)||defined(CONFIG_ADC_KEYPADS_AM_MODULE)
#include <linux/input.h>
#include <linux/adc_keypad.h>

static int adc_kp_led_control(int *param)
{
    if (param[0] == 0) {//led off
        set_gpio_val(GPIOA_bank_bit(4), GPIOA_bit_bit0_14(4), 0);
        set_gpio_mode(GPIOA_bank_bit(4), GPIOA_bit_bit0_14(4), GPIO_OUTPUT_MODE);
        return 0;
    } else if (param[0] == 1) {//led on
        if ((param[1]!=KEY_PAGEUP) && (param[1]!=KEY_PAGEDOWN)) {
            set_gpio_val(GPIOA_bank_bit(4), GPIOA_bit_bit0_14(4), 1);
            set_gpio_mode(GPIOA_bank_bit(4), GPIOA_bit_bit0_14(4), GPIO_OUTPUT_MODE);
            return 0;
        }
    } else if (param[0] == 2) {//start counting
        return 1;
    }
    return 0; //remaining cases (e.g. led on for the volume keys): nothing to do
}

static struct adc_key adc_kp_key[] = {
    {KEY_PAGEUP,   "vol+",   CHAN_4, 139, 60},
    {KEY_PAGEDOWN, "vol-",   CHAN_4, 266, 60},
    {KEY_SEARCH,   "search", CHAN_4, 387, 60},
    {KEY_TAB,      "exit",   CHAN_4, 509, 60},
    {KEY_LEFTMETA, "menu",   CHAN_4, 633, 60},
    {KEY_HOME,     "home",   CHAN_4, 763, 60},
};

static struct adc_kp_platform_data adc_kp_pdata = {
    .led_control = adc_kp_led_control,
    .led_control_param_num = 2,
    .key = &adc_kp_key[0],
    .key_num = ARRAY_SIZE(adc_kp_key),
};

static struct platform_device adc_kp_device = {
    .name = "m1-adckp",
    .id = 0,
    .num_resources = 0,
    .resource = NULL,
    .dev = {
        .platform_data = &adc_kp_pdata,
    }
};
#endif

#if defined(CONFIG_KEY_INPUT_CUSTOM_AM) || defined(CONFIG_KEY_INPUT_CUSTOM_AM_MODULE)
#include <linux/input.h>
#include <linux/input/key_input.h>

int _key_code_list[] = {KEY_POWER};

static inline int key_input_init_func(void)
{
    //GP_INPUT2 no init
    return 0;
}

static inline int key_scan(int *key_state_list)
{
    int ret = 0;
    key_state_list[0] = ((READ_CBUS_REG(0x1f53) >> 10) & 1) ?
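#if 0 /* illustrative sketch, not part of this board file; kept out of the
       * build like the file's other #if 0 blocks. Each adc_key entry above
       * pairs a key code with a nominal SAR-ADC reading and a tolerance
       * (e.g. "vol+" at 139 +/- 60). Assuming the adc_keypad driver matches
       * a sample against the first window containing it -- a driver detail
       * not confirmed here -- the lookup reduces to: */
struct adc_window { int code; int center; int tolerance; };

/* Returns the matched key code, or 0 when the sample hits no window. */
int adc_sample_to_key(const struct adc_window *keys, int nkeys, int sample)
{
    for (int i = 0; i < nkeys; i++) {
        int delta = sample - keys[i].center;
        if (delta < 0)
            delta = -delta;              /* |sample - center| */
        if (delta < keys[i].tolerance)
            return keys[i].code;
    }
    return 0;                            /* no key pressed */
}
#endif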
0 : 1; //Read GP_INPUT2 return ret; } static struct key_input_platform_data key_input_pdata = { .scan_period = 20, .fuzz_time = 60, .key_code_list = &_key_code_list[0], .key_num = ARRAY_SIZE(_key_code_list), .scan_func = key_scan, .init_func = key_input_init_func, .config = 0, }; static struct platform_device input_device_key = { .name = "m1-keyinput", .id = 0, .num_resources = 0, .resource = NULL, .dev = { .platform_data = &key_input_pdata, } }; #endif #ifdef CONFIG_SN7325 static int sn7325_pwr_rst(void) { //reset set_gpio_val(GPIOD_bank_bit2_24(20), GPIOD_bit_bit2_24(20), 0); //low set_gpio_mode(GPIOD_bank_bit2_24(20), GPIOD_bit_bit2_24(20), GPIO_OUTPUT_MODE); udelay(2); //delay 2us set_gpio_val(GPIOD_bank_bit2_24(20), GPIOD_bit_bit2_24(20), 1); //high set_gpio_mode(GPIOD_bank_bit2_24(20), GPIOD_bit_bit2_24(20), GPIO_OUTPUT_MODE); //end return 0; } static struct sn7325_platform_data sn7325_pdata = { .pwr_rst = &sn7325_pwr_rst, }; #endif #if defined(CONFIG_FB_AM) static struct resource fb_device_resources[] = { [0] = { .start = OSD1_ADDR_START, .end = OSD1_ADDR_END, .flags = IORESOURCE_MEM, }, #if defined(CONFIG_FB_OSD2_ENABLE) [1] = { .start = OSD2_ADDR_START, .end = OSD2_ADDR_END, .flags = IORESOURCE_MEM, }, #endif }; static struct platform_device fb_device = { .name = "mesonfb", .id = 0, .num_resources = ARRAY_SIZE(fb_device_resources), .resource = fb_device_resources, }; #endif #ifdef CONFIG_USB_PHY_CONTROL static struct resource usb_phy_control_device_resources[] = { { .start = CBUS_REG_ADDR(PREI_USB_PHY_REG), .end = -1, .flags = IORESOURCE_MEM, }, }; static struct platform_device usb_phy_control_device = { .name = "usb_phy_control", .id = -1, .resource = usb_phy_control_device_resources, }; #endif #ifdef CONFIG_USB_DWC_OTG_HCD static void set_usb_a_vbus_power(char is_power_on) { #define USB_A_POW_GPIO PREG_EGPIO #define USB_A_POW_GPIO_BIT 3 #define USB_A_POW_GPIO_BIT_ON 1 #define USB_A_POW_GPIO_BIT_OFF 0 if(is_power_on) { printk(KERN_INFO "set usb port power on (board gpio %d)!\n",USB_A_POW_GPIO_BIT); set_gpio_mode(USB_A_POW_GPIO,USB_A_POW_GPIO_BIT,GPIO_OUTPUT_MODE); set_gpio_val(USB_A_POW_GPIO,USB_A_POW_GPIO_BIT,USB_A_POW_GPIO_BIT_ON); } else { printk(KERN_INFO "set usb port power off (board gpio %d)!\n",USB_A_POW_GPIO_BIT); set_gpio_mode(USB_A_POW_GPIO,USB_A_POW_GPIO_BIT,GPIO_OUTPUT_MODE); set_gpio_val(USB_A_POW_GPIO,USB_A_POW_GPIO_BIT,USB_A_POW_GPIO_BIT_OFF); } } //usb_a is OTG port static struct lm_device usb_ld_a = { .type = LM_DEVICE_TYPE_USB, .id = 0, .irq = INT_USB_A, .resource.start = IO_USB_A_BASE, .resource.end = -1, .dma_mask_room = DMA_BIT_MASK(32), .port_type = USB_PORT_TYPE_OTG, .port_speed = USB_PORT_SPEED_DEFAULT, .dma_config = USB_DMA_BURST_SINGLE, .set_vbus_power = set_usb_a_vbus_power, }; #endif #ifdef CONFIG_SATA_DWC_AHCI static struct lm_device sata_ld = { .type = LM_DEVICE_TYPE_SATA, .id = 2, .irq = INT_SATA, .dma_mask_room = DMA_BIT_MASK(32), .resource.start = IO_SATA_BASE, .resource.end = -1, }; #endif #if defined(CONFIG_AM_STREAMING) static struct resource codec_resources[] = { [0] = { .start = CODEC_ADDR_START, .end = CODEC_ADDR_END, .flags = IORESOURCE_MEM, }, [1] = { .start = STREAMBUF_ADDR_START, .end = STREAMBUF_ADDR_END, .flags = IORESOURCE_MEM, }, }; static struct platform_device codec_device = { .name = "amstream", .id = 0, .num_resources = ARRAY_SIZE(codec_resources), .resource = codec_resources, }; #endif #if defined(CONFIG_AM_VIDEO) static struct resource deinterlace_resources[] = { [0] = { .start = DI_ADDR_START, .end = DI_ADDR_END, .flags = 
IORESOURCE_MEM, }, }; static struct platform_device deinterlace_device = { .name = "deinterlace", .id = 0, .num_resources = ARRAY_SIZE(deinterlace_resources), .resource = deinterlace_resources, }; #endif #if defined(CONFIG_TVIN_VDIN) static struct resource vdin_resources[] = { [0] = { .start = VDIN_ADDR_START, //pbufAddr .end = VDIN_ADDR_END, //pbufAddr + size .flags = IORESOURCE_MEM, }, [1] = { .start = VDIN_ADDR_START, .end = VDIN_ADDR_END, .flags = IORESOURCE_MEM, }, [2] = { .start = INT_VDIN_VSYNC, .end = INT_VDIN_VSYNC, .flags = IORESOURCE_IRQ, }, [3] = { .start = INT_VDIN_VSYNC, .end = INT_VDIN_VSYNC, .flags = IORESOURCE_IRQ, }, }; static struct platform_device vdin_device = { .name = "vdin", .id = -1, .num_resources = ARRAY_SIZE(vdin_resources), .resource = vdin_resources, }; #endif #ifdef CONFIG_TVIN_BT656IN //add pin mux info for bt656 input #if 0 static struct resource bt656in_resources[] = { [0] = { .start = VDIN_ADDR_START, //pbufAddr .end = VDIN_ADDR_END, //pbufAddr + size .flags = IORESOURCE_MEM, }, [1] = { //bt656/camera/bt601 input resource pin mux setting .start = 0x3000, //mask--mux gpioD 15 to bt656 clk; mux gpioD 16:23 to be bt656 dt_in .end = PERIPHS_PIN_MUX_5 + 0x3000, .flags = IORESOURCE_MEM, }, [2] = { //camera/bt601 input resource pin mux setting .start = 0x1c000, //mask--mux gpioD 12 to bt601 FIQ; mux gpioD 13 to bt601HS; mux gpioD 14 to bt601 VS; .end = PERIPHS_PIN_MUX_5 + 0x1c000, .flags = IORESOURCE_MEM, }, [3] = { //bt601 input resource pin mux setting .start = 0x800, //mask--mux gpioD 24 to bt601 IDQ;; .end = PERIPHS_PIN_MUX_5 + 0x800, .flags = IORESOURCE_MEM, }, }; #endif static struct platform_device bt656in_device = { .name = "amvdec_656in", .id = -1, // .num_resources = ARRAY_SIZE(bt656in_resources), // .resource = bt656in_resources, }; #endif #if defined(CONFIG_CARDREADER) static struct resource amlogic_card_resource[] = { [0] = { .start = 0x1200230, //physical address .end = 0x120024c, .flags = 0x200, } }; int init_camera_io=0; void extern_wifi_power(int is_power) { if(init_camera_io==0){ configIO(1, 0); setIO_level(1, 0, 1);//200m poweer_disable setIO_level(1, 0, 6);//200m pwd low configIO(0, 0); setIO_level(0, 0, 2);//200m reset low configIO(1, 0); setIO_level(1, 0, 2);//30m poweer_disable setIO_level(1, 1, 0);//30m pwd enable configIO(0, 0); setIO_level(0, 0, 3);//30m reset low init_camera_io=1; } if(is_power) { *(volatile unsigned *)EGPIO_GPIOD_ENABLE &= ~PREG_IO_13_MASK; *(volatile unsigned *)EGPIO_GPIOD_OUTPUT |= PREG_IO_13_MASK; msleep(500); configIO(0, 0); setIO_level(0, 1, 7); setIO_level(0, 1, 5); msleep(50); setIO_level(0, 0, 5); msleep(50); setIO_level(0, 1, 5); printk("extern_wifi_power ON!\n"); } else { configIO(0, 0); setIO_level(0, 0, 5); setIO_level(0, 0, 7); *(volatile unsigned *)EGPIO_GPIOD_ENABLE &= ~PREG_IO_13_MASK; *(volatile unsigned *)EGPIO_GPIOD_OUTPUT &= ~PREG_IO_13_MASK; printk("extern_wifi_power OFF!\n"); } return; } void extern_wifi_power_wl_en(int is_power) { if(is_power) { configIO(0, 0); setIO_level(0, 1, 5); printk("extern_wifi_power_wl_en ON!\n"); } else { configIO(0, 0); setIO_level(0, 0, 5); printk("extern_wifi_power_wl_en OFF!\n"); } } EXPORT_SYMBOL(extern_wifi_power_wl_en); void sdio_extern_init(void) { extern_wifi_power(1); } static struct aml_card_info amlogic_card_info[] = { [0] = { .name = "sd_card", .work_mode = CARD_HW_MODE, .io_pad_type = SDIO_GPIOA_9_14, .card_ins_en_reg = EGPIO_GPIOC_ENABLE, .card_ins_en_mask = PREG_IO_0_MASK, .card_ins_input_reg = EGPIO_GPIOC_INPUT, .card_ins_input_mask = PREG_IO_0_MASK, 
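#if 0 /* illustrative sketch, not part of this board file; disabled like the
       * file's other #if 0 blocks. extern_wifi_power() above is a fixed
       * sequence of IO-expander writes separated by delays. The same
       * sequencing can be expressed as data, keeping order and timing
       * reviewable in one place. configIO/setIO_level/msleep are the
       * board's own helpers; the step values below are placeholders, not
       * the real power-up recipe. */
struct io_step { int cfg; int level; int pin; int delay_ms; };

static const struct io_step wifi_power_on_steps[] = {
    { 0, 1, 7, 50 },   /* e.g. raise power rail, settle 50 ms */
    { 0, 0, 5, 50 },   /* pulse reset low */
    { 0, 1, 5,  0 },   /* release reset */
};

static void run_io_steps(const struct io_step *s, int n)
{
    for (int i = 0; i < n; i++) {
        configIO(s[i].cfg, 0);
        setIO_level(s[i].cfg, s[i].level, s[i].pin);
        if (s[i].delay_ms)
            msleep(s[i].delay_ms);
    }
}
#endif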
.card_power_en_reg = 0, .card_power_en_mask = 0, .card_power_output_reg = 0, .card_power_output_mask = 0, .card_power_en_lev = 0, .card_wp_en_reg = EGPIO_GPIOA_ENABLE, .card_wp_en_mask = PREG_IO_11_MASK, .card_wp_input_reg = EGPIO_GPIOA_INPUT, .card_wp_input_mask = PREG_IO_11_MASK, .card_extern_init = 0, }, [1] = { .name = "sdio_card", .work_mode = CARD_HW_MODE, .io_pad_type = SDIO_GPIOB_2_7, .card_ins_en_reg = 0, .card_ins_en_mask = 0, .card_ins_input_reg = 0, .card_ins_input_mask = 0, .card_power_en_reg = 0, .card_power_en_mask = 0, .card_power_output_reg = 0, .card_power_output_mask = 0, .card_power_en_lev = 0, .card_wp_en_reg = 0, .card_wp_en_mask = 0, .card_wp_input_reg = 0, .card_wp_input_mask = 0, .card_extern_init = sdio_extern_init, }, }; static struct aml_card_platform amlogic_card_platform = { .card_num = ARRAY_SIZE(amlogic_card_info), .card_info = amlogic_card_info, }; static struct platform_device amlogic_card_device = { .name = "AMLOGIC_CARD", .id = -1, .num_resources = ARRAY_SIZE(amlogic_card_resource), .resource = amlogic_card_resource, .dev = { .platform_data = &amlogic_card_platform, }, }; #endif #if defined(CONFIG_AML_AUDIO_DSP) static struct resource audiodsp_resources[] = { [0] = { .start = AUDIODSP_ADDR_START, .end = AUDIODSP_ADDR_END, .flags = IORESOURCE_MEM, }, }; static struct platform_device audiodsp_device = { .name = "audiodsp", .id = 0, .num_resources = ARRAY_SIZE(audiodsp_resources), .resource = audiodsp_resources, }; #endif static struct resource aml_m1_audio_resource[]={ [0] = { .start = 0, .end = 0, .flags = IORESOURCE_MEM, }, }; static struct platform_device aml_audio={ .name = "aml_m1_audio_wm8900", .id = -1, .resource = aml_m1_audio_resource, .num_resources = ARRAY_SIZE(aml_m1_audio_resource), }; #ifdef CONFIG_SND_AML_M1_MID_WM8900 //use LED_CS1 as hp detect pin #define PWM_TCNT (600-1) #define PWM_MAX_VAL (420) int get_display_mode(void) { int fd; int ret = 0; char mode[8]; fd = sys_open("/sys/class/display/mode", O_RDWR | O_NDELAY, 0); if(fd >= 0) { memset(mode,0,8); sys_read(fd,mode,8); if(strncmp("panel",mode,5)) ret = 1; sys_close(fd); } return ret; } int wm8900_is_hp_pluged(void) { int level = 0; int cs_no = 0; // Enable VBG_EN WRITE_CBUS_REG_BITS(PREG_AM_ANALOG_ADDR, 1, 0, 1); // wire pm_gpioA_7_led_pwm = pin_mux_reg0[22]; WRITE_CBUS_REG(LED_PWM_REG0,(0 << 31) | // disable the overall circuit (0 << 30) | // 1:Closed Loop 0:Open Loop (0 << 16) | // PWM total count (0 << 13) | // Enable (1 << 12) | // enable (0 << 10) | // test (7 << 7) | // CS0 REF, Voltage FeedBack: about 0.505V (7 << 4) | // CS1 REF, Current FeedBack: about 0.505V (0 << 0)); // DIMCTL Analog dimmer cs_no = READ_CBUS_REG(LED_PWM_REG3); if(cs_no &(1<<14)) level |= (1<<0); // temp patch to mute speaker when hdmi output if(level == 1) if(get_display_mode() != 0) { return 1; } return (level == 1)?(0):(1); //return 1: hp pluged, 0: hp unpluged. 
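#if 0 /* illustrative sketch, not part of this board file. The decision in
       * wm8900_is_hp_pluged() above, isolated from the register pokes: a
       * comparator bit from LED_PWM_REG3 gives the raw level, and any
       * non-panel display mode forces "plugged" so the speaker stays muted
       * over HDMI/TV output. The two inputs are stand-ins for the board
       * helpers. */
int hp_state(int comparator_bit_set, int display_is_panel)
{
    int level = comparator_bit_set ? 1 : 0;
    if (level == 1 && !display_is_panel)
        return 1;                 /* mute speaker: report "plugged" */
    /* board convention: comparator high means unplugged here */
    return (level == 1) ? 0 : 1;  /* 1: hp plugged, 0: unplugged */
}
#endif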
} static struct wm8900_platform_data wm8900_pdata = { .is_hp_pluged = &wm8900_is_hp_pluged, }; #endif #if defined(CONFIG_TOUCHSCREEN_ADS7846) #define SPI_0 0 #define SPI_1 1 #define SPI_2 2 // GPIOC_8(G20, XPT_CLK) #define GPIO_SPI_SCK ((GPIOC_bank_bit0_26(8)<<16) |GPIOC_bit_bit0_26(8)) // GPIOC_7(G21, XPT_IN) #define GPIO_SPI_MOSI ((GPIOC_bank_bit0_26(7)<<16) |GPIOC_bit_bit0_26(7)) // GPIOC_6(G22, XPT_OUT) #define GPIO_SPI_MISO ((GPIOC_bank_bit0_26(6)<<16) |GPIOC_bit_bit0_26(6)) // GPIOC_0(J20, XPT_NCS) #define GPIO_TSC2046_CS ((GPIOC_bank_bit0_26(0)<<16) |GPIOC_bit_bit0_26(0)) // GPIOC_4(H20, NPEN_IRQ) #define GPIO_TSC2046_PENDOWN ((GPIOC_bank_bit0_26(4)<<16) |GPIOC_bit_bit0_26(4)) static const struct spi_gpio_platform_data spi_gpio_pdata = { .sck = GPIO_SPI_SCK, .mosi = GPIO_SPI_MOSI, .miso = GPIO_SPI_MISO, .num_chipselect = 1, }; static struct platform_device spi_gpio = { .name = "spi_gpio", .id = SPI_2, .dev = { .platform_data = (void *)&spi_gpio_pdata, }, }; static const struct ads7846_platform_data ads7846_pdata = { .model = 7846, .vref_delay_usecs = 100, .vref_mv = 2500, .keep_vref_on = false, .swap_xy = 0, .settle_delay_usecs = 10, .penirq_recheck_delay_usecs = 0, .x_plate_ohms =500, .y_plate_ohms = 500, .x_min = 0, .x_max = 0xfff, .y_min = 0, .y_max = 0xfff, .pressure_min = 0, .pressure_max = 0xfff, .debounce_max = 0, .debounce_tol = 0, .debounce_rep = 0, .gpio_pendown = GPIO_TSC2046_PENDOWN, .get_pendown_state =NULL, .filter_init = NULL, .filter = NULL, .filter_cleanup = NULL, .wait_for_sync = NULL, .wakeup = false, }; static struct spi_board_info spi_board_info_list[] = { [0] = { .modalias = "ads7846", .platform_data = (void *)&ads7846_pdata, .controller_data = (void *)GPIO_TSC2046_CS, .irq = INT_GPIO_0, .max_speed_hz = 500000, .bus_num = SPI_2, .chip_select = 0, .mode = SPI_MODE_0, }, }; static int ads7846_init_gpio(void) { /* memson Bit(s) Description 256-105 Unused 104 JTAG_TDO 103 JTAG_TDI 102 JTAG_TMS 101 JTAG_TCK 100 gpioA_23 99 gpioA_24 98 gpioA_25 97 gpioA_26 98-75 gpioE[21:0] 75-50 gpioD[24:0] 49-23 gpioC[26:0] 22-15 gpioB[22;15] 14-0 gpioA[14:0] */ /* set input mode */ gpio_direction_input(GPIO_TSC2046_PENDOWN); /* set gpio interrupt #0 source=GPIOC_4, and triggered by falling edge(=1) */ gpio_enable_edge_int(27, 1, 0); // // reg2 bit24~26 // CLEAR_CBUS_REG_MASK(PERIPHS_PIN_MUX_2, // (1<<24) | (1<<25) | (1<<26)); // // reg3 bit5~7,12,16~18,22 // CLEAR_CBUS_REG_MASK(PERIPHS_PIN_MUX_3, // (1<<5) | (1<<6) | (1<<7) | (1<<9) | (1<<12) | (1<<16) | (1<<17) | (1<<18) | (1<<22)); // // reg4 bit26~27 // CLEAR_CBUS_REG_MASK(PERIPHS_PIN_MUX_4, // (1<<26) | (1<<27)); // // reg9 bit0,4,6~8 // CLEAR_CBUS_REG_MASK(PERIPHS_PIN_MUX_9, // (1<<0) | (1<<4) | (1<<6) | (1<<7) | (1<<8)); // // reg10 bit0,4,6~8 // CLEAR_CBUS_REG_MASK(PERIPHS_PIN_MUX_10, // (1<<0) | (1<<4) | (1<<6) | (1<<7) | (1<<8)); return 0; } #endif #ifdef CONFIG_TOUCHSCREEN_TSC2007 #include <linux/i2c/tsc2007.h> //GPIOD_24 #define GPIO_TSC2007_PENIRQ ((GPIOD_bank_bit2_24(24)<<16) |GPIOD_bit_bit2_24(24)) #define GPIO_TSC2007_PENIRQ_IDX (GPIOD_IDX + 24) static int tsc2007_init_platform_hw(void) { /* memson Bit(s) Description 256-105 Unused 104 JTAG_TDO 103 JTAG_TDI 102 JTAG_TMS 101 JTAG_TCK 100 gpioA_23 99 gpioA_24 98 gpioA_25 97 gpioA_26 98-76 gpioE[21:0] 75-50 gpioD[24:0] 49-23 gpioC[26:0] 22-15 gpioB[22;15] 14-0 gpioA[14:0] */ /* set input mode */ gpio_direction_input(GPIO_TSC2007_PENIRQ); /* set gpio interrupt #0 source=GPIOD_24, and triggered by falling edge(=1) */ gpio_enable_edge_int(GPIO_TSC2007_PENIRQ_IDX, 1, 0); 
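#if 0 /* illustrative sketch, not part of this board file. The TSC2007 pen
       * interrupt above is wired active-low and armed for falling edges,
       * so "pen down" is the inverted pin level (tsc2007_get_pendown_state
       * below does exactly that with gpio_get_value). A debounced variant,
       * with read_pin() standing in for the raw GPIO read: */
extern int read_pin(void);        /* stand-in: returns raw PENIRQ level */

int pen_down_debounced(int samples)
{
    int down = 0;
    for (int i = 0; i < samples; i++)
        down += !read_pin();      /* active-low: 0 on the wire = touching */
    return down > samples / 2;    /* majority vote across the samples */
}
#endif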
return 0; } static int tsc2007_get_pendown_state(void) { return !gpio_get_value(GPIO_TSC2007_PENIRQ); } #define XLCD 800 #define YLCD 600 #define SWAP_XY 0 #define XPOL 0 #define YPOL 1 #define XMIN 130 #define XMAX 3970 #define YMIN 250 #define YMAX 4000 int tsc2007_convert(int x, int y) { #if (SWAP_XY == 1) swap(x, y); #endif if (x < XMIN) x = XMIN; if (x > XMAX) x = XMAX; if (y < YMIN) y = YMIN; if (y > YMAX) y = YMAX; #if (XPOL == 1) x = XMAX + XMIN - x; #endif #if (YPOL == 1) y = YMAX + YMIN - y; #endif x = (x- XMIN) * XLCD / (XMAX - XMIN); y = (y- YMIN) * YLCD / (YMAX - YMIN); y = y - 32 * YLCD / (y / 2 + YLCD); return (x << 16) | y; } static struct tsc2007_platform_data tsc2007_pdata = { .model = 2007, .x_plate_ohms = 400, .get_pendown_state = tsc2007_get_pendown_state, .clear_penirq = NULL, .init_platform_hw = tsc2007_init_platform_hw, .exit_platform_hw = NULL, .poll_delay = 40, .poll_period = 10, .abs_xmin = 0, .abs_xmax = XLCD, .abs_ymin = 0, .abs_ymax = YLCD, .convert = tsc2007_convert, }; #endif #ifdef CONFIG_ITK_CAPACITIVE_TOUCHSCREEN #include <linux/i2c/itk.h> //GPIOD_24 #define GPIO_ITK_PENIRQ ((GPIOD_bank_bit2_24(24)<<16) |GPIOD_bit_bit2_24(24)) #define GPIO_ITK_RST static int itk_init_irq(void) { /* memson Bit(s) Description 256-105 Unused 104 JTAG_TDO 103 JTAG_TDI 102 JTAG_TMS 101 JTAG_TCK 100 gpioA_23 99 gpioA_24 98 gpioA_25 97 gpioA_26 98-76 gpioE[21:0] 75-50 gpioD[24:0] 49-23 gpioC[26:0] 22-15 gpioB[22;15] 14-0 gpioA[14:0] */ /* set input mode */ gpio_direction_input(GPIO_ITK_PENIRQ); /* set gpio interrupt #0 source=GPIOD_24, and triggered by falling edge(=1) */ gpio_enable_edge_int(50+24, 1, 0); return 0; } static int itk_get_irq_level(void) { return gpio_get_value(GPIO_ITK_PENIRQ); } static struct itk_platform_data itk_pdata = { .init_irq = &itk_init_irq, .get_irq_level = &itk_get_irq_level, .tp_max_width = 32752, .tp_max_height = 32752, .lcd_max_width = 800, .lcd_max_height = 600, }; #endif #ifdef CONFIG_UOR7X5X_RESISTIVE_TOUCHSCREEN #include <linux/i2c/uor7x5x.h> //GPIOD_24 #define GPIO_UOR7X5X_PENIRQ ((GPIOD_bank_bit2_24(24)<<16) |GPIOD_bit_bit2_24(24)) #define GPIO_UOR7X5X_RST (GPIOD_IDX + 24) static int uor7x5x_init_irq(void) { /* memson Bit(s) Description 256-105 Unused 104 JTAG_TDO 103 JTAG_TDI 102 JTAG_TMS 101 JTAG_TCK 100 gpioA_23 99 gpioA_24 98 gpioA_25 97 gpioA_26 98-76 gpioE[21:0] 75-50 gpioD[24:0] 49-23 gpioC[26:0] 22-15 gpioB[22;15] 14-0 gpioA[14:0] */ printk("uor7x5x_init_irq \n"); /* set input mode */ gpio_direction_input(GPIO_UOR7X5X_PENIRQ); /* set gpio interrupt #0 source=GPIOD_24, and triggered by falling edge(=1) */ gpio_enable_edge_int(GPIO_UOR7X5X_RST, 1, 0); return 0; } static int uor7x5x_get_irq_level(void) { return gpio_get_value(GPIO_UOR7X5X_PENIRQ); } #define UOR7X5X_XLCD 800 #define UOR7X5X_YLCD 600 #define UOR7X5X_SWAP_XY 0 #define UOR7X5X_XPOL 1 #define UOR7X5X_YPOL 1 #define UOR7X5X_XMIN 1100 #define UOR7X5X_XMAX 3900 #define UOR7X5X_YMIN 200 #define UOR7X5X_YMAX 2400 int uor7x5x_convert(int x, int y) { #if (UOR7X5X_SWAP_XY == 1) swap(x, y); #endif if (x < UOR7X5X_XMIN) x = UOR7X5X_XMIN; if (x > UOR7X5X_XMAX) x = UOR7X5X_XMAX; if (y < UOR7X5X_YMIN) y = UOR7X5X_YMIN; if (y > UOR7X5X_YMAX) y = UOR7X5X_YMAX; #if (UOR7X5X_XPOL == 1) x = UOR7X5X_XMAX + UOR7X5X_XMIN - x; #endif #if (UOR7X5X_YPOL == 1) y = UOR7X5X_YMAX + UOR7X5X_YMIN - y; #endif x = (x- UOR7X5X_XMIN) * UOR7X5X_XLCD / (UOR7X5X_XMAX - UOR7X5X_XMIN); y = (y- UOR7X5X_YMIN) * UOR7X5X_YLCD / (UOR7X5X_YMAX - UOR7X5X_YMIN); //y = y - 32 * UOR7X5X_YLCD / (y / 2 + UOR7X5X_YLCD); return 
(x << 16) | y; } static struct uor7x5x_platform_data uor7x5x_pdata = { .init_irq = &uor7x5x_init_irq, .get_irq_level = &uor7x5x_get_irq_level, .abs_xmin = 0, .abs_xmax = UOR7X5X_XLCD, .abs_ymin = 0, .abs_ymax = UOR7X5X_YLCD, .convert = uor7x5x_convert, }; #endif #ifdef CONFIG_UOR6X5X_RESISTIVE_TOUCHSCREEN #include <linux/i2c/uor6x5x.h> //GPIOD_24 #define GPIO_UOR6X5X_PENIRQ ((GPIOD_bank_bit2_24(24)<<16) |GPIOD_bit_bit2_24(24)) #define GPIO_UOR6X5X_RST (GPIOD_IDX + 24) static int uor6x5x_init_irq(void) { /* memson Bit(s) Description 256-105 Unused 104 JTAG_TDO 103 JTAG_TDI 102 JTAG_TMS 101 JTAG_TCK 100 gpioA_23 99 gpioA_24 98 gpioA_25 97 gpioA_26 98-76 gpioE[21:0] 75-50 gpioD[24:0] 49-23 gpioC[26:0] 22-15 gpioB[22;15] 14-0 gpioA[14:0] */ printk("uor6x5x_init_irq \n"); /* set input mode */ gpio_direction_input(GPIO_UOR6X5X_PENIRQ); /* set gpio interrupt #0 source=GPIOD_24, and triggered by falling edge(=1) */ gpio_enable_edge_int(GPIO_UOR6X5X_RST, 1, 0); return 0; } static int uor6x5x_get_irq_level(void) { return gpio_get_value(GPIO_UOR6X5X_PENIRQ); } #define UOR6X5X_XLCD 800 #define UOR6X5X_YLCD 600 #define UOR6X5X_SWAP_XY 0 #define UOR6X5X_XPOL 0 #define UOR6X5X_YPOL 1 #define UOR6X5X_XMIN 230 #define UOR6X5X_XMAX 3800 #define UOR6X5X_YMIN 330 #define UOR6X5X_YMAX 3620 int uor6x5x_convert(int x, int y) { #if (UOR6X5X_SWAP_XY == 1) swap(x, y); #endif if (x < UOR6X5X_XMIN) x = UOR6X5X_XMIN; if (x > UOR6X5X_XMAX) x = UOR6X5X_XMAX; if (y < UOR6X5X_YMIN) y = UOR6X5X_YMIN; if (y > UOR6X5X_YMAX) y = UOR6X5X_YMAX; #if (UOR6X5X_XPOL == 1) x = UOR6X5X_XMAX + UOR6X5X_XMIN - x; #endif #if (UOR6X5X_YPOL == 1) y = UOR6X5X_YMAX + UOR6X5X_YMIN - y; #endif x = (x- UOR6X5X_XMIN) * UOR6X5X_XLCD / (UOR6X5X_XMAX - UOR6X5X_XMIN); y = (y- UOR6X5X_YMIN) * UOR6X5X_YLCD / (UOR6X5X_YMAX - UOR6X5X_YMIN); //y = y - 32 * UOR6X5X_YLCD / (y / 2 + UOR6X5X_YLCD); return (x << 16) | y; } static struct uor6x5x_platform_data uor6x5x_pdata = { .init_irq = &uor6x5x_init_irq, .get_irq_level = &uor6x5x_get_irq_level, .abs_xmin = 0, .abs_xmax = UOR6X5X_XLCD, .abs_ymin = 0, .abs_ymax = UOR6X5X_YLCD, .convert = uor6x5x_convert, }; #endif #ifdef CONFIG_ANDROID_PMEM static struct android_pmem_platform_data pmem_data = { .name = "pmem", .start = PMEM_START, .size = PMEM_SIZE, .no_allocator = 1, .cached = 0, }; static struct platform_device android_pmem_device = { .name = "android_pmem", .id = 0, .dev = { .platform_data = &pmem_data, }, }; #endif #if defined(CONFIG_AML_RTC) static struct platform_device aml_rtc_device = { .name = "aml_rtc", .id = -1, }; #endif #if defined (CONFIG_AMLOGIC_VIDEOIN_MANAGER) static struct resource vm_resources[] = { [0] = { .start = VM_ADDR_START, .end = VM_ADDR_END, .flags = IORESOURCE_MEM, }, }; static struct platform_device vm_device = { .name = "vm", .id = 0, .num_resources = ARRAY_SIZE(vm_resources), .resource = vm_resources, }; #endif /* AMLOGIC_VIDEOIN_MANAGER */ #if defined(CONFIG_VIDEO_AMLOGIC_CAPTURE_GC0308) static int gc0308_v4l2_init(void) { udelay(1000); WRITE_CBUS_REG(HHI_ETH_CLK_CNTL,0x31e);// 24M XTAL WRITE_CBUS_REG(HHI_DEMOD_PLL_CNTL,0x232);// 24M XTAL udelay(1000); eth_set_pinmux(ETH_BANK0_GPIOC3_C12,ETH_CLK_OUT_GPIOC12_REG3_1, 1); #ifdef CONFIG_SN7325 printk( "amlogic camera driver: init gc0308_v4l2_init. 
\n"); configIO(1, 0); setIO_level(1, 0, 2);//30m poweer_disable //setIO_level(1, 0, 2);//200m poweer_disable setIO_level(1, 0, 0);//30m pwd enable //setIO_level(1, 0, 6);//200m pwd low configIO(0, 0); setIO_level(0, 0, 3);//30m reset low //setIO_level(0, 0, 2);//200m reset low configIO(1, 0); msleep(10); setIO_level(1, 1, 2);//30m poweer_enable msleep(10); configIO(0, 0); setIO_level(0, 1, 3);//30m reset high msleep(20); #endif } static int gc0308_v4l2_uninit(void) { #ifdef CONFIG_SN7325 printk( "amlogic camera driver: uninit gc0308_v4l2_uninit. \n"); configIO(1, 0); setIO_level(1, 0, 1);//200m poweer_disable setIO_level(1, 0, 6);//200m pwd low configIO(0, 0); setIO_level(0, 0, 2);//200m reset low configIO(1, 0); setIO_level(1, 0, 2);//30m poweer_disable setIO_level(1, 1, 0);//30m pwd enable configIO(0, 0); setIO_level(0, 0, 3);//30m reset low msleep(20); #endif } static int gc0308_v4l2_disable(void) { return; } static void gc0308_v4l2_early_suspend(void) { #if defined(CONFIG_TCA6424)||defined(CONFIG_SN7325) configIO(1, 0); setIO_level(1, 1, 0); #endif } static void gc0308_v4l2_late_resume(void) { #if defined(CONFIG_TCA6424)||defined(CONFIG_SN7325) configIO(1, 0); setIO_level(1, 0, 0); #endif } aml_plat_cam_data_t video_gc0308_data = { .name="video-gc0308", .video_nr=1, .device_init= gc0308_v4l2_init, .device_uninit=gc0308_v4l2_uninit, .early_suspend = gc0308_v4l2_early_suspend, .late_resume = gc0308_v4l2_late_resume, .device_disable=gc0308_v4l2_disable, }; #endif /* VIDEO_AMLOGIC_CAPTURE_GT2005 */ #if defined(CONFIG_VIDEO_AMLOGIC_CAPTURE_GT2005) //#include <media/amlogic/aml_camera.h> static int gt2005_v4l2_init(void) { udelay(1000); WRITE_CBUS_REG(HHI_ETH_CLK_CNTL,0x30f);// 24M XTAL WRITE_CBUS_REG(HHI_DEMOD_PLL_CNTL,0x232);// 24M XTAL udelay(1000); eth_set_pinmux(ETH_BANK0_GPIOC3_C12,ETH_CLK_OUT_GPIOC12_REG3_1, 1); #ifdef CONFIG_SN7325 printk( "amlogic camera driver: init CONFIG_SN7325. \n"); configIO(1, 0); //setIO_level(1, 0, 1);//30m poweer_disable //1:PP, 0:Level, 2:ppnum setIO_level(1, 0, 1);//200m poweer_disable //setIO_level(1, 1, 0);//30m pwd disable setIO_level(1, 0, 6);//200m pwd low configIO(0, 0);//OD //setIO_level(0, 0, 3);//30m reset low setIO_level(0, 0, 2);//200m reset low configIO(1, 0);//PP msleep(10); setIO_level(1, 1, 1);//200m poweer_enable msleep(10); configIO(0, 0); setIO_level(0, 1, 2);//200m reset high msleep(10); configIO(1, 0); setIO_level(1, 1, 6);//200m pwd high //configIO(1, 0); msleep(20); #endif } static int gt2005_v4l2_uninit(void) { #ifdef CONFIG_SN7325 printk( "amlogic camera driver: uninit gt2005_v4l2_uninit. \n"); configIO(1, 0); setIO_level(1, 0, 1);//200m poweer_disable setIO_level(1, 0, 6);//200m pwd low configIO(0, 0); setIO_level(0, 0, 2);//200m reset low msleep(20); #endif } static int gt2005_v4l2_disable(void) { #if 0 printk( "amlogic camera driver: gt2005_v4l2_disable. 
\n"); configIO(1, 0); setIO_level(1, 0, 6);//200m pwd low msleep(20); #endif } static void gt2005_v4l2_early_suspend(void) { #if defined(CONFIG_TCA6424)||defined(CONFIG_SN7325) configIO(1, 0); setIO_level(1, 0, 1); #endif } static void gt2005_v4l2_late_resume(void) { #if defined(CONFIG_TCA6424)||defined(CONFIG_SN7325) configIO(1, 0); setIO_level(1, 1, 1); #endif } aml_plat_cam_data_t video_gt2005_data = { .name="video-gt2005", .video_nr=0, .device_init= gt2005_v4l2_init, .device_uninit=gt2005_v4l2_uninit, .early_suspend = gt2005_v4l2_early_suspend, .late_resume = gt2005_v4l2_late_resume, .device_disable=gt2005_v4l2_disable, }; #endif /* VIDEO_AMLOGIC_CAPTURE_GT2005 */ #if defined(CONFIG_SUSPEND) typedef struct { char name[32]; unsigned bank; unsigned bit; gpio_mode_t mode; unsigned value; unsigned enable; } gpio_data_t; //#define MAX_GPIO 24 #define MAX_GPIO 23 static gpio_data_t gpio_data[MAX_GPIO] = { // 5 {"GPIOA_7 -- BL_PWM", GPIOA_bank_bit0_14(7), GPIOA_bit_bit0_14(7), GPIO_OUTPUT_MODE, 1, 1}, {"GPIOA_6 -- VCCx2_EN", GPIOA_bank_bit0_14(6), GPIOA_bit_bit0_14(6), GPIO_OUTPUT_MODE, 1, 1}, {"GPIOA_5 -- LCD_CLK", GPIOA_bank_bit0_14(5), GPIOA_bit_bit0_14(5), GPIO_OUTPUT_MODE, 1, 1}, {"GPIOA_2 -- OEH", GPIOA_bank_bit0_14(2), GPIOA_bit_bit0_14(2), GPIO_OUTPUT_MODE, 1, 1}, // {"GPIOA_0 -- WIFI_32K", GPIOA_bank_bit0_14(0), GPIOA_bit_bit0_14(0), GPIO_OUTPUT_MODE, 1, 1}, // 6 {"GPIOB_2 -- WIFI_SD_CMD", GPIOB_bank_bit0_7(2), GPIOB_bit_bit0_7(2), GPIO_OUTPUT_MODE, 1, 1}, {"GPIOB_3 -- WIFI_SD_CLK", GPIOB_bank_bit0_7(3), GPIOB_bit_bit0_7(3), GPIO_OUTPUT_MODE, 1, 1}, {"GPIOB_4 -- WIFI_SD_D0", GPIOB_bank_bit0_7(4), GPIOB_bit_bit0_7(4), GPIO_OUTPUT_MODE, 1, 1}, {"GPIOB_5 -- WIFI_SD_D1", GPIOB_bank_bit0_7(5), GPIOB_bit_bit0_7(5), GPIO_OUTPUT_MODE, 1, 1}, {"GPIOB_6 -- WIFI_SD_D2", GPIOB_bank_bit0_7(6), GPIOB_bit_bit0_7(6), GPIO_OUTPUT_MODE, 1, 1}, {"GPIOB_7 -- WIFI_SD_D3", GPIOB_bank_bit0_7(7), GPIOB_bit_bit0_7(7), GPIO_OUTPUT_MODE, 1, 1}, //camera {"GPIOC_3 -- camera PCLK", GPIOC_bank_bit0_26(3), GPIOC_bit_bit0_26(3), GPIO_OUTPUT_MODE, 1, 1}, // 6 //pannel {"GPIOD_12 -- LCD_PWR_EN", GPIOD_bank_bit2_24(12), GPIOD_bit_bit2_24(12), GPIO_OUTPUT_MODE, 1, 1}, {"GPIOD_13 -- nLCD_VCC", GPIOD_bank_bit2_24(13), GPIOD_bit_bit2_24(13), GPIO_OUTPUT_MODE, 1, 1}, // {"GPIOD_21 -- EXT_PWER_EN2", GPIOD_bank_bit2_24(21), GPIOD_bit_bit2_24(21), GPIO_OUTPUT_MODE, 1, 1}, {"GPIOD_22 -- EXT_PWER_EN1", GPIOD_bank_bit2_24(22), GPIOD_bit_bit2_24(22), GPIO_OUTPUT_MODE, 1, 1}, {"GPIOD_23 -- KEYLED_CTRL", GPIOD_bank_bit2_24(23), GPIOD_bit_bit2_24(23), GPIO_OUTPUT_MODE, 1, 1}, //backlight {"GPIOD_18 -- BACKLIGHT_EN", GPIOD_bank_bit2_24(18), GPIOD_bit_bit2_24(18), GPIO_OUTPUT_MODE, 1, 1}, // 5 {"GPIOE_4 -- NAND_nCS1", GPIOE_bank_bit0_15(4), GPIOE_bit_bit0_15(4), GPIO_OUTPUT_MODE, 1, 1}, {"GPIOE_5 -- NAND_nCS2", GPIOE_bank_bit0_15(5), GPIOE_bit_bit0_15(5), GPIO_OUTPUT_MODE, 1, 1}, {"GPIOE_16 -- nand_ncs3", GPIOE_bank_bit16_21(16), GPIOE_bit_bit16_21(16), GPIO_OUTPUT_MODE, 1, 1}, {"GPIOE_17 -- nand_ncs4", GPIOE_bank_bit16_21(17), GPIOE_bit_bit16_21(17), GPIO_OUTPUT_MODE, 1, 1}, {"GPIOE_18 -- Linux_TX", GPIOE_bank_bit16_21(18), GPIOE_bit_bit16_21(18), GPIO_OUTPUT_MODE, 1, 1}, // ----------------------------------- i2s --------------------------------- {"TEST_N -- I2S_DOUT", GPIOJTAG_bank_bit(16), GPIOJTAG_bit_bit16(16), GPIO_OUTPUT_MODE, 1, 1}, }; static void save_gpio(int port) { gpio_data[port].mode = get_gpio_mode(gpio_data[port].bank, gpio_data[port].bit); if (gpio_data[port].mode==GPIO_OUTPUT_MODE) { if (gpio_data[port].enable){ 
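#if 0 /* illustrative sketch, not part of this board file. The suspend path
       * around save_gpio()/restore_gpio() snapshots each output GPIO's
       * driven value and flips the pin to input; resume re-drives the saved
       * level. The core of that save/restore pattern over a plain struct,
       * with hw_level[] standing in for the real get/set_gpio_val calls: */
struct pin_state { int is_output; int value; };

void suspend_pins(struct pin_state *pins, int n, int *hw_level)
{
    for (int i = 0; i < n; i++)
        if (pins[i].is_output)
            pins[i].value = hw_level[i];  /* remember the driven level;
                                           * real code then sets INPUT mode */
}

void resume_pins(const struct pin_state *pins, int n, int *hw_level)
{
    for (int i = 0; i < n; i++)
        if (pins[i].is_output)
            hw_level[i] = pins[i].value;  /* re-drive the saved level;
                                           * real code restores OUTPUT mode */
}
#endif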
printk("change %s output %d to input\n", gpio_data[port].name, gpio_data[port].value); gpio_data[port].value = get_gpio_val(gpio_data[port].bank, gpio_data[port].bit); set_gpio_mode(gpio_data[port].bank, gpio_data[port].bit, GPIO_INPUT_MODE); } else{ printk("no change %s output %d\n", gpio_data[port].name, gpio_data[port].value); } } } static void restore_gpio(int port) { if ((gpio_data[port].mode==GPIO_OUTPUT_MODE)&&(gpio_data[port].enable)) { set_gpio_val(gpio_data[port].bank, gpio_data[port].bit, gpio_data[port].value); set_gpio_mode(gpio_data[port].bank, gpio_data[port].bit, GPIO_OUTPUT_MODE); printk("%s output %d\n", gpio_data[port].name, gpio_data[port].value); } } typedef struct { char name[32]; unsigned reg; unsigned bits; unsigned enable; } pinmux_data_t; #define MAX_PINMUX 13 pinmux_data_t pinmux_data[MAX_PINMUX] = { {"HDMI", 0, (1<<2)|(1<<1)|(1<<0), 1}, {"TCON", 0, (1<<14)|(1<<11), 1}, {"I2S_OUT", 0, (1<<18), 1}, {"I2S_CLK", 1, (1<<19)|(1<<15)|(1<<11), 1}, {"SPI", 1, (1<<29)|(1<<27)|(1<<25)|(1<<23), 1}, {"I2C", 2, (1<<5)|(1<<2), 1}, {"SD", 2, (1<<15)|(1<<14)|(1<<13)|(1<<12)|(1<<8), 1}, {"PWM", 2, (1<<31), 1}, {"UART_A", 3, (1<<24)|(1<23), 0}, {"RGB", 4, (1<<5)|(1<<4)|(1<<3)|(1<<2)|(1<<1)|(1<<0), 1}, {"UART_B", 5, (1<<24)|(1<23), 0}, {"REMOTE", 5, (1<<31), 1}, {"CAMERA", 3, (1<<13), 1}, }; static unsigned pinmux_backup[6]; static void save_pinmux(void) { int i; for (i=0;i<6;i++) pinmux_backup[i] = READ_CBUS_REG(PERIPHS_PIN_MUX_0+i); for (i=0;i<MAX_PINMUX;i++){ if (pinmux_data[i].enable){ printk("%s %x\n", pinmux_data[i].name, pinmux_data[i].bits); clear_mio_mux(pinmux_data[i].reg, pinmux_data[i].bits); } } } static void restore_pinmux(void) { int i; for (i=0;i<6;i++) WRITE_CBUS_REG(PERIPHS_PIN_MUX_0+i, pinmux_backup[i]); } static void set_vccx2(int power_on) { int i; if(power_on) { for (i=0;i<MAX_GPIO;i++) restore_gpio(i); restore_pinmux(); set_gpio_val(GPIOA_bank_bit(6), GPIOA_bit_bit0_14(6), 1); set_gpio_mode(GPIOA_bank_bit(6), GPIOA_bit_bit0_14(6), GPIO_OUTPUT_MODE); //set clk for wifi SET_CBUS_REG_MASK(PERIPHS_PIN_MUX_8, (1<<18)); CLEAR_CBUS_REG_MASK(PREG_EGPIO_EN_N, (1<<4)); } else { set_gpio_val(GPIOA_bank_bit(6), GPIOA_bit_bit0_14(6), 0); set_gpio_mode(GPIOA_bank_bit(6), GPIOA_bit_bit0_14(6), GPIO_OUTPUT_MODE); save_pinmux(); for (i=0;i<MAX_GPIO;i++) save_gpio(i); //disable wifi clk //CLEAR_CBUS_REG_MASK(PERIPHS_PIN_MUX_8, (1<<18)); //SET_CBUS_REG_MASK(PREG_EGPIO_EN_N, (1<<4)); } } #ifdef CONFIG_EXGPIO typedef struct { char name[32]; unsigned bank; unsigned bit; gpio_mode_t mode; unsigned s_value; unsigned r_value; unsigned enable; } exgpio_data_t; #define MAX_EXGPIO 2 static exgpio_data_t exgpio_data[MAX_EXGPIO] = { // 11 {"PP0 -- CAMR2_PWDN", EXGPIO_BANK1, 0, GPIO_OUTPUT_MODE, 1, 0, 1}, {"PP1 -- CAM_PWR_EN1", EXGPIO_BANK1, 1, GPIO_OUTPUT_MODE, 0, 0, 1}, //{"PP2 -- CAM_PWR_EN2", EXGPIO_BANK1, 2, GPIO_OUTPUT_MODE, 0, 0, 1}, //{"PP3 -- CHG_SW", EXGPIO_BANK1, 3, GPIO_OUTPUT_MODE, 1, 0, 1}, //{"PP4 -- SPKVDD_ON", EXGPIO_BANK1, 4, GPIO_OUTPUT_MODE, 0, 0, 1}, //{"PP5 -- G_RESET", EXGPIO_BANK1, 5, GPIO_OUTPUT_MODE, 0, 0, 1}, //{"PP6 -- CAMR1_PWDN", EXGPIO_BANK1, 6, GPIO_OUTPUT_MODE, 0, 0, 1}, //{"OD0 -- SENSORPWR_EN", EXGPIO_BANK0, 0, GPIO_OUTPUT_MODE, 0, 0, 1}, //{"OD2 -- SENSOR_RST_1", EXGPIO_BANK0, 2, GPIO_OUTPUT_MODE, 1, 0, 1}, //{"OD3 -- SENSOR_RST_2", EXGPIO_BANK0, 3, GPIO_OUTPUT_MODE, 1, 0, 1}, //{"OD5 -- WIFI_PWD_L", EXGPIO_BANK0, 5, GPIO_OUTPUT_MODE, 0, 0, 1}, }; static void save_exgpio(int port) { exgpio_data[port].mode = get_gpio_mode(exgpio_data[port].bank, 
exgpio_data[port].bit); if((exgpio_data[port].mode==GPIO_OUTPUT_MODE) && exgpio_data[port].enable) { exgpio_data[port].r_value = get_gpio_val(exgpio_data[port].bank, exgpio_data[port].bit); set_gpio_val(exgpio_data[port].bank, exgpio_data[port].bit, exgpio_data[port].s_value); #if 0 int value = get_gpio_val(exgpio_data[port].bank, exgpio_data[port].bit); printk("name:%s, write:%d, read:%d\n", exgpio_data[port].name, exgpio_data[port].s_value, value); #endif } else{ printk("no change %s output %d\n", exgpio_data[port].name, exgpio_data[port].s_value); } } static void restore_exgpio(int port) { if ((exgpio_data[port].mode==GPIO_OUTPUT_MODE) && exgpio_data[port].enable) { set_gpio_val(exgpio_data[port].bank, exgpio_data[port].bit, exgpio_data[port].r_value); #if 0 int value = get_gpio_val(exgpio_data[port].bank, exgpio_data[port].bit); printk("name:%s, write:%d, read:%d\n", exgpio_data[port].name, exgpio_data[port].r_value, value); #endif } else{ printk("no change %s output %d\n", exgpio_data[port].name, exgpio_data[port].r_value); } } static void set_exgpio_on_early_suspend(int power_on) { int i; if(power_on) { for (i=0;i<MAX_EXGPIO;i++) restore_exgpio(i); } else { for (i=0;i<MAX_EXGPIO;i++) save_exgpio(i); } } #else #define set_exgpio_on_early_suspend NULL #endif static struct meson_pm_config aml_pm_pdata = { .pctl_reg_base = IO_APB_BUS_BASE, .mmc_reg_base = APB_REG_ADDR(0x1000), .hiu_reg_base = CBUS_REG_ADDR(0x1000), .power_key = (1<<8), .ddr_clk = 0x00110820, .sleepcount = 128, .set_vccx2 = set_vccx2, .set_exgpio_early_suspend = set_exgpio_on_early_suspend, .core_voltage_adjust = 10, }; static struct platform_device aml_pm_device = { .name = "pm-meson", .dev = { .platform_data = &aml_pm_pdata, }, .id = -1, }; #endif #if defined(CONFIG_I2C_SW_AML) static struct aml_sw_i2c_platform aml_sw_i2c_plat = { .sw_pins = { .scl_reg_out = MESON_I2C_PREG_GPIOB_OUTLVL, .scl_reg_in = MESON_I2C_PREG_GPIOB_INLVL, .scl_bit = 2, /*MESON_I2C_MASTER_A_GPIOB_2_REG*/ .scl_oe = MESON_I2C_PREG_GPIOB_OE, .sda_reg_out = MESON_I2C_PREG_GPIOB_OUTLVL, .sda_reg_in = MESON_I2C_PREG_GPIOB_INLVL, .sda_bit = 3, /*MESON_I2C_MASTER_A_GPIOB_3_BIT*/ .sda_oe = MESON_I2C_PREG_GPIOB_OE, }, .udelay = 2, .timeout = 100, }; static struct platform_device aml_sw_i2c_device = { .name = "aml-sw-i2c", .id = -1, .dev = { .platform_data = &aml_sw_i2c_plat, }, }; #endif #if defined(CONFIG_I2C_AML) static struct aml_i2c_platform aml_i2c_plat = { .wait_count = 1000000, .wait_ack_interval = 5, .wait_read_interval = 5, .wait_xfer_interval = 5, .master_no = AML_I2C_MASTER_B, .use_pio = 0, .master_i2c_speed = AML_I2C_SPPED_200K, .master_b_pinmux = { .scl_reg = MESON_I2C_MASTER_B_GPIOB_0_REG, .scl_bit = MESON_I2C_MASTER_B_GPIOB_0_BIT, .sda_reg = MESON_I2C_MASTER_B_GPIOB_1_REG, .sda_bit = MESON_I2C_MASTER_B_GPIOB_1_BIT, } }; static struct resource aml_i2c_resource[] = { [0] = {/*master a*/ .start = MESON_I2C_MASTER_A_START, .end = MESON_I2C_MASTER_A_END, .flags = IORESOURCE_MEM, }, [1] = {/*master b*/ .start = MESON_I2C_MASTER_B_START, .end = MESON_I2C_MASTER_B_END, .flags = IORESOURCE_MEM, }, [2] = {/*slave*/ .start = MESON_I2C_SLAVE_START, .end = MESON_I2C_SLAVE_END, .flags = IORESOURCE_MEM, }, }; static struct platform_device aml_i2c_device = { .name = "aml-i2c", .id = -1, .num_resources = ARRAY_SIZE(aml_i2c_resource), .resource = aml_i2c_resource, .dev = { .platform_data = &aml_i2c_plat, }, }; #endif #ifdef CONFIG_AMLOGIC_PM static int is_ac_connected(void) { return (READ_CBUS_REG(ASSIST_HW_REV)&(1<<9))? 
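#if 0 /* illustrative sketch, not part of this board file. is_ac_connected()
       * above is a single status bit (bit 9, "GP_INPUT1") in ASSIST_HW_REV
       * turned into a boolean; the same probe, generalized, with reg
       * standing in for the register read: */
static inline int status_bit(unsigned int reg, unsigned int bit)
{
    return (reg & (1u << bit)) ? 1 : 0;   /* 1: present, 0: absent */
}
/* e.g. ac_online = status_bit(READ_CBUS_REG(ASSIST_HW_REV), 9); */
#endif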
1:0;//GP_INPUT1 } //static int is_usb_connected(void) //{ // return 0; //} static void ic_control(int flag) { if(flag) {//ic on set_gpio_val(GPIOD_bank_bit2_24(19), GPIOD_bit_bit2_24(19), 1); set_gpio_mode(GPIOD_bank_bit2_24(19), GPIOD_bit_bit2_24(19), GPIO_OUTPUT_MODE); } else{ set_gpio_val(GPIOD_bank_bit2_24(19), GPIOD_bit_bit2_24(19), 0); set_gpio_mode(GPIOD_bank_bit2_24(19), GPIOD_bit_bit2_24(19), GPIO_OUTPUT_MODE); } } static void powerkey_led_onoff(int onoff) { if(onoff == 0){//powerkey led off set_gpio_val(GPIOD_bank_bit2_24(23), GPIOD_bit_bit2_24(23), 0); set_gpio_mode(GPIOD_bank_bit2_24(23), GPIOD_bit_bit2_24(23), GPIO_OUTPUT_MODE); }else if(onoff) {//powerkey led on set_gpio_val(GPIOD_bank_bit2_24(23), GPIOD_bit_bit2_24(23), 1); set_gpio_mode(GPIOD_bank_bit2_24(23), GPIOD_bit_bit2_24(23), GPIO_OUTPUT_MODE); } } static void set_charge(int flags) { //low: fast charge high: slow charge CLEAR_CBUS_REG_MASK(PERIPHS_PIN_MUX_7, (1<<18)); if(flags == 1)//high { #ifdef CONFIG_SN7325 configIO(1, 0); setIO_level(1, 0, 3); #endif } else//low { #ifdef CONFIG_SN7325 configIO(1, 0); setIO_level(1, 1, 3); #endif } } #ifdef CONFIG_SARADC_AM extern int get_adc_sample(int chan); #endif static int get_bat_vol(void) { #ifdef CONFIG_SARADC_AM return get_adc_sample(5); #else return 0; #endif } static int get_charge_status(void) { static char count =0; if ((READ_CBUS_REG(ASSIST_HW_REV)&(1<<8))? 1:0){ if ((count<10) && (count>=0)){ count++; }else{ return 1; } }else{ count = 0; } return 0; } static void set_bat_off(void) { //BL_PWM -> GPIOA_7: 0 #if 0 set_gpio_val(GPIOA_bank_bit(7), GPIOA_bit_bit0_14(7), 0); set_gpio_mode(GPIOA_bank_bit(7), GPIOA_bit_bit0_14(7), GPIO_OUTPUT_MODE); #else set_gpio_val(GPIOD_bank_bit2_24(18), GPIOD_bit_bit2_24(18), 0); set_gpio_mode(GPIOD_bank_bit2_24(18), GPIOD_bit_bit2_24(18), GPIO_OUTPUT_MODE); #endif if(is_ac_connected()){ //AC in after power off press kernel_restart("reboot"); } set_gpio_val(GPIOA_bank_bit(8), GPIOA_bit_bit0_14(8), 0); set_gpio_mode(GPIOA_bank_bit(8), GPIOA_bit_bit0_14(8), GPIO_OUTPUT_MODE); } #if 0 static int bat_value_table[37]={ 0, //0 538,//0 540,//4 542,//10 544,//15 545,//16 546,//18 547,//20 548,//23 549,//26 551,//29 553,//32 555,//35 557,//37 559,//40 561,//43 563,//46 565,//49 567,//51 569,//54 571,//57 573,//60 575,//63 577,//66 579,//68 582,//71 585,//74 589,//77 593,//80 596,//83 599,//85 601,//88 604,//91 608,//95 610,//97 620,//100 620 //100 }; static int bat_charge_value_table[37]={ 0, //0 547,//0 551,//4 553,//10 556,//15 558,//16 560,//18 562,//20 564,//23 566,//26 567,//29 568,//32 569,//35 570,//37 571,//40 572,//43 573,//46 574,//49 576,//51 578,//54 580,//57 582,//60 585,//63 587,//66 590,//68 593,//71 596,//74 599,//77 602,//80 605,//83 608,//85 612,//88 615,//91 617,//95 618,//97 620,//100 620 //100 }; #else static int bat_value_table[36]={ 0, //0 539*4/3,//0 545*4/3,//5 548*4/3,//15 549*4/3,//16 551*4/3,//18 553*4/3,//20 555*4/3,//23 558*4/3,//26 560*4/3,//29 562*4/3,//32 563*4/3,//35 564*4/3,//37 566*4/3,//40 568*4/3,//43 570*4/3,//46 572*4/3,//49 573*4/3,//51 575*4/3,//54 578*4/3,//57 580*4/3,//60 582*4/3,//63 585*4/3,//66 587*4/3,//68 590*4/3,//71 593*4/3,//74 596*4/3,//77 599*4/3,//80 602*4/3,//83 604*4/3,//85 607*4/3,//88 610*4/3,//91 611*4/3,//95 612*4/3,//97 613*4/3,//100 613*4/3 //100 }; static int bat_charge_value_table[36]={ 0, //0 564*4/3,//0 573*4/3,//5 578*4/3,//15 579*4/3,//16 581*4/3,//18 582*4/3,//20 584*4/3,//23 585*4/3,//26 587*4/3,//29 588*4/3,//32 589*4/3,//35 590*4/3,//37 592*4/3,//40 593*4/3,//43 
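#if 0 /* illustrative sketch, not part of this board file. get_charge_status()
       * above reports "charging" only after the status bit has been seen on
       * ten consecutive polls, filtering glitches on the charger line. The
       * counter pattern in isolation: */
int debounced_status(int raw_bit_set)
{
    static int count;            /* consecutive "set" samples seen so far */
    if (raw_bit_set) {
        if (count < 10) {        /* not yet stable: keep counting */
            count++;
            return 0;
        }
        return 1;                /* stable for ten polls: report it */
    }
    count = 0;                   /* any gap restarts the window */
    return 0;
}
#endif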
595*4/3,//46 597*4/3,//49 598*4/3,//51 601*4/3,//54 604*4/3,//57 605*4/3,//60 607*4/3,//63 608*4/3,//66 609*4/3,//68 610*4/3,//71 611*4/3,//74 612*4/3,//77 614*4/3,//80 616*4/3,//83 618*4/3,//85 619*4/3,//88 620*4/3,//91 621*4/3,//95 622*4/3,//97 623*4/3,//100 623*4/3 //100 }; #endif static int bat_level_table[36]={ 0, 0, 5, 15, 16, 18, 20, 23, 26, 29, 32, 35, 37, 40, 43, 46, 49, 51, 54, 57, 60, 63, 66, 68, 71, 74, 77, 80, 83, 85, 88, 91, 95, 97, 100, 100 }; static struct aml_power_pdata power_pdata = { .is_ac_online = is_ac_connected, //.is_usb_online = is_usb_connected, .set_charge = set_charge, .get_bat_vol = get_bat_vol, .get_charge_status = get_charge_status, .set_bat_off = set_bat_off, .bat_value_table = bat_value_table, .bat_charge_value_table = bat_charge_value_table, .bat_level_table = bat_level_table, .bat_table_len = 36, .ic_control = ic_control, .powerkey_led_onoff = powerkey_led_onoff, .is_support_usb_charging = 0, //.supplied_to = supplicants, //.num_supplicants = ARRAY_SIZE(supplicants), }; static struct platform_device power_dev = { .name = "aml-power", .id = -1, .dev = { .platform_data = &power_pdata, }, }; #endif #define PINMUX_UART_A UART_A_GPIO_D21_D22 #define PINMUX_UART_B UART_B_GPIO_E18_E19 #if defined(CONFIG_AM_UART_WITH_S_CORE) #if defined(CONFIG_AM_UART0_SET_PORT_A) #define UART_0_PORT UART_A #define UART_1_PORT UART_B #elif defined(CONFIG_AM_UART0_SET_PORT_B) #define UART_0_PORT UART_B #define UART_1_PORT UART_A #endif static struct aml_uart_platform aml_uart_plat = { .uart_line[0] = UART_0_PORT, .uart_line[1] = UART_1_PORT }; static struct platform_device aml_uart_device = { .name = "am_uart", .id = -1, .num_resources = 0, .resource = NULL, .dev = { .platform_data = &aml_uart_plat, }, }; #endif #ifdef CONFIG_EFUSE static bool efuse_data_verify(unsigned char *usid) { return true; } static struct efuse_platform_data aml_efuse_plat = { .pos = 337, .count = 20, .data_verify = efuse_data_verify, }; static struct platform_device aml_efuse_device = { .name = "efuse", .id = -1, .dev = { .platform_data = &aml_efuse_plat, }, }; #endif #ifdef CONFIG_AM_NAND /*static struct mtd_partition partition_info[] = { { .name = "U-BOOT", .offset = 0, .size=4*1024*1024, // .set_flags=0, // .dual_partnum=0, }, { .name = "Boot Para", .offset = 4*1024*1024, .size=4*1024*1024, // .set_flags=0, // .dual_partnum=0, }, { .name = "Kernel", .offset = 8*1024*1024, .size=4*1024*1024, // .set_flags=0, // .dual_partnum=0, }, { .name = "YAFFS2", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, // .set_flags=0, // .dual_partnum=0, }, // { .name="FTL_Part", // .offset=MTDPART_OFS_APPEND, // .size=MTDPART_SIZ_FULL, // // .set_flags=MTD_AVNFTL, // // .dual_partnum=1, // } }; static struct aml_m1_nand_platform aml_2kpage128kblocknand_platform = { .page_size = 2048, .spare_size=64, .erase_size= 128*1024, .bch_mode=1, //BCH8 .encode_size=528, .timing_mode=5, .ce_num=1, .onfi_mode=0, .partitions = partition_info, .nr_partitions = ARRAY_SIZE(partition_info), }; */ /*static struct aml_m1_nand_platform aml_Micron4GBABAnand_platform = { .page_size = 2048*2, .spare_size= 224, //for micron ABA 4GB .erase_size=1024*1024, .bch_mode= 3, //BCH16 .encode_size=540, .timing_mode=5, .onfi_mode=1, .ce_num=1, .partitions = partition_info, .nr_partitions = ARRAY_SIZE(partition_info), }; static struct resource aml_nand_resources[] = { { .start = 0xc1108600, .end = 0xc1108624, .flags = IORESOURCE_MEM, }, }; static struct platform_device aml_nand_device = { .name = "aml_m1_nand", .id = 0, .num_resources = 
ARRAY_SIZE(aml_nand_resources), .resource = aml_nand_resources, .dev = { .platform_data = &aml_Micron4GBABAnand_platform, }, };*/ /*static struct mtd_partition normal_partition_info[] = { { .name = "environment", .offset = 8*1024*1024, .size = 8*1024*1024, }, { .name = "splash", .offset = 16*1024*1024, .size = 4*1024*1024, }, { .name = "recovery", .offset = 20*1024*1024, .size = 16*1024*1024, }, { .name = "boot", .offset = 36*1024*1024, .size = 16*1024*1024, }, { .name = "cache", .offset = 52*1024*1024, .size = 32*1024*1024, }, };*/ static struct mtd_partition multi_partition_info[] = { { .name = "logo", .offset = 32*1024*1024, .size = 16*1024*1024, }, { .name = "aml_logo", .offset = 48*1024*1024, .size = 16*1024*1024, }, { .name = "recovery", .offset = 64*1024*1024, .size = 16*1024*1024, }, { .name = "boot", .offset = 80*1024*1024, .size = 16*1024*1024, }, { .name = "system", .offset = 96*1024*1024, .size = 256*1024*1024, }, { .name = "cache", .offset = 352*1024*1024, .size = 64*1024*1024, }, { .name = "userdata", .offset = 416*1024*1024, .size = 256*1024*1024, }, { .name = "NFTL_Part", .offset = ((416 + 256)*1024*1024), .size = ((0x200000000 - (416 + 256)*1024*1024)), }, }; static struct aml_nand_platform aml_nand_mid_platform[] = { { .name = NAND_BOOT_NAME, .chip_enable_pad = AML_NAND_CE0, .ready_busy_pad = AML_NAND_CE0, .platform_nand_data = { .chip = { .nr_chips = 1, .options = (NAND_TIMING_MODE5 | NAND_ECC_BCH16_MODE), }, }, .T_REA = 20, .T_RHOH = 15, }, { .name = NAND_MULTI_NAME, .chip_enable_pad = (AML_NAND_CE0 | (AML_NAND_CE1 << 4) | (AML_NAND_CE2 << 8) | (AML_NAND_CE3 << 12)), .ready_busy_pad = (AML_NAND_CE0 | (AML_NAND_CE0 << 4) | (AML_NAND_CE1 << 8) | (AML_NAND_CE1 << 12)), .platform_nand_data = { .chip = { .nr_chips = 4, .nr_partitions = ARRAY_SIZE(multi_partition_info), .partitions = multi_partition_info, .options = (NAND_TIMING_MODE5 | NAND_ECC_BCH16_MODE | NAND_TWO_PLANE_MODE), }, }, .T_REA = 20, .T_RHOH = 15, } }; struct aml_nand_device aml_nand_mid_device = { .aml_nand_platform = aml_nand_mid_platform, .dev_num = ARRAY_SIZE(aml_nand_mid_platform), }; static struct resource aml_nand_resources[] = { { .start = 0xc1108600, .end = 0xc1108624, .flags = IORESOURCE_MEM, }, }; static struct platform_device aml_nand_device = { .name = "aml_m1_nand", .id = 0, .num_resources = ARRAY_SIZE(aml_nand_resources), .resource = aml_nand_resources, .dev = { .platform_data = &aml_nand_mid_device, }, }; #endif #if defined(CONFIG_AMLOGIC_BACKLIGHT) #define PWM_TCNT (600-1) #define PWM_MAX_VAL (420) static void aml_8726m_bl_init(void) { #if 0 unsigned val; WRITE_CBUS_REG_BITS(PERIPHS_PIN_MUX_0, 0, 22, 1); WRITE_CBUS_REG_BITS(PREG_AM_ANALOG_ADDR, 1, 0, 1); WRITE_CBUS_REG(VGHL_PWM_REG0, 0); WRITE_CBUS_REG(VGHL_PWM_REG1, 0); WRITE_CBUS_REG(VGHL_PWM_REG2, 0); WRITE_CBUS_REG(VGHL_PWM_REG3, 0); WRITE_CBUS_REG(VGHL_PWM_REG4, 0); val = (0 << 31) | // disable the overall circuit (0 << 30) | // 1:Closed Loop 0:Open Loop (PWM_TCNT << 16) | // PWM total count (0 << 13) | // Enable (1 << 12) | // enable (0 << 10) | // test (3 << 7) | // CS0 REF, Voltage FeedBack: about 0.27V (7 << 4) | // CS1 REF, Current FeedBack: about 0.54V (0 << 0); // DIMCTL Analog dimmer WRITE_CBUS_REG(VGHL_PWM_REG0, val); val = (1 << 30) | // enable high frequency clock (PWM_MAX_VAL << 16) | // MAX PWM value (0 << 0); // MIN PWM value WRITE_CBUS_REG(VGHL_PWM_REG1, val); val = (0 << 31) | // disable timeout test mode (0 << 30) | // timeout based on the comparator output (0 << 16) | // timeout = 10uS (0 << 13) | // Select oscillator as 
the clock (just for grins) (1 << 11) | // 1:Enable OverCurrent Portection 0:Disable (3 << 8) | // Filter: shift every 3 ticks (0 << 6) | // Filter: count 1uS ticks (0 << 5) | // PWM polarity : negative (0 << 4) | // comparator: negative, Different with NikeD3 (1 << 0); // +/- 1 WRITE_CBUS_REG(VGHL_PWM_REG2, val); val = ( 1 << 16) | // Feedback down-sampling = PWM_freq/1 = PWM_freq ( 1 << 14) | // enable to re-write MATCH_VAL ( 210 << 0) ; // preset PWM_duty = 50% WRITE_CBUS_REG(VGHL_PWM_REG3, val); val = ( 0 << 30) | // 1:Digital Dimmer 0:Analog Dimmer ( 2 << 28) | // dimmer_timebase = 1uS (1000 << 14) | // Digital dimmer_duty = 0%, the most darkness (1000 << 0) ; // dimmer_freq = 1KHz WRITE_CBUS_REG(VGHL_PWM_REG4, val); #else SET_CBUS_REG_MASK(PWM_MISC_REG_AB, (1 << 0)); msleep(100); SET_CBUS_REG_MASK(PERIPHS_PIN_MUX_2, (1<<31)); msleep(100); #endif printk("\n\nBacklight init.\n\n"); } static unsigned bl_level; static unsigned aml_8726m_get_bl_level(void) { return bl_level; } #define BL_MAX_LEVEL 60000 static int board_ver = 1; static int pre_level = 0; static int first_time = 0; static void aml_8726m_set_bl_level(unsigned level) { unsigned cs_level, hi, low; if ((first_time == 0)||(first_time == 1)) { first_time++; return; } if (level != pre_level) { pre_level = level; //#if 0 if (board_ver == 1) { if (level <= 40) { cs_level = 28740; } else if (level > 40 && level < 152) { cs_level = (level - 31) * 260 + 28740; } else cs_level = BL_MAX_LEVEL; } //#else else if (board_ver == 2) { if (level <= 0) { cs_level = 0; } else if (level > 0 && level < 255) { cs_level = level * 235; } else cs_level = BL_MAX_LEVEL; } low = cs_level; hi = BL_MAX_LEVEL - low; //printk("\n\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@level=<%d>@@@@@@@@@@@@@@@@@@@@@@@.\n\n",level); //WRITE_CBUS_REG_BITS(VGHL_PWM_REG0, cs_level, 0, 4); WRITE_CBUS_REG_BITS(PWM_PWM_A,(low/50),0,16); //low WRITE_CBUS_REG_BITS(PWM_PWM_A,(hi/50),16,16); //hi } } static void aml_8726m_power_on_bl(void) { #if 0 msleep(100); SET_CBUS_REG_MASK(PWM_MISC_REG_AB, (1 << 0)); msleep(100); SET_CBUS_REG_MASK(PERIPHS_PIN_MUX_2, (1<<31)); //BL_PWM -> GPIOA_7: 1 msleep(200); #if 0 set_gpio_val(GPIOA_bank_bit(7), GPIOA_bit_bit0_14(7), 1); set_gpio_mode(GPIOA_bank_bit(7), GPIOA_bit_bit0_14(7), GPIO_OUTPUT_MODE); #else set_gpio_val(GPIOD_bank_bit2_24(18), GPIOD_bit_bit2_24(18), 1); set_gpio_mode(GPIOD_bank_bit2_24(18), GPIOD_bit_bit2_24(18), GPIO_OUTPUT_MODE); #endif #else printk("backlight on\n"); #endif } static void aml_8726m_power_off_bl(void) { #if 0 //BL_PWM -> GPIOD_18: 0 #if 0 set_gpio_val(GPIOA_bank_bit(7), GPIOA_bit_bit0_14(7), 0); set_gpio_mode(GPIOA_bank_bit(7), GPIOA_bit_bit0_14(7), GPIO_OUTPUT_MODE); #else set_gpio_val(GPIOD_bank_bit2_24(18), GPIOD_bit_bit2_24(18), 0); set_gpio_mode(GPIOD_bank_bit2_24(18), GPIOD_bit_bit2_24(18), GPIO_OUTPUT_MODE); #endif CLEAR_CBUS_REG_MASK(PERIPHS_PIN_MUX_2, (1<<31)); CLEAR_CBUS_REG_MASK(PWM_MISC_REG_AB, (1 << 0)); set_gpio_val(GPIOA_bank_bit(7), GPIOA_bit_bit0_14(7), 0); set_gpio_mode(GPIOA_bank_bit(7), GPIOA_bit_bit0_14(7), GPIO_OUTPUT_MODE); #else printk("backlight off\n"); #endif } struct aml_bl_platform_data aml_bl_platform = { .bl_init = aml_8726m_bl_init, .power_on_bl = aml_8726m_power_on_bl, .power_off_bl = aml_8726m_power_off_bl, .get_bl_level = aml_8726m_get_bl_level, .set_bl_level = aml_8726m_set_bl_level, }; static struct platform_device aml_bl_device = { .name = "aml-bl", .id = -1, .num_resources = 0, .resource = NULL, .dev = { .platform_data = &aml_bl_platform, }, }; #endif #if 
defined(CONFIG_AM_TV_OUTPUT)||defined(CONFIG_AM_TCON_OUTPUT) static struct resource vout_device_resources[] = { [0] = { .start = 0, .end = 0, .flags = IORESOURCE_MEM, }, }; static struct platform_device vout_device = { .name = "mesonvout", .id = 0, .num_resources = ARRAY_SIZE(vout_device_resources), .resource = vout_device_resources, }; #endif #ifdef CONFIG_USB_ANDROID #ifdef CONFIG_USB_ANDROID_MASS_STORAGE static struct usb_mass_storage_platform_data mass_storage_pdata = { .nluns = 2, .vendor = "Pandigital", .product = "Pandigital", .release = 0x0100, }; static struct platform_device usb_mass_storage_device = { .name = "usb_mass_storage", .id = -1, .dev = { .platform_data = &mass_storage_pdata, }, }; #endif static char *usb_functions[] = { "usb_mass_storage" }; static char *usb_functions_adb[] = { #ifdef CONFIG_USB_ANDROID_MASS_STORAGE "usb_mass_storage", #endif #ifdef CONFIG_USB_ANDROID_ADB "adb" #endif }; static struct android_usb_product usb_products[] = { { .product_id = 0x0c01, .num_functions = ARRAY_SIZE(usb_functions), .functions = usb_functions, }, { .product_id = 0x0c02, .num_functions = ARRAY_SIZE(usb_functions_adb), .functions = usb_functions_adb, }, }; static struct android_usb_platform_data android_usb_pdata = { .vendor_id = 0x0bb4, .product_id = 0x0c01, .version = 0x0100, .product_name = "Pandigital", .manufacturer_name = "Pandigital", .num_products = ARRAY_SIZE(usb_products), .products = usb_products, .num_functions = ARRAY_SIZE(usb_functions_adb), .functions = usb_functions_adb, }; static struct platform_device android_usb_device = { .name = "android_usb", .id = -1, .dev = { .platform_data = &android_usb_pdata, }, }; #endif #ifdef CONFIG_POST_PROCESS_MANAGER static struct resource ppmgr_resources[] = { [0] = { .start = PPMGR_ADDR_START, .end = PPMGR_ADDR_END, .flags = IORESOURCE_MEM, }, }; static struct platform_device ppmgr_device = { .name = "ppmgr", .id = 0, .num_resources = ARRAY_SIZE(ppmgr_resources), .resource = ppmgr_resources, }; #endif static struct platform_device __initdata *platform_devs[] = { #if defined(CONFIG_JPEGLOGO) &jpeglogo_device, #endif #if defined (CONFIG_AMLOGIC_PM) &power_dev, #endif #if defined(CONFIG_FB_AM) &fb_device, #endif #if defined(CONFIG_AM_STREAMING) &codec_device, #endif #if defined(CONFIG_AM_VIDEO) &deinterlace_device, #endif #if defined(CONFIG_TVIN_VDIN) &vdin_device, #endif #if defined(CONFIG_TVIN_BT656IN) &bt656in_device, #endif #if defined(CONFIG_AML_AUDIO_DSP) &audiodsp_device, #endif &aml_audio, #if defined(CONFIG_CARDREADER) &amlogic_card_device, #endif #if defined(CONFIG_KEYPADS_AM)||defined(CONFIG_VIRTUAL_REMOTE)||defined(CONFIG_KEYPADS_AM_MODULE) &input_device, #endif #ifdef CONFIG_SARADC_AM &saradc_device, #endif #ifdef CONFIG_ADC_TOUCHSCREEN_AM &adc_ts_device, #endif #if defined(CONFIG_ADC_KEYPADS_AM)||defined(CONFIG_ADC_KEYPADS_AM_MODULE) &adc_kp_device, #endif #if defined(CONFIG_KEY_INPUT_CUSTOM_AM) || defined(CONFIG_KEY_INPUT_CUSTOM_AM_MODULE) &input_device_key, //changed by Elvis #endif #if defined(CONFIG_TOUCHSCREEN_ADS7846) &spi_gpio, #endif #ifdef CONFIG_AM_NAND &aml_nand_device, #endif #if defined(CONFIG_NAND_FLASH_DRIVER_MULTIPLANE_CE) &aml_nand_device, #endif #if defined(CONFIG_AML_RTC) &aml_rtc_device, #endif #if defined(CONFIG_SUSPEND) &aml_pm_device, #endif #if defined(CONFIG_ANDROID_PMEM) &android_pmem_device, #endif #if defined(CONFIG_I2C_SW_AML) &aml_sw_i2c_device, #endif #if defined(CONFIG_I2C_AML) &aml_i2c_device, #endif #if defined(CONFIG_AM_UART_WITH_S_CORE) &aml_uart_device, #endif #if 
defined(CONFIG_AMLOGIC_BACKLIGHT) &aml_bl_device, #endif #ifdef CONFIG_AMLOGIC_VIDEOIN_MANAGER &vm_device, #endif #if defined(CONFIG_AM_TV_OUTPUT)||defined(CONFIG_AM_TCON_OUTPUT) &vout_device, #endif #ifdef CONFIG_USB_ANDROID &android_usb_device, #ifdef CONFIG_USB_ANDROID_MASS_STORAGE &usb_mass_storage_device, #endif #endif #ifdef CONFIG_POST_PROCESS_MANAGER &ppmgr_device, #endif #ifdef CONFIG_EFUSE &aml_efuse_device, #endif #ifdef CONFIG_USB_PHY_CONTROL &usb_phy_control_device, #endif }; static struct i2c_board_info __initdata aml_i2c_bus_info[] = { #ifdef CONFIG_SENSORS_MMC31XX { I2C_BOARD_INFO(MMC31XX_I2C_NAME, MMC31XX_I2C_ADDR), }, #endif #ifdef CONFIG_SENSORS_MXC622X { I2C_BOARD_INFO(MXC622X_I2C_NAME, MXC622X_I2C_ADDR), }, #endif #if CONFIG_VIDEO_AMLOGIC_CAPTURE_GT2005 { /*gt2005 i2c address is 0x78/0x79*/ I2C_BOARD_INFO("gt2005_i2c", 0x78 >> 1 ), .platform_data = (void *)&video_gt2005_data }, #endif #if CONFIG_VIDEO_AMLOGIC_CAPTURE_GC0308 { /*gc0308 i2c address is 0x42/0x43*/ I2C_BOARD_INFO("gc0308_i2c", 0x42 >> 1 ), .platform_data = (void *)&video_gc0308_data }, #endif #ifdef CONFIG_SND_AML_M1_MID_WM8900 { I2C_BOARD_INFO("wm8900", 0x1A), .platform_data = (void *)&wm8900_pdata, }, #endif #ifdef CONFIG_SN7325 { I2C_BOARD_INFO("sn7325", 0x59), .platform_data = (void *)&sn7325_pdata, }, #endif #ifdef CONFIG_TOUCHSCREEN_TSC2007 { I2C_BOARD_INFO("tsc2007", 0x48), .irq = INT_GPIO_0, .platform_data = (void *)&tsc2007_pdata, }, #endif #ifdef CONFIG_ITK_CAPACITIVE_TOUCHSCREEN { I2C_BOARD_INFO("itk", 0x41), .irq = INT_GPIO_0, .platform_data = (void *)&itk_pdata, }, #endif #ifdef CONFIG_UOR7X5X_RESISTIVE_TOUCHSCREEN { I2C_BOARD_INFO("uor7x5x", 0x48), .irq = INT_GPIO_0, .platform_data = (void *)&uor7x5x_pdata, }, #endif #ifdef CONFIG_UOR6X5X_RESISTIVE_TOUCHSCREEN { I2C_BOARD_INFO("uor6x5x", 0x48), .irq = INT_GPIO_0, .platform_data = (void *)&uor6x5x_pdata, }, #endif }; static int __init aml_i2c_init(void) { i2c_register_board_info(0, aml_i2c_bus_info, ARRAY_SIZE(aml_i2c_bus_info)); return 0; } #if defined(CONFIG_TVIN_BT656IN) static void __init bt656in_pinmux_init(void) { set_mio_mux(3, 0xf000); //mask--mux gpio_c3 to bt656 clk; mux gpioc[4:11] to be bt656 dt_in CLEAR_CBUS_REG_MASK(PERIPHS_PIN_MUX_2, 0x0f000000); CLEAR_CBUS_REG_MASK(PERIPHS_PIN_MUX_3, 0x01be07fc); CLEAR_CBUS_REG_MASK(PERIPHS_PIN_MUX_4, 0x0c000000); } #endif static void __init eth_pinmux_init(void) { eth_set_pinmux(ETH_BANK2_GPIOD15_D23,ETH_CLK_OUT_GPIOD24_REG5_1,0); //power hold //setbits_le32(P_PREG_AGPIO_O,(1<<8)); //clrbits_le32(P_PREG_AGPIO_EN_N,(1<<8)); //set_gpio_mode(GPIOA_bank_bit(4),GPIOA_bit_bit0_14(4),GPIO_OUTPUT_MODE); //set_gpio_val(GPIOA_bank_bit(4),GPIOA_bit_bit0_14(4),1); CLEAR_CBUS_REG_MASK(PREG_ETHERNET_ADDR0, 1); SET_CBUS_REG_MASK(PREG_ETHERNET_ADDR0, (1 << 1)); SET_CBUS_REG_MASK(PREG_ETHERNET_ADDR0, 1); udelay(100); /*reset*/ #if 0 // set_gpio_mode(GPIOE_bank_bit16_21(16),GPIOE_bit_bit16_21(16),GPIO_OUTPUT_MODE); // set_gpio_val(GPIOE_bank_bit16_21(16),GPIOE_bit_bit16_21(16),0); // udelay(100); //GPIOE_bank_bit16_21(16) reset end; // set_gpio_val(GPIOE_bank_bit16_21(16),GPIOE_bit_bit16_21(16),1); #endif aml_i2c_init(); } static void __init device_pinmux_init(void ) { u32 i,mask_data; for(i=0;i<13;i++) { switch(i) //reserve lcd pinmux for logo display.
{ case 0: mask_data=~(1<<11|1<<14); break; case 4: mask_data=~(1<<0|1<<2|1<<4); break; default: mask_data=0x7fffffff; break; } clear_mio_mux(i,mask_data); } /*other deivce power on*/ /*GPIOA_200e_bit4..usb/eth/YUV power on*/ set_gpio_mode(PREG_EGPIO,1<<4,GPIO_OUTPUT_MODE); set_gpio_val(PREG_EGPIO,1<<4,1); uart_set_pinmux(UART_PORT_A,PINMUX_UART_A); uart_set_pinmux(UART_PORT_B,PINMUX_UART_B); /*pinmux of eth*/ //eth_pinmux_init(); aml_i2c_init(); #if defined(CONFIG_TVIN_BT656IN) bt656in_pinmux_init(); #endif set_audio_pinmux(AUDIO_OUT_TEST_N); set_audio_pinmux(AUDIO_IN_JTAG); //set clk for wifi SET_CBUS_REG_MASK(PERIPHS_PIN_MUX_8, (1<<18)); CLEAR_CBUS_REG_MASK(PREG_EGPIO_EN_N, (1<<4)); } static void __init device_clk_setting(void) { /*Demod CLK for eth and sata*/ demod_apll_setting(0,1200*CLK_1M); /*eth clk*/ eth_clk_set(ETH_CLKSRC_APLL_CLK,400*CLK_1M,50*CLK_1M); } static void disable_unused_model(void) { CLK_GATE_OFF(VIDEO_IN); CLK_GATE_OFF(BT656_IN); CLK_GATE_OFF(ETHERNET); CLK_GATE_OFF(SATA); CLK_GATE_OFF(WIFI); video_dac_disable(); //audio_internal_dac_disable(); //disable wifi SET_CBUS_REG_MASK(HHI_GCLK_MPEG2, (1<<5)); SET_CBUS_REG_MASK(HHI_WIFI_CLK_CNTL, (1<<0)); __raw_writel(0xCFF,0xC9320ED8); __raw_writel((__raw_readl(0xC9320EF0))&0xF9FFFFFF,0xC9320EF0); CLEAR_CBUS_REG_MASK(HHI_GCLK_MPEG2, (1<<5)); CLEAR_CBUS_REG_MASK(HHI_WIFI_CLK_CNTL, (1<<0)); ///disable demod SET_CBUS_REG_MASK(HHI_DEMOD_CLK_CNTL, (1<<8));//enable demod core digital clock SET_CBUS_REG_MASK(HHI_DEMOD_PLL_CNTL, (1<<15));//enable demod adc clock CLEAR_APB_REG_MASK(0x4004,(1<<31)); //disable analog demod adc CLEAR_CBUS_REG_MASK(HHI_DEMOD_PLL_CNTL, (1<<15));//disable demod adc clock CLEAR_CBUS_REG_MASK(HHI_DEMOD_CLK_CNTL, (1<<8));//disable demod core digital clock } static void __init power_hold(void) { printk(KERN_INFO "power hold set high!\n"); set_gpio_val(GPIOA_bank_bit(8), GPIOA_bit_bit0_14(8), 1); set_gpio_mode(GPIOA_bank_bit(8), GPIOA_bit_bit0_14(8), GPIO_OUTPUT_MODE); /* PIN28, GPIOA_6, Pull high, For En_5V */ set_gpio_val(GPIOA_bank_bit(6), GPIOA_bit_bit0_14(6), 1); set_gpio_mode(GPIOA_bank_bit(6), GPIOA_bit_bit0_14(6), GPIO_OUTPUT_MODE); } #define GPIO_WLAN_IRQ ((GPIOD_bank_bit2_24(14)<<16) |GPIOD_bit_bit2_24(14)) static __init void m1_init_machine(void) { meson_cache_init(); power_hold(); pm_power_off = set_bat_off; device_clk_setting(); device_pinmux_init(); #ifdef CONFIG_CAMERA_GC0308 camera_power_on_init(); #endif #ifdef CONFIG_CAMERA_GT2005 camera_gt2005_power_on_init(); #endif platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs)); #ifdef CONFIG_USB_DWC_OTG_HCD set_usb_phy_clk(USB_PHY_CLOCK_SEL_XTAL_DIV2); set_usb_ctl_por(USB_CTL_INDEX_B,USB_CTL_POR_DISABLE); //disable usb_b lm_device_register(&usb_ld_a); #endif #ifdef CONFIG_SATA_DWC_AHCI set_sata_phy_clk(SATA_PHY_CLOCK_SEL_DEMOD_PLL); lm_device_register(&sata_ld); #endif #if defined(CONFIG_TOUCHSCREEN_ADS7846) ads7846_init_gpio(); spi_register_board_info(spi_board_info_list, ARRAY_SIZE(spi_board_info_list)); #endif disable_unused_model(); /* Set Ti WiFi interrupt pin */ gpio_direction_input(GPIO_WLAN_IRQ); /* set gpio interrupt #2 source=GPIOD_14, and triggered by falling edge(=1) */ gpio_enable_edge_int(64, 1, 2); printk(KERN_INFO "WLANPWR_EN set high!\n"); set_gpio_val(GPIOD_bank_bit2_24(15), GPIOD_bit_bit2_24(15), 1); set_gpio_mode(GPIOD_bank_bit2_24(15), GPIOD_bit_bit2_24(15), GPIO_OUTPUT_MODE); //power key led off!!! 
powerkey_led_onoff(0); printk(KERN_INFO "WIFI ENABLE : OK\n"); } /*VIDEO MEMORY MAPPING*/ static __initdata struct map_desc meson_video_mem_desc[] = { { .virtual = PAGE_ALIGN(__phys_to_virt(RESERVED_MEM_START)), .pfn = __phys_to_pfn(RESERVED_MEM_START), .length = RESERVED_MEM_END-RESERVED_MEM_START+1, .type = MT_DEVICE, }, }; static __init void m1_map_io(void) { meson_map_io(); iotable_init(meson_video_mem_desc, ARRAY_SIZE(meson_video_mem_desc)); } static __init void m1_irq_init(void) { meson_init_irq(); } static __init void m1_fixup(struct machine_desc *mach, struct tag *tag, char **cmdline, struct meminfo *m) { struct membank *pbank; m->nr_banks = 0; pbank=&m->bank[m->nr_banks]; pbank->start = PAGE_ALIGN(PHYS_MEM_START); pbank->size = SZ_64M & PAGE_MASK; pbank->node = PHYS_TO_NID(PHYS_MEM_START); m->nr_banks++; pbank=&m->bank[m->nr_banks]; pbank->start = PAGE_ALIGN(RESERVED_MEM_END+1); pbank->size = (PHYS_MEM_END-RESERVED_MEM_END) & PAGE_MASK; pbank->node = PHYS_TO_NID(RESERVED_MEM_END+1); m->nr_banks++; } MACHINE_START(MESON_8726M, "AMLOGIC MESON-M1 8726M SZ") .phys_io = MESON_PERIPHS1_PHYS_BASE, .io_pg_offst = (MESON_PERIPHS1_PHYS_BASE >> 18) & 0xfffc, .boot_params = BOOT_PARAMS_OFFSET, .map_io = m1_map_io, .init_irq = m1_irq_init, .timer = &meson_sys_timer, .init_machine = m1_init_machine, .fixup = m1_fixup, .video_start = RESERVED_MEM_START, .video_end = RESERVED_MEM_END, MACHINE_END int get_board_version(void) { return board_ver; } EXPORT_SYMBOL(get_board_version); static int uboot_ver = 1; int get_uboot_version(void) { return uboot_ver; } EXPORT_SYMBOL(get_uboot_version); static int __init board_ver_setup(char *s) { if(strncmp(s, "v2", 2)==0){ board_ver = 2; //#if defined(CONFIG_KEY_INPUT_CUSTOM_AM) || defined(CONFIG_KEY_INPUT_CUSTOM_AM_MODULE) // key_input_pdata.config = 2; //#endif } else board_ver = 1; printk("board_ver = %s\n", s); return 0; } __setup("board_ver=",board_ver_setup) ; static int __init uboot_ver_setup(char *s) { if(strncmp(s, "v2", 2)==0) uboot_ver = 2; else uboot_ver = 1; printk("uboot_ver = %s\n", s); return 0; } __setup("uboot_ver=",uboot_ver_setup) ;
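The level-to-duty mapping in aml_8726m_set_bl_level() above is a piecewise-linear curve that differs per board revision. The stand-alone sketch below isolates just that arithmetic so it can be checked on a host; compute_cs_level() is a hypothetical helper name, the constants are copied from the driver, and the PWM register writes are only described in comments.

/*
 * Sketch: backlight level (0..255) -> PWM low/high counts, mirroring
 * aml_8726m_set_bl_level() above. compute_cs_level() is an illustrative
 * name, not part of the board file.
 */
#include <stdio.h>

#define BL_MAX_LEVEL 60000

static unsigned compute_cs_level(int board_ver, unsigned level)
{
    if (board_ver == 1) {
        if (level <= 40)
            return 28740;                       /* floor for v1 boards */
        if (level < 152)
            return (level - 31) * 260 + 28740;  /* linear ramp, tops out near 60000 */
        return BL_MAX_LEVEL;
    }
    /* v2 boards: plain linear ramp; 255 * 235 = 59925, close to BL_MAX_LEVEL */
    if (level == 0)
        return 0;
    if (level < 255)
        return level * 235;
    return BL_MAX_LEVEL;
}

int main(void)
{
    unsigned low = compute_cs_level(2, 128);
    unsigned hi = BL_MAX_LEVEL - low;
    /* The driver writes low/50 and hi/50 into the two 16-bit fields of PWM_PWM_A. */
    printf("low=%u hi=%u duty=%.1f%%\n", low, hi, 100.0 * low / BL_MAX_LEVEL);
    return 0;
}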
gpl-2.0
joeyli/qemu-acpitad
savevm.c
17
38186
/* * QEMU System Emulator * * Copyright (c) 2003-2008 Fabrice Bellard * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "config-host.h" #include "qemu-common.h" #include "hw/boards.h" #include "hw/hw.h" #include "hw/qdev.h" #include "net/net.h" #include "monitor/monitor.h" #include "sysemu/sysemu.h" #include "qemu/timer.h" #include "audio/audio.h" #include "migration/migration.h" #include "qemu/sockets.h" #include "qemu/queue.h" #include "sysemu/cpus.h" #include "exec/memory.h" #include "qmp-commands.h" #include "trace.h" #include "qemu/iov.h" #include "block/snapshot.h" #include "block/qapi.h" #ifndef ETH_P_RARP #define ETH_P_RARP 0x8035 #endif #define ARP_HTYPE_ETH 0x0001 #define ARP_PTYPE_IP 0x0800 #define ARP_OP_REQUEST_REV 0x3 static int announce_self_create(uint8_t *buf, uint8_t *mac_addr) { /* Ethernet header. */ memset(buf, 0xff, 6); /* destination MAC addr */ memcpy(buf + 6, mac_addr, 6); /* source MAC addr */ *(uint16_t *)(buf + 12) = htons(ETH_P_RARP); /* ethertype */ /* RARP header. */ *(uint16_t *)(buf + 14) = htons(ARP_HTYPE_ETH); /* hardware addr space */ *(uint16_t *)(buf + 16) = htons(ARP_PTYPE_IP); /* protocol addr space */ *(buf + 18) = 6; /* hardware addr length (ethernet) */ *(buf + 19) = 4; /* protocol addr length (IPv4) */ *(uint16_t *)(buf + 20) = htons(ARP_OP_REQUEST_REV); /* opcode */ memcpy(buf + 22, mac_addr, 6); /* source hw addr */ memset(buf + 28, 0x00, 4); /* source protocol addr */ memcpy(buf + 32, mac_addr, 6); /* target hw addr */ memset(buf + 38, 0x00, 4); /* target protocol addr */ /* Padding to get up to 60 bytes (ethernet min packet size, minus FCS). */ memset(buf + 42, 0x00, 18); return 60; /* len (FCS will be added by hardware) */ } static void qemu_announce_self_iter(NICState *nic, void *opaque) { uint8_t buf[60]; int len; trace_qemu_announce_self_iter(qemu_ether_ntoa(&nic->conf->macaddr)); len = announce_self_create(buf, nic->conf->macaddr.a); qemu_send_packet_raw(qemu_get_queue(nic), buf, len); } static void qemu_announce_self_once(void *opaque) { static int count = SELF_ANNOUNCE_ROUNDS; QEMUTimer *timer = *(QEMUTimer **)opaque; qemu_foreach_nic(qemu_announce_self_iter, NULL); if (--count) { /* delay 50ms, 150ms, 250ms, ... 
*/ timer_mod(timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + self_announce_delay(count)); } else { timer_del(timer); timer_free(timer); } } void qemu_announce_self(void) { static QEMUTimer *timer; timer = timer_new_ms(QEMU_CLOCK_REALTIME, qemu_announce_self_once, &timer); qemu_announce_self_once(&timer); } /***********************************************************/ /* savevm/loadvm support */ static ssize_t block_writev_buffer(void *opaque, struct iovec *iov, int iovcnt, int64_t pos) { int ret; QEMUIOVector qiov; qemu_iovec_init_external(&qiov, iov, iovcnt); ret = bdrv_writev_vmstate(opaque, &qiov, pos); if (ret < 0) { return ret; } return qiov.size; } static int block_put_buffer(void *opaque, const uint8_t *buf, int64_t pos, int size) { bdrv_save_vmstate(opaque, buf, pos, size); return size; } static int block_get_buffer(void *opaque, uint8_t *buf, int64_t pos, int size) { return bdrv_load_vmstate(opaque, buf, pos, size); } static int bdrv_fclose(void *opaque) { return bdrv_flush(opaque); } static const QEMUFileOps bdrv_read_ops = { .get_buffer = block_get_buffer, .close = bdrv_fclose }; static const QEMUFileOps bdrv_write_ops = { .put_buffer = block_put_buffer, .writev_buffer = block_writev_buffer, .close = bdrv_fclose }; static QEMUFile *qemu_fopen_bdrv(BlockDriverState *bs, int is_writable) { if (is_writable) { return qemu_fopen_ops(bs, &bdrv_write_ops); } return qemu_fopen_ops(bs, &bdrv_read_ops); } /* QEMUFile timer support. * Not in qemu-file.c to not add qemu-timer.c as dependency to qemu-file.c */ void timer_put(QEMUFile *f, QEMUTimer *ts) { uint64_t expire_time; expire_time = timer_expire_time_ns(ts); qemu_put_be64(f, expire_time); } void timer_get(QEMUFile *f, QEMUTimer *ts) { uint64_t expire_time; expire_time = qemu_get_be64(f); if (expire_time != -1) { timer_mod_ns(ts, expire_time); } else { timer_del(ts); } } /* VMState timer support. * Not in vmstate.c to not add qemu-timer.c as dependency to vmstate.c */ static int get_timer(QEMUFile *f, void *pv, size_t size) { QEMUTimer *v = pv; timer_get(f, v); return 0; } static void put_timer(QEMUFile *f, void *pv, size_t size) { QEMUTimer *v = pv; timer_put(f, v); } const VMStateInfo vmstate_info_timer = { .name = "timer", .get = get_timer, .put = put_timer, }; typedef struct CompatEntry { char idstr[256]; int instance_id; } CompatEntry; typedef struct SaveStateEntry { QTAILQ_ENTRY(SaveStateEntry) entry; char idstr[256]; int instance_id; int alias_id; int version_id; int section_id; SaveVMHandlers *ops; const VMStateDescription *vmsd; void *opaque; CompatEntry *compat; int is_ram; } SaveStateEntry; static QTAILQ_HEAD(savevm_handlers, SaveStateEntry) savevm_handlers = QTAILQ_HEAD_INITIALIZER(savevm_handlers); static int global_section_id; static void dump_vmstate_vmsd(FILE *out_file, const VMStateDescription *vmsd, int indent, bool is_subsection); static void dump_vmstate_vmsf(FILE *out_file, const VMStateField *field, int indent) { fprintf(out_file, "%*s{\n", indent, ""); indent += 2; fprintf(out_file, "%*s\"field\": \"%s\",\n", indent, "", field->name); fprintf(out_file, "%*s\"version_id\": %d,\n", indent, "", field->version_id); fprintf(out_file, "%*s\"field_exists\": %s,\n", indent, "", field->field_exists ? 
"true" : "false"); fprintf(out_file, "%*s\"size\": %zu", indent, "", field->size); if (field->vmsd != NULL) { fprintf(out_file, ",\n"); dump_vmstate_vmsd(out_file, field->vmsd, indent, false); } fprintf(out_file, "\n%*s}", indent - 2, ""); } static void dump_vmstate_vmss(FILE *out_file, const VMStateSubsection *subsection, int indent) { if (subsection->vmsd != NULL) { dump_vmstate_vmsd(out_file, subsection->vmsd, indent, true); } } static void dump_vmstate_vmsd(FILE *out_file, const VMStateDescription *vmsd, int indent, bool is_subsection) { if (is_subsection) { fprintf(out_file, "%*s{\n", indent, ""); } else { fprintf(out_file, "%*s\"%s\": {\n", indent, "", "Description"); } indent += 2; fprintf(out_file, "%*s\"name\": \"%s\",\n", indent, "", vmsd->name); fprintf(out_file, "%*s\"version_id\": %d,\n", indent, "", vmsd->version_id); fprintf(out_file, "%*s\"minimum_version_id\": %d", indent, "", vmsd->minimum_version_id); if (vmsd->fields != NULL) { const VMStateField *field = vmsd->fields; bool first; fprintf(out_file, ",\n%*s\"Fields\": [\n", indent, ""); first = true; while (field->name != NULL) { if (field->flags & VMS_MUST_EXIST) { /* Ignore VMSTATE_VALIDATE bits; these don't get migrated */ field++; continue; } if (!first) { fprintf(out_file, ",\n"); } dump_vmstate_vmsf(out_file, field, indent + 2); field++; first = false; } fprintf(out_file, "\n%*s]", indent, ""); } if (vmsd->subsections != NULL) { const VMStateSubsection *subsection = vmsd->subsections; bool first; fprintf(out_file, ",\n%*s\"Subsections\": [\n", indent, ""); first = true; while (subsection->vmsd != NULL) { if (!first) { fprintf(out_file, ",\n"); } dump_vmstate_vmss(out_file, subsection, indent + 2); subsection++; first = false; } fprintf(out_file, "\n%*s]", indent, ""); } fprintf(out_file, "\n%*s}", indent - 2, ""); } static void dump_machine_type(FILE *out_file) { MachineClass *mc; mc = MACHINE_GET_CLASS(current_machine); fprintf(out_file, " \"vmschkmachine\": {\n"); fprintf(out_file, " \"Name\": \"%s\"\n", mc->name); fprintf(out_file, " },\n"); } void dump_vmstate_json_to_file(FILE *out_file) { GSList *list, *elt; bool first; fprintf(out_file, "{\n"); dump_machine_type(out_file); first = true; list = object_class_get_list(TYPE_DEVICE, true); for (elt = list; elt; elt = elt->next) { DeviceClass *dc = OBJECT_CLASS_CHECK(DeviceClass, elt->data, TYPE_DEVICE); const char *name; int indent = 2; if (!dc->vmsd) { continue; } if (!first) { fprintf(out_file, ",\n"); } name = object_class_get_name(OBJECT_CLASS(dc)); fprintf(out_file, "%*s\"%s\": {\n", indent, "", name); indent += 2; fprintf(out_file, "%*s\"Name\": \"%s\",\n", indent, "", name); fprintf(out_file, "%*s\"version_id\": %d,\n", indent, "", dc->vmsd->version_id); fprintf(out_file, "%*s\"minimum_version_id\": %d,\n", indent, "", dc->vmsd->minimum_version_id); dump_vmstate_vmsd(out_file, dc->vmsd, indent, false); fprintf(out_file, "\n%*s}", indent - 2, ""); first = false; } fprintf(out_file, "\n}\n"); fclose(out_file); } static int calculate_new_instance_id(const char *idstr) { SaveStateEntry *se; int instance_id = 0; QTAILQ_FOREACH(se, &savevm_handlers, entry) { if (strcmp(idstr, se->idstr) == 0 && instance_id <= se->instance_id) { instance_id = se->instance_id + 1; } } return instance_id; } static int calculate_compat_instance_id(const char *idstr) { SaveStateEntry *se; int instance_id = 0; QTAILQ_FOREACH(se, &savevm_handlers, entry) { if (!se->compat) { continue; } if (strcmp(idstr, se->compat->idstr) == 0 && instance_id <= se->compat->instance_id) { instance_id = 
se->compat->instance_id + 1; } } return instance_id; } /* TODO: Individual devices generally have very little idea about the rest of the system, so instance_id should be removed/replaced. Meanwhile pass -1 as instance_id if you do not already have a clearly distinguishing id for all instances of your device class. */ int register_savevm_live(DeviceState *dev, const char *idstr, int instance_id, int version_id, SaveVMHandlers *ops, void *opaque) { SaveStateEntry *se; se = g_malloc0(sizeof(SaveStateEntry)); se->version_id = version_id; se->section_id = global_section_id++; se->ops = ops; se->opaque = opaque; se->vmsd = NULL; /* if this is a live_savem then set is_ram */ if (ops->save_live_setup != NULL) { se->is_ram = 1; } if (dev) { char *id = qdev_get_dev_path(dev); if (id) { pstrcpy(se->idstr, sizeof(se->idstr), id); pstrcat(se->idstr, sizeof(se->idstr), "/"); g_free(id); se->compat = g_malloc0(sizeof(CompatEntry)); pstrcpy(se->compat->idstr, sizeof(se->compat->idstr), idstr); se->compat->instance_id = instance_id == -1 ? calculate_compat_instance_id(idstr) : instance_id; instance_id = -1; } } pstrcat(se->idstr, sizeof(se->idstr), idstr); if (instance_id == -1) { se->instance_id = calculate_new_instance_id(se->idstr); } else { se->instance_id = instance_id; } assert(!se->compat || se->instance_id == 0); /* add at the end of list */ QTAILQ_INSERT_TAIL(&savevm_handlers, se, entry); return 0; } int register_savevm(DeviceState *dev, const char *idstr, int instance_id, int version_id, SaveStateHandler *save_state, LoadStateHandler *load_state, void *opaque) { SaveVMHandlers *ops = g_malloc0(sizeof(SaveVMHandlers)); ops->save_state = save_state; ops->load_state = load_state; return register_savevm_live(dev, idstr, instance_id, version_id, ops, opaque); } void unregister_savevm(DeviceState *dev, const char *idstr, void *opaque) { SaveStateEntry *se, *new_se; char id[256] = ""; if (dev) { char *path = qdev_get_dev_path(dev); if (path) { pstrcpy(id, sizeof(id), path); pstrcat(id, sizeof(id), "/"); g_free(path); } } pstrcat(id, sizeof(id), idstr); QTAILQ_FOREACH_SAFE(se, &savevm_handlers, entry, new_se) { if (strcmp(se->idstr, id) == 0 && se->opaque == opaque) { QTAILQ_REMOVE(&savevm_handlers, se, entry); if (se->compat) { g_free(se->compat); } g_free(se->ops); g_free(se); } } } int vmstate_register_with_alias_id(DeviceState *dev, int instance_id, const VMStateDescription *vmsd, void *opaque, int alias_id, int required_for_version) { SaveStateEntry *se; /* If this triggers, alias support can be dropped for the vmsd. */ assert(alias_id == -1 || required_for_version >= vmsd->minimum_version_id); se = g_malloc0(sizeof(SaveStateEntry)); se->version_id = vmsd->version_id; se->section_id = global_section_id++; se->opaque = opaque; se->vmsd = vmsd; se->alias_id = alias_id; if (dev) { char *id = qdev_get_dev_path(dev); if (id) { pstrcpy(se->idstr, sizeof(se->idstr), id); pstrcat(se->idstr, sizeof(se->idstr), "/"); g_free(id); se->compat = g_malloc0(sizeof(CompatEntry)); pstrcpy(se->compat->idstr, sizeof(se->compat->idstr), vmsd->name); se->compat->instance_id = instance_id == -1 ? 
calculate_compat_instance_id(vmsd->name) : instance_id; instance_id = -1; } } pstrcat(se->idstr, sizeof(se->idstr), vmsd->name); if (instance_id == -1) { se->instance_id = calculate_new_instance_id(se->idstr); } else { se->instance_id = instance_id; } assert(!se->compat || se->instance_id == 0); /* add at the end of list */ QTAILQ_INSERT_TAIL(&savevm_handlers, se, entry); return 0; } void vmstate_unregister(DeviceState *dev, const VMStateDescription *vmsd, void *opaque) { SaveStateEntry *se, *new_se; QTAILQ_FOREACH_SAFE(se, &savevm_handlers, entry, new_se) { if (se->vmsd == vmsd && se->opaque == opaque) { QTAILQ_REMOVE(&savevm_handlers, se, entry); if (se->compat) { g_free(se->compat); } g_free(se); } } } static int vmstate_load(QEMUFile *f, SaveStateEntry *se, int version_id) { trace_vmstate_load(se->idstr, se->vmsd ? se->vmsd->name : "(old)"); if (!se->vmsd) { /* Old style */ return se->ops->load_state(f, se->opaque, version_id); } return vmstate_load_state(f, se->vmsd, se->opaque, version_id); } static void vmstate_save(QEMUFile *f, SaveStateEntry *se) { trace_vmstate_save(se->idstr, se->vmsd ? se->vmsd->name : "(old)"); if (!se->vmsd) { /* Old style */ se->ops->save_state(f, se->opaque); return; } vmstate_save_state(f, se->vmsd, se->opaque); } bool qemu_savevm_state_blocked(Error **errp) { SaveStateEntry *se; QTAILQ_FOREACH(se, &savevm_handlers, entry) { if (se->vmsd && se->vmsd->unmigratable) { error_setg(errp, "State blocked by non-migratable device '%s'", se->idstr); return true; } } return false; } void qemu_savevm_state_begin(QEMUFile *f, const MigrationParams *params) { SaveStateEntry *se; int ret; trace_savevm_state_begin(); QTAILQ_FOREACH(se, &savevm_handlers, entry) { if (!se->ops || !se->ops->set_params) { continue; } se->ops->set_params(params, se->opaque); } qemu_put_be32(f, QEMU_VM_FILE_MAGIC); qemu_put_be32(f, QEMU_VM_FILE_VERSION); QTAILQ_FOREACH(se, &savevm_handlers, entry) { int len; if (!se->ops || !se->ops->save_live_setup) { continue; } if (se->ops && se->ops->is_active) { if (!se->ops->is_active(se->opaque)) { continue; } } /* Section type */ qemu_put_byte(f, QEMU_VM_SECTION_START); qemu_put_be32(f, se->section_id); /* ID string */ len = strlen(se->idstr); qemu_put_byte(f, len); qemu_put_buffer(f, (uint8_t *)se->idstr, len); qemu_put_be32(f, se->instance_id); qemu_put_be32(f, se->version_id); ret = se->ops->save_live_setup(f, se->opaque); if (ret < 0) { qemu_file_set_error(f, ret); break; } } } /* * this function has three return values: * negative: there was an error, and we have -errno. * 0 : We haven't finished, the caller has to go again * 1 : We have finished, we can go to the complete phase */ int qemu_savevm_state_iterate(QEMUFile *f) { SaveStateEntry *se; int ret = 1; trace_savevm_state_iterate(); QTAILQ_FOREACH(se, &savevm_handlers, entry) { if (!se->ops || !se->ops->save_live_iterate) { continue; } if (se->ops && se->ops->is_active) { if (!se->ops->is_active(se->opaque)) { continue; } } if (qemu_file_rate_limit(f)) { return 0; } trace_savevm_section_start(se->idstr, se->section_id); /* Section type */ qemu_put_byte(f, QEMU_VM_SECTION_PART); qemu_put_be32(f, se->section_id); ret = se->ops->save_live_iterate(f, se->opaque); trace_savevm_section_end(se->idstr, se->section_id); if (ret < 0) { qemu_file_set_error(f, ret); } if (ret <= 0) { /* Do not proceed to the next vmstate before this one reported completion of the current stage. This serializes the migration and reduces the probability that a faster changing state is synchronized over and over again.
*/ break; } } return ret; } void qemu_savevm_state_complete(QEMUFile *f) { SaveStateEntry *se; int ret; trace_savevm_state_complete(); cpu_synchronize_all_states(); QTAILQ_FOREACH(se, &savevm_handlers, entry) { if (!se->ops || !se->ops->save_live_complete) { continue; } if (se->ops && se->ops->is_active) { if (!se->ops->is_active(se->opaque)) { continue; } } trace_savevm_section_start(se->idstr, se->section_id); /* Section type */ qemu_put_byte(f, QEMU_VM_SECTION_END); qemu_put_be32(f, se->section_id); ret = se->ops->save_live_complete(f, se->opaque); trace_savevm_section_end(se->idstr, se->section_id); if (ret < 0) { qemu_file_set_error(f, ret); return; } } QTAILQ_FOREACH(se, &savevm_handlers, entry) { int len; if ((!se->ops || !se->ops->save_state) && !se->vmsd) { continue; } trace_savevm_section_start(se->idstr, se->section_id); /* Section type */ qemu_put_byte(f, QEMU_VM_SECTION_FULL); qemu_put_be32(f, se->section_id); /* ID string */ len = strlen(se->idstr); qemu_put_byte(f, len); qemu_put_buffer(f, (uint8_t *)se->idstr, len); qemu_put_be32(f, se->instance_id); qemu_put_be32(f, se->version_id); vmstate_save(f, se); trace_savevm_section_end(se->idstr, se->section_id); } qemu_put_byte(f, QEMU_VM_EOF); qemu_fflush(f); } uint64_t qemu_savevm_state_pending(QEMUFile *f, uint64_t max_size) { SaveStateEntry *se; uint64_t ret = 0; QTAILQ_FOREACH(se, &savevm_handlers, entry) { if (!se->ops || !se->ops->save_live_pending) { continue; } if (se->ops && se->ops->is_active) { if (!se->ops->is_active(se->opaque)) { continue; } } ret += se->ops->save_live_pending(f, se->opaque, max_size); } return ret; } void qemu_savevm_state_cancel(void) { SaveStateEntry *se; trace_savevm_state_cancel(); QTAILQ_FOREACH(se, &savevm_handlers, entry) { if (se->ops && se->ops->cancel) { se->ops->cancel(se->opaque); } } } static int qemu_savevm_state(QEMUFile *f) { int ret; MigrationParams params = { .blk = 0, .shared = 0 }; if (qemu_savevm_state_blocked(NULL)) { return -EINVAL; } qemu_mutex_unlock_iothread(); qemu_savevm_state_begin(f, &params); qemu_mutex_lock_iothread(); while (qemu_file_get_error(f) == 0) { if (qemu_savevm_state_iterate(f) > 0) { break; } } ret = qemu_file_get_error(f); if (ret == 0) { qemu_savevm_state_complete(f); ret = qemu_file_get_error(f); } if (ret != 0) { qemu_savevm_state_cancel(); } return ret; } static int qemu_save_device_state(QEMUFile *f) { SaveStateEntry *se; qemu_put_be32(f, QEMU_VM_FILE_MAGIC); qemu_put_be32(f, QEMU_VM_FILE_VERSION); cpu_synchronize_all_states(); QTAILQ_FOREACH(se, &savevm_handlers, entry) { int len; if (se->is_ram) { continue; } if ((!se->ops || !se->ops->save_state) && !se->vmsd) { continue; } /* Section type */ qemu_put_byte(f, QEMU_VM_SECTION_FULL); qemu_put_be32(f, se->section_id); /* ID string */ len = strlen(se->idstr); qemu_put_byte(f, len); qemu_put_buffer(f, (uint8_t *)se->idstr, len); qemu_put_be32(f, se->instance_id); qemu_put_be32(f, se->version_id); vmstate_save(f, se); } qemu_put_byte(f, QEMU_VM_EOF); return qemu_file_get_error(f); } static SaveStateEntry *find_se(const char *idstr, int instance_id) { SaveStateEntry *se; QTAILQ_FOREACH(se, &savevm_handlers, entry) { if (!strcmp(se->idstr, idstr) && (instance_id == se->instance_id || instance_id == se->alias_id)) return se; /* Migrating from an older version? 
*/ if (strstr(se->idstr, idstr) && se->compat) { if (!strcmp(se->compat->idstr, idstr) && (instance_id == se->compat->instance_id || instance_id == se->alias_id)) return se; } } return NULL; } typedef struct LoadStateEntry { QLIST_ENTRY(LoadStateEntry) entry; SaveStateEntry *se; int section_id; int version_id; } LoadStateEntry; int qemu_loadvm_state(QEMUFile *f) { QLIST_HEAD(, LoadStateEntry) loadvm_handlers = QLIST_HEAD_INITIALIZER(loadvm_handlers); LoadStateEntry *le, *new_le; uint8_t section_type; unsigned int v; int ret; if (qemu_savevm_state_blocked(NULL)) { return -EINVAL; } v = qemu_get_be32(f); if (v != QEMU_VM_FILE_MAGIC) { return -EINVAL; } v = qemu_get_be32(f); if (v == QEMU_VM_FILE_VERSION_COMPAT) { fprintf(stderr, "SaveVM v2 format is obsolete and doesn't work anymore\n"); return -ENOTSUP; } if (v != QEMU_VM_FILE_VERSION) { return -ENOTSUP; } while ((section_type = qemu_get_byte(f)) != QEMU_VM_EOF) { uint32_t instance_id, version_id, section_id; SaveStateEntry *se; char idstr[257]; int len; switch (section_type) { case QEMU_VM_SECTION_START: case QEMU_VM_SECTION_FULL: /* Read section start */ section_id = qemu_get_be32(f); len = qemu_get_byte(f); qemu_get_buffer(f, (uint8_t *)idstr, len); idstr[len] = 0; instance_id = qemu_get_be32(f); version_id = qemu_get_be32(f); /* Find savevm section */ se = find_se(idstr, instance_id); if (se == NULL) { fprintf(stderr, "Unknown savevm section or instance '%s' %d\n", idstr, instance_id); ret = -EINVAL; goto out; } /* Validate version */ if (version_id > se->version_id) { fprintf(stderr, "savevm: unsupported version %d for '%s' v%d\n", version_id, idstr, se->version_id); ret = -EINVAL; goto out; } /* Add entry */ le = g_malloc0(sizeof(*le)); le->se = se; le->section_id = section_id; le->version_id = version_id; QLIST_INSERT_HEAD(&loadvm_handlers, le, entry); ret = vmstate_load(f, le->se, le->version_id); if (ret < 0) { fprintf(stderr, "qemu: warning: error while loading state for instance 0x%x of device '%s'\n", instance_id, idstr); goto out; } break; case QEMU_VM_SECTION_PART: case QEMU_VM_SECTION_END: section_id = qemu_get_be32(f); QLIST_FOREACH(le, &loadvm_handlers, entry) { if (le->section_id == section_id) { break; } } if (le == NULL) { fprintf(stderr, "Unknown savevm section %d\n", section_id); ret = -EINVAL; goto out; } ret = vmstate_load(f, le->se, le->version_id); if (ret < 0) { fprintf(stderr, "qemu: warning: error while loading state section id %d\n", section_id); goto out; } break; default: fprintf(stderr, "Unknown savevm section type %d\n", section_type); ret = -EINVAL; goto out; } } cpu_synchronize_all_post_init(); ret = 0; out: QLIST_FOREACH_SAFE(le, &loadvm_handlers, entry, new_le) { QLIST_REMOVE(le, entry); g_free(le); } if (ret == 0) { ret = qemu_file_get_error(f); } return ret; } static BlockDriverState *find_vmstate_bs(void) { BlockDriverState *bs = NULL; while ((bs = bdrv_next(bs))) { if (bdrv_can_snapshot(bs)) { return bs; } } return NULL; } /* * Deletes snapshots of a given name in all opened images.
*/ static int del_existing_snapshots(Monitor *mon, const char *name) { BlockDriverState *bs; QEMUSnapshotInfo sn1, *snapshot = &sn1; Error *err = NULL; bs = NULL; while ((bs = bdrv_next(bs))) { if (bdrv_can_snapshot(bs) && bdrv_snapshot_find(bs, snapshot, name) >= 0) { bdrv_snapshot_delete_by_id_or_name(bs, name, &err); if (err) { monitor_printf(mon, "Error while deleting snapshot on device '%s':" " %s\n", bdrv_get_device_name(bs), error_get_pretty(err)); error_free(err); return -1; } } } return 0; } void do_savevm(Monitor *mon, const QDict *qdict) { BlockDriverState *bs, *bs1; QEMUSnapshotInfo sn1, *sn = &sn1, old_sn1, *old_sn = &old_sn1; int ret; QEMUFile *f; int saved_vm_running; uint64_t vm_state_size; qemu_timeval tv; struct tm tm; const char *name = qdict_get_try_str(qdict, "name"); /* Verify if there is a device that doesn't support snapshots and is writable */ bs = NULL; while ((bs = bdrv_next(bs))) { if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) { continue; } if (!bdrv_can_snapshot(bs)) { monitor_printf(mon, "Device '%s' is writable but does not support snapshots.\n", bdrv_get_device_name(bs)); return; } } bs = find_vmstate_bs(); if (!bs) { monitor_printf(mon, "No block device can accept snapshots\n"); return; } saved_vm_running = runstate_is_running(); vm_stop(RUN_STATE_SAVE_VM); memset(sn, 0, sizeof(*sn)); /* fill auxiliary fields */ qemu_gettimeofday(&tv); sn->date_sec = tv.tv_sec; sn->date_nsec = tv.tv_usec * 1000; sn->vm_clock_nsec = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); if (name) { ret = bdrv_snapshot_find(bs, old_sn, name); if (ret >= 0) { pstrcpy(sn->name, sizeof(sn->name), old_sn->name); pstrcpy(sn->id_str, sizeof(sn->id_str), old_sn->id_str); } else { pstrcpy(sn->name, sizeof(sn->name), name); } } else { /* cast below needed for OpenBSD where tv_sec is still 'long' */ localtime_r((const time_t *)&tv.tv_sec, &tm); strftime(sn->name, sizeof(sn->name), "vm-%Y%m%d%H%M%S", &tm); } /* Delete old snapshots of the same name */ if (name && del_existing_snapshots(mon, name) < 0) { goto the_end; } /* save the VM state */ f = qemu_fopen_bdrv(bs, 1); if (!f) { monitor_printf(mon, "Could not open VM state file\n"); goto the_end; } ret = qemu_savevm_state(f); vm_state_size = qemu_ftell(f); qemu_fclose(f); if (ret < 0) { monitor_printf(mon, "Error %d while writing VM\n", ret); goto the_end; } /* create the snapshots */ bs1 = NULL; while ((bs1 = bdrv_next(bs1))) { if (bdrv_can_snapshot(bs1)) { /* Write VM state size only to the image that contains the state */ sn->vm_state_size = (bs == bs1 ? 
vm_state_size : 0); ret = bdrv_snapshot_create(bs1, sn); if (ret < 0) { monitor_printf(mon, "Error while creating snapshot on '%s'\n", bdrv_get_device_name(bs1)); } } } the_end: if (saved_vm_running) { vm_start(); } } void qmp_xen_save_devices_state(const char *filename, Error **errp) { QEMUFile *f; int saved_vm_running; int ret; saved_vm_running = runstate_is_running(); vm_stop(RUN_STATE_SAVE_VM); f = qemu_fopen(filename, "wb"); if (!f) { error_setg_file_open(errp, errno, filename); goto the_end; } ret = qemu_save_device_state(f); qemu_fclose(f); if (ret < 0) { error_set(errp, QERR_IO_ERROR); } the_end: if (saved_vm_running) { vm_start(); } } int load_vmstate(const char *name) { BlockDriverState *bs, *bs_vm_state; QEMUSnapshotInfo sn; QEMUFile *f; int ret; bs_vm_state = find_vmstate_bs(); if (!bs_vm_state) { error_report("No block device supports snapshots"); return -ENOTSUP; } /* Don't even try to load empty VM states */ ret = bdrv_snapshot_find(bs_vm_state, &sn, name); if (ret < 0) { return ret; } else if (sn.vm_state_size == 0) { error_report("This is a disk-only snapshot. Revert to it offline " "using qemu-img."); return -EINVAL; } /* Verify if there is any device that doesn't support snapshots and is writable and check if the requested snapshot is available too. */ bs = NULL; while ((bs = bdrv_next(bs))) { if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) { continue; } if (!bdrv_can_snapshot(bs)) { error_report("Device '%s' is writable but does not support snapshots.", bdrv_get_device_name(bs)); return -ENOTSUP; } ret = bdrv_snapshot_find(bs, &sn, name); if (ret < 0) { error_report("Device '%s' does not have the requested snapshot '%s'", bdrv_get_device_name(bs), name); return ret; } } /* Flush all IO requests so they don't interfere with the new state. 
*/ bdrv_drain_all(); bs = NULL; while ((bs = bdrv_next(bs))) { if (bdrv_can_snapshot(bs)) { ret = bdrv_snapshot_goto(bs, name); if (ret < 0) { error_report("Error %d while activating snapshot '%s' on '%s'", ret, name, bdrv_get_device_name(bs)); return ret; } } } /* restore the VM state */ f = qemu_fopen_bdrv(bs_vm_state, 0); if (!f) { error_report("Could not open VM state file"); return -EINVAL; } qemu_system_reset(VMRESET_SILENT); ret = qemu_loadvm_state(f); qemu_fclose(f); if (ret < 0) { error_report("Error %d while loading VM state", ret); return ret; } return 0; } void do_delvm(Monitor *mon, const QDict *qdict) { BlockDriverState *bs, *bs1; Error *err = NULL; const char *name = qdict_get_str(qdict, "name"); bs = find_vmstate_bs(); if (!bs) { monitor_printf(mon, "No block device supports snapshots\n"); return; } bs1 = NULL; while ((bs1 = bdrv_next(bs1))) { if (bdrv_can_snapshot(bs1)) { bdrv_snapshot_delete_by_id_or_name(bs, name, &err); if (err) { monitor_printf(mon, "Error while deleting snapshot on device '%s':" " %s\n", bdrv_get_device_name(bs), error_get_pretty(err)); error_free(err); } } } } void do_info_snapshots(Monitor *mon, const QDict *qdict) { BlockDriverState *bs, *bs1; QEMUSnapshotInfo *sn_tab, *sn, s, *sn_info = &s; int nb_sns, i, ret, available; int total; int *available_snapshots; bs = find_vmstate_bs(); if (!bs) { monitor_printf(mon, "No available block device supports snapshots\n"); return; } nb_sns = bdrv_snapshot_list(bs, &sn_tab); if (nb_sns < 0) { monitor_printf(mon, "bdrv_snapshot_list: error %d\n", nb_sns); return; } if (nb_sns == 0) { monitor_printf(mon, "There is no snapshot available.\n"); return; } available_snapshots = g_malloc0(sizeof(int) * nb_sns); total = 0; for (i = 0; i < nb_sns; i++) { sn = &sn_tab[i]; available = 1; bs1 = NULL; while ((bs1 = bdrv_next(bs1))) { if (bdrv_can_snapshot(bs1) && bs1 != bs) { ret = bdrv_snapshot_find(bs1, sn_info, sn->id_str); if (ret < 0) { available = 0; break; } } } if (available) { available_snapshots[total] = i; total++; } } if (total > 0) { bdrv_snapshot_dump((fprintf_function)monitor_printf, mon, NULL); monitor_printf(mon, "\n"); for (i = 0; i < total; i++) { sn = &sn_tab[available_snapshots[i]]; bdrv_snapshot_dump((fprintf_function)monitor_printf, mon, sn); monitor_printf(mon, "\n"); } } else { monitor_printf(mon, "There is no suitable snapshot available\n"); } g_free(sn_tab); g_free(available_snapshots); } void vmstate_register_ram(MemoryRegion *mr, DeviceState *dev) { qemu_ram_set_idstr(memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK, memory_region_name(mr), dev); } void vmstate_unregister_ram(MemoryRegion *mr, DeviceState *dev) { qemu_ram_unset_idstr(memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK); } void vmstate_register_ram_global(MemoryRegion *mr) { vmstate_register_ram(mr, NULL); }
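The qemu_savevm_state_begin()/..._complete() pair above defines the section framing that qemu_loadvm_state() later parses. The sketch below restates just that byte layout in isolation; write_u8()/write_be32() are hypothetical stand-ins for the qemu_put_* helpers, and the section-type values are assumed from the QEMU headers rather than shown in this file.

/*
 * Sketch of the savevm section header produced by
 * qemu_savevm_state_begin()/..._complete() above. Only the byte layout
 * is the point; the stream starts with two be32 words (file magic and
 * version) and ends with a single QEMU_VM_EOF byte after the last section.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static void write_u8(FILE *f, uint8_t v) { fwrite(&v, 1, 1, f); }

static void write_be32(FILE *f, uint32_t v)
{
    uint8_t b[4] = { (uint8_t)(v >> 24), (uint8_t)(v >> 16),
                     (uint8_t)(v >> 8), (uint8_t)v };
    fwrite(b, 1, 4, f);
}

/* Section types as used above (values assumed from the QEMU headers). */
enum { VM_SECTION_START = 1, VM_SECTION_PART, VM_SECTION_END, VM_SECTION_FULL };

static void put_section_header(FILE *f, uint8_t type, uint32_t section_id,
                               const char *idstr, uint32_t instance_id,
                               uint32_t version_id)
{
    write_u8(f, type);
    write_be32(f, section_id);
    if (type == VM_SECTION_START || type == VM_SECTION_FULL) {
        /* Only START/FULL sections carry the identification block. */
        uint8_t len = (uint8_t)strlen(idstr);
        write_u8(f, len);
        fwrite(idstr, 1, len, f);
        write_be32(f, instance_id);
        write_be32(f, version_id);
    }
    /* ...device payload follows here. */
}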
gpl-2.0
lgeek/linux-tronsmart-orion-r28
drivers/gpu/drm/rockchip/rockchip_drm_connector.c
17
12879
/* * Copyright (C) ROCKCHIP, Inc. * Author:yzq<yzq@rock-chips.com> * * based on exynos_drm_connector.c * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> #include <drm/rockchip_drm.h> #include "rockchip_drm_drv.h" #include "rockchip_drm_encoder.h" #define to_rockchip_connector(x) container_of(x, struct rockchip_drm_connector,\ drm_connector) struct rockchip_drm_connector { struct drm_connector drm_connector; uint32_t encoder_id; struct rockchip_drm_manager *manager; uint32_t dpms; }; /* convert rockchip_video_timings to drm_display_mode */ static inline void convert_to_display_mode(struct drm_display_mode *mode, struct rockchip_drm_panel_info *panel) { struct fb_videomode *timing = &panel->timing; DRM_DEBUG_KMS("%s\n", __FILE__); mode->clock = timing->pixclock / 1000; mode->vrefresh = timing->refresh; mode->hdisplay = timing->xres; mode->hsync_start = mode->hdisplay + timing->right_margin; mode->hsync_end = mode->hsync_start + timing->hsync_len; mode->htotal = mode->hsync_end + timing->left_margin; mode->vdisplay = timing->yres; mode->vsync_start = mode->vdisplay + timing->lower_margin; mode->vsync_end = mode->vsync_start + timing->vsync_len; mode->vtotal = mode->vsync_end + timing->upper_margin; mode->width_mm = panel->width_mm; mode->height_mm = panel->height_mm; if (timing->vmode & FB_VMODE_INTERLACED) mode->flags |= DRM_MODE_FLAG_INTERLACE; if (timing->vmode & FB_VMODE_DOUBLE) mode->flags |= DRM_MODE_FLAG_DBLSCAN; } static inline void convert_fbmode_to_display_mode(struct drm_display_mode *mode, struct fb_videomode *timing) { DRM_DEBUG_KMS("%s\n", __FILE__); mode->clock = timing->pixclock / 1000; mode->vrefresh = timing->refresh; mode->hdisplay = timing->xres; mode->hsync_start = mode->hdisplay + timing->right_margin; mode->hsync_end = mode->hsync_start + timing->hsync_len; mode->htotal = mode->hsync_end + timing->left_margin; mode->vdisplay = timing->yres; mode->vsync_start = mode->vdisplay + timing->lower_margin; mode->vsync_end = mode->vsync_start + timing->vsync_len; mode->vtotal = mode->vsync_end + timing->upper_margin; if (timing->vmode & FB_VMODE_INTERLACED) mode->flags |= DRM_MODE_FLAG_INTERLACE; if (timing->vmode & FB_VMODE_DOUBLE) mode->flags |= DRM_MODE_FLAG_DBLSCAN; } /* convert drm_display_mode to rockchip_video_timings */ static inline void convert_to_video_timing(struct fb_videomode *timing, struct drm_display_mode *mode) { DRM_DEBUG_KMS("%s\n", __FILE__); memset(timing, 0, sizeof(*timing)); timing->pixclock = mode->clock * 1000; timing->refresh = drm_mode_vrefresh(mode); timing->xres = mode->hdisplay; timing->right_margin = mode->hsync_start - mode->hdisplay; timing->hsync_len = mode->hsync_end - mode->hsync_start; timing->left_margin = mode->htotal - mode->hsync_end; timing->yres = mode->vdisplay; timing->lower_margin = mode->vsync_start - mode->vdisplay; timing->vsync_len = mode->vsync_end - mode->vsync_start; timing->upper_margin = mode->vtotal - mode->vsync_end; if (mode->flags & DRM_MODE_FLAG_INTERLACE) timing->vmode = FB_VMODE_INTERLACED; else timing->vmode = FB_VMODE_NONINTERLACED; if (mode->flags & 
DRM_MODE_FLAG_DBLSCAN) timing->vmode |= FB_VMODE_DOUBLE; } static int rockchip_drm_connector_get_modes(struct drm_connector *connector) { struct rockchip_drm_connector *rockchip_connector = to_rockchip_connector(connector); struct rockchip_drm_manager *manager = rockchip_connector->manager; struct rockchip_drm_display_ops *display_ops = manager->display_ops; struct edid *edid = NULL; unsigned int count = 0; int ret; DRM_DEBUG_KMS("%s\n", __FILE__); if (!display_ops) { DRM_DEBUG_KMS("display_ops is null.\n"); return 0; } /* * If get_edid() exists, the get_edid() callback of the HDMI side * is called to get EDID data through the I2C interface; otherwise, * the timing is taken from the FIMD driver (display controller). * * P.S. in the case of an LCD panel, count is always 1 on success * because an LCD panel has only one mode. */ if (display_ops->get_edid) { edid = display_ops->get_edid(manager->dev, connector); if (IS_ERR_OR_NULL(edid)) { ret = PTR_ERR(edid); edid = NULL; DRM_ERROR("Panel operation get_edid failed %d\n", ret); goto out; } count = drm_add_edid_modes(connector, edid); if (!count) { DRM_ERROR("Add edid modes failed %d\n", count); goto out; } drm_mode_connector_update_edid_property(connector, edid); } else if(display_ops->get_modelist){ struct list_head *pos,*head; struct fb_modelist *modelist; struct fb_videomode *mode; struct drm_display_mode *disp_mode = NULL; count=0; head = display_ops->get_modelist(manager->dev); list_for_each(pos,head){ modelist = list_entry(pos, struct fb_modelist, list); mode = &modelist->mode; disp_mode = drm_mode_create(connector->dev); if (!disp_mode) { DRM_ERROR("failed to create a new display mode.\n"); return count; } convert_fbmode_to_display_mode(disp_mode, mode); if(mode->xres == 1280 && mode->yres == 720 && mode->refresh == 60) disp_mode->type |= DRM_MODE_TYPE_PREFERRED; drm_mode_set_name(disp_mode); // snprintf(disp_mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s-%d", // disp_mode->hdisplay, disp_mode->vdisplay, // !!(disp_mode->flags & DRM_MODE_FLAG_INTERLACE)?
"i" : "p",disp_mode->vrefresh); drm_mode_probed_add(connector, disp_mode); count++; } } else { struct rockchip_drm_panel_info *panel; struct drm_display_mode *mode = drm_mode_create(connector->dev); if (!mode) { DRM_ERROR("failed to create a new display mode.\n"); return 0; } if (display_ops->get_panel) panel = display_ops->get_panel(manager->dev); else { drm_mode_destroy(connector->dev, mode); return 0; } convert_to_display_mode(mode, panel); connector->display_info.width_mm = mode->width_mm; connector->display_info.height_mm = mode->height_mm; mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; drm_mode_set_name(mode); drm_mode_probed_add(connector, mode); count = 1; } out: kfree(edid); return count; } static int rockchip_drm_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct rockchip_drm_connector *rockchip_connector = to_rockchip_connector(connector); struct rockchip_drm_manager *manager = rockchip_connector->manager; struct rockchip_drm_display_ops *display_ops = manager->display_ops; struct fb_videomode timing; int ret = MODE_BAD; DRM_DEBUG_KMS("%s\n", __FILE__); convert_to_video_timing(&timing, mode); if (display_ops && display_ops->check_timing) if (!display_ops->check_timing(manager->dev, (void *)&timing)) ret = MODE_OK; return ret; } struct drm_encoder *rockchip_drm_best_encoder(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct rockchip_drm_connector *rockchip_connector = to_rockchip_connector(connector); struct drm_mode_object *obj; struct drm_encoder *encoder; DRM_DEBUG_KMS("%s\n", __FILE__); obj = drm_mode_object_find(dev, rockchip_connector->encoder_id, DRM_MODE_OBJECT_ENCODER); if (!obj) { DRM_DEBUG_KMS("Unknown ENCODER ID %d\n", rockchip_connector->encoder_id); return NULL; } encoder = obj_to_encoder(obj); return encoder; } static struct drm_connector_helper_funcs rockchip_connector_helper_funcs = { .get_modes = rockchip_drm_connector_get_modes, .mode_valid = rockchip_drm_connector_mode_valid, .best_encoder = rockchip_drm_best_encoder, }; void rockchip_drm_display_power(struct drm_connector *connector, int mode) { struct drm_encoder *encoder = rockchip_drm_best_encoder(connector); struct rockchip_drm_connector *rockchip_connector; struct rockchip_drm_manager *manager = rockchip_drm_get_manager(encoder); struct rockchip_drm_display_ops *display_ops = manager->display_ops; rockchip_connector = to_rockchip_connector(connector); if (rockchip_connector->dpms == mode) { DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n"); return; } if (display_ops && display_ops->power_on) display_ops->power_on(manager->dev, mode); rockchip_connector->dpms = mode; } static void rockchip_drm_connector_dpms(struct drm_connector *connector, int mode) { DRM_DEBUG_KMS("%s\n", __FILE__); /* * in case that drm_crtc_helper_set_mode() is called, * encoder/crtc->funcs->dpms() will be just returned * because they already were DRM_MODE_DPMS_ON so only * rockchip_drm_display_power() will be called. 
*/ drm_helper_connector_dpms(connector, mode); rockchip_drm_display_power(connector, mode); } static int rockchip_drm_connector_fill_modes(struct drm_connector *connector, unsigned int max_width, unsigned int max_height) { struct rockchip_drm_connector *rockchip_connector = to_rockchip_connector(connector); struct rockchip_drm_manager *manager = rockchip_connector->manager; struct rockchip_drm_manager_ops *ops = manager->ops; unsigned int width, height; width = max_width; height = max_height; /* * If a specific driver wants to find the desired mode using the maximum * resolution, then get the max width and height from that driver. */ if (ops && ops->get_max_resol) ops->get_max_resol(manager->dev, &width, &height); return drm_helper_probe_single_connector_modes(connector, width, height); } /* get detection status of display device. */ static enum drm_connector_status rockchip_drm_connector_detect(struct drm_connector *connector, bool force) { struct rockchip_drm_connector *rockchip_connector = to_rockchip_connector(connector); struct rockchip_drm_manager *manager = rockchip_connector->manager; struct rockchip_drm_display_ops *display_ops = manager->display_ops; enum drm_connector_status status = connector_status_disconnected; DRM_DEBUG_KMS("%s\n", __FILE__); if (display_ops && display_ops->is_connected) { if (display_ops->is_connected(manager->dev)) status = connector_status_connected; else status = connector_status_disconnected; } return status; } static void rockchip_drm_connector_destroy(struct drm_connector *connector) { struct rockchip_drm_connector *rockchip_connector = to_rockchip_connector(connector); DRM_DEBUG_KMS("%s\n", __FILE__); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); kfree(rockchip_connector); } static struct drm_connector_funcs rockchip_connector_funcs = { .dpms = rockchip_drm_connector_dpms, .fill_modes = rockchip_drm_connector_fill_modes, .detect = rockchip_drm_connector_detect, .destroy = rockchip_drm_connector_destroy, }; struct drm_connector *rockchip_drm_connector_create(struct drm_device *dev, struct drm_encoder *encoder) { struct rockchip_drm_connector *rockchip_connector; struct rockchip_drm_manager *manager = rockchip_drm_get_manager(encoder); struct drm_connector *connector; int type; int err; DRM_DEBUG_KMS("%s\n", __FILE__); rockchip_connector = kzalloc(sizeof(*rockchip_connector), GFP_KERNEL); if (!rockchip_connector) { DRM_ERROR("failed to allocate connector\n"); return NULL; } connector = &rockchip_connector->drm_connector; switch (manager->display_ops->type) { case ROCKCHIP_DISPLAY_TYPE_HDMI: type = DRM_MODE_CONNECTOR_HDMIA; connector->interlace_allowed = true; connector->polled = DRM_CONNECTOR_POLL_HPD; break; case ROCKCHIP_DISPLAY_TYPE_VIDI: type = DRM_MODE_CONNECTOR_VIRTUAL; connector->polled = DRM_CONNECTOR_POLL_HPD; break; case ROCKCHIP_DISPLAY_TYPE_LCD: type = DRM_MODE_CONNECTOR_LVDS; break; default: type = DRM_MODE_CONNECTOR_Unknown; break; } drm_connector_init(dev, connector, &rockchip_connector_funcs, type); drm_connector_helper_add(connector, &rockchip_connector_helper_funcs); err = drm_sysfs_connector_add(connector); if (err) goto err_connector; rockchip_connector->encoder_id = encoder->base.id; rockchip_connector->manager = manager; rockchip_connector->dpms = DRM_MODE_DPMS_OFF; connector->dpms = DRM_MODE_DPMS_OFF; connector->encoder = encoder; err = drm_mode_connector_attach_encoder(connector, encoder); if (err) { DRM_ERROR("failed to attach a connector to an encoder\n"); goto err_sysfs; } DRM_DEBUG_KMS("connector has been
created\n"); return connector; err_sysfs: drm_sysfs_connector_remove(connector); err_connector: drm_connector_cleanup(connector); kfree(rockchip_connector); return NULL; }
gpl-2.0
hodobaj/Pandaren5.3.0-master
dep/acelite/ace/MEM_IO.cpp
529
11787
// MEM_IO.cpp // $Id: MEM_IO.cpp 92069 2010-09-28 11:38:59Z johnnyw $ #include "ace/MEM_IO.h" #include "ace/Handle_Set.h" #if (ACE_HAS_POSITION_INDEPENDENT_POINTERS == 1) #if !defined (__ACE_INLINE__) #include "ace/MEM_IO.inl" #endif /* __ACE_INLINE__ */ ACE_BEGIN_VERSIONED_NAMESPACE_DECL ACE_ALLOC_HOOK_DEFINE(ACE_MEM_IO) ACE_Reactive_MEM_IO::~ACE_Reactive_MEM_IO (void) { } int ACE_Reactive_MEM_IO::init (ACE_HANDLE handle, const ACE_TCHAR *name, MALLOC_OPTIONS *options) { ACE_TRACE ("ACE_Reactive_MEM_IO::init"); this->handle_ = handle; return this->create_shm_malloc (name, options); } ssize_t ACE_Reactive_MEM_IO::recv_buf (ACE_MEM_SAP_Node *&buf, int flags, const ACE_Time_Value *timeout) { ACE_TRACE ("ACE_Reactive_MEM_IO::recv_buf"); if (this->shm_malloc_ == 0 || this->handle_ == ACE_INVALID_HANDLE) return -1; ACE_OFF_T new_offset = 0; ssize_t retv = ACE::recv (this->handle_, (char *) &new_offset, sizeof (ACE_OFF_T), flags, timeout); if (retv == 0) { // ACE_DEBUG ((LM_INFO, "MEM_Stream closed\n")); buf = 0; return 0; } else if (retv != static_cast <ssize_t> (sizeof (ACE_OFF_T))) { // Nothing available or we are really screwed. buf = 0; return -1; } return this->get_buf_len (new_offset, buf); } ssize_t ACE_Reactive_MEM_IO::send_buf (ACE_MEM_SAP_Node *buf, int flags, const ACE_Time_Value *timeout) { ACE_TRACE ("ACE_Reactive_MEM_IO::send_buf"); if (this->shm_malloc_ == 0 || this->handle_ == ACE_INVALID_HANDLE) { return -1; } // The offset. ACE_OFF_T offset = ACE_Utils::truncate_cast<ACE_OFF_T> ( reinterpret_cast<char *> (buf) - static_cast<char *> (this->shm_malloc_->base_addr ())); // Send the offset value over the socket. if (ACE::send (this->handle_, (const char *) &offset, sizeof (offset), flags, timeout) != static_cast <ssize_t> (sizeof (offset))) { // unsuccessful send, release the memory in the shared-memory. this->release_buffer (buf); return -1; } return ACE_Utils::truncate_cast<ssize_t> (buf->size ()); } #if defined (ACE_WIN32) || !defined (_ACE_USE_SV_SEM) int ACE_MT_MEM_IO::Simple_Queue::write (ACE_MEM_SAP_Node *new_node) { if (this->mq_ == 0) return -1; // Here, we assume we have already acquired the necessary lock // and we are allowed to write. if (this->mq_->tail_.addr () == 0) // nothing in the queue. { this->mq_->head_ = new_node; this->mq_->tail_ = new_node; new_node->next_ = 0; } else { this->mq_->tail_->next_ = new_node; new_node->next_ = 0; this->mq_->tail_ = new_node; } return 0; } ACE_MEM_SAP_Node * ACE_MT_MEM_IO::Simple_Queue::read () { if (this->mq_ == 0) return 0; ACE_MEM_SAP_Node *retv = 0; ACE_SEH_TRY { retv = this->mq_->head_; // Here, we assume we have already acquired the necessary lock // and there is something in the queue. if (this->mq_->head_ == this->mq_->tail_) { // Last message in the queue. this->mq_->head_ = 0; this->mq_->tail_ = 0; } else this->mq_->head_ = retv->next_; } ACE_SEH_EXCEPT (this->malloc_->memory_pool ().seh_selector (GetExceptionInformation ())) { } return retv; } ACE_MT_MEM_IO::~ACE_MT_MEM_IO () { delete this->recv_channel_.sema_; delete this->recv_channel_.lock_; delete this->send_channel_.sema_; delete this->send_channel_.lock_; } int ACE_MT_MEM_IO::init (ACE_HANDLE handle, const ACE_TCHAR *name, MALLOC_OPTIONS *options) { ACE_TRACE ("ACE_MT_MEM_IO::init"); ACE_UNUSED_ARG (handle); // @@ Give me a rule on naming and how the queue should // be kept in the shared memory and we are done // with this.
if (this->create_shm_malloc (name, options) == -1) return -1; ACE_TCHAR server_sema [MAXPATHLEN]; ACE_TCHAR client_sema [MAXPATHLEN]; ACE_TCHAR server_lock [MAXPATHLEN]; ACE_TCHAR client_lock [MAXPATHLEN]; const ACE_TCHAR *basename = ACE::basename (name); // size_t baselen = ACE_OS::strlen (basename); // Building names. @@ Check buffer overflow? ACE_OS::strcpy (server_sema, basename); ACE_OS::strcat (server_sema, ACE_TEXT ("_sema_to_server")); ACE_OS::strcpy (client_sema, basename); ACE_OS::strcat (client_sema, ACE_TEXT ("_sema_to_client")); ACE_OS::strcpy (server_lock, basename); ACE_OS::strcat (server_lock, ACE_TEXT ("_lock_to_server")); ACE_OS::strcpy (client_lock, basename); ACE_OS::strcat (client_lock, ACE_TEXT ("_lock_to_client")); void *to_server_ptr = 0; // @@ Here, we assume the shared memory file will never be reused. // So we can determine whether we are server or client by examining // if the simple message queues have already been set up in // the Malloc object or not. if (this->shm_malloc_->find ("to_server", to_server_ptr) == -1) { void *ptr = 0; // We are the server. ACE_ALLOCATOR_RETURN (ptr, this->shm_malloc_->malloc (2 * sizeof (MQ_Struct)), -1); MQ_Struct *mymq = reinterpret_cast<MQ_Struct *> (ptr); mymq->tail_ = 0; mymq->head_ = 0; (mymq + 1)->tail_ = 0; (mymq + 1)->head_ = 0; if (this->shm_malloc_->bind ("to_server", mymq) == -1) return -1; if (this->shm_malloc_->bind ("to_client", mymq + 1) == -1) return -1; this->recv_channel_.queue_.init (mymq, this->shm_malloc_); ACE_NEW_RETURN (this->recv_channel_.sema_, ACE_SYNCH_PROCESS_SEMAPHORE (0, server_sema), -1); ACE_NEW_RETURN (this->recv_channel_.lock_, ACE_SYNCH_PROCESS_MUTEX (server_lock), -1); this->send_channel_.queue_.init (mymq + 1, this->shm_malloc_); ACE_NEW_RETURN (this->send_channel_.sema_, ACE_SYNCH_PROCESS_SEMAPHORE (0, client_sema), -1); ACE_NEW_RETURN (this->send_channel_.lock_, ACE_SYNCH_PROCESS_MUTEX (client_lock), -1); } else { // We are the client. MQ_Struct *mymq = reinterpret_cast<MQ_Struct *> (to_server_ptr); this->recv_channel_.queue_.init (mymq +1, this->shm_malloc_); ACE_NEW_RETURN (this->recv_channel_.sema_, ACE_SYNCH_PROCESS_SEMAPHORE (0, client_sema), -1); ACE_NEW_RETURN (this->recv_channel_.lock_, ACE_SYNCH_PROCESS_MUTEX (client_lock), -1); this->send_channel_.queue_.init (mymq, this->shm_malloc_); ACE_NEW_RETURN (this->send_channel_.sema_, ACE_SYNCH_PROCESS_SEMAPHORE (0, server_sema), -1); ACE_NEW_RETURN (this->send_channel_.lock_, ACE_SYNCH_PROCESS_MUTEX (server_lock), -1); } return 0; } ssize_t ACE_MT_MEM_IO::recv_buf (ACE_MEM_SAP_Node *&buf, int flags, const ACE_Time_Value *timeout) { ACE_TRACE ("ACE_MT_MEM_IO::recv_buf"); // @@ Don't know how to handle timeout yet. ACE_UNUSED_ARG (timeout); ACE_UNUSED_ARG (flags); if (this->shm_malloc_ == 0) { return -1; } // Need to handle timeout here. if (this->recv_channel_.sema_->acquire () == -1) { return -1; } { // @@ We can probably skip the lock in certain circumstances. ACE_GUARD_RETURN (ACE_SYNCH_PROCESS_MUTEX, ace_mon, *this->recv_channel_.lock_, -1); buf = this->recv_channel_.queue_.read (); if (buf != 0) { return ACE_Utils::truncate_cast<ssize_t> (buf->size ()); } return -1; } } ssize_t ACE_MT_MEM_IO::send_buf (ACE_MEM_SAP_Node *buf, int flags, const ACE_Time_Value *timeout) { ACE_TRACE ("ACE_MT_MEM_IO::send_buf"); // @@ Don't know how to handle timeout yet. ACE_UNUSED_ARG (timeout); ACE_UNUSED_ARG (flags); if (this->shm_malloc_ == 0) { return -1; } { // @@ We can probably skip the lock in certain circumstances.
ACE_GUARD_RETURN (ACE_SYNCH_PROCESS_MUTEX, ace_mon, *this->send_channel_.lock_, -1); if (this->send_channel_.queue_.write (buf) == -1) { this->release_buffer (buf); return -1; } } if (this->send_channel_.sema_->release () == -1) { return -1; } return ACE_Utils::truncate_cast<ssize_t> (buf->size ()); } #endif /* ACE_WIN32 || !_ACE_USE_SV_SEM */ void ACE_MEM_IO::dump (void) const { #if defined (ACE_HAS_DUMP) ACE_TRACE ("ACE_MEM_IO::dump"); #endif /* ACE_HAS_DUMP */ } int ACE_MEM_IO::init (const ACE_TCHAR *name, ACE_MEM_IO::Signal_Strategy type, ACE_MEM_SAP::MALLOC_OPTIONS *options) { ACE_UNUSED_ARG (type); delete this->deliver_strategy_; this->deliver_strategy_ = 0; switch (type) { case ACE_MEM_IO::Reactive: ACE_NEW_RETURN (this->deliver_strategy_, ACE_Reactive_MEM_IO (), -1); break; #if defined (ACE_WIN32) || !defined (_ACE_USE_SV_SEM) case ACE_MEM_IO::MT: ACE_NEW_RETURN (this->deliver_strategy_, ACE_MT_MEM_IO (), -1); break; #endif /* ACE_WIN32 || !_ACE_USE_SV_SEM */ default: return -1; } return this->deliver_strategy_->init (this->get_handle (), name, options); } int ACE_MEM_IO::fini (void) { if (this->deliver_strategy_ != 0) { return this->deliver_strategy_->fini (); } else { return -1; } } // Allows a client to read from a socket without having to provide // a buffer to read. This method determines how much data is in the // socket, allocates a buffer of this size, reads in the data, and // returns the number of bytes read. ssize_t ACE_MEM_IO::send (const ACE_Message_Block *message_block, const ACE_Time_Value *timeout) { ACE_TRACE ("ACE_MEM_IO::send"); if (this->deliver_strategy_ == 0) { return -1; // Something went seriously wrong. } size_t len = message_block->total_length (); if (len != 0) { ACE_MEM_SAP_Node *buf = reinterpret_cast<ACE_MEM_SAP_Node *> ( this->deliver_strategy_->acquire_buffer ( ACE_Utils::truncate_cast<ssize_t> (len))); size_t n = 0; while (message_block != 0) { ACE_OS::memcpy (static_cast<char *> (buf->data ()) + n, message_block->rd_ptr (), message_block->length ()); n += message_block->length (); if (message_block->cont ()) { message_block = message_block->cont (); } else { message_block = message_block->next (); } } buf->size_ = len; return this->deliver_strategy_->send_buf (buf, 0, timeout); } return 0; } ACE_END_VERSIONED_NAMESPACE_DECL #endif /* ACE_HAS_POSITION_INDEPENDENT_POINTERS == 1 */
gpl-2.0
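A note on the MEM_IO.cpp record above: the transport works because no raw pointer ever crosses the process boundary. Both sides map the same shared-memory region (possibly at different virtual addresses) and exchange only base-relative offsets, which is what the ACE_HAS_POSITION_INDEPENDENT_POINTERS guard refers to. A minimal standalone C sketch of that idea follows; the names (shm_base, ptr_to_offset, offset_to_ptr) are invented for illustration and are not ACE APIs.

/* Minimal sketch (not ACE code) of offset-based shared-memory
 * messaging, the core idea behind ACE_Reactive_MEM_IO::send_buf(). */
#include <stddef.h>
#include <stdint.h>

/* Each process maps the same shared region; only this base pointer
 * differs between the two processes. */
static char *shm_base;

/* Sender side: turn a local pointer into a base-relative offset. */
static uint64_t ptr_to_offset(const void *p)
{
    return (uint64_t)((const char *)p - shm_base);
}

/* Receiver side: rebuild a valid local pointer from the offset. */
static void *offset_to_ptr(uint64_t off)
{
    return shm_base + off;
}

In the real code, send_buf() performs the pointer-to-offset step before writing to the socket, and recv_buf() hands the received offset to get_buf_len(), which resolves it against the local mapping.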
larks/linux-rcu
drivers/isdn/hisax/nj_u.c
785
6995
/* $Id: nj_u.c,v 2.14.2.3 2004/01/13 14:31:26 keil Exp $ * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/init.h> #include "hisax.h" #include "icc.h" #include "isdnl1.h" #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/ppp_defs.h> #include "netjet.h" static const char *NETjet_U_revision = "$Revision: 2.14.2.3 $"; static u_char dummyrr(struct IsdnCardState *cs, int chan, u_char off) { return(5); } static void dummywr(struct IsdnCardState *cs, int chan, u_char off, u_char value) { } static irqreturn_t netjet_u_interrupt(int intno, void *dev_id) { struct IsdnCardState *cs = dev_id; u_char val, sval; u_long flags; spin_lock_irqsave(&cs->lock, flags); if (!((sval = bytein(cs->hw.njet.base + NETJET_IRQSTAT1)) & NETJET_ISACIRQ)) { val = NETjet_ReadIC(cs, ICC_ISTA); if (cs->debug & L1_DEB_ISAC) debugl1(cs, "tiger: i1 %x %x", sval, val); if (val) { icc_interrupt(cs, val); NETjet_WriteIC(cs, ICC_MASK, 0xFF); NETjet_WriteIC(cs, ICC_MASK, 0x0); } } /* start new code 13/07/00 GE */ /* set bits in sval to indicate which page is free */ if (inl(cs->hw.njet.base + NETJET_DMA_WRITE_ADR) < inl(cs->hw.njet.base + NETJET_DMA_WRITE_IRQ)) /* the 2nd write page is free */ sval = 0x08; else /* the 1st write page is free */ sval = 0x04; if (inl(cs->hw.njet.base + NETJET_DMA_READ_ADR) < inl(cs->hw.njet.base + NETJET_DMA_READ_IRQ)) /* the 2nd read page is free */ sval = sval | 0x02; else /* the 1st read page is free */ sval = sval | 0x01; if (sval != cs->hw.njet.last_is0) /* we have a DMA interrupt */ { if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) { spin_unlock_irqrestore(&cs->lock, flags); return IRQ_HANDLED; } cs->hw.njet.irqstat0 = sval; if ((cs->hw.njet.irqstat0 & NETJET_IRQM0_READ) != (cs->hw.njet.last_is0 & NETJET_IRQM0_READ)) /* we have a read dma int */ read_tiger(cs); if ((cs->hw.njet.irqstat0 & NETJET_IRQM0_WRITE) != (cs->hw.njet.last_is0 & NETJET_IRQM0_WRITE)) /* we have a write dma int */ write_tiger(cs); /* end new code 13/07/00 GE */ test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); } spin_unlock_irqrestore(&cs->lock, flags); return IRQ_HANDLED; } static void reset_netjet_u(struct IsdnCardState *cs) { cs->hw.njet.ctrl_reg = 0xff; /* Reset On */ byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg); mdelay(10); cs->hw.njet.ctrl_reg = 0x40; /* Reset Off and status read clear */ /* now edge triggered for TJ320 GE 13/07/00 */ byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg); mdelay(10); cs->hw.njet.auxd = 0xC0; cs->hw.njet.dmactrl = 0; byteout(cs->hw.njet.auxa, 0); byteout(cs->hw.njet.base + NETJET_AUXCTRL, ~NETJET_ISACIRQ); byteout(cs->hw.njet.base + NETJET_IRQMASK1, NETJET_ISACIRQ); byteout(cs->hw.njet.auxa, cs->hw.njet.auxd); } static int NETjet_U_card_msg(struct IsdnCardState *cs, int mt, void *arg) { u_long flags; switch (mt) { case CARD_RESET: spin_lock_irqsave(&cs->lock, flags); reset_netjet_u(cs); spin_unlock_irqrestore(&cs->lock, flags); return(0); case CARD_RELEASE: release_io_netjet(cs); return(0); case CARD_INIT: spin_lock_irqsave(&cs->lock, flags); inittiger(cs); reset_netjet_u(cs); clear_pending_icc_ints(cs); initicc(cs); /* Reenable all IRQ */ cs->writeisac(cs, ICC_MASK, 0); spin_unlock_irqrestore(&cs->lock, flags); return(0); case CARD_TEST: return(0); } return(0); } static int __devinit nju_pci_probe(struct pci_dev *dev_netjet, struct IsdnCardState *cs) { if (pci_enable_device(dev_netjet)) return(0); pci_set_master(dev_netjet); 
cs->irq = dev_netjet->irq; if (!cs->irq) { printk(KERN_WARNING "NETspider-U: No IRQ for PCI card found\n"); return(0); } cs->hw.njet.base = pci_resource_start(dev_netjet, 0); if (!cs->hw.njet.base) { printk(KERN_WARNING "NETspider-U: No IO-Adr for PCI card found\n"); return(0); } return (1); } static int __devinit nju_cs_init(struct IsdnCard *card, struct IsdnCardState *cs) { cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA; cs->hw.njet.isac = cs->hw.njet.base | NETJET_ISAC_OFF; mdelay(10); cs->hw.njet.ctrl_reg = 0xff; /* Reset On */ byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg); mdelay(10); cs->hw.njet.ctrl_reg = 0x00; /* Reset Off and status read clear */ byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg); mdelay(10); cs->hw.njet.auxd = 0xC0; cs->hw.njet.dmactrl = 0; byteout(cs->hw.njet.auxa, 0); byteout(cs->hw.njet.base + NETJET_AUXCTRL, ~NETJET_ISACIRQ); byteout(cs->hw.njet.base + NETJET_IRQMASK1, NETJET_ISACIRQ); byteout(cs->hw.njet.auxa, cs->hw.njet.auxd); switch ( ( ( NETjet_ReadIC( cs, ICC_RBCH ) >> 5 ) & 3 ) ) { case 3 : return 1; /* end loop */ case 0 : printk( KERN_WARNING "NETspider-U: NETjet-S PCI card found\n" ); return -1; /* continue looping */ default : printk( KERN_WARNING "NETspider-U: No PCI card found\n" ); return 0; /* end loop & function */ } return 1; /* end loop */ } static int __devinit nju_cs_init_rest(struct IsdnCard *card, struct IsdnCardState *cs) { const int bytecnt = 256; printk(KERN_INFO "NETspider-U: PCI card configured at %#lx IRQ %d\n", cs->hw.njet.base, cs->irq); if (!request_region(cs->hw.njet.base, bytecnt, "netspider-u isdn")) { printk(KERN_WARNING "HiSax: NETspider-U config port %#lx-%#lx " "already in use\n", cs->hw.njet.base, cs->hw.njet.base + bytecnt); return (0); } setup_icc(cs); cs->readisac = &NETjet_ReadIC; cs->writeisac = &NETjet_WriteIC; cs->readisacfifo = &NETjet_ReadICfifo; cs->writeisacfifo = &NETjet_WriteICfifo; cs->BC_Read_Reg = &dummyrr; cs->BC_Write_Reg = &dummywr; cs->BC_Send_Data = &netjet_fill_dma; cs->cardmsg = &NETjet_U_card_msg; cs->irq_func = &netjet_u_interrupt; cs->irq_flags |= IRQF_SHARED; ICCVersion(cs, "NETspider-U:"); return (1); } static struct pci_dev *dev_netjet __devinitdata = NULL; int __devinit setup_netjet_u(struct IsdnCard *card) { int ret; struct IsdnCardState *cs = card->cs; char tmp[64]; #ifdef __BIG_ENDIAN #error "not running on big endian machines now" #endif strcpy(tmp, NETjet_U_revision); printk(KERN_INFO "HiSax: Traverse Tech. NETspider-U driver Rev. %s\n", HiSax_getrev(tmp)); if (cs->typ != ISDN_CTYPE_NETJET_U) return(0); test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); for ( ;; ) { if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) { ret = nju_pci_probe(dev_netjet, cs); if (!ret) return(0); } else { printk(KERN_WARNING "NETspider-U: No PCI card found\n"); return(0); } ret = nju_cs_init(card, cs); if (!ret) return (0); if (ret > 0) break; /* ret < 0 == continue looping */ } return nju_cs_init_rest(card, cs); }
gpl-2.0
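One detail of the nj_u.c record worth calling out: netjet_u_interrupt() gets no explicit "which page" flag from the hardware. For each direction it compares the current DMA address against the mid-buffer IRQ address, packs the result into sval, and treats a change relative to last_is0 as a pending DMA interrupt. A small C sketch of the same decision logic follows; encode_free_pages() and the other names here are invented for illustration and are not part of the driver.

/* Illustrative sketch (not driver code) of the page-status encoding
 * in netjet_u_interrupt(): each ring is split into two pages, and the
 * DMA pointer's position relative to the half-way IRQ address tells
 * which page the controller is filling, i.e. which page is free. */
#include <stdbool.h>
#include <stdint.h>

#define WR_PAGE2_FREE 0x08
#define WR_PAGE1_FREE 0x04
#define RD_PAGE2_FREE 0x02
#define RD_PAGE1_FREE 0x01

static uint8_t encode_free_pages(uint32_t wr_adr, uint32_t wr_irq,
                                 uint32_t rd_adr, uint32_t rd_irq)
{
    uint8_t status = (wr_adr < wr_irq) ? WR_PAGE2_FREE : WR_PAGE1_FREE;
    status |= (rd_adr < rd_irq) ? RD_PAGE2_FREE : RD_PAGE1_FREE;
    return status;
}

/* A DMA interrupt is pending only when the status changed since the
 * previous pass, mirroring the sval != cs->hw.njet.last_is0 test. */
static bool dma_work_pending(uint8_t status, uint8_t last_status)
{
    return status != last_status;
}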
AK-Kernel/AK-OnePone
drivers/net/can/janz-ican3.c
1553
44369
/* * Janz MODULbus VMOD-ICAN3 CAN Interface Driver * * Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/netdevice.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include <linux/mfd/janz.h> #include <asm/io.h> /* the DPM has 64k of memory, organized into 256x 256 byte pages */ #define DPM_NUM_PAGES 256 #define DPM_PAGE_SIZE 256 #define DPM_PAGE_ADDR(p) ((p) * DPM_PAGE_SIZE) /* JANZ ICAN3 "old-style" host interface queue page numbers */ #define QUEUE_OLD_CONTROL 0 #define QUEUE_OLD_RB0 1 #define QUEUE_OLD_RB1 2 #define QUEUE_OLD_WB0 3 #define QUEUE_OLD_WB1 4 /* Janz ICAN3 "old-style" host interface control registers */ #define MSYNC_PEER 0x00 /* ICAN only */ #define MSYNC_LOCL 0x01 /* host only */ #define TARGET_RUNNING 0x02 #define MSYNC_RB0 0x01 #define MSYNC_RB1 0x02 #define MSYNC_RBLW 0x04 #define MSYNC_RB_MASK (MSYNC_RB0 | MSYNC_RB1) #define MSYNC_WB0 0x10 #define MSYNC_WB1 0x20 #define MSYNC_WBLW 0x40 #define MSYNC_WB_MASK (MSYNC_WB0 | MSYNC_WB1) /* Janz ICAN3 "new-style" host interface queue page numbers */ #define QUEUE_TOHOST 5 #define QUEUE_FROMHOST_MID 6 #define QUEUE_FROMHOST_HIGH 7 #define QUEUE_FROMHOST_LOW 8 /* The first free page in the DPM is #9 */ #define DPM_FREE_START 9 /* Janz ICAN3 "new-style" and "fast" host interface descriptor flags */ #define DESC_VALID 0x80 #define DESC_WRAP 0x40 #define DESC_INTERRUPT 0x20 #define DESC_IVALID 0x10 #define DESC_LEN(len) (len) /* Janz ICAN3 Firmware Messages */ #define MSG_CONNECTI 0x02 #define MSG_DISCONNECT 0x03 #define MSG_IDVERS 0x04 #define MSG_MSGLOST 0x05 #define MSG_NEWHOSTIF 0x08 #define MSG_INQUIRY 0x0a #define MSG_SETAFILMASK 0x10 #define MSG_INITFDPMQUEUE 0x11 #define MSG_HWCONF 0x12 #define MSG_FMSGLOST 0x15 #define MSG_CEVTIND 0x37 #define MSG_CBTRREQ 0x41 #define MSG_COFFREQ 0x42 #define MSG_CONREQ 0x43 #define MSG_CCONFREQ 0x47 /* * Janz ICAN3 CAN Inquiry Message Types * * NOTE: there appears to be a firmware bug here. You must send * NOTE: INQUIRY_STATUS and expect to receive an INQUIRY_EXTENDED * NOTE: response. 
The controller never responds to a message with * NOTE: the INQUIRY_EXTENDED subspec :( */ #define INQUIRY_STATUS 0x00 #define INQUIRY_TERMINATION 0x01 #define INQUIRY_EXTENDED 0x04 /* Janz ICAN3 CAN Set Acceptance Filter Mask Message Types */ #define SETAFILMASK_REJECT 0x00 #define SETAFILMASK_FASTIF 0x02 /* Janz ICAN3 CAN Hardware Configuration Message Types */ #define HWCONF_TERMINATE_ON 0x01 #define HWCONF_TERMINATE_OFF 0x00 /* Janz ICAN3 CAN Event Indication Message Types */ #define CEVTIND_EI 0x01 #define CEVTIND_DOI 0x02 #define CEVTIND_LOST 0x04 #define CEVTIND_FULL 0x08 #define CEVTIND_BEI 0x10 #define CEVTIND_CHIP_SJA1000 0x02 #define ICAN3_BUSERR_QUOTA_MAX 255 /* Janz ICAN3 CAN Frame Conversion */ #define ICAN3_ECHO 0x10 #define ICAN3_EFF_RTR 0x40 #define ICAN3_SFF_RTR 0x10 #define ICAN3_EFF 0x80 #define ICAN3_CAN_TYPE_MASK 0x0f #define ICAN3_CAN_TYPE_SFF 0x00 #define ICAN3_CAN_TYPE_EFF 0x01 #define ICAN3_CAN_DLC_MASK 0x0f /* * SJA1000 Status and Error Register Definitions * * Copied from drivers/net/can/sja1000/sja1000.h */ /* status register content */ #define SR_BS 0x80 #define SR_ES 0x40 #define SR_TS 0x20 #define SR_RS 0x10 #define SR_TCS 0x08 #define SR_TBS 0x04 #define SR_DOS 0x02 #define SR_RBS 0x01 #define SR_CRIT (SR_BS|SR_ES) /* ECC register */ #define ECC_SEG 0x1F #define ECC_DIR 0x20 #define ECC_ERR 6 #define ECC_BIT 0x00 #define ECC_FORM 0x40 #define ECC_STUFF 0x80 #define ECC_MASK 0xc0 /* Number of buffers for use in the "new-style" host interface */ #define ICAN3_NEW_BUFFERS 16 /* Number of buffers for use in the "fast" host interface */ #define ICAN3_TX_BUFFERS 512 #define ICAN3_RX_BUFFERS 1024 /* SJA1000 Clock Input */ #define ICAN3_CAN_CLOCK 8000000 /* Driver Name */ #define DRV_NAME "janz-ican3" /* DPM Control Registers -- starts at offset 0x100 in the MODULbus registers */ struct ican3_dpm_control { /* window address register */ u8 window_address; u8 unused1; /* * Read access: clear interrupt from microcontroller * Write access: send interrupt to microcontroller */ u8 interrupt; u8 unused2; /* write-only: reset all hardware on the module */ u8 hwreset; u8 unused3; /* write-only: generate an interrupt to the TPU */ u8 tpuinterrupt; }; struct ican3_dev { /* must be the first member */ struct can_priv can; /* CAN network device */ struct net_device *ndev; struct napi_struct napi; /* Device for printing */ struct device *dev; /* module number */ unsigned int num; /* base address of registers and IRQ */ struct janz_cmodio_onboard_regs __iomem *ctrl; struct ican3_dpm_control __iomem *dpmctrl; void __iomem *dpm; int irq; /* CAN bus termination status */ struct completion termination_comp; bool termination_enabled; /* CAN bus error status registers */ struct completion buserror_comp; struct can_berr_counter bec; /* old and new style host interface */ unsigned int iftype; /* * Any function which changes the current DPM page must hold this * lock while it is performing data accesses. This ensures that the * function will not be preempted and end up reading data from a * different DPM page than it expects. 
*/ spinlock_t lock; /* new host interface */ unsigned int rx_int; unsigned int rx_num; unsigned int tx_num; /* fast host interface */ unsigned int fastrx_start; unsigned int fastrx_int; unsigned int fastrx_num; unsigned int fasttx_start; unsigned int fasttx_num; /* first free DPM page */ unsigned int free_page; }; struct ican3_msg { u8 control; u8 spec; __le16 len; u8 data[252]; }; struct ican3_new_desc { u8 control; u8 pointer; }; struct ican3_fast_desc { u8 control; u8 command; u8 data[14]; }; /* write to the window basic address register */ static inline void ican3_set_page(struct ican3_dev *mod, unsigned int page) { BUG_ON(page >= DPM_NUM_PAGES); iowrite8(page, &mod->dpmctrl->window_address); } /* * ICAN3 "old-style" host interface */ /* * Receive a message from the ICAN3 "old-style" firmware interface * * LOCKING: must hold mod->lock * * returns 0 on success, -ENOMEM when no message exists */ static int ican3_old_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg) { unsigned int mbox, mbox_page; u8 locl, peer, xord; /* get the MSYNC registers */ ican3_set_page(mod, QUEUE_OLD_CONTROL); peer = ioread8(mod->dpm + MSYNC_PEER); locl = ioread8(mod->dpm + MSYNC_LOCL); xord = locl ^ peer; if ((xord & MSYNC_RB_MASK) == 0x00) { dev_dbg(mod->dev, "no mbox for reading\n"); return -ENOMEM; } /* find the first free mbox to read */ if ((xord & MSYNC_RB_MASK) == MSYNC_RB_MASK) mbox = (xord & MSYNC_RBLW) ? MSYNC_RB0 : MSYNC_RB1; else mbox = (xord & MSYNC_RB0) ? MSYNC_RB0 : MSYNC_RB1; /* copy the message */ mbox_page = (mbox == MSYNC_RB0) ? QUEUE_OLD_RB0 : QUEUE_OLD_RB1; ican3_set_page(mod, mbox_page); memcpy_fromio(msg, mod->dpm, sizeof(*msg)); /* * notify the firmware that the read buffer is available * for it to fill again */ locl ^= mbox; ican3_set_page(mod, QUEUE_OLD_CONTROL); iowrite8(locl, mod->dpm + MSYNC_LOCL); return 0; } /* * Send a message through the "old-style" firmware interface * * LOCKING: must hold mod->lock * * returns 0 on success, -ENOMEM when no free space exists */ static int ican3_old_send_msg(struct ican3_dev *mod, struct ican3_msg *msg) { unsigned int mbox, mbox_page; u8 locl, peer, xord; /* get the MSYNC registers */ ican3_set_page(mod, QUEUE_OLD_CONTROL); peer = ioread8(mod->dpm + MSYNC_PEER); locl = ioread8(mod->dpm + MSYNC_LOCL); xord = locl ^ peer; if ((xord & MSYNC_WB_MASK) == MSYNC_WB_MASK) { dev_err(mod->dev, "no mbox for writing\n"); return -ENOMEM; } /* calculate a free mbox to use */ mbox = (xord & MSYNC_WB0) ? MSYNC_WB1 : MSYNC_WB0; /* copy the message to the DPM */ mbox_page = (mbox == MSYNC_WB0) ? 
QUEUE_OLD_WB0 : QUEUE_OLD_WB1; ican3_set_page(mod, mbox_page); memcpy_toio(mod->dpm, msg, sizeof(*msg)); locl ^= mbox; if (mbox == MSYNC_WB1) locl |= MSYNC_WBLW; ican3_set_page(mod, QUEUE_OLD_CONTROL); iowrite8(locl, mod->dpm + MSYNC_LOCL); return 0; } /* * ICAN3 "new-style" Host Interface Setup */ static void __devinit ican3_init_new_host_interface(struct ican3_dev *mod) { struct ican3_new_desc desc; unsigned long flags; void __iomem *dst; int i; spin_lock_irqsave(&mod->lock, flags); /* setup the internal datastructures for RX */ mod->rx_num = 0; mod->rx_int = 0; /* tohost queue descriptors are in page 5 */ ican3_set_page(mod, QUEUE_TOHOST); dst = mod->dpm; /* initialize the tohost (rx) queue descriptors: pages 9-24 */ for (i = 0; i < ICAN3_NEW_BUFFERS; i++) { desc.control = DESC_INTERRUPT | DESC_LEN(1); /* I L=1 */ desc.pointer = mod->free_page; /* set wrap flag on last buffer */ if (i == ICAN3_NEW_BUFFERS - 1) desc.control |= DESC_WRAP; memcpy_toio(dst, &desc, sizeof(desc)); dst += sizeof(desc); mod->free_page++; } /* fromhost (tx) mid queue descriptors are in page 6 */ ican3_set_page(mod, QUEUE_FROMHOST_MID); dst = mod->dpm; /* setup the internal datastructures for TX */ mod->tx_num = 0; /* initialize the fromhost mid queue descriptors: pages 25-40 */ for (i = 0; i < ICAN3_NEW_BUFFERS; i++) { desc.control = DESC_VALID | DESC_LEN(1); /* V L=1 */ desc.pointer = mod->free_page; /* set wrap flag on last buffer */ if (i == ICAN3_NEW_BUFFERS - 1) desc.control |= DESC_WRAP; memcpy_toio(dst, &desc, sizeof(desc)); dst += sizeof(desc); mod->free_page++; } /* fromhost hi queue descriptors are in page 7 */ ican3_set_page(mod, QUEUE_FROMHOST_HIGH); dst = mod->dpm; /* initialize only a single buffer in the fromhost hi queue (unused) */ desc.control = DESC_VALID | DESC_WRAP | DESC_LEN(1); /* VW L=1 */ desc.pointer = mod->free_page; memcpy_toio(dst, &desc, sizeof(desc)); mod->free_page++; /* fromhost low queue descriptors are in page 8 */ ican3_set_page(mod, QUEUE_FROMHOST_LOW); dst = mod->dpm; /* initialize only a single buffer in the fromhost low queue (unused) */ desc.control = DESC_VALID | DESC_WRAP | DESC_LEN(1); /* VW L=1 */ desc.pointer = mod->free_page; memcpy_toio(dst, &desc, sizeof(desc)); mod->free_page++; spin_unlock_irqrestore(&mod->lock, flags); } /* * ICAN3 Fast Host Interface Setup */ static void __devinit ican3_init_fast_host_interface(struct ican3_dev *mod) { struct ican3_fast_desc desc; unsigned long flags; unsigned int addr; void __iomem *dst; int i; spin_lock_irqsave(&mod->lock, flags); /* save the start recv page */ mod->fastrx_start = mod->free_page; mod->fastrx_num = 0; mod->fastrx_int = 0; /* build a single fast tohost queue descriptor */ memset(&desc, 0, sizeof(desc)); desc.control = 0x00; desc.command = 1; /* build the tohost queue descriptor ring in memory */ addr = 0; for (i = 0; i < ICAN3_RX_BUFFERS; i++) { /* set the wrap bit on the last buffer */ if (i == ICAN3_RX_BUFFERS - 1) desc.control |= DESC_WRAP; /* switch to the correct page */ ican3_set_page(mod, mod->free_page); /* copy the descriptor to the DPM */ dst = mod->dpm + addr; memcpy_toio(dst, &desc, sizeof(desc)); addr += sizeof(desc); /* move to the next page if necessary */ if (addr >= DPM_PAGE_SIZE) { addr = 0; mod->free_page++; } } /* make sure we page-align the next queue */ if (addr != 0) mod->free_page++; /* save the start xmit page */ mod->fasttx_start = mod->free_page; mod->fasttx_num = 0; /* build a single fast fromhost queue descriptor */ memset(&desc, 0, sizeof(desc)); desc.control = DESC_VALID; 
desc.command = 1; /* build the fromhost queue descriptor ring in memory */ addr = 0; for (i = 0; i < ICAN3_TX_BUFFERS; i++) { /* set the wrap bit on the last buffer */ if (i == ICAN3_TX_BUFFERS - 1) desc.control |= DESC_WRAP; /* switch to the correct page */ ican3_set_page(mod, mod->free_page); /* copy the descriptor to the DPM */ dst = mod->dpm + addr; memcpy_toio(dst, &desc, sizeof(desc)); addr += sizeof(desc); /* move to the next page if necessary */ if (addr >= DPM_PAGE_SIZE) { addr = 0; mod->free_page++; } } spin_unlock_irqrestore(&mod->lock, flags); } /* * ICAN3 "new-style" Host Interface Message Helpers */ /* * LOCKING: must hold mod->lock */ static int ican3_new_send_msg(struct ican3_dev *mod, struct ican3_msg *msg) { struct ican3_new_desc desc; void __iomem *desc_addr = mod->dpm + (mod->tx_num * sizeof(desc)); /* switch to the fromhost mid queue, and read the buffer descriptor */ ican3_set_page(mod, QUEUE_FROMHOST_MID); memcpy_fromio(&desc, desc_addr, sizeof(desc)); if (!(desc.control & DESC_VALID)) { dev_dbg(mod->dev, "%s: no free buffers\n", __func__); return -ENOMEM; } /* switch to the data page, copy the data */ ican3_set_page(mod, desc.pointer); memcpy_toio(mod->dpm, msg, sizeof(*msg)); /* switch back to the descriptor, set the valid bit, write it back */ ican3_set_page(mod, QUEUE_FROMHOST_MID); desc.control ^= DESC_VALID; memcpy_toio(desc_addr, &desc, sizeof(desc)); /* update the tx number */ mod->tx_num = (desc.control & DESC_WRAP) ? 0 : (mod->tx_num + 1); return 0; } /* * LOCKING: must hold mod->lock */ static int ican3_new_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg) { struct ican3_new_desc desc; void __iomem *desc_addr = mod->dpm + (mod->rx_num * sizeof(desc)); /* switch to the tohost queue, and read the buffer descriptor */ ican3_set_page(mod, QUEUE_TOHOST); memcpy_fromio(&desc, desc_addr, sizeof(desc)); if (!(desc.control & DESC_VALID)) { dev_dbg(mod->dev, "%s: no buffers to recv\n", __func__); return -ENOMEM; } /* switch to the data page, copy the data */ ican3_set_page(mod, desc.pointer); memcpy_fromio(msg, mod->dpm, sizeof(*msg)); /* switch back to the descriptor, toggle the valid bit, write it back */ ican3_set_page(mod, QUEUE_TOHOST); desc.control ^= DESC_VALID; memcpy_toio(desc_addr, &desc, sizeof(desc)); /* update the rx number */ mod->rx_num = (desc.control & DESC_WRAP) ? 
0 : (mod->rx_num + 1); return 0; } /* * Message Send / Recv Helpers */ static int ican3_send_msg(struct ican3_dev *mod, struct ican3_msg *msg) { unsigned long flags; int ret; spin_lock_irqsave(&mod->lock, flags); if (mod->iftype == 0) ret = ican3_old_send_msg(mod, msg); else ret = ican3_new_send_msg(mod, msg); spin_unlock_irqrestore(&mod->lock, flags); return ret; } static int ican3_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg) { unsigned long flags; int ret; spin_lock_irqsave(&mod->lock, flags); if (mod->iftype == 0) ret = ican3_old_recv_msg(mod, msg); else ret = ican3_new_recv_msg(mod, msg); spin_unlock_irqrestore(&mod->lock, flags); return ret; } /* * Quick Pre-constructed Messages */ static int __devinit ican3_msg_connect(struct ican3_dev *mod) { struct ican3_msg msg; memset(&msg, 0, sizeof(msg)); msg.spec = MSG_CONNECTI; msg.len = cpu_to_le16(0); return ican3_send_msg(mod, &msg); } static int __devexit ican3_msg_disconnect(struct ican3_dev *mod) { struct ican3_msg msg; memset(&msg, 0, sizeof(msg)); msg.spec = MSG_DISCONNECT; msg.len = cpu_to_le16(0); return ican3_send_msg(mod, &msg); } static int __devinit ican3_msg_newhostif(struct ican3_dev *mod) { struct ican3_msg msg; int ret; memset(&msg, 0, sizeof(msg)); msg.spec = MSG_NEWHOSTIF; msg.len = cpu_to_le16(0); /* If we're not using the old interface, switching seems bogus */ WARN_ON(mod->iftype != 0); ret = ican3_send_msg(mod, &msg); if (ret) return ret; /* mark the module as using the new host interface */ mod->iftype = 1; return 0; } static int __devinit ican3_msg_fasthostif(struct ican3_dev *mod) { struct ican3_msg msg; unsigned int addr; memset(&msg, 0, sizeof(msg)); msg.spec = MSG_INITFDPMQUEUE; msg.len = cpu_to_le16(8); /* write the tohost queue start address */ addr = DPM_PAGE_ADDR(mod->fastrx_start); msg.data[0] = addr & 0xff; msg.data[1] = (addr >> 8) & 0xff; msg.data[2] = (addr >> 16) & 0xff; msg.data[3] = (addr >> 24) & 0xff; /* write the fromhost queue start address */ addr = DPM_PAGE_ADDR(mod->fasttx_start); msg.data[4] = addr & 0xff; msg.data[5] = (addr >> 8) & 0xff; msg.data[6] = (addr >> 16) & 0xff; msg.data[7] = (addr >> 24) & 0xff; /* If we're not using the new interface yet, we cannot do this */ WARN_ON(mod->iftype != 1); return ican3_send_msg(mod, &msg); } /* * Setup the CAN filter to either accept or reject all * messages from the CAN bus. */ static int __devinit ican3_set_id_filter(struct ican3_dev *mod, bool accept) { struct ican3_msg msg; int ret; /* Standard Frame Format */ memset(&msg, 0, sizeof(msg)); msg.spec = MSG_SETAFILMASK; msg.len = cpu_to_le16(5); msg.data[0] = 0x00; /* IDLo LSB */ msg.data[1] = 0x00; /* IDLo MSB */ msg.data[2] = 0xff; /* IDHi LSB */ msg.data[3] = 0x07; /* IDHi MSB */ /* accept all frames for fast host if, or reject all frames */ msg.data[4] = accept ? SETAFILMASK_FASTIF : SETAFILMASK_REJECT; ret = ican3_send_msg(mod, &msg); if (ret) return ret; /* Extended Frame Format */ memset(&msg, 0, sizeof(msg)); msg.spec = MSG_SETAFILMASK; msg.len = cpu_to_le16(13); msg.data[0] = 0; /* MUX = 0 */ msg.data[1] = 0x00; /* IDLo LSB */ msg.data[2] = 0x00; msg.data[3] = 0x00; msg.data[4] = 0x20; /* IDLo MSB */ msg.data[5] = 0xff; /* IDHi LSB */ msg.data[6] = 0xff; msg.data[7] = 0xff; msg.data[8] = 0x3f; /* IDHi MSB */ /* accept all frames for fast host if, or reject all frames */ msg.data[9] = accept ? 
SETAFILMASK_FASTIF : SETAFILMASK_REJECT; return ican3_send_msg(mod, &msg); } /* * Bring the CAN bus online or offline */ static int ican3_set_bus_state(struct ican3_dev *mod, bool on) { struct ican3_msg msg; memset(&msg, 0, sizeof(msg)); msg.spec = on ? MSG_CONREQ : MSG_COFFREQ; msg.len = cpu_to_le16(0); return ican3_send_msg(mod, &msg); } static int ican3_set_termination(struct ican3_dev *mod, bool on) { struct ican3_msg msg; memset(&msg, 0, sizeof(msg)); msg.spec = MSG_HWCONF; msg.len = cpu_to_le16(2); msg.data[0] = 0x00; msg.data[1] = on ? HWCONF_TERMINATE_ON : HWCONF_TERMINATE_OFF; return ican3_send_msg(mod, &msg); } static int ican3_send_inquiry(struct ican3_dev *mod, u8 subspec) { struct ican3_msg msg; memset(&msg, 0, sizeof(msg)); msg.spec = MSG_INQUIRY; msg.len = cpu_to_le16(2); msg.data[0] = subspec; msg.data[1] = 0x00; return ican3_send_msg(mod, &msg); } static int ican3_set_buserror(struct ican3_dev *mod, u8 quota) { struct ican3_msg msg; memset(&msg, 0, sizeof(msg)); msg.spec = MSG_CCONFREQ; msg.len = cpu_to_le16(2); msg.data[0] = 0x00; msg.data[1] = quota; return ican3_send_msg(mod, &msg); } /* * ICAN3 to Linux CAN Frame Conversion */ static void ican3_to_can_frame(struct ican3_dev *mod, struct ican3_fast_desc *desc, struct can_frame *cf) { if ((desc->command & ICAN3_CAN_TYPE_MASK) == ICAN3_CAN_TYPE_SFF) { if (desc->data[1] & ICAN3_SFF_RTR) cf->can_id |= CAN_RTR_FLAG; cf->can_id |= desc->data[0] << 3; cf->can_id |= (desc->data[1] & 0xe0) >> 5; cf->can_dlc = desc->data[1] & ICAN3_CAN_DLC_MASK; memcpy(cf->data, &desc->data[2], sizeof(cf->data)); } else { cf->can_dlc = desc->data[0] & ICAN3_CAN_DLC_MASK; if (desc->data[0] & ICAN3_EFF_RTR) cf->can_id |= CAN_RTR_FLAG; if (desc->data[0] & ICAN3_EFF) { cf->can_id |= CAN_EFF_FLAG; cf->can_id |= desc->data[2] << 21; /* 28-21 */ cf->can_id |= desc->data[3] << 13; /* 20-13 */ cf->can_id |= desc->data[4] << 5; /* 12-5 */ cf->can_id |= (desc->data[5] & 0xf8) >> 3; } else { cf->can_id |= desc->data[2] << 3; /* 10-3 */ cf->can_id |= desc->data[3] >> 5; /* 2-0 */ } memcpy(cf->data, &desc->data[6], sizeof(cf->data)); } } static void can_frame_to_ican3(struct ican3_dev *mod, struct can_frame *cf, struct ican3_fast_desc *desc) { /* clear out any stale data in the descriptor */ memset(desc->data, 0, sizeof(desc->data)); /* we always use the extended format, with the ECHO flag set */ desc->command = ICAN3_CAN_TYPE_EFF; desc->data[0] |= cf->can_dlc; desc->data[1] |= ICAN3_ECHO; if (cf->can_id & CAN_RTR_FLAG) desc->data[0] |= ICAN3_EFF_RTR; /* pack the id into the correct places */ if (cf->can_id & CAN_EFF_FLAG) { desc->data[0] |= ICAN3_EFF; desc->data[2] = (cf->can_id & 0x1fe00000) >> 21; /* 28-21 */ desc->data[3] = (cf->can_id & 0x001fe000) >> 13; /* 20-13 */ desc->data[4] = (cf->can_id & 0x00001fe0) >> 5; /* 12-5 */ desc->data[5] = (cf->can_id & 0x0000001f) << 3; /* 4-0 */ } else { desc->data[2] = (cf->can_id & 0x7F8) >> 3; /* bits 10-3 */ desc->data[3] = (cf->can_id & 0x007) << 5; /* bits 2-0 */ } /* copy the data bits into the descriptor */ memcpy(&desc->data[6], cf->data, sizeof(cf->data)); } /* * Interrupt Handling */ /* * Handle an ID + Version message response from the firmware. We never generate * this message in production code, but it is very useful when debugging to be * able to display this message. 
*/ static void ican3_handle_idvers(struct ican3_dev *mod, struct ican3_msg *msg) { dev_dbg(mod->dev, "IDVERS response: %s\n", msg->data); } static void ican3_handle_msglost(struct ican3_dev *mod, struct ican3_msg *msg) { struct net_device *dev = mod->ndev; struct net_device_stats *stats = &dev->stats; struct can_frame *cf; struct sk_buff *skb; /* * Report that communication messages with the microcontroller firmware * are being lost. These are never CAN frames, so we do not generate an * error frame for userspace */ if (msg->spec == MSG_MSGLOST) { dev_err(mod->dev, "lost %d control messages\n", msg->data[0]); return; } /* * Oops, this indicates that we have lost messages in the fast queue, * which are exclusively CAN messages. Our driver isn't reading CAN * frames fast enough. * * We'll pretend that the SJA1000 told us that it ran out of buffer * space, because there is not a better message for this. */ skb = alloc_can_err_skb(dev, &cf); if (skb) { cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; stats->rx_errors++; stats->rx_bytes += cf->can_dlc; netif_rx(skb); } } /* * Handle CAN Event Indication Messages from the firmware * * The ICAN3 firmware provides the values of some SJA1000 registers when it * generates this message. The code below is largely copied from the * drivers/net/can/sja1000/sja1000.c file, and adapted as necessary */ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg) { struct net_device *dev = mod->ndev; struct net_device_stats *stats = &dev->stats; enum can_state state = mod->can.state; u8 status, isrc, rxerr, txerr; struct can_frame *cf; struct sk_buff *skb; /* we can only handle the SJA1000 part */ if (msg->data[1] != CEVTIND_CHIP_SJA1000) { dev_err(mod->dev, "unable to handle errors on non-SJA1000\n"); return -ENODEV; } /* check the message length for sanity */ if (le16_to_cpu(msg->len) < 6) { dev_err(mod->dev, "error message too short\n"); return -EINVAL; } skb = alloc_can_err_skb(dev, &cf); if (skb == NULL) return -ENOMEM; isrc = msg->data[0]; status = msg->data[3]; rxerr = msg->data[4]; txerr = msg->data[5]; /* data overrun interrupt */ if (isrc == CEVTIND_DOI || isrc == CEVTIND_LOST) { dev_dbg(mod->dev, "data overrun interrupt\n"); cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; stats->rx_over_errors++; stats->rx_errors++; } /* error warning + passive interrupt */ if (isrc == CEVTIND_EI) { dev_dbg(mod->dev, "error warning + passive interrupt\n"); if (status & SR_BS) { state = CAN_STATE_BUS_OFF; cf->can_id |= CAN_ERR_BUSOFF; can_bus_off(dev); } else if (status & SR_ES) { if (rxerr >= 128 || txerr >= 128) state = CAN_STATE_ERROR_PASSIVE; else state = CAN_STATE_ERROR_WARNING; } else { state = CAN_STATE_ERROR_ACTIVE; } } /* bus error interrupt */ if (isrc == CEVTIND_BEI) { u8 ecc = msg->data[2]; dev_dbg(mod->dev, "bus error interrupt\n"); mod->can.can_stats.bus_error++; stats->rx_errors++; cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; switch (ecc & ECC_MASK) { case ECC_BIT: cf->data[2] |= CAN_ERR_PROT_BIT; break; case ECC_FORM: cf->data[2] |= CAN_ERR_PROT_FORM; break; case ECC_STUFF: cf->data[2] |= CAN_ERR_PROT_STUFF; break; default: cf->data[2] |= CAN_ERR_PROT_UNSPEC; cf->data[3] = ecc & ECC_SEG; break; } if ((ecc & ECC_DIR) == 0) cf->data[2] |= CAN_ERR_PROT_TX; cf->data[6] = txerr; cf->data[7] = rxerr; } if (state != mod->can.state && (state == CAN_STATE_ERROR_WARNING || state == CAN_STATE_ERROR_PASSIVE)) { cf->can_id |= CAN_ERR_CRTL; if (state == CAN_STATE_ERROR_WARNING) { 
mod->can.can_stats.error_warning++; cf->data[1] = (txerr > rxerr) ? CAN_ERR_CRTL_TX_WARNING : CAN_ERR_CRTL_RX_WARNING; } else { mod->can.can_stats.error_passive++; cf->data[1] = (txerr > rxerr) ? CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE; } cf->data[6] = txerr; cf->data[7] = rxerr; } mod->can.state = state; stats->rx_errors++; stats->rx_bytes += cf->can_dlc; netif_rx(skb); return 0; } static void ican3_handle_inquiry(struct ican3_dev *mod, struct ican3_msg *msg) { switch (msg->data[0]) { case INQUIRY_STATUS: case INQUIRY_EXTENDED: mod->bec.rxerr = msg->data[5]; mod->bec.txerr = msg->data[6]; complete(&mod->buserror_comp); break; case INQUIRY_TERMINATION: mod->termination_enabled = msg->data[6] & HWCONF_TERMINATE_ON; complete(&mod->termination_comp); break; default: dev_err(mod->dev, "received an unknown inquiry response\n"); break; } } static void ican3_handle_unknown_message(struct ican3_dev *mod, struct ican3_msg *msg) { dev_warn(mod->dev, "received unknown message: spec 0x%.2x length %d\n", msg->spec, le16_to_cpu(msg->len)); } /* * Handle a control message from the firmware */ static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg) { dev_dbg(mod->dev, "%s: modno %d spec 0x%.2x len %d bytes\n", __func__, mod->num, msg->spec, le16_to_cpu(msg->len)); switch (msg->spec) { case MSG_IDVERS: ican3_handle_idvers(mod, msg); break; case MSG_MSGLOST: case MSG_FMSGLOST: ican3_handle_msglost(mod, msg); break; case MSG_CEVTIND: ican3_handle_cevtind(mod, msg); break; case MSG_INQUIRY: ican3_handle_inquiry(mod, msg); break; default: ican3_handle_unknown_message(mod, msg); break; } } /* * Check that there is room in the TX ring to transmit another skb * * LOCKING: must hold mod->lock */ static bool ican3_txok(struct ican3_dev *mod) { struct ican3_fast_desc __iomem *desc; u8 control; /* copy the control bits of the descriptor */ ican3_set_page(mod, mod->fasttx_start + (mod->fasttx_num / 16)); desc = mod->dpm + ((mod->fasttx_num % 16) * sizeof(*desc)); control = ioread8(&desc->control); /* if the control bits are not valid, then we have no more space */ if (!(control & DESC_VALID)) return false; return true; } /* * Receive one CAN frame from the hardware * * CONTEXT: must be called from user context */ static int ican3_recv_skb(struct ican3_dev *mod) { struct net_device *ndev = mod->ndev; struct net_device_stats *stats = &ndev->stats; struct ican3_fast_desc desc; void __iomem *desc_addr; struct can_frame *cf; struct sk_buff *skb; unsigned long flags; spin_lock_irqsave(&mod->lock, flags); /* copy the whole descriptor */ ican3_set_page(mod, mod->fastrx_start + (mod->fastrx_num / 16)); desc_addr = mod->dpm + ((mod->fastrx_num % 16) * sizeof(desc)); memcpy_fromio(&desc, desc_addr, sizeof(desc)); spin_unlock_irqrestore(&mod->lock, flags); /* check that we actually have a CAN frame */ if (!(desc.control & DESC_VALID)) return -ENOBUFS; /* allocate an skb */ skb = alloc_can_skb(ndev, &cf); if (unlikely(skb == NULL)) { stats->rx_dropped++; goto err_noalloc; } /* convert the ICAN3 frame into Linux CAN format */ ican3_to_can_frame(mod, &desc, cf); /* receive the skb, update statistics */ netif_receive_skb(skb); stats->rx_packets++; stats->rx_bytes += cf->can_dlc; err_noalloc: /* toggle the valid bit and return the descriptor to the ring */ desc.control ^= DESC_VALID; spin_lock_irqsave(&mod->lock, flags); ican3_set_page(mod, mod->fastrx_start + (mod->fastrx_num / 16)); memcpy_toio(desc_addr, &desc, 1); /* update the next buffer pointer */ mod->fastrx_num = (desc.control & DESC_WRAP) ? 
0 : (mod->fastrx_num + 1); /* there are still more buffers to process */ spin_unlock_irqrestore(&mod->lock, flags); return 0; } static int ican3_napi(struct napi_struct *napi, int budget) { struct ican3_dev *mod = container_of(napi, struct ican3_dev, napi); struct ican3_msg msg; unsigned long flags; int received = 0; int ret; /* process all communication messages */ while (true) { ret = ican3_recv_msg(mod, &msg); if (ret) break; ican3_handle_message(mod, &msg); } /* process all CAN frames from the fast interface */ while (received < budget) { ret = ican3_recv_skb(mod); if (ret) break; received++; } /* We have processed all packets that the adapter had, but it * was less than our budget, stop polling */ if (received < budget) napi_complete(napi); spin_lock_irqsave(&mod->lock, flags); /* Wake up the transmit queue if necessary */ if (netif_queue_stopped(mod->ndev) && ican3_txok(mod)) netif_wake_queue(mod->ndev); spin_unlock_irqrestore(&mod->lock, flags); /* re-enable interrupt generation */ iowrite8(1 << mod->num, &mod->ctrl->int_enable); return received; } static irqreturn_t ican3_irq(int irq, void *dev_id) { struct ican3_dev *mod = dev_id; u8 stat; /* * The interrupt status register on this device reports interrupts * as zeroes instead of using ones like most other devices */ stat = ioread8(&mod->ctrl->int_disable) & (1 << mod->num); if (stat == (1 << mod->num)) return IRQ_NONE; /* clear the MODULbus interrupt from the microcontroller */ ioread8(&mod->dpmctrl->interrupt); /* disable interrupt generation, schedule the NAPI poller */ iowrite8(1 << mod->num, &mod->ctrl->int_disable); napi_schedule(&mod->napi); return IRQ_HANDLED; } /* * Firmware reset, startup, and shutdown */ /* * Reset an ICAN module to its power-on state * * CONTEXT: no network device registered */ static int ican3_reset_module(struct ican3_dev *mod) { unsigned long start; u8 runold, runnew; /* disable interrupts so no more work is scheduled */ iowrite8(1 << mod->num, &mod->ctrl->int_disable); /* the first unallocated page in the DPM is #9 */ mod->free_page = DPM_FREE_START; ican3_set_page(mod, QUEUE_OLD_CONTROL); runold = ioread8(mod->dpm + TARGET_RUNNING); /* reset the module */ iowrite8(0x00, &mod->dpmctrl->hwreset); /* wait until the module has finished resetting and is running */ start = jiffies; do { ican3_set_page(mod, QUEUE_OLD_CONTROL); runnew = ioread8(mod->dpm + TARGET_RUNNING); if (runnew == (runold ^ 0xff)) return 0; msleep(10); } while (time_before(jiffies, start + HZ / 4)); dev_err(mod->dev, "failed to reset CAN module\n"); return -ETIMEDOUT; } static void __devexit ican3_shutdown_module(struct ican3_dev *mod) { ican3_msg_disconnect(mod); ican3_reset_module(mod); } /* * Startup an ICAN module, bringing it into fast mode */ static int __devinit ican3_startup_module(struct ican3_dev *mod) { int ret; ret = ican3_reset_module(mod); if (ret) { dev_err(mod->dev, "unable to reset module\n"); return ret; } /* re-enable interrupts so we can send messages */ iowrite8(1 << mod->num, &mod->ctrl->int_enable); ret = ican3_msg_connect(mod); if (ret) { dev_err(mod->dev, "unable to connect to module\n"); return ret; } ican3_init_new_host_interface(mod); ret = ican3_msg_newhostif(mod); if (ret) { dev_err(mod->dev, "unable to switch to new-style interface\n"); return ret; } /* default to "termination on" */ ret = ican3_set_termination(mod, true); if (ret) { dev_err(mod->dev, "unable to enable termination\n"); return ret; } /* default to "bus errors enabled" */ ret = ican3_set_buserror(mod, ICAN3_BUSERR_QUOTA_MAX); if (ret) { 
dev_err(mod->dev, "unable to set bus-error\n"); return ret; } ican3_init_fast_host_interface(mod); ret = ican3_msg_fasthostif(mod); if (ret) { dev_err(mod->dev, "unable to switch to fast host interface\n"); return ret; } ret = ican3_set_id_filter(mod, true); if (ret) { dev_err(mod->dev, "unable to set acceptance filter\n"); return ret; } return 0; } /* * CAN Network Device */ static int ican3_open(struct net_device *ndev) { struct ican3_dev *mod = netdev_priv(ndev); u8 quota; int ret; /* open the CAN layer */ ret = open_candev(ndev); if (ret) { dev_err(mod->dev, "unable to start CAN layer\n"); return ret; } /* set the bus error generation state appropriately */ if (mod->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) quota = ICAN3_BUSERR_QUOTA_MAX; else quota = 0; ret = ican3_set_buserror(mod, quota); if (ret) { dev_err(mod->dev, "unable to set bus-error\n"); close_candev(ndev); return ret; } /* bring the bus online */ ret = ican3_set_bus_state(mod, true); if (ret) { dev_err(mod->dev, "unable to set bus-on\n"); close_candev(ndev); return ret; } /* start up the network device */ mod->can.state = CAN_STATE_ERROR_ACTIVE; netif_start_queue(ndev); return 0; } static int ican3_stop(struct net_device *ndev) { struct ican3_dev *mod = netdev_priv(ndev); int ret; /* stop the network device xmit routine */ netif_stop_queue(ndev); mod->can.state = CAN_STATE_STOPPED; /* bring the bus offline, stop receiving packets */ ret = ican3_set_bus_state(mod, false); if (ret) { dev_err(mod->dev, "unable to set bus-off\n"); return ret; } /* close the CAN layer */ close_candev(ndev); return 0; } static int ican3_xmit(struct sk_buff *skb, struct net_device *ndev) { struct ican3_dev *mod = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; struct can_frame *cf = (struct can_frame *)skb->data; struct ican3_fast_desc desc; void __iomem *desc_addr; unsigned long flags; spin_lock_irqsave(&mod->lock, flags); /* check that we can actually transmit */ if (!ican3_txok(mod)) { dev_err(mod->dev, "no free descriptors, stopping queue\n"); netif_stop_queue(ndev); spin_unlock_irqrestore(&mod->lock, flags); return NETDEV_TX_BUSY; } /* copy the control bits of the descriptor */ ican3_set_page(mod, mod->fasttx_start + (mod->fasttx_num / 16)); desc_addr = mod->dpm + ((mod->fasttx_num % 16) * sizeof(desc)); memset(&desc, 0, sizeof(desc)); memcpy_fromio(&desc, desc_addr, 1); /* convert the Linux CAN frame into ICAN3 format */ can_frame_to_ican3(mod, cf, &desc); /* * the programming manual says that you must set the IVALID bit, then * interrupt, then set the valid bit. Quite weird, but it seems to be * required for this to work */ desc.control |= DESC_IVALID; memcpy_toio(desc_addr, &desc, sizeof(desc)); /* generate a MODULbus interrupt to the microcontroller */ iowrite8(0x01, &mod->dpmctrl->interrupt); desc.control ^= DESC_VALID; memcpy_toio(desc_addr, &desc, sizeof(desc)); /* update the next buffer pointer */ mod->fasttx_num = (desc.control & DESC_WRAP) ? 0 : (mod->fasttx_num + 1); /* update statistics */ stats->tx_packets++; stats->tx_bytes += cf->can_dlc; kfree_skb(skb); /* * This hardware doesn't have TX-done notifications, so we'll try and * emulate it the best we can using ECHO skbs. Get the next TX * descriptor, and see if we have room to send. If not, stop the queue. * It will be woken when the ECHO skb for the current packet is recv'd. 
*/ /* copy the control bits of the descriptor */ if (!ican3_txok(mod)) netif_stop_queue(ndev); spin_unlock_irqrestore(&mod->lock, flags); return NETDEV_TX_OK; } static const struct net_device_ops ican3_netdev_ops = { .ndo_open = ican3_open, .ndo_stop = ican3_stop, .ndo_start_xmit = ican3_xmit, }; /* * Low-level CAN Device */ /* This structure was stolen from drivers/net/can/sja1000/sja1000.c */ static struct can_bittiming_const ican3_bittiming_const = { .name = DRV_NAME, .tseg1_min = 1, .tseg1_max = 16, .tseg2_min = 1, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 64, .brp_inc = 1, }; /* * This routine was stolen from drivers/net/can/sja1000/sja1000.c * * The bittiming register command for the ICAN3 just sets the bit timing * registers on the SJA1000 chip directly */ static int ican3_set_bittiming(struct net_device *ndev) { struct ican3_dev *mod = netdev_priv(ndev); struct can_bittiming *bt = &mod->can.bittiming; struct ican3_msg msg; u8 btr0, btr1; btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6); btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) | (((bt->phase_seg2 - 1) & 0x7) << 4); if (mod->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) btr1 |= 0x80; memset(&msg, 0, sizeof(msg)); msg.spec = MSG_CBTRREQ; msg.len = cpu_to_le16(4); msg.data[0] = 0x00; msg.data[1] = 0x00; msg.data[2] = btr0; msg.data[3] = btr1; return ican3_send_msg(mod, &msg); } static int ican3_set_mode(struct net_device *ndev, enum can_mode mode) { struct ican3_dev *mod = netdev_priv(ndev); int ret; if (mode != CAN_MODE_START) return -ENOTSUPP; /* bring the bus online */ ret = ican3_set_bus_state(mod, true); if (ret) { dev_err(mod->dev, "unable to set bus-on\n"); return ret; } /* start up the network device */ mod->can.state = CAN_STATE_ERROR_ACTIVE; if (netif_queue_stopped(ndev)) netif_wake_queue(ndev); return 0; } static int ican3_get_berr_counter(const struct net_device *ndev, struct can_berr_counter *bec) { struct ican3_dev *mod = netdev_priv(ndev); int ret; ret = ican3_send_inquiry(mod, INQUIRY_STATUS); if (ret) return ret; ret = wait_for_completion_timeout(&mod->buserror_comp, HZ); if (ret <= 0) { dev_info(mod->dev, "%s timed out\n", __func__); return -ETIMEDOUT; } bec->rxerr = mod->bec.rxerr; bec->txerr = mod->bec.txerr; return 0; } /* * Sysfs Attributes */ static ssize_t ican3_sysfs_show_term(struct device *dev, struct device_attribute *attr, char *buf) { struct ican3_dev *mod = netdev_priv(to_net_dev(dev)); int ret; ret = ican3_send_inquiry(mod, INQUIRY_TERMINATION); if (ret) return ret; ret = wait_for_completion_timeout(&mod->termination_comp, HZ); if (ret <= 0) { dev_info(mod->dev, "%s timed out\n", __func__); return -ETIMEDOUT; } return snprintf(buf, PAGE_SIZE, "%u\n", mod->termination_enabled); } static ssize_t ican3_sysfs_set_term(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ican3_dev *mod = netdev_priv(to_net_dev(dev)); unsigned long enable; int ret; if (strict_strtoul(buf, 0, &enable)) return -EINVAL; ret = ican3_set_termination(mod, enable); if (ret) return ret; return count; } static DEVICE_ATTR(termination, S_IWUSR | S_IRUGO, ican3_sysfs_show_term, ican3_sysfs_set_term); static struct attribute *ican3_sysfs_attrs[] = { &dev_attr_termination.attr, NULL, }; static struct attribute_group ican3_sysfs_attr_group = { .attrs = ican3_sysfs_attrs, }; /* * PCI Subsystem */ static int __devinit ican3_probe(struct platform_device *pdev) { struct janz_platform_data *pdata; struct net_device *ndev; struct ican3_dev *mod; struct resource *res; struct device 
*dev; int ret; pdata = pdev->dev.platform_data; if (!pdata) return -ENXIO; dev_dbg(&pdev->dev, "probe: module number %d\n", pdata->modno); /* save the struct device for printing */ dev = &pdev->dev; /* allocate the CAN device and private data */ ndev = alloc_candev(sizeof(*mod), 0); if (!ndev) { dev_err(dev, "unable to allocate CANdev\n"); ret = -ENOMEM; goto out_return; } platform_set_drvdata(pdev, ndev); mod = netdev_priv(ndev); mod->ndev = ndev; mod->dev = &pdev->dev; mod->num = pdata->modno; netif_napi_add(ndev, &mod->napi, ican3_napi, ICAN3_RX_BUFFERS); spin_lock_init(&mod->lock); init_completion(&mod->termination_comp); init_completion(&mod->buserror_comp); /* setup device-specific sysfs attributes */ ndev->sysfs_groups[0] = &ican3_sysfs_attr_group; /* the first unallocated page in the DPM is 9 */ mod->free_page = DPM_FREE_START; ndev->netdev_ops = &ican3_netdev_ops; ndev->flags |= IFF_ECHO; SET_NETDEV_DEV(ndev, &pdev->dev); mod->can.clock.freq = ICAN3_CAN_CLOCK; mod->can.bittiming_const = &ican3_bittiming_const; mod->can.do_set_bittiming = ican3_set_bittiming; mod->can.do_set_mode = ican3_set_mode; mod->can.do_get_berr_counter = ican3_get_berr_counter; mod->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_BERR_REPORTING; /* find our IRQ number */ mod->irq = platform_get_irq(pdev, 0); if (mod->irq < 0) { dev_err(dev, "IRQ line not found\n"); ret = -ENODEV; goto out_free_ndev; } ndev->irq = mod->irq; /* get access to the MODULbus registers for this module */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(dev, "MODULbus registers not found\n"); ret = -ENODEV; goto out_free_ndev; } mod->dpm = ioremap(res->start, resource_size(res)); if (!mod->dpm) { dev_err(dev, "MODULbus registers not ioremap\n"); ret = -ENOMEM; goto out_free_ndev; } mod->dpmctrl = mod->dpm + DPM_PAGE_SIZE; /* get access to the control registers for this module */ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!res) { dev_err(dev, "CONTROL registers not found\n"); ret = -ENODEV; goto out_iounmap_dpm; } mod->ctrl = ioremap(res->start, resource_size(res)); if (!mod->ctrl) { dev_err(dev, "CONTROL registers not ioremap\n"); ret = -ENOMEM; goto out_iounmap_dpm; } /* disable our IRQ, then hookup the IRQ handler */ iowrite8(1 << mod->num, &mod->ctrl->int_disable); ret = request_irq(mod->irq, ican3_irq, IRQF_SHARED, DRV_NAME, mod); if (ret) { dev_err(dev, "unable to request IRQ\n"); goto out_iounmap_ctrl; } /* reset and initialize the CAN controller into fast mode */ napi_enable(&mod->napi); ret = ican3_startup_module(mod); if (ret) { dev_err(dev, "%s: unable to start CANdev\n", __func__); goto out_free_irq; } /* register with the Linux CAN layer */ ret = register_candev(ndev); if (ret) { dev_err(dev, "%s: unable to register CANdev\n", __func__); goto out_free_irq; } dev_info(dev, "module %d: registered CAN device\n", pdata->modno); return 0; out_free_irq: napi_disable(&mod->napi); iowrite8(1 << mod->num, &mod->ctrl->int_disable); free_irq(mod->irq, mod); out_iounmap_ctrl: iounmap(mod->ctrl); out_iounmap_dpm: iounmap(mod->dpm); out_free_ndev: free_candev(ndev); out_return: return ret; } static int __devexit ican3_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct ican3_dev *mod = netdev_priv(ndev); /* unregister the netdevice, stop interrupts */ unregister_netdev(ndev); napi_disable(&mod->napi); iowrite8(1 << mod->num, &mod->ctrl->int_disable); free_irq(mod->irq, mod); /* put the module into reset */ ican3_shutdown_module(mod); 
/* unmap all registers */ iounmap(mod->ctrl); iounmap(mod->dpm); free_candev(ndev); return 0; } static struct platform_driver ican3_driver = { .driver = { .name = DRV_NAME, .owner = THIS_MODULE, }, .probe = ican3_probe, .remove = __devexit_p(ican3_remove), }; module_platform_driver(ican3_driver); MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>"); MODULE_DESCRIPTION("Janz MODULbus VMOD-ICAN3 Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:janz-ican3");
gpl-2.0
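The janz-ican3.c record above reaches its fast-queue descriptor rings through a 256-byte paged window into the DPM. Since an ican3_fast_desc is 16 bytes, each page holds exactly 16 descriptors, which explains the recurring `num / 16` page selection and `(num % 16) * sizeof(desc)` offset arithmetic; the ring index wraps to zero when a descriptor carries DESC_WRAP. A compact C sketch of that addressing follows; the SK_-prefixed names are invented for illustration and are not driver symbols.

/* Sketch (not driver code) of how the ican3 driver locates a
 * fast-queue descriptor inside the paged DPM. */
#include <stdint.h>

#define SK_PAGE_SIZE 256u   /* bytes per DPM page (DPM_PAGE_SIZE) */
#define SK_DESC_SIZE 16u    /* sizeof(struct ican3_fast_desc) */
#define SK_DESC_WRAP 0x40u  /* wrap flag in the control byte */

/* DPM page number holding ring entry 'num'. */
static unsigned int desc_page(unsigned int start_page, unsigned int num)
{
    return start_page + num / (SK_PAGE_SIZE / SK_DESC_SIZE);
}

/* Byte offset of ring entry 'num' within its page. */
static unsigned int desc_offset(unsigned int num)
{
    return (num % (SK_PAGE_SIZE / SK_DESC_SIZE)) * SK_DESC_SIZE;
}

/* Advance the ring index, honouring the wrap flag in 'control',
 * exactly as ican3_recv_skb() and ican3_xmit() do. */
static unsigned int next_index(unsigned int num, uint8_t control)
{
    return (control & SK_DESC_WRAP) ? 0 : num + 1;
}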
minz1/xosp_kernel_xiaomi_msm8956
drivers/media/platform/exynos4-is/media-dev.c
1809
38179
/* * S5P/EXYNOS4 SoC series camera host interface media device driver * * Copyright (C) 2011 - 2012 Samsung Electronics Co., Ltd. * Sylwester Nawrocki <s.nawrocki@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 2 of the License, * or (at your option) any later version. */ #include <linux/bug.h> #include <linux/device.h> #include <linux/errno.h> #include <linux/i2c.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/of_device.h> #include <linux/of_i2c.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/types.h> #include <linux/slab.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-of.h> #include <media/media-device.h> #include <media/s5p_fimc.h> #include "media-dev.h" #include "fimc-core.h" #include "fimc-is.h" #include "fimc-lite.h" #include "mipi-csis.h" static int __fimc_md_set_camclk(struct fimc_md *fmd, struct fimc_source_info *si, bool on); /** * fimc_pipeline_prepare - update pipeline information with subdevice pointers * @me: media entity terminating the pipeline * * Caller holds the graph mutex. */ static void fimc_pipeline_prepare(struct fimc_pipeline *p, struct media_entity *me) { struct v4l2_subdev *sd; int i; for (i = 0; i < IDX_MAX; i++) p->subdevs[i] = NULL; while (1) { struct media_pad *pad = NULL; /* Find remote source pad */ for (i = 0; i < me->num_pads; i++) { struct media_pad *spad = &me->pads[i]; if (!(spad->flags & MEDIA_PAD_FL_SINK)) continue; pad = media_entity_remote_source(spad); if (pad) break; } if (pad == NULL || media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV) break; sd = media_entity_to_v4l2_subdev(pad->entity); switch (sd->grp_id) { case GRP_ID_FIMC_IS_SENSOR: case GRP_ID_SENSOR: p->subdevs[IDX_SENSOR] = sd; break; case GRP_ID_CSIS: p->subdevs[IDX_CSIS] = sd; break; case GRP_ID_FLITE: p->subdevs[IDX_FLITE] = sd; break; case GRP_ID_FIMC: /* No need to control FIMC subdev through subdev ops */ break; case GRP_ID_FIMC_IS: p->subdevs[IDX_IS_ISP] = sd; break; default: break; } me = &sd->entity; if (me->num_pads == 1) break; } } /** * __subdev_set_power - change power state of a single subdev * @sd: subdevice to change power state for * @on: 1 to enable power or 0 to disable * * Return result of s_power subdev operation or -ENXIO if sd argument * is NULL. Return 0 if the subdevice does not implement s_power. */ static int __subdev_set_power(struct v4l2_subdev *sd, int on) { int *use_count; int ret; if (sd == NULL) return -ENXIO; use_count = &sd->entity.use_count; if (on && (*use_count)++ > 0) return 0; else if (!on && (*use_count == 0 || --(*use_count) > 0)) return 0; ret = v4l2_subdev_call(sd, core, s_power, on); return ret != -ENOIOCTLCMD ? ret : 0; } /** * fimc_pipeline_s_power - change power state of all pipeline subdevs * @fimc: fimc device terminating the pipeline * @state: true to power on, false to power off * * Needs to be called with the graph mutex held. 
 */
static int fimc_pipeline_s_power(struct fimc_pipeline *p, bool on)
{
	static const u8 seq[2][IDX_MAX - 1] = {
		{ IDX_IS_ISP, IDX_SENSOR, IDX_CSIS, IDX_FLITE },
		{ IDX_CSIS, IDX_FLITE, IDX_SENSOR, IDX_IS_ISP },
	};
	int i, ret = 0;

	if (p->subdevs[IDX_SENSOR] == NULL)
		return -ENXIO;

	for (i = 0; i < IDX_MAX - 1; i++) {
		unsigned int idx = seq[on][i];

		ret = __subdev_set_power(p->subdevs[idx], on);
		if (ret < 0 && ret != -ENXIO)
			goto error;
	}
	return 0;
error:
	for (; i >= 0; i--) {
		unsigned int idx = seq[on][i];

		__subdev_set_power(p->subdevs[idx], !on);
	}
	return ret;
}

/**
 * __fimc_pipeline_open - update the pipeline information, enable power
 *                        of all pipeline subdevs and the sensor clock
 * @p: fimc pipeline
 * @me: media entity to start graph walk with
 * @prepare: true to walk the current pipeline and acquire all subdevs
 *
 * Called with the graph mutex held.
 */
static int __fimc_pipeline_open(struct fimc_pipeline *p,
				struct media_entity *me, bool prepare)
{
	struct fimc_md *fmd;
	struct v4l2_subdev *sd;
	int ret;

	if (WARN_ON(p == NULL || me == NULL))
		return -EINVAL;

	fmd = entity_to_fimc_mdev(me);

	if (prepare)
		fimc_pipeline_prepare(p, me);

	sd = p->subdevs[IDX_SENSOR];
	if (sd == NULL)
		return -EINVAL;

	/* Enable PXLASYNC clock if this pipeline includes FIMC-IS */
	if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP]) {
		ret = clk_prepare_enable(fmd->wbclk[CLK_IDX_WB_B]);
		if (ret < 0)
			return ret;
	}

	ret = fimc_md_set_camclk(sd, true);
	if (ret < 0)
		goto err_wbclk;

	ret = fimc_pipeline_s_power(p, 1);
	if (!ret)
		return 0;

	fimc_md_set_camclk(sd, false);
err_wbclk:
	if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP])
		clk_disable_unprepare(fmd->wbclk[CLK_IDX_WB_B]);

	return ret;
}

/**
 * __fimc_pipeline_close - disable the sensor clock and pipeline power
 * @p: fimc pipeline
 *
 * Disable power of all subdevs and turn the external sensor clock off.
 */
static int __fimc_pipeline_close(struct fimc_pipeline *p)
{
	struct v4l2_subdev *sd = p ? p->subdevs[IDX_SENSOR] : NULL;
	struct fimc_md *fmd;
	int ret = 0;

	if (WARN_ON(sd == NULL))
		return -EINVAL;

	if (p->subdevs[IDX_SENSOR]) {
		ret = fimc_pipeline_s_power(p, 0);
		fimc_md_set_camclk(sd, false);
	}

	fmd = entity_to_fimc_mdev(&sd->entity);
	/* Disable PXLASYNC clock if this pipeline includes FIMC-IS */
	if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP])
		clk_disable_unprepare(fmd->wbclk[CLK_IDX_WB_B]);

	return ret == -ENXIO ? 0 : ret;
}

/**
 * __fimc_pipeline_s_stream - call s_stream() on pipeline subdevs
 * @p: video pipeline structure
 * @on: passed as the s_stream() callback argument
 */
static int __fimc_pipeline_s_stream(struct fimc_pipeline *p, bool on)
{
	static const u8 seq[2][IDX_MAX] = {
		{ IDX_FIMC, IDX_SENSOR, IDX_IS_ISP, IDX_CSIS, IDX_FLITE },
		{ IDX_CSIS, IDX_FLITE, IDX_FIMC, IDX_SENSOR, IDX_IS_ISP },
	};
	int i, ret = 0;

	if (p->subdevs[IDX_SENSOR] == NULL)
		return -ENODEV;

	for (i = 0; i < IDX_MAX; i++) {
		unsigned int idx = seq[on][i];

		ret = v4l2_subdev_call(p->subdevs[idx], video, s_stream, on);
		if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
			goto error;
	}
	return 0;
error:
	for (; i >= 0; i--) {
		unsigned int idx = seq[on][i];

		v4l2_subdev_call(p->subdevs[idx], video, s_stream, !on);
	}
	return ret;
}

/* Media pipeline operations for the FIMC/FIMC-LITE video device driver */
static const struct fimc_pipeline_ops fimc_pipeline_ops = {
	.open		= __fimc_pipeline_open,
	.close		= __fimc_pipeline_close,
	.set_stream	= __fimc_pipeline_s_stream,
};

/*
 * Sensor subdevice helper functions
 */
static struct v4l2_subdev *fimc_md_register_sensor(struct fimc_md *fmd,
						struct fimc_source_info *si)
{
	struct i2c_adapter *adapter;
	struct v4l2_subdev *sd = NULL;

	if (!si || !fmd)
		return NULL;
	/*
	 * If the FIMC bus type is not Writeback FIFO, assume it is the same
	 * as sensor_bus_type.
	 */
	si->fimc_bus_type = si->sensor_bus_type;

	adapter = i2c_get_adapter(si->i2c_bus_num);
	if (!adapter) {
		v4l2_warn(&fmd->v4l2_dev,
			  "Failed to get I2C adapter %d, deferring probe\n",
			  si->i2c_bus_num);
		return ERR_PTR(-EPROBE_DEFER);
	}
	sd = v4l2_i2c_new_subdev_board(&fmd->v4l2_dev, adapter,
				       si->board_info, NULL);
	if (IS_ERR_OR_NULL(sd)) {
		i2c_put_adapter(adapter);
		v4l2_warn(&fmd->v4l2_dev,
			  "Failed to acquire subdev %s, deferring probe\n",
			  si->board_info->type);
		return ERR_PTR(-EPROBE_DEFER);
	}
	v4l2_set_subdev_hostdata(sd, si);
	sd->grp_id = GRP_ID_SENSOR;

	v4l2_info(&fmd->v4l2_dev, "Registered sensor subdevice %s\n",
		  sd->name);
	return sd;
}

static void fimc_md_unregister_sensor(struct v4l2_subdev *sd)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct i2c_adapter *adapter;

	if (!client)
		return;
	v4l2_device_unregister_subdev(sd);
	if (!client->dev.of_node) {
		adapter = client->adapter;
		i2c_unregister_device(client);
		if (adapter)
			i2c_put_adapter(adapter);
	}
}

#ifdef CONFIG_OF
/* Register the I2C client subdev associated with @node. */
static int fimc_md_of_add_sensor(struct fimc_md *fmd,
				 struct device_node *node, int index)
{
	struct fimc_sensor_info *si;
	struct i2c_client *client;
	struct v4l2_subdev *sd;
	int ret;

	if (WARN_ON(index >= ARRAY_SIZE(fmd->sensor)))
		return -EINVAL;

	si = &fmd->sensor[index];
	client = of_find_i2c_device_by_node(node);
	if (!client)
		return -EPROBE_DEFER;

	device_lock(&client->dev);

	if (!client->driver ||
	    !try_module_get(client->driver->driver.owner)) {
		ret = -EPROBE_DEFER;
		v4l2_info(&fmd->v4l2_dev, "No driver found for %s\n",
			  node->full_name);
		goto dev_put;
	}

	/* Enable the sensor's master clock */
	ret = __fimc_md_set_camclk(fmd, &si->pdata, true);
	if (ret < 0)
		goto mod_put;
	sd = i2c_get_clientdata(client);
	ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
	__fimc_md_set_camclk(fmd, &si->pdata, false);
	if (ret < 0)
		goto mod_put;

	v4l2_set_subdev_hostdata(sd, &si->pdata);
	if (si->pdata.fimc_bus_type == FIMC_BUS_TYPE_ISP_WRITEBACK)
		sd->grp_id = GRP_ID_FIMC_IS_SENSOR;
	else
		sd->grp_id = GRP_ID_SENSOR;

	si->subdev = sd;
	v4l2_info(&fmd->v4l2_dev, "Registered sensor subdevice: %s (%d)\n",
		  sd->name, fmd->num_sensors);
	fmd->num_sensors++;

mod_put:
	module_put(client->driver->driver.owner);
dev_put:
	device_unlock(&client->dev);
	put_device(&client->dev);
	return ret;
}

/* Parse a port node and register as a sub-device any sensor specified there. */
static int fimc_md_parse_port_node(struct fimc_md *fmd,
				   struct device_node *port,
				   unsigned int index)
{
	struct device_node *rem, *ep, *np;
	struct fimc_source_info *pd;
	struct v4l2_of_endpoint endpoint;
	int ret;
	u32 val;

	pd = &fmd->sensor[index].pdata;

	/* Assume here a port node can have only one endpoint node. */
	ep = of_get_next_child(port, NULL);
	if (!ep)
		return 0;

	v4l2_of_parse_endpoint(ep, &endpoint);
	if (WARN_ON(endpoint.port == 0) || index >= FIMC_MAX_SENSORS) {
		of_node_put(ep);
		return -EINVAL;
	}

	pd->mux_id = (endpoint.port - 1) & 0x1;

	rem = v4l2_of_get_remote_port_parent(ep);
	if (rem == NULL) {
		v4l2_info(&fmd->v4l2_dev, "Remote device at %s not found\n",
			  ep->full_name);
		of_node_put(ep);
		return 0;
	}
	of_node_put(ep);

	if (!of_property_read_u32(rem, "samsung,camclk-out", &val))
		pd->clk_id = val;

	if (!of_property_read_u32(rem, "clock-frequency", &val))
		pd->clk_frequency = val;

	if (pd->clk_frequency == 0) {
		v4l2_err(&fmd->v4l2_dev, "Wrong clock frequency at node %s\n",
			 rem->full_name);
		of_node_put(rem);
		return -EINVAL;
	}

	if (fimc_input_is_parallel(endpoint.port)) {
		if (endpoint.bus_type == V4L2_MBUS_PARALLEL)
			pd->sensor_bus_type = FIMC_BUS_TYPE_ITU_601;
		else
			pd->sensor_bus_type = FIMC_BUS_TYPE_ITU_656;
		pd->flags = endpoint.bus.parallel.flags;
	} else if (fimc_input_is_mipi_csi(endpoint.port)) {
		/*
		 * MIPI CSI-2: only the input mux selection and
		 * the sensor's clock frequency are needed.
		 */
		pd->sensor_bus_type = FIMC_BUS_TYPE_MIPI_CSI2;
	} else {
		v4l2_err(&fmd->v4l2_dev, "Wrong port id (%u) at node %s\n",
			 endpoint.port, rem->full_name);
	}
	/*
	 * For sensors handled by FIMC-IS, which are placed under the i2c-isp
	 * device node, FIMC is connected to the FIMC-IS through its ISP
	 * Writeback input. Sensors are attached to the FIMC-LITE hostdata
	 * interface directly or through MIPI-CSIS, depending on the external
	 * media bus used. This needs to be handled in a more reliable way,
	 * not by just checking the parent's node name.
*/ np = of_get_parent(rem); if (np && !of_node_cmp(np->name, "i2c-isp")) pd->fimc_bus_type = FIMC_BUS_TYPE_ISP_WRITEBACK; else pd->fimc_bus_type = pd->sensor_bus_type; ret = fimc_md_of_add_sensor(fmd, rem, index); of_node_put(rem); return ret; } /* Register all SoC external sub-devices */ static int fimc_md_of_sensors_register(struct fimc_md *fmd, struct device_node *np) { struct device_node *parent = fmd->pdev->dev.of_node; struct device_node *node, *ports; int index = 0; int ret; /* Attach sensors linked to MIPI CSI-2 receivers */ for_each_available_child_of_node(parent, node) { struct device_node *port; if (of_node_cmp(node->name, "csis")) continue; /* The csis node can have only port subnode. */ port = of_get_next_child(node, NULL); if (!port) continue; ret = fimc_md_parse_port_node(fmd, port, index); if (ret < 0) return ret; index++; } /* Attach sensors listed in the parallel-ports node */ ports = of_get_child_by_name(parent, "parallel-ports"); if (!ports) return 0; for_each_child_of_node(ports, node) { ret = fimc_md_parse_port_node(fmd, node, index); if (ret < 0) break; index++; } return 0; } static int __of_get_csis_id(struct device_node *np) { u32 reg = 0; np = of_get_child_by_name(np, "port"); if (!np) return -EINVAL; of_property_read_u32(np, "reg", &reg); return reg - FIMC_INPUT_MIPI_CSI2_0; } #else #define fimc_md_of_sensors_register(fmd, np) (-ENOSYS) #define __of_get_csis_id(np) (-ENOSYS) #endif static int fimc_md_register_sensor_entities(struct fimc_md *fmd) { struct s5p_platform_fimc *pdata = fmd->pdev->dev.platform_data; struct device_node *of_node = fmd->pdev->dev.of_node; int num_clients = 0; int ret, i; /* * Runtime resume one of the FIMC entities to make sure * the sclk_cam clocks are not globally disabled. */ if (!fmd->pmf) return -ENXIO; ret = pm_runtime_get_sync(fmd->pmf); if (ret < 0) return ret; if (of_node) { fmd->num_sensors = 0; ret = fimc_md_of_sensors_register(fmd, of_node); } else if (pdata) { WARN_ON(pdata->num_clients > ARRAY_SIZE(fmd->sensor)); num_clients = min_t(u32, pdata->num_clients, ARRAY_SIZE(fmd->sensor)); fmd->num_sensors = num_clients; for (i = 0; i < num_clients; i++) { struct fimc_sensor_info *si = &fmd->sensor[i]; struct v4l2_subdev *sd; si->pdata = pdata->source_info[i]; ret = __fimc_md_set_camclk(fmd, &si->pdata, true); if (ret) break; sd = fimc_md_register_sensor(fmd, &si->pdata); ret = __fimc_md_set_camclk(fmd, &si->pdata, false); if (IS_ERR(sd)) { si->subdev = NULL; ret = PTR_ERR(sd); break; } si->subdev = sd; if (ret) break; } } pm_runtime_put(fmd->pmf); return ret; } /* * MIPI-CSIS, FIMC and FIMC-LITE platform devices registration. 
*/ static int register_fimc_lite_entity(struct fimc_md *fmd, struct fimc_lite *fimc_lite) { struct v4l2_subdev *sd; int ret; if (WARN_ON(fimc_lite->index >= FIMC_LITE_MAX_DEVS || fmd->fimc_lite[fimc_lite->index])) return -EBUSY; sd = &fimc_lite->subdev; sd->grp_id = GRP_ID_FLITE; v4l2_set_subdev_hostdata(sd, (void *)&fimc_pipeline_ops); ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd); if (!ret) fmd->fimc_lite[fimc_lite->index] = fimc_lite; else v4l2_err(&fmd->v4l2_dev, "Failed to register FIMC.LITE%d\n", fimc_lite->index); return ret; } static int register_fimc_entity(struct fimc_md *fmd, struct fimc_dev *fimc) { struct v4l2_subdev *sd; int ret; if (WARN_ON(fimc->id >= FIMC_MAX_DEVS || fmd->fimc[fimc->id])) return -EBUSY; sd = &fimc->vid_cap.subdev; sd->grp_id = GRP_ID_FIMC; v4l2_set_subdev_hostdata(sd, (void *)&fimc_pipeline_ops); ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd); if (!ret) { if (!fmd->pmf && fimc->pdev) fmd->pmf = &fimc->pdev->dev; fmd->fimc[fimc->id] = fimc; fimc->vid_cap.user_subdev_api = fmd->user_subdev_api; } else { v4l2_err(&fmd->v4l2_dev, "Failed to register FIMC.%d (%d)\n", fimc->id, ret); } return ret; } static int register_csis_entity(struct fimc_md *fmd, struct platform_device *pdev, struct v4l2_subdev *sd) { struct device_node *node = pdev->dev.of_node; int id, ret; id = node ? __of_get_csis_id(node) : max(0, pdev->id); if (WARN_ON(id < 0 || id >= CSIS_MAX_ENTITIES)) return -ENOENT; if (WARN_ON(fmd->csis[id].sd)) return -EBUSY; sd->grp_id = GRP_ID_CSIS; ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd); if (!ret) fmd->csis[id].sd = sd; else v4l2_err(&fmd->v4l2_dev, "Failed to register MIPI-CSIS.%d (%d)\n", id, ret); return ret; } static int register_fimc_is_entity(struct fimc_md *fmd, struct fimc_is *is) { struct v4l2_subdev *sd = &is->isp.subdev; int ret; ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd); if (ret) { v4l2_err(&fmd->v4l2_dev, "Failed to register FIMC-ISP (%d)\n", ret); return ret; } fmd->fimc_is = is; return 0; } static int fimc_md_register_platform_entity(struct fimc_md *fmd, struct platform_device *pdev, int plat_entity) { struct device *dev = &pdev->dev; int ret = -EPROBE_DEFER; void *drvdata; /* Lock to ensure dev->driver won't change. 
 */
	device_lock(dev);

	if (!dev->driver || !try_module_get(dev->driver->owner))
		goto dev_unlock;

	drvdata = dev_get_drvdata(dev);
	/* Some subdev didn't probe successfully, drvdata is NULL in that case */
	if (drvdata) {
		switch (plat_entity) {
		case IDX_FIMC:
			ret = register_fimc_entity(fmd, drvdata);
			break;
		case IDX_FLITE:
			ret = register_fimc_lite_entity(fmd, drvdata);
			break;
		case IDX_CSIS:
			ret = register_csis_entity(fmd, pdev, drvdata);
			break;
		case IDX_IS_ISP:
			ret = register_fimc_is_entity(fmd, drvdata);
			break;
		default:
			ret = -ENODEV;
		}
	}

	module_put(dev->driver->owner);
dev_unlock:
	device_unlock(dev);

	if (ret == -EPROBE_DEFER)
		dev_info(&fmd->pdev->dev, "deferring %s device registration\n",
			 dev_name(dev));
	else if (ret < 0)
		dev_err(&fmd->pdev->dev, "%s device registration failed (%d)\n",
			dev_name(dev), ret);
	return ret;
}

static int fimc_md_pdev_match(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	int plat_entity = -1;
	int ret;
	char *p;

	if (!get_device(dev))
		return -ENODEV;

	if (!strcmp(pdev->name, CSIS_DRIVER_NAME)) {
		plat_entity = IDX_CSIS;
	} else if (!strcmp(pdev->name, FIMC_LITE_DRV_NAME)) {
		plat_entity = IDX_FLITE;
	} else {
		p = strstr(pdev->name, "fimc");
		if (p && *(p + 4) == 0)
			plat_entity = IDX_FIMC;
	}

	if (plat_entity >= 0)
		ret = fimc_md_register_platform_entity(data, pdev,
						       plat_entity);
	put_device(dev);
	return 0;
}

/* Register FIMC, FIMC-LITE and CSIS media entities */
#ifdef CONFIG_OF
static int fimc_md_register_of_platform_entities(struct fimc_md *fmd,
						 struct device_node *parent)
{
	struct device_node *node;
	int ret = 0;

	for_each_available_child_of_node(parent, node) {
		struct platform_device *pdev;
		int plat_entity = -1;

		pdev = of_find_device_by_node(node);
		if (!pdev)
			continue;

		/* If the driver of any entity isn't ready, try all again later.
*/ if (!strcmp(node->name, CSIS_OF_NODE_NAME)) plat_entity = IDX_CSIS; else if (!strcmp(node->name, FIMC_IS_OF_NODE_NAME)) plat_entity = IDX_IS_ISP; else if (!strcmp(node->name, FIMC_LITE_OF_NODE_NAME)) plat_entity = IDX_FLITE; else if (!strcmp(node->name, FIMC_OF_NODE_NAME) && !of_property_read_bool(node, "samsung,lcd-wb")) plat_entity = IDX_FIMC; if (plat_entity >= 0) ret = fimc_md_register_platform_entity(fmd, pdev, plat_entity); put_device(&pdev->dev); if (ret < 0) break; } return ret; } #else #define fimc_md_register_of_platform_entities(fmd, node) (-ENOSYS) #endif static void fimc_md_unregister_entities(struct fimc_md *fmd) { int i; for (i = 0; i < FIMC_MAX_DEVS; i++) { if (fmd->fimc[i] == NULL) continue; v4l2_device_unregister_subdev(&fmd->fimc[i]->vid_cap.subdev); fmd->fimc[i]->pipeline_ops = NULL; fmd->fimc[i] = NULL; } for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) { if (fmd->fimc_lite[i] == NULL) continue; v4l2_device_unregister_subdev(&fmd->fimc_lite[i]->subdev); fmd->fimc_lite[i]->pipeline_ops = NULL; fmd->fimc_lite[i] = NULL; } for (i = 0; i < CSIS_MAX_ENTITIES; i++) { if (fmd->csis[i].sd == NULL) continue; v4l2_device_unregister_subdev(fmd->csis[i].sd); fmd->csis[i].sd = NULL; } for (i = 0; i < fmd->num_sensors; i++) { if (fmd->sensor[i].subdev == NULL) continue; fimc_md_unregister_sensor(fmd->sensor[i].subdev); fmd->sensor[i].subdev = NULL; } if (fmd->fimc_is) v4l2_device_unregister_subdev(&fmd->fimc_is->isp.subdev); v4l2_info(&fmd->v4l2_dev, "Unregistered all entities\n"); } /** * __fimc_md_create_fimc_links - create links to all FIMC entities * @fmd: fimc media device * @source: the source entity to create links to all fimc entities from * @sensor: sensor subdev linked to FIMC[fimc_id] entity, may be null * @pad: the source entity pad index * @link_mask: bitmask of the fimc devices for which link should be enabled */ static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd, struct media_entity *source, struct v4l2_subdev *sensor, int pad, int link_mask) { struct fimc_source_info *si = NULL; struct media_entity *sink; unsigned int flags = 0; int i, ret = 0; if (sensor) { si = v4l2_get_subdev_hostdata(sensor); /* Skip direct FIMC links in the logical FIMC-IS sensor path */ if (si && si->fimc_bus_type == FIMC_BUS_TYPE_ISP_WRITEBACK) ret = 1; } for (i = 0; !ret && i < FIMC_MAX_DEVS; i++) { if (!fmd->fimc[i]) continue; /* * Some FIMC variants are not fitted with camera capture * interface. Skip creating a link from sensor for those. */ if (!fmd->fimc[i]->variant->has_cam_if) continue; flags = ((1 << i) & link_mask) ? MEDIA_LNK_FL_ENABLED : 0; sink = &fmd->fimc[i]->vid_cap.subdev.entity; ret = media_entity_create_link(source, pad, sink, FIMC_SD_PAD_SINK_CAM, flags); if (ret) return ret; /* Notify FIMC capture subdev entity */ ret = media_entity_call(sink, link_setup, &sink->pads[0], &source->pads[pad], flags); if (ret) break; v4l2_info(&fmd->v4l2_dev, "created link [%s] %c> [%s]\n", source->name, flags ? 
'=' : '-', sink->name);

		if (flags == 0 || sensor == NULL)
			continue;

		if (!WARN_ON(si == NULL)) {
			unsigned long irq_flags;
			struct fimc_sensor_info *inf = source_to_sensor_info(si);

			spin_lock_irqsave(&fmd->slock, irq_flags);
			inf->host = fmd->fimc[i];
			spin_unlock_irqrestore(&fmd->slock, irq_flags);
		}
	}

	for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) {
		if (!fmd->fimc_lite[i])
			continue;

		sink = &fmd->fimc_lite[i]->subdev.entity;
		ret = media_entity_create_link(source, pad, sink,
					       FLITE_SD_PAD_SINK, 0);
		if (ret)
			return ret;

		/* Notify FIMC-LITE subdev entity */
		ret = media_entity_call(sink, link_setup, &sink->pads[0],
					&source->pads[pad], 0);
		if (ret)
			break;

		v4l2_info(&fmd->v4l2_dev, "created link [%s] -> [%s]\n",
			  source->name, sink->name);
	}
	return 0;
}

/* Create links from FIMC-LITE source pads to other entities */
static int __fimc_md_create_flite_source_links(struct fimc_md *fmd)
{
	struct media_entity *source, *sink;
	int i, ret = 0;

	for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) {
		struct fimc_lite *fimc = fmd->fimc_lite[i];

		if (fimc == NULL)
			continue;

		source = &fimc->subdev.entity;
		sink = &fimc->vfd.entity;
		/* FIMC-LITE's subdev and video node */
		ret = media_entity_create_link(source, FLITE_SD_PAD_SOURCE_DMA,
					       sink, 0, 0);
		if (ret)
			break;
		/* Link from FIMC-LITE to IS-ISP subdev */
		sink = &fmd->fimc_is->isp.subdev.entity;
		ret = media_entity_create_link(source, FLITE_SD_PAD_SOURCE_ISP,
					       sink, 0, 0);
		if (ret)
			break;
	}

	return ret;
}

/* Create FIMC-IS links */
static int __fimc_md_create_fimc_is_links(struct fimc_md *fmd)
{
	struct media_entity *source, *sink;
	int i, ret = 0;

	source = &fmd->fimc_is->isp.subdev.entity;

	for (i = 0; i < FIMC_MAX_DEVS; i++) {
		if (fmd->fimc[i] == NULL)
			continue;

		/* Link from IS-ISP subdev to FIMC */
		sink = &fmd->fimc[i]->vid_cap.subdev.entity;
		ret = media_entity_create_link(source, FIMC_ISP_SD_PAD_SRC_FIFO,
					       sink, FIMC_SD_PAD_SINK_FIFO, 0);
		if (ret)
			return ret;
	}

	return ret;
}

/**
 * fimc_md_create_links - create default links between registered entities
 *
 * Parallel interface sensor entities are connected directly to FIMC capture
 * entities. The sensors using MIPI CSIS bus are connected through immutable
 * link with CSI receiver entity specified by mux_id. Any registered CSIS
 * entity has a link to each registered FIMC capture entity. Enabled links
 * are created by default between each subsequent registered sensor and
 * subsequent FIMC capture entity. The number of default active links is
 * determined by the number of available sensors or FIMC entities,
 * whichever is less.
*/ static int fimc_md_create_links(struct fimc_md *fmd) { struct v4l2_subdev *csi_sensors[CSIS_MAX_ENTITIES] = { NULL }; struct v4l2_subdev *sensor, *csis; struct fimc_source_info *pdata; struct media_entity *source, *sink; int i, pad, fimc_id = 0, ret = 0; u32 flags, link_mask = 0; for (i = 0; i < fmd->num_sensors; i++) { if (fmd->sensor[i].subdev == NULL) continue; sensor = fmd->sensor[i].subdev; pdata = v4l2_get_subdev_hostdata(sensor); if (!pdata) continue; source = NULL; switch (pdata->sensor_bus_type) { case FIMC_BUS_TYPE_MIPI_CSI2: if (WARN(pdata->mux_id >= CSIS_MAX_ENTITIES, "Wrong CSI channel id: %d\n", pdata->mux_id)) return -EINVAL; csis = fmd->csis[pdata->mux_id].sd; if (WARN(csis == NULL, "MIPI-CSI interface specified " "but s5p-csis module is not loaded!\n")) return -EINVAL; pad = sensor->entity.num_pads - 1; ret = media_entity_create_link(&sensor->entity, pad, &csis->entity, CSIS_PAD_SINK, MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); if (ret) return ret; v4l2_info(&fmd->v4l2_dev, "created link [%s] => [%s]\n", sensor->entity.name, csis->entity.name); source = NULL; csi_sensors[pdata->mux_id] = sensor; break; case FIMC_BUS_TYPE_ITU_601...FIMC_BUS_TYPE_ITU_656: source = &sensor->entity; pad = 0; break; default: v4l2_err(&fmd->v4l2_dev, "Wrong bus_type: %x\n", pdata->sensor_bus_type); return -EINVAL; } if (source == NULL) continue; link_mask = 1 << fimc_id++; ret = __fimc_md_create_fimc_sink_links(fmd, source, sensor, pad, link_mask); } for (i = 0; i < CSIS_MAX_ENTITIES; i++) { if (fmd->csis[i].sd == NULL) continue; source = &fmd->csis[i].sd->entity; pad = CSIS_PAD_SOURCE; sensor = csi_sensors[i]; link_mask = 1 << fimc_id++; ret = __fimc_md_create_fimc_sink_links(fmd, source, sensor, pad, link_mask); } /* Create immutable links between each FIMC's subdev and video node */ flags = MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED; for (i = 0; i < FIMC_MAX_DEVS; i++) { if (!fmd->fimc[i]) continue; source = &fmd->fimc[i]->vid_cap.subdev.entity; sink = &fmd->fimc[i]->vid_cap.vfd.entity; ret = media_entity_create_link(source, FIMC_SD_PAD_SOURCE, sink, 0, flags); if (ret) break; } ret = __fimc_md_create_flite_source_links(fmd); if (ret < 0) return ret; if (fmd->use_isp) ret = __fimc_md_create_fimc_is_links(fmd); return ret; } /* * The peripheral sensor and CAM_BLK (PIXELASYNCMx) clocks management. 
*/ static void fimc_md_put_clocks(struct fimc_md *fmd) { int i = FIMC_MAX_CAMCLKS; while (--i >= 0) { if (IS_ERR(fmd->camclk[i].clock)) continue; clk_unprepare(fmd->camclk[i].clock); clk_put(fmd->camclk[i].clock); fmd->camclk[i].clock = ERR_PTR(-EINVAL); } /* Writeback (PIXELASYNCMx) clocks */ for (i = 0; i < FIMC_MAX_WBCLKS; i++) { if (IS_ERR(fmd->wbclk[i])) continue; clk_put(fmd->wbclk[i]); fmd->wbclk[i] = ERR_PTR(-EINVAL); } } static int fimc_md_get_clocks(struct fimc_md *fmd) { struct device *dev = NULL; char clk_name[32]; struct clk *clock; int ret, i; for (i = 0; i < FIMC_MAX_CAMCLKS; i++) fmd->camclk[i].clock = ERR_PTR(-EINVAL); if (fmd->pdev->dev.of_node) dev = &fmd->pdev->dev; for (i = 0; i < FIMC_MAX_CAMCLKS; i++) { snprintf(clk_name, sizeof(clk_name), "sclk_cam%u", i); clock = clk_get(dev, clk_name); if (IS_ERR(clock)) { dev_err(&fmd->pdev->dev, "Failed to get clock: %s\n", clk_name); ret = PTR_ERR(clock); break; } ret = clk_prepare(clock); if (ret < 0) { clk_put(clock); fmd->camclk[i].clock = ERR_PTR(-EINVAL); break; } fmd->camclk[i].clock = clock; } if (ret) fimc_md_put_clocks(fmd); if (!fmd->use_isp) return 0; /* * For now get only PIXELASYNCM1 clock (Writeback B/ISP), * leave PIXELASYNCM0 out for the LCD Writeback driver. */ fmd->wbclk[CLK_IDX_WB_A] = ERR_PTR(-EINVAL); for (i = CLK_IDX_WB_B; i < FIMC_MAX_WBCLKS; i++) { snprintf(clk_name, sizeof(clk_name), "pxl_async%u", i); clock = clk_get(dev, clk_name); if (IS_ERR(clock)) { v4l2_err(&fmd->v4l2_dev, "Failed to get clock: %s\n", clk_name); ret = PTR_ERR(clock); break; } fmd->wbclk[i] = clock; } if (ret) fimc_md_put_clocks(fmd); return ret; } static int __fimc_md_set_camclk(struct fimc_md *fmd, struct fimc_source_info *si, bool on) { struct fimc_camclk_info *camclk; int ret = 0; if (WARN_ON(si->clk_id >= FIMC_MAX_CAMCLKS) || !fmd || !fmd->pmf) return -EINVAL; camclk = &fmd->camclk[si->clk_id]; dbg("camclk %d, f: %lu, use_count: %d, on: %d", si->clk_id, si->clk_frequency, camclk->use_count, on); if (on) { if (camclk->use_count > 0 && camclk->frequency != si->clk_frequency) return -EINVAL; if (camclk->use_count++ == 0) { clk_set_rate(camclk->clock, si->clk_frequency); camclk->frequency = si->clk_frequency; ret = pm_runtime_get_sync(fmd->pmf); if (ret < 0) return ret; ret = clk_enable(camclk->clock); dbg("Enabled camclk %d: f: %lu", si->clk_id, clk_get_rate(camclk->clock)); } return ret; } if (WARN_ON(camclk->use_count == 0)) return 0; if (--camclk->use_count == 0) { clk_disable(camclk->clock); pm_runtime_put(fmd->pmf); dbg("Disabled camclk %d", si->clk_id); } return ret; } /** * fimc_md_set_camclk - peripheral sensor clock setup * @sd: sensor subdev to configure sclk_cam clock for * @on: 1 to enable or 0 to disable the clock * * There are 2 separate clock outputs available in the SoC for external * image processors. These clocks are shared between all registered FIMC * devices to which sensors can be attached, either directly or through * the MIPI CSI receiver. The clock is allowed here to be used by * multiple sensors concurrently if they use same frequency. * This function should only be called when the graph mutex is held. 
*/ int fimc_md_set_camclk(struct v4l2_subdev *sd, bool on) { struct fimc_source_info *si = v4l2_get_subdev_hostdata(sd); struct fimc_md *fmd = entity_to_fimc_mdev(&sd->entity); return __fimc_md_set_camclk(fmd, si, on); } static int fimc_md_link_notify(struct media_pad *source, struct media_pad *sink, u32 flags) { struct fimc_lite *fimc_lite = NULL; struct fimc_dev *fimc = NULL; struct fimc_pipeline *pipeline; struct v4l2_subdev *sd; struct mutex *lock; int i, ret = 0; int ref_count; if (media_entity_type(sink->entity) != MEDIA_ENT_T_V4L2_SUBDEV) return 0; sd = media_entity_to_v4l2_subdev(sink->entity); switch (sd->grp_id) { case GRP_ID_FLITE: fimc_lite = v4l2_get_subdevdata(sd); if (WARN_ON(fimc_lite == NULL)) return 0; pipeline = &fimc_lite->pipeline; lock = &fimc_lite->lock; break; case GRP_ID_FIMC: fimc = v4l2_get_subdevdata(sd); if (WARN_ON(fimc == NULL)) return 0; pipeline = &fimc->pipeline; lock = &fimc->lock; break; default: return 0; } mutex_lock(lock); ref_count = fimc ? fimc->vid_cap.refcnt : fimc_lite->ref_count; if (!(flags & MEDIA_LNK_FL_ENABLED)) { if (ref_count > 0) { ret = __fimc_pipeline_close(pipeline); if (!ret && fimc) fimc_ctrls_delete(fimc->vid_cap.ctx); } for (i = 0; i < IDX_MAX; i++) pipeline->subdevs[i] = NULL; } else if (ref_count > 0) { /* * Link activation. Enable power of pipeline elements only if * the pipeline is already in use, i.e. its video node is open. * Recreate the controls destroyed during the link deactivation. */ ret = __fimc_pipeline_open(pipeline, source->entity, true); if (!ret && fimc) ret = fimc_capture_ctrls_create(fimc); } mutex_unlock(lock); return ret ? -EPIPE : ret; } static ssize_t fimc_md_sysfs_show(struct device *dev, struct device_attribute *attr, char *buf) { struct platform_device *pdev = to_platform_device(dev); struct fimc_md *fmd = platform_get_drvdata(pdev); if (fmd->user_subdev_api) return strlcpy(buf, "Sub-device API (sub-dev)\n", PAGE_SIZE); return strlcpy(buf, "V4L2 video node only API (vid-dev)\n", PAGE_SIZE); } static ssize_t fimc_md_sysfs_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct platform_device *pdev = to_platform_device(dev); struct fimc_md *fmd = platform_get_drvdata(pdev); bool subdev_api; int i; if (!strcmp(buf, "vid-dev\n")) subdev_api = false; else if (!strcmp(buf, "sub-dev\n")) subdev_api = true; else return count; fmd->user_subdev_api = subdev_api; for (i = 0; i < FIMC_MAX_DEVS; i++) if (fmd->fimc[i]) fmd->fimc[i]->vid_cap.user_subdev_api = subdev_api; return count; } /* * This device attribute is to select video pipeline configuration method. * There are following valid values: * vid-dev - for V4L2 video node API only, subdevice will be configured * by the host driver. * sub-dev - for media controller API, subdevs must be configured in user * space before starting streaming. 
 */
static DEVICE_ATTR(subdev_conf_mode, S_IWUSR | S_IRUGO,
		   fimc_md_sysfs_show, fimc_md_sysfs_store);

static int fimc_md_get_pinctrl(struct fimc_md *fmd)
{
	struct device *dev = &fmd->pdev->dev;
	struct fimc_pinctrl *pctl = &fmd->pinctl;

	pctl->pinctrl = devm_pinctrl_get(dev);
	if (IS_ERR(pctl->pinctrl))
		return PTR_ERR(pctl->pinctrl);

	pctl->state_default = pinctrl_lookup_state(pctl->pinctrl,
					PINCTRL_STATE_DEFAULT);
	if (IS_ERR(pctl->state_default))
		return PTR_ERR(pctl->state_default);

	pctl->state_idle = pinctrl_lookup_state(pctl->pinctrl,
					PINCTRL_STATE_IDLE);
	return 0;
}

static int fimc_md_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct v4l2_device *v4l2_dev;
	struct fimc_md *fmd;
	int ret;

	fmd = devm_kzalloc(dev, sizeof(*fmd), GFP_KERNEL);
	if (!fmd)
		return -ENOMEM;

	spin_lock_init(&fmd->slock);
	fmd->pdev = pdev;

	strlcpy(fmd->media_dev.model, "SAMSUNG S5P FIMC",
		sizeof(fmd->media_dev.model));
	fmd->media_dev.link_notify = fimc_md_link_notify;
	fmd->media_dev.dev = dev;

	v4l2_dev = &fmd->v4l2_dev;
	v4l2_dev->mdev = &fmd->media_dev;
	v4l2_dev->notify = fimc_sensor_notify;
	strlcpy(v4l2_dev->name, "s5p-fimc-md", sizeof(v4l2_dev->name));

	fmd->use_isp = fimc_md_is_isp_available(dev->of_node);

	ret = v4l2_device_register(dev, &fmd->v4l2_dev);
	if (ret < 0) {
		v4l2_err(v4l2_dev, "Failed to register v4l2_device: %d\n", ret);
		return ret;
	}
	ret = media_device_register(&fmd->media_dev);
	if (ret < 0) {
		v4l2_err(v4l2_dev, "Failed to register media device: %d\n", ret);
		goto err_md;
	}
	ret = fimc_md_get_clocks(fmd);
	if (ret)
		goto err_clk;

	fmd->user_subdev_api = (dev->of_node != NULL);

	/* Protect the media graph while we're registering entities */
	mutex_lock(&fmd->media_dev.graph_mutex);

	ret = fimc_md_get_pinctrl(fmd);
	if (ret < 0) {
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get pinctrl: %d\n", ret);
		goto err_unlock;
	}

	if (dev->of_node)
		ret = fimc_md_register_of_platform_entities(fmd, dev->of_node);
	else
		ret = bus_for_each_dev(&platform_bus_type, NULL, fmd,
					fimc_md_pdev_match);
	if (ret)
		goto err_unlock;

	if (dev->platform_data || dev->of_node) {
		ret = fimc_md_register_sensor_entities(fmd);
		if (ret)
			goto err_unlock;
	}

	ret = fimc_md_create_links(fmd);
	if (ret)
		goto err_unlock;
	ret = v4l2_device_register_subdev_nodes(&fmd->v4l2_dev);
	if (ret)
		goto err_unlock;

	ret = device_create_file(&pdev->dev, &dev_attr_subdev_conf_mode);
	if (ret)
		goto err_unlock;

	platform_set_drvdata(pdev, fmd);
	mutex_unlock(&fmd->media_dev.graph_mutex);
	return 0;

err_unlock:
	mutex_unlock(&fmd->media_dev.graph_mutex);
err_clk:
	fimc_md_put_clocks(fmd);
	fimc_md_unregister_entities(fmd);
	media_device_unregister(&fmd->media_dev);
err_md:
	v4l2_device_unregister(&fmd->v4l2_dev);
	return ret;
}

static int fimc_md_remove(struct platform_device *pdev)
{
	struct fimc_md *fmd = platform_get_drvdata(pdev);

	if (!fmd)
		return 0;
	device_remove_file(&pdev->dev, &dev_attr_subdev_conf_mode);
	fimc_md_unregister_entities(fmd);
	media_device_unregister(&fmd->media_dev);
	fimc_md_put_clocks(fmd);
	return 0;
}

static struct platform_device_id fimc_driver_ids[] __always_unused = {
	{ .name = "s5p-fimc-md" },
	{ },
};
MODULE_DEVICE_TABLE(platform, fimc_driver_ids);

static const struct of_device_id fimc_md_of_match[] = {
	{ .compatible = "samsung,fimc" },
	{ },
};
MODULE_DEVICE_TABLE(of, fimc_md_of_match);

static struct platform_driver fimc_md_driver = {
	.probe		= fimc_md_probe,
	.remove		= fimc_md_remove,
	.driver = {
		.of_match_table = of_match_ptr(fimc_md_of_match),
		.name		= "s5p-fimc-md",
		.owner		= THIS_MODULE,
	}
};

static int __init fimc_md_init(void)
{
	int ret;

	request_module("s5p-csis");
	ret = fimc_register_driver();
	if (ret)
		return ret;

	return platform_driver_register(&fimc_md_driver);
}

static void __exit fimc_md_exit(void)
{
	platform_driver_unregister(&fimc_md_driver);
	fimc_unregister_driver();
}

module_init(fimc_md_init);
module_exit(fimc_md_exit);

MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
MODULE_DESCRIPTION("S5P FIMC camera host interface/video postprocessor driver");
MODULE_LICENSE("GPL");
MODULE_VERSION("2.0.1");
gpl-2.0
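The clock sharing policy implemented by __fimc_md_set_camclk() in the media-dev.c file above is easy to model in isolation: a sclk_cam output may be shared by several sensors, but only at one agreed frequency, and it is physically gated only on the 0 -> 1 and 1 -> 0 use-count transitions. The following is a minimal, self-contained C sketch of that policy; the camclk_model names and printf calls are illustrative stand-ins for the kernel clk API, not driver code.

#include <stdio.h>

struct camclk_model {
	int use_count;
	unsigned long frequency;	/* Hz, valid while use_count > 0 */
};

static int camclk_model_set(struct camclk_model *c, unsigned long freq, int on)
{
	if (on) {
		/* A second user must request the frequency already in use. */
		if (c->use_count > 0 && c->frequency != freq)
			return -1;
		if (c->use_count++ == 0) {
			/* first user: program the rate and gate the clock on */
			c->frequency = freq;
			printf("clock enabled at %lu Hz\n", freq);
		}
		return 0;
	}
	if (c->use_count == 0)
		return -1;	/* unbalanced disable */
	if (--c->use_count == 0)
		printf("clock disabled\n");
	return 0;
}

int main(void)
{
	struct camclk_model clk = { 0, 0 };

	camclk_model_set(&clk, 24000000, 1);	/* enables the clock */
	camclk_model_set(&clk, 24000000, 1);	/* shared, no re-enable */
	if (camclk_model_set(&clk, 12000000, 1) < 0)
		printf("frequency conflict rejected\n");
	camclk_model_set(&clk, 24000000, 0);	/* still one user left */
	camclk_model_set(&clk, 24000000, 0);	/* last user, disables */
	return 0;
}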
cuteprince/kernel_2.6_golfu
drivers/media/dvb/dvb-usb/ce6230.c
1809
8130
/* * DVB USB Linux driver for Intel CE6230 DVB-T USB2.0 receiver * * Copyright (C) 2009 Antti Palosaari <crope@iki.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include "ce6230.h" #include "zl10353.h" #include "mxl5005s.h" /* debug */ static int dvb_usb_ce6230_debug; module_param_named(debug, dvb_usb_ce6230_debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level" DVB_USB_DEBUG_STATUS); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); static struct zl10353_config ce6230_zl10353_config; static int ce6230_rw_udev(struct usb_device *udev, struct req_t *req) { int ret; unsigned int pipe; u8 request; u8 requesttype; u16 value; u16 index; u8 buf[req->data_len]; request = req->cmd; value = req->value; index = req->index; switch (req->cmd) { case I2C_READ: case DEMOD_READ: case REG_READ: requesttype = (USB_TYPE_VENDOR | USB_DIR_IN); break; case I2C_WRITE: case DEMOD_WRITE: case REG_WRITE: requesttype = (USB_TYPE_VENDOR | USB_DIR_OUT); break; default: err("unknown command:%02x", req->cmd); ret = -EPERM; goto error; } if (requesttype == (USB_TYPE_VENDOR | USB_DIR_OUT)) { /* write */ memcpy(buf, req->data, req->data_len); pipe = usb_sndctrlpipe(udev, 0); } else { /* read */ pipe = usb_rcvctrlpipe(udev, 0); } msleep(1); /* avoid I2C errors */ ret = usb_control_msg(udev, pipe, request, requesttype, value, index, buf, sizeof(buf), CE6230_USB_TIMEOUT); ce6230_debug_dump(request, requesttype, value, index, buf, req->data_len, deb_xfer); if (ret < 0) deb_info("%s: usb_control_msg failed:%d\n", __func__, ret); else ret = 0; /* read request, copy returned data to return buf */ if (!ret && requesttype == (USB_TYPE_VENDOR | USB_DIR_IN)) memcpy(req->data, buf, req->data_len); error: return ret; } static int ce6230_ctrl_msg(struct dvb_usb_device *d, struct req_t *req) { return ce6230_rw_udev(d->udev, req); } /* I2C */ static int ce6230_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int i = 0; struct req_t req; int ret = 0; memset(&req, 0, sizeof(req)); if (num > 2) return -EINVAL; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; while (i < num) { if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) { if (msg[i].addr == ce6230_zl10353_config.demod_address) { req.cmd = DEMOD_READ; req.value = msg[i].addr >> 1; req.index = msg[i].buf[0]; req.data_len = msg[i+1].len; req.data = &msg[i+1].buf[0]; ret = ce6230_ctrl_msg(d, &req); } else { err("i2c read not implemented"); ret = -EPERM; } i += 2; } else { if (msg[i].addr == ce6230_zl10353_config.demod_address) { req.cmd = DEMOD_WRITE; req.value = msg[i].addr >> 1; req.index = msg[i].buf[0]; req.data_len = msg[i].len-1; req.data = &msg[i].buf[1]; ret = ce6230_ctrl_msg(d, &req); } else { req.cmd = I2C_WRITE; req.value = 0x2000 + (msg[i].addr >> 1); req.index = 0x0000; req.data_len = msg[i].len; req.data = &msg[i].buf[0]; ret = ce6230_ctrl_msg(d, &req); } 
i += 1; } if (ret) break; } mutex_unlock(&d->i2c_mutex); return ret ? ret : i; } static u32 ce6230_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static struct i2c_algorithm ce6230_i2c_algo = { .master_xfer = ce6230_i2c_xfer, .functionality = ce6230_i2c_func, }; /* Callbacks for DVB USB */ static struct zl10353_config ce6230_zl10353_config = { .demod_address = 0x1e, .adc_clock = 450000, .if2 = 45700, .no_tuner = 1, .parallel_ts = 1, .clock_ctl_1 = 0x34, .pll_0 = 0x0e, }; static int ce6230_zl10353_frontend_attach(struct dvb_usb_adapter *adap) { deb_info("%s:\n", __func__); adap->fe = dvb_attach(zl10353_attach, &ce6230_zl10353_config, &adap->dev->i2c_adap); if (adap->fe == NULL) return -ENODEV; return 0; } static struct mxl5005s_config ce6230_mxl5003s_config = { .i2c_address = 0xc6, .if_freq = IF_FREQ_4570000HZ, .xtal_freq = CRYSTAL_FREQ_16000000HZ, .agc_mode = MXL_SINGLE_AGC, .tracking_filter = MXL_TF_DEFAULT, .rssi_enable = MXL_RSSI_ENABLE, .cap_select = MXL_CAP_SEL_ENABLE, .div_out = MXL_DIV_OUT_4, .clock_out = MXL_CLOCK_OUT_DISABLE, .output_load = MXL5005S_IF_OUTPUT_LOAD_200_OHM, .top = MXL5005S_TOP_25P2, .mod_mode = MXL_DIGITAL_MODE, .if_mode = MXL_ZERO_IF, .AgcMasterByte = 0x00, }; static int ce6230_mxl5003s_tuner_attach(struct dvb_usb_adapter *adap) { int ret; deb_info("%s:\n", __func__); ret = dvb_attach(mxl5005s_attach, adap->fe, &adap->dev->i2c_adap, &ce6230_mxl5003s_config) == NULL ? -ENODEV : 0; return ret; } static int ce6230_power_ctrl(struct dvb_usb_device *d, int onoff) { int ret; deb_info("%s: onoff:%d\n", __func__, onoff); /* InterfaceNumber 1 / AlternateSetting 0 idle InterfaceNumber 1 / AlternateSetting 1 streaming */ ret = usb_set_interface(d->udev, 1, onoff); if (ret) err("usb_set_interface failed with error:%d", ret); return ret; } /* DVB USB Driver stuff */ static struct dvb_usb_device_properties ce6230_properties; static int ce6230_probe(struct usb_interface *intf, const struct usb_device_id *id) { int ret = 0; struct dvb_usb_device *d = NULL; deb_info("%s: interface:%d\n", __func__, intf->cur_altsetting->desc.bInterfaceNumber); if (intf->cur_altsetting->desc.bInterfaceNumber == 1) { ret = dvb_usb_device_init(intf, &ce6230_properties, THIS_MODULE, &d, adapter_nr); if (ret) err("init failed with error:%d\n", ret); } return ret; } static struct usb_device_id ce6230_table[] = { { USB_DEVICE(USB_VID_INTEL, USB_PID_INTEL_CE9500) }, { USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A310) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, ce6230_table); static struct dvb_usb_device_properties ce6230_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .no_reconnect = 1, .size_of_priv = 0, .num_adapters = 1, .adapter = { { .frontend_attach = ce6230_zl10353_frontend_attach, .tuner_attach = ce6230_mxl5003s_tuner_attach, .stream = { .type = USB_BULK, .count = 6, .endpoint = 0x82, .u = { .bulk = { .buffersize = (16*512), } } }, } }, .power_ctrl = ce6230_power_ctrl, .i2c_algo = &ce6230_i2c_algo, .num_device_descs = 2, .devices = { { .name = "Intel CE9500 reference design", .cold_ids = {NULL}, .warm_ids = {&ce6230_table[0], NULL}, }, { .name = "AVerMedia A310 USB 2.0 DVB-T tuner", .cold_ids = {NULL}, .warm_ids = {&ce6230_table[1], NULL}, }, } }; static struct usb_driver ce6230_driver = { .name = "dvb_usb_ce6230", .probe = ce6230_probe, .disconnect = dvb_usb_device_exit, .id_table = ce6230_table, }; /* module stuff */ static int __init ce6230_module_init(void) { int ret; deb_info("%s:\n", __func__); ret = 
usb_register(&ce6230_driver); if (ret) err("usb_register failed with error:%d", ret); return ret; } static void __exit ce6230_module_exit(void) { deb_info("%s:\n", __func__); /* deregister this driver from the USB subsystem */ usb_deregister(&ce6230_driver); } module_init(ce6230_module_init); module_exit(ce6230_module_exit); MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>"); MODULE_DESCRIPTION("Driver for Intel CE6230 DVB-T USB2.0"); MODULE_LICENSE("GPL");
gpl-2.0
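ce6230_i2c_xfer() in the file above folds a register-address write message that is immediately followed by an I2C_M_RD message into a single read transaction, advancing the cursor by two messages, and treats anything else as a plain write, advancing by one. Below is a minimal, self-contained C sketch of that message-pairing walk; the types are mocks and no real I2C traffic is generated, so all names are illustrative.

#include <stdio.h>

#define MSG_RD 0x1	/* stand-in for I2C_M_RD */

struct msg_model {
	unsigned flags;
	unsigned char addr;
	unsigned char reg;	/* first buffer byte in the real driver */
};

/* Returns the number of messages consumed, mirroring the i += 2 / i += 1 walk. */
static int xfer_model(const struct msg_model *msg, int num)
{
	int i = 0;

	while (i < num) {
		if (i + 1 < num && (msg[i + 1].flags & MSG_RD)) {
			/* write (register address) + read (data) pair */
			printf("read  addr=0x%02x reg=0x%02x\n",
			       msg[i].addr, msg[i].reg);
			i += 2;
		} else {
			printf("write addr=0x%02x reg=0x%02x\n",
			       msg[i].addr, msg[i].reg);
			i += 1;
		}
	}
	return i;
}

int main(void)
{
	struct msg_model msgs[] = {
		{ 0,      0x1e, 0x7f },	/* set register address */
		{ MSG_RD, 0x1e, 0x00 },	/* read back data */
		{ 0,      0x1e, 0x55 },	/* plain write */
	};

	return xfer_model(msgs, 3) == 3 ? 0 : 1;
}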
alesaiko/UK-PRO5
drivers/staging/imx-drm/ipuv3-crtc.c
2065
13790
/* * i.MX IPUv3 Graphics driver * * Copyright (C) 2011 Sascha Hauer, Pengutronix * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, * MA 02110-1301, USA. */ #include <linux/module.h> #include <linux/export.h> #include <linux/device.h> #include <linux/platform_device.h> #include <drm/drmP.h> #include <drm/drm_fb_helper.h> #include <drm/drm_crtc_helper.h> #include <linux/fb.h> #include <linux/clk.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_fb_cma_helper.h> #include "ipu-v3/imx-ipu-v3.h" #include "imx-drm.h" #define DRIVER_DESC "i.MX IPUv3 Graphics" struct ipu_framebuffer { struct drm_framebuffer base; void *virt; dma_addr_t phys; size_t len; }; struct ipu_crtc { struct drm_fb_helper fb_helper; struct ipu_framebuffer ifb; int num_crtcs; struct device *dev; struct drm_crtc base; struct imx_drm_crtc *imx_crtc; struct ipuv3_channel *ipu_ch; struct ipu_dc *dc; struct ipu_dp *dp; struct dmfc_channel *dmfc; struct ipu_di *di; int enabled; struct ipu_priv *ipu_priv; struct drm_pending_vblank_event *page_flip_event; struct drm_framebuffer *newfb; int irq; u32 interface_pix_fmt; unsigned long di_clkflags; int di_hsync_pin; int di_vsync_pin; }; #define to_ipu_crtc(x) container_of(x, struct ipu_crtc, base) static int calc_vref(struct drm_display_mode *mode) { unsigned long htotal, vtotal; htotal = mode->htotal; vtotal = mode->vtotal; if (!htotal || !vtotal) return 60; return mode->clock * 1000 / vtotal / htotal; } static int calc_bandwidth(struct drm_display_mode *mode, unsigned int vref) { return mode->hdisplay * mode->vdisplay * vref; } static void ipu_fb_enable(struct ipu_crtc *ipu_crtc) { if (ipu_crtc->enabled) return; ipu_di_enable(ipu_crtc->di); ipu_dmfc_enable_channel(ipu_crtc->dmfc); ipu_idmac_enable_channel(ipu_crtc->ipu_ch); ipu_dc_enable_channel(ipu_crtc->dc); if (ipu_crtc->dp) ipu_dp_enable_channel(ipu_crtc->dp); ipu_crtc->enabled = 1; } static void ipu_fb_disable(struct ipu_crtc *ipu_crtc) { if (!ipu_crtc->enabled) return; if (ipu_crtc->dp) ipu_dp_disable_channel(ipu_crtc->dp); ipu_dc_disable_channel(ipu_crtc->dc); ipu_idmac_disable_channel(ipu_crtc->ipu_ch); ipu_dmfc_disable_channel(ipu_crtc->dmfc); ipu_di_disable(ipu_crtc->di); ipu_crtc->enabled = 0; } static void ipu_crtc_dpms(struct drm_crtc *crtc, int mode) { struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); dev_dbg(ipu_crtc->dev, "%s mode: %d\n", __func__, mode); switch (mode) { case DRM_MODE_DPMS_ON: ipu_fb_enable(ipu_crtc); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: ipu_fb_disable(ipu_crtc); break; } } static int ipu_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event) { struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); int ret; if (ipu_crtc->newfb) return -EBUSY; ret = imx_drm_crtc_vblank_get(ipu_crtc->imx_crtc); if (ret) { dev_dbg(ipu_crtc->dev, "failed to acquire vblank counter\n"); list_del(&event->base.link); 
return ret; } ipu_crtc->newfb = fb; ipu_crtc->page_flip_event = event; return 0; } static const struct drm_crtc_funcs ipu_crtc_funcs = { .set_config = drm_crtc_helper_set_config, .destroy = drm_crtc_cleanup, .page_flip = ipu_page_flip, }; static int ipu_drm_set_base(struct drm_crtc *crtc, int x, int y) { struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); struct drm_gem_cma_object *cma_obj; struct drm_framebuffer *fb = crtc->fb; unsigned long phys; cma_obj = drm_fb_cma_get_gem_obj(fb, 0); if (!cma_obj) { DRM_LOG_KMS("entry is null.\n"); return -EFAULT; } phys = cma_obj->paddr; phys += x * (fb->bits_per_pixel >> 3); phys += y * fb->pitches[0]; dev_dbg(ipu_crtc->dev, "%s: phys: 0x%lx\n", __func__, phys); dev_dbg(ipu_crtc->dev, "%s: xy: %dx%d\n", __func__, x, y); ipu_cpmem_set_stride(ipu_get_cpmem(ipu_crtc->ipu_ch), fb->pitches[0]); ipu_cpmem_set_buffer(ipu_get_cpmem(ipu_crtc->ipu_ch), 0, phys); return 0; } static int ipu_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *orig_mode, struct drm_display_mode *mode, int x, int y, struct drm_framebuffer *old_fb) { struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); struct drm_framebuffer *fb = ipu_crtc->base.fb; int ret; struct ipu_di_signal_cfg sig_cfg = {}; u32 out_pixel_fmt; struct ipu_ch_param __iomem *cpmem = ipu_get_cpmem(ipu_crtc->ipu_ch); int bpp; u32 v4l2_fmt; dev_dbg(ipu_crtc->dev, "%s: mode->hdisplay: %d\n", __func__, mode->hdisplay); dev_dbg(ipu_crtc->dev, "%s: mode->vdisplay: %d\n", __func__, mode->vdisplay); ipu_ch_param_zero(cpmem); switch (fb->pixel_format) { case DRM_FORMAT_XRGB8888: case DRM_FORMAT_ARGB8888: v4l2_fmt = V4L2_PIX_FMT_RGB32; bpp = 32; break; case DRM_FORMAT_RGB565: v4l2_fmt = V4L2_PIX_FMT_RGB565; bpp = 16; break; case DRM_FORMAT_RGB888: v4l2_fmt = V4L2_PIX_FMT_RGB24; bpp = 24; break; default: dev_err(ipu_crtc->dev, "unsupported pixel format 0x%08x\n", fb->pixel_format); return -EINVAL; } out_pixel_fmt = ipu_crtc->interface_pix_fmt; if (mode->flags & DRM_MODE_FLAG_INTERLACE) sig_cfg.interlaced = 1; if (mode->flags & DRM_MODE_FLAG_PHSYNC) sig_cfg.Hsync_pol = 1; if (mode->flags & DRM_MODE_FLAG_PVSYNC) sig_cfg.Vsync_pol = 1; sig_cfg.enable_pol = 1; sig_cfg.clk_pol = 0; sig_cfg.width = mode->hdisplay; sig_cfg.height = mode->vdisplay; sig_cfg.pixel_fmt = out_pixel_fmt; sig_cfg.h_start_width = mode->htotal - mode->hsync_end; sig_cfg.h_sync_width = mode->hsync_end - mode->hsync_start; sig_cfg.h_end_width = mode->hsync_start - mode->hdisplay; sig_cfg.v_start_width = mode->vtotal - mode->vsync_end; sig_cfg.v_sync_width = mode->vsync_end - mode->vsync_start; sig_cfg.v_end_width = mode->vsync_start - mode->vdisplay; sig_cfg.pixelclock = mode->clock * 1000; sig_cfg.clkflags = ipu_crtc->di_clkflags; sig_cfg.v_to_h_sync = 0; sig_cfg.hsync_pin = ipu_crtc->di_hsync_pin; sig_cfg.vsync_pin = ipu_crtc->di_vsync_pin; if (ipu_crtc->dp) { ret = ipu_dp_setup_channel(ipu_crtc->dp, IPUV3_COLORSPACE_RGB, IPUV3_COLORSPACE_RGB); if (ret) { dev_err(ipu_crtc->dev, "initializing display processor failed with %d\n", ret); return ret; } ipu_dp_set_global_alpha(ipu_crtc->dp, 1, 0, 1); } ret = ipu_dc_init_sync(ipu_crtc->dc, ipu_crtc->di, sig_cfg.interlaced, out_pixel_fmt, mode->hdisplay); if (ret) { dev_err(ipu_crtc->dev, "initializing display controller failed with %d\n", ret); return ret; } ret = ipu_di_init_sync_panel(ipu_crtc->di, &sig_cfg); if (ret) { dev_err(ipu_crtc->dev, "initializing panel failed with %d\n", ret); return ret; } ipu_cpmem_set_resolution(cpmem, mode->hdisplay, mode->vdisplay); ipu_cpmem_set_fmt(cpmem, v4l2_fmt); 
ipu_cpmem_set_high_priority(ipu_crtc->ipu_ch); ret = ipu_dmfc_init_channel(ipu_crtc->dmfc, mode->hdisplay); if (ret) { dev_err(ipu_crtc->dev, "initializing dmfc channel failed with %d\n", ret); return ret; } ret = ipu_dmfc_alloc_bandwidth(ipu_crtc->dmfc, calc_bandwidth(mode, calc_vref(mode)), 64); if (ret) { dev_err(ipu_crtc->dev, "allocating dmfc bandwidth failed with %d\n", ret); return ret; } ipu_drm_set_base(crtc, x, y); return 0; } static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc) { unsigned long flags; struct drm_device *drm = ipu_crtc->base.dev; spin_lock_irqsave(&drm->event_lock, flags); if (ipu_crtc->page_flip_event) drm_send_vblank_event(drm, -1, ipu_crtc->page_flip_event); ipu_crtc->page_flip_event = NULL; imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc); spin_unlock_irqrestore(&drm->event_lock, flags); } static irqreturn_t ipu_irq_handler(int irq, void *dev_id) { struct ipu_crtc *ipu_crtc = dev_id; imx_drm_handle_vblank(ipu_crtc->imx_crtc); if (ipu_crtc->newfb) { ipu_crtc->base.fb = ipu_crtc->newfb; ipu_crtc->newfb = NULL; ipu_drm_set_base(&ipu_crtc->base, 0, 0); ipu_crtc_handle_pageflip(ipu_crtc); } return IRQ_HANDLED; } static bool ipu_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; } static void ipu_crtc_prepare(struct drm_crtc *crtc) { struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); ipu_fb_disable(ipu_crtc); } static void ipu_crtc_commit(struct drm_crtc *crtc) { struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); ipu_fb_enable(ipu_crtc); } static void ipu_crtc_load_lut(struct drm_crtc *crtc) { } static struct drm_crtc_helper_funcs ipu_helper_funcs = { .dpms = ipu_crtc_dpms, .mode_fixup = ipu_crtc_mode_fixup, .mode_set = ipu_crtc_mode_set, .prepare = ipu_crtc_prepare, .commit = ipu_crtc_commit, .load_lut = ipu_crtc_load_lut, }; static int ipu_enable_vblank(struct drm_crtc *crtc) { struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); enable_irq(ipu_crtc->irq); return 0; } static void ipu_disable_vblank(struct drm_crtc *crtc) { struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); disable_irq(ipu_crtc->irq); } static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc, u32 encoder_type, u32 pixfmt, int hsync_pin, int vsync_pin) { struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); ipu_crtc->interface_pix_fmt = pixfmt; ipu_crtc->di_hsync_pin = hsync_pin; ipu_crtc->di_vsync_pin = vsync_pin; switch (encoder_type) { case DRM_MODE_ENCODER_DAC: case DRM_MODE_ENCODER_TVDAC: case DRM_MODE_ENCODER_LVDS: ipu_crtc->di_clkflags = IPU_DI_CLKMODE_SYNC | IPU_DI_CLKMODE_EXT; break; case DRM_MODE_ENCODER_NONE: ipu_crtc->di_clkflags = 0; break; } return 0; } static const struct imx_drm_crtc_helper_funcs ipu_crtc_helper_funcs = { .enable_vblank = ipu_enable_vblank, .disable_vblank = ipu_disable_vblank, .set_interface_pix_fmt = ipu_set_interface_pix_fmt, .crtc_funcs = &ipu_crtc_funcs, .crtc_helper_funcs = &ipu_helper_funcs, }; static void ipu_put_resources(struct ipu_crtc *ipu_crtc) { if (!IS_ERR_OR_NULL(ipu_crtc->ipu_ch)) ipu_idmac_put(ipu_crtc->ipu_ch); if (!IS_ERR_OR_NULL(ipu_crtc->dmfc)) ipu_dmfc_put(ipu_crtc->dmfc); if (!IS_ERR_OR_NULL(ipu_crtc->dp)) ipu_dp_put(ipu_crtc->dp); if (!IS_ERR_OR_NULL(ipu_crtc->di)) ipu_di_put(ipu_crtc->di); } static int ipu_get_resources(struct ipu_crtc *ipu_crtc, struct ipu_client_platformdata *pdata) { struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent); int ret; ipu_crtc->ipu_ch = ipu_idmac_get(ipu, pdata->dma[0]); if (IS_ERR(ipu_crtc->ipu_ch)) { ret = PTR_ERR(ipu_crtc->ipu_ch); goto 
err_out; } ipu_crtc->dc = ipu_dc_get(ipu, pdata->dc); if (IS_ERR(ipu_crtc->dc)) { ret = PTR_ERR(ipu_crtc->dc); goto err_out; } ipu_crtc->dmfc = ipu_dmfc_get(ipu, pdata->dma[0]); if (IS_ERR(ipu_crtc->dmfc)) { ret = PTR_ERR(ipu_crtc->dmfc); goto err_out; } if (pdata->dp >= 0) { ipu_crtc->dp = ipu_dp_get(ipu, pdata->dp); if (IS_ERR(ipu_crtc->dp)) { ret = PTR_ERR(ipu_crtc->dp); goto err_out; } } ipu_crtc->di = ipu_di_get(ipu, pdata->di); if (IS_ERR(ipu_crtc->di)) { ret = PTR_ERR(ipu_crtc->di); goto err_out; } return 0; err_out: ipu_put_resources(ipu_crtc); return ret; } static int ipu_crtc_init(struct ipu_crtc *ipu_crtc, struct ipu_client_platformdata *pdata) { struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent); int ret; ret = ipu_get_resources(ipu_crtc, pdata); if (ret) { dev_err(ipu_crtc->dev, "getting resources failed with %d.\n", ret); return ret; } ret = imx_drm_add_crtc(&ipu_crtc->base, &ipu_crtc->imx_crtc, &ipu_crtc_helper_funcs, THIS_MODULE, ipu_crtc->dev->parent->of_node, pdata->di); if (ret) { dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret); goto err_put_resources; } ipu_crtc->irq = ipu_idmac_channel_irq(ipu, ipu_crtc->ipu_ch, IPU_IRQ_EOF); ret = devm_request_irq(ipu_crtc->dev, ipu_crtc->irq, ipu_irq_handler, 0, "imx_drm", ipu_crtc); if (ret < 0) { dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret); goto err_put_resources; } disable_irq(ipu_crtc->irq); return 0; err_put_resources: ipu_put_resources(ipu_crtc); return ret; } static int ipu_drm_probe(struct platform_device *pdev) { struct ipu_client_platformdata *pdata = pdev->dev.platform_data; struct ipu_crtc *ipu_crtc; int ret; if (!pdata) return -EINVAL; pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); ipu_crtc = devm_kzalloc(&pdev->dev, sizeof(*ipu_crtc), GFP_KERNEL); if (!ipu_crtc) return -ENOMEM; ipu_crtc->dev = &pdev->dev; ret = ipu_crtc_init(ipu_crtc, pdata); if (ret) return ret; platform_set_drvdata(pdev, ipu_crtc); return 0; } static int ipu_drm_remove(struct platform_device *pdev) { struct ipu_crtc *ipu_crtc = platform_get_drvdata(pdev); imx_drm_remove_crtc(ipu_crtc->imx_crtc); ipu_put_resources(ipu_crtc); return 0; } static struct platform_driver ipu_drm_driver = { .driver = { .name = "imx-ipuv3-crtc", }, .probe = ipu_drm_probe, .remove = ipu_drm_remove, }; module_platform_driver(ipu_drm_driver); MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
gpl-2.0
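ipu_page_flip() and ipu_irq_handler() in the file above follow the usual DRM deferred page-flip pattern: the ioctl path only arms one pending framebuffer (rejecting a second request while one is outstanding), and the end-of-frame interrupt latches it as the new scanout buffer and completes the vblank event. A minimal, self-contained C sketch of that state machine follows; the names are illustrative and no DRM API is used.

#include <stdio.h>

struct crtc_model {
	int current_fb;
	int pending_fb;	/* 0 means no flip armed, like ipu_crtc->newfb */
};

/* Mirrors ipu_page_flip(): only one flip may be outstanding at a time. */
static int flip_request(struct crtc_model *c, int fb)
{
	if (c->pending_fb)
		return -1;	/* -EBUSY in the driver */
	c->pending_fb = fb;
	return 0;
}

/* Mirrors the EOF interrupt: latch the new fb and complete the event. */
static void vblank_irq(struct crtc_model *c)
{
	if (c->pending_fb) {
		c->current_fb = c->pending_fb;
		c->pending_fb = 0;
		printf("flip completed, scanning out fb %d\n", c->current_fb);
	}
}

int main(void)
{
	struct crtc_model crtc = { 1, 0 };

	flip_request(&crtc, 2);
	if (flip_request(&crtc, 3) < 0)
		printf("second flip rejected while one is pending\n");
	vblank_irq(&crtc);	/* completes the flip to fb 2 */
	return 0;
}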
sg3/android_kernel_samsung_apollo
arch/x86/kvm/mmu.c
2321
94071
/* * Kernel-based Virtual Machine driver for Linux * * This module enables machines with Intel VT-x extensions to run virtual * machines without emulation or binary translation. * * MMU support * * Copyright (C) 2006 Qumranet, Inc. * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Authors: * Yaniv Kamay <yaniv@qumranet.com> * Avi Kivity <avi@qumranet.com> * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. * */ #include "irq.h" #include "mmu.h" #include "x86.h" #include "kvm_cache_regs.h" #include "x86.h" #include <linux/kvm_host.h> #include <linux/types.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/module.h> #include <linux/swap.h> #include <linux/hugetlb.h> #include <linux/compiler.h> #include <linux/srcu.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <asm/page.h> #include <asm/cmpxchg.h> #include <asm/io.h> #include <asm/vmx.h> /* * When setting this variable to true it enables Two-Dimensional-Paging * where the hardware walks 2 page tables: * 1. the guest-virtual to guest-physical * 2. while doing 1. it walks guest-physical to host-physical * If the hardware supports that we don't need to do shadow paging. */ bool tdp_enabled = false; enum { AUDIT_PRE_PAGE_FAULT, AUDIT_POST_PAGE_FAULT, AUDIT_PRE_PTE_WRITE, AUDIT_POST_PTE_WRITE, AUDIT_PRE_SYNC, AUDIT_POST_SYNC }; char *audit_point_name[] = { "pre page fault", "post page fault", "pre pte write", "post pte write", "pre sync", "post sync" }; #undef MMU_DEBUG #ifdef MMU_DEBUG #define pgprintk(x...) do { if (dbg) printk(x); } while (0) #define rmap_printk(x...) do { if (dbg) printk(x); } while (0) #else #define pgprintk(x...) do { } while (0) #define rmap_printk(x...) 
do { } while (0) #endif #ifdef MMU_DEBUG static int dbg = 0; module_param(dbg, bool, 0644); #endif static int oos_shadow = 1; module_param(oos_shadow, bool, 0644); #ifndef MMU_DEBUG #define ASSERT(x) do { } while (0) #else #define ASSERT(x) \ if (!(x)) { \ printk(KERN_WARNING "assertion failed %s:%d: %s\n", \ __FILE__, __LINE__, #x); \ } #endif #define PTE_PREFETCH_NUM 8 #define PT_FIRST_AVAIL_BITS_SHIFT 9 #define PT64_SECOND_AVAIL_BITS_SHIFT 52 #define PT64_LEVEL_BITS 9 #define PT64_LEVEL_SHIFT(level) \ (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS) #define PT64_INDEX(address, level)\ (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1)) #define PT32_LEVEL_BITS 10 #define PT32_LEVEL_SHIFT(level) \ (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS) #define PT32_LVL_OFFSET_MASK(level) \ (PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \ * PT32_LEVEL_BITS))) - 1)) #define PT32_INDEX(address, level)\ (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1)) #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1)) #define PT64_DIR_BASE_ADDR_MASK \ (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1)) #define PT64_LVL_ADDR_MASK(level) \ (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \ * PT64_LEVEL_BITS))) - 1)) #define PT64_LVL_OFFSET_MASK(level) \ (PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \ * PT64_LEVEL_BITS))) - 1)) #define PT32_BASE_ADDR_MASK PAGE_MASK #define PT32_DIR_BASE_ADDR_MASK \ (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1)) #define PT32_LVL_ADDR_MASK(level) \ (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \ * PT32_LEVEL_BITS))) - 1)) #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \ | PT64_NX_MASK) #define RMAP_EXT 4 #define ACC_EXEC_MASK 1 #define ACC_WRITE_MASK PT_WRITABLE_MASK #define ACC_USER_MASK PT_USER_MASK #define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK) #include <trace/events/kvm.h> #define CREATE_TRACE_POINTS #include "mmutrace.h" #define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT) #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) struct kvm_rmap_desc { u64 *sptes[RMAP_EXT]; struct kvm_rmap_desc *more; }; struct kvm_shadow_walk_iterator { u64 addr; hpa_t shadow_addr; int level; u64 *sptep; unsigned index; }; #define for_each_shadow_entry(_vcpu, _addr, _walker) \ for (shadow_walk_init(&(_walker), _vcpu, _addr); \ shadow_walk_okay(&(_walker)); \ shadow_walk_next(&(_walker))) typedef void (*mmu_parent_walk_fn) (struct kvm_mmu_page *sp, u64 *spte); static struct kmem_cache *pte_chain_cache; static struct kmem_cache *rmap_desc_cache; static struct kmem_cache *mmu_page_header_cache; static struct percpu_counter kvm_total_used_mmu_pages; static u64 __read_mostly shadow_trap_nonpresent_pte; static u64 __read_mostly shadow_notrap_nonpresent_pte; static u64 __read_mostly shadow_nx_mask; static u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */ static u64 __read_mostly shadow_user_mask; static u64 __read_mostly shadow_accessed_mask; static u64 __read_mostly shadow_dirty_mask; static inline u64 rsvd_bits(int s, int e) { return ((1ULL << (e - s + 1)) - 1) << s; } void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte) { shadow_trap_nonpresent_pte = trap_pte; shadow_notrap_nonpresent_pte = notrap_pte; } EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes); void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, u64 dirty_mask, u64 nx_mask, u64 x_mask) { shadow_user_mask = 
user_mask; shadow_accessed_mask = accessed_mask; shadow_dirty_mask = dirty_mask; shadow_nx_mask = nx_mask; shadow_x_mask = x_mask; } EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes); static bool is_write_protection(struct kvm_vcpu *vcpu) { return kvm_read_cr0_bits(vcpu, X86_CR0_WP); } static int is_cpuid_PSE36(void) { return 1; } static int is_nx(struct kvm_vcpu *vcpu) { return vcpu->arch.efer & EFER_NX; } static int is_shadow_present_pte(u64 pte) { return pte != shadow_trap_nonpresent_pte && pte != shadow_notrap_nonpresent_pte; } static int is_large_pte(u64 pte) { return pte & PT_PAGE_SIZE_MASK; } static int is_writable_pte(unsigned long pte) { return pte & PT_WRITABLE_MASK; } static int is_dirty_gpte(unsigned long pte) { return pte & PT_DIRTY_MASK; } static int is_rmap_spte(u64 pte) { return is_shadow_present_pte(pte); } static int is_last_spte(u64 pte, int level) { if (level == PT_PAGE_TABLE_LEVEL) return 1; if (is_large_pte(pte)) return 1; return 0; } static pfn_t spte_to_pfn(u64 pte) { return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT; } static gfn_t pse36_gfn_delta(u32 gpte) { int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT; return (gpte & PT32_DIR_PSE36_MASK) << shift; } static void __set_spte(u64 *sptep, u64 spte) { set_64bit(sptep, spte); } static u64 __xchg_spte(u64 *sptep, u64 new_spte) { #ifdef CONFIG_X86_64 return xchg(sptep, new_spte); #else u64 old_spte; do { old_spte = *sptep; } while (cmpxchg64(sptep, old_spte, new_spte) != old_spte); return old_spte; #endif } static bool spte_has_volatile_bits(u64 spte) { if (!shadow_accessed_mask) return false; if (!is_shadow_present_pte(spte)) return false; if ((spte & shadow_accessed_mask) && (!is_writable_pte(spte) || (spte & shadow_dirty_mask))) return false; return true; } static bool spte_is_bit_cleared(u64 old_spte, u64 new_spte, u64 bit_mask) { return (old_spte & bit_mask) && !(new_spte & bit_mask); } static void update_spte(u64 *sptep, u64 new_spte) { u64 mask, old_spte = *sptep; WARN_ON(!is_rmap_spte(new_spte)); new_spte |= old_spte & shadow_dirty_mask; mask = shadow_accessed_mask; if (is_writable_pte(old_spte)) mask |= shadow_dirty_mask; if (!spte_has_volatile_bits(old_spte) || (new_spte & mask) == mask) __set_spte(sptep, new_spte); else old_spte = __xchg_spte(sptep, new_spte); if (!shadow_accessed_mask) return; if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask)) kvm_set_pfn_accessed(spte_to_pfn(old_spte)); if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask)) kvm_set_pfn_dirty(spte_to_pfn(old_spte)); } static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, struct kmem_cache *base_cache, int min) { void *obj; if (cache->nobjs >= min) return 0; while (cache->nobjs < ARRAY_SIZE(cache->objects)) { obj = kmem_cache_zalloc(base_cache, GFP_KERNEL); if (!obj) return -ENOMEM; cache->objects[cache->nobjs++] = obj; } return 0; } static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc, struct kmem_cache *cache) { while (mc->nobjs) kmem_cache_free(cache, mc->objects[--mc->nobjs]); } static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache, int min) { void *page; if (cache->nobjs >= min) return 0; while (cache->nobjs < ARRAY_SIZE(cache->objects)) { page = (void *)__get_free_page(GFP_KERNEL); if (!page) return -ENOMEM; cache->objects[cache->nobjs++] = page; } return 0; } static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc) { while (mc->nobjs) free_page((unsigned long)mc->objects[--mc->nobjs]); } static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu) { int r; 
r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache, pte_chain_cache, 4); if (r) goto out; r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache, rmap_desc_cache, 4 + PTE_PREFETCH_NUM); if (r) goto out; r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8); if (r) goto out; r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache, mmu_page_header_cache, 4); out: return r; } static void mmu_free_memory_caches(struct kvm_vcpu *vcpu) { mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache, pte_chain_cache); mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache, rmap_desc_cache); mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache); mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache, mmu_page_header_cache); } static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc, size_t size) { void *p; BUG_ON(!mc->nobjs); p = mc->objects[--mc->nobjs]; return p; } static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu) { return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache, sizeof(struct kvm_pte_chain)); } static void mmu_free_pte_chain(struct kvm_pte_chain *pc) { kmem_cache_free(pte_chain_cache, pc); } static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu) { return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache, sizeof(struct kvm_rmap_desc)); } static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd) { kmem_cache_free(rmap_desc_cache, rd); } static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index) { if (!sp->role.direct) return sp->gfns[index]; return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS)); } static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn) { if (sp->role.direct) BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index)); else sp->gfns[index] = gfn; } /* * Return the pointer to the large page information for a given gfn, * handling slots that are not large page aligned. 
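* * Note that gfn and slot->base_gfn are shifted separately before * the subtraction; shifting their difference instead would compute * a wrong index whenever base_gfn is not large-page aligned.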
*/ static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn, struct kvm_memory_slot *slot, int level) { unsigned long idx; idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) - (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level)); return &slot->lpage_info[level - 2][idx]; } static void account_shadowed(struct kvm *kvm, gfn_t gfn) { struct kvm_memory_slot *slot; struct kvm_lpage_info *linfo; int i; slot = gfn_to_memslot(kvm, gfn); for (i = PT_DIRECTORY_LEVEL; i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) { linfo = lpage_info_slot(gfn, slot, i); linfo->write_count += 1; } } static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn) { struct kvm_memory_slot *slot; struct kvm_lpage_info *linfo; int i; slot = gfn_to_memslot(kvm, gfn); for (i = PT_DIRECTORY_LEVEL; i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) { linfo = lpage_info_slot(gfn, slot, i); linfo->write_count -= 1; WARN_ON(linfo->write_count < 0); } } static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn, int level) { struct kvm_memory_slot *slot; struct kvm_lpage_info *linfo; slot = gfn_to_memslot(kvm, gfn); if (slot) { linfo = lpage_info_slot(gfn, slot, level); return linfo->write_count; } return 1; } static int host_mapping_level(struct kvm *kvm, gfn_t gfn) { unsigned long page_size; int i, ret = 0; page_size = kvm_host_page_size(kvm, gfn); for (i = PT_PAGE_TABLE_LEVEL; i < (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES); ++i) { if (page_size >= KVM_HPAGE_SIZE(i)) ret = i; else break; } return ret; } static struct kvm_memory_slot * gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn, bool no_dirty_log) { struct kvm_memory_slot *slot; slot = gfn_to_memslot(vcpu->kvm, gfn); if (!slot || slot->flags & KVM_MEMSLOT_INVALID || (no_dirty_log && slot->dirty_bitmap)) slot = NULL; return slot; } static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn) { return !gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true); } static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn) { int host_level, level, max_level; host_level = host_mapping_level(vcpu->kvm, large_gfn); if (host_level == PT_PAGE_TABLE_LEVEL) return host_level; max_level = kvm_x86_ops->get_lpage_level() < host_level ? kvm_x86_ops->get_lpage_level() : host_level; for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level) if (has_wrprotected_page(vcpu->kvm, large_gfn, level)) break; return level - 1; } /* * Take gfn and return the reverse mapping to it. */ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level) { struct kvm_memory_slot *slot; struct kvm_lpage_info *linfo; slot = gfn_to_memslot(kvm, gfn); if (likely(level == PT_PAGE_TABLE_LEVEL)) return &slot->rmap[gfn - slot->base_gfn]; linfo = lpage_info_slot(gfn, slot, level); return &linfo->rmap_pde; } /* * Reverse mapping data structures: * * If rmapp bit zero is zero, then rmapp points to the shadow page table entry * that points to page_address(page). * * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc * containing more mappings. * * Returns the number of rmap entries before the spte was added or zero if * the spte was not added.
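* * For example, the first spte for a gfn is stored directly, so that * *rmapp == (unsigned long)spte; when a second spte is added, * rmap_add() below allocates a kvm_rmap_desc holding both and sets * *rmapp = (unsigned long)desc | 1.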
* */ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) { struct kvm_mmu_page *sp; struct kvm_rmap_desc *desc; unsigned long *rmapp; int i, count = 0; if (!is_rmap_spte(*spte)) return count; sp = page_header(__pa(spte)); kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn); rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); if (!*rmapp) { rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte); *rmapp = (unsigned long)spte; } else if (!(*rmapp & 1)) { rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte); desc = mmu_alloc_rmap_desc(vcpu); desc->sptes[0] = (u64 *)*rmapp; desc->sptes[1] = spte; *rmapp = (unsigned long)desc | 1; ++count; } else { rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte); desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul); while (desc->sptes[RMAP_EXT-1] && desc->more) { desc = desc->more; count += RMAP_EXT; } if (desc->sptes[RMAP_EXT-1]) { desc->more = mmu_alloc_rmap_desc(vcpu); desc = desc->more; } for (i = 0; desc->sptes[i]; ++i) ++count; desc->sptes[i] = spte; } return count; } static void rmap_desc_remove_entry(unsigned long *rmapp, struct kvm_rmap_desc *desc, int i, struct kvm_rmap_desc *prev_desc) { int j; for (j = RMAP_EXT - 1; !desc->sptes[j] && j > i; --j) ; desc->sptes[i] = desc->sptes[j]; desc->sptes[j] = NULL; if (j != 0) return; if (!prev_desc && !desc->more) *rmapp = (unsigned long)desc->sptes[0]; else if (prev_desc) prev_desc->more = desc->more; else *rmapp = (unsigned long)desc->more | 1; mmu_free_rmap_desc(desc); } static void rmap_remove(struct kvm *kvm, u64 *spte) { struct kvm_rmap_desc *desc; struct kvm_rmap_desc *prev_desc; struct kvm_mmu_page *sp; gfn_t gfn; unsigned long *rmapp; int i; sp = page_header(__pa(spte)); gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt); rmapp = gfn_to_rmap(kvm, gfn, sp->role.level); if (!*rmapp) { printk(KERN_ERR "rmap_remove: %p 0->BUG\n", spte); BUG(); } else if (!(*rmapp & 1)) { rmap_printk("rmap_remove: %p 1->0\n", spte); if ((u64 *)*rmapp != spte) { printk(KERN_ERR "rmap_remove: %p 1->BUG\n", spte); BUG(); } *rmapp = 0; } else { rmap_printk("rmap_remove: %p many->many\n", spte); desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul); prev_desc = NULL; while (desc) { for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) if (desc->sptes[i] == spte) { rmap_desc_remove_entry(rmapp, desc, i, prev_desc); return; } prev_desc = desc; desc = desc->more; } pr_err("rmap_remove: %p many->many\n", spte); BUG(); } } static int set_spte_track_bits(u64 *sptep, u64 new_spte) { pfn_t pfn; u64 old_spte = *sptep; if (!spte_has_volatile_bits(old_spte)) __set_spte(sptep, new_spte); else old_spte = __xchg_spte(sptep, new_spte); if (!is_rmap_spte(old_spte)) return 0; pfn = spte_to_pfn(old_spte); if (!shadow_accessed_mask || old_spte & shadow_accessed_mask) kvm_set_pfn_accessed(pfn); if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask)) kvm_set_pfn_dirty(pfn); return 1; } static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte) { if (set_spte_track_bits(sptep, new_spte)) rmap_remove(kvm, sptep); } static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte) { struct kvm_rmap_desc *desc; u64 *prev_spte; int i; if (!*rmapp) return NULL; else if (!(*rmapp & 1)) { if (!spte) return (u64 *)*rmapp; return NULL; } desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul); prev_spte = NULL; while (desc) { for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) { if (prev_spte == spte) return desc->sptes[i]; prev_spte = desc->sptes[i]; } desc = desc->more; } return NULL; } static int rmap_write_protect(struct kvm *kvm, u64 gfn) { unsigned long 
*rmapp; u64 *spte; int i, write_protected = 0; rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL); spte = rmap_next(kvm, rmapp, NULL); while (spte) { BUG_ON(!spte); BUG_ON(!(*spte & PT_PRESENT_MASK)); rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte); if (is_writable_pte(*spte)) { update_spte(spte, *spte & ~PT_WRITABLE_MASK); write_protected = 1; } spte = rmap_next(kvm, rmapp, spte); } /* check for huge page mappings */ for (i = PT_DIRECTORY_LEVEL; i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) { rmapp = gfn_to_rmap(kvm, gfn, i); spte = rmap_next(kvm, rmapp, NULL); while (spte) { BUG_ON(!spte); BUG_ON(!(*spte & PT_PRESENT_MASK)); BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)); pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn); if (is_writable_pte(*spte)) { drop_spte(kvm, spte, shadow_trap_nonpresent_pte); --kvm->stat.lpages; spte = NULL; write_protected = 1; } spte = rmap_next(kvm, rmapp, spte); } } return write_protected; } static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, unsigned long data) { u64 *spte; int need_tlb_flush = 0; while ((spte = rmap_next(kvm, rmapp, NULL))) { BUG_ON(!(*spte & PT_PRESENT_MASK)); rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte); drop_spte(kvm, spte, shadow_trap_nonpresent_pte); need_tlb_flush = 1; } return need_tlb_flush; } static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp, unsigned long data) { int need_flush = 0; u64 *spte, new_spte; pte_t *ptep = (pte_t *)data; pfn_t new_pfn; WARN_ON(pte_huge(*ptep)); new_pfn = pte_pfn(*ptep); spte = rmap_next(kvm, rmapp, NULL); while (spte) { BUG_ON(!is_shadow_present_pte(*spte)); rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte); need_flush = 1; if (pte_write(*ptep)) { drop_spte(kvm, spte, shadow_trap_nonpresent_pte); spte = rmap_next(kvm, rmapp, NULL); } else { new_spte = *spte &~ (PT64_BASE_ADDR_MASK); new_spte |= (u64)new_pfn << PAGE_SHIFT; new_spte &= ~PT_WRITABLE_MASK; new_spte &= ~SPTE_HOST_WRITEABLE; new_spte &= ~shadow_accessed_mask; set_spte_track_bits(spte, new_spte); spte = rmap_next(kvm, rmapp, spte); } } if (need_flush) kvm_flush_remote_tlbs(kvm); return 0; } static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, unsigned long data, int (*handler)(struct kvm *kvm, unsigned long *rmapp, unsigned long data)) { int i, j; int ret; int retval = 0; struct kvm_memslots *slots; slots = kvm_memslots(kvm); for (i = 0; i < slots->nmemslots; i++) { struct kvm_memory_slot *memslot = &slots->memslots[i]; unsigned long start = memslot->userspace_addr; unsigned long end; end = start + (memslot->npages << PAGE_SHIFT); if (hva >= start && hva < end) { gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT; gfn_t gfn = memslot->base_gfn + gfn_offset; ret = handler(kvm, &memslot->rmap[gfn_offset], data); for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) { struct kvm_lpage_info *linfo; linfo = lpage_info_slot(gfn, memslot, PT_DIRECTORY_LEVEL + j); ret |= handler(kvm, &linfo->rmap_pde, data); } trace_kvm_age_page(hva, memslot, ret); retval |= ret; } } return retval; } int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) { return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp); } void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) { kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp); } static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, unsigned long data) { u64 *spte; int young = 0; /* * Emulate the accessed bit for EPT, by checking if this page has 
* an EPT mapping, and clearing it if it does. On the next access, * a new EPT mapping will be established. * This has some overhead, but not as much as the cost of swapping * out actively used pages or breaking up actively used hugepages. */ if (!shadow_accessed_mask) return kvm_unmap_rmapp(kvm, rmapp, data); spte = rmap_next(kvm, rmapp, NULL); while (spte) { int _young; u64 _spte = *spte; BUG_ON(!(_spte & PT_PRESENT_MASK)); _young = _spte & PT_ACCESSED_MASK; if (_young) { young = 1; clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte); } spte = rmap_next(kvm, rmapp, spte); } return young; } static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp, unsigned long data) { u64 *spte; int young = 0; /* * If there's no access bit in the secondary pte set by the * hardware, it's up to gup-fast/gup to set the access bit in * the primary pte or in the page structure. */ if (!shadow_accessed_mask) goto out; spte = rmap_next(kvm, rmapp, NULL); while (spte) { u64 _spte = *spte; BUG_ON(!(_spte & PT_PRESENT_MASK)); young = _spte & PT_ACCESSED_MASK; if (young) { young = 1; break; } spte = rmap_next(kvm, rmapp, spte); } out: return young; } #define RMAP_RECYCLE_THRESHOLD 1000 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) { unsigned long *rmapp; struct kvm_mmu_page *sp; sp = page_header(__pa(spte)); rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); kvm_unmap_rmapp(vcpu->kvm, rmapp, 0); kvm_flush_remote_tlbs(vcpu->kvm); } int kvm_age_hva(struct kvm *kvm, unsigned long hva) { return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp); } int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) { return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp); } #ifdef MMU_DEBUG static int is_empty_shadow_page(u64 *spt) { u64 *pos; u64 *end; for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++) if (is_shadow_present_pte(*pos)) { printk(KERN_ERR "%s: %p %llx\n", __func__, pos, *pos); return 0; } return 1; } #endif /* * This value is the sum of all of the kvm instances' * kvm->arch.n_used_mmu_pages values.
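* (It is updated in kvm_mod_used_mmu_pages() below through the * kvm_total_used_mmu_pages percpu_counter.)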
We need a global, * aggregate version in order to make the slab shrinker * faster */ static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr) { kvm->arch.n_used_mmu_pages += nr; percpu_counter_add(&kvm_total_used_mmu_pages, nr); } static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp) { ASSERT(is_empty_shadow_page(sp->spt)); hlist_del(&sp->hash_link); list_del(&sp->link); free_page((unsigned long)sp->spt); if (!sp->role.direct) free_page((unsigned long)sp->gfns); kmem_cache_free(mmu_page_header_cache, sp); kvm_mod_used_mmu_pages(kvm, -1); } static unsigned kvm_page_table_hashfn(gfn_t gfn) { return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1); } static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, u64 *parent_pte, int direct) { struct kvm_mmu_page *sp; sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp); sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE); if (!direct) sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE); set_page_private(virt_to_page(sp->spt), (unsigned long)sp); list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages); bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS); sp->multimapped = 0; sp->parent_pte = parent_pte; kvm_mod_used_mmu_pages(vcpu->kvm, +1); return sp; } static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *parent_pte) { struct kvm_pte_chain *pte_chain; struct hlist_node *node; int i; if (!parent_pte) return; if (!sp->multimapped) { u64 *old = sp->parent_pte; if (!old) { sp->parent_pte = parent_pte; return; } sp->multimapped = 1; pte_chain = mmu_alloc_pte_chain(vcpu); INIT_HLIST_HEAD(&sp->parent_ptes); hlist_add_head(&pte_chain->link, &sp->parent_ptes); pte_chain->parent_ptes[0] = old; } hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) { if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1]) continue; for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) if (!pte_chain->parent_ptes[i]) { pte_chain->parent_ptes[i] = parent_pte; return; } } pte_chain = mmu_alloc_pte_chain(vcpu); BUG_ON(!pte_chain); hlist_add_head(&pte_chain->link, &sp->parent_ptes); pte_chain->parent_ptes[0] = parent_pte; } static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp, u64 *parent_pte) { struct kvm_pte_chain *pte_chain; struct hlist_node *node; int i; if (!sp->multimapped) { BUG_ON(sp->parent_pte != parent_pte); sp->parent_pte = NULL; return; } hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) { if (!pte_chain->parent_ptes[i]) break; if (pte_chain->parent_ptes[i] != parent_pte) continue; while (i + 1 < NR_PTE_CHAIN_ENTRIES && pte_chain->parent_ptes[i + 1]) { pte_chain->parent_ptes[i] = pte_chain->parent_ptes[i + 1]; ++i; } pte_chain->parent_ptes[i] = NULL; if (i == 0) { hlist_del(&pte_chain->link); mmu_free_pte_chain(pte_chain); if (hlist_empty(&sp->parent_ptes)) { sp->multimapped = 0; sp->parent_pte = NULL; } } return; } BUG(); } static void mmu_parent_walk(struct kvm_mmu_page *sp, mmu_parent_walk_fn fn) { struct kvm_pte_chain *pte_chain; struct hlist_node *node; struct kvm_mmu_page *parent_sp; int i; if (!sp->multimapped && sp->parent_pte) { parent_sp = page_header(__pa(sp->parent_pte)); fn(parent_sp, sp->parent_pte); return; } hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) { u64 *spte = pte_chain->parent_ptes[i]; if (!spte) break; parent_sp = page_header(__pa(spte)); fn(parent_sp, spte); } } static void 
mark_unsync(struct kvm_mmu_page *sp, u64 *spte); static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp) { mmu_parent_walk(sp, mark_unsync); } static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte) { unsigned int index; index = spte - sp->spt; if (__test_and_set_bit(index, sp->unsync_child_bitmap)) return; if (sp->unsync_children++) return; kvm_mmu_mark_parents_unsync(sp); } static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) { int i; for (i = 0; i < PT64_ENT_PER_PAGE; ++i) sp->spt[i] = shadow_trap_nonpresent_pte; } static int nonpaging_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) { return 1; } static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva) { } static void nonpaging_update_pte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, const void *pte) { WARN_ON(1); } #define KVM_PAGE_ARRAY_NR 16 struct kvm_mmu_pages { struct mmu_page_and_offset { struct kvm_mmu_page *sp; unsigned int idx; } page[KVM_PAGE_ARRAY_NR]; unsigned int nr; }; #define for_each_unsync_children(bitmap, idx) \ for (idx = find_first_bit(bitmap, 512); \ idx < 512; \ idx = find_next_bit(bitmap, 512, idx+1)) static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp, int idx) { int i; if (sp->unsync) for (i=0; i < pvec->nr; i++) if (pvec->page[i].sp == sp) return 0; pvec->page[pvec->nr].sp = sp; pvec->page[pvec->nr].idx = idx; pvec->nr++; return (pvec->nr == KVM_PAGE_ARRAY_NR); } static int __mmu_unsync_walk(struct kvm_mmu_page *sp, struct kvm_mmu_pages *pvec) { int i, ret, nr_unsync_leaf = 0; for_each_unsync_children(sp->unsync_child_bitmap, i) { struct kvm_mmu_page *child; u64 ent = sp->spt[i]; if (!is_shadow_present_pte(ent) || is_large_pte(ent)) goto clear_child_bitmap; child = page_header(ent & PT64_BASE_ADDR_MASK); if (child->unsync_children) { if (mmu_pages_add(pvec, child, i)) return -ENOSPC; ret = __mmu_unsync_walk(child, pvec); if (!ret) goto clear_child_bitmap; else if (ret > 0) nr_unsync_leaf += ret; else return ret; } else if (child->unsync) { nr_unsync_leaf++; if (mmu_pages_add(pvec, child, i)) return -ENOSPC; } else goto clear_child_bitmap; continue; clear_child_bitmap: __clear_bit(i, sp->unsync_child_bitmap); sp->unsync_children--; WARN_ON((int)sp->unsync_children < 0); } return nr_unsync_leaf; } static int mmu_unsync_walk(struct kvm_mmu_page *sp, struct kvm_mmu_pages *pvec) { if (!sp->unsync_children) return 0; mmu_pages_add(pvec, sp, 0); return __mmu_unsync_walk(sp, pvec); } static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) { WARN_ON(!sp->unsync); trace_kvm_mmu_sync_page(sp); sp->unsync = 0; --kvm->stat.mmu_unsync; } static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, struct list_head *invalid_list); static void kvm_mmu_commit_zap_page(struct kvm *kvm, struct list_head *invalid_list); #define for_each_gfn_sp(kvm, sp, gfn, pos) \ hlist_for_each_entry(sp, pos, \ &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \ if ((sp)->gfn != (gfn)) {} else #define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos) \ hlist_for_each_entry(sp, pos, \ &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \ if ((sp)->gfn != (gfn) || (sp)->role.direct || \ (sp)->role.invalid) {} else /* @sp->gfn should be write-protected at the call site */ static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, struct list_head *invalid_list, bool clear_unsync) { if (sp->role.cr4_pae != !!is_pae(vcpu)) { kvm_mmu_prepare_zap_page(vcpu->kvm, sp, 
invalid_list); return 1; } if (clear_unsync) kvm_unlink_unsync_page(vcpu->kvm, sp); if (vcpu->arch.mmu.sync_page(vcpu, sp)) { kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); return 1; } kvm_mmu_flush_tlb(vcpu); return 0; } static int kvm_sync_page_transient(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) { LIST_HEAD(invalid_list); int ret; ret = __kvm_sync_page(vcpu, sp, &invalid_list, false); if (ret) kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); return ret; } static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, struct list_head *invalid_list) { return __kvm_sync_page(vcpu, sp, invalid_list, true); } /* @gfn should be write-protected at the call site */ static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) { struct kvm_mmu_page *s; struct hlist_node *node; LIST_HEAD(invalid_list); bool flush = false; for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) { if (!s->unsync) continue; WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL); kvm_unlink_unsync_page(vcpu->kvm, s); if ((s->role.cr4_pae != !!is_pae(vcpu)) || (vcpu->arch.mmu.sync_page(vcpu, s))) { kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list); continue; } flush = true; } kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); if (flush) kvm_mmu_flush_tlb(vcpu); } struct mmu_page_path { struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1]; unsigned int idx[PT64_ROOT_LEVEL-1]; }; #define for_each_sp(pvec, sp, parents, i) \ for (i = mmu_pages_next(&pvec, &parents, -1), \ sp = pvec.page[i].sp; \ i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \ i = mmu_pages_next(&pvec, &parents, i)) static int mmu_pages_next(struct kvm_mmu_pages *pvec, struct mmu_page_path *parents, int i) { int n; for (n = i+1; n < pvec->nr; n++) { struct kvm_mmu_page *sp = pvec->page[n].sp; if (sp->role.level == PT_PAGE_TABLE_LEVEL) { parents->idx[0] = pvec->page[n].idx; return n; } parents->parent[sp->role.level-2] = sp; parents->idx[sp->role.level-1] = pvec->page[n].idx; } return n; } static void mmu_pages_clear_parents(struct mmu_page_path *parents) { struct kvm_mmu_page *sp; unsigned int level = 0; do { unsigned int idx = parents->idx[level]; sp = parents->parent[level]; if (!sp) return; --sp->unsync_children; WARN_ON((int)sp->unsync_children < 0); __clear_bit(idx, sp->unsync_child_bitmap); level++; } while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children); } static void kvm_mmu_pages_init(struct kvm_mmu_page *parent, struct mmu_page_path *parents, struct kvm_mmu_pages *pvec) { parents->parent[parent->role.level-1] = NULL; pvec->nr = 0; } static void mmu_sync_children(struct kvm_vcpu *vcpu, struct kvm_mmu_page *parent) { int i; struct kvm_mmu_page *sp; struct mmu_page_path parents; struct kvm_mmu_pages pages; LIST_HEAD(invalid_list); kvm_mmu_pages_init(parent, &parents, &pages); while (mmu_unsync_walk(parent, &pages)) { int protected = 0; for_each_sp(pages, sp, parents, i) protected |= rmap_write_protect(vcpu->kvm, sp->gfn); if (protected) kvm_flush_remote_tlbs(vcpu->kvm); for_each_sp(pages, sp, parents, i) { kvm_sync_page(vcpu, sp, &invalid_list); mmu_pages_clear_parents(&parents); } kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); cond_resched_lock(&vcpu->kvm->mmu_lock); kvm_mmu_pages_init(parent, &parents, &pages); } } static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gaddr, unsigned level, int direct, unsigned access, u64 *parent_pte) { union kvm_mmu_page_role role; unsigned quadrant; struct kvm_mmu_page *sp; struct hlist_node *node; bool need_sync = false; role = vcpu->arch.mmu.base_role; 
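/* Descriptive note (added): build the lookup role; an existing sp is reused only on an exact role.word match. The quadrant bits distinguish which part of a 32-bit guest table a 64-bit shadow page covers, since the shadow page spans less address space than the guest page it shadows. */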
role.level = level; role.direct = direct; if (role.direct) role.cr4_pae = 0; role.access = access; if (!vcpu->arch.mmu.direct_map && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) { quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level)); quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1; role.quadrant = quadrant; } for_each_gfn_sp(vcpu->kvm, sp, gfn, node) { if (!need_sync && sp->unsync) need_sync = true; if (sp->role.word != role.word) continue; if (sp->unsync && kvm_sync_page_transient(vcpu, sp)) break; mmu_page_add_parent_pte(vcpu, sp, parent_pte); if (sp->unsync_children) { kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); kvm_mmu_mark_parents_unsync(sp); } else if (sp->unsync) kvm_mmu_mark_parents_unsync(sp); trace_kvm_mmu_get_page(sp, false); return sp; } ++vcpu->kvm->stat.mmu_cache_miss; sp = kvm_mmu_alloc_page(vcpu, parent_pte, direct); if (!sp) return sp; sp->gfn = gfn; sp->role = role; hlist_add_head(&sp->hash_link, &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]); if (!direct) { if (rmap_write_protect(vcpu->kvm, gfn)) kvm_flush_remote_tlbs(vcpu->kvm); if (level > PT_PAGE_TABLE_LEVEL && need_sync) kvm_sync_pages(vcpu, gfn); account_shadowed(vcpu->kvm, gfn); } if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte) vcpu->arch.mmu.prefetch_page(vcpu, sp); else nonpaging_prefetch_page(vcpu, sp); trace_kvm_mmu_get_page(sp, true); return sp; } static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator, struct kvm_vcpu *vcpu, u64 addr) { iterator->addr = addr; iterator->shadow_addr = vcpu->arch.mmu.root_hpa; iterator->level = vcpu->arch.mmu.shadow_root_level; if (iterator->level == PT64_ROOT_LEVEL && vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL && !vcpu->arch.mmu.direct_map) --iterator->level; if (iterator->level == PT32E_ROOT_LEVEL) { iterator->shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3]; iterator->shadow_addr &= PT64_BASE_ADDR_MASK; --iterator->level; if (!iterator->shadow_addr) iterator->level = 0; } } static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator) { if (iterator->level < PT_PAGE_TABLE_LEVEL) return false; if (iterator->level == PT_PAGE_TABLE_LEVEL) if (is_large_pte(*iterator->sptep)) return false; iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level); iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index; return true; } static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator) { iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK; --iterator->level; } static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp) { u64 spte; spte = __pa(sp->spt) | PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK; __set_spte(sptep, spte); } static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep) { if (is_large_pte(*sptep)) { drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte); kvm_flush_remote_tlbs(vcpu->kvm); } } static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned direct_access) { if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) { struct kvm_mmu_page *child; /* * For the direct sp, if the guest pte's dirty bit * changed from clean to dirty, it will corrupt the * sp's access: writes would be allowed in the read-only sp, * so we should update the spte at this point to get * a new sp with the correct access.
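* * (Hence the child is unlinked and the spte cleared below; the next * fault installs a new sp whose role.access matches the guest pte.)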
*/ child = page_header(*sptep & PT64_BASE_ADDR_MASK); if (child->role.access == direct_access) return; mmu_page_remove_parent_pte(child, sptep); __set_spte(sptep, shadow_trap_nonpresent_pte); kvm_flush_remote_tlbs(vcpu->kvm); } } static void kvm_mmu_page_unlink_children(struct kvm *kvm, struct kvm_mmu_page *sp) { unsigned i; u64 *pt; u64 ent; pt = sp->spt; for (i = 0; i < PT64_ENT_PER_PAGE; ++i) { ent = pt[i]; if (is_shadow_present_pte(ent)) { if (!is_last_spte(ent, sp->role.level)) { ent &= PT64_BASE_ADDR_MASK; mmu_page_remove_parent_pte(page_header(ent), &pt[i]); } else { if (is_large_pte(ent)) --kvm->stat.lpages; drop_spte(kvm, &pt[i], shadow_trap_nonpresent_pte); } } pt[i] = shadow_trap_nonpresent_pte; } } static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte) { mmu_page_remove_parent_pte(sp, parent_pte); } static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm) { int i; struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) vcpu->arch.last_pte_updated = NULL; } static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp) { u64 *parent_pte; while (sp->multimapped || sp->parent_pte) { if (!sp->multimapped) parent_pte = sp->parent_pte; else { struct kvm_pte_chain *chain; chain = container_of(sp->parent_ptes.first, struct kvm_pte_chain, link); parent_pte = chain->parent_ptes[0]; } BUG_ON(!parent_pte); kvm_mmu_put_page(sp, parent_pte); __set_spte(parent_pte, shadow_trap_nonpresent_pte); } } static int mmu_zap_unsync_children(struct kvm *kvm, struct kvm_mmu_page *parent, struct list_head *invalid_list) { int i, zapped = 0; struct mmu_page_path parents; struct kvm_mmu_pages pages; if (parent->role.level == PT_PAGE_TABLE_LEVEL) return 0; kvm_mmu_pages_init(parent, &parents, &pages); while (mmu_unsync_walk(parent, &pages)) { struct kvm_mmu_page *sp; for_each_sp(pages, sp, parents, i) { kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); mmu_pages_clear_parents(&parents); zapped++; } kvm_mmu_pages_init(parent, &parents, &pages); } return zapped; } static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, struct list_head *invalid_list) { int ret; trace_kvm_mmu_prepare_zap_page(sp); ++kvm->stat.mmu_shadow_zapped; ret = mmu_zap_unsync_children(kvm, sp, invalid_list); kvm_mmu_page_unlink_children(kvm, sp); kvm_mmu_unlink_parents(kvm, sp); if (!sp->role.invalid && !sp->role.direct) unaccount_shadowed(kvm, sp->gfn); if (sp->unsync) kvm_unlink_unsync_page(kvm, sp); if (!sp->root_count) { /* Count self */ ret++; list_move(&sp->link, invalid_list); } else { list_move(&sp->link, &kvm->arch.active_mmu_pages); kvm_reload_remote_mmus(kvm); } sp->role.invalid = 1; kvm_mmu_reset_last_pte_updated(kvm); return ret; } static void kvm_mmu_commit_zap_page(struct kvm *kvm, struct list_head *invalid_list) { struct kvm_mmu_page *sp; if (list_empty(invalid_list)) return; kvm_flush_remote_tlbs(kvm); do { sp = list_first_entry(invalid_list, struct kvm_mmu_page, link); WARN_ON(!sp->role.invalid || sp->root_count); kvm_mmu_free_page(kvm, sp); } while (!list_empty(invalid_list)); } /* * Changing the number of mmu pages allocated to the vm. * Note: if goal_nr_mmu_pages is too small, you will get a deadlock. */ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages) { LIST_HEAD(invalid_list); /* * If we set the number of mmu pages to be smaller than the * number of active pages, we must free some mmu pages before we * change the value */ if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) { while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages
&& !list_empty(&kvm->arch.active_mmu_pages)) { struct kvm_mmu_page *page; page = container_of(kvm->arch.active_mmu_pages.prev, struct kvm_mmu_page, link); kvm_mmu_prepare_zap_page(kvm, page, &invalid_list); kvm_mmu_commit_zap_page(kvm, &invalid_list); } goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages; } kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages; } static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) { struct kvm_mmu_page *sp; struct hlist_node *node; LIST_HEAD(invalid_list); int r; pgprintk("%s: looking for gfn %llx\n", __func__, gfn); r = 0; for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) { pgprintk("%s: gfn %llx role %x\n", __func__, gfn, sp->role.word); r = 1; kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); } kvm_mmu_commit_zap_page(kvm, &invalid_list); return r; } static void mmu_unshadow(struct kvm *kvm, gfn_t gfn) { struct kvm_mmu_page *sp; struct hlist_node *node; LIST_HEAD(invalid_list); for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) { pgprintk("%s: zap %llx %x\n", __func__, gfn, sp->role.word); kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); } kvm_mmu_commit_zap_page(kvm, &invalid_list); } static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn) { int slot = memslot_id(kvm, gfn); struct kvm_mmu_page *sp = page_header(__pa(pte)); __set_bit(slot, sp->slot_bitmap); } static void mmu_convert_notrap(struct kvm_mmu_page *sp) { int i; u64 *pt = sp->spt; if (shadow_trap_nonpresent_pte == shadow_notrap_nonpresent_pte) return; for (i = 0; i < PT64_ENT_PER_PAGE; ++i) { if (pt[i] == shadow_notrap_nonpresent_pte) __set_spte(&pt[i], shadow_trap_nonpresent_pte); } } /* * The function is based on mtrr_type_lookup() in * arch/x86/kernel/cpu/mtrr/generic.c */ static int get_mtrr_type(struct mtrr_state_type *mtrr_state, u64 start, u64 end) { int i; u64 base, mask; u8 prev_match, curr_match; int num_var_ranges = KVM_NR_VAR_MTRR; if (!mtrr_state->enabled) return 0xFF; /* Make end inclusive, instead of exclusive */ end--; /* Look in fixed ranges.
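(Fixed MTRRs cover 8 x 64KB slots up to 0x80000, 16 x 16KB slots up to 0xC0000 and 64 x 4KB slots up to 0x100000, which is where the >> 16, >> 14 and >> 12 shifts below come from.)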
Just return the type as per start */ if (mtrr_state->have_fixed && (start < 0x100000)) { int idx; if (start < 0x80000) { idx = 0; idx += (start >> 16); return mtrr_state->fixed_ranges[idx]; } else if (start < 0xC0000) { idx = 1 * 8; idx += ((start - 0x80000) >> 14); return mtrr_state->fixed_ranges[idx]; } else if (start < 0x1000000) { idx = 3 * 8; idx += ((start - 0xC0000) >> 12); return mtrr_state->fixed_ranges[idx]; } } /* * Look in variable ranges * Look for multiple ranges matching this address and pick type * as per MTRR precedence */ if (!(mtrr_state->enabled & 2)) return mtrr_state->def_type; prev_match = 0xFF; for (i = 0; i < num_var_ranges; ++i) { unsigned short start_state, end_state; if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11))) continue; base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) + (mtrr_state->var_ranges[i].base_lo & PAGE_MASK); mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) + (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK); start_state = ((start & mask) == (base & mask)); end_state = ((end & mask) == (base & mask)); if (start_state != end_state) return 0xFE; if ((start & mask) != (base & mask)) continue; curr_match = mtrr_state->var_ranges[i].base_lo & 0xff; if (prev_match == 0xFF) { prev_match = curr_match; continue; } if (prev_match == MTRR_TYPE_UNCACHABLE || curr_match == MTRR_TYPE_UNCACHABLE) return MTRR_TYPE_UNCACHABLE; if ((prev_match == MTRR_TYPE_WRBACK && curr_match == MTRR_TYPE_WRTHROUGH) || (prev_match == MTRR_TYPE_WRTHROUGH && curr_match == MTRR_TYPE_WRBACK)) { prev_match = MTRR_TYPE_WRTHROUGH; curr_match = MTRR_TYPE_WRTHROUGH; } if (prev_match != curr_match) return MTRR_TYPE_UNCACHABLE; } if (prev_match != 0xFF) return prev_match; return mtrr_state->def_type; } u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn) { u8 mtrr; mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT, (gfn << PAGE_SHIFT) + PAGE_SIZE); if (mtrr == 0xfe || mtrr == 0xff) mtrr = MTRR_TYPE_WRBACK; return mtrr; } EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type); static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) { trace_kvm_mmu_unsync_page(sp); ++vcpu->kvm->stat.mmu_unsync; sp->unsync = 1; kvm_mmu_mark_parents_unsync(sp); mmu_convert_notrap(sp); } static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) { struct kvm_mmu_page *s; struct hlist_node *node; for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) { if (s->unsync) continue; WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL); __kvm_unsync_page(vcpu, s); } } static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync) { struct kvm_mmu_page *s; struct hlist_node *node; bool need_unsync = false; for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) { if (!can_unsync) return 1; if (s->role.level != PT_PAGE_TABLE_LEVEL) return 1; if (!need_unsync && !s->unsync) { if (!oos_shadow) return 1; need_unsync = true; } } if (need_unsync) kvm_unsync_pages(vcpu, gfn); return 0; } static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access, int user_fault, int write_fault, int dirty, int level, gfn_t gfn, pfn_t pfn, bool speculative, bool can_unsync, bool host_writable) { u64 spte, entry = *sptep; int ret = 0; /* * We don't set the accessed bit, since we sometimes want to see * whether the guest actually used the pte (in order to detect * demand paging).
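* Only non-speculative mappings get shadow_accessed_mask set below, * so a later access-bit scan reflects real guest use.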
*/ spte = PT_PRESENT_MASK; if (!speculative) spte |= shadow_accessed_mask; if (!dirty) pte_access &= ~ACC_WRITE_MASK; if (pte_access & ACC_EXEC_MASK) spte |= shadow_x_mask; else spte |= shadow_nx_mask; if (pte_access & ACC_USER_MASK) spte |= shadow_user_mask; if (level > PT_PAGE_TABLE_LEVEL) spte |= PT_PAGE_SIZE_MASK; if (tdp_enabled) spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, kvm_is_mmio_pfn(pfn)); if (host_writable) spte |= SPTE_HOST_WRITEABLE; else pte_access &= ~ACC_WRITE_MASK; spte |= (u64)pfn << PAGE_SHIFT; if ((pte_access & ACC_WRITE_MASK) || (!vcpu->arch.mmu.direct_map && write_fault && !is_write_protection(vcpu) && !user_fault)) { if (level > PT_PAGE_TABLE_LEVEL && has_wrprotected_page(vcpu->kvm, gfn, level)) { ret = 1; drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte); goto done; } spte |= PT_WRITABLE_MASK; if (!vcpu->arch.mmu.direct_map && !(pte_access & ACC_WRITE_MASK)) spte &= ~PT_USER_MASK; /* * Optimization: for pte sync, if spte was writable the hash * lookup is unnecessary (and expensive). Write protection * is responsibility of mmu_get_page / kvm_sync_page. * Same reasoning can be applied to dirty page accounting. */ if (!can_unsync && is_writable_pte(*sptep)) goto set_pte; if (mmu_need_write_protect(vcpu, gfn, can_unsync)) { pgprintk("%s: found shadow page for %llx, marking ro\n", __func__, gfn); ret = 1; pte_access &= ~ACC_WRITE_MASK; if (is_writable_pte(spte)) spte &= ~PT_WRITABLE_MASK; } } if (pte_access & ACC_WRITE_MASK) mark_page_dirty(vcpu->kvm, gfn); set_pte: update_spte(sptep, spte); /* * If we overwrite a writable spte with a read-only one we * should flush remote TLBs. Otherwise rmap_write_protect * will find a read-only spte, even though the writable spte * might be cached on a CPU's TLB. */ if (is_writable_pte(entry) && !is_writable_pte(*sptep)) kvm_flush_remote_tlbs(vcpu->kvm); done: return ret; } static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pt_access, unsigned pte_access, int user_fault, int write_fault, int dirty, int *ptwrite, int level, gfn_t gfn, pfn_t pfn, bool speculative, bool host_writable) { int was_rmapped = 0; int rmap_count; pgprintk("%s: spte %llx access %x write_fault %d" " user_fault %d gfn %llx\n", __func__, *sptep, pt_access, write_fault, user_fault, gfn); if (is_rmap_spte(*sptep)) { /* * If we overwrite a PTE page pointer with a 2MB PMD, unlink * the parent of the now unreachable PTE. */ if (level > PT_PAGE_TABLE_LEVEL && !is_large_pte(*sptep)) { struct kvm_mmu_page *child; u64 pte = *sptep; child = page_header(pte & PT64_BASE_ADDR_MASK); mmu_page_remove_parent_pte(child, sptep); __set_spte(sptep, shadow_trap_nonpresent_pte); kvm_flush_remote_tlbs(vcpu->kvm); } else if (pfn != spte_to_pfn(*sptep)) { pgprintk("hfn old %llx new %llx\n", spte_to_pfn(*sptep), pfn); drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte); kvm_flush_remote_tlbs(vcpu->kvm); } else was_rmapped = 1; } if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault, dirty, level, gfn, pfn, speculative, true, host_writable)) { if (write_fault) *ptwrite = 1; kvm_mmu_flush_tlb(vcpu); } pgprintk("%s: setting spte %llx\n", __func__, *sptep); pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n", is_large_pte(*sptep)? 
"2MB" : "4kB", *sptep & PT_PRESENT_MASK ?"RW":"R", gfn, *sptep, sptep); if (!was_rmapped && is_large_pte(*sptep)) ++vcpu->kvm->stat.lpages; page_header_update_slot(vcpu->kvm, sptep, gfn); if (!was_rmapped) { rmap_count = rmap_add(vcpu, sptep, gfn); if (rmap_count > RMAP_RECYCLE_THRESHOLD) rmap_recycle(vcpu, sptep, gfn); } kvm_release_pfn_clean(pfn); if (speculative) { vcpu->arch.last_pte_updated = sptep; vcpu->arch.last_pte_gfn = gfn; } } static void nonpaging_new_cr3(struct kvm_vcpu *vcpu) { } static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool no_dirty_log) { struct kvm_memory_slot *slot; unsigned long hva; slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log); if (!slot) { get_page(bad_page); return page_to_pfn(bad_page); } hva = gfn_to_hva_memslot(slot, gfn); return hva_to_pfn_atomic(vcpu->kvm, hva); } static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *start, u64 *end) { struct page *pages[PTE_PREFETCH_NUM]; unsigned access = sp->role.access; int i, ret; gfn_t gfn; gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt); if (!gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK)) return -1; ret = gfn_to_page_many_atomic(vcpu->kvm, gfn, pages, end - start); if (ret <= 0) return -1; for (i = 0; i < ret; i++, gfn++, start++) mmu_set_spte(vcpu, start, ACC_ALL, access, 0, 0, 1, NULL, sp->role.level, gfn, page_to_pfn(pages[i]), true, true); return 0; } static void __direct_pte_prefetch(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *sptep) { u64 *spte, *start = NULL; int i; WARN_ON(!sp->role.direct); i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1); spte = sp->spt + i; for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) { if (*spte != shadow_trap_nonpresent_pte || spte == sptep) { if (!start) continue; if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0) break; start = NULL; } else if (!start) start = spte; } } static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep) { struct kvm_mmu_page *sp; /* * Since it's no accessed bit on EPT, it's no way to * distinguish between actually accessed translations * and prefetched, so disable pte prefetch if EPT is * enabled. 
*/ if (!shadow_accessed_mask) return; sp = page_header(__pa(sptep)); if (sp->role.level > PT_PAGE_TABLE_LEVEL) return; __direct_pte_prefetch(vcpu, sp, sptep); } static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, int map_writable, int level, gfn_t gfn, pfn_t pfn, bool prefault) { struct kvm_shadow_walk_iterator iterator; struct kvm_mmu_page *sp; int pt_write = 0; gfn_t pseudo_gfn; for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) { if (iterator.level == level) { unsigned pte_access = ACC_ALL; mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, pte_access, 0, write, 1, &pt_write, level, gfn, pfn, prefault, map_writable); direct_pte_prefetch(vcpu, iterator.sptep); ++vcpu->stat.pf_fixed; break; } if (*iterator.sptep == shadow_trap_nonpresent_pte) { u64 base_addr = iterator.addr; base_addr &= PT64_LVL_ADDR_MASK(iterator.level); pseudo_gfn = base_addr >> PAGE_SHIFT; sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr, iterator.level - 1, 1, ACC_ALL, iterator.sptep); if (!sp) { pgprintk("nonpaging_map: ENOMEM\n"); kvm_release_pfn_clean(pfn); return -ENOMEM; } __set_spte(iterator.sptep, __pa(sp->spt) | PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask | shadow_x_mask | shadow_accessed_mask); } } return pt_write; } static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk) { siginfo_t info; info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_MCEERR_AR; info.si_addr = (void __user *)address; info.si_addr_lsb = PAGE_SHIFT; send_sig_info(SIGBUS, &info, tsk); } static int kvm_handle_bad_page(struct kvm *kvm, gfn_t gfn, pfn_t pfn) { kvm_release_pfn_clean(pfn); if (is_hwpoison_pfn(pfn)) { kvm_send_hwpoison_signal(gfn_to_hva(kvm, gfn), current); return 0; } else if (is_fault_pfn(pfn)) return -EFAULT; return 1; } static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t *gfnp, pfn_t *pfnp, int *levelp) { pfn_t pfn = *pfnp; gfn_t gfn = *gfnp; int level = *levelp; /* * Check if it's a transparent hugepage. If this were a * hugetlbfs page, level wouldn't be set to * PT_PAGE_TABLE_LEVEL and there would be no adjustment done * here. */ if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL && PageTransCompound(pfn_to_page(pfn)) && !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) { unsigned long mask; /* * mmu_notifier_retry was successful and we hold the * mmu_lock here, so the pmd can't be split * from under us, and in turn * __split_huge_page_refcount() can't run from under * us and we can safely transfer the refcount from * PG_tail to PG_head as we switch the pfn from tail to * head. */ *levelp = level = PT_DIRECTORY_LEVEL; mask = KVM_PAGES_PER_HPAGE(level) - 1; VM_BUG_ON((gfn & mask) != (pfn & mask)); if (pfn & mask) { gfn &= ~mask; *gfnp = gfn; kvm_release_pfn_clean(pfn); pfn &= ~mask; if (!get_page_unless_zero(pfn_to_page(pfn))) BUG(); *pfnp = pfn; } } } static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, gva_t gva, pfn_t *pfn, bool write, bool *writable); static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn, bool prefault) { int r; int level; int force_pt_level; pfn_t pfn; unsigned long mmu_seq; bool map_writable; force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn); if (likely(!force_pt_level)) { level = mapping_level(vcpu, gfn); /* * This path builds a PAE pagetable - so we can map * 2mb pages at maximum. Therefore check if the level * is larger than that.
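* (PT_DIRECTORY_LEVEL entries map 2MB; anything larger would need a * long mode table.)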
*/ if (level > PT_DIRECTORY_LEVEL) level = PT_DIRECTORY_LEVEL; gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1); } else level = PT_PAGE_TABLE_LEVEL; mmu_seq = vcpu->kvm->mmu_notifier_seq; smp_rmb(); if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable)) return 0; /* mmio */ if (is_error_pfn(pfn)) return kvm_handle_bad_page(vcpu->kvm, gfn, pfn); spin_lock(&vcpu->kvm->mmu_lock); if (mmu_notifier_retry(vcpu, mmu_seq)) goto out_unlock; kvm_mmu_free_some_pages(vcpu); if (likely(!force_pt_level)) transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn, prefault); spin_unlock(&vcpu->kvm->mmu_lock); return r; out_unlock: spin_unlock(&vcpu->kvm->mmu_lock); kvm_release_pfn_clean(pfn); return 0; } static void mmu_free_roots(struct kvm_vcpu *vcpu) { int i; struct kvm_mmu_page *sp; LIST_HEAD(invalid_list); if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) return; spin_lock(&vcpu->kvm->mmu_lock); if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL && (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL || vcpu->arch.mmu.direct_map)) { hpa_t root = vcpu->arch.mmu.root_hpa; sp = page_header(root); --sp->root_count; if (!sp->root_count && sp->role.invalid) { kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); } vcpu->arch.mmu.root_hpa = INVALID_PAGE; spin_unlock(&vcpu->kvm->mmu_lock); return; } for (i = 0; i < 4; ++i) { hpa_t root = vcpu->arch.mmu.pae_root[i]; if (root) { root &= PT64_BASE_ADDR_MASK; sp = page_header(root); --sp->root_count; if (!sp->root_count && sp->role.invalid) kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); } vcpu->arch.mmu.pae_root[i] = INVALID_PAGE; } kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); spin_unlock(&vcpu->kvm->mmu_lock); vcpu->arch.mmu.root_hpa = INVALID_PAGE; } static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn) { int ret = 0; if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) { kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); ret = 1; } return ret; } static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) { struct kvm_mmu_page *sp; unsigned i; if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { spin_lock(&vcpu->kvm->mmu_lock); kvm_mmu_free_some_pages(vcpu); sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL, 1, ACC_ALL, NULL); ++sp->root_count; spin_unlock(&vcpu->kvm->mmu_lock); vcpu->arch.mmu.root_hpa = __pa(sp->spt); } else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) { for (i = 0; i < 4; ++i) { hpa_t root = vcpu->arch.mmu.pae_root[i]; ASSERT(!VALID_PAGE(root)); spin_lock(&vcpu->kvm->mmu_lock); kvm_mmu_free_some_pages(vcpu); sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT), i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL, NULL); root = __pa(sp->spt); ++sp->root_count; spin_unlock(&vcpu->kvm->mmu_lock); vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK; } vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); } else BUG(); return 0; } static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) { struct kvm_mmu_page *sp; u64 pdptr, pm_mask; gfn_t root_gfn; int i; root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT; if (mmu_check_root(vcpu, root_gfn)) return 1; /* * Do we shadow a long mode page table? If so we need to * write-protect the guest's page table root.
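* (kvm_mmu_get_page() is called below with direct == 0, which * write-protects root_gfn via rmap_write_protect().)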
*/ if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { hpa_t root = vcpu->arch.mmu.root_hpa; ASSERT(!VALID_PAGE(root)); spin_lock(&vcpu->kvm->mmu_lock); kvm_mmu_free_some_pages(vcpu); sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL, 0, ACC_ALL, NULL); root = __pa(sp->spt); ++sp->root_count; spin_unlock(&vcpu->kvm->mmu_lock); vcpu->arch.mmu.root_hpa = root; return 0; } /* * We shadow a 32 bit page table. This may be a legacy 2-level * or a PAE 3-level page table. In either case we need to be aware that * the shadow page table may be a PAE or a long mode page table. */ pm_mask = PT_PRESENT_MASK; if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK; for (i = 0; i < 4; ++i) { hpa_t root = vcpu->arch.mmu.pae_root[i]; ASSERT(!VALID_PAGE(root)); if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) { pdptr = kvm_pdptr_read_mmu(vcpu, &vcpu->arch.mmu, i); if (!is_present_gpte(pdptr)) { vcpu->arch.mmu.pae_root[i] = 0; continue; } root_gfn = pdptr >> PAGE_SHIFT; if (mmu_check_root(vcpu, root_gfn)) return 1; } spin_lock(&vcpu->kvm->mmu_lock); kvm_mmu_free_some_pages(vcpu); sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL, 0, ACC_ALL, NULL); root = __pa(sp->spt); ++sp->root_count; spin_unlock(&vcpu->kvm->mmu_lock); vcpu->arch.mmu.pae_root[i] = root | pm_mask; } vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); /* * If we shadow a 32 bit page table with a long mode page * table we enter this path. */ if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { if (vcpu->arch.mmu.lm_root == NULL) { /* * The additional page necessary for this is only * allocated on demand. */ u64 *lm_root; lm_root = (void*)get_zeroed_page(GFP_KERNEL); if (lm_root == NULL) return 1; lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask; vcpu->arch.mmu.lm_root = lm_root; } vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root); } return 0; } static int mmu_alloc_roots(struct kvm_vcpu *vcpu) { if (vcpu->arch.mmu.direct_map) return mmu_alloc_direct_roots(vcpu); else return mmu_alloc_shadow_roots(vcpu); } static void mmu_sync_roots(struct kvm_vcpu *vcpu) { int i; struct kvm_mmu_page *sp; if (vcpu->arch.mmu.direct_map) return; if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) return; trace_kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { hpa_t root = vcpu->arch.mmu.root_hpa; sp = page_header(root); mmu_sync_children(vcpu, sp); trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); return; } for (i = 0; i < 4; ++i) { hpa_t root = vcpu->arch.mmu.pae_root[i]; if (root && VALID_PAGE(root)) { root &= PT64_BASE_ADDR_MASK; sp = page_header(root); mmu_sync_children(vcpu, sp); } } trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); } void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) { spin_lock(&vcpu->kvm->mmu_lock); mmu_sync_roots(vcpu); spin_unlock(&vcpu->kvm->mmu_lock); } static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access, struct x86_exception *exception) { if (exception) exception->error_code = 0; return vaddr; } static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access, struct x86_exception *exception) { if (exception) exception->error_code = 0; return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access); } static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code, bool prefault) { gfn_t gfn; int r; pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code); r = mmu_topup_memory_caches(vcpu); if (r) return r; ASSERT(vcpu); 
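/* Descriptive note (added): with paging disabled in the guest, the faulting gva is used directly as a gpa, so the gfn is just the address shifted. */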
ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa)); gfn = gva >> PAGE_SHIFT; return nonpaging_map(vcpu, gva & PAGE_MASK, error_code & PFERR_WRITE_MASK, gfn, prefault); } static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) { struct kvm_arch_async_pf arch; arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; arch.gfn = gfn; arch.direct_map = vcpu->arch.mmu.direct_map; arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu); return kvm_setup_async_pf(vcpu, gva, gfn, &arch); } static bool can_do_async_pf(struct kvm_vcpu *vcpu) { if (unlikely(!irqchip_in_kernel(vcpu->kvm) || kvm_event_needs_reinjection(vcpu))) return false; return kvm_x86_ops->interrupt_allowed(vcpu); } static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, gva_t gva, pfn_t *pfn, bool write, bool *writable) { bool async; *pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async, write, writable); if (!async) return false; /* *pfn has correct page already */ put_page(pfn_to_page(*pfn)); if (!prefault && can_do_async_pf(vcpu)) { trace_kvm_try_async_get_page(gva, gfn); if (kvm_find_async_pf_gfn(vcpu, gfn)) { trace_kvm_async_pf_doublefault(gva, gfn); kvm_make_request(KVM_REQ_APF_HALT, vcpu); return true; } else if (kvm_arch_setup_async_pf(vcpu, gva, gfn)) return true; } *pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write, writable); return false; } static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code, bool prefault) { pfn_t pfn; int r; int level; int force_pt_level; gfn_t gfn = gpa >> PAGE_SHIFT; unsigned long mmu_seq; int write = error_code & PFERR_WRITE_MASK; bool map_writable; ASSERT(vcpu); ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa)); r = mmu_topup_memory_caches(vcpu); if (r) return r; force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn); if (likely(!force_pt_level)) { level = mapping_level(vcpu, gfn); gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1); } else level = PT_PAGE_TABLE_LEVEL; mmu_seq = vcpu->kvm->mmu_notifier_seq; smp_rmb(); if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable)) return 0; /* mmio */ if (is_error_pfn(pfn)) return kvm_handle_bad_page(vcpu->kvm, gfn, pfn); spin_lock(&vcpu->kvm->mmu_lock); if (mmu_notifier_retry(vcpu, mmu_seq)) goto out_unlock; kvm_mmu_free_some_pages(vcpu); if (likely(!force_pt_level)) transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); r = __direct_map(vcpu, gpa, write, map_writable, level, gfn, pfn, prefault); spin_unlock(&vcpu->kvm->mmu_lock); return r; out_unlock: spin_unlock(&vcpu->kvm->mmu_lock); kvm_release_pfn_clean(pfn); return 0; } static void nonpaging_free(struct kvm_vcpu *vcpu) { mmu_free_roots(vcpu); } static int nonpaging_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context) { context->new_cr3 = nonpaging_new_cr3; context->page_fault = nonpaging_page_fault; context->gva_to_gpa = nonpaging_gva_to_gpa; context->free = nonpaging_free; context->prefetch_page = nonpaging_prefetch_page; context->sync_page = nonpaging_sync_page; context->invlpg = nonpaging_invlpg; context->update_pte = nonpaging_update_pte; context->root_level = 0; context->shadow_root_level = PT32E_ROOT_LEVEL; context->root_hpa = INVALID_PAGE; context->direct_map = true; context->nx = false; return 0; } void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu) { ++vcpu->stat.tlb_flush; kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); } static void paging_new_cr3(struct kvm_vcpu *vcpu) { pgprintk("%s: cr3 %lx\n", __func__, kvm_read_cr3(vcpu)); mmu_free_roots(vcpu); } static unsigned long get_cr3(struct kvm_vcpu *vcpu) { return kvm_read_cr3(vcpu); } static void 
inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) { vcpu->arch.mmu.inject_page_fault(vcpu, fault); } static void paging_free(struct kvm_vcpu *vcpu) { nonpaging_free(vcpu); } static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level) { int bit7; bit7 = (gpte >> 7) & 1; return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0; } #define PTTYPE 64 #include "paging_tmpl.h" #undef PTTYPE #define PTTYPE 32 #include "paging_tmpl.h" #undef PTTYPE static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context, int level) { int maxphyaddr = cpuid_maxphyaddr(vcpu); u64 exb_bit_rsvd = 0; if (!context->nx) exb_bit_rsvd = rsvd_bits(63, 63); switch (level) { case PT32_ROOT_LEVEL: /* no rsvd bits for 2 level 4K page table entries */ context->rsvd_bits_mask[0][1] = 0; context->rsvd_bits_mask[0][0] = 0; context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0]; if (!is_pse(vcpu)) { context->rsvd_bits_mask[1][1] = 0; break; } if (is_cpuid_PSE36()) /* 36bits PSE 4MB page */ context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21); else /* 32 bits PSE 4MB page */ context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21); break; case PT32E_ROOT_LEVEL: context->rsvd_bits_mask[0][2] = rsvd_bits(maxphyaddr, 63) | rsvd_bits(7, 8) | rsvd_bits(1, 2); /* PDPTE */ context->rsvd_bits_mask[0][1] = exb_bit_rsvd | rsvd_bits(maxphyaddr, 62); /* PDE */ context->rsvd_bits_mask[0][0] = exb_bit_rsvd | rsvd_bits(maxphyaddr, 62); /* PTE */ context->rsvd_bits_mask[1][1] = exb_bit_rsvd | rsvd_bits(maxphyaddr, 62) | rsvd_bits(13, 20); /* large page */ context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0]; break; case PT64_ROOT_LEVEL: context->rsvd_bits_mask[0][3] = exb_bit_rsvd | rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8); context->rsvd_bits_mask[0][2] = exb_bit_rsvd | rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8); context->rsvd_bits_mask[0][1] = exb_bit_rsvd | rsvd_bits(maxphyaddr, 51); context->rsvd_bits_mask[0][0] = exb_bit_rsvd | rsvd_bits(maxphyaddr, 51); context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3]; context->rsvd_bits_mask[1][2] = exb_bit_rsvd | rsvd_bits(maxphyaddr, 51) | rsvd_bits(13, 29); context->rsvd_bits_mask[1][1] = exb_bit_rsvd | rsvd_bits(maxphyaddr, 51) | rsvd_bits(13, 20); /* large page */ context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0]; break; } } static int paging64_init_context_common(struct kvm_vcpu *vcpu, struct kvm_mmu *context, int level) { context->nx = is_nx(vcpu); reset_rsvds_bits_mask(vcpu, context, level); ASSERT(is_pae(vcpu)); context->new_cr3 = paging_new_cr3; context->page_fault = paging64_page_fault; context->gva_to_gpa = paging64_gva_to_gpa; context->prefetch_page = paging64_prefetch_page; context->sync_page = paging64_sync_page; context->invlpg = paging64_invlpg; context->update_pte = paging64_update_pte; context->free = paging_free; context->root_level = level; context->shadow_root_level = level; context->root_hpa = INVALID_PAGE; context->direct_map = false; return 0; } static int paging64_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context) { return paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL); } static int paging32_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context) { context->nx = false; reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL); context->new_cr3 = paging_new_cr3; context->page_fault = paging32_page_fault; context->gva_to_gpa = paging32_gva_to_gpa; context->free = paging_free; context->prefetch_page = paging32_prefetch_page; context->sync_page = paging32_sync_page; 
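/* Descriptive note (added): even a 2-level 32-bit guest is shadowed with PAE-format tables; see the shadow_root_level assignment below. */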
context->invlpg = paging32_invlpg; context->update_pte = paging32_update_pte; context->root_level = PT32_ROOT_LEVEL; context->shadow_root_level = PT32E_ROOT_LEVEL; context->root_hpa = INVALID_PAGE; context->direct_map = false; return 0; } static int paging32E_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context) { return paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL); } static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) { struct kvm_mmu *context = vcpu->arch.walk_mmu; context->base_role.word = 0; context->new_cr3 = nonpaging_new_cr3; context->page_fault = tdp_page_fault; context->free = nonpaging_free; context->prefetch_page = nonpaging_prefetch_page; context->sync_page = nonpaging_sync_page; context->invlpg = nonpaging_invlpg; context->update_pte = nonpaging_update_pte; context->shadow_root_level = kvm_x86_ops->get_tdp_level(); context->root_hpa = INVALID_PAGE; context->direct_map = true; context->set_cr3 = kvm_x86_ops->set_tdp_cr3; context->get_cr3 = get_cr3; context->inject_page_fault = kvm_inject_page_fault; context->nx = is_nx(vcpu); if (!is_paging(vcpu)) { context->nx = false; context->gva_to_gpa = nonpaging_gva_to_gpa; context->root_level = 0; } else if (is_long_mode(vcpu)) { context->nx = is_nx(vcpu); reset_rsvds_bits_mask(vcpu, context, PT64_ROOT_LEVEL); context->gva_to_gpa = paging64_gva_to_gpa; context->root_level = PT64_ROOT_LEVEL; } else if (is_pae(vcpu)) { context->nx = is_nx(vcpu); reset_rsvds_bits_mask(vcpu, context, PT32E_ROOT_LEVEL); context->gva_to_gpa = paging64_gva_to_gpa; context->root_level = PT32E_ROOT_LEVEL; } else { context->nx = false; reset_rsvds_bits_mask(vcpu, context, PT32_ROOT_LEVEL); context->gva_to_gpa = paging32_gva_to_gpa; context->root_level = PT32_ROOT_LEVEL; } return 0; } int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context) { int r; ASSERT(vcpu); ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); if (!is_paging(vcpu)) r = nonpaging_init_context(vcpu, context); else if (is_long_mode(vcpu)) r = paging64_init_context(vcpu, context); else if (is_pae(vcpu)) r = paging32E_init_context(vcpu, context); else r = paging32_init_context(vcpu, context); vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu); vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu); return r; } EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu); static int init_kvm_softmmu(struct kvm_vcpu *vcpu) { int r = kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu); vcpu->arch.walk_mmu->set_cr3 = kvm_x86_ops->set_cr3; vcpu->arch.walk_mmu->get_cr3 = get_cr3; vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; return r; } static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu) { struct kvm_mmu *g_context = &vcpu->arch.nested_mmu; g_context->get_cr3 = get_cr3; g_context->inject_page_fault = kvm_inject_page_fault; /* * Note that arch.mmu.gva_to_gpa translates l2_gva to l1_gpa. The * translation of l2_gpa to l1_gpa addresses is done using the * arch.nested_mmu.gva_to_gpa function. Basically the gva_to_gpa * functions between mmu and nested_mmu are swapped. 
*/ if (!is_paging(vcpu)) { g_context->nx = false; g_context->root_level = 0; g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested; } else if (is_long_mode(vcpu)) { g_context->nx = is_nx(vcpu); reset_rsvds_bits_mask(vcpu, g_context, PT64_ROOT_LEVEL); g_context->root_level = PT64_ROOT_LEVEL; g_context->gva_to_gpa = paging64_gva_to_gpa_nested; } else if (is_pae(vcpu)) { g_context->nx = is_nx(vcpu); reset_rsvds_bits_mask(vcpu, g_context, PT32E_ROOT_LEVEL); g_context->root_level = PT32E_ROOT_LEVEL; g_context->gva_to_gpa = paging64_gva_to_gpa_nested; } else { g_context->nx = false; reset_rsvds_bits_mask(vcpu, g_context, PT32_ROOT_LEVEL); g_context->root_level = PT32_ROOT_LEVEL; g_context->gva_to_gpa = paging32_gva_to_gpa_nested; } return 0; } static int init_kvm_mmu(struct kvm_vcpu *vcpu) { if (mmu_is_nested(vcpu)) return init_kvm_nested_mmu(vcpu); else if (tdp_enabled) return init_kvm_tdp_mmu(vcpu); else return init_kvm_softmmu(vcpu); } static void destroy_kvm_mmu(struct kvm_vcpu *vcpu) { ASSERT(vcpu); if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) /* mmu.free() should set root_hpa = INVALID_PAGE */ vcpu->arch.mmu.free(vcpu); } int kvm_mmu_reset_context(struct kvm_vcpu *vcpu) { destroy_kvm_mmu(vcpu); return init_kvm_mmu(vcpu); } EXPORT_SYMBOL_GPL(kvm_mmu_reset_context); int kvm_mmu_load(struct kvm_vcpu *vcpu) { int r; r = mmu_topup_memory_caches(vcpu); if (r) goto out; r = mmu_alloc_roots(vcpu); spin_lock(&vcpu->kvm->mmu_lock); mmu_sync_roots(vcpu); spin_unlock(&vcpu->kvm->mmu_lock); if (r) goto out; /* set_cr3() should ensure TLB has been flushed */ vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa); out: return r; } EXPORT_SYMBOL_GPL(kvm_mmu_load); void kvm_mmu_unload(struct kvm_vcpu *vcpu) { mmu_free_roots(vcpu); } EXPORT_SYMBOL_GPL(kvm_mmu_unload); static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte) { u64 pte; struct kvm_mmu_page *child; pte = *spte; if (is_shadow_present_pte(pte)) { if (is_last_spte(pte, sp->role.level)) drop_spte(vcpu->kvm, spte, shadow_trap_nonpresent_pte); else { child = page_header(pte & PT64_BASE_ADDR_MASK); mmu_page_remove_parent_pte(child, spte); } } __set_spte(spte, shadow_trap_nonpresent_pte); if (is_large_pte(pte)) --vcpu->kvm->stat.lpages; } static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, const void *new) { if (sp->role.level != PT_PAGE_TABLE_LEVEL) { ++vcpu->kvm->stat.mmu_pde_zapped; return; } ++vcpu->kvm->stat.mmu_pte_updated; vcpu->arch.mmu.update_pte(vcpu, sp, spte, new); } static bool need_remote_flush(u64 old, u64 new) { if (!is_shadow_present_pte(old)) return false; if (!is_shadow_present_pte(new)) return true; if ((old ^ new) & PT64_BASE_ADDR_MASK) return true; old ^= PT64_NX_MASK; new ^= PT64_NX_MASK; return (old & ~new & PT64_PERM_MASK) != 0; } static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page, bool remote_flush, bool local_flush) { if (zap_page) return; if (remote_flush) kvm_flush_remote_tlbs(vcpu->kvm); else if (local_flush) kvm_mmu_flush_tlb(vcpu); } static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu) { u64 *spte = vcpu->arch.last_pte_updated; return !!(spte && (*spte & shadow_accessed_mask)); } static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn) { u64 *spte = vcpu->arch.last_pte_updated; if (spte && vcpu->arch.last_pte_gfn == gfn && shadow_accessed_mask && !(*spte & shadow_accessed_mask) && is_shadow_present_pte(*spte)) set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte); } void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t 
gpa, const u8 *new, int bytes, bool guest_initiated) { gfn_t gfn = gpa >> PAGE_SHIFT; union kvm_mmu_page_role mask = { .word = 0 }; struct kvm_mmu_page *sp; struct hlist_node *node; LIST_HEAD(invalid_list); u64 entry, gentry, *spte; unsigned pte_size, page_offset, misaligned, quadrant, offset; int level, npte, invlpg_counter, r, flooded = 0; bool remote_flush, local_flush, zap_page; zap_page = remote_flush = local_flush = false; offset = offset_in_page(gpa); pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes); invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter); /* * Assume that the pte write is on a page table of the same type * as the current vcpu paging mode, since we update the sptes only * when they have the same mode. */ if ((is_pae(vcpu) && bytes == 4) || !new) { /* Handle a 32-bit guest writing two halves of a 64-bit gpte */ if (is_pae(vcpu)) { gpa &= ~(gpa_t)7; bytes = 8; } r = kvm_read_guest(vcpu->kvm, gpa, &gentry, min(bytes, 8)); if (r) gentry = 0; new = (const u8 *)&gentry; } switch (bytes) { case 4: gentry = *(const u32 *)new; break; case 8: gentry = *(const u64 *)new; break; default: gentry = 0; break; } spin_lock(&vcpu->kvm->mmu_lock); if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter) gentry = 0; kvm_mmu_free_some_pages(vcpu); ++vcpu->kvm->stat.mmu_pte_write; trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE); if (guest_initiated) { kvm_mmu_access_page(vcpu, gfn); if (gfn == vcpu->arch.last_pt_write_gfn && !last_updated_pte_accessed(vcpu)) { ++vcpu->arch.last_pt_write_count; if (vcpu->arch.last_pt_write_count >= 3) flooded = 1; } else { vcpu->arch.last_pt_write_gfn = gfn; vcpu->arch.last_pt_write_count = 1; vcpu->arch.last_pte_updated = NULL; } } mask.cr0_wp = mask.cr4_pae = mask.nxe = 1; for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) { pte_size = sp->role.cr4_pae ? 8 : 4; misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1); misaligned |= bytes < 4; if (misaligned || flooded) { /* * Misaligned accesses are too much trouble to fix * up; also, they usually indicate a page is not used * as a page table. * * If we're seeing too many writes to a page, * it may no longer be a page table, or we may be * forking, in which case it is better to unmap the * page. */ pgprintk("misaligned: gpa %llx bytes %d role %x\n", gpa, bytes, sp->role.word); zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); ++vcpu->kvm->stat.mmu_flooded; continue; } page_offset = offset; level = sp->role.level; npte = 1; if (!sp->role.cr4_pae) { page_offset <<= 1; /* 32->64 */ /* * A 32-bit pde maps 4MB while the shadow pdes map * only 2MB. So we need to double the offset again * and zap two pdes instead of one.
*/ if (level == PT32_ROOT_LEVEL) { page_offset &= ~7; /* kill rounding error */ page_offset <<= 1; npte = 2; } quadrant = page_offset >> PAGE_SHIFT; page_offset &= ~PAGE_MASK; if (quadrant != sp->role.quadrant) continue; } local_flush = true; spte = &sp->spt[page_offset / sizeof(*spte)]; while (npte--) { entry = *spte; mmu_pte_write_zap_pte(vcpu, sp, spte); if (gentry && !((sp->role.word ^ vcpu->arch.mmu.base_role.word) & mask.word)) mmu_pte_write_new_pte(vcpu, sp, spte, &gentry); if (!remote_flush && need_remote_flush(entry, *spte)) remote_flush = true; ++spte; } } mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush); kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); trace_kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE); spin_unlock(&vcpu->kvm->mmu_lock); } int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva) { gpa_t gpa; int r; if (vcpu->arch.mmu.direct_map) return 0; gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL); spin_lock(&vcpu->kvm->mmu_lock); r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT); spin_unlock(&vcpu->kvm->mmu_lock); return r; } EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt); void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu) { LIST_HEAD(invalid_list); while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES && !list_empty(&vcpu->kvm->arch.active_mmu_pages)) { struct kvm_mmu_page *sp; sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev, struct kvm_mmu_page, link); kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); ++vcpu->kvm->stat.mmu_recycled; } } int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code, void *insn, int insn_len) { int r; enum emulation_result er; r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false); if (r < 0) goto out; if (!r) { r = 1; goto out; } r = mmu_topup_memory_caches(vcpu); if (r) goto out; er = x86_emulate_instruction(vcpu, cr2, 0, insn, insn_len); switch (er) { case EMULATE_DONE: return 1; case EMULATE_DO_MMIO: ++vcpu->stat.mmio_exits; /* fall through */ case EMULATE_FAIL: return 0; default: BUG(); } out: return r; } EXPORT_SYMBOL_GPL(kvm_mmu_page_fault); void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) { vcpu->arch.mmu.invlpg(vcpu, gva); kvm_mmu_flush_tlb(vcpu); ++vcpu->stat.invlpg; } EXPORT_SYMBOL_GPL(kvm_mmu_invlpg); void kvm_enable_tdp(void) { tdp_enabled = true; } EXPORT_SYMBOL_GPL(kvm_enable_tdp); void kvm_disable_tdp(void) { tdp_enabled = false; } EXPORT_SYMBOL_GPL(kvm_disable_tdp); static void free_mmu_pages(struct kvm_vcpu *vcpu) { free_page((unsigned long)vcpu->arch.mmu.pae_root); if (vcpu->arch.mmu.lm_root != NULL) free_page((unsigned long)vcpu->arch.mmu.lm_root); } static int alloc_mmu_pages(struct kvm_vcpu *vcpu) { struct page *page; int i; ASSERT(vcpu); /* * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64. * Therefore we need to allocate shadow page tables in the first * 4GB of memory, which happens to fit the DMA32 zone. 
*/ page = alloc_page(GFP_KERNEL | __GFP_DMA32); if (!page) return -ENOMEM; vcpu->arch.mmu.pae_root = page_address(page); for (i = 0; i < 4; ++i) vcpu->arch.mmu.pae_root[i] = INVALID_PAGE; return 0; } int kvm_mmu_create(struct kvm_vcpu *vcpu) { ASSERT(vcpu); ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); return alloc_mmu_pages(vcpu); } int kvm_mmu_setup(struct kvm_vcpu *vcpu) { ASSERT(vcpu); ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); return init_kvm_mmu(vcpu); } void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot) { struct kvm_mmu_page *sp; list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) { int i; u64 *pt; if (!test_bit(slot, sp->slot_bitmap)) continue; pt = sp->spt; for (i = 0; i < PT64_ENT_PER_PAGE; ++i) { if (!is_shadow_present_pte(pt[i]) || !is_last_spte(pt[i], sp->role.level)) continue; if (is_large_pte(pt[i])) { drop_spte(kvm, &pt[i], shadow_trap_nonpresent_pte); --kvm->stat.lpages; continue; } /* avoid RMW */ if (is_writable_pte(pt[i])) update_spte(&pt[i], pt[i] & ~PT_WRITABLE_MASK); } } kvm_flush_remote_tlbs(kvm); } void kvm_mmu_zap_all(struct kvm *kvm) { struct kvm_mmu_page *sp, *node; LIST_HEAD(invalid_list); spin_lock(&kvm->mmu_lock); restart: list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list)) goto restart; kvm_mmu_commit_zap_page(kvm, &invalid_list); spin_unlock(&kvm->mmu_lock); } static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm, struct list_head *invalid_list) { struct kvm_mmu_page *page; page = container_of(kvm->arch.active_mmu_pages.prev, struct kvm_mmu_page, link); return kvm_mmu_prepare_zap_page(kvm, page, invalid_list); } static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc) { struct kvm *kvm; struct kvm *kvm_freed = NULL; int nr_to_scan = sc->nr_to_scan; if (nr_to_scan == 0) goto out; raw_spin_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) { int idx, freed_pages; LIST_HEAD(invalid_list); idx = srcu_read_lock(&kvm->srcu); spin_lock(&kvm->mmu_lock); if (!kvm_freed && nr_to_scan > 0 && kvm->arch.n_used_mmu_pages > 0) { freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm, &invalid_list); kvm_freed = kvm; } nr_to_scan--; kvm_mmu_commit_zap_page(kvm, &invalid_list); spin_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, idx); } if (kvm_freed) list_move_tail(&kvm_freed->vm_list, &vm_list); raw_spin_unlock(&kvm_lock); out: return percpu_counter_read_positive(&kvm_total_used_mmu_pages); } static struct shrinker mmu_shrinker = { .shrink = mmu_shrink, .seeks = DEFAULT_SEEKS * 10, }; static void mmu_destroy_caches(void) { if (pte_chain_cache) kmem_cache_destroy(pte_chain_cache); if (rmap_desc_cache) kmem_cache_destroy(rmap_desc_cache); if (mmu_page_header_cache) kmem_cache_destroy(mmu_page_header_cache); } int kvm_mmu_module_init(void) { pte_chain_cache = kmem_cache_create("kvm_pte_chain", sizeof(struct kvm_pte_chain), 0, 0, NULL); if (!pte_chain_cache) goto nomem; rmap_desc_cache = kmem_cache_create("kvm_rmap_desc", sizeof(struct kvm_rmap_desc), 0, 0, NULL); if (!rmap_desc_cache) goto nomem; mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header", sizeof(struct kvm_mmu_page), 0, 0, NULL); if (!mmu_page_header_cache) goto nomem; if (percpu_counter_init(&kvm_total_used_mmu_pages, 0)) goto nomem; register_shrinker(&mmu_shrinker); return 0; nomem: mmu_destroy_caches(); return -ENOMEM; } /* * Calculate mmu pages needed for kvm.
*/ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm) { int i; unsigned int nr_mmu_pages; unsigned int nr_pages = 0; struct kvm_memslots *slots; slots = kvm_memslots(kvm); for (i = 0; i < slots->nmemslots; i++) nr_pages += slots->memslots[i].npages; nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000; nr_mmu_pages = max(nr_mmu_pages, (unsigned int) KVM_MIN_ALLOC_MMU_PAGES); return nr_mmu_pages; } static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer, unsigned len) { if (len > buffer->len) return NULL; return buffer->ptr; } static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer, unsigned len) { void *ret; ret = pv_mmu_peek_buffer(buffer, len); if (!ret) return ret; buffer->ptr += len; buffer->len -= len; buffer->processed += len; return ret; } static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu, gpa_t addr, gpa_t value) { int bytes = 8; int r; if (!is_long_mode(vcpu) && !is_pae(vcpu)) bytes = 4; r = mmu_topup_memory_caches(vcpu); if (r) return r; if (!emulator_write_phys(vcpu, addr, &value, bytes)) return -EFAULT; return 1; } static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu) { (void)kvm_set_cr3(vcpu, kvm_read_cr3(vcpu)); return 1; } static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr) { spin_lock(&vcpu->kvm->mmu_lock); mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT); spin_unlock(&vcpu->kvm->mmu_lock); return 1; } static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu, struct kvm_pv_mmu_op_buffer *buffer) { struct kvm_mmu_op_header *header; header = pv_mmu_peek_buffer(buffer, sizeof *header); if (!header) return 0; switch (header->op) { case KVM_MMU_OP_WRITE_PTE: { struct kvm_mmu_op_write_pte *wpte; wpte = pv_mmu_read_buffer(buffer, sizeof *wpte); if (!wpte) return 0; return kvm_pv_mmu_write(vcpu, wpte->pte_phys, wpte->pte_val); } case KVM_MMU_OP_FLUSH_TLB: { struct kvm_mmu_op_flush_tlb *ftlb; ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb); if (!ftlb) return 0; return kvm_pv_mmu_flush_tlb(vcpu); } case KVM_MMU_OP_RELEASE_PT: { struct kvm_mmu_op_release_pt *rpt; rpt = pv_mmu_read_buffer(buffer, sizeof *rpt); if (!rpt) return 0; return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys); } default: return 0; } } int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes, gpa_t addr, unsigned long *ret) { int r; struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer; buffer->ptr = buffer->buf; buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf); buffer->processed = 0; r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len); if (r) goto out; while (buffer->len) { r = kvm_pv_mmu_op_one(vcpu, buffer); if (r < 0) goto out; if (r == 0) break; } r = 1; out: *ret = buffer->processed; return r; } int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]) { struct kvm_shadow_walk_iterator iterator; int nr_sptes = 0; spin_lock(&vcpu->kvm->mmu_lock); for_each_shadow_entry(vcpu, addr, iterator) { sptes[iterator.level-1] = *iterator.sptep; nr_sptes++; if (!is_shadow_present_pte(*iterator.sptep)) break; } spin_unlock(&vcpu->kvm->mmu_lock); return nr_sptes; } EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy); void kvm_mmu_destroy(struct kvm_vcpu *vcpu) { ASSERT(vcpu); destroy_kvm_mmu(vcpu); free_mmu_pages(vcpu); mmu_free_memory_caches(vcpu); } #ifdef CONFIG_KVM_MMU_AUDIT #include "mmu_audit.c" #else static void mmu_audit_disable(void) { } #endif void kvm_mmu_module_exit(void) { mmu_destroy_caches(); percpu_counter_destroy(&kvm_total_used_mmu_pages); unregister_shrinker(&mmu_shrinker); mmu_audit_disable(); }
gpl-2.0
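The reserved-bit machinery above (reset_rsvds_bits_mask() and is_rsvd_bits_set()) is easier to see in isolation. Below is a minimal standalone sketch, not kernel code: rsvd_bits() mirrors the KVM helper of the same name, while the maxphyaddr and guest-PTE values are made-up examples.

#include <stdint.h>
#include <stdio.h>

/* mask with bits s..e (inclusive) set, as in KVM's rsvd_bits() helper */
static uint64_t rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;
}

int main(void)
{
	int maxphyaddr = 36;				/* assumed CPUID-reported width */
	uint64_t mask = rsvd_bits(maxphyaddr, 51);	/* 4KB PTE at PT64 level 1 */
	uint64_t gpte = 1ULL << 40;			/* hypothetical guest PTE */

	/* same test as is_rsvd_bits_set(): any reserved bit set -> fault */
	printf("reserved bits set: %s\n", (gpte & mask) ? "yes" : "no");
	return 0;
}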
somcom3x/kernel_samsung_msm8660-common
drivers/staging/comedi/drivers/das6402.c
3345
8937
/* Some comments on the code.. - it shouldn't be necessary to use outb_p(). - ignoreirq creates a race condition. It needs to be fixed. */ /* comedi/drivers/das6402.c An experimental driver for Computerboards' DAS6402 I/O card Copyright (C) 1999 Oystein Svendsen <svendsen@pvv.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: das6402 Description: Keithley Metrabyte DAS6402 (& compatibles) Author: Oystein Svendsen <svendsen@pvv.org> Status: bitrotten Devices: [Keithley Metrabyte] DAS6402 (das6402) This driver has suffered bitrot. */ #include <linux/interrupt.h> #include "../comedidev.h" #include <linux/ioport.h> #define DAS6402_SIZE 16 #define N_WORDS (3000*64) #define STOP 0 #define START 1 #define SCANL 0x3f00 #define BYTE unsigned char #define WORD unsigned short /*----- register 8 ----*/ #define CLRINT 0x01 #define CLRXTR 0x02 #define CLRXIN 0x04 #define EXTEND 0x10 #define ARMED 0x20 /* enable counting of post-sample conversions */ #define POSTMODE 0x40 #define MHZ 0x80 /* 10 MHz clock */ /*---------------------*/ /*----- register 9 ----*/ #define IRQ (0x04 << 4) /* these two are */ #define IRQV 10 /* dependent on each other */ #define CONVSRC 0x03 /* trig src is internal pacer */ #define BURSTEN 0x04 /* enable burst */ #define XINTE 0x08 /* use external int. trig */ #define INTE 0x80 /* enable analog interrupts */ /*---------------------*/ /*----- register 10 ---*/ #define TGEN 0x01 /* Use pin DI1 for external triggering?
*/ #define TGSEL 0x02 /* Use edge triggering */ #define TGPOL 0x04 /* active edge is falling */ #define PRETRIG 0x08 /* pretrig */ /*---------------------*/ /*----- register 11 ---*/ #define EOB 0x0c #define FIFOHFULL 0x08 #define GAIN 0x01 #define FIFONEPTY 0x04 #define MODE 0x10 #define SEM 0x20 #define BIP 0x40 /*---------------------*/ #define M0 0x00 #define M2 0x04 #define C0 0x00 #define C1 0x40 #define C2 0x80 #define RWLH 0x30 static int das6402_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int das6402_detach(struct comedi_device *dev); static struct comedi_driver driver_das6402 = { .driver_name = "das6402", .module = THIS_MODULE, .attach = das6402_attach, .detach = das6402_detach, }; static int __init driver_das6402_init_module(void) { return comedi_driver_register(&driver_das6402); } static void __exit driver_das6402_cleanup_module(void) { comedi_driver_unregister(&driver_das6402); } module_init(driver_das6402_init_module); module_exit(driver_das6402_cleanup_module); struct das6402_private { int ai_bytes_to_read; int das6402_ignoreirq; }; #define devpriv ((struct das6402_private *)dev->private) static void das6402_ai_fifo_dregs(struct comedi_device *dev, struct comedi_subdevice *s); static void das6402_setcounter(struct comedi_device *dev) { BYTE p; unsigned short ctrlwrd; /* set up counter0 first, mode 0 */ p = M0 | C0 | RWLH; outb_p(p, dev->iobase + 15); ctrlwrd = 2000; p = (BYTE) (0xff & ctrlwrd); outb_p(p, dev->iobase + 12); p = (BYTE) (0xff & (ctrlwrd >> 8)); outb_p(p, dev->iobase + 12); /* set up counter1, mode 2 */ p = M2 | C1 | RWLH; outb_p(p, dev->iobase + 15); ctrlwrd = 10; p = (BYTE) (0xff & ctrlwrd); outb_p(p, dev->iobase + 13); p = (BYTE) (0xff & (ctrlwrd >> 8)); outb_p(p, dev->iobase + 13); /* set up counter2, mode 2 */ p = M2 | C2 | RWLH; outb_p(p, dev->iobase + 15); ctrlwrd = 1000; p = (BYTE) (0xff & ctrlwrd); outb_p(p, dev->iobase + 14); p = (BYTE) (0xff & (ctrlwrd >> 8)); outb_p(p, dev->iobase + 14); } static irqreturn_t intr_handler(int irq, void *d) { struct comedi_device *dev = d; struct comedi_subdevice *s = dev->subdevices; if (!dev->attached || devpriv->das6402_ignoreirq) { printk("das6402: BUG: spurious interrupt\n"); return IRQ_HANDLED; } #ifdef DEBUG printk("das6402: interrupt!\n"); printk("das6402: iobase+2=%i\n", inw_p(dev->iobase + 2)); #endif das6402_ai_fifo_dregs(dev, s); if (s->async->buf_write_count >= devpriv->ai_bytes_to_read) { outw_p(SCANL, dev->iobase + 2); /* clears the fifo */ outb(0x07, dev->iobase + 8); /* clears all flip-flops */ #ifdef DEBUG printk("das6402: acquisition complete\n"); #endif s->async->events |= COMEDI_CB_EOA; comedi_event(dev, s); } outb(0x01, dev->iobase + 8); /* clear only the interrupt flip-flop */ comedi_event(dev, s); return IRQ_HANDLED; } #if 0 static void das6402_ai_fifo_read(struct comedi_device *dev, short *data, int n) { int i; for (i = 0; i < n; i++) data[i] = inw(dev->iobase); } #endif static void das6402_ai_fifo_dregs(struct comedi_device *dev, struct comedi_subdevice *s) { while (1) { if (!(inb(dev->iobase + 8) & 0x01)) return; comedi_buf_put(s->async, inw(dev->iobase)); } } static int das6402_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { /* * This function should reset the board from whatever condition it * is in (i.e., acquiring data), to a non-active state.
*/ devpriv->das6402_ignoreirq = 1; #ifdef DEBUG printk("das6402: Stopping acquisition\n"); #endif outb_p(0x02, dev->iobase + 10); /* disable external triggering */ outw_p(SCANL, dev->iobase + 2); /* resets the card fifo */ outb_p(0, dev->iobase + 9); /* disables interrupts */ outw_p(SCANL, dev->iobase + 2); return 0; } #ifdef unused static int das6402_ai_mode2(struct comedi_device *dev, struct comedi_subdevice *s, comedi_trig * it) { devpriv->das6402_ignoreirq = 1; #ifdef DEBUG printk("das6402: Starting acquisition\n"); #endif outb_p(0x03, dev->iobase + 10); /* enable external triggering */ outw_p(SCANL, dev->iobase + 2); /* resets the card fifo */ outb_p(IRQ | CONVSRC | BURSTEN | INTE, dev->iobase + 9); devpriv->ai_bytes_to_read = it->n * sizeof(short); /* um... ignoreirq is a nasty race condition */ devpriv->das6402_ignoreirq = 0; outw_p(SCANL, dev->iobase + 2); return 0; } #endif static int board_init(struct comedi_device *dev) { BYTE b; devpriv->das6402_ignoreirq = 1; outb(0x07, dev->iobase + 8); /* register 11 */ outb_p(MODE, dev->iobase + 11); b = BIP | SEM | MODE | GAIN | FIFOHFULL; outb_p(b, dev->iobase + 11); /* register 8 */ outb_p(EXTEND, dev->iobase + 8); b = EXTEND | MHZ; outb_p(b, dev->iobase + 8); b = MHZ | CLRINT | CLRXTR | CLRXIN; outb_p(b, dev->iobase + 8); /* register 9 */ b = IRQ | CONVSRC | BURSTEN | INTE; outb_p(b, dev->iobase + 9); /* register 10 */ b = TGSEL | TGEN; outb_p(b, dev->iobase + 10); b = 0x07; outb_p(b, dev->iobase + 8); das6402_setcounter(dev); outw_p(SCANL, dev->iobase + 2); /* reset card fifo */ devpriv->das6402_ignoreirq = 0; return 0; } static int das6402_detach(struct comedi_device *dev) { if (dev->irq) free_irq(dev->irq, dev); if (dev->iobase) release_region(dev->iobase, DAS6402_SIZE); return 0; } static int das6402_attach(struct comedi_device *dev, struct comedi_devconfig *it) { unsigned int irq; unsigned long iobase; int ret; struct comedi_subdevice *s; dev->board_name = "das6402"; iobase = it->options[0]; if (iobase == 0) iobase = 0x300; printk("comedi%d: das6402: 0x%04lx", dev->minor, iobase); if (!request_region(iobase, DAS6402_SIZE, "das6402")) { printk(" I/O port conflict\n"); return -EIO; } dev->iobase = iobase; /* should do a probe here */ irq = it->options[1]; /* options[1] is the irq line; options[0] is the I/O base */ printk(" ( irq = %u )", irq); ret = request_irq(irq, intr_handler, 0, "das6402", dev); if (ret < 0) { printk("irq conflict\n"); return ret; } dev->irq = irq; ret = alloc_private(dev, sizeof(struct das6402_private)); if (ret < 0) return ret; ret = alloc_subdevices(dev, 1); if (ret < 0) return ret; /* ai subdevice */ s = dev->subdevices + 0; s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_GROUND; s->n_chan = 8; /* s->trig[2]=das6402_ai_mode2; */ s->cancel = das6402_ai_cancel; s->maxdata = (1 << 12) - 1; s->len_chanlist = 16; /* ? */ s->range_table = &range_unknown; board_init(dev); return 0; } MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
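Each outb_p() triple in das6402_setcounter() above is the same 8254-style load sequence: a control word (mode | counter select | RWLH, i.e. read/write low byte then high byte) goes to the control port at base+15, then the 16-bit divisor is written LSB-first to the selected counter's own port. A hedged refactoring sketch reusing the driver's defines; load_counter() is a hypothetical helper, not part of the driver:

static void load_counter(unsigned long iobase, unsigned char mode,
			 unsigned char csel, unsigned int port,
			 unsigned short divisor)
{
	outb_p(mode | csel | RWLH, iobase + 15);	/* control word */
	outb_p(divisor & 0xff, iobase + port);		/* divisor LSB */
	outb_p((divisor >> 8) & 0xff, iobase + port);	/* divisor MSB */
}

/* das6402_setcounter() would then reduce to:
 *	load_counter(dev->iobase, M0, C0, 12, 2000);
 *	load_counter(dev->iobase, M2, C1, 13, 10);
 *	load_counter(dev->iobase, M2, C2, 14, 1000);
 */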
SerkTheTurk/android_kernel_samsung_hugo
arch/arm/mach-omap2/emu.c
4113
1522
/* * emu.c * * ETM and ETB CoreSight components' resources as found in OMAP3xxx. * * Copyright (C) 2009 Nokia Corporation. * Alexander Shishkin * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/module.h> #include <linux/device.h> #include <linux/amba/bus.h> #include <linux/io.h> #include <linux/clk.h> #include <linux/err.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Alexander Shishkin"); /* Cortex CoreSight components within omap3xxx EMU */ #define ETM_BASE (L4_EMU_34XX_PHYS + 0x10000) #define DBG_BASE (L4_EMU_34XX_PHYS + 0x11000) #define ETB_BASE (L4_EMU_34XX_PHYS + 0x1b000) #define DAPCTL (L4_EMU_34XX_PHYS + 0x1d000) static struct amba_device omap3_etb_device = { .dev = { .init_name = "etb", }, .res = { .start = ETB_BASE, .end = ETB_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, .periphid = 0x000bb907, }; static struct amba_device omap3_etm_device = { .dev = { .init_name = "etm", }, .res = { .start = ETM_BASE, .end = ETM_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, .periphid = 0x102bb921, }; static int __init emu_init(void) { if (!cpu_is_omap34xx()) return -ENODEV; amba_device_register(&omap3_etb_device, &iomem_resource); amba_device_register(&omap3_etm_device, &iomem_resource); return 0; } subsys_initcall(emu_init);
gpl-2.0
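The .periphid values above follow the standard AMBA/PrimeCell peripheral-ID layout: part number in bits 11:0, JEP106 manufacturer code in bits 19:12, revision in bits 23:20 (the fields the amba_part()/amba_manf()/amba_rev() helpers in include/linux/amba/bus.h extract). A standalone decode sketch, for illustration only:

#include <stdio.h>

int main(void)
{
	unsigned int ids[] = { 0x000bb907 /* ETB */, 0x102bb921 /* ETM */ };
	int i;

	for (i = 0; i < 2; i++)
		printf("periphid %#010x: part %#05x manf %#04x rev %u\n",
		       ids[i], ids[i] & 0xfff, (ids[i] >> 12) & 0xff,
		       (ids[i] >> 20) & 0xf);
	/* expected: part 0x907 (ETB) and 0x921 (ETM), manf 0xbb (ARM) */
	return 0;
}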
emceethemouth/kernel
fs/afs/super.c
4625
12575
/* AFS superblock handling * * Copyright (c) 2002, 2007 Red Hat, Inc. All rights reserved. * * This software may be freely redistributed under the terms of the * GNU General Public License. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Authors: David Howells <dhowells@redhat.com> * David Woodhouse <dwmw2@infradead.org> * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/mount.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/parser.h> #include <linux/statfs.h> #include <linux/sched.h> #include "internal.h" #define AFS_FS_MAGIC 0x6B414653 /* 'kAFS' */ static void afs_i_init_once(void *foo); static struct dentry *afs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data); static void afs_kill_super(struct super_block *sb); static struct inode *afs_alloc_inode(struct super_block *sb); static void afs_destroy_inode(struct inode *inode); static int afs_statfs(struct dentry *dentry, struct kstatfs *buf); struct file_system_type afs_fs_type = { .owner = THIS_MODULE, .name = "afs", .mount = afs_mount, .kill_sb = afs_kill_super, .fs_flags = 0, }; static const struct super_operations afs_super_ops = { .statfs = afs_statfs, .alloc_inode = afs_alloc_inode, .drop_inode = afs_drop_inode, .destroy_inode = afs_destroy_inode, .evict_inode = afs_evict_inode, .show_options = generic_show_options, }; static struct kmem_cache *afs_inode_cachep; static atomic_t afs_count_active_inodes; enum { afs_no_opt, afs_opt_cell, afs_opt_rwpath, afs_opt_vol, afs_opt_autocell, }; static const match_table_t afs_options_list = { { afs_opt_cell, "cell=%s" }, { afs_opt_rwpath, "rwpath" }, { afs_opt_vol, "vol=%s" }, { afs_opt_autocell, "autocell" }, { afs_no_opt, NULL }, }; /* * initialise the filesystem */ int __init afs_fs_init(void) { int ret; _enter(""); /* create ourselves an inode cache */ atomic_set(&afs_count_active_inodes, 0); ret = -ENOMEM; afs_inode_cachep = kmem_cache_create("afs_inode_cache", sizeof(struct afs_vnode), 0, SLAB_HWCACHE_ALIGN, afs_i_init_once); if (!afs_inode_cachep) { printk(KERN_NOTICE "kAFS: Failed to allocate inode cache\n"); return ret; } /* now export our filesystem to lesser mortals */ ret = register_filesystem(&afs_fs_type); if (ret < 0) { kmem_cache_destroy(afs_inode_cachep); _leave(" = %d", ret); return ret; } _leave(" = 0"); return 0; } /* * clean up the filesystem */ void __exit afs_fs_exit(void) { _enter(""); afs_mntpt_kill_timer(); unregister_filesystem(&afs_fs_type); if (atomic_read(&afs_count_active_inodes) != 0) { printk("kAFS: %d active inode objects still present\n", atomic_read(&afs_count_active_inodes)); BUG(); } kmem_cache_destroy(afs_inode_cachep); _leave(""); } /* * parse the mount options * - this function has been shamelessly adapted from the ext3 fs which * shamelessly adapted it from the msdos fs */ static int afs_parse_options(struct afs_mount_params *params, char *options, const char **devname) { struct afs_cell *cell; substring_t args[MAX_OPT_ARGS]; char *p; int token; _enter("%s", options); options[PAGE_SIZE - 1] = 0; while ((p = strsep(&options, ","))) { if (!*p) continue; token = match_token(p, afs_options_list, args); switch (token) { case afs_opt_cell: cell = afs_cell_lookup(args[0].from, args[0].to - args[0].from, false); if (IS_ERR(cell)) return PTR_ERR(cell); afs_put_cell(params->cell); params->cell = 
cell; break; case afs_opt_rwpath: params->rwpath = 1; break; case afs_opt_vol: *devname = args[0].from; break; case afs_opt_autocell: params->autocell = 1; break; default: printk(KERN_ERR "kAFS:" " Unknown or invalid mount option: '%s'\n", p); return -EINVAL; } } _leave(" = 0"); return 0; } /* * parse a device name to get cell name, volume name, volume type and R/W * selector * - this can be one of the following: * "%[cell:]volume[.]" R/W volume * "#[cell:]volume[.]" R/O or R/W volume (rwpath=0), * or R/W (rwpath=1) volume * "%[cell:]volume.readonly" R/O volume * "#[cell:]volume.readonly" R/O volume * "%[cell:]volume.backup" Backup volume * "#[cell:]volume.backup" Backup volume */ static int afs_parse_device_name(struct afs_mount_params *params, const char *name) { struct afs_cell *cell; const char *cellname, *suffix; int cellnamesz; _enter(",%s", name); if (!name) { printk(KERN_ERR "kAFS: no volume name specified\n"); return -EINVAL; } if ((name[0] != '%' && name[0] != '#') || !name[1]) { printk(KERN_ERR "kAFS: unparsable volume name\n"); return -EINVAL; } /* determine the type of volume we're looking for */ params->type = AFSVL_ROVOL; params->force = false; if (params->rwpath || name[0] == '%') { params->type = AFSVL_RWVOL; params->force = true; } name++; /* split the cell name out if there is one */ params->volname = strchr(name, ':'); if (params->volname) { cellname = name; cellnamesz = params->volname - name; params->volname++; } else { params->volname = name; cellname = NULL; cellnamesz = 0; } /* the volume type is further affected by a possible suffix */ suffix = strrchr(params->volname, '.'); if (suffix) { if (strcmp(suffix, ".readonly") == 0) { params->type = AFSVL_ROVOL; params->force = true; } else if (strcmp(suffix, ".backup") == 0) { params->type = AFSVL_BACKVOL; params->force = true; } else if (suffix[1] == 0) { } else { suffix = NULL; } } params->volnamesz = suffix ? suffix - params->volname : strlen(params->volname); _debug("cell %*.*s [%p]", cellnamesz, cellnamesz, cellname ?: "", params->cell); /* lookup the cell record */ if (cellname || !params->cell) { cell = afs_cell_lookup(cellname, cellnamesz, true); if (IS_ERR(cell)) { printk(KERN_ERR "kAFS: unable to lookup cell '%*.*s'\n", cellnamesz, cellnamesz, cellname ?: ""); return PTR_ERR(cell); } afs_put_cell(params->cell); params->cell = cell; } _debug("CELL:%s [%p] VOLUME:%*.*s SUFFIX:%s TYPE:%d%s", params->cell->name, params->cell, params->volnamesz, params->volnamesz, params->volname, suffix ?: "-", params->type, params->force ? 
" FORCE" : ""); return 0; } /* * check a superblock to see if it's the one we're looking for */ static int afs_test_super(struct super_block *sb, void *data) { struct afs_super_info *as1 = data; struct afs_super_info *as = sb->s_fs_info; return as->volume == as1->volume; } static int afs_set_super(struct super_block *sb, void *data) { sb->s_fs_info = data; return set_anon_super(sb, NULL); } /* * fill in the superblock */ static int afs_fill_super(struct super_block *sb, struct afs_mount_params *params) { struct afs_super_info *as = sb->s_fs_info; struct afs_fid fid; struct inode *inode = NULL; int ret; _enter(""); /* fill in the superblock */ sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; sb->s_magic = AFS_FS_MAGIC; sb->s_op = &afs_super_ops; sb->s_bdi = &as->volume->bdi; strlcpy(sb->s_id, as->volume->vlocation->vldb.name, sizeof(sb->s_id)); /* allocate the root inode and dentry */ fid.vid = as->volume->vid; fid.vnode = 1; fid.unique = 1; inode = afs_iget(sb, params->key, &fid, NULL, NULL); if (IS_ERR(inode)) return PTR_ERR(inode); if (params->autocell) set_bit(AFS_VNODE_AUTOCELL, &AFS_FS_I(inode)->flags); ret = -ENOMEM; sb->s_root = d_make_root(inode); if (!sb->s_root) goto error; sb->s_d_op = &afs_fs_dentry_operations; _leave(" = 0"); return 0; error: _leave(" = %d", ret); return ret; } /* * get an AFS superblock */ static struct dentry *afs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *options) { struct afs_mount_params params; struct super_block *sb; struct afs_volume *vol; struct key *key; char *new_opts = kstrdup(options, GFP_KERNEL); struct afs_super_info *as; int ret; _enter(",,%s,%p", dev_name, options); memset(&params, 0, sizeof(params)); /* parse the options and device name */ if (options) { ret = afs_parse_options(&params, options, &dev_name); if (ret < 0) goto error; } ret = afs_parse_device_name(&params, dev_name); if (ret < 0) goto error; /* try and do the mount securely */ key = afs_request_key(params.cell); if (IS_ERR(key)) { _leave(" = %ld [key]", PTR_ERR(key)); ret = PTR_ERR(key); goto error; } params.key = key; /* parse the device name */ vol = afs_volume_lookup(&params); if (IS_ERR(vol)) { ret = PTR_ERR(vol); goto error; } /* allocate a superblock info record */ as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL); if (!as) { ret = -ENOMEM; afs_put_volume(vol); goto error; } as->volume = vol; /* allocate a deviceless superblock */ sb = sget(fs_type, afs_test_super, afs_set_super, as); if (IS_ERR(sb)) { ret = PTR_ERR(sb); afs_put_volume(vol); kfree(as); goto error; } if (!sb->s_root) { /* initial superblock/root creation */ _debug("create"); sb->s_flags = flags; ret = afs_fill_super(sb, &params); if (ret < 0) { deactivate_locked_super(sb); goto error; } save_mount_options(sb, new_opts); sb->s_flags |= MS_ACTIVE; } else { _debug("reuse"); ASSERTCMP(sb->s_flags, &, MS_ACTIVE); afs_put_volume(vol); kfree(as); } afs_put_cell(params.cell); kfree(new_opts); _leave(" = 0 [%p]", sb); return dget(sb->s_root); error: afs_put_cell(params.cell); key_put(params.key); kfree(new_opts); _leave(" = %d", ret); return ERR_PTR(ret); } static void afs_kill_super(struct super_block *sb) { struct afs_super_info *as = sb->s_fs_info; kill_anon_super(sb); afs_put_volume(as->volume); kfree(as); } /* * initialise an inode cache slab element prior to any use */ static void afs_i_init_once(void *_vnode) { struct afs_vnode *vnode = _vnode; memset(vnode, 0, sizeof(*vnode)); inode_init_once(&vnode->vfs_inode); 
init_waitqueue_head(&vnode->update_waitq); mutex_init(&vnode->permits_lock); mutex_init(&vnode->validate_lock); spin_lock_init(&vnode->writeback_lock); spin_lock_init(&vnode->lock); INIT_LIST_HEAD(&vnode->writebacks); INIT_LIST_HEAD(&vnode->pending_locks); INIT_LIST_HEAD(&vnode->granted_locks); INIT_DELAYED_WORK(&vnode->lock_work, afs_lock_work); INIT_WORK(&vnode->cb_broken_work, afs_broken_callback_work); } /* * allocate an AFS inode struct from our slab cache */ static struct inode *afs_alloc_inode(struct super_block *sb) { struct afs_vnode *vnode; vnode = kmem_cache_alloc(afs_inode_cachep, GFP_KERNEL); if (!vnode) return NULL; atomic_inc(&afs_count_active_inodes); memset(&vnode->fid, 0, sizeof(vnode->fid)); memset(&vnode->status, 0, sizeof(vnode->status)); vnode->volume = NULL; vnode->update_cnt = 0; vnode->flags = 1 << AFS_VNODE_UNSET; vnode->cb_promised = false; _leave(" = %p", &vnode->vfs_inode); return &vnode->vfs_inode; } static void afs_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); struct afs_vnode *vnode = AFS_FS_I(inode); kmem_cache_free(afs_inode_cachep, vnode); } /* * destroy an AFS inode struct */ static void afs_destroy_inode(struct inode *inode) { struct afs_vnode *vnode = AFS_FS_I(inode); _enter("%p{%x:%u}", inode, vnode->fid.vid, vnode->fid.vnode); _debug("DESTROY INODE %p", inode); ASSERTCMP(vnode->server, ==, NULL); call_rcu(&inode->i_rcu, afs_i_callback); atomic_dec(&afs_count_active_inodes); } /* * return information about an AFS volume */ static int afs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct afs_volume_status vs; struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode); struct key *key; int ret; key = afs_request_key(vnode->volume->cell); if (IS_ERR(key)) return PTR_ERR(key); ret = afs_vnode_get_volume_status(vnode, key, &vs); key_put(key); if (ret < 0) { _leave(" = %d", ret); return ret; } buf->f_type = dentry->d_sb->s_magic; buf->f_bsize = AFS_BLOCK_SIZE; buf->f_namelen = AFSNAMEMAX - 1; if (vs.max_quota == 0) buf->f_blocks = vs.part_max_blocks; else buf->f_blocks = vs.max_quota; buf->f_bavail = buf->f_bfree = buf->f_blocks - vs.blocks_in_use; return 0; }
gpl-2.0
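The "%[cell:]volume[.suffix]" grammar documented above afs_parse_device_name() can be exercised on its own. A minimal sketch that mirrors only the string splitting (no cell lookup); the device name is a hypothetical example:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *name = "#example.org:root.cell.readonly";	/* made-up */
	const char *p, *colon, *dot;

	if ((name[0] != '%' && name[0] != '#') || !name[1])
		return 1;				/* unparsable volume name */
	p = name + 1;

	printf("R/W forced: %s\n", name[0] == '%' ? "yes" : "no");

	colon = strchr(p, ':');
	if (colon)
		printf("cell: %.*s\n", (int)(colon - p), p);

	/* the volume type is further affected by a possible suffix */
	dot = strrchr(colon ? colon + 1 : p, '.');
	if (dot && (!strcmp(dot, ".readonly") || !strcmp(dot, ".backup")))
		printf("suffix: %s (forces volume type)\n", dot);

	printf("volume: %s\n", colon ? colon + 1 : p);
	return 0;
}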
CyanogenMod/android_kernel_xiaomi_cancro
net/dccp/proto.c
4881
30691
/* * net/dccp/proto.c * * An implementation of the DCCP protocol * Arnaldo Carvalho de Melo <acme@conectiva.com.br> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/dccp.h> #include <linux/module.h> #include <linux/types.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/in.h> #include <linux/if_arp.h> #include <linux/init.h> #include <linux/random.h> #include <linux/slab.h> #include <net/checksum.h> #include <net/inet_sock.h> #include <net/sock.h> #include <net/xfrm.h> #include <asm/ioctls.h> #include <linux/spinlock.h> #include <linux/timer.h> #include <linux/delay.h> #include <linux/poll.h> #include "ccid.h" #include "dccp.h" #include "feat.h" DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly; EXPORT_SYMBOL_GPL(dccp_statistics); struct percpu_counter dccp_orphan_count; EXPORT_SYMBOL_GPL(dccp_orphan_count); struct inet_hashinfo dccp_hashinfo; EXPORT_SYMBOL_GPL(dccp_hashinfo); /* the maximum queue length for tx in packets. 0 is no limit */ int sysctl_dccp_tx_qlen __read_mostly = 5; #ifdef CONFIG_IP_DCCP_DEBUG static const char *dccp_state_name(const int state) { static const char *const dccp_state_names[] = { [DCCP_OPEN] = "OPEN", [DCCP_REQUESTING] = "REQUESTING", [DCCP_PARTOPEN] = "PARTOPEN", [DCCP_LISTEN] = "LISTEN", [DCCP_RESPOND] = "RESPOND", [DCCP_CLOSING] = "CLOSING", [DCCP_ACTIVE_CLOSEREQ] = "CLOSEREQ", [DCCP_PASSIVE_CLOSE] = "PASSIVE_CLOSE", [DCCP_PASSIVE_CLOSEREQ] = "PASSIVE_CLOSEREQ", [DCCP_TIME_WAIT] = "TIME_WAIT", [DCCP_CLOSED] = "CLOSED", }; if (state >= DCCP_MAX_STATES) return "INVALID STATE!"; else return dccp_state_names[state]; } #endif void dccp_set_state(struct sock *sk, const int state) { const int oldstate = sk->sk_state; dccp_pr_debug("%s(%p) %s --> %s\n", dccp_role(sk), sk, dccp_state_name(oldstate), dccp_state_name(state)); WARN_ON(state == oldstate); switch (state) { case DCCP_OPEN: if (oldstate != DCCP_OPEN) DCCP_INC_STATS(DCCP_MIB_CURRESTAB); /* Client retransmits all Confirm options until entering OPEN */ if (oldstate == DCCP_PARTOPEN) dccp_feat_list_purge(&dccp_sk(sk)->dccps_featneg); break; case DCCP_CLOSED: if (oldstate == DCCP_OPEN || oldstate == DCCP_ACTIVE_CLOSEREQ || oldstate == DCCP_CLOSING) DCCP_INC_STATS(DCCP_MIB_ESTABRESETS); sk->sk_prot->unhash(sk); if (inet_csk(sk)->icsk_bind_hash != NULL && !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) inet_put_port(sk); /* fall through */ default: if (oldstate == DCCP_OPEN) DCCP_DEC_STATS(DCCP_MIB_CURRESTAB); } /* Change state AFTER socket is unhashed to avoid closed * socket sitting in hash tables. */ sk->sk_state = state; } EXPORT_SYMBOL_GPL(dccp_set_state); static void dccp_finish_passive_close(struct sock *sk) { switch (sk->sk_state) { case DCCP_PASSIVE_CLOSE: /* Node (client or server) has received Close packet. */ dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED); dccp_set_state(sk, DCCP_CLOSED); break; case DCCP_PASSIVE_CLOSEREQ: /* * Client received CloseReq. We set the `active' flag so that * dccp_send_close() retransmits the Close as per RFC 4340, 8.3. 
*/ dccp_send_close(sk, 1); dccp_set_state(sk, DCCP_CLOSING); } } void dccp_done(struct sock *sk) { dccp_set_state(sk, DCCP_CLOSED); dccp_clear_xmit_timers(sk); sk->sk_shutdown = SHUTDOWN_MASK; if (!sock_flag(sk, SOCK_DEAD)) sk->sk_state_change(sk); else inet_csk_destroy_sock(sk); } EXPORT_SYMBOL_GPL(dccp_done); const char *dccp_packet_name(const int type) { static const char *const dccp_packet_names[] = { [DCCP_PKT_REQUEST] = "REQUEST", [DCCP_PKT_RESPONSE] = "RESPONSE", [DCCP_PKT_DATA] = "DATA", [DCCP_PKT_ACK] = "ACK", [DCCP_PKT_DATAACK] = "DATAACK", [DCCP_PKT_CLOSEREQ] = "CLOSEREQ", [DCCP_PKT_CLOSE] = "CLOSE", [DCCP_PKT_RESET] = "RESET", [DCCP_PKT_SYNC] = "SYNC", [DCCP_PKT_SYNCACK] = "SYNCACK", }; if (type >= DCCP_NR_PKT_TYPES) return "INVALID"; else return dccp_packet_names[type]; } EXPORT_SYMBOL_GPL(dccp_packet_name); int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized) { struct dccp_sock *dp = dccp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); icsk->icsk_rto = DCCP_TIMEOUT_INIT; icsk->icsk_syn_retries = sysctl_dccp_request_retries; sk->sk_state = DCCP_CLOSED; sk->sk_write_space = dccp_write_space; icsk->icsk_sync_mss = dccp_sync_mss; dp->dccps_mss_cache = 536; dp->dccps_rate_last = jiffies; dp->dccps_role = DCCP_ROLE_UNDEFINED; dp->dccps_service = DCCP_SERVICE_CODE_IS_ABSENT; dp->dccps_tx_qlen = sysctl_dccp_tx_qlen; dccp_init_xmit_timers(sk); INIT_LIST_HEAD(&dp->dccps_featneg); /* control socket doesn't need feat nego */ if (likely(ctl_sock_initialized)) return dccp_feat_init(sk); return 0; } EXPORT_SYMBOL_GPL(dccp_init_sock); void dccp_destroy_sock(struct sock *sk) { struct dccp_sock *dp = dccp_sk(sk); /* * DCCP doesn't use sk_write_queue, just sk_send_head * for retransmissions */ if (sk->sk_send_head != NULL) { kfree_skb(sk->sk_send_head); sk->sk_send_head = NULL; } /* Clean up a referenced DCCP bind bucket. */ if (inet_csk(sk)->icsk_bind_hash != NULL) inet_put_port(sk); kfree(dp->dccps_service_list); dp->dccps_service_list = NULL; if (dp->dccps_hc_rx_ackvec != NULL) { dccp_ackvec_free(dp->dccps_hc_rx_ackvec); dp->dccps_hc_rx_ackvec = NULL; } ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk); ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk); dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL; /* clean up feature negotiation state */ dccp_feat_list_purge(&dp->dccps_featneg); } EXPORT_SYMBOL_GPL(dccp_destroy_sock); static inline int dccp_listen_start(struct sock *sk, int backlog) { struct dccp_sock *dp = dccp_sk(sk); dp->dccps_role = DCCP_ROLE_LISTEN; /* do not start to listen if feature negotiation setup fails */ if (dccp_feat_finalise_settings(dp)) return -EPROTO; return inet_csk_listen_start(sk, backlog); } static inline int dccp_need_reset(int state) { return state != DCCP_CLOSED && state != DCCP_LISTEN && state != DCCP_REQUESTING; } int dccp_disconnect(struct sock *sk, int flags) { struct inet_connection_sock *icsk = inet_csk(sk); struct inet_sock *inet = inet_sk(sk); int err = 0; const int old_state = sk->sk_state; if (old_state != DCCP_CLOSED) dccp_set_state(sk, DCCP_CLOSED); /* * This corresponds to the ABORT function of RFC793, sec. 3.8 * TCP uses a RST segment, DCCP a Reset packet with Code 2, "Aborted". 
*/ if (old_state == DCCP_LISTEN) { inet_csk_listen_stop(sk); } else if (dccp_need_reset(old_state)) { dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED); sk->sk_err = ECONNRESET; } else if (old_state == DCCP_REQUESTING) sk->sk_err = ECONNRESET; dccp_clear_xmit_timers(sk); __skb_queue_purge(&sk->sk_receive_queue); __skb_queue_purge(&sk->sk_write_queue); if (sk->sk_send_head != NULL) { __kfree_skb(sk->sk_send_head); sk->sk_send_head = NULL; } inet->inet_dport = 0; if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) inet_reset_saddr(sk); sk->sk_shutdown = 0; sock_reset_flag(sk, SOCK_DONE); icsk->icsk_backoff = 0; inet_csk_delack_init(sk); __sk_dst_reset(sk); WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); sk->sk_error_report(sk); return err; } EXPORT_SYMBOL_GPL(dccp_disconnect); /* * Wait for a DCCP event. * * Note that we don't need to lock the socket, as the upper poll layers * take care of normal races (between the test and the event) and we don't * go look at any of the socket buffers directly. */ unsigned int dccp_poll(struct file *file, struct socket *sock, poll_table *wait) { unsigned int mask; struct sock *sk = sock->sk; sock_poll_wait(file, sk_sleep(sk), wait); if (sk->sk_state == DCCP_LISTEN) return inet_csk_listen_poll(sk); /* Socket is not locked. We are protected from async events by poll logic and correct handling of state changes made by other threads is impossible in any case. */ mask = 0; if (sk->sk_err) mask = POLLERR; if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED) mask |= POLLHUP; if (sk->sk_shutdown & RCV_SHUTDOWN) mask |= POLLIN | POLLRDNORM | POLLRDHUP; /* Connected? */ if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) { if (atomic_read(&sk->sk_rmem_alloc) > 0) mask |= POLLIN | POLLRDNORM; if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) { mask |= POLLOUT | POLLWRNORM; } else { /* send SIGIO later */ set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); /* Race breaker. If space is freed after * wspace test but before the flags are set, * IO signal will be lost. */ if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) mask |= POLLOUT | POLLWRNORM; } } } return mask; } EXPORT_SYMBOL_GPL(dccp_poll); int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg) { int rc = -ENOTCONN; lock_sock(sk); if (sk->sk_state == DCCP_LISTEN) goto out; switch (cmd) { case SIOCINQ: { struct sk_buff *skb; unsigned long amount = 0; skb = skb_peek(&sk->sk_receive_queue); if (skb != NULL) { /* * We will only return the amount of this packet since * that is all that will be read.
*/ amount = skb->len; } rc = put_user(amount, (int __user *)arg); } break; default: rc = -ENOIOCTLCMD; break; } out: release_sock(sk); return rc; } EXPORT_SYMBOL_GPL(dccp_ioctl); static int dccp_setsockopt_service(struct sock *sk, const __be32 service, char __user *optval, unsigned int optlen) { struct dccp_sock *dp = dccp_sk(sk); struct dccp_service_list *sl = NULL; if (service == DCCP_SERVICE_INVALID_VALUE || optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32)) return -EINVAL; if (optlen > sizeof(service)) { sl = kmalloc(optlen, GFP_KERNEL); if (sl == NULL) return -ENOMEM; sl->dccpsl_nr = optlen / sizeof(u32) - 1; if (copy_from_user(sl->dccpsl_list, optval + sizeof(service), optlen - sizeof(service)) || dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) { kfree(sl); return -EFAULT; } } lock_sock(sk); dp->dccps_service = service; kfree(dp->dccps_service_list); dp->dccps_service_list = sl; release_sock(sk); return 0; } static int dccp_setsockopt_cscov(struct sock *sk, int cscov, bool rx) { u8 *list, len; int i, rc; if (cscov < 0 || cscov > 15) return -EINVAL; /* * Populate a list of permissible values, in the range cscov...15. This * is necessary since feature negotiation of single values only works if * both sides incidentally choose the same value. Since the list starts * lowest-value first, negotiation will pick the smallest shared value. */ if (cscov == 0) return 0; len = 16 - cscov; list = kmalloc(len, GFP_KERNEL); if (list == NULL) return -ENOBUFS; for (i = 0; i < len; i++) list[i] = cscov++; rc = dccp_feat_register_sp(sk, DCCPF_MIN_CSUM_COVER, rx, list, len); if (rc == 0) { if (rx) dccp_sk(sk)->dccps_pcrlen = cscov; else dccp_sk(sk)->dccps_pcslen = cscov; } kfree(list); return rc; } static int dccp_setsockopt_ccid(struct sock *sk, int type, char __user *optval, unsigned int optlen) { u8 *val; int rc = 0; if (optlen < 1 || optlen > DCCP_FEAT_MAX_SP_VALS) return -EINVAL; val = memdup_user(optval, optlen); if (IS_ERR(val)) return PTR_ERR(val); lock_sock(sk); if (type == DCCP_SOCKOPT_TX_CCID || type == DCCP_SOCKOPT_CCID) rc = dccp_feat_register_sp(sk, DCCPF_CCID, 1, val, optlen); if (!rc && (type == DCCP_SOCKOPT_RX_CCID || type == DCCP_SOCKOPT_CCID)) rc = dccp_feat_register_sp(sk, DCCPF_CCID, 0, val, optlen); release_sock(sk); kfree(val); return rc; } static int do_dccp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { struct dccp_sock *dp = dccp_sk(sk); int val, err = 0; switch (optname) { case DCCP_SOCKOPT_PACKET_SIZE: DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n"); return 0; case DCCP_SOCKOPT_CHANGE_L: case DCCP_SOCKOPT_CHANGE_R: DCCP_WARN("sockopt(CHANGE_L/R) is deprecated: fix your app\n"); return 0; case DCCP_SOCKOPT_CCID: case DCCP_SOCKOPT_RX_CCID: case DCCP_SOCKOPT_TX_CCID: return dccp_setsockopt_ccid(sk, optname, optval, optlen); } if (optlen < (int)sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; if (optname == DCCP_SOCKOPT_SERVICE) return dccp_setsockopt_service(sk, val, optval, optlen); lock_sock(sk); switch (optname) { case DCCP_SOCKOPT_SERVER_TIMEWAIT: if (dp->dccps_role != DCCP_ROLE_SERVER) err = -EOPNOTSUPP; else dp->dccps_server_timewait = (val != 0); break; case DCCP_SOCKOPT_SEND_CSCOV: err = dccp_setsockopt_cscov(sk, val, false); break; case DCCP_SOCKOPT_RECV_CSCOV: err = dccp_setsockopt_cscov(sk, val, true); break; case DCCP_SOCKOPT_QPOLICY_ID: if (sk->sk_state != DCCP_CLOSED) err = -EISCONN; else if (val < 0 || val >= DCCPQ_POLICY_MAX) err = -EINVAL; else 
dp->dccps_qpolicy = val; break; case DCCP_SOCKOPT_QPOLICY_TXQLEN: if (val < 0) err = -EINVAL; else dp->dccps_tx_qlen = val; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } int dccp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { if (level != SOL_DCCP) return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level, optname, optval, optlen); return do_dccp_setsockopt(sk, level, optname, optval, optlen); } EXPORT_SYMBOL_GPL(dccp_setsockopt); #ifdef CONFIG_COMPAT int compat_dccp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { if (level != SOL_DCCP) return inet_csk_compat_setsockopt(sk, level, optname, optval, optlen); return do_dccp_setsockopt(sk, level, optname, optval, optlen); } EXPORT_SYMBOL_GPL(compat_dccp_setsockopt); #endif static int dccp_getsockopt_service(struct sock *sk, int len, __be32 __user *optval, int __user *optlen) { const struct dccp_sock *dp = dccp_sk(sk); const struct dccp_service_list *sl; int err = -ENOENT, slen = 0, total_len = sizeof(u32); lock_sock(sk); if ((sl = dp->dccps_service_list) != NULL) { slen = sl->dccpsl_nr * sizeof(u32); total_len += slen; } err = -EINVAL; if (total_len > len) goto out; err = 0; if (put_user(total_len, optlen) || put_user(dp->dccps_service, optval) || (sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen))) err = -EFAULT; out: release_sock(sk); return err; } static int do_dccp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { struct dccp_sock *dp; int val, len; if (get_user(len, optlen)) return -EFAULT; if (len < (int)sizeof(int)) return -EINVAL; dp = dccp_sk(sk); switch (optname) { case DCCP_SOCKOPT_PACKET_SIZE: DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n"); return 0; case DCCP_SOCKOPT_SERVICE: return dccp_getsockopt_service(sk, len, (__be32 __user *)optval, optlen); case DCCP_SOCKOPT_GET_CUR_MPS: val = dp->dccps_mss_cache; break; case DCCP_SOCKOPT_AVAILABLE_CCIDS: return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen); case DCCP_SOCKOPT_TX_CCID: val = ccid_get_current_tx_ccid(dp); if (val < 0) return -ENOPROTOOPT; break; case DCCP_SOCKOPT_RX_CCID: val = ccid_get_current_rx_ccid(dp); if (val < 0) return -ENOPROTOOPT; break; case DCCP_SOCKOPT_SERVER_TIMEWAIT: val = dp->dccps_server_timewait; break; case DCCP_SOCKOPT_SEND_CSCOV: val = dp->dccps_pcslen; break; case DCCP_SOCKOPT_RECV_CSCOV: val = dp->dccps_pcrlen; break; case DCCP_SOCKOPT_QPOLICY_ID: val = dp->dccps_qpolicy; break; case DCCP_SOCKOPT_QPOLICY_TXQLEN: val = dp->dccps_tx_qlen; break; case 128 ... 191: return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname, len, (u32 __user *)optval, optlen); case 192 ... 
255: return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname, len, (u32 __user *)optval, optlen); default: return -ENOPROTOOPT; } len = sizeof(val); if (put_user(len, optlen) || copy_to_user(optval, &val, len)) return -EFAULT; return 0; } int dccp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { if (level != SOL_DCCP) return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level, optname, optval, optlen); return do_dccp_getsockopt(sk, level, optname, optval, optlen); } EXPORT_SYMBOL_GPL(dccp_getsockopt); #ifdef CONFIG_COMPAT int compat_dccp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { if (level != SOL_DCCP) return inet_csk_compat_getsockopt(sk, level, optname, optval, optlen); return do_dccp_getsockopt(sk, level, optname, optval, optlen); } EXPORT_SYMBOL_GPL(compat_dccp_getsockopt); #endif static int dccp_msghdr_parse(struct msghdr *msg, struct sk_buff *skb) { struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg); /* * Assign an (opaque) qpolicy priority value to skb->priority. * * We are overloading this skb field for use with the qpolicy subsystem. * The skb->priority is normally used for the SO_PRIORITY option, which * is initialised from sk_priority. Since the assignment of sk_priority * to skb->priority happens later (on layer 3), we overload this field * for use with queueing priorities as long as the skb is on layer 4. * The default priority value (if nothing is set) is 0. */ skb->priority = 0; for (; cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) { if (!CMSG_OK(msg, cmsg)) return -EINVAL; if (cmsg->cmsg_level != SOL_DCCP) continue; if (cmsg->cmsg_type <= DCCP_SCM_QPOLICY_MAX && !dccp_qpolicy_param_ok(skb->sk, cmsg->cmsg_type)) return -EINVAL; switch (cmsg->cmsg_type) { case DCCP_SCM_PRIORITY: if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u32))) return -EINVAL; skb->priority = *(__u32 *)CMSG_DATA(cmsg); break; default: return -EINVAL; } } return 0; } int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len) { const struct dccp_sock *dp = dccp_sk(sk); const int flags = msg->msg_flags; const int noblock = flags & MSG_DONTWAIT; struct sk_buff *skb; int rc, size; long timeo; if (len > dp->dccps_mss_cache) return -EMSGSIZE; lock_sock(sk); if (dccp_qpolicy_full(sk)) { rc = -EAGAIN; goto out_release; } timeo = sock_sndtimeo(sk, noblock); /* * We have to use sk_stream_wait_connect here to set sk_write_pending, * so that the trick in dccp_rcv_request_sent_state_process works. */ /* Wait for a connection to finish. */ if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN)) if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0) goto out_release; size = sk->sk_prot->max_header + len; release_sock(sk); skb = sock_alloc_send_skb(sk, size, noblock, &rc); lock_sock(sk); if (skb == NULL) goto out_release; skb_reserve(skb, sk->sk_prot->max_header); rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); if (rc != 0) goto out_discard; rc = dccp_msghdr_parse(msg, skb); if (rc != 0) goto out_discard; dccp_qpolicy_push(sk, skb); /* * The xmit_timer is set if the TX CCID is rate-based and will expire * when congestion control permits to release further packets into the * network. Window-based CCIDs do not use this timer. */ if (!timer_pending(&dp->dccps_xmit_timer)) dccp_write_xmit(sk); out_release: release_sock(sk); return rc ?
: len; out_discard: kfree_skb(skb); goto out_release; } EXPORT_SYMBOL_GPL(dccp_sendmsg); int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int nonblock, int flags, int *addr_len) { const struct dccp_hdr *dh; long timeo; lock_sock(sk); if (sk->sk_state == DCCP_LISTEN) { len = -ENOTCONN; goto out; } timeo = sock_rcvtimeo(sk, nonblock); do { struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); if (skb == NULL) goto verify_sock_status; dh = dccp_hdr(skb); switch (dh->dccph_type) { case DCCP_PKT_DATA: case DCCP_PKT_DATAACK: goto found_ok_skb; case DCCP_PKT_CLOSE: case DCCP_PKT_CLOSEREQ: if (!(flags & MSG_PEEK)) dccp_finish_passive_close(sk); /* fall through */ case DCCP_PKT_RESET: dccp_pr_debug("found fin (%s) ok!\n", dccp_packet_name(dh->dccph_type)); len = 0; goto found_fin_ok; default: dccp_pr_debug("packet_type=%s\n", dccp_packet_name(dh->dccph_type)); sk_eat_skb(sk, skb, 0); } verify_sock_status: if (sock_flag(sk, SOCK_DONE)) { len = 0; break; } if (sk->sk_err) { len = sock_error(sk); break; } if (sk->sk_shutdown & RCV_SHUTDOWN) { len = 0; break; } if (sk->sk_state == DCCP_CLOSED) { if (!sock_flag(sk, SOCK_DONE)) { /* This occurs when user tries to read * from never connected socket. */ len = -ENOTCONN; break; } len = 0; break; } if (!timeo) { len = -EAGAIN; break; } if (signal_pending(current)) { len = sock_intr_errno(timeo); break; } sk_wait_data(sk, &timeo); continue; found_ok_skb: if (len > skb->len) len = skb->len; else if (len < skb->len) msg->msg_flags |= MSG_TRUNC; if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len)) { /* Exception. Bailout! */ len = -EFAULT; break; } if (flags & MSG_TRUNC) len = skb->len; found_fin_ok: if (!(flags & MSG_PEEK)) sk_eat_skb(sk, skb, 0); break; } while (1); out: release_sock(sk); return len; } EXPORT_SYMBOL_GPL(dccp_recvmsg); int inet_dccp_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; unsigned char old_state; int err; lock_sock(sk); err = -EINVAL; if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP) goto out; old_state = sk->sk_state; if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN))) goto out; /* Really, if the socket is already in listen state * we can only allow the backlog to be adjusted. */ if (old_state != DCCP_LISTEN) { /* * FIXME: here it probably should be sk->sk_prot->listen_start * see tcp_listen_start */ err = dccp_listen_start(sk, backlog); if (err) goto out; } sk->sk_max_ack_backlog = backlog; err = 0; out: release_sock(sk); return err; } EXPORT_SYMBOL_GPL(inet_dccp_listen); static void dccp_terminate_connection(struct sock *sk) { u8 next_state = DCCP_CLOSED; switch (sk->sk_state) { case DCCP_PASSIVE_CLOSE: case DCCP_PASSIVE_CLOSEREQ: dccp_finish_passive_close(sk); break; case DCCP_PARTOPEN: dccp_pr_debug("Stop PARTOPEN timer (%p)\n", sk); inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); /* fall through */ case DCCP_OPEN: dccp_send_close(sk, 1); if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER && !dccp_sk(sk)->dccps_server_timewait) next_state = DCCP_ACTIVE_CLOSEREQ; else next_state = DCCP_CLOSING; /* fall through */ default: dccp_set_state(sk, next_state); } } void dccp_close(struct sock *sk, long timeout) { struct dccp_sock *dp = dccp_sk(sk); struct sk_buff *skb; u32 data_was_unread = 0; int state; lock_sock(sk); sk->sk_shutdown = SHUTDOWN_MASK; if (sk->sk_state == DCCP_LISTEN) { dccp_set_state(sk, DCCP_CLOSED); /* Special case. */ inet_csk_listen_stop(sk); goto adjudge_to_death; } sk_stop_timer(sk, &dp->dccps_xmit_timer); /* * We need to flush the recv. 
buffs. We do this only on the * descriptor close, not protocol-sourced closes, because the *reader process may not have drained the data yet! */ while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { data_was_unread += skb->len; __kfree_skb(skb); } if (data_was_unread) { /* Unread data was tossed, send an appropriate Reset Code */ DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread); dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED); dccp_set_state(sk, DCCP_CLOSED); } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { /* Check zero linger _after_ checking for unread data. */ sk->sk_prot->disconnect(sk, 0); } else if (sk->sk_state != DCCP_CLOSED) { /* * Normal connection termination. May need to wait if there are * still packets in the TX queue that are delayed by the CCID. */ dccp_flush_write_queue(sk, &timeout); dccp_terminate_connection(sk); } /* * Flush write queue. This may be necessary in several cases: * - we have been closed by the peer but still have application data; * - abortive termination (unread data or zero linger time), * - normal termination but queue could not be flushed within time limit */ __skb_queue_purge(&sk->sk_write_queue); sk_stream_wait_close(sk, timeout); adjudge_to_death: state = sk->sk_state; sock_hold(sk); sock_orphan(sk); /* * It is the last release_sock in its life. It will remove backlog. */ release_sock(sk); /* * Now socket is owned by kernel and we acquire BH lock * to finish close. No need to check for user refs. */ local_bh_disable(); bh_lock_sock(sk); WARN_ON(sock_owned_by_user(sk)); percpu_counter_inc(sk->sk_prot->orphan_count); /* Have we already been destroyed by a softirq or backlog? */ if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED) goto out; if (sk->sk_state == DCCP_CLOSED) inet_csk_destroy_sock(sk); /* Otherwise, socket is reprieved until protocol close. */ out: bh_unlock_sock(sk); local_bh_enable(); sock_put(sk); } EXPORT_SYMBOL_GPL(dccp_close); void dccp_shutdown(struct sock *sk, int how) { dccp_pr_debug("called shutdown(%x)\n", how); } EXPORT_SYMBOL_GPL(dccp_shutdown); static inline int dccp_mib_init(void) { return snmp_mib_init((void __percpu **)dccp_statistics, sizeof(struct dccp_mib), __alignof__(struct dccp_mib)); } static inline void dccp_mib_exit(void) { snmp_mib_free((void __percpu **)dccp_statistics); } static int thash_entries; module_param(thash_entries, int, 0444); MODULE_PARM_DESC(thash_entries, "Number of ehash buckets"); #ifdef CONFIG_IP_DCCP_DEBUG bool dccp_debug; module_param(dccp_debug, bool, 0644); MODULE_PARM_DESC(dccp_debug, "Enable debug messages"); EXPORT_SYMBOL_GPL(dccp_debug); #endif static int __init dccp_init(void) { unsigned long goal; int ehash_order, bhash_order, i; int rc; BUILD_BUG_ON(sizeof(struct dccp_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb)); rc = percpu_counter_init(&dccp_orphan_count, 0); if (rc) goto out_fail; rc = -ENOBUFS; inet_hashinfo_init(&dccp_hashinfo); dccp_hashinfo.bind_bucket_cachep = kmem_cache_create("dccp_bind_bucket", sizeof(struct inet_bind_bucket), 0, SLAB_HWCACHE_ALIGN, NULL); if (!dccp_hashinfo.bind_bucket_cachep) goto out_free_percpu; /* * Size and allocate the main established and bind bucket * hash tables. * * The methodology is similar to that of the buffer cache. 
*/ if (totalram_pages >= (128 * 1024)) goal = totalram_pages >> (21 - PAGE_SHIFT); else goal = totalram_pages >> (23 - PAGE_SHIFT); if (thash_entries) goal = (thash_entries * sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT; for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++) ; do { unsigned long hash_size = (1UL << ehash_order) * PAGE_SIZE / sizeof(struct inet_ehash_bucket); while (hash_size & (hash_size - 1)) hash_size--; dccp_hashinfo.ehash_mask = hash_size - 1; dccp_hashinfo.ehash = (struct inet_ehash_bucket *) __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, ehash_order); } while (!dccp_hashinfo.ehash && --ehash_order > 0); if (!dccp_hashinfo.ehash) { DCCP_CRIT("Failed to allocate DCCP established hash table"); goto out_free_bind_bucket_cachep; } for (i = 0; i <= dccp_hashinfo.ehash_mask; i++) { INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i); INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].twchain, i); } if (inet_ehash_locks_alloc(&dccp_hashinfo)) goto out_free_dccp_ehash; bhash_order = ehash_order; do { dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE / sizeof(struct inet_bind_hashbucket); if ((dccp_hashinfo.bhash_size > (64 * 1024)) && bhash_order > 0) continue; dccp_hashinfo.bhash = (struct inet_bind_hashbucket *) __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, bhash_order); } while (!dccp_hashinfo.bhash && --bhash_order >= 0); if (!dccp_hashinfo.bhash) { DCCP_CRIT("Failed to allocate DCCP bind hash table"); goto out_free_dccp_locks; } for (i = 0; i < dccp_hashinfo.bhash_size; i++) { spin_lock_init(&dccp_hashinfo.bhash[i].lock); INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain); } rc = dccp_mib_init(); if (rc) goto out_free_dccp_bhash; rc = dccp_ackvec_init(); if (rc) goto out_free_dccp_mib; rc = dccp_sysctl_init(); if (rc) goto out_ackvec_exit; rc = ccid_initialize_builtins(); if (rc) goto out_sysctl_exit; dccp_timestamping_init(); return 0; out_sysctl_exit: dccp_sysctl_exit(); out_ackvec_exit: dccp_ackvec_exit(); out_free_dccp_mib: dccp_mib_exit(); out_free_dccp_bhash: free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order); out_free_dccp_locks: inet_ehash_locks_free(&dccp_hashinfo); out_free_dccp_ehash: free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order); out_free_bind_bucket_cachep: kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep); out_free_percpu: percpu_counter_destroy(&dccp_orphan_count); out_fail: dccp_hashinfo.bhash = NULL; dccp_hashinfo.ehash = NULL; dccp_hashinfo.bind_bucket_cachep = NULL; return rc; } static void __exit dccp_fini(void) { ccid_cleanup_builtins(); dccp_mib_exit(); free_pages((unsigned long)dccp_hashinfo.bhash, get_order(dccp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket))); free_pages((unsigned long)dccp_hashinfo.ehash, get_order((dccp_hashinfo.ehash_mask + 1) * sizeof(struct inet_ehash_bucket))); inet_ehash_locks_free(&dccp_hashinfo); kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep); dccp_ackvec_exit(); dccp_sysctl_exit(); percpu_counter_destroy(&dccp_orphan_count); } module_init(dccp_init); module_exit(dccp_fini); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>"); MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");
gpl-2.0
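The dccp_msghdr_parse() comment in net/dccp/proto.c above explains how a per-packet queueing priority reaches the qpolicy subsystem as SOL_DCCP ancillary data. A minimal userspace sketch of the matching sender side follows; it assumes DCCP_SCM_PRIORITY from <linux/dccp.h> and a connected SOCK_DCCP socket, the helper name is invented, and error handling is trimmed.

#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/types.h>
#include <linux/dccp.h>

#ifndef SOL_DCCP
#define SOL_DCCP 269                    /* value from linux/socket.h */
#endif

/* Hedged sketch: queue one DCCP packet with qpolicy priority 'prio'.
 * The socket must already have selected the priority policy, e.g. via
 * setsockopt(sk, SOL_DCCP, DCCP_SOCKOPT_QPOLICY_ID, ...) while still
 * closed; otherwise dccp_msghdr_parse() rejects the cmsg with -EINVAL. */
static ssize_t dccp_send_prio(int sk, const void *buf, size_t len, __u32 prio)
{
        char cbuf[CMSG_SPACE(sizeof(__u32))];
        struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
        struct msghdr msg = {
                .msg_iov        = &iov,
                .msg_iovlen     = 1,
                .msg_control    = cbuf,
                .msg_controllen = sizeof(cbuf),
        };
        struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

        cmsg->cmsg_level = SOL_DCCP;            /* matched in dccp_msghdr_parse() */
        cmsg->cmsg_type  = DCCP_SCM_PRIORITY;   /* ends up in skb->priority */
        cmsg->cmsg_len   = CMSG_LEN(sizeof(__u32));
        memcpy(CMSG_DATA(cmsg), &prio, sizeof(prio));

        return sendmsg(sk, &msg, 0);
}

The setsockopt step matters because dccp_qpolicy_param_ok() only accepts DCCP_SCM_PRIORITY once the prio queueing policy is active on the socket.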
cm-3470/android_kernel_samsung_gardalte
drivers/mfd/wl1273-core.c
5137
6707
/* * MFD driver for wl1273 FM radio and audio codec submodules. * * Copyright (C) 2011 Nokia Corporation * Author: Matti Aaltonen <matti.j.aaltonen@nokia.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include <linux/mfd/wl1273-core.h> #include <linux/slab.h> #include <linux/module.h> #define DRIVER_DESC "WL1273 FM Radio Core" static const struct i2c_device_id wl1273_driver_id_table[] = { { WL1273_FM_DRIVER_NAME, 0 }, { } }; MODULE_DEVICE_TABLE(i2c, wl1273_driver_id_table); static int wl1273_fm_read_reg(struct wl1273_core *core, u8 reg, u16 *value) { struct i2c_client *client = core->client; u8 b[2]; int r; r = i2c_smbus_read_i2c_block_data(client, reg, sizeof(b), b); if (r != 2) { dev_err(&client->dev, "%s: Read: %d fails.\n", __func__, reg); return -EREMOTEIO; } *value = (u16)b[0] << 8 | b[1]; return 0; } static int wl1273_fm_write_cmd(struct wl1273_core *core, u8 cmd, u16 param) { struct i2c_client *client = core->client; u8 buf[] = { (param >> 8) & 0xff, param & 0xff }; int r; r = i2c_smbus_write_i2c_block_data(client, cmd, sizeof(buf), buf); if (r) { dev_err(&client->dev, "%s: Cmd: %d fails.\n", __func__, cmd); return r; } return 0; } static int wl1273_fm_write_data(struct wl1273_core *core, u8 *data, u16 len) { struct i2c_client *client = core->client; struct i2c_msg msg; int r; msg.addr = client->addr; msg.flags = 0; msg.buf = data; msg.len = len; r = i2c_transfer(client->adapter, &msg, 1); if (r != 1) { dev_err(&client->dev, "%s: write error.\n", __func__); return -EREMOTEIO; } return 0; } /** * wl1273_fm_set_audio() - Set audio mode. * @core: A pointer to the device struct. * @new_mode: The new audio mode. * * Audio modes are WL1273_AUDIO_DIGITAL and WL1273_AUDIO_ANALOG. */ static int wl1273_fm_set_audio(struct wl1273_core *core, unsigned int new_mode) { int r = 0; if (core->mode == WL1273_MODE_OFF || core->mode == WL1273_MODE_SUSPENDED) return -EPERM; if (core->mode == WL1273_MODE_RX && new_mode == WL1273_AUDIO_DIGITAL) { r = wl1273_fm_write_cmd(core, WL1273_PCM_MODE_SET, WL1273_PCM_DEF_MODE); if (r) goto out; r = wl1273_fm_write_cmd(core, WL1273_I2S_MODE_CONFIG_SET, core->i2s_mode); if (r) goto out; r = wl1273_fm_write_cmd(core, WL1273_AUDIO_ENABLE, WL1273_AUDIO_ENABLE_I2S); if (r) goto out; } else if (core->mode == WL1273_MODE_RX && new_mode == WL1273_AUDIO_ANALOG) { r = wl1273_fm_write_cmd(core, WL1273_AUDIO_ENABLE, WL1273_AUDIO_ENABLE_ANALOG); if (r) goto out; } else if (core->mode == WL1273_MODE_TX && new_mode == WL1273_AUDIO_DIGITAL) { r = wl1273_fm_write_cmd(core, WL1273_I2S_MODE_CONFIG_SET, core->i2s_mode); if (r) goto out; r = wl1273_fm_write_cmd(core, WL1273_AUDIO_IO_SET, WL1273_AUDIO_IO_SET_I2S); if (r) goto out; } else if (core->mode == WL1273_MODE_TX && new_mode == WL1273_AUDIO_ANALOG) { r = wl1273_fm_write_cmd(core, WL1273_AUDIO_IO_SET, WL1273_AUDIO_IO_SET_ANALOG); if (r) goto out; } core->audio_mode = new_mode; out: return r; } /** * wl1273_fm_set_volume() - Set volume. 
* @core: A pointer to the device struct. * @volume: The new volume value. */ static int wl1273_fm_set_volume(struct wl1273_core *core, unsigned int volume) { int r; if (volume > WL1273_MAX_VOLUME) return -EINVAL; if (core->volume == volume) return 0; r = wl1273_fm_write_cmd(core, WL1273_VOLUME_SET, volume); if (r) return r; core->volume = volume; return 0; } static int wl1273_core_remove(struct i2c_client *client) { struct wl1273_core *core = i2c_get_clientdata(client); dev_dbg(&client->dev, "%s\n", __func__); mfd_remove_devices(&client->dev); kfree(core); return 0; } static int __devinit wl1273_core_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct wl1273_fm_platform_data *pdata = client->dev.platform_data; struct wl1273_core *core; struct mfd_cell *cell; int children = 0; int r = 0; dev_dbg(&client->dev, "%s\n", __func__); if (!pdata) { dev_err(&client->dev, "No platform data.\n"); return -EINVAL; } if (!(pdata->children & WL1273_RADIO_CHILD)) { dev_err(&client->dev, "Cannot function without radio child.\n"); return -EINVAL; } core = kzalloc(sizeof(*core), GFP_KERNEL); if (!core) return -ENOMEM; core->pdata = pdata; core->client = client; mutex_init(&core->lock); i2c_set_clientdata(client, core); dev_dbg(&client->dev, "%s: Have V4L2.\n", __func__); cell = &core->cells[children]; cell->name = "wl1273_fm_radio"; cell->platform_data = &core; cell->pdata_size = sizeof(core); children++; core->read = wl1273_fm_read_reg; core->write = wl1273_fm_write_cmd; core->write_data = wl1273_fm_write_data; core->set_audio = wl1273_fm_set_audio; core->set_volume = wl1273_fm_set_volume; if (pdata->children & WL1273_CODEC_CHILD) { cell = &core->cells[children]; dev_dbg(&client->dev, "%s: Have codec.\n", __func__); cell->name = "wl1273-codec"; cell->platform_data = &core; cell->pdata_size = sizeof(core); children++; } dev_dbg(&client->dev, "%s: number of children: %d.\n", __func__, children); r = mfd_add_devices(&client->dev, -1, core->cells, children, NULL, 0); if (r) goto err; return 0; err: pdata->free_resources(); kfree(core); dev_dbg(&client->dev, "%s\n", __func__); return r; } static struct i2c_driver wl1273_core_driver = { .driver = { .name = WL1273_FM_DRIVER_NAME, }, .probe = wl1273_core_probe, .id_table = wl1273_driver_id_table, .remove = __devexit_p(wl1273_core_remove), }; static int __init wl1273_core_init(void) { int r; r = i2c_add_driver(&wl1273_core_driver); if (r) { pr_err(WL1273_FM_DRIVER_NAME ": driver registration failed\n"); return r; } return r; } static void __exit wl1273_core_exit(void) { i2c_del_driver(&wl1273_core_driver); } late_initcall(wl1273_core_init); module_exit(wl1273_core_exit); MODULE_AUTHOR("Matti Aaltonen <matti.j.aaltonen@nokia.com>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
gpl-2.0
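Both I2C helpers in drivers/mfd/wl1273-core.c above share one wire convention: a wl1273 register is a 16-bit quantity carried most-significant byte first in a two-byte SMBus block. Pulled out as a standalone sketch (helper names invented for illustration):

#include <linux/types.h>

/* Pack a register value MSB-first, as wl1273_fm_write_cmd() does before
 * handing the buffer to i2c_smbus_write_i2c_block_data(). */
static void wl1273_pack_be16(u16 param, u8 buf[2])
{
        buf[0] = (param >> 8) & 0xff;   /* high byte goes on the wire first */
        buf[1] = param & 0xff;
}

/* Reassemble the two bytes returned by i2c_smbus_read_i2c_block_data(),
 * mirroring wl1273_fm_read_reg(). */
static u16 wl1273_unpack_be16(const u8 b[2])
{
        return (u16)b[0] << 8 | b[1];
}

In current kernel code the same transform is usually spelled put_unaligned_be16()/get_unaligned_be16() from asm/unaligned.h.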
vinay94185vinay/Hybrid
sound/pci/oxygen/xonar_hdmi.c
11025
3495
/* * helper functions for HDMI models (Xonar HDAV1.3/HDAV1.3 Slim) * * Copyright (c) Clemens Ladisch <clemens@ladisch.de> * * * This driver is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2. * * This driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this driver; if not, see <http://www.gnu.org/licenses/>. */ #include <linux/pci.h> #include <linux/delay.h> #include <sound/asoundef.h> #include <sound/control.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/tlv.h> #include "xonar.h" static void hdmi_write_command(struct oxygen *chip, u8 command, unsigned int count, const u8 *params) { unsigned int i; u8 checksum; oxygen_write_uart(chip, 0xfb); oxygen_write_uart(chip, 0xef); oxygen_write_uart(chip, command); oxygen_write_uart(chip, count); for (i = 0; i < count; ++i) oxygen_write_uart(chip, params[i]); checksum = 0xfb + 0xef + command + count; for (i = 0; i < count; ++i) checksum += params[i]; oxygen_write_uart(chip, checksum); } static void xonar_hdmi_init_commands(struct oxygen *chip, struct xonar_hdmi *hdmi) { u8 param; oxygen_reset_uart(chip); param = 0; hdmi_write_command(chip, 0x61, 1, &param); param = 1; hdmi_write_command(chip, 0x74, 1, &param); hdmi_write_command(chip, 0x54, 5, hdmi->params); } void xonar_hdmi_init(struct oxygen *chip, struct xonar_hdmi *hdmi) { hdmi->params[1] = IEC958_AES3_CON_FS_48000; hdmi->params[4] = 1; xonar_hdmi_init_commands(chip, hdmi); } void xonar_hdmi_cleanup(struct oxygen *chip) { u8 param = 0; hdmi_write_command(chip, 0x74, 1, &param); } void xonar_hdmi_resume(struct oxygen *chip, struct xonar_hdmi *hdmi) { xonar_hdmi_init_commands(chip, hdmi); } void xonar_hdmi_pcm_hardware_filter(unsigned int channel, struct snd_pcm_hardware *hardware) { if (channel == PCM_MULTICH) { hardware->rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_192000; hardware->rate_min = 44100; } } void xonar_set_hdmi_params(struct oxygen *chip, struct xonar_hdmi *hdmi, struct snd_pcm_hw_params *params) { hdmi->params[0] = 0; /* 1 = non-audio */ switch (params_rate(params)) { case 44100: hdmi->params[1] = IEC958_AES3_CON_FS_44100; break; case 48000: hdmi->params[1] = IEC958_AES3_CON_FS_48000; break; default: /* 96000 */ hdmi->params[1] = IEC958_AES3_CON_FS_96000; break; case 192000: hdmi->params[1] = IEC958_AES3_CON_FS_192000; break; } hdmi->params[2] = params_channels(params) / 2 - 1; if (params_format(params) == SNDRV_PCM_FORMAT_S16_LE) hdmi->params[3] = 0; else hdmi->params[3] = 0xc0; hdmi->params[4] = 1; /* ? */ hdmi_write_command(chip, 0x54, 5, hdmi->params); } void xonar_hdmi_uart_input(struct oxygen *chip) { if (chip->uart_input_count >= 2 && chip->uart_input[chip->uart_input_count - 2] == 'O' && chip->uart_input[chip->uart_input_count - 1] == 'K') { printk(KERN_DEBUG "message from HDMI chip received:\n"); print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, chip->uart_input, chip->uart_input_count); chip->uart_input_count = 0; } }
gpl-2.0
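hdmi_write_command() in sound/pci/oxygen/xonar_hdmi.c above frames every message to the HDMI chip as 0xfb, 0xef, command, count, the parameter bytes, and an 8-bit checksum that is the truncated sum of everything sent before it. A self-contained sketch of the same framing into a buffer (the helper name is made up for illustration):

#include <linux/types.h>

/* Build the UART frame used by hdmi_write_command(); returns the frame
 * length. 'out' must have room for count + 5 bytes. */
static unsigned int hdmi_build_frame(u8 *out, u8 command,
                                     unsigned int count, const u8 *params)
{
        unsigned int i, n = 0;
        u8 checksum;

        out[n++] = 0xfb;                /* fixed two-byte preamble */
        out[n++] = 0xef;
        out[n++] = command;
        out[n++] = count;
        for (i = 0; i < count; i++)
                out[n++] = params[i];

        checksum = 0xfb + 0xef + command + count;       /* mod-256 sum */
        for (i = 0; i < count; i++)
                checksum += params[i];
        out[n++] = checksum;

        return n;
}

Note the checksum is the truncated sum itself, not its two's complement, so a receiver re-computes the sum and compares rather than summing the whole frame to zero.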
TimesysGit/advantech-linux
drivers/uwb/i1480/i1480-est.c
13073
3167
/* * Intel Wireless UWB Link 1480 * Event Size tables for Wired Adaptors * * Copyright (C) 2005-2006 Intel Corporation * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * * FIXME: docs */ #include <linux/init.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/uwb.h> #include "dfu/i1480-dfu.h" /** Event size table for wEvents 0x00XX */ static struct uwb_est_entry i1480_est_fd00[] = { /* Anybody expecting this response has to use * neh->extra_size to specify the real size that will * come back. */ [i1480_EVT_CONFIRM] = { .size = sizeof(struct i1480_evt_confirm) }, [i1480_CMD_SET_IP_MAS] = { .size = sizeof(struct i1480_evt_confirm) }, #ifdef i1480_RCEB_EXTENDED [0x09] = { .size = sizeof(struct i1480_rceb), .offset = 1 + offsetof(struct i1480_rceb, wParamLength), }, #endif }; /** Event size table for wEvents 0x01XX */ static struct uwb_est_entry i1480_est_fd01[] = { [0xff & i1480_EVT_RM_INIT_DONE] = { .size = sizeof(struct i1480_rceb) }, [0xff & i1480_EVT_DEV_ADD] = { .size = sizeof(struct i1480_rceb) + 9 }, [0xff & i1480_EVT_DEV_RM] = { .size = sizeof(struct i1480_rceb) + 9 }, [0xff & i1480_EVT_DEV_ID_CHANGE] = { .size = sizeof(struct i1480_rceb) + 2 }, }; static int __init i1480_est_init(void) { int result = uwb_est_register(i1480_CET_VS1, 0x00, 0x8086, 0x0c3b, i1480_est_fd00, ARRAY_SIZE(i1480_est_fd00)); if (result < 0) { printk(KERN_ERR "Can't register EST table fd00: %d\n", result); return result; } result = uwb_est_register(i1480_CET_VS1, 0x01, 0x8086, 0x0c3b, i1480_est_fd01, ARRAY_SIZE(i1480_est_fd01)); if (result < 0) { printk(KERN_ERR "Can't register EST table fd01: %d\n", result); return result; } return 0; } module_init(i1480_est_init); static void __exit i1480_est_exit(void) { uwb_est_unregister(i1480_CET_VS1, 0x00, 0x8086, 0x0c3b, i1480_est_fd00, ARRAY_SIZE(i1480_est_fd00)); uwb_est_unregister(i1480_CET_VS1, 0x01, 0x8086, 0x0c3b, i1480_est_fd01, ARRAY_SIZE(i1480_est_fd01)); } module_exit(i1480_est_exit); MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>"); MODULE_DESCRIPTION("i1480's Vendor Specific Event Size Tables"); MODULE_LICENSE("GPL"); /** * USB device ID's that we handle * * [so we are loaded when this kind device is connected] */ static struct usb_device_id __used i1480_est_id_table[] = { { USB_DEVICE(0x8086, 0xdf3b), }, { USB_DEVICE(0x8086, 0x0c3b), }, { }, }; MODULE_DEVICE_TABLE(usb, i1480_est_id_table);
gpl-2.0
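The tables in drivers/uwb/i1480/i1480-est.c above follow the UWB event-size-table split: the high byte of an event code selects a registered table (0x00 or 0x01 here, via uwb_est_register()) and the low byte indexes into it, which is why the initialisers mask with 0xff. Below is a hedged sketch of that fixed-size lookup with invented names; entries carrying a .offset instead derive their length from a field inside the payload and are not covered here.

#include <linux/types.h>

/* Illustrative stand-in for a uwb_est_entry-style record. */
struct est_entry {
        size_t size;                    /* 0 = event code not in this table */
};

/* 'event' is the full 16-bit wEvent code; the caller picked 'table'
 * according to its high byte, so only the low byte indexes here. */
static ssize_t est_size_lookup(const struct est_entry *table, size_t entries,
                               u16 event)
{
        u8 idx = event & 0xff;

        if (idx >= entries || table[idx].size == 0)
                return -1;
        return table[idx].size;
}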
goodwinos/linux-2.6
drivers/input/mouse/logibm.c
14609
5174
/* * Copyright (c) 1999-2001 Vojtech Pavlik * * Based on the work of: * James Banks Matthew Dillon * David Giller Nathan Laredo * Linus Torvalds Johan Myreen * Cliff Matthews Philip Blundell * Russell King */ /* * Logitech Bus Mouse Driver for Linux */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <linux/module.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/input.h> #include <linux/interrupt.h> #include <asm/io.h> #include <asm/irq.h> MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION("Logitech busmouse driver"); MODULE_LICENSE("GPL"); #define LOGIBM_BASE 0x23c #define LOGIBM_EXTENT 4 #define LOGIBM_DATA_PORT LOGIBM_BASE + 0 #define LOGIBM_SIGNATURE_PORT LOGIBM_BASE + 1 #define LOGIBM_CONTROL_PORT LOGIBM_BASE + 2 #define LOGIBM_CONFIG_PORT LOGIBM_BASE + 3 #define LOGIBM_ENABLE_IRQ 0x00 #define LOGIBM_DISABLE_IRQ 0x10 #define LOGIBM_READ_X_LOW 0x80 #define LOGIBM_READ_X_HIGH 0xa0 #define LOGIBM_READ_Y_LOW 0xc0 #define LOGIBM_READ_Y_HIGH 0xe0 #define LOGIBM_DEFAULT_MODE 0x90 #define LOGIBM_CONFIG_BYTE 0x91 #define LOGIBM_SIGNATURE_BYTE 0xa5 #define LOGIBM_IRQ 5 static int logibm_irq = LOGIBM_IRQ; module_param_named(irq, logibm_irq, uint, 0); MODULE_PARM_DESC(irq, "IRQ number (5=default)"); static struct input_dev *logibm_dev; static irqreturn_t logibm_interrupt(int irq, void *dev_id) { char dx, dy; unsigned char buttons; outb(LOGIBM_READ_X_LOW, LOGIBM_CONTROL_PORT); dx = (inb(LOGIBM_DATA_PORT) & 0xf); outb(LOGIBM_READ_X_HIGH, LOGIBM_CONTROL_PORT); dx |= (inb(LOGIBM_DATA_PORT) & 0xf) << 4; outb(LOGIBM_READ_Y_LOW, LOGIBM_CONTROL_PORT); dy = (inb(LOGIBM_DATA_PORT) & 0xf); outb(LOGIBM_READ_Y_HIGH, LOGIBM_CONTROL_PORT); buttons = inb(LOGIBM_DATA_PORT); dy |= (buttons & 0xf) << 4; buttons = ~buttons >> 5; input_report_rel(logibm_dev, REL_X, dx); input_report_rel(logibm_dev, REL_Y, dy); input_report_key(logibm_dev, BTN_RIGHT, buttons & 1); input_report_key(logibm_dev, BTN_MIDDLE, buttons & 2); input_report_key(logibm_dev, BTN_LEFT, buttons & 4); input_sync(logibm_dev); outb(LOGIBM_ENABLE_IRQ, LOGIBM_CONTROL_PORT); return IRQ_HANDLED; } static int logibm_open(struct input_dev *dev) { if (request_irq(logibm_irq, logibm_interrupt, 0, "logibm", NULL)) { printk(KERN_ERR "logibm.c: Can't allocate irq %d\n", logibm_irq); return -EBUSY; } outb(LOGIBM_ENABLE_IRQ, LOGIBM_CONTROL_PORT); return 0; } static void logibm_close(struct input_dev *dev) { outb(LOGIBM_DISABLE_IRQ, LOGIBM_CONTROL_PORT); free_irq(logibm_irq, NULL); } static int __init logibm_init(void) { int err; if (!request_region(LOGIBM_BASE, LOGIBM_EXTENT, "logibm")) { printk(KERN_ERR "logibm.c: Can't allocate ports at %#x\n", 
LOGIBM_BASE); return -EBUSY; } outb(LOGIBM_CONFIG_BYTE, LOGIBM_CONFIG_PORT); outb(LOGIBM_SIGNATURE_BYTE, LOGIBM_SIGNATURE_PORT); udelay(100); if (inb(LOGIBM_SIGNATURE_PORT) != LOGIBM_SIGNATURE_BYTE) { printk(KERN_INFO "logibm.c: Didn't find Logitech busmouse at %#x\n", LOGIBM_BASE); err = -ENODEV; goto err_release_region; } outb(LOGIBM_DEFAULT_MODE, LOGIBM_CONFIG_PORT); outb(LOGIBM_DISABLE_IRQ, LOGIBM_CONTROL_PORT); logibm_dev = input_allocate_device(); if (!logibm_dev) { printk(KERN_ERR "logibm.c: Not enough memory for input device\n"); err = -ENOMEM; goto err_release_region; } logibm_dev->name = "Logitech bus mouse"; logibm_dev->phys = "isa023c/input0"; logibm_dev->id.bustype = BUS_ISA; logibm_dev->id.vendor = 0x0003; logibm_dev->id.product = 0x0001; logibm_dev->id.version = 0x0100; logibm_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); logibm_dev->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT); logibm_dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y); logibm_dev->open = logibm_open; logibm_dev->close = logibm_close; err = input_register_device(logibm_dev); if (err) goto err_free_dev; return 0; err_free_dev: input_free_device(logibm_dev); err_release_region: release_region(LOGIBM_BASE, LOGIBM_EXTENT); return err; } static void __exit logibm_exit(void) { input_unregister_device(logibm_dev); release_region(LOGIBM_BASE, LOGIBM_EXTENT); } module_init(logibm_init); module_exit(logibm_exit);
gpl-2.0
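The interrupt handler in drivers/input/mouse/logibm.c above reconstructs each movement delta from two 4-bit data-port reads and folds the button state out of the top three (active-low) bits of the final read. The same decode, extracted into a pure function over already-captured register values (names invented for illustration):

#include <linux/types.h>

struct logibm_sample {
        s8 dx, dy;                      /* signed 8-bit deltas, as in the ISR */
        bool left, middle, right;
};

/* x_lo/x_hi/y_lo are the low nibbles of three data-port reads; 'last' is
 * the fourth read, whose low nibble is dy's high half and whose bits 5-7
 * carry the buttons, active-low (cf. logibm_interrupt() above). */
static struct logibm_sample logibm_decode(u8 x_lo, u8 x_hi, u8 y_lo, u8 last)
{
        u8 btn = (~last >> 5) & 0x7;
        struct logibm_sample s = {
                .dx     = (s8)((x_lo & 0xf) | (x_hi & 0xf) << 4),
                .dy     = (s8)((y_lo & 0xf) | (last & 0xf) << 4),
                .right  = btn & 1,
                .middle = btn & 2,
                .left   = btn & 4,
        };
        return s;
}

Using s8 makes explicit the two's-complement interpretation that the ISR obtains implicitly by declaring dx and dy as plain char, which is signed on the ISA-era machines this driver targets.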
n3ocort3x/Kernel_2.6.39.4_one_x
drivers/video/tegra/host/nvhost_acm.c
18
12534
/* * drivers/video/tegra/host/nvhost_acm.c * * Tegra Graphics Host Automatic Clock Management * * Copyright (c) 2010-2011, NVIDIA Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "nvhost_acm.h" #include "dev.h" #include <linux/slab.h> #include <linux/string.h> #include <linux/sched.h> #include <linux/err.h> #include <linux/device.h> #include <linux/delay.h> #include <mach/powergate.h> #include <mach/clk.h> #include <mach/hardware.h> #define ACM_SUSPEND_WAIT_FOR_IDLE_TIMEOUT (2 * HZ) #define POWERGATE_DELAY 10 #define MAX_DEVID_LENGTH 16 DEFINE_MUTEX(client_list_lock); struct nvhost_module_client { struct list_head node; unsigned long rate[NVHOST_MODULE_MAX_CLOCKS]; void *priv; }; static void do_powergate_locked(int id) { if (id != -1 && tegra_powergate_is_powered(id)) tegra_powergate_partition(id); } static void do_unpowergate_locked(int id) { if (id != -1) tegra_unpowergate_partition(id); } void nvhost_module_reset(struct device *dev, struct nvhost_module *mod) { dev_dbg(dev, "%s: asserting %s module reset (id %d, id2 %d)\n", __func__, mod->name, mod->desc->powergate_ids[0], mod->desc->powergate_ids[1]); mutex_lock(&mod->lock); /* assert module and mc client reset */ if (mod->desc->powergate_ids[0] != -1) { tegra_powergate_mc_disable(mod->desc->powergate_ids[0]); tegra_periph_reset_assert(mod->clk[0]); tegra_powergate_mc_flush(mod->desc->powergate_ids[0]); } if (mod->desc->powergate_ids[1] != -1) { tegra_powergate_mc_disable(mod->desc->powergate_ids[1]); tegra_periph_reset_assert(mod->clk[1]); tegra_powergate_mc_flush(mod->desc->powergate_ids[1]); } udelay(POWERGATE_DELAY); /* deassert reset */ if (mod->desc->powergate_ids[0] != -1) { tegra_powergate_mc_flush_done(mod->desc->powergate_ids[0]); tegra_periph_reset_deassert(mod->clk[0]); tegra_powergate_mc_enable(mod->desc->powergate_ids[0]); } if (mod->desc->powergate_ids[1] != -1) { tegra_powergate_mc_flush_done(mod->desc->powergate_ids[1]); tegra_periph_reset_deassert(mod->clk[1]); tegra_powergate_mc_enable(mod->desc->powergate_ids[1]); } mutex_unlock(&mod->lock); dev_dbg(dev, "%s: module %s out of reset\n", __func__, mod->name); } static void to_state_clockgated_locked(struct nvhost_module *mod) { const struct nvhost_moduledesc *desc = mod->desc; if (mod->powerstate == NVHOST_POWER_STATE_RUNNING) { int i; for (i = 0; i < mod->num_clks; i++) clk_disable(mod->clk[i]); if (mod->parent) nvhost_module_idle(mod->parent); } else if (mod->powerstate == NVHOST_POWER_STATE_POWERGATED && mod->desc->can_powergate) { do_unpowergate_locked(desc->powergate_ids[0]); do_unpowergate_locked(desc->powergate_ids[1]); } mod->powerstate = NVHOST_POWER_STATE_CLOCKGATED; } static void to_state_running_locked(struct nvhost_module *mod) { int prev_state = mod->powerstate; if (mod->powerstate == NVHOST_POWER_STATE_POWERGATED) to_state_clockgated_locked(mod); if (mod->powerstate == 
NVHOST_POWER_STATE_CLOCKGATED) { int i; if (mod->parent) nvhost_module_busy(mod->parent); for (i = 0; i < mod->num_clks; i++) { int err = clk_enable(mod->clk[i]); BUG_ON(err); } if (prev_state == NVHOST_POWER_STATE_POWERGATED && mod->desc->finalize_poweron) mod->desc->finalize_poweron(mod); } mod->powerstate = NVHOST_POWER_STATE_RUNNING; } /* This gets called from powergate_handler() and from module suspend. * Module suspend is done for all modules, runtime power gating only * for modules with can_powergate set. */ static int to_state_powergated_locked(struct nvhost_module *mod) { int err = 0; if (mod->desc->prepare_poweroff && mod->powerstate != NVHOST_POWER_STATE_POWERGATED) { /* Clock needs to be on in prepare_poweroff */ to_state_running_locked(mod); err = mod->desc->prepare_poweroff(mod); if (err) return err; } if (mod->powerstate == NVHOST_POWER_STATE_RUNNING) to_state_clockgated_locked(mod); if (mod->desc->can_powergate) { do_powergate_locked(mod->desc->powergate_ids[0]); do_powergate_locked(mod->desc->powergate_ids[1]); } mod->powerstate = NVHOST_POWER_STATE_POWERGATED; return 0; } static void schedule_powergating_locked(struct nvhost_module *mod) { if (mod->desc->can_powergate) schedule_delayed_work(&mod->powerstate_down, msecs_to_jiffies(mod->desc->powergate_delay)); } static void schedule_clockgating_locked(struct nvhost_module *mod) { schedule_delayed_work(&mod->powerstate_down, msecs_to_jiffies(mod->desc->clockgate_delay)); } void nvhost_module_busy(struct nvhost_module *mod) { if (mod->desc->busy) mod->desc->busy(mod); mutex_lock(&mod->lock); cancel_delayed_work(&mod->powerstate_down); mod->refcount++; if (mod->refcount > 0 && !nvhost_module_powered(mod)) to_state_running_locked(mod); mutex_unlock(&mod->lock); } static void powerstate_down_handler(struct work_struct *work) { struct nvhost_module *mod; mod = container_of(to_delayed_work(work), struct nvhost_module, powerstate_down); mutex_lock(&mod->lock); if (mod->refcount == 0) { switch (mod->powerstate) { case NVHOST_POWER_STATE_RUNNING: to_state_clockgated_locked(mod); schedule_powergating_locked(mod); break; case NVHOST_POWER_STATE_CLOCKGATED: if (to_state_powergated_locked(mod)) schedule_powergating_locked(mod); break; default: break; } } mutex_unlock(&mod->lock); } void nvhost_module_idle_mult(struct nvhost_module *mod, int refs) { bool kick = false; mutex_lock(&mod->lock); mod->refcount -= refs; if (mod->refcount == 0) { if (nvhost_module_powered(mod)) schedule_clockgating_locked(mod); kick = true; } mutex_unlock(&mod->lock); if (kick) { wake_up(&mod->idle); if (mod->desc->idle) mod->desc->idle(mod); } } int nvhost_module_get_rate(struct nvhost_master *host, struct nvhost_module *mod, unsigned long *rate, int index) { struct clk *c; c = mod->clk[index]; if (IS_ERR_OR_NULL(c)) return -EINVAL; /* Need to enable client to get correct rate */ nvhost_module_busy(mod); *rate = clk_get_rate(c); nvhost_module_idle(mod); return 0; } static int nvhost_module_update_rate(struct nvhost_module *mod, int index) { unsigned long rate = 0; struct nvhost_module_client *m; if (!mod->clk[index]) return -EINVAL; list_for_each_entry(m, &mod->client_list, node) { rate = max(m->rate[index], rate); } if (!rate) rate = clk_round_rate(mod->clk[index], mod->desc->clocks[index].default_rate); return clk_set_rate(mod->clk[index], rate); } int nvhost_module_set_rate(struct nvhost_master *host, struct nvhost_module *mod, void *priv, unsigned long rate, int index) { struct nvhost_module_client *m; int ret; mutex_lock(&client_list_lock); 
list_for_each_entry(m, &mod->client_list, node) { if (m->priv == priv) { rate = clk_round_rate(mod->clk[index], rate); m->rate[index] = rate; break; } } ret = nvhost_module_update_rate(mod, index); mutex_unlock(&client_list_lock); return ret; } int nvhost_module_add_client(struct nvhost_master *host, struct nvhost_module *mod, void *priv) { int i; unsigned long rate; struct nvhost_module_client *client; client = kzalloc(sizeof(*client), GFP_KERNEL); if (!client) return -ENOMEM; INIT_LIST_HEAD(&client->node); client->priv = priv; for (i = 0; i < mod->num_clks; i++) { rate = clk_round_rate(mod->clk[i], mod->desc->clocks[i].default_rate); client->rate[i] = rate; } mutex_lock(&client_list_lock); list_add_tail(&client->node, &mod->client_list); mutex_unlock(&client_list_lock); return 0; } void nvhost_module_remove_client(struct nvhost_master *host, struct nvhost_module *mod, void *priv) { int i; struct nvhost_module_client *m; mutex_lock(&client_list_lock); list_for_each_entry(m, &mod->client_list, node) { if (priv == m->priv) { list_del(&m->node); break; } } if (m) { kfree(m); for (i = 0; i < mod->num_clks; i++) nvhost_module_update_rate(mod, i); } mutex_unlock(&client_list_lock); } void nvhost_module_preinit(const char *name, const struct nvhost_moduledesc *desc) { int i = 0; /* initialize clocks to known state */ while (i < NVHOST_MODULE_MAX_CLOCKS && desc->clocks[i].name) { char devname[MAX_DEVID_LENGTH]; long rate = desc->clocks[i].default_rate; struct clk *c; snprintf(devname, MAX_DEVID_LENGTH, "tegra_%s", name); c = clk_get_sys(devname, desc->clocks[i].name); BUG_ON(IS_ERR_OR_NULL(c)); rate = clk_round_rate(c, rate); clk_enable(c); clk_set_rate(c, rate); clk_disable(c); i++; } if (desc->can_powergate) { do_powergate_locked(desc->powergate_ids[0]); do_powergate_locked(desc->powergate_ids[1]); } else { do_unpowergate_locked(desc->powergate_ids[0]); do_unpowergate_locked(desc->powergate_ids[1]); } } int nvhost_module_init(struct nvhost_module *mod, const char *name, const struct nvhost_moduledesc *desc, struct nvhost_module *parent, struct device *dev) { int i = 0; int err; /* register to kernel */ mod->drv.driver.name = name; mod->drv.driver.owner = THIS_MODULE; err = nvhost_driver_register(&mod->drv); if (err) return err; nvhost_module_preinit(name, desc); mod->name = name; INIT_LIST_HEAD(&mod->client_list); while (desc->clocks[i].name && i < NVHOST_MODULE_MAX_CLOCKS) { char devname[MAX_DEVID_LENGTH]; snprintf(devname, MAX_DEVID_LENGTH, "tegra_%s", name); mod->clk[i] = clk_get_sys(devname, desc->clocks[i].name); BUG_ON(IS_ERR_OR_NULL(mod->clk[i])); i++; } mod->num_clks = i; mod->desc = desc; mod->parent = parent; mutex_init(&mod->lock); init_waitqueue_head(&mod->idle); INIT_DELAYED_WORK(&mod->powerstate_down, powerstate_down_handler); if (desc->can_powergate) { mod->powerstate = NVHOST_POWER_STATE_POWERGATED; } else { mod->powerstate = NVHOST_POWER_STATE_CLOCKGATED; } if (desc->init) desc->init(dev, mod); return 0; } static int is_module_idle(struct nvhost_module *mod) { int count; mutex_lock(&mod->lock); count = mod->refcount; mutex_unlock(&mod->lock); return (count == 0); } static void debug_not_idle(struct nvhost_master *dev) { int i; bool lock_released = true; for (i = 0; i < dev->nb_channels; i++) { struct nvhost_module *mod = &dev->channels[i].mod; mutex_lock(&mod->lock); if (mod->name) dev_warn(&dev->pdev->dev, "tegra_grhost: %s: refcnt %d\n", mod->name, mod->refcount); mutex_unlock(&mod->lock); } for (i = 0; i < dev->nb_mlocks; i++) { int c = 
atomic_read(&dev->cpuaccess.lock_counts[i]); if (c) { dev_warn(&dev->pdev->dev, "tegra_grhost: lock id %d: refcnt %d\n", i, c); lock_released = false; } } if (lock_released) dev_dbg(&dev->pdev->dev, "tegra_grhost: all locks released\n"); } int nvhost_module_suspend(struct nvhost_module *mod, bool system_suspend) { int ret; struct nvhost_master *dev; if (system_suspend) { dev = container_of(mod, struct nvhost_master, mod); if (!is_module_idle(mod)) debug_not_idle(dev); } else { dev = container_of(mod, struct nvhost_channel, mod)->dev; } ret = wait_event_timeout(mod->idle, is_module_idle(mod), ACM_SUSPEND_WAIT_FOR_IDLE_TIMEOUT); if (ret == 0) { dev_info(&dev->pdev->dev, "%s prevented suspend\n", mod->name); return -EBUSY; } if (system_suspend) dev_dbg(&dev->pdev->dev, "tegra_grhost: entered idle\n"); mutex_lock(&mod->lock); cancel_delayed_work(&mod->powerstate_down); to_state_powergated_locked(mod); mutex_unlock(&mod->lock); if (mod->desc->suspend) mod->desc->suspend(mod); return 0; } void nvhost_module_deinit(struct device *dev, struct nvhost_module *mod) { int i; nvhost_driver_unregister(&mod->drv); if (mod->desc->deinit) mod->desc->deinit(dev, mod); nvhost_module_suspend(mod, false); for (i = 0; i < mod->num_clks; i++) clk_put(mod->clk[i]); mod->powerstate = NVHOST_POWER_STATE_DEINIT; }
gpl-2.0
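nvhost_acm.c above walks each engine down a three-state power ladder, RUNNING -> CLOCKGATED -> POWERGATED, driven by a refcount plus delayed work for the two downward hops. The condensed simulation below (invented names, no locks or work queues) shows only the ordering the driver preserves: power is ungated before clocks come up, and clocks are gated before the partition is power-gated:

#include <assert.h>

enum powerstate { POWERGATED, CLOCKGATED, RUNNING };

struct module_sim {
	enum powerstate state;
	int refcount;
	int can_powergate;
};

static void to_clockgated(struct module_sim *m)
{
	/* coming up: the partition is unpowergated first;
	 * going down: the clocks are simply stopped */
	m->state = CLOCKGATED;
}

static void to_running(struct module_sim *m)
{
	if (m->state == POWERGATED)
		to_clockgated(m);	/* ungate power before touching clocks */
	m->state = RUNNING;		/* now the clocks may be enabled */
}

static void busy(struct module_sim *m)	/* cf. nvhost_module_busy() */
{
	if (m->refcount++ == 0 && m->state != RUNNING)
		to_running(m);
}

static void idle(struct module_sim *m)	/* cf. nvhost_module_idle_mult() */
{
	assert(m->refcount > 0);
	if (--m->refcount == 0) {
		to_clockgated(m);		/* after clockgate_delay */
		if (m->can_powergate)
			m->state = POWERGATED;	/* after powergate_delay */
	}
}

int main(void)
{
	struct module_sim m = { POWERGATED, 0, 1 };

	busy(&m);           assert(m.state == RUNNING);
	busy(&m); idle(&m); assert(m.state == RUNNING);	   /* still referenced */
	idle(&m);           assert(m.state == POWERGATED); /* fully idle */
	return 0;
}

The real transitions additionally run the prepare_poweroff/finalize_poweron hooks with clocks on, which is why to_state_powergated_locked() briefly drives the module back to RUNNING before gating it.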
jrior001/android_kernel_asus_Z00D
drivers/external_drivers/camera/drivers/media/pci/atomisp2/atomisp_driver_css15/hmm/hmm_bo_dev.c
18
8370
/* * Support for Medifield PNW Camera Imaging ISP subsystem. * * Copyright (c) 2010 Intel Corporation. All Rights Reserved. * * Copyright (c) 2010 Silicon Hive www.siliconhive.com. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/gfp.h> #include <linux/mm.h> /* for GFP_ATOMIC */ #include <linux/slab.h> /* for kmalloc */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/string.h> #include <linux/list.h> #include <linux/errno.h> #ifdef CONFIG_ION_FOR_CAMERA #include <linux/ion.h> #endif #include "atomisp_internal.h" #include "hmm/hmm_common.h" #include "hmm/hmm_bo_dev.h" #include "hmm/hmm_bo.h" /* * hmm_bo_device functions. */ int hmm_bo_device_init(struct hmm_bo_device *bdev, struct isp_mmu_client *mmu_driver, unsigned int vaddr_start, unsigned int size) { int ret; check_bodev_null_return(bdev, -EINVAL); ret = isp_mmu_init(&bdev->mmu, mmu_driver); if (ret) { dev_err(atomisp_dev, "isp_mmu_init failed.\n"); goto isp_mmu_init_err; } ret = hmm_vm_init(&bdev->vaddr_space, vaddr_start, size); if (ret) { dev_err(atomisp_dev, "hmm_vm_init falied. " "vaddr_start = 0x%x, size = %d\n", vaddr_start, size); goto vm_init_err; } INIT_LIST_HEAD(&bdev->free_bo_list); INIT_LIST_HEAD(&bdev->active_bo_list); spin_lock_init(&bdev->list_lock); #ifdef CONFIG_ION_FOR_CAMERA /* * TODO: * The ion_dev should be defined by ION driver. But ION driver does * not implement it yet, will fix it when it is ready. */ if (!ion_dev) goto vm_init_err; bdev->iclient = ion_client_create(ion_dev, "atomisp"); if (IS_ERR_OR_NULL(bdev->iclient)) { ret = PTR_ERR(bdev->iclient); if (!bdev->iclient) ret = -EINVAL; goto vm_init_err; } #endif bdev->flag = HMM_BO_DEVICE_INITED; return 0; vm_init_err: isp_mmu_exit(&bdev->mmu); isp_mmu_init_err: return ret; } void hmm_bo_device_exit(struct hmm_bo_device *bdev) { check_bodev_null_return_void(bdev); /* * destroy all bos in the bo list, even they are in use. */ if (!list_empty(&bdev->active_bo_list)) dev_warn(atomisp_dev, "there're still activated bo in use. " "force to free them.\n"); while (!list_empty(&bdev->active_bo_list)) hmm_bo_unref(list_to_hmm_bo(bdev->active_bo_list.next)); if (!list_empty(&bdev->free_bo_list)) dev_warn(atomisp_dev, "there're still bo in free_bo_list. " "force to free them.\n"); while (!list_empty(&bdev->free_bo_list)) hmm_bo_unref(list_to_hmm_bo(bdev->free_bo_list.next)); isp_mmu_exit(&bdev->mmu); hmm_vm_clean(&bdev->vaddr_space); #ifdef CONFIG_ION_FOR_CAMERA if (bdev->iclient != NULL) ion_client_destroy(bdev->iclient); #endif } void hmm_bo_device_cleanup_mmu_l2(struct hmm_bo_device *bdev) { check_bodev_null_return_void(bdev); isp_mmu_clean_l2(&bdev->mmu); } int hmm_bo_device_inited(struct hmm_bo_device *bdev) { check_bodev_null_return(bdev, -EINVAL); return bdev->flag == HMM_BO_DEVICE_INITED; } /* * find the buffer object with virtual address vaddr. 
* return NULL if no such buffer object found. */ struct hmm_buffer_object *hmm_bo_device_search_start(struct hmm_bo_device *bdev, ia_css_ptr vaddr) { struct list_head *pos; struct hmm_buffer_object *bo; unsigned long flags; check_bodev_null_return(bdev, NULL); spin_lock_irqsave(&bdev->list_lock, flags); list_for_each(pos, &bdev->active_bo_list) { bo = list_to_hmm_bo(pos); /* pass bo which has no vm_node allocated */ if (!hmm_bo_vm_allocated(bo)) continue; if (bo->vm_node->start == vaddr) goto found; } spin_unlock_irqrestore(&bdev->list_lock, flags); return NULL; found: spin_unlock_irqrestore(&bdev->list_lock, flags); return bo; } static int in_range(unsigned int start, unsigned int size, unsigned int addr) { return (start <= addr) && (start + size > addr); } struct hmm_buffer_object *hmm_bo_device_search_in_range(struct hmm_bo_device *bdev, unsigned int vaddr) { struct list_head *pos; struct hmm_buffer_object *bo; unsigned long flags; check_bodev_null_return(bdev, NULL); spin_lock_irqsave(&bdev->list_lock, flags); list_for_each(pos, &bdev->active_bo_list) { bo = list_to_hmm_bo(pos); /* pass bo which has no vm_node allocated */ if (!hmm_bo_vm_allocated(bo)) continue; if (in_range(bo->vm_node->start, bo->vm_node->size, vaddr)) goto found; } spin_unlock_irqrestore(&bdev->list_lock, flags); return NULL; found: spin_unlock_irqrestore(&bdev->list_lock, flags); return bo; } struct hmm_buffer_object * hmm_bo_device_search_vmap_start(struct hmm_bo_device *bdev, const void *vaddr) { struct list_head *pos; struct hmm_buffer_object *bo; unsigned long flags; check_bodev_null_return(bdev, NULL); spin_lock_irqsave(&bdev->list_lock, flags); list_for_each(pos, &bdev->active_bo_list) { bo = list_to_hmm_bo(pos); /* pass bo which has no vm_node allocated */ if (!hmm_bo_vm_allocated(bo)) continue; if (bo->vmap_addr == vaddr) goto found; } spin_unlock_irqrestore(&bdev->list_lock, flags); return NULL; found: spin_unlock_irqrestore(&bdev->list_lock, flags); return bo; } /* * find a buffer object with pgnr pages from free_bo_list and * activate it (remove from free_bo_list and add to * active_bo_list) * * return NULL if no such buffer object found. */ struct hmm_buffer_object *hmm_bo_device_get_bo(struct hmm_bo_device *bdev, unsigned int pgnr) { struct list_head *pos; struct hmm_buffer_object *bo; unsigned long flags; check_bodev_null_return(bdev, NULL); spin_lock_irqsave(&bdev->list_lock, flags); list_for_each(pos, &bdev->free_bo_list) { bo = list_to_hmm_bo(pos); if (bo->pgnr == pgnr) goto found; } spin_unlock_irqrestore(&bdev->list_lock, flags); return NULL; found: list_del(&bo->list); list_add(&bo->list, &bdev->active_bo_list); spin_unlock_irqrestore(&bdev->list_lock, flags); return bo; } /* * destroy all buffer objects in the free_bo_list. */ void hmm_bo_device_destroy_free_bo_list(struct hmm_bo_device *bdev) { struct hmm_buffer_object *bo, *tmp; unsigned long flags; struct list_head new_head; check_bodev_null_return_void(bdev); spin_lock_irqsave(&bdev->list_lock, flags); list_replace_init(&bdev->free_bo_list, &new_head); spin_unlock_irqrestore(&bdev->list_lock, flags); list_for_each_entry_safe(bo, tmp, &new_head, list) { list_del(&bo->list); hmm_bo_unref(bo); } } /* * destroy buffer object with start virtual address vaddr. 
*/ void hmm_bo_device_destroy_free_bo_addr(struct hmm_bo_device *bdev, unsigned int vaddr) { struct list_head *pos; struct hmm_buffer_object *bo; unsigned long flags; check_bodev_null_return_void(bdev); spin_lock_irqsave(&bdev->list_lock, flags); list_for_each(pos, &bdev->free_bo_list) { bo = list_to_hmm_bo(pos); /* pass bo which has no vm_node allocated */ if (!hmm_bo_vm_allocated(bo)) continue; if (bo->vm_node->start == vaddr) goto found; } spin_unlock_irqrestore(&bdev->list_lock, flags); return; found: list_del(&bo->list); spin_unlock_irqrestore(&bdev->list_lock, flags); hmm_bo_unref(bo); } /* * destroy all buffer objects with pgnr pages. */ void hmm_bo_device_destroy_free_bo_size(struct hmm_bo_device *bdev, unsigned int pgnr) { struct list_head *pos; struct hmm_buffer_object *bo; unsigned long flags; check_bodev_null_return_void(bdev); retry: spin_lock_irqsave(&bdev->list_lock, flags); list_for_each(pos, &bdev->free_bo_list) { bo = list_to_hmm_bo(pos); if (bo->pgnr == pgnr) goto found; } spin_unlock_irqrestore(&bdev->list_lock, flags); return; found: list_del(&bo->list); spin_unlock_irqrestore(&bdev->list_lock, flags); hmm_bo_unref(bo); goto retry; }
gpl-2.0
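hmm_bo_device_search_in_range() above hinges on the half-open interval test in in_range(): an address belongs to a buffer object when start <= addr < start + size, so start + size itself is already outside. A standalone check of the boundary cases (the addresses are hypothetical):

#include <assert.h>

static int in_range(unsigned int start, unsigned int size, unsigned int addr)
{
	return (start <= addr) && (start + size > addr);
}

int main(void)
{
	unsigned int start = 0x1000, size = 0x2000;	/* hypothetical vm_node */

	assert(in_range(start, size, 0x1000));	/* first byte: inside */
	assert(in_range(start, size, 0x2fff));	/* last byte: inside */
	assert(!in_range(start, size, 0x3000));	/* one past the end: outside */
	assert(!in_range(start, size, 0x0fff));	/* before the start: outside */
	return 0;
}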
thicklizard/komodo-revisited
arch/arm/mach-msm/htc/jet/display/mipi_jet_720p_pt.c
18
7190
#include "../../../drivers/video/msm/msm_fb.h" #include "../../../drivers/video/msm/mipi_dsi.h" #include "mipi_jet.h" #include <mach/panel_id.h> static struct mipi_dsi_phy_ctrl nova_dsi_video_mode_phy_db = { /* DSI_BIT_CLK at 569MHz, 3 lane, RGB888 */ /* regulator *//* off=0x0500 */ {0x03, 0x08, 0x05, 0x00, 0x20}, /* timing *//* off=0x0440 */ {0x9B, 0x38, 0x18, 0x00, 0x4B, 0x51, 0x1C, 0x3B, 0x29, 0x03, 0x04, 0xA0}, /* phy ctrl *//* off=0x0470 */ {0x5F, 0x00, 0x00, 0x10}, /* strength *//* off=0x0480 */ {0xFF, 0x00, 0x06, 0x00}, /* pll control *//* off=0x0204 */ {0x0, 0x38, 0x32, 0xDA, 0x00, 0x10, 0x0F, 0x61, 0x41, 0x0F, 0x01, 0x00, 0x1A, 0x00, 0x00, 0x02, 0x00, 0x20, 0x00, 0x02 }, }; static struct msm_panel_info pinfo; static int __init mipi_video_auo_hd720p_init(void) { int ret; #ifdef JEL_CMD_MODE_PANEL printk(KERN_INFO "%s: CMD mode (AL)\n", __func__); pinfo.type = MIPI_CMD_PANEL; pinfo.mipi.mode = DSI_CMD_MODE; pinfo.mipi.dst_format = DSI_CMD_DST_FORMAT_RGB888; /*pinfo.mipi.mdp_trigger = DSI_CMD_TRIGGER_NONE;*/ pinfo.mipi.mdp_trigger = DSI_CMD_TRIGGER_SW; #ifdef CONFIG_FB_MSM_SELF_REFRESH jet_panel_data.self_refresh_switch = NULL; /* CMD or VIDEO mode only */ #endif pinfo.lcd.vsync_enable = TRUE; pinfo.lcd.hw_vsync_mode = TRUE; pinfo.lcd.refx100 = 6096; /* adjust refx100 to prevent tearing */ pinfo.mipi.te_sel = 1; /* TE from vsycn gpio */ pinfo.mipi.interleave_max = 1; pinfo.mipi.insert_dcs_cmd = TRUE; pinfo.mipi.wr_mem_continue = 0x3c; pinfo.mipi.wr_mem_start = 0x2c; #else pinfo.type = MIPI_VIDEO_PANEL;/*MIPI_VIDEO_PANEL;*/ pinfo.mipi.mode = DSI_VIDEO_MODE; pinfo.mipi.dst_format = DSI_VIDEO_DST_FORMAT_RGB888; pinfo.mipi.mdp_trigger = DSI_CMD_TRIGGER_SW; #ifdef CONFIG_FB_MSM_SELF_REFRESH printk(KERN_INFO "%s: VIDEO mode (AL)\n", __func__); jet_panel_data.self_refresh_switch = NULL; /* CMD or VIDEO mode only */ #else printk(KERN_INFO "%s: SWITCH mode (AL)\n", __func__); #endif pinfo.mipi.pulse_mode_hsa_he = TRUE; pinfo.mipi.hfp_power_stop = TRUE; pinfo.mipi.hbp_power_stop = TRUE; pinfo.mipi.hsa_power_stop = TRUE; pinfo.mipi.eof_bllp_power_stop = TRUE; pinfo.mipi.bllp_power_stop = TRUE; pinfo.mipi.traffic_mode = DSI_NON_BURST_SYNCH_PULSE; #endif pinfo.xres = 720; pinfo.yres = 1280; pinfo.pdest = DISPLAY_1; pinfo.wait_cycle = 0; pinfo.bpp = 24; pinfo.lcdc.h_back_porch = 104; /* 660Mhz: 116 */ pinfo.lcdc.h_front_porch = 95; /* 660Mhz: 184 */ pinfo.lcdc.h_pulse_width = 1; /* 660Mhz: 24 */ pinfo.lcdc.v_back_porch = 2; pinfo.lcdc.v_front_porch = 6; pinfo.lcdc.v_pulse_width = 1; pinfo.lcd.v_back_porch = 2; pinfo.lcd.v_front_porch = 6; pinfo.lcd.v_pulse_width = 1; pinfo.lcd.primary_vsync_init = pinfo.yres; pinfo.lcd.primary_rdptr_irq = 0; pinfo.lcd.primary_start_pos = pinfo.yres + pinfo.lcd.v_back_porch + pinfo.lcd.v_front_porch - 1; pinfo.lcdc.border_clr = 0; /* blk */ pinfo.lcdc.underflow_clr = 0xff; /* blue */ pinfo.lcdc.hsync_skew = 0; pinfo.bl_max = 255; pinfo.bl_min = 1; pinfo.fb_num = 2; /*pinfo.clk_rate = 742500000;*/ /*pinfo.clk_rate = 482000000;*/ pinfo.clk_rate = 569000000; pinfo.mipi.vc = 0; pinfo.mipi.rgb_swap = DSI_RGB_SWAP_RGB; pinfo.mipi.data_lane0 = TRUE; pinfo.mipi.data_lane1 = TRUE; pinfo.mipi.data_lane2 = TRUE; pinfo.mipi.tx_eot_append = TRUE; pinfo.mipi.t_clk_post = 0x10; /* 660Mhz: 10 */ pinfo.mipi.t_clk_pre = 0x21; /* 660Mhz: 30 */ pinfo.mipi.stream = 0; /* dma_p */ pinfo.mipi.dma_trigger = DSI_CMD_TRIGGER_SW; pinfo.mipi.frame_rate = 63; pinfo.mipi.dsi_phy_db = &nova_dsi_video_mode_phy_db; ret = mipi_jet_device_register(&pinfo, MIPI_DSI_PRIM, MIPI_DSI_PANEL_WVGA_PT); if 
(ret) printk(KERN_ERR "%s: failed to register device!\n", __func__); return ret; } static int __init mipi_video_sony_hd720p_init(void) { int ret; /* 1:VIDEO MODE, 0:CMD MODE */ #ifdef JEL_CMD_MODE_PANEL printk(KERN_INFO "%s: CMD mode (AL)\n", __func__); pinfo.type = MIPI_CMD_PANEL; pinfo.mipi.mode = DSI_CMD_MODE; pinfo.mipi.dst_format = DSI_CMD_DST_FORMAT_RGB888; /*pinfo.mipi.mdp_trigger = DSI_CMD_TRIGGER_NONE;*/ pinfo.mipi.mdp_trigger = DSI_CMD_TRIGGER_SW; #ifdef CONFIG_FB_MSM_SELF_REFRESH jet_panel_data.self_refresh_switch = NULL; /* CMD or VIDEO mode only */ #endif pinfo.lcd.vsync_enable = TRUE; pinfo.lcd.hw_vsync_mode = TRUE; pinfo.lcd.refx100 = 5700; /* adjust refx100 to prevent tearing */ pinfo.mipi.te_sel = 1; /* TE from vsycn gpio */ pinfo.mipi.interleave_max = 1; pinfo.mipi.insert_dcs_cmd = TRUE; pinfo.mipi.wr_mem_continue = 0x3c; pinfo.mipi.wr_mem_start = 0x2c; #else pinfo.type = MIPI_VIDEO_PANEL; pinfo.mipi.mode = DSI_VIDEO_MODE; pinfo.mipi.dst_format = DSI_VIDEO_DST_FORMAT_RGB888; pinfo.mipi.mdp_trigger = DSI_CMD_TRIGGER_SW; #ifdef CONFIG_FB_MSM_SELF_REFRESH printk(KERN_INFO "%s: VIDEO mode (AL)\n", __func__); jet_panel_data.self_refresh_switch = NULL; /* CMD or VIDEO mode only */ #else printk(KERN_INFO "%s: SWITCH mode (AL)\n", __func__); #endif pinfo.mipi.pulse_mode_hsa_he = TRUE; pinfo.mipi.hfp_power_stop = TRUE; pinfo.mipi.hbp_power_stop = TRUE; pinfo.mipi.hsa_power_stop = TRUE; pinfo.mipi.eof_bllp_power_stop = TRUE; pinfo.mipi.bllp_power_stop = TRUE; pinfo.mipi.traffic_mode = DSI_NON_BURST_SYNCH_PULSE; #endif pinfo.xres = 720; pinfo.yres = 1280; pinfo.pdest = DISPLAY_1; pinfo.wait_cycle = 0; pinfo.bpp = 24; pinfo.lcdc.h_back_porch = 104; pinfo.lcdc.h_front_porch = 95; pinfo.lcdc.h_pulse_width = 1; pinfo.lcdc.v_back_porch = 2; pinfo.lcdc.v_front_porch = 6; pinfo.lcdc.v_pulse_width = 1; pinfo.lcd.v_back_porch = 2; pinfo.lcd.v_front_porch = 6; pinfo.lcd.v_pulse_width = 1; pinfo.lcd.primary_vsync_init = pinfo.yres; pinfo.lcd.primary_rdptr_irq = 0; pinfo.lcd.primary_start_pos = pinfo.yres + pinfo.lcd.v_back_porch + pinfo.lcd.v_front_porch - 1; pinfo.lcdc.border_clr = 0; /* blk */ pinfo.lcdc.underflow_clr = 0xff; /* blue */ pinfo.lcdc.hsync_skew = 0; pinfo.bl_max = 255; pinfo.bl_min = 1; pinfo.fb_num = 2; pinfo.clk_rate = 569000000; pinfo.mipi.vc = 0; pinfo.mipi.rgb_swap = DSI_RGB_SWAP_RGB; pinfo.mipi.data_lane0 = TRUE; pinfo.mipi.data_lane1 = TRUE; pinfo.mipi.data_lane2 = TRUE; pinfo.mipi.tx_eot_append = TRUE; pinfo.mipi.t_clk_post = 0x10; pinfo.mipi.t_clk_pre = 0x21; pinfo.mipi.stream = 0; /* dma_p */ pinfo.mipi.dma_trigger = DSI_CMD_TRIGGER_SW; pinfo.mipi.frame_rate = 59; pinfo.mipi.dsi_phy_db = &nova_dsi_video_mode_phy_db; ret = mipi_jet_device_register(&pinfo, MIPI_DSI_PRIM, MIPI_DSI_PANEL_WVGA_PT); if (ret) printk(KERN_ERR "%s: failed to register device!\n", __func__); return ret; } static int __init mipi_jet_panel_init(void) { int rc = 0; if (panel_type == PANEL_ID_NONE) { printk(KERN_INFO "No panel detected.\n"); return -EINVAL; } if (panel_type == PANEL_ID_JET_SONY_NT || panel_type == PANEL_ID_JET_SONY_NT_C1 || panel_type == PANEL_ID_JET_SONY_NT_C2) rc = mipi_video_sony_hd720p_init(); else if (panel_type == PANEL_ID_JET_AUO_NT || panel_type == PANEL_ID_JET_AUO_NT_C2 || panel_type == PANEL_ID_JET_AUO_NT_C3 || panel_type == PANEL_ID_JET_AUO_NT_C3_1) rc = mipi_video_auo_hd720p_init(); return rc; } late_initcall(mipi_jet_panel_init);
gpl-2.0
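The porch, pulse-width, and clk_rate values in the two panel inits above hang together via the usual DSI video-mode budget: per-lane bit rate = htotal * vtotal * fps * bpp / lanes. A quick check under an assumed ~60 fps target (an assumption for the sketch; the code itself programs frame_rate 63 and 59 for the two panels) lands almost exactly on the 569 MHz written to pinfo.clk_rate:

#include <stdio.h>

int main(void)
{
	unsigned long htotal = 720 + 104 + 95 + 1;	/* xres + hbp + hfp + hpw */
	unsigned long vtotal = 1280 + 2 + 6 + 1;	/* yres + vbp + vfp + vpw */
	unsigned long fps = 60;				/* assumed target */
	unsigned long bpp = 24, lanes = 3;

	unsigned long long lane_bps =
		(unsigned long long)htotal * vtotal * fps * bpp / lanes;

	/* prints 569222400, i.e. the ~569 MHz DSI bit clock in pinfo.clk_rate */
	printf("per-lane bit rate: %llu bps\n", lane_bps);
	return 0;
}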
LEPT-Development/android_kernel_lge_msm8916
drivers/staging/prima/CORE/HDD/src/wlan_hdd_wmm.c
18
107752
/* * Copyright (c) 2012-2015 The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /* * This file was originally distributed by Qualcomm Atheros, Inc. * under proprietary terms before Copyright ownership was assigned * to the Linux Foundation. */ /*============================================================================ @file wlan_hdd_wmm.c This module (wlan_hdd_wmm.h interface + wlan_hdd_wmm.c implementation) houses all the logic for WMM in HDD. On the control path, it has the logic to setup QoS, modify QoS and delete QoS (QoS here refers to a TSPEC). The setup QoS comes in two flavors: an explicit application invoked and an internal HDD invoked. The implicit QoS is for applications that do NOT call the custom QCT WLAN OIDs for QoS but which DO mark their traffic for priortization. It also has logic to start, update and stop the U-APSD trigger frame generation. It also has logic to read WMM related config parameters from the registry. On the data path, it has the logic to figure out the WMM AC of an egress packet and when to signal TL to serve a particular AC queue. It also has the logic to retrieve a packet based on WMM priority in response to a fetch from TL. The remaining functions are utility functions for information hiding. Copyright (c) 2008-9 QUALCOMM Incorporated. All Rights Reserved. 
Qualcomm Confidential and Proprietary ============================================================================*/ /*--------------------------------------------------------------------------- Include files -------------------------------------------------------------------------*/ #include <wlan_hdd_tx_rx.h> #include <wlan_hdd_dp_utils.h> #include <wlan_hdd_wmm.h> #include <wlan_hdd_ether.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/etherdevice.h> #include <linux/if_vlan.h> #include <linux/ip.h> #include <linux/semaphore.h> #include <wlan_hdd_hostapd.h> #include <wlan_hdd_softap_tx_rx.h> #include <vos_sched.h> #include "sme_Api.h" // change logging behavior based upon debug flag #ifdef HDD_WMM_DEBUG #define WMM_TRACE_LEVEL_FATAL VOS_TRACE_LEVEL_FATAL #define WMM_TRACE_LEVEL_ERROR VOS_TRACE_LEVEL_FATAL #define WMM_TRACE_LEVEL_WARN VOS_TRACE_LEVEL_FATAL #define WMM_TRACE_LEVEL_INFO VOS_TRACE_LEVEL_FATAL #define WMM_TRACE_LEVEL_INFO_HIGH VOS_TRACE_LEVEL_FATAL #define WMM_TRACE_LEVEL_INFO_LOW VOS_TRACE_LEVEL_FATAL #else #define WMM_TRACE_LEVEL_FATAL VOS_TRACE_LEVEL_FATAL #define WMM_TRACE_LEVEL_ERROR VOS_TRACE_LEVEL_ERROR #define WMM_TRACE_LEVEL_WARN VOS_TRACE_LEVEL_WARN #define WMM_TRACE_LEVEL_INFO VOS_TRACE_LEVEL_INFO #define WMM_TRACE_LEVEL_INFO_HIGH VOS_TRACE_LEVEL_INFO_HIGH #define WMM_TRACE_LEVEL_INFO_LOW VOS_TRACE_LEVEL_INFO_LOW #endif #define WLAN_HDD_MAX_DSCP 0x3f // DHCP Port number #define DHCP_SOURCE_PORT 0x4400 #define DHCP_DESTINATION_PORT 0x4300 #define HDD_WMM_UP_TO_AC_MAP_SIZE 8 const v_U8_t hddWmmUpToAcMap[] = { WLANTL_AC_BE, WLANTL_AC_BK, WLANTL_AC_BK, WLANTL_AC_BE, WLANTL_AC_VI, WLANTL_AC_VI, WLANTL_AC_VO, WLANTL_AC_VO }; //Linux based UP -> AC Mapping const v_U8_t hddLinuxUpToAcMap[8] = { HDD_LINUX_AC_BE, HDD_LINUX_AC_BK, HDD_LINUX_AC_BK, HDD_LINUX_AC_BE, HDD_LINUX_AC_VI, HDD_LINUX_AC_VI, HDD_LINUX_AC_VO, HDD_LINUX_AC_VO }; #ifndef WLAN_MDM_CODE_REDUCTION_OPT /** @brief hdd_wmm_enable_tl_uapsd() - function which decides whether and how to update UAPSD parameters in TL @param pQosContext : [in] the pointer the QoS instance control block @return None */ static void hdd_wmm_enable_tl_uapsd (hdd_wmm_qos_context_t* pQosContext) { hdd_adapter_t* pAdapter = pQosContext->pAdapter; WLANTL_ACEnumType acType = pQosContext->acType; hdd_wmm_ac_status_t *pAc = NULL; VOS_STATUS status; v_U32_t service_interval; v_U32_t suspension_interval; sme_QosWmmDirType direction; v_BOOL_t psb; if (acType >= WLANTL_MAX_AC) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: Invalid AC: %d", __func__, acType); return; } pAc = &pAdapter->hddWmmStatus.wmmAcStatus[acType]; // The TSPEC must be valid if (pAc->wmmAcTspecValid == VOS_FALSE) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: Invoked with invalid TSPEC", __func__); return; } // determine the service interval if (pAc->wmmAcTspecInfo.min_service_interval) { service_interval = pAc->wmmAcTspecInfo.min_service_interval; } else if (pAc->wmmAcTspecInfo.max_service_interval) { service_interval = pAc->wmmAcTspecInfo.max_service_interval; } else { // no service interval is present in the TSPEC // this is OK, there just won't be U-APSD VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: No service interval supplied", __func__); service_interval = 0; } // determine the suspension interval & direction suspension_interval = pAc->wmmAcTspecInfo.suspension_interval; direction = pAc->wmmAcTspecInfo.ts_info.direction; psb = pAc->wmmAcTspecInfo.ts_info.psb; // if we have previously enabled U-APSD, have any 
params changed? if ((pAc->wmmAcUapsdInfoValid) && (pAc->wmmAcUapsdServiceInterval == service_interval) && (pAc->wmmAcUapsdSuspensionInterval == suspension_interval) && (pAc->wmmAcUapsdDirection == direction) && (pAc->wmmAcIsUapsdEnabled == psb)) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: No change in U-APSD parameters", __func__); return; } // are we in the appropriate power save modes? if (!sme_IsPowerSaveEnabled(WLAN_HDD_GET_HAL_CTX(pAdapter), ePMC_BEACON_MODE_POWER_SAVE)) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: BMPS is not enabled", __func__); return; } if (!sme_IsPowerSaveEnabled(WLAN_HDD_GET_HAL_CTX(pAdapter), ePMC_UAPSD_MODE_POWER_SAVE)) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: U-APSD is not enabled", __func__); return; } // everything is in place to notify TL status = WLANTL_EnableUAPSDForAC((WLAN_HDD_GET_CTX(pAdapter))->pvosContext, (WLAN_HDD_GET_STATION_CTX_PTR(pAdapter))->conn_info.staId[0], acType, pAc->wmmAcTspecInfo.ts_info.tid, pAc->wmmAcTspecInfo.ts_info.up, service_interval, suspension_interval, direction); if ( !VOS_IS_STATUS_SUCCESS( status ) ) { VOS_TRACE( VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: Failed to enable U-APSD for AC=%d", __func__, acType ); return; } // stash away the parameters that were used pAc->wmmAcUapsdInfoValid = VOS_TRUE; pAc->wmmAcUapsdServiceInterval = service_interval; pAc->wmmAcUapsdSuspensionInterval = suspension_interval; pAc->wmmAcUapsdDirection = direction; pAc->wmmAcIsUapsdEnabled = psb; VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: Enabled UAPSD in TL srv_int=%d " "susp_int=%d dir=%d AC=%d", __func__, service_interval, suspension_interval, direction, acType); } /** @brief hdd_wmm_disable_tl_uapsd() - function which decides whether to disable UAPSD parameters in TL @param pQosContext : [in] the pointer the QoS instance control block @return None */ static void hdd_wmm_disable_tl_uapsd (hdd_wmm_qos_context_t* pQosContext) { hdd_adapter_t* pAdapter = pQosContext->pAdapter; WLANTL_ACEnumType acType = pQosContext->acType; hdd_wmm_ac_status_t *pAc = NULL; VOS_STATUS status; v_U32_t service_interval; v_U32_t suspension_interval; v_U8_t uapsd_mask; v_U8_t ActiveTspec = INVALID_TSPEC; if (acType >= WLANTL_MAX_AC) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: Invalid AC: %d", __func__, acType); return; } pAc = &pAdapter->hddWmmStatus.wmmAcStatus[acType]; // have we previously enabled UAPSD? if (pAc->wmmAcUapsdInfoValid == VOS_TRUE) { uapsd_mask = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->UapsdMask; //Finding uapsd_mask as per AC uapsd_mask = uapsd_mask & (1 << (WLANTL_AC_VO - acType)); sme_QosTspecActive((tpAniSirGlobal)WLAN_HDD_GET_HAL_CTX(pAdapter), acType, pAdapter->sessionId, &ActiveTspec); //Call WLANTL_EnableUAPSDForAC only when static uapsd mask is present and // no active tspecs. TODO: Need to change naming convention as Enable // UAPSD function is called in hdd_wmm_disable_tl_uapsd. 
Purpose of // calling WLANTL_EnableUAPSDForAC is to update UAPSD intervals to fw if(uapsd_mask && !ActiveTspec) { switch(acType) { case WLANTL_AC_VO: service_interval = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraUapsdVoSrvIntv; suspension_interval = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraUapsdVoSuspIntv; break; case WLANTL_AC_VI: service_interval = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraUapsdViSrvIntv; suspension_interval = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraUapsdViSuspIntv; break; case WLANTL_AC_BE: service_interval = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraUapsdBeSrvIntv; suspension_interval = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraUapsdBeSuspIntv; break; case WLANTL_AC_BK: service_interval = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraUapsdBkSrvIntv; suspension_interval = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraUapsdBkSuspIntv; break; default: VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: Invalid AC %d", __func__, acType ); return; } status = WLANTL_EnableUAPSDForAC((WLAN_HDD_GET_CTX(pAdapter))->pvosContext, (WLAN_HDD_GET_STATION_CTX_PTR(pAdapter))->conn_info.staId[0], acType, pAc->wmmAcTspecInfo.ts_info.tid, pAc->wmmAcTspecInfo.ts_info.up, service_interval, suspension_interval, pAc->wmmAcTspecInfo.ts_info.direction); if ( !VOS_IS_STATUS_SUCCESS( status ) ) { VOS_TRACE( VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: Failed to update U-APSD params for AC=%d", __func__, acType ); } else { // TL no longer has valid UAPSD info pAc->wmmAcUapsdInfoValid = VOS_FALSE; VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: Updated UAPSD params in TL for AC=%d", __func__, acType); } } } } #endif /** @brief hdd_wmm_free_context() - function which frees a QoS context @param pQosContext : [in] the pointer the QoS instance control block @return None */ static void hdd_wmm_free_context (hdd_wmm_qos_context_t* pQosContext) { v_CONTEXT_t pVosContext = vos_get_global_context( VOS_MODULE_ID_HDD, NULL ); hdd_context_t *pHddCtx; if (NULL != pVosContext) { pHddCtx = vos_get_context( VOS_MODULE_ID_HDD, pVosContext); if (NULL == pHddCtx) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, FL("HddCtx is NULL")); return; } } VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: Entered, context %p", __func__, pQosContext); // take the wmmLock since we're manipulating the context list mutex_lock(&pHddCtx->wmmLock); if (unlikely((NULL == pQosContext) || (HDD_WMM_CTX_MAGIC != pQosContext->magic))) { // must have been freed in another thread mutex_unlock(&pHddCtx->wmmLock); return; } // make sure nobody thinks this is a valid context pQosContext->magic = 0; // unlink the context list_del(&pQosContext->node); // done manipulating the list mutex_unlock(&pHddCtx->wmmLock); // reclaim memory kfree(pQosContext); } #ifndef WLAN_MDM_CODE_REDUCTION_OPT /** @brief hdd_wmm_notify_app() - function which notifies an application changes in state of it flow @param pQosContext : [in] the pointer the QoS instance control block @return None */ #define MAX_NOTIFY_LEN 50 static void hdd_wmm_notify_app (hdd_wmm_qos_context_t* pQosContext) { hdd_adapter_t* pAdapter; union iwreq_data wrqu; char buf[MAX_NOTIFY_LEN+1]; v_CONTEXT_t pVosContext = vos_get_global_context( VOS_MODULE_ID_HDD, NULL ); hdd_context_t *pHddCtx; if (NULL != pVosContext) { pHddCtx = vos_get_context( VOS_MODULE_ID_HDD, pVosContext); if (NULL == pHddCtx) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, FL("HddCtx is NULL")); return; } } VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: Entered, context 
%p", __func__, pQosContext); mutex_lock(&pHddCtx->wmmLock); if (unlikely((NULL == pQosContext) || (HDD_WMM_CTX_MAGIC != pQosContext->magic))) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: Invalid QoS Context", __func__); mutex_unlock(&pHddCtx->wmmLock); return; } // get pointer to the adapter pAdapter = pQosContext->pAdapter; mutex_unlock(&pHddCtx->wmmLock); // create the event memset(&wrqu, 0, sizeof(wrqu)); memset(buf, 0, sizeof(buf)); snprintf(buf, MAX_NOTIFY_LEN, "QCOM: TS change[%u: %u]", (unsigned int)pQosContext->handle, (unsigned int)pQosContext->lastStatus); wrqu.data.pointer = buf; wrqu.data.length = strlen(buf); // send the event VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: Sending [%s]", __func__, buf); wireless_send_event(pAdapter->dev, IWEVCUSTOM, &wrqu, buf); } /** @brief hdd_wmm_is_access_allowed() - function which determines if access is allowed for the given AC. this is designed to be called during SME callback processing since that is when access can be granted or removed @param pAdapter : [in] pointer to adapter context @param pAc : [in] pointer to the per-AC status @return : VOS_TRUE - access is allowed : VOS_FALSE - access is not allowed None */ static v_BOOL_t hdd_wmm_is_access_allowed(hdd_adapter_t* pAdapter, hdd_wmm_ac_status_t* pAc) { // if we don't want QoS or the AP doesn't support QoS // or we don't want to do implicit QoS // or if AP doesn't require admission for this AC // then we have access if (!hdd_wmm_is_active(pAdapter) || !(WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->bImplicitQosEnabled || !pAc->wmmAcAccessRequired) { return VOS_TRUE; } // if implicit QoS has already completed, successfully or not, // then access is allowed if (pAc->wmmAcAccessGranted || pAc->wmmAcAccessFailed) { return VOS_TRUE; } // admission is required and implicit QoS hasn't completed // however explicit QoS may have completed and we'll have // a Tspec // if we don't have a Tspec then access is not allowed if (!pAc->wmmAcTspecValid) { return VOS_FALSE; } // we have a Tspec -- does it allow upstream or bidirectional traffic? // if it only allows downstream traffic then access is not allowed if (pAc->wmmAcTspecInfo.ts_info.direction == SME_QOS_WMM_TS_DIR_DOWNLINK) { return VOS_FALSE; } // we meet all of the criteria for access return VOS_TRUE; } #ifdef FEATURE_WLAN_ESE /** @brief hdd_wmm_inactivity_timer_cb() - timer handler function which is called for every inactivity interval per AC. This function gets the current transmitted packets on the given AC, and checks if there where any TX activity from the previous interval. If there was no traffic then it would delete the TS that was negotiated on that AC. 
@param pUserData : [in] pointer to pQosContext @return : NONE */ void hdd_wmm_inactivity_timer_cb( v_PVOID_t pUserData ) { hdd_wmm_qos_context_t* pQosContext = (hdd_wmm_qos_context_t*)pUserData; hdd_adapter_t* pAdapter; hdd_wmm_ac_status_t *pAc; hdd_wlan_wmm_status_e status; VOS_STATUS vos_status; v_U32_t currentTrafficCnt = 0; WLANTL_ACEnumType acType = 0; v_CONTEXT_t pVosContext = vos_get_global_context( VOS_MODULE_ID_HDD, NULL ); hdd_context_t *pHddCtx; if (NULL != pVosContext) { pHddCtx = vos_get_context( VOS_MODULE_ID_HDD, pVosContext); if (NULL == pHddCtx) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, FL("HddCtx is NULL")); return; } } mutex_lock(&pHddCtx->wmmLock); if (unlikely((NULL == pQosContext) || (HDD_WMM_CTX_MAGIC != pQosContext->magic))) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: Invalid QoS Context", __func__); mutex_unlock(&pHddCtx->wmmLock); return; } mutex_unlock(&pHddCtx->wmmLock); acType = pQosContext->acType; pAdapter = pQosContext->pAdapter; pAc = &pAdapter->hddWmmStatus.wmmAcStatus[acType]; // Get the Tx stats for this AC. currentTrafficCnt = pAdapter->hdd_stats.hddTxRxStats.txXmitClassifiedAC[pQosContext->acType]; VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN, FL("WMM inactivity Timer for AC=%d, currentCnt=%d, prevCnt=%d"), acType, (int)currentTrafficCnt, (int)pAc->wmmPrevTrafficCnt); if (pAc->wmmPrevTrafficCnt == currentTrafficCnt) { // If there is no traffic activity, delete the TSPEC for this AC status = hdd_wmm_delts(pAdapter, pQosContext->handle); VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN, FL("Deleted TS on AC %d, due to inactivity with status = %d!!!"), acType, status); } else { pAc->wmmPrevTrafficCnt = currentTrafficCnt; if (pAc->wmmInactivityTimer.state == VOS_TIMER_STATE_STOPPED) { // Restart the timer vos_status = vos_timer_start(&pAc->wmmInactivityTimer, pAc->wmmInactivityTime); if (!VOS_IS_STATUS_SUCCESS(vos_status)) { VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR, FL("Restarting inactivity timer failed on AC %d"), acType); } } else { VOS_ASSERT(vos_timer_getCurrentState( &pAc->wmmInactivityTimer) == VOS_TIMER_STATE_STOPPED); } } return; } /** @brief hdd_wmm_enable_inactivity_timer() - function to enable the traffic inactivity timer for the given AC, if the inactivity_interval specified in the ADDTS parameters is non-zero @param pQosContext : [in] pointer to pQosContext @param inactivityTime: [in] value of the inactivity interval in millisecs @return : VOS_STATUS_E_FAILURE VOS_STATUS_SUCCESS */ VOS_STATUS hdd_wmm_enable_inactivity_timer(hdd_wmm_qos_context_t* pQosContext, v_U32_t inactivityTime) { VOS_STATUS vos_status = VOS_STATUS_E_FAILURE; hdd_adapter_t* pAdapter = pQosContext->pAdapter; WLANTL_ACEnumType acType = pQosContext->acType; hdd_wmm_ac_status_t *pAc; pAdapter = pQosContext->pAdapter; pAc = &pAdapter->hddWmmStatus.wmmAcStatus[acType]; // If QoS-Tspec is successfully setup and if the inactivity timer is non-zero, // a traffic inactivity timer needs to be started for the given AC vos_status = vos_timer_init( &pAc->wmmInactivityTimer, VOS_TIMER_TYPE_SW, hdd_wmm_inactivity_timer_cb, (v_PVOID_t)pQosContext ); if ( !VOS_IS_STATUS_SUCCESS(vos_status)) { VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR, FL("Initializing inactivity timer failed on AC %d"), acType); return vos_status; } // Start the inactivity timer vos_status = vos_timer_start( &pAc->wmmInactivityTimer, inactivityTime); if ( !VOS_IS_STATUS_SUCCESS(vos_status)) { VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR, FL("Starting inactivity 
timer failed on AC %d"), acType); return vos_status; } pAc->wmmInactivityTime = inactivityTime; // Initialize the current tx traffic count on this AC pAc->wmmPrevTrafficCnt = pAdapter->hdd_stats.hddTxRxStats.txXmitClassifiedAC[pQosContext->acType]; return vos_status; } /** @brief hdd_wmm_enable_inactivity_timer() - function to disable the traffic inactivity timer for the given AC. This would be called when deleting the TS. @param pQosContext : [in] pointer to pQosContext @return : VOS_STATUS_E_FAILURE VOS_STATUS_SUCCESS */ VOS_STATUS hdd_wmm_disable_inactivity_timer(hdd_wmm_qos_context_t* pQosContext) { hdd_adapter_t* pAdapter = pQosContext->pAdapter; WLANTL_ACEnumType acType = pQosContext->acType; hdd_wmm_ac_status_t *pAc = &pAdapter->hddWmmStatus.wmmAcStatus[acType]; VOS_STATUS vos_status = VOS_STATUS_E_FAILURE; // Clear the timer and the counter pAc->wmmInactivityTime = 0; pAc->wmmPrevTrafficCnt = 0; vos_timer_stop(&pAc->wmmInactivityTimer); vos_status = vos_timer_destroy(&pAc->wmmInactivityTimer); return vos_status; } #endif // FEATURE_WLAN_ESE /** @brief hdd_wmm_sme_callback() - callback registered by HDD with SME for receiving QoS notifications. Even though this function has a static scope it gets called externally through some function pointer magic (so there is a need for rigorous parameter checking) @param hHal : [in] the HAL handle @param HddCtx : [in] the HDD specified handle @param pCurrentQosInfo : [in] the TSPEC params @param SmeStatus : [in] the QoS related SME status @return eHAL_STATUS_SUCCESS if all good, eHAL_STATUS_FAILURE otherwise */ static eHalStatus hdd_wmm_sme_callback (tHalHandle hHal, void * hddCtx, sme_QosWmmTspecInfo* pCurrentQosInfo, sme_QosStatusType smeStatus, v_U32_t qosFlowId) { hdd_wmm_qos_context_t* pQosContext = hddCtx; hdd_adapter_t* pAdapter; WLANTL_ACEnumType acType; hdd_wmm_ac_status_t *pAc; VOS_STATUS status; v_CONTEXT_t pVosContext = vos_get_global_context( VOS_MODULE_ID_HDD, NULL ); hdd_context_t *pHddCtx; if (NULL != pVosContext) { pHddCtx = vos_get_context( VOS_MODULE_ID_HDD, pVosContext); if (NULL == pHddCtx) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, FL("HddCtx is NULL")); return eHAL_STATUS_FAILURE; } } VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: Entered, context %p", __func__, pQosContext); mutex_lock(&pHddCtx->wmmLock); if (unlikely((NULL == pQosContext) || (HDD_WMM_CTX_MAGIC != pQosContext->magic))) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: Invalid QoS Context", __func__); mutex_unlock(&pHddCtx->wmmLock); return eHAL_STATUS_FAILURE; } mutex_unlock(&pHddCtx->wmmLock); pAdapter = pQosContext->pAdapter; acType = pQosContext->acType; pAc = &pAdapter->hddWmmStatus.wmmAcStatus[acType]; VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: status %d flowid %d info %p", __func__, smeStatus, qosFlowId, pCurrentQosInfo); switch (smeStatus) { case SME_QOS_STATUS_SETUP_SUCCESS_IND: VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: Setup is complete", __func__); // there will always be a TSPEC returned with this status, even if // a TSPEC is not exchanged OTA if (pCurrentQosInfo) { pAc->wmmAcTspecValid = VOS_TRUE; memcpy(&pAc->wmmAcTspecInfo, pCurrentQosInfo, sizeof(pAc->wmmAcTspecInfo)); } if (HDD_WMM_HANDLE_IMPLICIT == pQosContext->handle) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: Implicit Qos, notifying TL for TL AC %d", __func__, acType); // this was triggered by implicit QoS so we know packets are pending // update state pAc->wmmAcAccessAllowed = VOS_TRUE; 
pAc->wmmAcAccessGranted = VOS_TRUE; pAc->wmmAcAccessPending = VOS_FALSE; // notify TL that packets are pending status = WLANTL_STAPktPending( (WLAN_HDD_GET_CTX(pAdapter))->pvosContext, (WLAN_HDD_GET_STATION_CTX_PTR(pAdapter))->conn_info.staId[0], acType ); if ( !VOS_IS_STATUS_SUCCESS( status ) ) { VOS_TRACE( VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: Failed to signal TL for AC=%d", __func__, acType ); } } else { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: Explicit Qos, notifying userspace", __func__); // this was triggered by an application pQosContext->lastStatus = HDD_WLAN_WMM_STATUS_SETUP_SUCCESS; hdd_wmm_notify_app(pQosContext); } #ifdef FEATURE_WLAN_ESE // Check if the inactivity interval is specified if (pCurrentQosInfo && pCurrentQosInfo->inactivity_interval) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: Inactivity timer value = %d for AC=%d", __func__, pCurrentQosInfo->inactivity_interval, acType); hdd_wmm_enable_inactivity_timer(pQosContext, pCurrentQosInfo->inactivity_interval); } #endif // FEATURE_WLAN_ESE // notify TL to enable trigger frames if necessary hdd_wmm_enable_tl_uapsd(pQosContext); break; case SME_QOS_STATUS_SETUP_SUCCESS_APSD_SET_ALREADY: VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: Setup is complete (U-APSD set previously)", __func__); if (HDD_WMM_HANDLE_IMPLICIT == pQosContext->handle) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: Implicit Qos, notifying TL", __func__); // this was triggered by implicit QoS so we know packets are pending // update state pAc->wmmAcAccessAllowed = VOS_TRUE; pAc->wmmAcAccessGranted = VOS_TRUE; pAc->wmmAcAccessPending = VOS_FALSE; // notify TL that packets are pending status = WLANTL_STAPktPending( (WLAN_HDD_GET_CTX(pAdapter))->pvosContext, (WLAN_HDD_GET_STATION_CTX_PTR(pAdapter))->conn_info.staId[0], acType ); if ( !VOS_IS_STATUS_SUCCESS( status ) ) { VOS_TRACE( VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: Failed to signal TL for AC=%d", __func__, acType ); } } else { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: Explicit Qos, notifying userspace", __func__); // this was triggered by an application pQosContext->lastStatus = HDD_WLAN_WMM_STATUS_SETUP_SUCCESS_NO_ACM_UAPSD_EXISTING; hdd_wmm_notify_app(pQosContext); } break; case SME_QOS_STATUS_SETUP_FAILURE_RSP: VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: Setup failed", __func__); // QoS setup failed if (HDD_WMM_HANDLE_IMPLICIT == pQosContext->handle) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: Implicit Qos, notifying TL", __func__); // we note the failure, but we also mark access as allowed so that // the packets will flow. Note that the MAC will "do the right thing" pAc->wmmAcAccessPending = VOS_FALSE; pAc->wmmAcAccessFailed = VOS_TRUE; pAc->wmmAcAccessAllowed = VOS_TRUE; // this was triggered by implicit QoS so we know packets are pending status = WLANTL_STAPktPending( (WLAN_HDD_GET_CTX(pAdapter))->pvosContext, (WLAN_HDD_GET_STATION_CTX_PTR(pAdapter))->conn_info.staId[0], acType ); if ( !VOS_IS_STATUS_SUCCESS( status ) ) { VOS_TRACE( VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: Failed to signal TL for AC=%d", __func__, acType ); } } else { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: Explicit Qos, notifying userspace", __func__); // this was triggered by an application pQosContext->lastStatus = HDD_WLAN_WMM_STATUS_SETUP_FAILED; hdd_wmm_notify_app(pQosContext); } /* Setting up QoS Failed, QoS context can be released. 
* SME is releasing this flow information and if HDD doen't release this context, * next time if application uses the same handle to set-up QoS, HDD (as it has * QoS context for this handle) will issue Modify QoS request to SME but SME will * reject as no it has no information for this flow. */ hdd_wmm_free_context(pQosContext); break; case SME_QOS_STATUS_SETUP_INVALID_PARAMS_RSP: VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: Setup Invalid Params, notify TL", __func__); // QoS setup failed if (HDD_WMM_HANDLE_IMPLICIT == pQosContext->handle) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: Implicit Qos, notifying TL", __func__); // we note the failure, but we also mark access as allowed so that // the packets will flow. Note that the MAC will "do the right thing" pAc->wmmAcAccessPending = VOS_FALSE; pAc->wmmAcAccessFailed = VOS_TRUE; pAc->wmmAcAccessAllowed = VOS_TRUE; // this was triggered by implicit QoS so we know packets are pending status = WLANTL_STAPktPending( (WLAN_HDD_GET_CTX(pAdapter))->pvosContext, (WLAN_HDD_GET_STATION_CTX_PTR(pAdapter))->conn_info.staId[0], acType ); if ( !VOS_IS_STATUS_SUCCESS( status ) ) { VOS_TRACE( VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: Failed to signal TL for AC=%d", __func__, acType ); } } else { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: Explicit Qos, notifying userspace", __func__); // this was triggered by an application pQosContext->lastStatus = HDD_WLAN_WMM_STATUS_SETUP_FAILED_BAD_PARAM; hdd_wmm_notify_app(pQosContext); } break; case SME_QOS_STATUS_SETUP_NOT_QOS_AP_RSP: VOS_TRACE( VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: Setup failed, not a QoS AP", __func__); if (!HDD_WMM_HANDLE_IMPLICIT == pQosContext->handle) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: Explicit Qos, notifying userspace", __func__); // this was triggered by an application pQosContext->lastStatus = HDD_WLAN_WMM_STATUS_SETUP_FAILED_NO_WMM; hdd_wmm_notify_app(pQosContext); } break; case SME_QOS_STATUS_SETUP_REQ_PENDING_RSP: VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: Setup pending", __func__); // not a callback status -- ignore if we get it break; case SME_QOS_STATUS_SETUP_MODIFIED_IND: VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: Setup modified", __func__); if (pCurrentQosInfo) { // update the TSPEC pAc->wmmAcTspecValid = VOS_TRUE; memcpy(&pAc->wmmAcTspecInfo, pCurrentQosInfo, sizeof(pAc->wmmAcTspecInfo)); if (HDD_WMM_HANDLE_IMPLICIT != pQosContext->handle) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: Explicit Qos, notifying userspace", __func__); // this was triggered by an application pQosContext->lastStatus = HDD_WLAN_WMM_STATUS_MODIFIED; hdd_wmm_notify_app(pQosContext); } // need to tell TL to update its UAPSD handling hdd_wmm_enable_tl_uapsd(pQosContext); } break; case SME_QOS_STATUS_SETUP_SUCCESS_NO_ACM_NO_APSD_RSP: if (HDD_WMM_HANDLE_IMPLICIT == pQosContext->handle) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: Implicit Qos, notifying TL", __func__); // this was triggered by implicit QoS so we know packets are pending pAc->wmmAcAccessPending = VOS_FALSE; pAc->wmmAcAccessGranted = VOS_TRUE; pAc->wmmAcAccessAllowed = VOS_TRUE; // notify TL that packets are pending status = WLANTL_STAPktPending( (WLAN_HDD_GET_CTX(pAdapter))->pvosContext, (WLAN_HDD_GET_STATION_CTX_PTR(pAdapter))->conn_info.staId[0], acType ); if ( !VOS_IS_STATUS_SUCCESS( status ) ) { VOS_TRACE( VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: Failed to signal TL for AC=%d", __func__, acType ); } } else { 
VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: Explicit Qos, notifying userspace", __func__); // this was triggered by an application pQosContext->lastStatus = HDD_WLAN_WMM_STATUS_SETUP_SUCCESS_NO_ACM_NO_UAPSD; hdd_wmm_notify_app(pQosContext); } break; case SME_QOS_STATUS_SETUP_SUCCESS_IND_APSD_PENDING: // nothing to do for now break; case SME_QOS_STATUS_SETUP_SUCCESS_IND_APSD_SET_FAILED: VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: Setup successful but U-APSD failed", __func__); if (HDD_WMM_HANDLE_IMPLICIT == pQosContext->handle) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: Implicit Qos, notifying TL", __func__); // QoS setup was successful but setting U=APSD failed // Since the OTA part of the request was successful, we don't mark // this as a failure. // the packets will flow. Note that the MAC will "do the right thing" pAc->wmmAcAccessGranted = VOS_TRUE; pAc->wmmAcAccessAllowed = VOS_TRUE; pAc->wmmAcAccessFailed = VOS_FALSE; pAc->wmmAcAccessPending = VOS_FALSE; // this was triggered by implicit QoS so we know packets are pending status = WLANTL_STAPktPending( (WLAN_HDD_GET_CTX(pAdapter))->pvosContext, (WLAN_HDD_GET_STATION_CTX_PTR(pAdapter))->conn_info.staId[0], acType ); if ( !VOS_IS_STATUS_SUCCESS( status ) ) { VOS_TRACE( VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: Failed to signal TL for AC=%d", __func__, acType ); } } else { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: Explicit Qos, notifying userspace", __func__); // this was triggered by an application pQosContext->lastStatus = HDD_WLAN_WMM_STATUS_SETUP_UAPSD_SET_FAILED; hdd_wmm_notify_app(pQosContext); } // Since U-APSD portion failed disabled trigger frame generation hdd_wmm_disable_tl_uapsd(pQosContext); break; case SME_QOS_STATUS_RELEASE_SUCCESS_RSP: VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: Release is complete", __func__); if (pCurrentQosInfo) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: flows still active", __func__); // there is still at least one flow active for this AC // so update the AC state memcpy(&pAc->wmmAcTspecInfo, pCurrentQosInfo, sizeof(pAc->wmmAcTspecInfo)); // need to tell TL to update its UAPSD handling hdd_wmm_enable_tl_uapsd(pQosContext); } else { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: last flow", __func__); // this is the last flow active for this AC so update the AC state pAc->wmmAcTspecValid = VOS_FALSE; // need to tell TL to update its UAPSD handling hdd_wmm_disable_tl_uapsd(pQosContext); } if (HDD_WMM_HANDLE_IMPLICIT != pQosContext->handle) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: Explicit Qos, notifying userspace", __func__); // this was triggered by an application pQosContext->lastStatus = HDD_WLAN_WMM_STATUS_RELEASE_SUCCESS; hdd_wmm_notify_app(pQosContext); } // we are done with this flow hdd_wmm_free_context(pQosContext); break; case SME_QOS_STATUS_RELEASE_FAILURE_RSP: VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: Release failure", __func__); // we don't need to update our state or TL since nothing has changed if (HDD_WMM_HANDLE_IMPLICIT != pQosContext->handle) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: Explicit Qos, notifying userspace", __func__); // this was triggered by an application pQosContext->lastStatus = HDD_WLAN_WMM_STATUS_RELEASE_FAILED; hdd_wmm_notify_app(pQosContext); } break; case SME_QOS_STATUS_RELEASE_QOS_LOST_IND: VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: QOS Lost indication received", __func__); // current TSPEC is no longer valid 
pAc->wmmAcTspecValid = VOS_FALSE; // need to tell TL to update its UAPSD handling hdd_wmm_disable_tl_uapsd(pQosContext); if (HDD_WMM_HANDLE_IMPLICIT == pQosContext->handle) { // we no longer have implicit access granted pAc->wmmAcAccessGranted = VOS_FALSE; pAc->wmmAcAccessFailed = VOS_FALSE; } else { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: Explicit Qos, notifying userspace", __func__); // this was triggered by an application pQosContext->lastStatus = HDD_WLAN_WMM_STATUS_LOST; hdd_wmm_notify_app(pQosContext); } // we are done with this flow hdd_wmm_free_context(pQosContext); break; case SME_QOS_STATUS_RELEASE_REQ_PENDING_RSP: VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: Release pending", __func__); // not a callback status -- ignore if we get it break; case SME_QOS_STATUS_RELEASE_INVALID_PARAMS_RSP: VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: Release Invalid Params", __func__); if (HDD_WMM_HANDLE_IMPLICIT != pQosContext->handle) { // this was triggered by an application pQosContext->lastStatus = HDD_WLAN_WMM_STATUS_RELEASE_FAILED_BAD_PARAM; hdd_wmm_notify_app(pQosContext); } break; case SME_QOS_STATUS_MODIFY_SETUP_SUCCESS_IND: VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: Modification is complete, notify TL", __func__); // there will always be a TSPEC returned with this status, even if // a TSPEC is not exchanged OTA if (pCurrentQosInfo) { pAc->wmmAcTspecValid = VOS_TRUE; memcpy(&pAc->wmmAcTspecInfo, pCurrentQosInfo, sizeof(pAc->wmmAcTspecInfo)); } if (HDD_WMM_HANDLE_IMPLICIT != pQosContext->handle) { // this was triggered by an application pQosContext->lastStatus = HDD_WLAN_WMM_STATUS_MODIFY_SUCCESS; hdd_wmm_notify_app(pQosContext); } // notify TL to enable trigger frames if necessary hdd_wmm_enable_tl_uapsd(pQosContext); break; case SME_QOS_STATUS_MODIFY_SETUP_SUCCESS_APSD_SET_ALREADY: if (HDD_WMM_HANDLE_IMPLICIT != pQosContext->handle) { // this was triggered by an application pQosContext->lastStatus = HDD_WLAN_WMM_STATUS_MODIFY_SUCCESS_NO_ACM_UAPSD_EXISTING; hdd_wmm_notify_app(pQosContext); } break; case SME_QOS_STATUS_MODIFY_SETUP_FAILURE_RSP: // the flow modification failed so we'll leave in place // whatever existed beforehand if (HDD_WMM_HANDLE_IMPLICIT != pQosContext->handle) { // this was triggered by an application pQosContext->lastStatus = HDD_WLAN_WMM_STATUS_MODIFY_FAILED; hdd_wmm_notify_app(pQosContext); } break; case SME_QOS_STATUS_MODIFY_SETUP_PENDING_RSP: VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: modification pending", __func__); // not a callback status -- ignore if we get it break; case SME_QOS_STATUS_MODIFY_SETUP_SUCCESS_NO_ACM_NO_APSD_RSP: // the flow modification was successful but no QoS changes required if (HDD_WMM_HANDLE_IMPLICIT != pQosContext->handle) { // this was triggered by an application pQosContext->lastStatus = HDD_WLAN_WMM_STATUS_MODIFY_SUCCESS_NO_ACM_NO_UAPSD; hdd_wmm_notify_app(pQosContext); } break; case SME_QOS_STATUS_MODIFY_SETUP_INVALID_PARAMS_RSP: // invalid params -- notify the application if (HDD_WMM_HANDLE_IMPLICIT != pQosContext->handle) { // this was triggered by an application pQosContext->lastStatus = HDD_WLAN_WMM_STATUS_MODIFY_FAILED_BAD_PARAM; hdd_wmm_notify_app(pQosContext); } break; case SME_QOS_STATUS_MODIFY_SETUP_SUCCESS_IND_APSD_PENDING: // nothing to do for now. 
/* when APSD is established we'll have work to do */ break; case SME_QOS_STATUS_MODIFY_SETUP_SUCCESS_IND_APSD_SET_FAILED: VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: Modify successful but U-APSD failed", __func__); // QoS modification was successful but setting U-APSD failed. // This will always be an explicit QoS instance, so all we can // do is notify the application and let it clean up. if (HDD_WMM_HANDLE_IMPLICIT != pQosContext->handle) { // this was triggered by an application pQosContext->lastStatus = HDD_WLAN_WMM_STATUS_MODIFY_UAPSD_SET_FAILED; hdd_wmm_notify_app(pQosContext); } // Since U-APSD portion failed, disable trigger frame generation hdd_wmm_disable_tl_uapsd(pQosContext); break; case SME_QOS_STATUS_HANDING_OFF: // no roaming so we won't see this break; case SME_QOS_STATUS_OUT_OF_APSD_POWER_MODE_IND: // need to tell TL to stop trigger frame generation hdd_wmm_disable_tl_uapsd(pQosContext); break; case SME_QOS_STATUS_INTO_APSD_POWER_MODE_IND: // need to tell TL to start sending trigger frames again hdd_wmm_enable_tl_uapsd(pQosContext); break; default: VOS_TRACE( VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: unexpected SME Status=%d", __func__, smeStatus ); VOS_ASSERT(0); } // our access to the particular access category may have changed. // some of the implicit QoS cases above may have already set this // prior to invoking TL (so that we will properly service the // Tx queues) but let's consistently handle all cases here pAc->wmmAcAccessAllowed = hdd_wmm_is_access_allowed(pAdapter, pAc); VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: complete, access for TL AC %d is%sallowed", __func__, acType, pAc->wmmAcAccessAllowed ? " " : " not "); return eHAL_STATUS_SUCCESS; } #endif /**======================================================================== @brief hdd_wmmps_helper() - Function to set uapsd psb dynamically @param pAdapter : [in] pointer to adapter structure @param ptr : [in] pointer to command buffer @return : Zero on success, appropriate error on failure.
=======================================================================*/ int hdd_wmmps_helper(hdd_adapter_t *pAdapter, tANI_U8 *ptr) { if (NULL == pAdapter) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: pAdapter is NULL", __func__); return -EINVAL; } if (NULL == ptr) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: ptr is NULL", __func__); return -EINVAL; } /* convert ASCII to integer */ pAdapter->configuredPsb = ptr[9] - '0'; pAdapter->psbChanged = HDD_PSB_CHANGED; return 0; } /**============================================================================ @brief hdd_wmm_do_implicit_qos() - Function which will attempt to setup QoS for any AC requiring it @param work : [in] pointer to work structure @return : void ===========================================================================*/ static void hdd_wmm_do_implicit_qos(struct work_struct *work) { hdd_wmm_qos_context_t* pQosContext = container_of(work, hdd_wmm_qos_context_t, wmmAcSetupImplicitQos); hdd_adapter_t* pAdapter; WLANTL_ACEnumType acType; hdd_wmm_ac_status_t *pAc; #ifndef WLAN_MDM_CODE_REDUCTION_OPT VOS_STATUS status; sme_QosStatusType smeStatus; #endif sme_QosWmmTspecInfo qosInfo; v_CONTEXT_t pVosContext = vos_get_global_context( VOS_MODULE_ID_HDD, NULL ); hdd_context_t *pHddCtx; if (NULL != pVosContext) { pHddCtx = vos_get_context( VOS_MODULE_ID_HDD, pVosContext); if (NULL == pHddCtx) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, FL("HddCtx is NULL")); return; } } VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: Entered, context %p", __func__, pQosContext); mutex_lock(&pHddCtx->wmmLock); if (unlikely(HDD_WMM_CTX_MAGIC != pQosContext->magic)) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: Invalid QoS Context", __func__); mutex_unlock(&pHddCtx->wmmLock); return; } mutex_unlock(&pHddCtx->wmmLock); pAdapter = pQosContext->pAdapter; acType = pQosContext->acType; pAc = &pAdapter->hddWmmStatus.wmmAcStatus[acType]; VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: pAdapter %p acType %d", __func__, pAdapter, acType); if (!pAc->wmmAcAccessNeeded) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: AC %d doesn't need service", __func__, acType); pQosContext->magic = 0; kfree(pQosContext); return; } pAc->wmmAcAccessPending = VOS_TRUE; pAc->wmmAcAccessNeeded = VOS_FALSE; memset(&qosInfo, 0, sizeof(qosInfo)); qosInfo.ts_info.psb = pAdapter->configuredPsb; switch (acType) { case WLANTL_AC_VO: qosInfo.ts_info.up = SME_QOS_WMM_UP_VO; /* Check if there is any valid configuration from framework */ if (HDD_PSB_CFG_INVALID == pAdapter->configuredPsb) { qosInfo.ts_info.psb = ((WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->UapsdMask & SME_QOS_UAPSD_VO) ? 
1 : 0; } qosInfo.ts_info.direction = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraDirAcVo; qosInfo.ts_info.tid = 255; qosInfo.mean_data_rate = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraMeanDataRateAcVo; qosInfo.min_phy_rate = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraMinPhyRateAcVo; qosInfo.min_service_interval = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraUapsdVoSrvIntv; qosInfo.nominal_msdu_size = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraNomMsduSizeAcVo; qosInfo.surplus_bw_allowance = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraSbaAcVo; qosInfo.suspension_interval = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraUapsdVoSuspIntv; break; case WLANTL_AC_VI: qosInfo.ts_info.up = SME_QOS_WMM_UP_VI; /* Check if there is any valid configuration from framework */ if (HDD_PSB_CFG_INVALID == pAdapter->configuredPsb) { qosInfo.ts_info.psb = ((WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->UapsdMask & SME_QOS_UAPSD_VI) ? 1 : 0; } qosInfo.ts_info.direction = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraDirAcVi; qosInfo.ts_info.tid = 255; qosInfo.mean_data_rate = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraMeanDataRateAcVi; qosInfo.min_phy_rate = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraMinPhyRateAcVi; qosInfo.min_service_interval = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraUapsdViSrvIntv; qosInfo.nominal_msdu_size = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraNomMsduSizeAcVi; qosInfo.surplus_bw_allowance = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraSbaAcVi; qosInfo.suspension_interval = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraUapsdViSuspIntv; break; case WLANTL_AC_BE: qosInfo.ts_info.up = SME_QOS_WMM_UP_BE; /* Check if there is any valid configuration from framework */ if (HDD_PSB_CFG_INVALID == pAdapter->configuredPsb) { qosInfo.ts_info.psb = ((WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->UapsdMask & SME_QOS_UAPSD_BE) ? 1 : 0; } qosInfo.ts_info.direction = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraDirAcBe; qosInfo.ts_info.tid = 255; qosInfo.mean_data_rate = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraMeanDataRateAcBe; qosInfo.min_phy_rate = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraMinPhyRateAcBe; qosInfo.min_service_interval = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraUapsdBeSrvIntv; qosInfo.nominal_msdu_size = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraNomMsduSizeAcBe; qosInfo.surplus_bw_allowance = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraSbaAcBe; qosInfo.suspension_interval = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraUapsdBeSuspIntv; break; case WLANTL_AC_BK: qosInfo.ts_info.up = SME_QOS_WMM_UP_BK; /* Check if there is any valid configuration from framework */ if (HDD_PSB_CFG_INVALID == pAdapter->configuredPsb) { qosInfo.ts_info.psb = ((WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->UapsdMask & SME_QOS_UAPSD_BK) ? 
1 : 0; } qosInfo.ts_info.direction = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraDirAcBk; qosInfo.ts_info.tid = 255; qosInfo.mean_data_rate = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraMeanDataRateAcBk; qosInfo.min_phy_rate = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraMinPhyRateAcBk; qosInfo.min_service_interval = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraUapsdBkSrvIntv; qosInfo.nominal_msdu_size = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraNomMsduSizeAcBk; qosInfo.surplus_bw_allowance = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraSbaAcBk; qosInfo.suspension_interval = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraUapsdBkSuspIntv; break; default: VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: Invalid AC %d", __func__, acType ); return; } #ifdef FEATURE_WLAN_ESE qosInfo.inactivity_interval = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraInactivityInterval; #endif qosInfo.ts_info.burst_size_defn = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->burstSizeDefinition; switch ((WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->tsInfoAckPolicy) { case HDD_WLAN_WMM_TS_INFO_ACK_POLICY_NORMAL_ACK: qosInfo.ts_info.ack_policy = SME_QOS_WMM_TS_ACK_POLICY_NORMAL_ACK; break; case HDD_WLAN_WMM_TS_INFO_ACK_POLICY_HT_IMMEDIATE_BLOCK_ACK: qosInfo.ts_info.ack_policy = SME_QOS_WMM_TS_ACK_POLICY_HT_IMMEDIATE_BLOCK_ACK; break; default: // unknown qosInfo.ts_info.ack_policy = SME_QOS_WMM_TS_ACK_POLICY_NORMAL_ACK; } if(qosInfo.ts_info.ack_policy == SME_QOS_WMM_TS_ACK_POLICY_HT_IMMEDIATE_BLOCK_ACK) { if(!sme_QosIsTSInfoAckPolicyValid((tpAniSirGlobal)WLAN_HDD_GET_HAL_CTX(pAdapter), &qosInfo, pAdapter->sessionId)) { qosInfo.ts_info.ack_policy = SME_QOS_WMM_TS_ACK_POLICY_NORMAL_ACK; } } mutex_lock(&pHddCtx->wmmLock); list_add(&pQosContext->node, &pAdapter->hddWmmStatus.wmmContextList); mutex_unlock(&pHddCtx->wmmLock); #ifndef WLAN_MDM_CODE_REDUCTION_OPT smeStatus = sme_QosSetupReq(WLAN_HDD_GET_HAL_CTX(pAdapter), pAdapter->sessionId, &qosInfo, hdd_wmm_sme_callback, pQosContext, qosInfo.ts_info.up, &pQosContext->qosFlowId); VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: sme_QosSetupReq returned %d flowid %d", __func__, smeStatus, pQosContext->qosFlowId); // need to check the return values and act appropriately switch (smeStatus) { case SME_QOS_STATUS_SETUP_REQ_PENDING_RSP: case SME_QOS_STATUS_SETUP_SUCCESS_IND_APSD_PENDING: // setup is pending, so no more work to do now. 
// all further work will be done in hdd_wmm_sme_callback() VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: Setup is pending, no further work", __func__); break; case SME_QOS_STATUS_SETUP_FAILURE_RSP: // we can't tell the difference between when a request fails because // AP rejected it versus when SME encountered an internal error // in either case SME won't ever reference this context so // free the record hdd_wmm_free_context(pQosContext); // fall through and start packets flowing case SME_QOS_STATUS_SETUP_SUCCESS_NO_ACM_NO_APSD_RSP: // no ACM in effect, no need to setup U-APSD case SME_QOS_STATUS_SETUP_SUCCESS_APSD_SET_ALREADY: // no ACM in effect, U-APSD is desired but was already setup // for these cases everything is already setup so we can // signal TL that it has work to do VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: Setup is complete, notify TL", __func__); pAc->wmmAcAccessAllowed = VOS_TRUE; pAc->wmmAcAccessGranted = VOS_TRUE; pAc->wmmAcAccessPending = VOS_FALSE; status = WLANTL_STAPktPending( (WLAN_HDD_GET_CTX(pAdapter))->pvosContext, (WLAN_HDD_GET_STATION_CTX_PTR(pAdapter))->conn_info.staId[0], acType ); if ( !VOS_IS_STATUS_SUCCESS( status ) ) { VOS_TRACE( VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: Failed to signal TL for AC=%d", __func__, acType ); } break; default: VOS_TRACE( VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: unexpected SME Status=%d", __func__, smeStatus ); VOS_ASSERT(0); } #endif } /**============================================================================ @brief hdd_wmm_init() - Function which will initialize the WMM configuation and status to an initial state. The configuration can later be overwritten via application APIs @param pAdapter : [in] pointer to Adapter context @return : VOS_STATUS_SUCCESS if successful : other values if failure ===========================================================================*/ VOS_STATUS hdd_wmm_init ( hdd_adapter_t *pAdapter ) { sme_QosWmmUpType* hddWmmDscpToUpMap = pAdapter->hddWmmDscpToUpMap; v_U8_t dscp; VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: Entered", __func__); // DSCP to User Priority Lookup Table for (dscp = 0; dscp <= WLAN_HDD_MAX_DSCP; dscp++) { hddWmmDscpToUpMap[dscp] = SME_QOS_WMM_UP_BE; } hddWmmDscpToUpMap[8] = SME_QOS_WMM_UP_BK; hddWmmDscpToUpMap[16] = SME_QOS_WMM_UP_RESV; hddWmmDscpToUpMap[24] = SME_QOS_WMM_UP_EE; hddWmmDscpToUpMap[32] = SME_QOS_WMM_UP_CL; hddWmmDscpToUpMap[40] = SME_QOS_WMM_UP_VI; hddWmmDscpToUpMap[48] = SME_QOS_WMM_UP_VO; hddWmmDscpToUpMap[56] = SME_QOS_WMM_UP_NC; return VOS_STATUS_SUCCESS; } /**============================================================================ @brief hdd_wmm_adapter_init() - Function which will initialize the WMM configuation and status to an initial state. 
The configuration can later be overwritten via application APIs @param pAdapter : [in] pointer to Adapter context @return : VOS_STATUS_SUCCESS if succssful : other values if failure ===========================================================================*/ VOS_STATUS hdd_wmm_adapter_init( hdd_adapter_t *pAdapter ) { hdd_wmm_ac_status_t *pAcStatus; WLANTL_ACEnumType acType; VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: Entered", __func__); pAdapter->hddWmmStatus.wmmQap = VOS_FALSE; INIT_LIST_HEAD(&pAdapter->hddWmmStatus.wmmContextList); for (acType = 0; acType < WLANTL_MAX_AC; acType++) { pAcStatus = &pAdapter->hddWmmStatus.wmmAcStatus[acType]; pAcStatus->wmmAcAccessRequired = VOS_FALSE; pAcStatus->wmmAcAccessNeeded = VOS_FALSE; pAcStatus->wmmAcAccessPending = VOS_FALSE; pAcStatus->wmmAcAccessFailed = VOS_FALSE; pAcStatus->wmmAcAccessGranted = VOS_FALSE; pAcStatus->wmmAcAccessAllowed = VOS_FALSE; pAcStatus->wmmAcTspecValid = VOS_FALSE; pAcStatus->wmmAcUapsdInfoValid = VOS_FALSE; } // Invalid value(0xff) to indicate psb not configured through framework initially. pAdapter->configuredPsb = HDD_PSB_CFG_INVALID; return VOS_STATUS_SUCCESS; } /**============================================================================ @brief hdd_wmm_adapter_clear() - Function which will clear the WMM status for all the ACs @param pAdapter : [in] pointer to Adapter context @return : VOS_STATUS_SUCCESS if succssful : other values if failure ===========================================================================*/ VOS_STATUS hdd_wmm_adapter_clear( hdd_adapter_t *pAdapter ) { hdd_wmm_ac_status_t *pAcStatus; WLANTL_ACEnumType acType; VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: Entered", __func__); for (acType = 0; acType < WLANTL_MAX_AC; acType++) { pAcStatus = &pAdapter->hddWmmStatus.wmmAcStatus[acType]; pAcStatus->wmmAcAccessRequired = VOS_FALSE; pAcStatus->wmmAcAccessNeeded = VOS_FALSE; pAcStatus->wmmAcAccessPending = VOS_FALSE; pAcStatus->wmmAcAccessFailed = VOS_FALSE; pAcStatus->wmmAcAccessGranted = VOS_FALSE; pAcStatus->wmmAcAccessAllowed = VOS_FALSE; pAcStatus->wmmAcTspecValid = VOS_FALSE; pAcStatus->wmmAcUapsdInfoValid = VOS_FALSE; } return VOS_STATUS_SUCCESS; } /**============================================================================ @brief hdd_wmm_close() - Function which will perform any necessary work to to clean up the WMM functionality prior to the kernel module unload @param pAdapter : [in] pointer to adapter context @return : VOS_STATUS_SUCCESS if succssful : other values if failure ===========================================================================*/ VOS_STATUS hdd_wmm_adapter_close ( hdd_adapter_t* pAdapter ) { hdd_wmm_qos_context_t* pQosContext; v_CONTEXT_t pVosContext = vos_get_global_context( VOS_MODULE_ID_HDD, NULL ); hdd_context_t *pHddCtx; if (NULL != pVosContext) { pHddCtx = vos_get_context( VOS_MODULE_ID_HDD, pVosContext); if (NULL == pHddCtx) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, FL("HddCtx is NULL")); return VOS_STATUS_E_FAILURE; } } VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: Entered", __func__); // free any context records that we still have linked while (!list_empty(&pAdapter->hddWmmStatus.wmmContextList)) { pQosContext = list_first_entry(&pAdapter->hddWmmStatus.wmmContextList, hdd_wmm_qos_context_t, node); #ifdef FEATURE_WLAN_ESE hdd_wmm_disable_inactivity_timer(pQosContext); #endif #ifdef WLAN_OPEN_SOURCE mutex_lock(&pHddCtx->wmmLock); if (pQosContext->handle == HDD_WMM_HANDLE_IMPLICIT && 
pQosContext->magic == HDD_WMM_CTX_MAGIC) { cancel_work_sync(&pQosContext->wmmAcSetupImplicitQos); } mutex_unlock(&pHddCtx->wmmLock); #endif hdd_wmm_free_context(pQosContext); } return VOS_STATUS_SUCCESS; } /**============================================================================ @brief hdd_is_dhcp_packet() - Function which will check OS packet for DHCP packet @param skb : [in] pointer to OS packet (sk_buff) @return : VOS_TRUE if the OS packet is DHCP packet : otherwise VOS_FALSE ===========================================================================*/ v_BOOL_t hdd_is_dhcp_packet(struct sk_buff *skb) { if (*((u16*)((u8*)skb->data+34)) == DHCP_SOURCE_PORT || *((u16*)((u8*)skb->data+34)) == DHCP_DESTINATION_PORT) return VOS_TRUE; return VOS_FALSE; } /**============================================================================ @brief hdd_skb_is_eapol_or_wai_packet() - Function which will check OS packet for Eapol/Wapi packet @param skb : [in] pointer to OS packet (sk_buff) @return : VOS_TRUE if the OS packet is an Eapol or a Wapi packet : otherwise VOS_FALSE ===========================================================================*/ v_BOOL_t hdd_skb_is_eapol_or_wai_packet(struct sk_buff *skb) { if ((*((u16*)((u8*)skb->data+HDD_ETHERTYPE_802_1_X_FRAME_OFFSET)) == vos_cpu_to_be16(HDD_ETHERTYPE_802_1_X)) #ifdef FEATURE_WLAN_WAPI || (*((u16*)((u8*)skb->data+HDD_ETHERTYPE_802_1_X_FRAME_OFFSET)) == vos_cpu_to_be16(HDD_ETHERTYPE_WAI)) #endif ) return VOS_TRUE; return VOS_FALSE; } /**============================================================================ @brief hdd_wmm_classify_pkt() - Function which will classify an OS packet into a WMM AC based on either 802.1Q or DSCP @param pAdapter : [in] pointer to adapter context @param skb : [in] pointer to OS packet (sk_buff) @param pAcType : [out] pointer to WMM AC type of OS packet @return : None ===========================================================================*/ v_VOID_t hdd_wmm_classify_pkt ( hdd_adapter_t* pAdapter, struct sk_buff *skb, WLANTL_ACEnumType* pAcType, sme_QosWmmUpType *pUserPri) { unsigned char * pPkt; union generic_ethhdr *pHdr; struct iphdr *pIpHdr; unsigned char tos; unsigned char dscp; sme_QosWmmUpType userPri; WLANTL_ACEnumType acType; // this code is executed for every packet therefore // all debug code is kept conditional #ifdef HDD_WMM_DEBUG VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: Entered", __func__); #endif // HDD_WMM_DEBUG pPkt = skb->data; pHdr = (union generic_ethhdr *)pPkt; #ifdef HDD_WMM_DEBUG VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: proto/length is 0x%04x", __func__, pHdr->eth_II.h_proto); #endif // HDD_WMM_DEBUG if (HDD_WMM_CLASSIFICATION_DSCP == (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->PktClassificationBasis) { if (pHdr->eth_II.h_proto == htons(ETH_P_IP)) { // case 1: Ethernet II IP packet pIpHdr = (struct iphdr *)&pPkt[sizeof(pHdr->eth_II)]; tos = pIpHdr->tos; #ifdef HDD_WMM_DEBUG VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: Ethernet II IP Packet, tos is %d", __func__, tos); #endif // HDD_WMM_DEBUG } else if ((ntohs(pHdr->eth_II.h_proto) < WLAN_MIN_PROTO) && (pHdr->eth_8023.h_snap.dsap == WLAN_SNAP_DSAP) && (pHdr->eth_8023.h_snap.ssap == WLAN_SNAP_SSAP) && (pHdr->eth_8023.h_snap.ctrl == WLAN_SNAP_CTRL) && (pHdr->eth_8023.h_proto == htons(ETH_P_IP))) { // case 2: 802.3 LLC/SNAP IP packet pIpHdr = (struct iphdr *)&pPkt[sizeof(pHdr->eth_8023)]; tos = pIpHdr->tos; #ifdef HDD_WMM_DEBUG VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: 
802.3 LLC/SNAP IP Packet, tos is %d", __func__, tos); #endif // HDD_WMM_DEBUG } else if (pHdr->eth_II.h_proto == htons(ETH_P_8021Q)) { // VLAN tagged if (pHdr->eth_IIv.h_vlan_encapsulated_proto == htons(ETH_P_IP)) { // case 3: Ethernet II vlan-tagged IP packet pIpHdr = (struct iphdr *)&pPkt[sizeof(pHdr->eth_IIv)]; tos = pIpHdr->tos; #ifdef HDD_WMM_DEBUG VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: Ethernet II VLAN tagged IP Packet, tos is %d", __func__, tos); #endif // HDD_WMM_DEBUG } else if ((ntohs(pHdr->eth_IIv.h_vlan_encapsulated_proto) < WLAN_MIN_PROTO) && (pHdr->eth_8023v.h_snap.dsap == WLAN_SNAP_DSAP) && (pHdr->eth_8023v.h_snap.ssap == WLAN_SNAP_SSAP) && (pHdr->eth_8023v.h_snap.ctrl == WLAN_SNAP_CTRL) && (pHdr->eth_8023v.h_proto == htons(ETH_P_IP))) { // case 4: 802.3 LLC/SNAP vlan-tagged IP packet pIpHdr = (struct iphdr *)&pPkt[sizeof(pHdr->eth_8023v)]; tos = pIpHdr->tos; #ifdef HDD_WMM_DEBUG VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: 802.3 LLC/SNAP VLAN tagged IP Packet, tos is %d", __func__, tos); #endif // HDD_WMM_DEBUG } else { // default #ifdef HDD_WMM_DEBUG VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_WARN, "%s: VLAN tagged Unhandled Protocol, using default tos", __func__); #endif // HDD_WMM_DEBUG tos = 0; } } else { v_BOOL_t toggleArpBDRates = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->toggleArpBDRates; // default #ifdef HDD_WMM_DEBUG VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_WARN, "%s: Unhandled Protocol, using default tos", __func__); #endif // HDD_WMM_DEBUG //Give the highest priority to 802.1x packet if (pHdr->eth_II.h_proto == htons(HDD_ETHERTYPE_802_1_X)) tos = 0xC0; else if (TRUE == toggleArpBDRates && pHdr->eth_II.h_proto == htons(HDD_ETHERTYPE_ARP)) { tos = TID3; } else tos = 0; } dscp = (tos>>2) & 0x3f; userPri = pAdapter->hddWmmDscpToUpMap[dscp]; #ifdef HDD_WMM_DEBUG VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: tos is %d, dscp is %d, up is %d", __func__, tos, dscp, userPri); #endif // HDD_WMM_DEBUG } else if (HDD_WMM_CLASSIFICATION_802_1Q == (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->PktClassificationBasis) { if (pHdr->eth_IIv.h_vlan_proto == htons(ETH_P_8021Q)) { // VLAN tagged userPri = (ntohs(pHdr->eth_IIv.h_vlan_TCI)>>13) & 0x7; #ifdef HDD_WMM_DEBUG VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: Tagged frame, UP is %d", __func__, userPri); #endif // HDD_WMM_DEBUG } else { // not VLAN tagged, use default #ifdef HDD_WMM_DEBUG VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_WARN, "%s: Untagged frame, using default UP", __func__); #endif // HDD_WMM_DEBUG //Give the highest priority to 802.1x packet if (pHdr->eth_II.h_proto == htons(HDD_ETHERTYPE_802_1_X)) userPri = SME_QOS_WMM_UP_VO; else userPri = SME_QOS_WMM_UP_BE; } } else { // default #ifdef HDD_WMM_DEBUG VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: Unknown classification scheme, using default UP", __func__); #endif // HDD_WMM_DEBUG userPri = SME_QOS_WMM_UP_BE; } acType = hddWmmUpToAcMap[userPri]; #ifdef HDD_WMM_DEBUG VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: UP is %d, AC is %d", __func__, userPri, acType); #endif // HDD_WMM_DEBUG *pUserPri = userPri; *pAcType = acType; return; } /**============================================================================ @brief hdd_hostapd_select_quueue() - Function which will classify the packet according to linux qdisc expectation. 
@param dev : [in] pointer to net_device structure @param skb : [in] pointer to os packet @return : Qdisc queue index ===========================================================================*/ v_U16_t hdd_hostapd_select_queue(struct net_device * dev, struct sk_buff *skb) { WLANTL_ACEnumType ac; sme_QosWmmUpType up = SME_QOS_WMM_UP_BE; v_USHORT_t queueIndex; v_MACADDR_t *pDestMacAddress = (v_MACADDR_t*)skb->data; hdd_adapter_t *pAdapter = (hdd_adapter_t *)netdev_priv(dev); hdd_context_t *pHddCtx = WLAN_HDD_GET_CTX(pAdapter); v_U8_t STAId; v_U8_t *pSTAId = (v_U8_t *)(((v_U8_t *)(skb->data)) - 1); /*Get the Station ID*/ if (VOS_STATUS_SUCCESS != hdd_softap_GetStaId(pAdapter, pDestMacAddress, &STAId)) { VOS_TRACE( VOS_MODULE_ID_HDD_SOFTAP, VOS_TRACE_LEVEL_INFO, "%s: Failed to find right station", __func__); *pSTAId = HDD_WLAN_INVALID_STA_ID; goto done; } spin_lock_bh( &pAdapter->staInfo_lock ); if (FALSE == vos_is_macaddr_equal(&pAdapter->aStaInfo[STAId].macAddrSTA, pDestMacAddress)) { VOS_TRACE( VOS_MODULE_ID_HDD_SOFTAP, VOS_TRACE_LEVEL_INFO, "%s: Station MAC address does not matching", __func__); *pSTAId = HDD_WLAN_INVALID_STA_ID; goto release_lock; } if (pAdapter->aStaInfo[STAId].isUsed && pAdapter->aStaInfo[STAId].isQosEnabled && (HDD_WMM_USER_MODE_NO_QOS != pHddCtx->cfg_ini->WmmMode)) { /* Get the user priority from IP header & corresponding AC */ hdd_wmm_classify_pkt (pAdapter, skb, &ac, &up); //If 3/4th of Tx queue is used then place the DHCP packet in VOICE AC queue if (pAdapter->aStaInfo[STAId].vosLowResource && hdd_is_dhcp_packet(skb)) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_WARN, "%s: Making priority of DHCP packet as VOICE", __func__); up = SME_QOS_WMM_UP_VO; ac = hddWmmUpToAcMap[up]; } } *pSTAId = STAId; release_lock: spin_unlock_bh( &pAdapter->staInfo_lock ); done: skb->priority = up; if(skb->priority < SME_QOS_WMM_UP_MAX) queueIndex = hddLinuxUpToAcMap[skb->priority]; else { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: up=%d is going beyond max value", __func__, up); queueIndex = hddLinuxUpToAcMap[SME_QOS_WMM_UP_BE]; } return queueIndex; } /**============================================================================ @brief hdd_wmm_select_quueue() - Function which will classify the packet according to linux qdisc expectation. 
@param dev : [in] pointer to net_device structure @param skb : [in] pointer to os packet @return : Qdisc queue index ===========================================================================*/ v_U16_t hdd_wmm_select_queue(struct net_device * dev, struct sk_buff *skb) { WLANTL_ACEnumType ac; sme_QosWmmUpType up = SME_QOS_WMM_UP_BE; v_USHORT_t queueIndex; hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev); if (isWDresetInProgress()) { VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN, FL("called during WDReset")); skb->priority = SME_QOS_WMM_UP_BE; return HDD_LINUX_AC_BE; } /*Get the Station ID*/ if (WLAN_HDD_IBSS == pAdapter->device_mode) { v_U8_t *pSTAId = (v_U8_t *)(((v_U8_t *)(skb->data)) - 1); v_MACADDR_t *pDestMacAddress = (v_MACADDR_t*)skb->data; if ( VOS_STATUS_SUCCESS != hdd_Ibss_GetStaId(&pAdapter->sessionCtx.station, pDestMacAddress, pSTAId)) { *pSTAId = HDD_WLAN_INVALID_STA_ID; if ( !vos_is_macaddr_broadcast( pDestMacAddress ) && !vos_is_macaddr_group(pDestMacAddress)) { VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO, "%s: Failed to find right station pDestMacAddress: " MAC_ADDRESS_STR , __func__, MAC_ADDR_ARRAY(pDestMacAddress->bytes)); goto done; } } } /* All traffic will get equal opportuniy to transmit data frames. */ /* Get the user priority from IP header & corresponding AC */ hdd_wmm_classify_pkt (pAdapter, skb, &ac, &up); /* If 3/4th of BE AC Tx queue is full, * then place the DHCP packet in VOICE AC queue. * Doing this for IBSS alone, since for STA interface * types, these packets will be queued to the new queue. */ if ((WLAN_HDD_IBSS == pAdapter->device_mode) && pAdapter->isVosLowResource && hdd_is_dhcp_packet(skb)) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_WARN, "%s: BestEffort Tx Queue is 3/4th full" " Make DHCP packet's pri as VO", __func__); up = SME_QOS_WMM_UP_VO; ac = hddWmmUpToAcMap[up]; } done: skb->priority = up; if(skb->priority < SME_QOS_WMM_UP_MAX) queueIndex = hddLinuxUpToAcMap[skb->priority]; else { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: up=%d is going beyond max value", __func__, up); queueIndex = hddLinuxUpToAcMap[SME_QOS_WMM_UP_BE]; } if ((WLAN_HDD_IBSS != pAdapter->device_mode) && (hdd_is_dhcp_packet(skb) || hdd_skb_is_eapol_or_wai_packet(skb))) { /* If the packet is a DHCP packet or a Eapol packet or * a Wapi packet, then queue it to the new queue for * STA interfaces alone. */ queueIndex = WLANTL_AC_HIGH_PRIO; VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: up=%d QIndex:%d", __func__, up, queueIndex); } return queueIndex; } /**========================================================================== @brief hdd_wmm_acquire_access_required() - Function which will determine acquire admittance for a WMM AC is required or not based on psb configuration done in framework @param pAdapter : [in] pointer to adapter structure @param acType : [in] WMM AC type of OS packet @return : void ===========================================================================*/ void hdd_wmm_acquire_access_required(hdd_adapter_t *pAdapter, WLANTL_ACEnumType acType) { /* Each bit in the LSB nibble indicates 1 AC. 
* Clearing the particular bit in LSB nibble to indicate * access required */ switch(acType) { case WLANTL_AC_BK: pAdapter->psbChanged &= ~SME_QOS_UAPSD_CFG_BK_CHANGED_MASK; /* clear first bit */ break; case WLANTL_AC_BE: pAdapter->psbChanged &= ~SME_QOS_UAPSD_CFG_BE_CHANGED_MASK; /* clear second bit */ break; case WLANTL_AC_VI: pAdapter->psbChanged &= ~SME_QOS_UAPSD_CFG_VI_CHANGED_MASK; /* clear third bit */ break; case WLANTL_AC_VO: pAdapter->psbChanged &= ~SME_QOS_UAPSD_CFG_VO_CHANGED_MASK; /* clear fourth bit */ break; default: VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: Invalid AC Type", __func__); break; } } /**============================================================================ @brief hdd_wmm_acquire_access() - Function which will attempt to acquire admittance for a WMM AC @param pAdapter : [in] pointer to adapter context @param acType : [in] WMM AC type of OS packet @param pGranted : [out] pointer to boolean flag when indicates if access has been granted or not @return : VOS_STATUS_SUCCESS if succssful : other values if failure ===========================================================================*/ VOS_STATUS hdd_wmm_acquire_access( hdd_adapter_t* pAdapter, WLANTL_ACEnumType acType, v_BOOL_t * pGranted ) { hdd_wmm_qos_context_t *pQosContext; VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: Entered for AC %d", __func__, acType); if (!hdd_wmm_is_active(pAdapter) || !(WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->bImplicitQosEnabled) { // either we don't want QoS or the AP doesn't support QoS // or we don't want to do implicit QoS VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: QoS not configured on both ends ", __func__); pAdapter->hddWmmStatus.wmmAcStatus[acType].wmmAcAccessAllowed = VOS_TRUE; *pGranted = VOS_TRUE; return VOS_STATUS_SUCCESS; } // do we already have an implicit QoS request pending for this AC? if ((pAdapter->hddWmmStatus.wmmAcStatus[acType].wmmAcAccessNeeded) || (pAdapter->hddWmmStatus.wmmAcStatus[acType].wmmAcAccessPending)) { // request already pending so we need to wait for that response VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: Implicit QoS for TL AC %d already scheduled", __func__, acType); *pGranted = VOS_FALSE; return VOS_STATUS_SUCCESS; } // did we already fail to establish implicit QoS for this AC? // (if so, access should have been granted when the failure was handled) if (pAdapter->hddWmmStatus.wmmAcStatus[acType].wmmAcAccessFailed) { // request previously failed // allow access, but we'll be downgraded VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: Implicit QoS for TL AC %d previously failed", __func__, acType); pAdapter->hddWmmStatus.wmmAcStatus[acType].wmmAcAccessAllowed = VOS_TRUE; *pGranted = VOS_TRUE; return VOS_STATUS_SUCCESS; } // we need to establish implicit QoS VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: Need to schedule implicit QoS for TL AC %d, pAdapter is %p", __func__, acType, pAdapter); pAdapter->hddWmmStatus.wmmAcStatus[acType].wmmAcAccessNeeded = VOS_TRUE; pQosContext = kmalloc(sizeof(*pQosContext), GFP_ATOMIC); if (NULL == pQosContext) { // no memory for QoS context. 
/* Nothing we can do but let data flow */ VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: Unable to allocate context", __func__); pAdapter->hddWmmStatus.wmmAcStatus[acType].wmmAcAccessAllowed = VOS_TRUE; *pGranted = VOS_TRUE; return VOS_STATUS_SUCCESS; } pQosContext->acType = acType; pQosContext->pAdapter = pAdapter; pQosContext->qosFlowId = 0; pQosContext->handle = HDD_WMM_HANDLE_IMPLICIT; pQosContext->magic = HDD_WMM_CTX_MAGIC; INIT_WORK(&pQosContext->wmmAcSetupImplicitQos, hdd_wmm_do_implicit_qos); VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: Scheduling work for AC %d, context %p", __func__, acType, pQosContext); schedule_work(&pQosContext->wmmAcSetupImplicitQos); // caller will need to wait until the work takes place and // TSPEC negotiation completes *pGranted = VOS_FALSE; return VOS_STATUS_SUCCESS; } /**============================================================================ @brief hdd_wmm_assoc() - Function which will handle the housekeeping required by WMM when association takes place @param pAdapter : [in] pointer to adapter context @param pRoamInfo: [in] pointer to roam information @param eBssType : [in] type of BSS @return : VOS_STATUS_SUCCESS if successful : other values if failure ===========================================================================*/ VOS_STATUS hdd_wmm_assoc( hdd_adapter_t* pAdapter, tCsrRoamInfo *pRoamInfo, eCsrRoamBssType eBssType ) { tANI_U8 uapsdMask; VOS_STATUS status; hdd_context_t *pHddCtx = WLAN_HDD_GET_CTX(pAdapter); // when we associate we need to notify TL if it needs to enable // UAPSD for any access categories VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: Entered", __func__); if (pRoamInfo->fReassocReq) { // when we reassociate we should continue to use whatever // parameters were previously established.
if we are // reassociating due to a U-APSD change for a particular // Access Category, then the change will be communicated // to HDD via the QoS callback associated with the given // flow, and U-APSD parameters will be updated there VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: Reassoc so no work, Exiting", __func__); return VOS_STATUS_SUCCESS; } // get the negotiated UAPSD Mask uapsdMask = pRoamInfo->u.pConnectedProfile->modifyProfileFields.uapsd_mask; VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: U-APSD mask is 0x%02x", __func__, (int) uapsdMask); if (uapsdMask & HDD_AC_VO) { status = WLANTL_EnableUAPSDForAC( (WLAN_HDD_GET_CTX(pAdapter))->pvosContext, (WLAN_HDD_GET_STATION_CTX_PTR(pAdapter))->conn_info.staId[0], WLANTL_AC_VO, 7, 7, (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraUapsdVoSrvIntv, (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraUapsdVoSuspIntv, WLANTL_BI_DIR ); VOS_ASSERT( VOS_IS_STATUS_SUCCESS( status )); } if (uapsdMask & HDD_AC_VI) { status = WLANTL_EnableUAPSDForAC( (WLAN_HDD_GET_CTX(pAdapter))->pvosContext, (WLAN_HDD_GET_STATION_CTX_PTR(pAdapter))->conn_info.staId[0], WLANTL_AC_VI, 5, 5, (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraUapsdViSrvIntv, (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraUapsdViSuspIntv, WLANTL_BI_DIR ); VOS_ASSERT( VOS_IS_STATUS_SUCCESS( status )); } if (uapsdMask & HDD_AC_BK) { status = WLANTL_EnableUAPSDForAC( (WLAN_HDD_GET_CTX(pAdapter))->pvosContext, (WLAN_HDD_GET_STATION_CTX_PTR(pAdapter))->conn_info.staId[0], WLANTL_AC_BK, 2, 2, (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraUapsdBkSrvIntv, (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraUapsdBkSuspIntv, WLANTL_BI_DIR ); VOS_ASSERT( VOS_IS_STATUS_SUCCESS( status )); } if (uapsdMask & HDD_AC_BE) { status = WLANTL_EnableUAPSDForAC( (WLAN_HDD_GET_CTX(pAdapter))->pvosContext, (WLAN_HDD_GET_STATION_CTX_PTR(pAdapter))->conn_info.staId[0], WLANTL_AC_BE, 3, 3, (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraUapsdBeSrvIntv, (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraUapsdBeSuspIntv, WLANTL_BI_DIR ); VOS_ASSERT( VOS_IS_STATUS_SUCCESS( status )); } status = sme_UpdateDSCPtoUPMapping(pHddCtx->hHal, pAdapter->hddWmmDscpToUpMap, pAdapter->sessionId); if (!VOS_IS_STATUS_SUCCESS( status )) { hdd_wmm_init( pAdapter ); } VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: Exiting", __func__); return VOS_STATUS_SUCCESS; } static const v_U8_t acmMaskBit[WLANTL_MAX_AC] = { 0x4, /* WLANTL_AC_BK */ 0x8, /* WLANTL_AC_BE */ 0x2, /* WLANTL_AC_VI */ 0x1 /* WLANTL_AC_VO */ }; /**============================================================================ @brief hdd_wmm_connect() - Function which will handle the housekeeping required by WMM when a connection is established @param pAdapter : [in] pointer to adapter context @param pRoamInfo: [in] pointer to roam information @param eBssType : [in] type of BSS @return : VOS_STATUS_SUCCESS if succssful : other values if failure ===========================================================================*/ VOS_STATUS hdd_wmm_connect( hdd_adapter_t* pAdapter, tCsrRoamInfo *pRoamInfo, eCsrRoamBssType eBssType ) { int ac; v_BOOL_t qap; v_BOOL_t qosConnection; v_U8_t acmMask; VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: Entered", __func__); if ((eCSR_BSS_TYPE_INFRASTRUCTURE == eBssType) && pRoamInfo && pRoamInfo->u.pConnectedProfile) { qap = pRoamInfo->u.pConnectedProfile->qap; qosConnection = pRoamInfo->u.pConnectedProfile->qosConnection; acmMask = pRoamInfo->u.pConnectedProfile->acm_mask; } else { /* TODO: if a non-qos IBSS peer joins the group 
make qap and qosConnection false. */ qap = VOS_TRUE; qosConnection = VOS_TRUE; acmMask = 0x0; } VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: qap is %d, qosConnection is %d, acmMask is 0x%x", __func__, qap, qosConnection, acmMask); pAdapter->hddWmmStatus.wmmQap = qap; pAdapter->hddWmmStatus.wmmQosConnection = qosConnection; for (ac = 0; ac < WLANTL_MAX_AC; ac++) { if (qap && qosConnection && (acmMask & acmMaskBit[ac])) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: ac %d on", __func__, ac); // admission is required pAdapter->hddWmmStatus.wmmAcStatus[ac].wmmAcAccessRequired = VOS_TRUE; pAdapter->hddWmmStatus.wmmAcStatus[ac].wmmAcAccessAllowed = VOS_FALSE; pAdapter->hddWmmStatus.wmmAcStatus[ac].wmmAcAccessGranted = VOS_FALSE; /* Making TSPEC invalid here so downgrading can be happen while roaming * It is expected this will be SET in hdd_wmm_sme_callback,once sme is * done with the AddTspec.Here we avoid 11r and ccx based association. This change is done only when reassoc to different AP. */ VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO, FL( "fReassocReq = %d" #if defined (FEATURE_WLAN_ESE) "isESEAssoc = %d" #endif #if defined (WLAN_FEATURE_VOWIFI_11R) "is11rAssoc = %d" #endif ), pRoamInfo->fReassocReq #if defined (FEATURE_WLAN_ESE) ,pRoamInfo->isESEAssoc #endif #if defined (WLAN_FEATURE_VOWIFI_11R) ,pRoamInfo->is11rAssoc #endif ); if ( !pRoamInfo->fReassocReq #if defined (WLAN_FEATURE_VOWIFI_11R) && !pRoamInfo->is11rAssoc #endif #if defined (FEATURE_WLAN_ESE) && !pRoamInfo->isESEAssoc #endif ) { pAdapter->hddWmmStatus.wmmAcStatus[ac].wmmAcTspecValid = VOS_FALSE; } } else { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: ac %d off", __func__, ac); // admission is not required so access is allowed pAdapter->hddWmmStatus.wmmAcStatus[ac].wmmAcAccessRequired = VOS_FALSE; pAdapter->hddWmmStatus.wmmAcStatus[ac].wmmAcAccessAllowed = VOS_TRUE; } } VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: Exiting", __func__); return VOS_STATUS_SUCCESS; } /**============================================================================ @brief hdd_wmm_get_uapsd_mask() - Function which will calculate the initial value of the UAPSD mask based upon the device configuration @param pAdapter : [in] pointer to adapter context @param pUapsdMask: [in] pointer to where the UAPSD Mask is to be stored @return : VOS_STATUS_SUCCESS if succssful : other values if failure ===========================================================================*/ VOS_STATUS hdd_wmm_get_uapsd_mask( hdd_adapter_t* pAdapter, tANI_U8 *pUapsdMask ) { tANI_U8 uapsdMask; if (HDD_WMM_USER_MODE_NO_QOS == (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->WmmMode) { // no QOS then no UAPSD uapsdMask = 0; } else { // start with the default mask uapsdMask = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->UapsdMask; // disable UAPSD for any ACs with a 0 Service Interval if( (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraUapsdVoSrvIntv == 0 ) { uapsdMask &= ~HDD_AC_VO; } if( (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraUapsdViSrvIntv == 0 ) { uapsdMask &= ~HDD_AC_VI; } if( (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraUapsdBkSrvIntv == 0 ) { uapsdMask &= ~HDD_AC_BK; } if( (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->InfraUapsdBeSrvIntv == 0 ) { uapsdMask &= ~HDD_AC_BE; } } // return calculated mask *pUapsdMask = uapsdMask; return VOS_STATUS_SUCCESS; } /**============================================================================ @brief hdd_wmm_is_active() - Function which will determine if WMM is active on the current connection 
@param pAdapter : [in] pointer to adapter context @return : VOS_TRUE if WMM is enabled : VOS_FALSE if WMM is not enabled ===========================================================================*/ v_BOOL_t hdd_wmm_is_active( hdd_adapter_t* pAdapter ) { if ((!pAdapter->hddWmmStatus.wmmQosConnection) || (!pAdapter->hddWmmStatus.wmmQap)) { return VOS_FALSE; } else { return VOS_TRUE; } } /**============================================================================ @brief hdd_wmm_addts() - Function which will add a traffic spec at the request of an application @param pAdapter : [in] pointer to adapter context @param handle : [in] handle to uniquely identify a TS @param pTspec : [in] pointer to the traffic spec @return : HDD_WLAN_WMM_STATUS_* ===========================================================================*/ hdd_wlan_wmm_status_e hdd_wmm_addts( hdd_adapter_t* pAdapter, v_U32_t handle, sme_QosWmmTspecInfo* pTspec ) { hdd_wmm_qos_context_t *pQosContext; hdd_wlan_wmm_status_e status = HDD_WLAN_WMM_STATUS_SETUP_SUCCESS ; #ifndef WLAN_MDM_CODE_REDUCTION_OPT sme_QosStatusType smeStatus; #endif v_BOOL_t found = VOS_FALSE; v_CONTEXT_t pVosContext = vos_get_global_context( VOS_MODULE_ID_HDD, NULL ); hdd_context_t *pHddCtx; if (NULL != pVosContext) { pHddCtx = vos_get_context( VOS_MODULE_ID_HDD, pVosContext); if (NULL == pHddCtx) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, FL("HddCtx is NULL")); return HDD_WLAN_WMM_STATUS_SETUP_FAILED; } } VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: Entered with handle 0x%x", __func__, handle); // see if a context already exists with the given handle mutex_lock(&pHddCtx->wmmLock); list_for_each_entry(pQosContext, &pAdapter->hddWmmStatus.wmmContextList, node) { if (pQosContext->handle == handle) { found = VOS_TRUE; break; } } mutex_unlock(&pHddCtx->wmmLock); if (found) { // record with that handle already exists VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: Record already exists with handle 0x%x", __func__, handle); /* Application is trying to modify some of the Tspec params. Allow it */ smeStatus = sme_QosModifyReq(WLAN_HDD_GET_HAL_CTX(pAdapter), pTspec, pQosContext->qosFlowId); // need to check the return value and act appropriately switch (smeStatus) { case SME_QOS_STATUS_MODIFY_SETUP_PENDING_RSP: status = HDD_WLAN_WMM_STATUS_MODIFY_PENDING; break; case SME_QOS_STATUS_MODIFY_SETUP_SUCCESS_NO_ACM_NO_APSD_RSP: status = HDD_WLAN_WMM_STATUS_MODIFY_SUCCESS_NO_ACM_NO_UAPSD; break; case SME_QOS_STATUS_MODIFY_SETUP_SUCCESS_APSD_SET_ALREADY: status = HDD_WLAN_WMM_STATUS_MODIFY_SUCCESS_NO_ACM_UAPSD_EXISTING; break; case SME_QOS_STATUS_MODIFY_SETUP_INVALID_PARAMS_RSP: status = HDD_WLAN_WMM_STATUS_MODIFY_FAILED_BAD_PARAM; break; case SME_QOS_STATUS_MODIFY_SETUP_FAILURE_RSP: status = HDD_WLAN_WMM_STATUS_MODIFY_FAILED; break; case SME_QOS_STATUS_SETUP_NOT_QOS_AP_RSP: status = HDD_WLAN_WMM_STATUS_SETUP_FAILED_NO_WMM; break; default: // we didn't get back one of the SME_QOS_STATUS_MODIFY_* status codes VOS_TRACE( VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: unexpected SME Status=%d", __func__, smeStatus ); VOS_ASSERT(0); return HDD_WLAN_WMM_STATUS_MODIFY_FAILED; } mutex_lock(&pHddCtx->wmmLock); if (pQosContext->magic == HDD_WMM_CTX_MAGIC) { pQosContext->lastStatus = status; } mutex_unlock(&pHddCtx->wmmLock); return status; } pQosContext = kmalloc(sizeof(*pQosContext), GFP_KERNEL); if (NULL == pQosContext) { // no memory for QoS context. 
/* Nothing we can do */ VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: Unable to allocate QoS context", __func__); return HDD_WLAN_WMM_STATUS_INTERNAL_FAILURE; } // we assume the tspec has already been validated by the caller pQosContext->handle = handle; if (pTspec->ts_info.up < HDD_WMM_UP_TO_AC_MAP_SIZE) pQosContext->acType = hddWmmUpToAcMap[pTspec->ts_info.up]; else { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: ts_info.up (%d) larger than max value (%d), " "use default acType (%d)", __func__, pTspec->ts_info.up, HDD_WMM_UP_TO_AC_MAP_SIZE - 1, hddWmmUpToAcMap[0]); pQosContext->acType = hddWmmUpToAcMap[0]; } pQosContext->pAdapter = pAdapter; pQosContext->qosFlowId = 0; VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: Setting up QoS, context %p", __func__, pQosContext); mutex_lock(&pHddCtx->wmmLock); pQosContext->magic = HDD_WMM_CTX_MAGIC; list_add(&pQosContext->node, &pAdapter->hddWmmStatus.wmmContextList); mutex_unlock(&pHddCtx->wmmLock); #ifndef WLAN_MDM_CODE_REDUCTION_OPT smeStatus = sme_QosSetupReq(WLAN_HDD_GET_HAL_CTX(pAdapter), pAdapter->sessionId, pTspec, hdd_wmm_sme_callback, pQosContext, pTspec->ts_info.up, &pQosContext->qosFlowId); VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO, "%s: sme_QosSetupReq returned %d flowid %d", __func__, smeStatus, pQosContext->qosFlowId); // need to check the return value and act appropriately switch (smeStatus) { case SME_QOS_STATUS_SETUP_REQ_PENDING_RSP: status = HDD_WLAN_WMM_STATUS_SETUP_PENDING; break; case SME_QOS_STATUS_SETUP_SUCCESS_NO_ACM_NO_APSD_RSP: status = HDD_WLAN_WMM_STATUS_SETUP_SUCCESS_NO_ACM_NO_UAPSD; break; case SME_QOS_STATUS_SETUP_SUCCESS_APSD_SET_ALREADY: status = HDD_WLAN_WMM_STATUS_SETUP_SUCCESS_NO_ACM_UAPSD_EXISTING; break; case SME_QOS_STATUS_SETUP_SUCCESS_IND_APSD_PENDING: status = HDD_WLAN_WMM_STATUS_SETUP_PENDING; break; case SME_QOS_STATUS_SETUP_INVALID_PARAMS_RSP: hdd_wmm_free_context(pQosContext); return HDD_WLAN_WMM_STATUS_SETUP_FAILED_BAD_PARAM; case SME_QOS_STATUS_SETUP_FAILURE_RSP: // we can't tell the difference between when a request fails because // AP rejected it versus when SME encountered an internal error hdd_wmm_free_context(pQosContext); return HDD_WLAN_WMM_STATUS_SETUP_FAILED; case SME_QOS_STATUS_SETUP_NOT_QOS_AP_RSP: hdd_wmm_free_context(pQosContext); return HDD_WLAN_WMM_STATUS_SETUP_FAILED_NO_WMM; default: // we didn't get back one of the SME_QOS_STATUS_SETUP_* status codes hdd_wmm_free_context(pQosContext); VOS_TRACE( VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: unexpected SME Status=%d", __func__, smeStatus ); VOS_ASSERT(0); return HDD_WLAN_WMM_STATUS_SETUP_FAILED; } #endif // we were successful, save the status mutex_lock(&pHddCtx->wmmLock); if (pQosContext->magic == HDD_WMM_CTX_MAGIC) { pQosContext->lastStatus = status; } mutex_unlock(&pHddCtx->wmmLock); return status; } /**============================================================================ @brief hdd_wmm_delts() - Function which will delete a traffic spec at the request of an application @param pAdapter : [in] pointer to adapter context @param handle : [in] handle to uniquely identify a TS @return : HDD_WLAN_WMM_STATUS_* ===========================================================================*/ hdd_wlan_wmm_status_e hdd_wmm_delts( hdd_adapter_t* pAdapter, v_U32_t handle ) { hdd_wmm_qos_context_t *pQosContext; v_BOOL_t found = VOS_FALSE; WLANTL_ACEnumType acType = 0; v_U32_t qosFlowId = 0; hdd_wlan_wmm_status_e status = HDD_WLAN_WMM_STATUS_SETUP_SUCCESS ; #ifndef WLAN_MDM_CODE_REDUCTION_OPT sme_QosStatusType
smeStatus; #endif v_CONTEXT_t pVosContext = vos_get_global_context( VOS_MODULE_ID_HDD, NULL ); hdd_context_t *pHddCtx; if (NULL != pVosContext) { pHddCtx = vos_get_context( VOS_MODULE_ID_HDD, pVosContext); if (NULL == pHddCtx) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, FL("HddCtx is NULL")); return HDD_WLAN_WMM_STATUS_RELEASE_FAILED; } } VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: Entered with handle 0x%x", __func__, handle); // locate the context with the given handle mutex_lock(&pHddCtx->wmmLock); list_for_each_entry(pQosContext, &pAdapter->hddWmmStatus.wmmContextList, node) { if (pQosContext->handle == handle) { found = VOS_TRUE; acType = pQosContext->acType; qosFlowId = pQosContext->qosFlowId; break; } } mutex_unlock(&pHddCtx->wmmLock); if (VOS_FALSE == found) { // we didn't find the handle VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: handle 0x%x not found", __func__, handle); return HDD_WLAN_WMM_STATUS_RELEASE_FAILED_BAD_PARAM; } VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: found handle 0x%x, flow %d, AC %d, context %p", __func__, handle, qosFlowId, acType, pQosContext); #ifndef WLAN_MDM_CODE_REDUCTION_OPT smeStatus = sme_QosReleaseReq( WLAN_HDD_GET_HAL_CTX(pAdapter), qosFlowId ); VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: SME flow %d released, SME status %d", __func__, qosFlowId, smeStatus); switch(smeStatus) { case SME_QOS_STATUS_RELEASE_SUCCESS_RSP: // this flow is the only one on that AC, so go ahead and update // our TSPEC state for the AC pAdapter->hddWmmStatus.wmmAcStatus[acType].wmmAcTspecValid = VOS_FALSE; // need to tell TL to stop trigger timer, etc hdd_wmm_disable_tl_uapsd(pQosContext); #ifdef FEATURE_WLAN_ESE // disable the inactivity timer hdd_wmm_disable_inactivity_timer(pQosContext); #endif // we are done with this context hdd_wmm_free_context(pQosContext); // SME must not fire any more callbacks for this flow since the context // is no longer valid return HDD_WLAN_WMM_STATUS_RELEASE_SUCCESS; case SME_QOS_STATUS_RELEASE_REQ_PENDING_RSP: // do nothing as we will get a response from SME status = HDD_WLAN_WMM_STATUS_RELEASE_PENDING; break; case SME_QOS_STATUS_RELEASE_INVALID_PARAMS_RSP: // nothing we can do with the existing flow except leave it status = HDD_WLAN_WMM_STATUS_RELEASE_FAILED_BAD_PARAM; break; case SME_QOS_STATUS_RELEASE_FAILURE_RSP: // nothing we can do with the existing flow except leave it status = HDD_WLAN_WMM_STATUS_RELEASE_FAILED; break; default: // we didn't get back one of the SME_QOS_STATUS_RELEASE_* status codes VOS_TRACE( VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, "%s: unexpected SME Status=%d", __func__, smeStatus ); VOS_ASSERT(0); status = HDD_WLAN_WMM_STATUS_RELEASE_FAILED; } #endif mutex_lock(&pHddCtx->wmmLock); if (pQosContext->magic == HDD_WMM_CTX_MAGIC) { pQosContext->lastStatus = status; } mutex_unlock(&pHddCtx->wmmLock); return status; } /**============================================================================ @brief hdd_wmm_checkts() - Function which will return the status of a traffic spec at the request of an application @param pAdapter : [in] pointer to adapter context @param handle : [in] handle to uniquely identify a TS @return : HDD_WLAN_WMM_STATUS_* ===========================================================================*/ hdd_wlan_wmm_status_e hdd_wmm_checkts( hdd_adapter_t* pAdapter, v_U32_t handle ) { hdd_wmm_qos_context_t *pQosContext; hdd_wlan_wmm_status_e status = HDD_WLAN_WMM_STATUS_LOST; v_CONTEXT_t pVosContext = vos_get_global_context(
VOS_MODULE_ID_HDD, NULL ); hdd_context_t *pHddCtx; if (NULL != pVosContext) { pHddCtx = vos_get_context( VOS_MODULE_ID_HDD, pVosContext); if (NULL == pHddCtx) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_ERROR, FL("HddCtx is NULL")); return HDD_WLAN_WMM_STATUS_LOST; } } VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: Entered with handle 0x%x", __func__, handle); // locate the context with the given handle mutex_lock(&pHddCtx->wmmLock); list_for_each_entry(pQosContext, &pAdapter->hddWmmStatus.wmmContextList, node) { if (pQosContext->handle == handle) { VOS_TRACE(VOS_MODULE_ID_HDD, WMM_TRACE_LEVEL_INFO_LOW, "%s: found handle 0x%x, context %p", __func__, handle, pQosContext); status = pQosContext->lastStatus; break; } } mutex_unlock(&pHddCtx->wmmLock); return status; }
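/* Illustrative sketch (not part of the driver source): a self-contained
 * distillation of the DSCP-based classification performed by
 * hdd_wmm_classify_pkt() above -- extract the DSCP from the IPv4 TOS byte,
 * seed a DSCP-to-UP table with a few class-selector codepoints in the
 * spirit of hdd_wmm_init(), then map UP to an Access Category the way
 * hddWmmUpToAcMap does. All names and table values here are local
 * stand-ins, not the driver's own symbols. */
#include <stdio.h>

enum wmm_ac { AC_BK, AC_BE, AC_VI, AC_VO };

static enum wmm_ac classify_tos(unsigned char tos)
{
    /* standard WMM User Priority (0..7) to AC mapping */
    static const enum wmm_ac up_to_ac[8] =
        { AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO };

    /* DSCP to UP: default everything to UP 0 (best effort), then pin
       a subset of the class-selector codepoints */
    unsigned char dscp_to_up[64] = { 0 };
    dscp_to_up[8]  = 1;   /* CS1 -> UP 1 (background) */
    dscp_to_up[40] = 5;   /* CS5 -> UP 5 (video) */
    dscp_to_up[48] = 6;   /* CS6 -> UP 6 (voice) */
    dscp_to_up[56] = 7;   /* CS7 -> UP 7 (network control) */

    unsigned char dscp = (tos >> 2) & 0x3f;   /* top six bits of TOS */
    return up_to_ac[dscp_to_up[dscp]];
}

int main(void)
{
    /* TOS 0xC0 is DSCP 48 (CS6): classified as voice, AC_VO */
    printf("tos=0xC0 -> AC %d\n", classify_tos(0xC0));
    /* TOS 0x20 is DSCP 8 (CS1): classified as background, AC_BK */
    printf("tos=0x20 -> AC %d\n", classify_tos(0x20));
    return 0;
}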
gpl-2.0
m1el/irssi
src/perl/perl-sources.c
18
4055
/* perl-sources.c : irssi

    Copyright (C) 1999-2001 Timo Sirainen

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/

#define NEED_PERL_H
#include "module.h"
#include "signals.h"

#include "perl-core.h"
#include "perl-common.h"
#include "perl-sources.h"

#include "misc.h"

typedef struct {
	PERL_SCRIPT_REC *script;
	int tag;
	int refcount;
	int once; /* run only once */

	SV *func;
	SV *data;
} PERL_SOURCE_REC;

static GSList *perl_sources;

static void perl_source_ref(PERL_SOURCE_REC *rec)
{
	rec->refcount++;
}

static int perl_source_unref(PERL_SOURCE_REC *rec)
{
	if (--rec->refcount != 0)
		return TRUE;

	SvREFCNT_dec(rec->data);
	SvREFCNT_dec(rec->func);
	g_free(rec);
	return FALSE;
}

static void perl_source_destroy(PERL_SOURCE_REC *rec)
{
	perl_sources = g_slist_remove(perl_sources, rec);

	g_source_remove(rec->tag);
	rec->tag = -1;

	perl_source_unref(rec);
}

static int perl_source_event(PERL_SOURCE_REC *rec)
{
	dSP;
	int retcount;

	ENTER;
	SAVETMPS;

	PUSHMARK(SP);
	XPUSHs(sv_mortalcopy(rec->data));
	PUTBACK;

	perl_source_ref(rec);
	retcount = perl_call_sv(rec->func, G_EVAL|G_SCALAR);
	SPAGAIN;

	if (SvTRUE(ERRSV)) {
		char *error = g_strdup(SvPV(ERRSV, PL_na));
		signal_emit("script error", 2, rec->script, error);
		g_free(error);
	}

	if (perl_source_unref(rec) && rec->once)
		perl_source_destroy(rec);

	PUTBACK;
	FREETMPS;
	LEAVE;

	return 1;
}

int perl_timeout_add(int msecs, SV *func, SV *data, int once)
{
	PERL_SCRIPT_REC *script;
	PERL_SOURCE_REC *rec;
	const char *pkg;

	pkg = perl_get_package();
	script = perl_script_find_package(pkg);
	g_return_val_if_fail(script != NULL, -1);

	rec = g_new0(PERL_SOURCE_REC, 1);
	perl_source_ref(rec);

	rec->once = once;
	rec->script = script;
	rec->func = perl_func_sv_inc(func, pkg);
	rec->data = SvREFCNT_inc(data);
	rec->tag = g_timeout_add(msecs, (GSourceFunc) perl_source_event, rec);

	perl_sources = g_slist_append(perl_sources, rec);
	return rec->tag;
}

int perl_input_add(int source, int condition, SV *func, SV *data, int once)
{
	PERL_SCRIPT_REC *script;
	PERL_SOURCE_REC *rec;
	const char *pkg;

	pkg = perl_get_package();
	script = perl_script_find_package(pkg);
	g_return_val_if_fail(script != NULL, -1);

	rec = g_new0(PERL_SOURCE_REC, 1);
	perl_source_ref(rec);

	rec->once = once;
	rec->script = script;
	rec->func = perl_func_sv_inc(func, pkg);
	rec->data = SvREFCNT_inc(data);
	rec->tag = g_input_add_poll(source, G_PRIORITY_DEFAULT, condition,
				    (GInputFunction) perl_source_event, rec);

	perl_sources = g_slist_append(perl_sources, rec);
	return rec->tag;
}

void perl_source_remove(int tag)
{
	GSList *tmp;

	for (tmp = perl_sources; tmp != NULL; tmp = tmp->next) {
		PERL_SOURCE_REC *rec = tmp->data;

		if (rec->tag == tag) {
			perl_source_destroy(rec);
			break;
		}
	}
}

void perl_source_remove_script(PERL_SCRIPT_REC *script)
{
	GSList *tmp, *next;

	for (tmp = perl_sources; tmp != NULL; tmp = next) {
		PERL_SOURCE_REC *rec = tmp->data;

		next = tmp->next;
		if (rec->script == script)
			perl_source_destroy(rec);
	}
}

void perl_sources_start(void)
{
	perl_sources = NULL;
}

void perl_sources_stop(void)
{
	/* timeouts and input waits */
	while (perl_sources != NULL)
		perl_source_destroy(perl_sources->data);
}
gpl-2.0
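A note on the file above: perl_source_event() holds an extra reference across the call into Perl, so a script callback may destroy its own source without freeing the record out from under the C caller, and rec->once is consulted only if the record survived the callback. Below is a minimal, dependency-free C sketch of that refcount-guard idiom; source_rec and the helper names are hypothetical stand-ins, not irssi API.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for PERL_SOURCE_REC: only the fields needed
 * to demonstrate the refcount guard. */
typedef struct {
    int refcount;
    int once;
    int destroyed;   /* stands in for removal from perl_sources */
} source_rec;

static void source_ref(source_rec *rec) { rec->refcount++; }

/* Returns 1 while the record is still alive, 0 once it has been freed. */
static int source_unref(source_rec *rec)
{
    if (--rec->refcount != 0)
        return 1;
    free(rec);
    return 0;
}

static void source_destroy(source_rec *rec)
{
    rec->destroyed = 1;  /* the real code also calls g_source_remove() */
    source_unref(rec);   /* drop the reference held by the event loop */
}

/* Mirrors perl_source_event(): hold an extra reference across the
 * callback so the callback may destroy its own source safely. */
static void source_event(source_rec *rec)
{
    source_ref(rec);

    /* ... invoke the script callback here; it might call
     * source_destroy(rec) itself ... */

    /* rec->once is read only if the record is still alive. */
    if (source_unref(rec) && rec->once)
        source_destroy(rec);
}

int main(void)
{
    source_rec *rec = calloc(1, sizeof *rec);
    if (!rec)
        return 1;
    rec->once = 1;
    source_ref(rec);   /* reference owned by the event loop */
    source_event(rec); /* one-shot source destroys itself after firing */
    return 0;
}

The detail mirrored from perl-sources.c is the short-circuit: once source_unref() reports the record freed, rec->once is never touched, which avoids a use-after-free when the callback removed the source itself.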
peterpark2017/TrinityCore
src/common/Collision/Management/MMapManager.cpp
18
12594
/*
 * Copyright (C) 2008-2017 TrinityCore <http://www.trinitycore.org/>
 * Copyright (C) 2005-2010 MaNGOS <http://getmangos.com/>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "MMapManager.h"
#include "Log.h"
#include "Config.h"
#include "MapDefines.h"

namespace MMAP
{
    static char const* const MAP_FILE_NAME_FORMAT = "%s/mmaps/%03i.mmap";
    static char const* const TILE_FILE_NAME_FORMAT = "%s/mmaps/%03i%02i%02i.mmtile";

    // ######################## MMapManager ########################
    MMapManager::~MMapManager()
    {
        for (MMapDataSet::iterator i = loadedMMaps.begin(); i != loadedMMaps.end(); ++i)
            delete i->second;

        // by now we should not have maps loaded
        // if we had, tiles in MMapData->mmapLoadedTiles, their actual data is lost!
    }

    void MMapManager::InitializeThreadUnsafe(const std::vector<uint32>& mapIds)
    {
        // the caller must pass the list of all mapIds that will be used in the VMapManager2 lifetime
        for (const uint32& mapId : mapIds)
            loadedMMaps.insert(MMapDataSet::value_type(mapId, nullptr));

        thread_safe_environment = false;
    }

    MMapDataSet::const_iterator MMapManager::GetMMapData(uint32 mapId) const
    {
        // return the iterator if found or end() if not found/NULL
        MMapDataSet::const_iterator itr = loadedMMaps.find(mapId);
        if (itr != loadedMMaps.cend() && !itr->second)
            itr = loadedMMaps.cend();

        return itr;
    }

    bool MMapManager::loadMapData(uint32 mapId)
    {
        // we already have this map loaded?
        MMapDataSet::iterator itr = loadedMMaps.find(mapId);
        if (itr != loadedMMaps.end())
        {
            if (itr->second)
                return true;
        }
        else
        {
            if (thread_safe_environment)
                itr = loadedMMaps.insert(MMapDataSet::value_type(mapId, nullptr)).first;
            else
                ASSERT(false, "Invalid mapId %u passed to MMapManager after startup in thread unsafe environment", mapId);
        }

        // load and init dtNavMesh - read parameters from file
        std::string fileName = Trinity::StringFormat(MAP_FILE_NAME_FORMAT, sConfigMgr->GetStringDefault("DataDir", ".").c_str(), mapId);
        FILE* file = fopen(fileName.c_str(), "rb");
        if (!file)
        {
            TC_LOG_DEBUG("maps", "MMAP:loadMapData: Error: Could not open mmap file '%s'", fileName.c_str());
            return false;
        }

        dtNavMeshParams params;
        uint32 count = uint32(fread(&params, sizeof(dtNavMeshParams), 1, file));
        fclose(file);
        if (count != 1)
        {
            TC_LOG_DEBUG("maps", "MMAP:loadMapData: Error: Could not read params from file '%s'", fileName.c_str());
            return false;
        }

        dtNavMesh* mesh = dtAllocNavMesh();
        ASSERT(mesh);
        if (dtStatusFailed(mesh->init(&params)))
        {
            dtFreeNavMesh(mesh);
            TC_LOG_ERROR("maps", "MMAP:loadMapData: Failed to initialize dtNavMesh for mmap %03u from file %s", mapId, fileName.c_str());
            return false;
        }

        TC_LOG_DEBUG("maps", "MMAP:loadMapData: Loaded %03i.mmap", mapId);

        // store inside our map list
        MMapData* mmap_data = new MMapData(mesh);

        itr->second = mmap_data;
        return true;
    }

    uint32 MMapManager::packTileID(int32 x, int32 y)
    {
        return uint32(x << 16 | y);
    }

    bool MMapManager::loadMap(const std::string& /*basePath*/, uint32 mapId, int32 x, int32 y)
    {
        // make sure the mmap is loaded and ready to load tiles
        if (!loadMapData(mapId))
            return false;

        // get this mmap data
        MMapData* mmap = loadedMMaps[mapId];
        ASSERT(mmap->navMesh);

        // check if we already have this tile loaded
        uint32 packedGridPos = packTileID(x, y);
        if (mmap->loadedTileRefs.find(packedGridPos) != mmap->loadedTileRefs.end())
            return false;

        // load this tile :: mmaps/MMMXXYY.mmtile
        std::string fileName = Trinity::StringFormat(TILE_FILE_NAME_FORMAT, sConfigMgr->GetStringDefault("DataDir", ".").c_str(), mapId, x, y);
        FILE* file = fopen(fileName.c_str(), "rb");
        if (!file)
        {
            TC_LOG_DEBUG("maps", "MMAP:loadMap: Could not open mmtile file '%s'", fileName.c_str());
            return false;
        }

        // read header
        MmapTileHeader fileHeader;
        if (fread(&fileHeader, sizeof(MmapTileHeader), 1, file) != 1 || fileHeader.mmapMagic != MMAP_MAGIC)
        {
            TC_LOG_ERROR("maps", "MMAP:loadMap: Bad header in mmap %03u%02i%02i.mmtile", mapId, x, y);
            fclose(file);
            return false;
        }

        if (fileHeader.mmapVersion != MMAP_VERSION)
        {
            TC_LOG_ERROR("maps", "MMAP:loadMap: %03u%02i%02i.mmtile was built with generator v%i, expected v%i", mapId, x, y, fileHeader.mmapVersion, MMAP_VERSION);
            fclose(file);
            return false;
        }

        long pos = ftell(file);
        fseek(file, 0, SEEK_END);
        if (static_cast<int32>(fileHeader.size) > ftell(file) - pos)
        {
            TC_LOG_ERROR("maps", "MMAP:loadMap: %03u%02i%02i.mmtile has corrupted data size", mapId, x, y);
            fclose(file);
            return false;
        }

        fseek(file, pos, SEEK_SET);

        unsigned char* data = (unsigned char*)dtAlloc(fileHeader.size, DT_ALLOC_PERM);
        ASSERT(data);

        size_t result = fread(data, fileHeader.size, 1, file);
        if (!result)
        {
            TC_LOG_ERROR("maps", "MMAP:loadMap: Bad header or data in mmap %03u%02i%02i.mmtile", mapId, x, y);
            fclose(file);
            return false;
        }

        fclose(file);

        dtMeshHeader* header = (dtMeshHeader*)data;
        dtTileRef tileRef = 0;

        // memory allocated for data is now managed by detour, and will be deallocated when the tile is removed
        if (dtStatusSucceed(mmap->navMesh->addTile(data, fileHeader.size, DT_TILE_FREE_DATA, 0, &tileRef)))
        {
            mmap->loadedTileRefs.insert(std::pair<uint32, dtTileRef>(packedGridPos, tileRef));
            ++loadedTiles;
            TC_LOG_DEBUG("maps", "MMAP:loadMap: Loaded mmtile %03i[%02i, %02i] into %03i[%02i, %02i]", mapId, x, y, mapId, header->x, header->y);
            return true;
        }
        else
        {
            TC_LOG_ERROR("maps", "MMAP:loadMap: Could not load %03u%02i%02i.mmtile into navmesh", mapId, x, y);
            dtFree(data);
            return false;
        }
    }

    bool MMapManager::unloadMap(uint32 mapId, int32 x, int32 y)
    {
        // check if we have this map loaded
        MMapDataSet::const_iterator itr = GetMMapData(mapId);
        if (itr == loadedMMaps.end())
        {
            // file may not exist, therefore not loaded
            TC_LOG_DEBUG("maps", "MMAP:unloadMap: Asked to unload not loaded navmesh map. %03u%02i%02i.mmtile", mapId, x, y);
            return false;
        }

        MMapData* mmap = itr->second;

        // check if we have this tile loaded
        uint32 packedGridPos = packTileID(x, y);
        if (mmap->loadedTileRefs.find(packedGridPos) == mmap->loadedTileRefs.end())
        {
            // file may not exist, therefore not loaded
            TC_LOG_DEBUG("maps", "MMAP:unloadMap: Asked to unload not loaded navmesh tile. %03u%02i%02i.mmtile", mapId, x, y);
            return false;
        }

        dtTileRef tileRef = mmap->loadedTileRefs[packedGridPos];

        // unload, and mark as non loaded
        if (dtStatusFailed(mmap->navMesh->removeTile(tileRef, NULL, NULL)))
        {
            // this is technically a memory leak
            // if the grid is later reloaded, dtNavMesh::addTile will return error but no extra memory is used
            // we cannot recover from this error - assert out
            TC_LOG_ERROR("maps", "MMAP:unloadMap: Could not unload %03u%02i%02i.mmtile from navmesh", mapId, x, y);
            ABORT();
        }
        else
        {
            mmap->loadedTileRefs.erase(packedGridPos);
            --loadedTiles;
            TC_LOG_DEBUG("maps", "MMAP:unloadMap: Unloaded mmtile %03i[%02i, %02i] from %03i", mapId, x, y, mapId);
            return true;
        }

        return false;
    }

    bool MMapManager::unloadMap(uint32 mapId)
    {
        MMapDataSet::iterator itr = loadedMMaps.find(mapId);
        if (itr == loadedMMaps.end() || !itr->second)
        {
            // file may not exist, therefore not loaded
            TC_LOG_DEBUG("maps", "MMAP:unloadMap: Asked to unload not loaded navmesh map %03u", mapId);
            return false;
        }

        // unload all tiles from given map
        MMapData* mmap = itr->second;
        for (MMapTileSet::iterator i = mmap->loadedTileRefs.begin(); i != mmap->loadedTileRefs.end(); ++i)
        {
            uint32 x = (i->first >> 16);
            uint32 y = (i->first & 0x0000FFFF);
            if (dtStatusFailed(mmap->navMesh->removeTile(i->second, NULL, NULL)))
                TC_LOG_ERROR("maps", "MMAP:unloadMap: Could not unload %03u%02i%02i.mmtile from navmesh", mapId, x, y);
            else
            {
                --loadedTiles;
                TC_LOG_DEBUG("maps", "MMAP:unloadMap: Unloaded mmtile %03i[%02i, %02i] from %03i", mapId, x, y, mapId);
            }
        }

        delete mmap;
        itr->second = nullptr;
        TC_LOG_DEBUG("maps", "MMAP:unloadMap: Unloaded %03i.mmap", mapId);

        return true;
    }

    bool MMapManager::unloadMapInstance(uint32 mapId, uint32 instanceId)
    {
        // check if we have this map loaded
        MMapDataSet::const_iterator itr = GetMMapData(mapId);
        if (itr == loadedMMaps.end())
        {
            // file may not exist, therefore not loaded
            TC_LOG_DEBUG("maps", "MMAP:unloadMapInstance: Asked to unload not loaded navmesh map %03u", mapId);
            return false;
        }

        MMapData* mmap = itr->second;
        if (mmap->navMeshQueries.find(instanceId) == mmap->navMeshQueries.end())
        {
            TC_LOG_DEBUG("maps", "MMAP:unloadMapInstance: Asked to unload not loaded dtNavMeshQuery mapId %03u instanceId %u", mapId, instanceId);
            return false;
        }

        dtNavMeshQuery* query = mmap->navMeshQueries[instanceId];

        dtFreeNavMeshQuery(query);
        mmap->navMeshQueries.erase(instanceId);
        TC_LOG_DEBUG("maps", "MMAP:unloadMapInstance: Unloaded mapId %03u instanceId %u", mapId, instanceId);

        return true;
    }

    dtNavMesh const* MMapManager::GetNavMesh(uint32 mapId)
    {
        MMapDataSet::const_iterator itr = GetMMapData(mapId);
        if (itr == loadedMMaps.end())
            return NULL;

        return itr->second->navMesh;
    }

    dtNavMeshQuery const* MMapManager::GetNavMeshQuery(uint32 mapId, uint32 instanceId)
    {
        MMapDataSet::const_iterator itr = GetMMapData(mapId);
        if (itr == loadedMMaps.end())
            return NULL;

        MMapData* mmap = itr->second;
        if (mmap->navMeshQueries.find(instanceId) == mmap->navMeshQueries.end())
        {
            // allocate mesh query
            dtNavMeshQuery* query = dtAllocNavMeshQuery();
            ASSERT(query);
            if (dtStatusFailed(query->init(mmap->navMesh, 1024)))
            {
                dtFreeNavMeshQuery(query);
                TC_LOG_ERROR("maps", "MMAP:GetNavMeshQuery: Failed to initialize dtNavMeshQuery for mapId %03u instanceId %u", mapId, instanceId);
                return NULL;
            }

            TC_LOG_DEBUG("maps", "MMAP:GetNavMeshQuery: created dtNavMeshQuery for mapId %03u instanceId %u", mapId, instanceId);
            mmap->navMeshQueries.insert(std::pair<uint32, dtNavMeshQuery*>(instanceId, query));
        }

        return mmap->navMeshQueries[instanceId];
    }
}
gpl-2.0
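A note on the file above: packTileID() keys each loaded tile by folding its grid coordinates into a single uint32, x in the high 16 bits and y in the low 16, which unloadMap() later splits back apart with >>16 and &0xFFFF. Here is an illustrative, self-contained C sketch of that round trip; pack_tile_id/unpack_tile_id are stand-in names, not TrinityCore API.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same layout packTileID() produces: x in the high half, y in the low. */
static uint32_t pack_tile_id(int32_t x, int32_t y)
{
    return (uint32_t)(x << 16 | y);
}

/* Same split unloadMap() performs on the stored key. */
static void unpack_tile_id(uint32_t id, uint32_t *x, uint32_t *y)
{
    *x = id >> 16;
    *y = id & 0x0000FFFF;
}

int main(void)
{
    uint32_t x, y;
    unpack_tile_id(pack_tile_id(37, 52), &x, &y);
    assert(x == 37 && y == 52);
    printf("tile key 0x%08X -> [%u, %u]\n",
           (unsigned)pack_tile_id(37, 52), (unsigned)x, (unsigned)y);
    return 0;
}

The round trip only holds while 0 <= x, y < 65536; a negative or larger y would bleed into the x half, which the original code implicitly relies on callers to avoid.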
nullie/ardrone-kernel
drivers/char/amiserial.c
18
54287
/* * linux/drivers/char/amiserial.c * * Serial driver for the amiga builtin port. * * This code was created by taking serial.c version 4.30 from kernel * release 2.3.22, replacing all hardware related stuff with the * corresponding amiga hardware actions, and removing all irrelevant * code. As a consequence, it uses many of the constants and names * associated with the registers and bits of 16550 compatible UARTS - * but only to keep track of status, etc in the state variables. It * was done this was to make it easier to keep the code in line with * (non hardware specific) changes to serial.c. * * The port is registered with the tty driver as minor device 64, and * therefore other ports should should only use 65 upwards. * * Richard Lucock 28/12/99 * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, * 1998, 1999 Theodore Ts'o * */ /* * Serial driver configuration section. Here are the various options: * * SERIAL_PARANOIA_CHECK * Check the magic number for the async_structure where * ever possible. */ #include <linux/delay.h> #undef SERIAL_PARANOIA_CHECK #define SERIAL_DO_RESTART /* Set of debugging defines */ #undef SERIAL_DEBUG_INTR #undef SERIAL_DEBUG_OPEN #undef SERIAL_DEBUG_FLOW #undef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT /* Sanity checks */ #if defined(MODULE) && defined(SERIAL_DEBUG_MCOUNT) #define DBG_CNT(s) printk("(%s): [%x] refc=%d, serc=%d, ttyc=%d -> %s\n", \ tty->name, (info->flags), serial_driver->refcount,info->count,tty->count,s) #else #define DBG_CNT(s) #endif /* * End of serial driver configuration section. */ #include <linux/module.h> #include <linux/types.h> #include <linux/serial.h> #include <linux/serialP.h> #include <linux/serial_reg.h> static char *serial_version = "4.30"; #include <linux/errno.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/console.h> #include <linux/major.h> #include <linux/string.h> #include <linux/fcntl.h> #include <linux/ptrace.h> #include <linux/ioport.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/bitops.h> #include <asm/setup.h> #include <asm/system.h> #include <asm/irq.h> #include <asm/amigahw.h> #include <asm/amigaints.h> #define custom amiga_custom static char *serial_name = "Amiga-builtin serial driver"; static struct tty_driver *serial_driver; /* number of characters left in xmit buffer before we ask for more */ #define WAKEUP_CHARS 256 static struct async_struct *IRQ_ports; static unsigned char current_ctl_bits; static void change_speed(struct async_struct *info, struct ktermios *old); static void rs_wait_until_sent(struct tty_struct *tty, int timeout); static struct serial_state rs_table[1]; #define NR_PORTS ARRAY_SIZE(rs_table) #include <asm/uaccess.h> #define serial_isroot() (capable(CAP_SYS_ADMIN)) static inline int serial_paranoia_check(struct async_struct *info, char *name, const char *routine) { #ifdef SERIAL_PARANOIA_CHECK static const char *badmagic = "Warning: bad magic number for serial struct (%s) in %s\n"; static const char *badinfo = "Warning: null async_struct for (%s) in %s\n"; if (!info) { printk(badinfo, name, routine); return 1; } if (info->magic != SERIAL_MAGIC) { printk(badmagic, name, routine); return 1; } #endif return 0; } /* some serial hardware definitions */ #define SDR_OVRUN (1<<15) #define SDR_RBF (1<<14) #define SDR_TBE (1<<13) #define SDR_TSRE (1<<12) #define SERPER_PARENB 
(1<<15) #define AC_SETCLR (1<<15) #define AC_UARTBRK (1<<11) #define SER_DTR (1<<7) #define SER_RTS (1<<6) #define SER_DCD (1<<5) #define SER_CTS (1<<4) #define SER_DSR (1<<3) static __inline__ void rtsdtr_ctrl(int bits) { ciab.pra = ((bits & (SER_RTS | SER_DTR)) ^ (SER_RTS | SER_DTR)) | (ciab.pra & ~(SER_RTS | SER_DTR)); } /* * ------------------------------------------------------------ * rs_stop() and rs_start() * * This routines are called before setting or resetting tty->stopped. * They enable or disable transmitter interrupts, as necessary. * ------------------------------------------------------------ */ static void rs_stop(struct tty_struct *tty) { struct async_struct *info = (struct async_struct *)tty->driver_data; unsigned long flags; if (serial_paranoia_check(info, tty->name, "rs_stop")) return; local_irq_save(flags); if (info->IER & UART_IER_THRI) { info->IER &= ~UART_IER_THRI; /* disable Tx interrupt and remove any pending interrupts */ custom.intena = IF_TBE; mb(); custom.intreq = IF_TBE; mb(); } local_irq_restore(flags); } static void rs_start(struct tty_struct *tty) { struct async_struct *info = (struct async_struct *)tty->driver_data; unsigned long flags; if (serial_paranoia_check(info, tty->name, "rs_start")) return; local_irq_save(flags); if (info->xmit.head != info->xmit.tail && info->xmit.buf && !(info->IER & UART_IER_THRI)) { info->IER |= UART_IER_THRI; custom.intena = IF_SETCLR | IF_TBE; mb(); /* set a pending Tx Interrupt, transmitter should restart now */ custom.intreq = IF_SETCLR | IF_TBE; mb(); } local_irq_restore(flags); } /* * ---------------------------------------------------------------------- * * Here starts the interrupt handling routines. All of the following * subroutines are declared as inline and are folded into * rs_interrupt(). They were separated out for readability's sake. * * Note: rs_interrupt() is a "fast" interrupt, which means that it * runs with interrupts turned off. People who may want to modify * rs_interrupt() should try to keep the interrupt handler as fast as * possible. After you are done making modifications, it is not a bad * idea to do: * * gcc -S -DKERNEL -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer serial.c * * and look at the resulting assemble code in serial.s. * * - Ted Ts'o (tytso@mit.edu), 7-Mar-93 * ----------------------------------------------------------------------- */ /* * This routine is used by the interrupt handler to schedule * processing in the software interrupt portion of the driver. */ static void rs_sched_event(struct async_struct *info, int event) { info->event |= 1 << event; tasklet_schedule(&info->tlet); } static void receive_chars(struct async_struct *info) { int status; int serdatr; struct tty_struct *tty = info->tty; unsigned char ch, flag; struct async_icount *icount; int oe = 0; icount = &info->state->icount; status = UART_LSR_DR; /* We obviously have a character! */ serdatr = custom.serdatr; mb(); custom.intreq = IF_RBF; mb(); if((serdatr & 0x1ff) == 0) status |= UART_LSR_BI; if(serdatr & SDR_OVRUN) status |= UART_LSR_OE; ch = serdatr & 0xff; icount->rx++; #ifdef SERIAL_DEBUG_INTR printk("DR%02x:%02x...", ch, status); #endif flag = TTY_NORMAL; /* * We don't handle parity or frame errors - but I have left * the code in, since I'm not sure that the errors can't be * detected. 
*/ if (status & (UART_LSR_BI | UART_LSR_PE | UART_LSR_FE | UART_LSR_OE)) { /* * For statistics only */ if (status & UART_LSR_BI) { status &= ~(UART_LSR_FE | UART_LSR_PE); icount->brk++; } else if (status & UART_LSR_PE) icount->parity++; else if (status & UART_LSR_FE) icount->frame++; if (status & UART_LSR_OE) icount->overrun++; /* * Now check to see if character should be * ignored, and mask off conditions which * should be ignored. */ if (status & info->ignore_status_mask) goto out; status &= info->read_status_mask; if (status & (UART_LSR_BI)) { #ifdef SERIAL_DEBUG_INTR printk("handling break...."); #endif flag = TTY_BREAK; if (info->flags & ASYNC_SAK) do_SAK(tty); } else if (status & UART_LSR_PE) flag = TTY_PARITY; else if (status & UART_LSR_FE) flag = TTY_FRAME; if (status & UART_LSR_OE) { /* * Overrun is special, since it's * reported immediately, and doesn't * affect the current character */ oe = 1; } } tty_insert_flip_char(tty, ch, flag); if (oe == 1) tty_insert_flip_char(tty, 0, TTY_OVERRUN); tty_flip_buffer_push(tty); out: return; } static void transmit_chars(struct async_struct *info) { custom.intreq = IF_TBE; mb(); if (info->x_char) { custom.serdat = info->x_char | 0x100; mb(); info->state->icount.tx++; info->x_char = 0; return; } if (info->xmit.head == info->xmit.tail || info->tty->stopped || info->tty->hw_stopped) { info->IER &= ~UART_IER_THRI; custom.intena = IF_TBE; mb(); return; } custom.serdat = info->xmit.buf[info->xmit.tail++] | 0x100; mb(); info->xmit.tail = info->xmit.tail & (SERIAL_XMIT_SIZE-1); info->state->icount.tx++; if (CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) < WAKEUP_CHARS) rs_sched_event(info, RS_EVENT_WRITE_WAKEUP); #ifdef SERIAL_DEBUG_INTR printk("THRE..."); #endif if (info->xmit.head == info->xmit.tail) { custom.intena = IF_TBE; mb(); info->IER &= ~UART_IER_THRI; } } static void check_modem_status(struct async_struct *info) { unsigned char status = ciab.pra & (SER_DCD | SER_CTS | SER_DSR); unsigned char dstatus; struct async_icount *icount; /* Determine bits that have changed */ dstatus = status ^ current_ctl_bits; current_ctl_bits = status; if (dstatus) { icount = &info->state->icount; /* update input line counters */ if (dstatus & SER_DSR) icount->dsr++; if (dstatus & SER_DCD) { icount->dcd++; #ifdef CONFIG_HARD_PPS if ((info->flags & ASYNC_HARDPPS_CD) && !(status & SER_DCD)) hardpps(); #endif } if (dstatus & SER_CTS) icount->cts++; wake_up_interruptible(&info->delta_msr_wait); } if ((info->flags & ASYNC_CHECK_CD) && (dstatus & SER_DCD)) { #if (defined(SERIAL_DEBUG_OPEN) || defined(SERIAL_DEBUG_INTR)) printk("ttyS%d CD now %s...", info->line, (!(status & SER_DCD)) ? 
"on" : "off"); #endif if (!(status & SER_DCD)) wake_up_interruptible(&info->open_wait); else { #ifdef SERIAL_DEBUG_OPEN printk("doing serial hangup..."); #endif if (info->tty) tty_hangup(info->tty); } } if (info->flags & ASYNC_CTS_FLOW) { if (info->tty->hw_stopped) { if (!(status & SER_CTS)) { #if (defined(SERIAL_DEBUG_INTR) || defined(SERIAL_DEBUG_FLOW)) printk("CTS tx start..."); #endif info->tty->hw_stopped = 0; info->IER |= UART_IER_THRI; custom.intena = IF_SETCLR | IF_TBE; mb(); /* set a pending Tx Interrupt, transmitter should restart now */ custom.intreq = IF_SETCLR | IF_TBE; mb(); rs_sched_event(info, RS_EVENT_WRITE_WAKEUP); return; } } else { if ((status & SER_CTS)) { #if (defined(SERIAL_DEBUG_INTR) || defined(SERIAL_DEBUG_FLOW)) printk("CTS tx stop..."); #endif info->tty->hw_stopped = 1; info->IER &= ~UART_IER_THRI; /* disable Tx interrupt and remove any pending interrupts */ custom.intena = IF_TBE; mb(); custom.intreq = IF_TBE; mb(); } } } } static irqreturn_t ser_vbl_int( int irq, void *data) { /* vbl is just a periodic interrupt we tie into to update modem status */ struct async_struct * info = IRQ_ports; /* * TBD - is it better to unregister from this interrupt or to * ignore it if MSI is clear ? */ if(info->IER & UART_IER_MSI) check_modem_status(info); return IRQ_HANDLED; } static irqreturn_t ser_rx_int(int irq, void *dev_id) { struct async_struct * info; #ifdef SERIAL_DEBUG_INTR printk("ser_rx_int..."); #endif info = IRQ_ports; if (!info || !info->tty) return IRQ_NONE; receive_chars(info); info->last_active = jiffies; #ifdef SERIAL_DEBUG_INTR printk("end.\n"); #endif return IRQ_HANDLED; } static irqreturn_t ser_tx_int(int irq, void *dev_id) { struct async_struct * info; if (custom.serdatr & SDR_TBE) { #ifdef SERIAL_DEBUG_INTR printk("ser_tx_int..."); #endif info = IRQ_ports; if (!info || !info->tty) return IRQ_NONE; transmit_chars(info); info->last_active = jiffies; #ifdef SERIAL_DEBUG_INTR printk("end.\n"); #endif } return IRQ_HANDLED; } /* * ------------------------------------------------------------------- * Here ends the serial interrupt routines. * ------------------------------------------------------------------- */ /* * This routine is used to handle the "bottom half" processing for the * serial driver, known also the "software interrupt" processing. * This processing is done at the kernel interrupt level, after the * rs_interrupt() has returned, BUT WITH INTERRUPTS TURNED ON. This * is where time-consuming activities which can not be done in the * interrupt driver proper are done; the interrupt driver schedules * them using rs_sched_event(), and they get done here. */ static void do_softint(unsigned long private_) { struct async_struct *info = (struct async_struct *) private_; struct tty_struct *tty; tty = info->tty; if (!tty) return; if (test_and_clear_bit(RS_EVENT_WRITE_WAKEUP, &info->event)) tty_wakeup(tty); } /* * --------------------------------------------------------------- * Low level utility subroutines for the serial driver: routines to * figure out the appropriate timeout for an interrupt chain, routines * to initialize and startup a serial port, and routines to shutdown a * serial port. Useful stuff like that. 
* --------------------------------------------------------------- */ static int startup(struct async_struct * info) { unsigned long flags; int retval=0; unsigned long page; page = get_zeroed_page(GFP_KERNEL); if (!page) return -ENOMEM; local_irq_save(flags); if (info->flags & ASYNC_INITIALIZED) { free_page(page); goto errout; } if (info->xmit.buf) free_page(page); else info->xmit.buf = (unsigned char *) page; #ifdef SERIAL_DEBUG_OPEN printk("starting up ttys%d ...", info->line); #endif /* Clear anything in the input buffer */ custom.intreq = IF_RBF; mb(); retval = request_irq(IRQ_AMIGA_VERTB, ser_vbl_int, 0, "serial status", info); if (retval) { if (serial_isroot()) { if (info->tty) set_bit(TTY_IO_ERROR, &info->tty->flags); retval = 0; } goto errout; } /* enable both Rx and Tx interrupts */ custom.intena = IF_SETCLR | IF_RBF | IF_TBE; mb(); info->IER = UART_IER_MSI; /* remember current state of the DCD and CTS bits */ current_ctl_bits = ciab.pra & (SER_DCD | SER_CTS | SER_DSR); IRQ_ports = info; info->MCR = 0; if (info->tty->termios->c_cflag & CBAUD) info->MCR = SER_DTR | SER_RTS; rtsdtr_ctrl(info->MCR); if (info->tty) clear_bit(TTY_IO_ERROR, &info->tty->flags); info->xmit.head = info->xmit.tail = 0; /* * Set up the tty->alt_speed kludge */ if (info->tty) { if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) info->tty->alt_speed = 57600; if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI) info->tty->alt_speed = 115200; if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI) info->tty->alt_speed = 230400; if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP) info->tty->alt_speed = 460800; } /* * and set the speed of the serial port */ change_speed(info, NULL); info->flags |= ASYNC_INITIALIZED; local_irq_restore(flags); return 0; errout: local_irq_restore(flags); return retval; } /* * This routine will shutdown a serial port; interrupts are disabled, and * DTR is dropped if the hangup on close termio flag is on. */ static void shutdown(struct async_struct * info) { unsigned long flags; struct serial_state *state; if (!(info->flags & ASYNC_INITIALIZED)) return; state = info->state; #ifdef SERIAL_DEBUG_OPEN printk("Shutting down serial port %d ....\n", info->line); #endif local_irq_save(flags); /* Disable interrupts */ /* * clear delta_msr_wait queue to avoid mem leaks: we may free the irq * here so the queue might never be waken up */ wake_up_interruptible(&info->delta_msr_wait); IRQ_ports = NULL; /* * Free the IRQ, if necessary */ free_irq(IRQ_AMIGA_VERTB, info); if (info->xmit.buf) { free_page((unsigned long) info->xmit.buf); info->xmit.buf = NULL; } info->IER = 0; custom.intena = IF_RBF | IF_TBE; mb(); /* disable break condition */ custom.adkcon = AC_UARTBRK; mb(); if (!info->tty || (info->tty->termios->c_cflag & HUPCL)) info->MCR &= ~(SER_DTR|SER_RTS); rtsdtr_ctrl(info->MCR); if (info->tty) set_bit(TTY_IO_ERROR, &info->tty->flags); info->flags &= ~ASYNC_INITIALIZED; local_irq_restore(flags); } /* * This routine is called to set the UART divisor registers to match * the specified baud rate for a serial port. 
*/ static void change_speed(struct async_struct *info, struct ktermios *old_termios) { int quot = 0, baud_base, baud; unsigned cflag, cval = 0; int bits; unsigned long flags; if (!info->tty || !info->tty->termios) return; cflag = info->tty->termios->c_cflag; /* Byte size is always 8 bits plus parity bit if requested */ cval = 3; bits = 10; if (cflag & CSTOPB) { cval |= 0x04; bits++; } if (cflag & PARENB) { cval |= UART_LCR_PARITY; bits++; } if (!(cflag & PARODD)) cval |= UART_LCR_EPAR; #ifdef CMSPAR if (cflag & CMSPAR) cval |= UART_LCR_SPAR; #endif /* Determine divisor based on baud rate */ baud = tty_get_baud_rate(info->tty); if (!baud) baud = 9600; /* B0 transition handled in rs_set_termios */ baud_base = info->state->baud_base; if (baud == 38400 && ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST)) quot = info->state->custom_divisor; else { if (baud == 134) /* Special case since 134 is really 134.5 */ quot = (2*baud_base / 269); else if (baud) quot = baud_base / baud; } /* If the quotient is zero refuse the change */ if (!quot && old_termios) { /* FIXME: Will need updating for new tty in the end */ info->tty->termios->c_cflag &= ~CBAUD; info->tty->termios->c_cflag |= (old_termios->c_cflag & CBAUD); baud = tty_get_baud_rate(info->tty); if (!baud) baud = 9600; if (baud == 38400 && ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST)) quot = info->state->custom_divisor; else { if (baud == 134) /* Special case since 134 is really 134.5 */ quot = (2*baud_base / 269); else if (baud) quot = baud_base / baud; } } /* As a last resort, if the quotient is zero, default to 9600 bps */ if (!quot) quot = baud_base / 9600; info->quot = quot; info->timeout = ((info->xmit_fifo_size*HZ*bits*quot) / baud_base); info->timeout += HZ/50; /* Add .02 seconds of slop */ /* CTS flow control flag and modem status interrupts */ info->IER &= ~UART_IER_MSI; if (info->flags & ASYNC_HARDPPS_CD) info->IER |= UART_IER_MSI; if (cflag & CRTSCTS) { info->flags |= ASYNC_CTS_FLOW; info->IER |= UART_IER_MSI; } else info->flags &= ~ASYNC_CTS_FLOW; if (cflag & CLOCAL) info->flags &= ~ASYNC_CHECK_CD; else { info->flags |= ASYNC_CHECK_CD; info->IER |= UART_IER_MSI; } /* TBD: * Does clearing IER_MSI imply that we should disbale the VBL interrupt ? */ /* * Set up parity check flag */ info->read_status_mask = UART_LSR_OE | UART_LSR_DR; if (I_INPCK(info->tty)) info->read_status_mask |= UART_LSR_FE | UART_LSR_PE; if (I_BRKINT(info->tty) || I_PARMRK(info->tty)) info->read_status_mask |= UART_LSR_BI; /* * Characters to ignore */ info->ignore_status_mask = 0; if (I_IGNPAR(info->tty)) info->ignore_status_mask |= UART_LSR_PE | UART_LSR_FE; if (I_IGNBRK(info->tty)) { info->ignore_status_mask |= UART_LSR_BI; /* * If we're ignore parity and break indicators, ignore * overruns too. (For real raw support). */ if (I_IGNPAR(info->tty)) info->ignore_status_mask |= UART_LSR_OE; } /* * !!! 
ignore all characters if CREAD is not set */ if ((cflag & CREAD) == 0) info->ignore_status_mask |= UART_LSR_DR; local_irq_save(flags); { short serper; /* Set up the baud rate */ serper = quot - 1; /* Enable or disable parity bit */ if(cval & UART_LCR_PARITY) serper |= (SERPER_PARENB); custom.serper = serper; mb(); } info->LCR = cval; /* Save LCR */ local_irq_restore(flags); } static int rs_put_char(struct tty_struct *tty, unsigned char ch) { struct async_struct *info; unsigned long flags; if (!tty) return 0; info = tty->driver_data; if (serial_paranoia_check(info, tty->name, "rs_put_char")) return 0; if (!info->xmit.buf) return 0; local_irq_save(flags); if (CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) == 0) { local_irq_restore(flags); return 0; } info->xmit.buf[info->xmit.head++] = ch; info->xmit.head &= SERIAL_XMIT_SIZE-1; local_irq_restore(flags); return 1; } static void rs_flush_chars(struct tty_struct *tty) { struct async_struct *info = (struct async_struct *)tty->driver_data; unsigned long flags; if (serial_paranoia_check(info, tty->name, "rs_flush_chars")) return; if (info->xmit.head == info->xmit.tail || tty->stopped || tty->hw_stopped || !info->xmit.buf) return; local_irq_save(flags); info->IER |= UART_IER_THRI; custom.intena = IF_SETCLR | IF_TBE; mb(); /* set a pending Tx Interrupt, transmitter should restart now */ custom.intreq = IF_SETCLR | IF_TBE; mb(); local_irq_restore(flags); } static int rs_write(struct tty_struct * tty, const unsigned char *buf, int count) { int c, ret = 0; struct async_struct *info; unsigned long flags; if (!tty) return 0; info = tty->driver_data; if (serial_paranoia_check(info, tty->name, "rs_write")) return 0; if (!info->xmit.buf) return 0; local_irq_save(flags); while (1) { c = CIRC_SPACE_TO_END(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); if (count < c) c = count; if (c <= 0) { break; } memcpy(info->xmit.buf + info->xmit.head, buf, c); info->xmit.head = ((info->xmit.head + c) & (SERIAL_XMIT_SIZE-1)); buf += c; count -= c; ret += c; } local_irq_restore(flags); if (info->xmit.head != info->xmit.tail && !tty->stopped && !tty->hw_stopped && !(info->IER & UART_IER_THRI)) { info->IER |= UART_IER_THRI; local_irq_disable(); custom.intena = IF_SETCLR | IF_TBE; mb(); /* set a pending Tx Interrupt, transmitter should restart now */ custom.intreq = IF_SETCLR | IF_TBE; mb(); local_irq_restore(flags); } return ret; } static int rs_write_room(struct tty_struct *tty) { struct async_struct *info = (struct async_struct *)tty->driver_data; if (serial_paranoia_check(info, tty->name, "rs_write_room")) return 0; return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); } static int rs_chars_in_buffer(struct tty_struct *tty) { struct async_struct *info = (struct async_struct *)tty->driver_data; if (serial_paranoia_check(info, tty->name, "rs_chars_in_buffer")) return 0; return CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); } static void rs_flush_buffer(struct tty_struct *tty) { struct async_struct *info = (struct async_struct *)tty->driver_data; unsigned long flags; if (serial_paranoia_check(info, tty->name, "rs_flush_buffer")) return; local_irq_save(flags); info->xmit.head = info->xmit.tail = 0; local_irq_restore(flags); tty_wakeup(tty); } /* * This function is used to send a high-priority XON/XOFF character to * the device */ static void rs_send_xchar(struct tty_struct *tty, char ch) { struct async_struct *info = (struct async_struct *)tty->driver_data; unsigned long flags; if (serial_paranoia_check(info, tty->name, 
"rs_send_char")) return; info->x_char = ch; if (ch) { /* Make sure transmit interrupts are on */ /* Check this ! */ local_irq_save(flags); if(!(custom.intenar & IF_TBE)) { custom.intena = IF_SETCLR | IF_TBE; mb(); /* set a pending Tx Interrupt, transmitter should restart now */ custom.intreq = IF_SETCLR | IF_TBE; mb(); } local_irq_restore(flags); info->IER |= UART_IER_THRI; } } /* * ------------------------------------------------------------ * rs_throttle() * * This routine is called by the upper-layer tty layer to signal that * incoming characters should be throttled. * ------------------------------------------------------------ */ static void rs_throttle(struct tty_struct * tty) { struct async_struct *info = (struct async_struct *)tty->driver_data; unsigned long flags; #ifdef SERIAL_DEBUG_THROTTLE char buf[64]; printk("throttle %s: %d....\n", tty_name(tty, buf), tty->ldisc.chars_in_buffer(tty)); #endif if (serial_paranoia_check(info, tty->name, "rs_throttle")) return; if (I_IXOFF(tty)) rs_send_xchar(tty, STOP_CHAR(tty)); if (tty->termios->c_cflag & CRTSCTS) info->MCR &= ~SER_RTS; local_irq_save(flags); rtsdtr_ctrl(info->MCR); local_irq_restore(flags); } static void rs_unthrottle(struct tty_struct * tty) { struct async_struct *info = (struct async_struct *)tty->driver_data; unsigned long flags; #ifdef SERIAL_DEBUG_THROTTLE char buf[64]; printk("unthrottle %s: %d....\n", tty_name(tty, buf), tty->ldisc.chars_in_buffer(tty)); #endif if (serial_paranoia_check(info, tty->name, "rs_unthrottle")) return; if (I_IXOFF(tty)) { if (info->x_char) info->x_char = 0; else rs_send_xchar(tty, START_CHAR(tty)); } if (tty->termios->c_cflag & CRTSCTS) info->MCR |= SER_RTS; local_irq_save(flags); rtsdtr_ctrl(info->MCR); local_irq_restore(flags); } /* * ------------------------------------------------------------ * rs_ioctl() and friends * ------------------------------------------------------------ */ static int get_serial_info(struct async_struct * info, struct serial_struct __user * retinfo) { struct serial_struct tmp; struct serial_state *state = info->state; if (!retinfo) return -EFAULT; memset(&tmp, 0, sizeof(tmp)); lock_kernel(); tmp.type = state->type; tmp.line = state->line; tmp.port = state->port; tmp.irq = state->irq; tmp.flags = state->flags; tmp.xmit_fifo_size = state->xmit_fifo_size; tmp.baud_base = state->baud_base; tmp.close_delay = state->close_delay; tmp.closing_wait = state->closing_wait; tmp.custom_divisor = state->custom_divisor; unlock_kernel(); if (copy_to_user(retinfo,&tmp,sizeof(*retinfo))) return -EFAULT; return 0; } static int set_serial_info(struct async_struct * info, struct serial_struct __user * new_info) { struct serial_struct new_serial; struct serial_state old_state, *state; unsigned int change_irq,change_port; int retval = 0; if (copy_from_user(&new_serial,new_info,sizeof(new_serial))) return -EFAULT; lock_kernel(); state = info->state; old_state = *state; change_irq = new_serial.irq != state->irq; change_port = (new_serial.port != state->port); if(change_irq || change_port || (new_serial.xmit_fifo_size != state->xmit_fifo_size)) { unlock_kernel(); return -EINVAL; } if (!serial_isroot()) { if ((new_serial.baud_base != state->baud_base) || (new_serial.close_delay != state->close_delay) || (new_serial.xmit_fifo_size != state->xmit_fifo_size) || ((new_serial.flags & ~ASYNC_USR_MASK) != (state->flags & ~ASYNC_USR_MASK))) return -EPERM; state->flags = ((state->flags & ~ASYNC_USR_MASK) | (new_serial.flags & ASYNC_USR_MASK)); info->flags = ((info->flags & ~ASYNC_USR_MASK) | 
(new_serial.flags & ASYNC_USR_MASK)); state->custom_divisor = new_serial.custom_divisor; goto check_and_exit; } if (new_serial.baud_base < 9600) { unlock_kernel(); return -EINVAL; } /* * OK, past this point, all the error checking has been done. * At this point, we start making changes..... */ state->baud_base = new_serial.baud_base; state->flags = ((state->flags & ~ASYNC_FLAGS) | (new_serial.flags & ASYNC_FLAGS)); info->flags = ((state->flags & ~ASYNC_INTERNAL_FLAGS) | (info->flags & ASYNC_INTERNAL_FLAGS)); state->custom_divisor = new_serial.custom_divisor; state->close_delay = new_serial.close_delay * HZ/100; state->closing_wait = new_serial.closing_wait * HZ/100; info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0; check_and_exit: if (info->flags & ASYNC_INITIALIZED) { if (((old_state.flags & ASYNC_SPD_MASK) != (state->flags & ASYNC_SPD_MASK)) || (old_state.custom_divisor != state->custom_divisor)) { if ((state->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) info->tty->alt_speed = 57600; if ((state->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI) info->tty->alt_speed = 115200; if ((state->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI) info->tty->alt_speed = 230400; if ((state->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP) info->tty->alt_speed = 460800; change_speed(info, NULL); } } else retval = startup(info); unlock_kernel(); return retval; } /* * get_lsr_info - get line status register info * * Purpose: Let user call ioctl() to get info when the UART physically * is emptied. On bus types like RS485, the transmitter must * release the bus after transmitting. This must be done when * the transmit shift register is empty, not be done when the * transmit holding register is empty. This functionality * allows an RS485 driver to be written in user space. */ static int get_lsr_info(struct async_struct * info, unsigned int __user *value) { unsigned char status; unsigned int result; unsigned long flags; local_irq_save(flags); status = custom.serdatr; mb(); local_irq_restore(flags); result = ((status & SDR_TSRE) ? TIOCSER_TEMT : 0); if (copy_to_user(value, &result, sizeof(int))) return -EFAULT; return 0; } static int rs_tiocmget(struct tty_struct *tty, struct file *file) { struct async_struct * info = (struct async_struct *)tty->driver_data; unsigned char control, status; unsigned long flags; if (serial_paranoia_check(info, tty->name, "rs_ioctl")) return -ENODEV; if (tty->flags & (1 << TTY_IO_ERROR)) return -EIO; control = info->MCR; local_irq_save(flags); status = ciab.pra; local_irq_restore(flags); return ((control & SER_RTS) ? TIOCM_RTS : 0) | ((control & SER_DTR) ? TIOCM_DTR : 0) | (!(status & SER_DCD) ? TIOCM_CAR : 0) | (!(status & SER_DSR) ? TIOCM_DSR : 0) | (!(status & SER_CTS) ? 
TIOCM_CTS : 0); } static int rs_tiocmset(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear) { struct async_struct * info = (struct async_struct *)tty->driver_data; unsigned long flags; if (serial_paranoia_check(info, tty->name, "rs_ioctl")) return -ENODEV; if (tty->flags & (1 << TTY_IO_ERROR)) return -EIO; local_irq_save(flags); if (set & TIOCM_RTS) info->MCR |= SER_RTS; if (set & TIOCM_DTR) info->MCR |= SER_DTR; if (clear & TIOCM_RTS) info->MCR &= ~SER_RTS; if (clear & TIOCM_DTR) info->MCR &= ~SER_DTR; rtsdtr_ctrl(info->MCR); local_irq_restore(flags); return 0; } /* * rs_break() --- routine which turns the break handling on or off */ static int rs_break(struct tty_struct *tty, int break_state) { struct async_struct * info = (struct async_struct *)tty->driver_data; unsigned long flags; if (serial_paranoia_check(info, tty->name, "rs_break")) return -EINVAL; local_irq_save(flags); if (break_state == -1) custom.adkcon = AC_SETCLR | AC_UARTBRK; else custom.adkcon = AC_UARTBRK; mb(); local_irq_restore(flags); return 0; } static int rs_ioctl(struct tty_struct *tty, struct file * file, unsigned int cmd, unsigned long arg) { struct async_struct * info = (struct async_struct *)tty->driver_data; struct async_icount cprev, cnow; /* kernel counter temps */ struct serial_icounter_struct icount; void __user *argp = (void __user *)arg; unsigned long flags; if (serial_paranoia_check(info, tty->name, "rs_ioctl")) return -ENODEV; if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) && (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) { if (tty->flags & (1 << TTY_IO_ERROR)) return -EIO; } switch (cmd) { case TIOCGSERIAL: return get_serial_info(info, argp); case TIOCSSERIAL: return set_serial_info(info, argp); case TIOCSERCONFIG: return 0; case TIOCSERGETLSR: /* Get line status register */ return get_lsr_info(info, argp); case TIOCSERGSTRUCT: if (copy_to_user(argp, info, sizeof(struct async_struct))) return -EFAULT; return 0; /* * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change * - mask passed in arg for lines of interest * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking) * Caller should use TIOCGICOUNT to see which one it was */ case TIOCMIWAIT: local_irq_save(flags); /* note the counters on entry */ cprev = info->state->icount; local_irq_restore(flags); while (1) { interruptible_sleep_on(&info->delta_msr_wait); /* see if a signal did it */ if (signal_pending(current)) return -ERESTARTSYS; local_irq_save(flags); cnow = info->state->icount; /* atomic copy */ local_irq_restore(flags); if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) return -EIO; /* no change => error */ if ( ((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) || ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) || ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) || ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts)) ) { return 0; } cprev = cnow; } /* NOTREACHED */ /* * Get counter of input serial line interrupts (DCD,RI,DSR,CTS) * Return: write counters to the user passed counter struct * NB: both 1->0 and 0->1 transitions are counted except for * RI where only 0->1 is counted. 
*/ case TIOCGICOUNT: local_irq_save(flags); cnow = info->state->icount; local_irq_restore(flags); icount.cts = cnow.cts; icount.dsr = cnow.dsr; icount.rng = cnow.rng; icount.dcd = cnow.dcd; icount.rx = cnow.rx; icount.tx = cnow.tx; icount.frame = cnow.frame; icount.overrun = cnow.overrun; icount.parity = cnow.parity; icount.brk = cnow.brk; icount.buf_overrun = cnow.buf_overrun; if (copy_to_user(argp, &icount, sizeof(icount))) return -EFAULT; return 0; case TIOCSERGWILD: case TIOCSERSWILD: /* "setserial -W" is called in Debian boot */ printk ("TIOCSER?WILD ioctl obsolete, ignored.\n"); return 0; default: return -ENOIOCTLCMD; } return 0; } static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios) { struct async_struct *info = (struct async_struct *)tty->driver_data; unsigned long flags; unsigned int cflag = tty->termios->c_cflag; change_speed(info, old_termios); /* Handle transition to B0 status */ if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD)) { info->MCR &= ~(SER_DTR|SER_RTS); local_irq_save(flags); rtsdtr_ctrl(info->MCR); local_irq_restore(flags); } /* Handle transition away from B0 status */ if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) { info->MCR |= SER_DTR; if (!(tty->termios->c_cflag & CRTSCTS) || !test_bit(TTY_THROTTLED, &tty->flags)) { info->MCR |= SER_RTS; } local_irq_save(flags); rtsdtr_ctrl(info->MCR); local_irq_restore(flags); } /* Handle turning off CRTSCTS */ if ((old_termios->c_cflag & CRTSCTS) && !(tty->termios->c_cflag & CRTSCTS)) { tty->hw_stopped = 0; rs_start(tty); } #if 0 /* * No need to wake up processes in open wait, since they * sample the CLOCAL flag once, and don't recheck it. * XXX It's not clear whether the current behavior is correct * or not. Hence, this may change..... */ if (!(old_termios->c_cflag & CLOCAL) && (tty->termios->c_cflag & CLOCAL)) wake_up_interruptible(&info->open_wait); #endif } /* * ------------------------------------------------------------ * rs_close() * * This routine is called when the serial port gets closed. First, we * wait for the last remaining data to be sent. Then, we unlink its * async structure from the interrupt chain if necessary, and we free * that IRQ if nothing is left in the chain. * ------------------------------------------------------------ */ static void rs_close(struct tty_struct *tty, struct file * filp) { struct async_struct * info = (struct async_struct *)tty->driver_data; struct serial_state *state; unsigned long flags; if (!info || serial_paranoia_check(info, tty->name, "rs_close")) return; state = info->state; local_irq_save(flags); if (tty_hung_up_p(filp)) { DBG_CNT("before DEC-hung"); local_irq_restore(flags); return; } #ifdef SERIAL_DEBUG_OPEN printk("rs_close ttys%d, count = %d\n", info->line, state->count); #endif if ((tty->count == 1) && (state->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. state->count should always * be one in these conditions. If it's greater than * one, we've got real problems, since it means the * serial port won't be shutdown. 
*/ printk("rs_close: bad serial port count; tty->count is 1, " "state->count is %d\n", state->count); state->count = 1; } if (--state->count < 0) { printk("rs_close: bad serial port count for ttys%d: %d\n", info->line, state->count); state->count = 0; } if (state->count) { DBG_CNT("before DEC-2"); local_irq_restore(flags); return; } info->flags |= ASYNC_CLOSING; /* * Now we wait for the transmit buffer to clear; and we notify * the line discipline to only process XON/XOFF characters. */ tty->closing = 1; if (info->closing_wait != ASYNC_CLOSING_WAIT_NONE) tty_wait_until_sent(tty, info->closing_wait); /* * At this point we stop accepting input. To do this, we * disable the receive line status interrupts, and tell the * interrupt driver to stop checking the data ready bit in the * line status register. */ info->read_status_mask &= ~UART_LSR_DR; if (info->flags & ASYNC_INITIALIZED) { /* disable receive interrupts */ custom.intena = IF_RBF; mb(); /* clear any pending receive interrupt */ custom.intreq = IF_RBF; mb(); /* * Before we drop DTR, make sure the UART transmitter * has completely drained; this is especially * important if there is a transmit FIFO! */ rs_wait_until_sent(tty, info->timeout); } shutdown(info); rs_flush_buffer(tty); tty_ldisc_flush(tty); tty->closing = 0; info->event = 0; info->tty = NULL; if (info->blocked_open) { if (info->close_delay) { msleep_interruptible(jiffies_to_msecs(info->close_delay)); } wake_up_interruptible(&info->open_wait); } info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING); wake_up_interruptible(&info->close_wait); local_irq_restore(flags); } /* * rs_wait_until_sent() --- wait until the transmitter is empty */ static void rs_wait_until_sent(struct tty_struct *tty, int timeout) { struct async_struct * info = (struct async_struct *)tty->driver_data; unsigned long orig_jiffies, char_time; int lsr; if (serial_paranoia_check(info, tty->name, "rs_wait_until_sent")) return; if (info->xmit_fifo_size == 0) return; /* Just in case.... */ orig_jiffies = jiffies; lock_kernel(); /* * Set the check interval to be 1/5 of the estimated time to * send a single character, and make it at least 1. The check * interval should also be less than the timeout. * * Note: we have to use pretty tight timings here to satisfy * the NIST-PCTS. */ char_time = (info->timeout - HZ/50) / info->xmit_fifo_size; char_time = char_time / 5; if (char_time == 0) char_time = 1; if (timeout) char_time = min_t(unsigned long, char_time, timeout); /* * If the transmitter hasn't cleared in twice the approximate * amount of time to send the entire FIFO, it probably won't * ever clear. This assumes the UART isn't doing flow * control, which is currently the case. Hence, if it ever * takes longer than info->timeout, this is probably due to a * UART bug of some kind. So, we clamp the timeout parameter at * 2*info->timeout. 
*/ if (!timeout || timeout > 2*info->timeout) timeout = 2*info->timeout; #ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT printk("In rs_wait_until_sent(%d) check=%lu...", timeout, char_time); printk("jiff=%lu...", jiffies); #endif while(!((lsr = custom.serdatr) & SDR_TSRE)) { #ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT printk("serdatr = %d (jiff=%lu)...", lsr, jiffies); #endif msleep_interruptible(jiffies_to_msecs(char_time)); if (signal_pending(current)) break; if (timeout && time_after(jiffies, orig_jiffies + timeout)) break; } __set_current_state(TASK_RUNNING); unlock_kernel(); #ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies); #endif } /* * rs_hangup() --- called by tty_hangup() when a hangup is signaled. */ static void rs_hangup(struct tty_struct *tty) { struct async_struct * info = (struct async_struct *)tty->driver_data; struct serial_state *state = info->state; if (serial_paranoia_check(info, tty->name, "rs_hangup")) return; state = info->state; rs_flush_buffer(tty); shutdown(info); info->event = 0; state->count = 0; info->flags &= ~ASYNC_NORMAL_ACTIVE; info->tty = NULL; wake_up_interruptible(&info->open_wait); } /* * ------------------------------------------------------------ * rs_open() and friends * ------------------------------------------------------------ */ static int block_til_ready(struct tty_struct *tty, struct file * filp, struct async_struct *info) { #ifdef DECLARE_WAITQUEUE DECLARE_WAITQUEUE(wait, current); #else struct wait_queue wait = { current, NULL }; #endif struct serial_state *state = info->state; int retval; int do_clocal = 0, extra_count = 0; unsigned long flags; /* * If the device is in the middle of being closed, then block * until it's done, and then try again. */ if (tty_hung_up_p(filp) || (info->flags & ASYNC_CLOSING)) { if (info->flags & ASYNC_CLOSING) interruptible_sleep_on(&info->close_wait); #ifdef SERIAL_DO_RESTART return ((info->flags & ASYNC_HUP_NOTIFY) ? -EAGAIN : -ERESTARTSYS); #else return -EAGAIN; #endif } /* * If non-blocking mode is set, or the port is not enabled, * then make the check up front and then exit. */ if ((filp->f_flags & O_NONBLOCK) || (tty->flags & (1 << TTY_IO_ERROR))) { info->flags |= ASYNC_NORMAL_ACTIVE; return 0; } if (tty->termios->c_cflag & CLOCAL) do_clocal = 1; /* * Block waiting for the carrier detect and the line to become * free (i.e., not in use by the callout). While we are in * this loop, state->count is dropped by one, so that * rs_close() knows when to free things. We restore it upon * exit, either normal or abnormal. 
*/ retval = 0; add_wait_queue(&info->open_wait, &wait); #ifdef SERIAL_DEBUG_OPEN printk("block_til_ready before block: ttys%d, count = %d\n", state->line, state->count); #endif local_irq_save(flags); if (!tty_hung_up_p(filp)) { extra_count = 1; state->count--; } local_irq_restore(flags); info->blocked_open++; while (1) { local_irq_save(flags); if (tty->termios->c_cflag & CBAUD) rtsdtr_ctrl(SER_DTR|SER_RTS); local_irq_restore(flags); set_current_state(TASK_INTERRUPTIBLE); if (tty_hung_up_p(filp) || !(info->flags & ASYNC_INITIALIZED)) { #ifdef SERIAL_DO_RESTART if (info->flags & ASYNC_HUP_NOTIFY) retval = -EAGAIN; else retval = -ERESTARTSYS; #else retval = -EAGAIN; #endif break; } if (!(info->flags & ASYNC_CLOSING) && (do_clocal || (!(ciab.pra & SER_DCD)) )) break; if (signal_pending(current)) { retval = -ERESTARTSYS; break; } #ifdef SERIAL_DEBUG_OPEN printk("block_til_ready blocking: ttys%d, count = %d\n", info->line, state->count); #endif schedule(); } __set_current_state(TASK_RUNNING); remove_wait_queue(&info->open_wait, &wait); if (extra_count) state->count++; info->blocked_open--; #ifdef SERIAL_DEBUG_OPEN printk("block_til_ready after blocking: ttys%d, count = %d\n", info->line, state->count); #endif if (retval) return retval; info->flags |= ASYNC_NORMAL_ACTIVE; return 0; } static int get_async_struct(int line, struct async_struct **ret_info) { struct async_struct *info; struct serial_state *sstate; sstate = rs_table + line; sstate->count++; if (sstate->info) { *ret_info = sstate->info; return 0; } info = kzalloc(sizeof(struct async_struct), GFP_KERNEL); if (!info) { sstate->count--; return -ENOMEM; } #ifdef DECLARE_WAITQUEUE init_waitqueue_head(&info->open_wait); init_waitqueue_head(&info->close_wait); init_waitqueue_head(&info->delta_msr_wait); #endif info->magic = SERIAL_MAGIC; info->port = sstate->port; info->flags = sstate->flags; info->xmit_fifo_size = sstate->xmit_fifo_size; info->line = line; tasklet_init(&info->tlet, do_softint, (unsigned long)info); info->state = sstate; if (sstate->info) { kfree(info); *ret_info = sstate->info; return 0; } *ret_info = sstate->info = info; return 0; } /* * This routine is called whenever a serial port is opened. It * enables interrupts for a serial port, linking in its async structure into * the IRQ chain. It also performs the serial-specific * initialization for the tty structure. */ static int rs_open(struct tty_struct *tty, struct file * filp) { struct async_struct *info; int retval, line; line = tty->index; if ((line < 0) || (line >= NR_PORTS)) { return -ENODEV; } retval = get_async_struct(line, &info); if (retval) { return retval; } tty->driver_data = info; info->tty = tty; if (serial_paranoia_check(info, tty->name, "rs_open")) return -ENODEV; #ifdef SERIAL_DEBUG_OPEN printk("rs_open %s, count = %d\n", tty->name, info->state->count); #endif info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0; /* * If the port is the middle of closing, bail out now */ if (tty_hung_up_p(filp) || (info->flags & ASYNC_CLOSING)) { if (info->flags & ASYNC_CLOSING) interruptible_sleep_on(&info->close_wait); #ifdef SERIAL_DO_RESTART return ((info->flags & ASYNC_HUP_NOTIFY) ? 
-EAGAIN : -ERESTARTSYS); #else return -EAGAIN; #endif } /* * Start up serial port */ retval = startup(info); if (retval) { return retval; } retval = block_til_ready(tty, filp, info); if (retval) { #ifdef SERIAL_DEBUG_OPEN printk("rs_open returning after block_til_ready with %d\n", retval); #endif return retval; } #ifdef SERIAL_DEBUG_OPEN printk("rs_open %s successful...", tty->name); #endif return 0; } /* * /proc fs routines.... */ static inline int line_info(char *buf, struct serial_state *state) { struct async_struct *info = state->info, scr_info; char stat_buf[30], control, status; int ret; unsigned long flags; ret = sprintf(buf, "%d: uart:amiga_builtin",state->line); /* * Figure out the current RS-232 lines */ if (!info) { info = &scr_info; /* This is just for serial_{in,out} */ info->magic = SERIAL_MAGIC; info->flags = state->flags; info->quot = 0; info->tty = NULL; } local_irq_save(flags); status = ciab.pra; control = info ? info->MCR : status; local_irq_restore(flags); stat_buf[0] = 0; stat_buf[1] = 0; if(!(control & SER_RTS)) strcat(stat_buf, "|RTS"); if(!(status & SER_CTS)) strcat(stat_buf, "|CTS"); if(!(control & SER_DTR)) strcat(stat_buf, "|DTR"); if(!(status & SER_DSR)) strcat(stat_buf, "|DSR"); if(!(status & SER_DCD)) strcat(stat_buf, "|CD"); if (info->quot) { ret += sprintf(buf+ret, " baud:%d", state->baud_base / info->quot); } ret += sprintf(buf+ret, " tx:%d rx:%d", state->icount.tx, state->icount.rx); if (state->icount.frame) ret += sprintf(buf+ret, " fe:%d", state->icount.frame); if (state->icount.parity) ret += sprintf(buf+ret, " pe:%d", state->icount.parity); if (state->icount.brk) ret += sprintf(buf+ret, " brk:%d", state->icount.brk); if (state->icount.overrun) ret += sprintf(buf+ret, " oe:%d", state->icount.overrun); /* * Last thing is the RS-232 status lines */ ret += sprintf(buf+ret, " %s\n", stat_buf+1); return ret; } static int rs_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data) { int len = 0, l; off_t begin = 0; len += sprintf(page, "serinfo:1.0 driver:%s\n", serial_version); l = line_info(page + len, &rs_table[0]); len += l; if (len+begin > off+count) goto done; if (len+begin < off) { begin += len; len = 0; } *eof = 1; done: if (off >= len+begin) return 0; *start = page + (off-begin); return ((count < begin+len-off) ? count : begin+len-off); } /* * --------------------------------------------------------------------- * rs_init() and friends * * rs_init() is called at boot-time to initialize the serial driver. * --------------------------------------------------------------------- */ /* * This routine prints out the appropriate serial driver version * number, and identifies which options were configured into this * driver. */ static void show_serial_version(void) { printk(KERN_INFO "%s version %s\n", serial_name, serial_version); } static const struct tty_operations serial_ops = { .open = rs_open, .close = rs_close, .write = rs_write, .put_char = rs_put_char, .flush_chars = rs_flush_chars, .write_room = rs_write_room, .chars_in_buffer = rs_chars_in_buffer, .flush_buffer = rs_flush_buffer, .ioctl = rs_ioctl, .throttle = rs_throttle, .unthrottle = rs_unthrottle, .set_termios = rs_set_termios, .stop = rs_stop, .start = rs_start, .hangup = rs_hangup, .break_ctl = rs_break, .send_xchar = rs_send_xchar, .wait_until_sent = rs_wait_until_sent, .read_proc = rs_read_proc, .tiocmget = rs_tiocmget, .tiocmset = rs_tiocmset, }; /* * The serial driver boot-time initialization code! 
*/ static int __init rs_init(void) { unsigned long flags; struct serial_state * state; if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(AMI_SERIAL)) return -ENODEV; serial_driver = alloc_tty_driver(1); if (!serial_driver) return -ENOMEM; /* * We request SERDAT and SERPER only, because the serial registers are * too spreaded over the custom register space */ if (!request_mem_region(CUSTOM_PHYSADDR+0x30, 4, "amiserial [Paula]")) return -EBUSY; IRQ_ports = NULL; show_serial_version(); /* Initialize the tty_driver structure */ serial_driver->owner = THIS_MODULE; serial_driver->driver_name = "amiserial"; serial_driver->name = "ttyS"; serial_driver->major = TTY_MAJOR; serial_driver->minor_start = 64; serial_driver->type = TTY_DRIVER_TYPE_SERIAL; serial_driver->subtype = SERIAL_TYPE_NORMAL; serial_driver->init_termios = tty_std_termios; serial_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; serial_driver->flags = TTY_DRIVER_REAL_RAW; tty_set_operations(serial_driver, &serial_ops); if (tty_register_driver(serial_driver)) panic("Couldn't register serial driver\n"); state = rs_table; state->magic = SSTATE_MAGIC; state->port = (int)&custom.serdatr; /* Just to give it a value */ state->line = 0; state->custom_divisor = 0; state->close_delay = 5*HZ/10; state->closing_wait = 30*HZ; state->icount.cts = state->icount.dsr = state->icount.rng = state->icount.dcd = 0; state->icount.rx = state->icount.tx = 0; state->icount.frame = state->icount.parity = 0; state->icount.overrun = state->icount.brk = 0; printk(KERN_INFO "ttyS%d is the amiga builtin serial port\n", state->line); /* Hardware set up */ state->baud_base = amiga_colorclock; state->xmit_fifo_size = 1; local_irq_save(flags); /* set ISRs, and then disable the rx interrupts */ request_irq(IRQ_AMIGA_TBE, ser_tx_int, 0, "serial TX", state); request_irq(IRQ_AMIGA_RBF, ser_rx_int, IRQF_DISABLED, "serial RX", state); /* turn off Rx and Tx interrupts */ custom.intena = IF_RBF | IF_TBE; mb(); /* clear any pending interrupt */ custom.intreq = IF_RBF | IF_TBE; mb(); local_irq_restore(flags); /* * set the appropriate directions for the modem control flags, * and clear RTS and DTR */ ciab.ddra |= (SER_DTR | SER_RTS); /* outputs */ ciab.ddra &= ~(SER_DCD | SER_CTS | SER_DSR); /* inputs */ return 0; } static __exit void rs_exit(void) { int error; struct async_struct *info = rs_table[0].info; /* printk("Unloading %s: version %s\n", serial_name, serial_version); */ tasklet_kill(&info->tlet); if ((error = tty_unregister_driver(serial_driver))) printk("SERIAL: failed to unregister serial driver (%d)\n", error); put_tty_driver(serial_driver); if (info) { rs_table[0].info = NULL; kfree(info); } release_mem_region(CUSTOM_PHYSADDR+0x30, 4); } module_init(rs_init) module_exit(rs_exit) /* * ------------------------------------------------------------ * Serial console driver * ------------------------------------------------------------ */ #ifdef CONFIG_SERIAL_CONSOLE static void amiga_serial_putc(char c) { custom.serdat = (unsigned char)c | 0x100; while (!(custom.serdatr & 0x2000)) barrier(); } /* * Print a string to the serial port trying not to disturb * any possible real use of the port... * * The console must be locked when we get here. 
*/ static void serial_console_write(struct console *co, const char *s, unsigned count) { unsigned short intena = custom.intenar; custom.intena = IF_TBE; while (count--) { if (*s == '\n') amiga_serial_putc('\r'); amiga_serial_putc(*s++); } custom.intena = IF_SETCLR | (intena & IF_TBE); } static struct tty_driver *serial_console_device(struct console *c, int *index) { *index = 0; return serial_driver; } static struct console sercons = { .name = "ttyS", .write = serial_console_write, .device = serial_console_device, .flags = CON_PRINTBUFFER, .index = -1, }; /* * Register console. */ static int __init amiserial_console_init(void) { register_console(&sercons); return 0; } console_initcall(amiserial_console_init); #endif MODULE_LICENSE("GPL");
gpl-2.0
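The block_til_ready() routine above is the classic open-coded sleep loop: add the task to a wait queue, set TASK_INTERRUPTIBLE, re-check the carrier/hangup condition, and schedule(). Below is a minimal, runnable userspace sketch of the same wait pattern using POSIX condition variables; the port struct and its field names are illustrative stand-ins, not part of the driver.

/*
 * Userspace analogue of block_til_ready(): sleep until carrier comes up
 * or the port starts closing. Build with -lpthread. Hypothetical names.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct port {
	pthread_mutex_t lock;
	pthread_cond_t open_wait;	/* plays the role of info->open_wait */
	bool carrier;			/* plays the role of the DCD check */
	bool closing;			/* plays the role of ASYNC_CLOSING */
};

static int block_til_ready(struct port *p)
{
	int ret;

	pthread_mutex_lock(&p->lock);
	/* equivalent of the driver's while(1) { check; schedule(); } loop */
	while (!p->carrier && !p->closing)
		pthread_cond_wait(&p->open_wait, &p->lock);
	ret = p->closing ? -1 : 0;
	pthread_mutex_unlock(&p->lock);
	return ret;
}

static void *raise_carrier(void *arg)
{
	struct port *p = arg;

	pthread_mutex_lock(&p->lock);
	p->carrier = true;		/* like DCD going active */
	pthread_cond_broadcast(&p->open_wait);
	pthread_mutex_unlock(&p->lock);
	return NULL;
}

int main(void)
{
	struct port p = { PTHREAD_MUTEX_INITIALIZER,
			  PTHREAD_COND_INITIALIZER, false, false };
	pthread_t t;

	pthread_create(&t, NULL, raise_carrier, &p);
	printf("open %s\n", block_til_ready(&p) ? "aborted" : "completed");
	pthread_join(&t, NULL);
	return 0;
}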
ktoonsez/MIUIv4-I777
arch/arm/mach-exynos/midas-camera.c
18
29170
/* * camera class init */ #include <linux/gpio.h> #include <linux/err.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/regulator/machine.h> #include <plat/devs.h> #include <plat/csis.h> #include <plat/pd.h> #include <plat/gpio-cfg.h> #include <media/exynos_flite.h> #if defined(CONFIG_VIDEO_S5C73M3) || defined(CONFIG_VIDEO_SLP_S5C73M3) #include <media/s5c73m3_platform.h> #endif #if defined(CONFIG_VIDEO_M5MO) #include <mach/regs-gpio.h> #include <media/m5mo_platform.h> #endif #ifdef CONFIG_EXYNOS4_CONTENT_PATH_PROTECTION #include <mach/secmem.h> #endif struct class *camera_class; static int __init camera_class_init(void) { camera_class = class_create(THIS_MODULE, "camera"); return 0; } subsys_initcall(camera_class_init); #if defined(CONFIG_VIDEO_FIMC) /* * External camera reset * Because the most of cameras take i2c bus signal, so that * you have to reset at the boot time for other i2c slave devices. * This function also called at fimc_init_camera() * Do optimization for cameras on your platform. */ int s3c_csis_power(int enable) { struct regulator *regulator; int ret = 0; /* mipi_1.1v ,mipi_1.8v are always powered-on. * If they are off, we then power them on. */ if (enable) { /* VMIPI_1.0V */ regulator = regulator_get(NULL, "vmipi_1.0v"); if (IS_ERR(regulator)) goto error_out; if (!regulator_is_enabled(regulator)) { printk(KERN_WARNING "%s: vmipi_1.1v is off. so ON\n", __func__); ret = regulator_enable(regulator); } regulator_put(regulator); /* VMIPI_1.8V */ regulator = regulator_get(NULL, "vmipi_1.8v"); if (IS_ERR(regulator)) goto error_out; if (!regulator_is_enabled(regulator)) { printk(KERN_WARNING "%s: vmipi_1.8v is off. so ON\n", __func__); ret = regulator_enable(regulator); } regulator_put(regulator); printk(KERN_WARNING "%s: vmipi_1.0v and vmipi_1.8v were ON\n", __func__); } return 0; error_out: printk(KERN_ERR "%s: ERROR: failed to check mipi-power\n", __func__); return 0; } static int s5k6a3_power(int enable) { int ret; struct regulator *regulator; int err; printk(KERN_ERR "%s %s\n", __func__, enable ? "on" : "down"); if (enable) { /* CAM_SENSOR_A2.8V */ #if defined(CONFIG_MACH_MIDAS_02_BD) || defined(CONFIG_GPIO_MIDAS_02_BD) err = gpio_request(GPIO_CAM_IO_EN, "GPY6"); #else err = gpio_request(GPIO_CAM_IO_EN, "GPM0"); #endif if (err) printk(KERN_ERR "#### failed to request GPIO_CAM_IO_EN ####\n"); ret = gpio_direction_output(GPIO_CAM_IO_EN, 1); gpio_free(GPIO_CAM_IO_EN); udelay(50); /* Camera MCLK */ #if defined(CONFIG_MACH_MIDAS_02_BD) || defined(CONFIG_GPIO_MIDAS_02_BD) err = gpio_request(GPIO_VTCAM_MCLK, "GPJ1"); #else err = gpio_request(GPIO_VTCAM_MCLK, "GPM2"); #endif if (err) printk(KERN_ERR "#### failed to request GPM2_2 ####\n"); #if defined(CONFIG_MACH_MIDAS_02_BD) || defined(CONFIG_GPIO_MIDAS_02_BD) s3c_gpio_cfgpin(GPIO_VTCAM_MCLK, S3C_GPIO_SFN(3)); #else s3c_gpio_cfgpin(GPIO_VTCAM_MCLK, S3C_GPIO_SFN(2)); #endif s3c_gpio_setpull(GPIO_VTCAM_MCLK, S3C_GPIO_PULL_NONE); gpio_free(GPIO_VTCAM_MCLK); /* VT_CAM_1.8V */ regulator = regulator_get(NULL, "vt_cam_1.8v"); if (IS_ERR(regulator)) { printk(KERN_INFO "smdk4212_cam1_reset regulator get err\n"); goto out; } if (!regulator_is_enabled(regulator)) { printk(KERN_WARNING "%s: vt_cam_1.8v is off. 
so ON\n", __func__); ret = regulator_enable(regulator); } regulator_put(regulator); /* Camera reset */ err = gpio_request(GPIO_CAM_VT_nRST, "GPM1"); if (err) printk(KERN_ERR "#### failed to request GPIO_CAM_VT_nRST ####\n"); s3c_gpio_setpull(GPIO_CAM_VT_nRST, S3C_GPIO_PULL_NONE); gpio_direction_output(GPIO_CAM_VT_nRST, 0); gpio_direction_output(GPIO_CAM_VT_nRST, 1); gpio_free(GPIO_CAM_VT_nRST); s3c_csis_power(1); } else { /* Camera reset */ err = gpio_request(GPIO_CAM_VT_nRST, "GPM1"); if (err) printk(KERN_ERR "#### failed to request GPIO_CAM_VT_nRST ####\n"); s3c_gpio_setpull(GPIO_CAM_VT_nRST, S3C_GPIO_PULL_NONE); gpio_direction_output(GPIO_CAM_VT_nRST, 0); gpio_free(GPIO_CAM_VT_nRST); /* VT_CAM_1.8V */ regulator = regulator_get(NULL, "vt_cam_1.8v"); if (IS_ERR(regulator)) { printk(KERN_INFO "smdk4212_cam1_reset regulator get err\n"); goto out; } if (regulator_is_enabled(regulator)) { printk(KERN_WARNING "%s: vt_cam_1.8v is on. so OFF\n", __func__); ret = regulator_disable(regulator); } regulator_put(regulator); /* Camera MCLK */ #if defined(CONFIG_MACH_MIDAS_02_BD) || defined(CONFIG_GPIO_MIDAS_02_BD) err = gpio_request(GPIO_VTCAM_MCLK, "GPM2"); #else err = gpio_request(GPIO_VTCAM_MCLK, "GPJ1"); #endif if (err) printk(KERN_ERR "#### failed to request GPM2_2 ####\n"); s3c_gpio_cfgpin(GPIO_VTCAM_MCLK, S3C_GPIO_INPUT); s3c_gpio_setpull(GPIO_VTCAM_MCLK, S3C_GPIO_PULL_DOWN); gpio_free(GPIO_VTCAM_MCLK); /* CAM_SENSOR_A2.8V */ #if defined(CONFIG_MACH_MIDAS_02_BD) || defined(CONFIG_GPIO_MIDAS_02_BD) err = gpio_request(GPIO_CAM_IO_EN, "GPY6"); #else err = gpio_request(GPIO_CAM_IO_EN, "GPM0"); #endif if (err) printk(KERN_ERR "#### failed to request GPIO_CAM_IO_EN ####\n"); ret = gpio_direction_output(GPIO_CAM_IO_EN, 0); gpio_free(GPIO_CAM_IO_EN); s3c_csis_power(0); } out: return 0; } #ifdef WRITEBACK_ENABLED static int get_i2c_busnum_writeback(void) { return 0; } static struct i2c_board_info writeback_i2c_info = { I2C_BOARD_INFO("WriteBack", 0x0), }; static struct s3c_platform_camera writeback = { .id = CAMERA_WB, .fmt = ITU_601_YCBCR422_8BIT, .order422 = CAM_ORDER422_8BIT_CBYCRY, .get_i2c_busnum = get_i2c_busnum_writeback, .info = &writeback_i2c_info, .pixelformat = V4L2_PIX_FMT_YUV444, .line_length = 800, .width = 480, .height = 800, .window = { .left = 0, .top = 0, .width = 480, .height = 800, }, .initialized = 0, }; #endif #ifdef CONFIG_VIDEO_EXYNOS_FIMC_IS #ifdef CONFIG_VIDEO_S5K6A3 static struct s3c_platform_camera s5k6a3 = { .id = CAMERA_CSI_D, #if defined(CONFIG_MACH_MIDAS_02_BD) || defined(CONFIG_GPIO_MIDAS_02_BD) .clk_name = "sclk_cam1", #else .clk_name = "sclk_cam0", #endif .cam_power = s5k6a3_power, .type = CAM_TYPE_MIPI, .fmt = MIPI_CSI_RAW10, .order422 = CAM_ORDER422_8BIT_YCBYCR, .pixelformat = V4L2_PIX_FMT_UYVY, .line_length = 1920, .width = 1920, .height = 1080, .window = { .left = 0, .top = 0, .width = 1920, .height = 1080, }, .srclk_name = "xusbxti", .clk_rate = 24000000, .mipi_lanes = 1, .mipi_settle = 12, .mipi_align = 24, .initialized = 0, .flite_id = FLITE_IDX_B, .use_isp = true, .sensor_index = 102, }; #endif #endif #if defined(CONFIG_VIDEO_S5C73M3) || defined(CONFIG_VIDEO_SLP_S5C73M3) #define CAM_CHECK_ERR_RET(x, msg) \ if (unlikely((x) < 0)) { \ printk(KERN_ERR "\nfail to %s: err = %d\n", msg, x); \ return x; \ } #define CAM_CHECK_ERR(x, msg) \ if (unlikely((x) < 0)) { \ printk(KERN_ERR "\nfail to %s: err = %d\n", msg, x); \ } #if defined(CONFIG_MACH_MIDAS_02_BD) || defined(CONFIG_GPIO_MIDAS_02_BD) static int vddCore = 1200000; #else static int vddCore = 1150000; #endif 
static bool isVddCoreSet; static void s5c73m3_set_vdd_core(int level) { vddCore = level; isVddCoreSet = true; printk(KERN_ERR "%s : %d\n", __func__, vddCore); } static bool s5c73m3_is_vdd_core_set(void) { return isVddCoreSet; } static int s5c73m3_is_isp_reset(void) { int ret = 0; #if defined(CONFIG_MACH_MIDAS_02_BD) || defined(CONFIG_GPIO_MIDAS_02_BD) ret = gpio_request(GPIO_ISP_RESET, "GPY3"); #else ret = gpio_request(GPIO_ISP_RESET, "GPF1"); #endif if (ret) { printk(KERN_ERR "faile to request gpio(GPIO_ISP_RESET)\n"); return ret; } /* ISP_RESET */ ret = gpio_direction_output(GPIO_ISP_RESET, 0); CAM_CHECK_ERR_RET(ret, "output GPIO_ISP_RESET"); udelay(10); /* 200 cycle */ ret = gpio_direction_output(GPIO_ISP_RESET, 1); CAM_CHECK_ERR_RET(ret, "output GPIO_ISP_RESET"); udelay(10); /* 200 cycle */ gpio_free(GPIO_ISP_RESET); return ret; } static int s5c73m3_gpio_request(void) { int ret = 0; #if defined(CONFIG_MACH_MIDAS_02_BD) || defined(CONFIG_GPIO_MIDAS_02_BD) ret = gpio_request(GPIO_ISP_STANDBY, "GPY5"); #else ret = gpio_request(GPIO_ISP_STANDBY, "GPM0"); #endif if (ret) { printk(KERN_ERR "faile to request gpio(GPIO_ISP_STANDBY)\n"); return ret; } #if defined(CONFIG_MACH_MIDAS_02_BD) || defined(CONFIG_GPIO_MIDAS_02_BD) ret = gpio_request(GPIO_ISP_RESET, "GPY3"); #else ret = gpio_request(GPIO_ISP_RESET, "GPF1"); #endif if (ret) { printk(KERN_ERR "faile to request gpio(GPIO_ISP_RESET)\n"); return ret; } /* SENSOR_A2.8V */ #if defined(CONFIG_MACH_MIDAS_02_BD) || defined(CONFIG_GPIO_MIDAS_02_BD) ret = gpio_request(GPIO_CAM_IO_EN, "GPY6"); #else ret = gpio_request(GPIO_CAM_IO_EN, "GPM0"); #endif if (ret) { printk(KERN_ERR "fail to request gpio(GPIO_CAM_IO_EN)\n"); return ret; } #if defined(CONFIG_MACH_MIDAS_02_BD) || defined(CONFIG_GPIO_MIDAS_02_BD) ret = gpio_request(GPIO_CAM_AF_EN, "GPY6"); #else ret = gpio_request(GPIO_CAM_AF_EN, "GPM0"); #endif if (ret) { printk(KERN_ERR "faile to request gpio(GPIO_CAM_AF_EN)\n"); return ret; } #if defined(CONFIG_MACH_MIDAS_02_BD) || defined(CONFIG_GPIO_MIDAS_02_BD) ret = gpio_request(GPIO_ISP_CORE_EN, "GPY6"); #else ret = gpio_request(GPIO_ISP_CORE_EN, "GPM0"); #endif if (ret) { printk(KERN_ERR "fail to request gpio(GPIO_ISP_CORE_EN)\n"); return ret; } return ret; } static int s5c73m3_power_on(void) { struct regulator *regulator; int ret = 0; printk(KERN_DEBUG "%s: in\n", __func__); printk(KERN_DEBUG "vddCore : %d\n", vddCore); s5c73m3_gpio_request(); /* CAM_ISP_CORE_1.2V */ ret = gpio_direction_output(GPIO_ISP_CORE_EN, 1); CAM_CHECK_ERR_RET(ret, "output GPIO_ISP_CORE_EN"); regulator = regulator_get(NULL, "cam_isp_core_1.2v"); if (IS_ERR(regulator)) return -ENODEV; regulator_set_voltage(regulator, vddCore, vddCore); ret = regulator_enable(regulator); regulator_put(regulator); CAM_CHECK_ERR_RET(ret, "enable cam_isp_core_1.2v"); /* CAM_SENSOR_A2.8V */ ret = gpio_direction_output(GPIO_CAM_IO_EN, 1); CAM_CHECK_ERR_RET(ret, "output IO_EN"); /* CAM_SENSOR_CORE_1.2V */ regulator = regulator_get(NULL, "cam_sensor_core_1.2v"); if (IS_ERR(regulator)) return -ENODEV; ret = regulator_enable(regulator); regulator_put(regulator); CAM_CHECK_ERR_RET(ret, "enable cam_sensor_core_1.2v"); /* delay is needed : pmu control is slower than gpio control*/ mdelay(8); /* MCLK */ ret = s3c_gpio_cfgpin(GPIO_CAM_MCLK, S3C_GPIO_SFN(2)); CAM_CHECK_ERR_RET(ret, "cfg mclk"); s3c_gpio_setpull(GPIO_CAM_MCLK, S3C_GPIO_PULL_NONE); /* CAM_AF_2.8V */ ret = gpio_direction_output(GPIO_CAM_AF_EN, 1); CAM_CHECK_ERR_RET(ret, "output GPIO_CAM_AF_EN"); udelay(2000); #if 0 /* VT_CORE_1.8V */ regulator = 
regulator_get(NULL, "vt_cam_1.8v"); if (IS_ERR(regulator)) return -ENODEV; ret = regulator_enable(regulator); regulator_put(regulator); CAM_CHECK_ERR_RET(ret, "enable vt_cam_1.8v"); #endif /* CAM_ISP_SENSOR_1.8V */ regulator = regulator_get(NULL, "cam_isp_sensor_1.8v"); if (IS_ERR(regulator)) return -ENODEV; ret = regulator_enable(regulator); regulator_put(regulator); CAM_CHECK_ERR_RET(ret, "enable cam_isp_sensor_1.8v"); /* CAM_ISP_MIPI_1.2V */ regulator = regulator_get(NULL, "cam_isp_mipi_1.2v"); if (IS_ERR(regulator)) return -ENODEV; ret = regulator_enable(regulator); regulator_put(regulator); CAM_CHECK_ERR_RET(ret, "enable cam_isp_mipi_1.2v"); /* ISP_STANDBY */ ret = gpio_direction_output(GPIO_ISP_STANDBY, 1); CAM_CHECK_ERR_RET(ret, "output GPIO_ISP_STANDBY"); udelay(100); /* 2000 cycle */ /* ISP_RESET */ ret = gpio_direction_output(GPIO_ISP_RESET, 1); CAM_CHECK_ERR_RET(ret, "output GPIO_ISP_RESET"); udelay(10); /* 200 cycle */ /* EVT0 : set VDD_INT as 1.3V when entering camera */ #if defined(CONFIG_MACH_MIDAS_02_BD) || defined(CONFIG_GPIO_MIDAS_02_BD) regulator = regulator_get(NULL, "vdd_int"); if (IS_ERR(regulator)) return -ENODEV; regulator_set_voltage(regulator, 1300000, 1300000); regulator_put(regulator); CAM_CHECK_ERR_RET(ret, "set vdd_int as 1.3V"); #else /*M0, C1 REV00*/ if (system_rev == 0x03) { regulator = regulator_get(NULL, "vdd_int"); if (IS_ERR(regulator)) return -ENODEV; regulator_set_voltage(regulator, 1300000, 1300000); regulator_put(regulator); CAM_CHECK_ERR_RET(ret, "set vdd_int as 1.3V"); } #endif gpio_free(GPIO_ISP_STANDBY); gpio_free(GPIO_ISP_RESET); gpio_free(GPIO_CAM_IO_EN); gpio_free(GPIO_CAM_AF_EN); gpio_free(GPIO_ISP_CORE_EN); return ret; } static int s5c73m3_power_down(void) { struct regulator *regulator; int ret = 0; printk(KERN_DEBUG "%s: in\n", __func__); s5c73m3_gpio_request(); /* ISP_STANDBY */ ret = gpio_direction_output(GPIO_ISP_STANDBY, 0); CAM_CHECK_ERR_RET(ret, "output GPIO_ISP_STANDBY"); udelay(2); /* 40 cycle */ /* ISP_RESET */ ret = gpio_direction_output(GPIO_ISP_RESET, 0); CAM_CHECK_ERR_RET(ret, "output GPIO_ISP_RESET"); /* CAM_AF_2.8V */ ret = gpio_direction_output(GPIO_CAM_AF_EN, 0); CAM_CHECK_ERR_RET(ret, "output GPIO_CAM_AF_EN"); /* CAM_ISP_MIPI_1.2V */ regulator = regulator_get(NULL, "cam_isp_mipi_1.2v"); if (IS_ERR(regulator)) return -ENODEV; if (regulator_is_enabled(regulator)) ret = regulator_force_disable(regulator); regulator_put(regulator); CAM_CHECK_ERR(ret, "disable cam_isp_mipi_1.2v"); udelay(10); /* 200 cycle */ /* CAM_ISP_SENSOR_1.8V */ regulator = regulator_get(NULL, "cam_isp_sensor_1.8v"); if (IS_ERR(regulator)) return -ENODEV; if (regulator_is_enabled(regulator)) ret = regulator_force_disable(regulator); regulator_put(regulator); CAM_CHECK_ERR(ret, "disable cam_isp_sensor_1.8v"); #if 0 /* VT_CORE_1.8V */ regulator = regulator_get(NULL, "vt_cam_1.8v"); if (IS_ERR(regulator)) return -ENODEV; if (regulator_is_enabled(regulator)) ret = regulator_force_disable(regulator); regulator_put(regulator); CAM_CHECK_ERR(ret, "disable vt_cam_1.8v"); #endif /* MCLK */ ret = s3c_gpio_cfgpin(GPIO_CAM_MCLK, S3C_GPIO_INPUT); s3c_gpio_setpull(GPIO_CAM_MCLK, S3C_GPIO_PULL_DOWN); CAM_CHECK_ERR(ret, "cfg mclk"); /* CAM_SENSOR_CORE_1.2V */ regulator = regulator_get(NULL, "cam_sensor_core_1.2v"); if (IS_ERR(regulator)) return -ENODEV; if (regulator_is_enabled(regulator)) ret = regulator_force_disable(regulator); regulator_put(regulator); CAM_CHECK_ERR(ret, "disable cam_sensor_core_1.2v"); /* CAM_SENSOR_A2.8V */ ret = 
gpio_direction_output(GPIO_CAM_IO_EN, 0); CAM_CHECK_ERR_RET(ret, "output GPIO_CAM_IO_EN"); /* CAM_ISP_CORE_1.2V */ regulator = regulator_get(NULL, "cam_isp_core_1.2v"); if (IS_ERR(regulator)) return -ENODEV; if (regulator_is_enabled(regulator)) ret = regulator_force_disable(regulator); regulator_put(regulator); CAM_CHECK_ERR(ret, "disable cam_isp_core_1.2v"); ret = gpio_direction_output(GPIO_ISP_CORE_EN, 0); CAM_CHECK_ERR_RET(ret, "output GPIO_CAM_ISP_CORE_EN"); /* EVT0 : set VDD_INT as 1.0V when exiting camera */ #if defined(CONFIG_MACH_MIDAS_02_BD) || defined(CONFIG_GPIO_MIDAS_02_BD) regulator = regulator_get(NULL, "vdd_int"); if (IS_ERR(regulator)) return -ENODEV; regulator_set_voltage(regulator, 1000000, 1000000); regulator_put(regulator); CAM_CHECK_ERR_RET(ret, "set vdd_int as 1.0V"); #else /*M0, C1 REV00*/ if (system_rev == 0x03) { regulator = regulator_get(NULL, "vdd_int"); if (IS_ERR(regulator)) return -ENODEV; regulator_set_voltage(regulator, 1000000, 1000000); regulator_put(regulator); CAM_CHECK_ERR_RET(ret, "set vdd_int as 1.0V"); } #endif gpio_free(GPIO_ISP_STANDBY); gpio_free(GPIO_ISP_RESET); gpio_free(GPIO_CAM_IO_EN); gpio_free(GPIO_CAM_AF_EN); gpio_free(GPIO_ISP_CORE_EN); return ret; } static int s5c73m3_power(int enable) { int ret = 0; printk(KERN_ERR "%s %s\n", __func__, enable ? "on" : "down"); if (enable) { ret = s5c73m3_power_on(); if (unlikely(ret)) goto error_out; } else ret = s5c73m3_power_down(); ret = s3c_csis_power(enable); error_out: return ret; } static int s5c73m3_get_i2c_busnum(void) { #if defined(CONFIG_MACH_MIDAS_02_BD) || defined(CONFIG_GPIO_MIDAS_02_BD) return 0; #else if (system_rev == 0x03) /*M0, M1 REV00*/ return 18; else return 0; #endif } static struct s5c73m3_platform_data s5c73m3_plat = { .default_width = 640, /* 1920 */ .default_height = 480, /* 1080 */ .pixelformat = V4L2_PIX_FMT_UYVY, .freq = 24000000, .is_mipi = 1, .set_vdd_core = s5c73m3_set_vdd_core, .is_vdd_core_set = s5c73m3_is_vdd_core_set, .is_isp_reset = s5c73m3_is_isp_reset, }; static struct i2c_board_info s5c73m3_i2c_info = { #if defined(CONFIG_MACH_MIDAS_02_BD) || defined(CONFIG_GPIO_MIDAS_02_BD) I2C_BOARD_INFO("S5C73M3", 0x5A >> 1), #else I2C_BOARD_INFO("S5C73M3", 0x78 >> 1), #endif .platform_data = &s5c73m3_plat, }; static struct s3c_platform_camera s5c73m3 = { .id = CAMERA_CSI_C, .clk_name = "sclk_cam0", .get_i2c_busnum = s5c73m3_get_i2c_busnum, .cam_power = s5c73m3_power, .type = CAM_TYPE_MIPI, .fmt = MIPI_CSI_YCBCR422_8BIT, .order422 = CAM_ORDER422_8BIT_YCBYCR, .info = &s5c73m3_i2c_info, .pixelformat = V4L2_PIX_FMT_UYVY, .srclk_name = "xusbxti", /* "mout_mpll" */ .clk_rate = 24000000, /* 48000000 */ .line_length = 1920, .width = 640, .height = 480, .window = { .left = 0, .top = 0, .width = 640, .height = 480, }, .mipi_lanes = 4, .mipi_settle = 12, .mipi_align = 32, /* Polarity */ .inv_pclk = 1, .inv_vsync = 1, .inv_href = 0, .inv_hsync = 0, .reset_camera = 0, .initialized = 0, }; #endif #ifdef CONFIG_VIDEO_M5MO #define CAM_CHECK_ERR_RET(x, msg) \ if (unlikely((x) < 0)) { \ printk(KERN_ERR "\nfail to %s: err = %d\n", msg, x); \ return x; \ } #define CAM_CHECK_ERR(x, msg) \ if (unlikely((x) < 0)) { \ printk(KERN_ERR "\nfail to %s: err = %d\n", msg, x); \ } static int m5mo_get_i2c_busnum(void) { #ifdef CONFIG_VIDEO_M5MO_USE_SWI2C return 25; #else return 0; #endif } static int m5mo_power_on(void) { struct regulator *regulator; int ret = 0; printk(KERN_DEBUG "%s: in\n", __func__); ret = gpio_request(GPIO_CAM_VT_nSTBY, "GPL2"); if (ret) { printk(KERN_ERR "faile to request 
gpio(GPIO_CAM_VGA_nSTBY)\n"); return ret; } ret = gpio_request(GPIO_CAM_VT_nRST, "GPL2"); if (ret) { printk(KERN_ERR "faile to request gpio(GPIO_CAM_VGA_nRST)\n"); return ret; } ret = gpio_request(GPIO_ISP_CORE_EN, "GPM0"); if (ret) { printk(KERN_ERR "fail to request gpio(CAM_SENSOR_CORE)\n"); return ret; } ret = gpio_request(GPIO_ISP_RESET, "GPY3"); if (ret) { printk(KERN_ERR "faile to request gpio(GPIO_ISP_RESET)\n"); return ret; } /* CAM_VT_nSTBY low */ ret = gpio_direction_output(GPIO_CAM_VT_nSTBY, 0); CAM_CHECK_ERR_RET(ret, "output VT_nSTBY"); /* CAM_VT_nRST low */ gpio_direction_output(GPIO_CAM_VT_nRST, 0); CAM_CHECK_ERR_RET(ret, "output VT_nRST"); udelay(10); /* CAM_ISP_CORE_1.2V */ ret = gpio_direction_output(GPIO_ISP_CORE_EN, 1); CAM_CHECK_ERR_RET(ret, "output GPIO_ISP_CORE_EN"); /* No delay */ /* CAM_SENSOR_CORE_1.2V */ regulator = regulator_get(NULL, "cam_isp_core_1.2v"); if (IS_ERR(regulator)) return -ENODEV; ret = regulator_enable(regulator); regulator_put(regulator); CAM_CHECK_ERR_RET(ret, "enable cam_isp_core_1.2v"); udelay(10); regulator = regulator_get(NULL, "cam_sensor_core_1.2v"); if (IS_ERR(regulator)) { CAM_CHECK_ERR_RET(ret, "output Err cam_sensor_core_1.2v"); return -ENODEV; } ret = regulator_enable(regulator); regulator_put(regulator); CAM_CHECK_ERR_RET(ret, "enable cam_sensor_core_1.2v"); udelay(10); /* CAM_SENSOR_A2.8V */ regulator = regulator_get(NULL, "cam_sensor_a2.8v"); if (IS_ERR(regulator)) { CAM_CHECK_ERR_RET(ret, "output Err cam_sensor_a2.8v"); return -ENODEV; } ret = regulator_enable(regulator); regulator_put(regulator); CAM_CHECK_ERR_RET(ret, "enable cam_sensor_a2.8v"); /* it takes about 100us at least during level transition.*/ udelay(160); /* 130us -> 160us */ /* VT_CAM_DVDD_1.8V */ regulator = regulator_get(NULL, "vt_cam_dvdd_1.8v"); if (IS_ERR(regulator)) return -ENODEV; ret = regulator_enable(regulator); regulator_put(regulator); CAM_CHECK_ERR_RET(ret, "enable vt_cam_dvdd_1.8v"); udelay(10); /* CAM_AF_2.8V */ regulator = regulator_get(NULL, "cam_af_2.8v"); if (IS_ERR(regulator)) return -ENODEV; ret = regulator_enable(regulator); regulator_put(regulator); CAM_CHECK_ERR(ret, "output cam_af_2.8v"); mdelay(7); /* VT_CAM_1.8V */ regulator = regulator_get(NULL, "vt_cam_1.8v"); if (IS_ERR(regulator)) { CAM_CHECK_ERR_RET(ret, "output Err vt_cam_1.8v"); return -ENODEV; } ret = regulator_enable(regulator); regulator_put(regulator); CAM_CHECK_ERR_RET(ret, "enable vt_cam_1.8v"); udelay(20); /* CAM_ISP_1.8V */ regulator = regulator_get(NULL, "cam_isp_1.8v"); if (IS_ERR(regulator)) { CAM_CHECK_ERR_RET(ret, "output Err cam_isp_1.8v"); return -ENODEV; } ret = regulator_enable(regulator); regulator_put(regulator); CAM_CHECK_ERR_RET(ret, "enable cam_isp_1.8v"); udelay(120); /* at least */ /* CAM_ISP_SEN_IO_1.8V */ regulator = regulator_get(NULL, "cam_isp_sensor_1.8v"); if (IS_ERR(regulator)) { CAM_CHECK_ERR_RET(ret, "output Err cam_isp_sensor_1.8v"); return -ENODEV; } ret = regulator_enable(regulator); regulator_put(regulator); CAM_CHECK_ERR_RET(ret, "enable cam_isp_sensor_1.8v"); udelay(30); /* MCLK */ ret = s3c_gpio_cfgpin(GPIO_CAM_MCLK, S3C_GPIO_SFN(2)); CAM_CHECK_ERR_RET(ret, "cfg mclk"); s3c_gpio_setpull(GPIO_CAM_MCLK, S3C_GPIO_PULL_NONE); udelay(70); /* ISP_RESET */ ret = gpio_direction_output(GPIO_ISP_RESET, 1); CAM_CHECK_ERR_RET(ret, "output reset"); mdelay(4); gpio_free(GPIO_CAM_VT_nSTBY); gpio_free(GPIO_CAM_VT_nRST); gpio_free(GPIO_ISP_CORE_EN); gpio_free(GPIO_ISP_RESET); return ret; } static int m5mo_power_down(void) { struct regulator *regulator; int ret 
= 0; printk(KERN_DEBUG "%s: in\n", __func__); ret = gpio_request(GPIO_CAM_VT_nSTBY, "GPL2"); if (ret) { printk(KERN_ERR "faile to request gpio(GPIO_CAM_VGA_nSTBY)\n"); return ret; } ret = gpio_request(GPIO_CAM_VT_nRST, "GPL2"); if (ret) { printk(KERN_ERR "faile to request gpio(GPIO_CAM_VGA_nRST)\n"); return ret; } ret = gpio_request(GPIO_ISP_CORE_EN, "GPM0"); if (ret) { printk(KERN_ERR "fail to request gpio(CAM_SENSOR_CORE)\n"); return ret; } ret = gpio_request(GPIO_ISP_RESET, "GPY3"); if (ret) { printk(KERN_ERR "faile to request gpio(GPIO_ISP_RESET)\n"); return ret; } /* s3c_i2c0_force_stop(); */ mdelay(3); /* ISP_RESET */ ret = gpio_direction_output(GPIO_ISP_RESET, 0); CAM_CHECK_ERR(ret, "output reset"); mdelay(2); /* MCLK */ ret = s3c_gpio_cfgpin(GPIO_CAM_MCLK, S3C_GPIO_INPUT); s3c_gpio_setpull(GPIO_CAM_MCLK, S3C_GPIO_PULL_DOWN); CAM_CHECK_ERR(ret, "cfg mclk"); udelay(20); /* CAM_AF_2.8V */ regulator = regulator_get(NULL, "cam_af_2.8v"); if (IS_ERR(regulator)) return -ENODEV; if (regulator_is_enabled(regulator)) ret = regulator_force_disable(regulator); regulator_put(regulator); CAM_CHECK_ERR(ret, "disable cam_af_2.8v"); /* CAM_ISP_SEN_IO_1.8V */ regulator = regulator_get(NULL, "cam_isp_sensor_1.8v"); if (IS_ERR(regulator)) return -ENODEV; if (regulator_is_enabled(regulator)) ret = regulator_force_disable(regulator); regulator_put(regulator); CAM_CHECK_ERR(ret, "disable, cam_isp_sensor_1.8v"); udelay(10); /* CAM_ISP_1.8V */ regulator = regulator_get(NULL, "cam_isp_1.8v"); if (IS_ERR(regulator)) return -ENODEV; if (regulator_is_enabled(regulator)) ret = regulator_force_disable(regulator); regulator_put(regulator); CAM_CHECK_ERR(ret, "disable cam_isp_1.8v"); udelay(500); /* 100us -> 500us */ /* VT_CAM_1.8V */ regulator = regulator_get(NULL, "vt_cam_1.8v"); if (IS_ERR(regulator)) return -ENODEV; if (regulator_is_enabled(regulator)) ret = regulator_disable(regulator); regulator_put(regulator); CAM_CHECK_ERR(ret, "disable vt_cam_1.8v"); udelay(250); /* 10us -> 250us */ /* VT_CAM_DVDD_1.8V */ regulator = regulator_get(NULL, "vt_cam_dvdd_1.8v"); if (IS_ERR(regulator)) return -ENODEV; if (regulator_is_enabled(regulator)) ret = regulator_disable(regulator); regulator_put(regulator); CAM_CHECK_ERR(ret, "disable vt_cam_dvdd_1.8v"); udelay(300); /*10 -> 300 us */ /* CAM_SENSOR_A2.8V */ regulator = regulator_get(NULL, "cam_sensor_a2.8v"); if (IS_ERR(regulator)) return -ENODEV; if (regulator_is_enabled(regulator)) ret = regulator_force_disable(regulator); regulator_put(regulator); CAM_CHECK_ERR(ret, "disable cam_sensor_a2.8v"); udelay(800); /* CAM_SENSOR_CORE_1.2V */ regulator = regulator_get(NULL, "cam_sensor_core_1.2v"); if (IS_ERR(regulator)) return -ENODEV; if (regulator_is_enabled(regulator)) ret = regulator_force_disable(regulator); regulator_put(regulator); CAM_CHECK_ERR(ret, "disable cam_sensor_core_1.2v"); udelay(5); /* CAM_ISP_CORE_1.2V */ ret = gpio_direction_output(GPIO_ISP_CORE_EN, 0); CAM_CHECK_ERR(ret, "output ISP_CORE"); gpio_free(GPIO_CAM_VT_nSTBY); gpio_free(GPIO_CAM_VT_nRST); gpio_free(GPIO_ISP_CORE_EN); gpio_free(GPIO_ISP_RESET); return ret; } static int m5mo_flash_power(int enable) { /* TODO */ return 0; } static int m5mo_power(int enable) { int ret = 0; printk(KERN_ERR "%s %s\n", __func__, enable ? 
"on" : "down"); if (enable) { ret = m5mo_power_on(); if (unlikely(ret)) goto error_out; } else ret = m5mo_power_down(); ret = s3c_csis_power(enable); m5mo_flash_power(enable); error_out: return ret; } static int m5mo_config_isp_irq(void) { s3c_gpio_cfgpin(GPIO_ISP_INT, S3C_GPIO_SFN(0xF)); s3c_gpio_setpull(GPIO_ISP_INT, S3C_GPIO_PULL_NONE); return 0; } static struct m5mo_platform_data m5mo_plat = { .default_width = 640, /* 1920 */ .default_height = 480, /* 1080 */ .pixelformat = V4L2_PIX_FMT_UYVY, .freq = 24000000, .is_mipi = 1, .config_isp_irq = m5mo_config_isp_irq, .irq = IRQ_EINT(24), }; static struct i2c_board_info m5mo_i2c_info = { I2C_BOARD_INFO("M5MO", 0x1F), .platform_data = &m5mo_plat, }; static struct s3c_platform_camera m5mo = { .id = CAMERA_CSI_C, .clk_name = "sclk_cam0", .get_i2c_busnum = m5mo_get_i2c_busnum, .cam_power = m5mo_power, /*smdkv310_mipi_cam0_reset,*/ .type = CAM_TYPE_MIPI, .fmt = ITU_601_YCBCR422_8BIT, /*MIPI_CSI_YCBCR422_8BIT*/ .order422 = CAM_ORDER422_8BIT_CBYCRY, .info = &m5mo_i2c_info, .pixelformat = V4L2_PIX_FMT_UYVY, .srclk_name = "xusbxti", /* "mout_mpll" */ .clk_rate = 24000000, /* 48000000 */ .line_length = 1920, .width = 640, .height = 480, .window = { .left = 0, .top = 0, .width = 640, .height = 480, }, .mipi_lanes = 2, .mipi_settle = 12, .mipi_align = 32, /* Polarity */ .inv_pclk = 0, .inv_vsync = 1, .inv_href = 0, .inv_hsync = 0, .reset_camera = 0, .initialized = 0, }; #endif /* #ifdef CONFIG_VIDEO_M5MO */ /* Interface setting */ static struct s3c_platform_fimc fimc_plat = { .default_cam = CAMERA_CSI_D, #ifdef WRITEBACK_ENABLED .default_cam = CAMERA_WB, #endif .camera = { #if defined(CONFIG_VIDEO_S5C73M3) || defined(CONFIG_VIDEO_SLP_S5C73M3) &s5c73m3, #endif #ifdef CONFIG_VIDEO_S5K6A3 &s5k6a3, #endif #if defined(CONFIG_VIDEO_M5MO) &m5mo, #endif #ifdef WRITEBACK_ENABLED &writeback, #endif }, .hw_ver = 0x51, }; #endif /* CONFIG_VIDEO_FIMC */ #ifdef CONFIG_VIDEO_EXYNOS_FIMC_LITE static void __set_flite_camera_config(struct exynos_platform_flite *data, u32 active_index, u32 max_cam) { data->active_cam_index = active_index; data->num_clients = max_cam; } static void __init smdk4x12_set_camera_flite_platdata(void) { int flite0_cam_index = 0; int flite1_cam_index = 0; #ifdef CONFIG_VIDEO_S5K6A3 exynos_flite1_default_data.cam[flite1_cam_index++] = &s5k6a3; #endif __set_flite_camera_config(&exynos_flite0_default_data, 0, flite0_cam_index); __set_flite_camera_config(&exynos_flite1_default_data, 0, flite1_cam_index); } #endif void __init midas_camera_init(void) { #ifdef CONFIG_VIDEO_FIMC s3c_fimc0_set_platdata(&fimc_plat); s3c_fimc1_set_platdata(&fimc_plat); s3c_fimc2_set_platdata(NULL); s3c_fimc3_set_platdata(NULL); #ifdef CONFIG_EXYNOS_DEV_PD s3c_device_fimc0.dev.parent = &exynos4_device_pd[PD_CAM].dev; s3c_device_fimc1.dev.parent = &exynos4_device_pd[PD_CAM].dev; s3c_device_fimc2.dev.parent = &exynos4_device_pd[PD_CAM].dev; s3c_device_fimc3.dev.parent = &exynos4_device_pd[PD_CAM].dev; #ifdef CONFIG_EXYNOS4_CONTENT_PATH_PROTECTION secmem.parent = &exynos4_device_pd[PD_CAM].dev; #endif #endif #ifdef CONFIG_VIDEO_FIMC_MIPI s3c_csis0_set_platdata(NULL); s3c_csis1_set_platdata(NULL); #ifdef CONFIG_EXYNOS_DEV_PD s3c_device_csis0.dev.parent = &exynos4_device_pd[PD_CAM].dev; s3c_device_csis1.dev.parent = &exynos4_device_pd[PD_CAM].dev; #endif #endif #ifdef CONFIG_VIDEO_EXYNOS_FIMC_LITE smdk4x12_set_camera_flite_platdata(); s3c_set_platdata(&exynos_flite0_default_data, sizeof(exynos_flite0_default_data), &exynos_device_flite0); 
s3c_set_platdata(&exynos_flite1_default_data, sizeof(exynos_flite1_default_data), &exynos_device_flite1); #endif #endif /* CONFIG_VIDEO_FIMC */ }
gpl-2.0
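The s5c73m3 and m5mo power sequences above repeat the regulator_get()/regulator_enable()/regulator_put() dance once per rail. A kernel-style sketch of a helper that would collapse each step into one call follows; cam_rail_set() is an invented name and this is not from the board file, but the consumer API it uses (regulator_get, regulator_enable, regulator_disable, regulator_is_enabled, regulator_put, IS_ERR/PTR_ERR) is the stock kernel interface.

/* Hypothetical helper for the repeated per-rail regulator sequence. */
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int cam_rail_set(const char *rail, bool on)
{
	struct regulator *reg = regulator_get(NULL, rail);
	int ret = 0;

	if (IS_ERR(reg))
		return PTR_ERR(reg);
	if (on)
		ret = regulator_enable(reg);
	else if (regulator_is_enabled(reg))
		ret = regulator_disable(reg);
	regulator_put(reg);
	return ret;
}

/* usage, mirroring one step of s5c73m3_power_on():
 *	ret = cam_rail_set("cam_isp_core_1.2v", true);
 */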
pratyushanand/linux
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
786
4060
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv04.h"

#include <core/gpuobj.h>

#define NV04_PDMA_SIZE (128 * 1024 * 1024)
#define NV04_PDMA_PAGE (  4 * 1024)

/*******************************************************************************
 * VM map/unmap callbacks
 ******************************************************************************/

static void
nv04_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
	       struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
	pte = 0x00008 + (pte * 4);
	nvkm_kmap(pgt);
	while (cnt) {
		u32 page = PAGE_SIZE / NV04_PDMA_PAGE;
		u32 phys = (u32)*list++;
		while (cnt && page--) {
			nvkm_wo32(pgt, pte, phys | 3);
			phys += NV04_PDMA_PAGE;
			pte += 4;
			cnt -= 1;
		}
	}
	nvkm_done(pgt);
}

static void
nv04_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
{
	pte = 0x00008 + (pte * 4);
	nvkm_kmap(pgt);
	while (cnt--) {
		nvkm_wo32(pgt, pte, 0x00000000);
		pte += 4;
	}
	nvkm_done(pgt);
}

static void
nv04_vm_flush(struct nvkm_vm *vm)
{
}

/*******************************************************************************
 * MMU subdev
 ******************************************************************************/

static int
nv04_mmu_oneinit(struct nvkm_mmu *base)
{
	struct nv04_mmu *mmu = nv04_mmu(base);
	struct nvkm_device *device = mmu->base.subdev.device;
	struct nvkm_memory *dma;
	int ret;

	ret = nvkm_vm_create(&mmu->base, 0, NV04_PDMA_SIZE, 0, 4096, NULL,
			     &mmu->vm);
	if (ret)
		return ret;

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
			      (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 + 8,
			      16, true, &dma);
	mmu->vm->pgt[0].mem[0] = dma;
	mmu->vm->pgt[0].refcount[0] = 1;
	if (ret)
		return ret;

	nvkm_kmap(dma);
	nvkm_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
	nvkm_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1);
	nvkm_done(dma);
	return 0;
}

void *
nv04_mmu_dtor(struct nvkm_mmu *base)
{
	struct nv04_mmu *mmu = nv04_mmu(base);
	struct nvkm_device *device = mmu->base.subdev.device;
	if (mmu->vm) {
		nvkm_memory_del(&mmu->vm->pgt[0].mem[0]);
		nvkm_vm_ref(NULL, &mmu->vm, NULL);
	}
	if (mmu->nullp) {
		dma_free_coherent(device->dev, 16 * 1024,
				  mmu->nullp, mmu->null);
	}
	return mmu;
}

int
nv04_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
	      int index, struct nvkm_mmu **pmmu)
{
	struct nv04_mmu *mmu;
	if (!(mmu = kzalloc(sizeof(*mmu), GFP_KERNEL)))
		return -ENOMEM;
	*pmmu = &mmu->base;
	nvkm_mmu_ctor(func, device, index, &mmu->base);
	return 0;
}

const struct nvkm_mmu_func
nv04_mmu = {
	.oneinit = nv04_mmu_oneinit,
	.dtor = nv04_mmu_dtor,
	.limit = NV04_PDMA_SIZE,
	.dma_bits = 32,
	.pgt_bits = 32 - 12,
	.spg_shift = 12,
	.lpg_shift = 12,
	.map_sg = nv04_vm_map_sg,
	.unmap = nv04_vm_unmap,
	.flush = nv04_vm_flush,
};

int
nv04_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
{
	return nv04_mmu_new_(&nv04_mmu, device, index, pmmu);
}
gpl-2.0
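nv04_vm_map_sg() above expands each host page in the scatter list into 4 KiB NV04 device pages, writing one 32-bit PTE (the physical address OR'ed with the low present/valid bits, value 3) every 4 bytes starting at offset 0x8. The runnable demo below prints what that loop would write; the DMA addresses and the 16 KiB host page size are assumptions chosen purely for illustration.

/* Userspace trace of the nv04 PTE expansion. Hypothetical values. */
#include <stdint.h>
#include <stdio.h>

#define NV04_PDMA_PAGE (4 * 1024)
#define HOST_PAGE_SIZE 16384	/* assumed: 4 device pages per host page */

int main(void)
{
	uint32_t list[] = { 0x10000000, 0x20000000 };	/* fake DMA addrs */
	uint32_t pte = 0;

	for (unsigned n = 0; n < sizeof(list) / sizeof(list[0]); n++) {
		uint32_t phys = list[n];
		for (unsigned i = 0; i < HOST_PAGE_SIZE / NV04_PDMA_PAGE; i++) {
			/* mirrors nvkm_wo32(pgt, pte, phys | 3) */
			printf("wo32(pgt, 0x%05x, 0x%08x)\n",
			       0x00008 + pte * 4, phys | 3);
			phys += NV04_PDMA_PAGE;
			pte++;
		}
	}
	return 0;
}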
ddk50/cbc-linux-kernel
arch/sh/kernel/sys_sh32.c
2578
1563
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/ipc.h>
#include <asm/cacheflush.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/syscalls.h>

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
asmlinkage int sys_sh_pipe(unsigned long r4, unsigned long r5,
	unsigned long r6, unsigned long r7,
	struct pt_regs __regs)
{
	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
	int fd[2];
	int error;

	error = do_pipe_flags(fd, 0);
	if (!error) {
		regs->regs[1] = fd[1];
		return fd[0];
	}
	return error;
}

asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char __user *buf,
				     size_t count, long dummy, loff_t pos)
{
	return sys_pread64(fd, buf, count, pos);
}

asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char __user *buf,
				      size_t count, long dummy, loff_t pos)
{
	return sys_pwrite64(fd, buf, count, pos);
}

asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
					u32 len0, u32 len1, int advice)
{
#ifdef __LITTLE_ENDIAN__
	return sys_fadvise64_64(fd, (u64)offset1 << 32 | offset0,
				(u64)len1 << 32 | len0, advice);
#else
	return sys_fadvise64_64(fd, (u64)offset0 << 32 | offset1,
				(u64)len0 << 32 | len1, advice);
#endif
}
gpl-2.0
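sys_fadvise64_64_wrapper() above reassembles a 64-bit offset and length from two 32-bit register halves, with the half order depending on endianness. A runnable check of the little-endian case, using arbitrary test values:

/* Demonstrates the (u64)hi << 32 | lo register-pair reassembly. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t offset0 = 0x89abcdef;	/* low half, first register */
	uint32_t offset1 = 0x01234567;	/* high half, second register */
	uint64_t offset = (uint64_t)offset1 << 32 | offset0;

	printf("offset = 0x%016llx\n", (unsigned long long)offset);
	return 0;	/* prints 0x0123456789abcdef */
}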
sgs3/SPH-L710_Kernel
arch/powerpc/platforms/iseries/irq.c
2834
10646
/* * This module supports the iSeries PCI bus interrupt handling * Copyright (C) 20yy <Robert L Holtorf> <IBM Corp> * Copyright (C) 2004-2005 IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the: * Free Software Foundation, Inc., * 59 Temple Place, Suite 330, * Boston, MA 02111-1307 USA * * Change Activity: * Created, December 13, 2000 by Wayne Holm * End Change Activity */ #include <linux/pci.h> #include <linux/init.h> #include <linux/threads.h> #include <linux/smp.h> #include <linux/param.h> #include <linux/string.h> #include <linux/bootmem.h> #include <linux/irq.h> #include <linux/spinlock.h> #include <asm/paca.h> #include <asm/iseries/hv_types.h> #include <asm/iseries/hv_lp_event.h> #include <asm/iseries/hv_call_xm.h> #include <asm/iseries/it_lp_queue.h> #include "irq.h" #include "pci.h" #include "call_pci.h" #ifdef CONFIG_PCI enum pci_event_type { pe_bus_created = 0, /* PHB has been created */ pe_bus_error = 1, /* PHB has failed */ pe_bus_failed = 2, /* Msg to Secondary, Primary failed bus */ pe_node_failed = 4, /* Multi-adapter bridge has failed */ pe_node_recovered = 5, /* Multi-adapter bridge has recovered */ pe_bus_recovered = 12, /* PHB has been recovered */ pe_unquiese_bus = 18, /* Secondary bus unqiescing */ pe_bridge_error = 21, /* Bridge Error */ pe_slot_interrupt = 22 /* Slot interrupt */ }; struct pci_event { struct HvLpEvent event; union { u64 __align; /* Align on an 8-byte boundary */ struct { u32 fisr; HvBusNumber bus_number; HvSubBusNumber sub_bus_number; HvAgentId dev_id; } slot; struct { HvBusNumber bus_number; HvSubBusNumber sub_bus_number; } bus; struct { HvBusNumber bus_number; HvSubBusNumber sub_bus_number; HvAgentId dev_id; } node; } data; }; static DEFINE_SPINLOCK(pending_irqs_lock); static int num_pending_irqs; static int pending_irqs[NR_IRQS]; static void int_received(struct pci_event *event) { int irq; switch (event->event.xSubtype) { case pe_slot_interrupt: irq = event->event.xCorrelationToken; if (irq < NR_IRQS) { spin_lock(&pending_irqs_lock); pending_irqs[irq]++; num_pending_irqs++; spin_unlock(&pending_irqs_lock); } else { printk(KERN_WARNING "int_received: bad irq number %d\n", irq); HvCallPci_eoi(event->data.slot.bus_number, event->data.slot.sub_bus_number, event->data.slot.dev_id); } break; /* Ignore error recovery events for now */ case pe_bus_created: printk(KERN_INFO "int_received: system bus %d created\n", event->data.bus.bus_number); break; case pe_bus_error: case pe_bus_failed: printk(KERN_INFO "int_received: system bus %d failed\n", event->data.bus.bus_number); break; case pe_bus_recovered: case pe_unquiese_bus: printk(KERN_INFO "int_received: system bus %d recovered\n", event->data.bus.bus_number); break; case pe_node_failed: case pe_bridge_error: printk(KERN_INFO "int_received: multi-adapter bridge %d/%d/%d failed\n", event->data.node.bus_number, event->data.node.sub_bus_number, event->data.node.dev_id); break; case pe_node_recovered: printk(KERN_INFO "int_received: multi-adapter 
bridge %d/%d/%d recovered\n", event->data.node.bus_number, event->data.node.sub_bus_number, event->data.node.dev_id); break; default: printk(KERN_ERR "int_received: unrecognized event subtype 0x%x\n", event->event.xSubtype); break; } } static void pci_event_handler(struct HvLpEvent *event) { if (event && (event->xType == HvLpEvent_Type_PciIo)) { if (hvlpevent_is_int(event)) int_received((struct pci_event *)event); else printk(KERN_ERR "pci_event_handler: unexpected ack received\n"); } else if (event) printk(KERN_ERR "pci_event_handler: Unrecognized PCI event type 0x%x\n", (int)event->xType); else printk(KERN_ERR "pci_event_handler: NULL event received\n"); } #define REAL_IRQ_TO_SUBBUS(irq) (((irq) >> 14) & 0xff) #define REAL_IRQ_TO_BUS(irq) ((((irq) >> 6) & 0xff) + 1) #define REAL_IRQ_TO_IDSEL(irq) ((((irq) >> 3) & 7) + 1) #define REAL_IRQ_TO_FUNC(irq) ((irq) & 7) /* * This will be called by device drivers (via enable_IRQ) * to enable INTA in the bridge interrupt status register. */ static void iseries_enable_IRQ(struct irq_data *d) { u32 bus, dev_id, function, mask; const u32 sub_bus = 0; unsigned int rirq = (unsigned int)irqd_to_hwirq(d); /* The IRQ has already been locked by the caller */ bus = REAL_IRQ_TO_BUS(rirq); function = REAL_IRQ_TO_FUNC(rirq); dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function; /* Unmask secondary INTA */ mask = 0x80000000; HvCallPci_unmaskInterrupts(bus, sub_bus, dev_id, mask); } /* This is called by iseries_activate_IRQs */ static unsigned int iseries_startup_IRQ(struct irq_data *d) { u32 bus, dev_id, function, mask; const u32 sub_bus = 0; unsigned int rirq = (unsigned int)irqd_to_hwirq(d); bus = REAL_IRQ_TO_BUS(rirq); function = REAL_IRQ_TO_FUNC(rirq); dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function; /* Link the IRQ number to the bridge */ HvCallXm_connectBusUnit(bus, sub_bus, dev_id, d->irq); /* Unmask bridge interrupts in the FISR */ mask = 0x01010000 << function; HvCallPci_unmaskFisr(bus, sub_bus, dev_id, mask); iseries_enable_IRQ(d); return 0; } /* * This is called out of iSeries_fixup to activate interrupt * generation for usable slots */ void __init iSeries_activate_IRQs() { int irq; unsigned long flags; for_each_irq (irq) { struct irq_desc *desc = irq_to_desc(irq); struct irq_chip *chip; if (!desc) continue; chip = irq_desc_get_chip(desc); if (chip && chip->irq_startup) { raw_spin_lock_irqsave(&desc->lock, flags); chip->irq_startup(&desc->irq_data); raw_spin_unlock_irqrestore(&desc->lock, flags); } } } /* this is not called anywhere currently */ static void iseries_shutdown_IRQ(struct irq_data *d) { u32 bus, dev_id, function, mask; const u32 sub_bus = 0; unsigned int rirq = (unsigned int)irqd_to_hwirq(d); /* irq should be locked by the caller */ bus = REAL_IRQ_TO_BUS(rirq); function = REAL_IRQ_TO_FUNC(rirq); dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function; /* Invalidate the IRQ number in the bridge */ HvCallXm_connectBusUnit(bus, sub_bus, dev_id, 0); /* Mask bridge interrupts in the FISR */ mask = 0x01010000 << function; HvCallPci_maskFisr(bus, sub_bus, dev_id, mask); } /* * This will be called by device drivers (via disable_IRQ) * to disable INTA in the bridge interrupt status register. 
*/ static void iseries_disable_IRQ(struct irq_data *d) { u32 bus, dev_id, function, mask; const u32 sub_bus = 0; unsigned int rirq = (unsigned int)irqd_to_hwirq(d); /* The IRQ has already been locked by the caller */ bus = REAL_IRQ_TO_BUS(rirq); function = REAL_IRQ_TO_FUNC(rirq); dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function; /* Mask secondary INTA */ mask = 0x80000000; HvCallPci_maskInterrupts(bus, sub_bus, dev_id, mask); } static void iseries_end_IRQ(struct irq_data *d) { unsigned int rirq = (unsigned int)irqd_to_hwirq(d); HvCallPci_eoi(REAL_IRQ_TO_BUS(rirq), REAL_IRQ_TO_SUBBUS(rirq), (REAL_IRQ_TO_IDSEL(rirq) << 4) + REAL_IRQ_TO_FUNC(rirq)); } static struct irq_chip iseries_pic = { .name = "iSeries", .irq_startup = iseries_startup_IRQ, .irq_shutdown = iseries_shutdown_IRQ, .irq_unmask = iseries_enable_IRQ, .irq_mask = iseries_disable_IRQ, .irq_eoi = iseries_end_IRQ }; /* * This is called out of iSeries_scan_slot to allocate an IRQ for an EADS slot * It calculates the irq value for the slot. * Note that sub_bus is always 0 (at the moment at least). */ int __init iSeries_allocate_IRQ(HvBusNumber bus, HvSubBusNumber sub_bus, u32 bsubbus) { unsigned int realirq; u8 idsel = ISERIES_GET_DEVICE_FROM_SUBBUS(bsubbus); u8 function = ISERIES_GET_FUNCTION_FROM_SUBBUS(bsubbus); realirq = (((((sub_bus << 8) + (bus - 1)) << 3) + (idsel - 1)) << 3) + function; return irq_create_mapping(NULL, realirq); } #endif /* CONFIG_PCI */ /* * Get the next pending IRQ. */ unsigned int iSeries_get_irq(void) { int irq = NO_IRQ_IGNORE; #ifdef CONFIG_SMP if (get_lppaca()->int_dword.fields.ipi_cnt) { get_lppaca()->int_dword.fields.ipi_cnt = 0; smp_ipi_demux(); } #endif /* CONFIG_SMP */ if (hvlpevent_is_pending()) process_hvlpevents(); #ifdef CONFIG_PCI if (num_pending_irqs) { spin_lock(&pending_irqs_lock); for (irq = 0; irq < NR_IRQS; irq++) { if (pending_irqs[irq]) { pending_irqs[irq]--; num_pending_irqs--; break; } } spin_unlock(&pending_irqs_lock); if (irq >= NR_IRQS) irq = NO_IRQ_IGNORE; } #endif return irq; } #ifdef CONFIG_PCI static int iseries_irq_host_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hw) { irq_set_chip_and_handler(virq, &iseries_pic, handle_fasteoi_irq); return 0; } static int iseries_irq_host_match(struct irq_host *h, struct device_node *np) { /* Match all */ return 1; } static struct irq_host_ops iseries_irq_host_ops = { .map = iseries_irq_host_map, .match = iseries_irq_host_match, }; /* * This is called by init_IRQ. set in ppc_md.init_IRQ by iSeries_setup.c * It must be called before the bus walk. */ void __init iSeries_init_IRQ(void) { /* Register PCI event handler and open an event path */ struct irq_host *host; int ret; /* * The Hypervisor only allows us up to 256 interrupt * sources (the irq number is passed in a u8). */ irq_set_virq_count(256); /* Create irq host. No need for a revmap since HV will give us * back our virtual irq number */ host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0, &iseries_irq_host_ops, 0); BUG_ON(host == NULL); irq_set_default_host(host); ret = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo, &pci_event_handler); if (ret == 0) { ret = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0); if (ret != 0) printk(KERN_ERR "iseries_init_IRQ: open event path " "failed with rc 0x%x\n", ret); } else printk(KERN_ERR "iseries_init_IRQ: register handler " "failed with rc 0x%x\n", ret); } #endif /* CONFIG_PCI */
gpl-2.0
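The REAL_IRQ_TO_* macros above decode the sub-bus/bus/IDSEL/function fields that iSeries_allocate_IRQ() packs into the hardware irq number (function in bits 0-2, IDSEL-1 in bits 3-5, bus-1 in bits 6-13, sub-bus in bits 14-21). A runnable round-trip check of that encoding; the field values are arbitrary:

/* Pack an irq the way iSeries_allocate_IRQ() does, then decode it. */
#include <stdio.h>

#define REAL_IRQ_TO_SUBBUS(irq)	(((irq) >> 14) & 0xff)
#define REAL_IRQ_TO_BUS(irq)	((((irq) >> 6) & 0xff) + 1)
#define REAL_IRQ_TO_IDSEL(irq)	((((irq) >> 3) & 7) + 1)
#define REAL_IRQ_TO_FUNC(irq)	((irq) & 7)

int main(void)
{
	unsigned sub_bus = 0, bus = 5, idsel = 3, function = 2;
	unsigned irq = (((((sub_bus << 8) + (bus - 1)) << 3)
			+ (idsel - 1)) << 3) + function;

	/* prints: irq=274 -> subbus=0 bus=5 idsel=3 func=2 */
	printf("irq=%u -> subbus=%u bus=%u idsel=%u func=%u\n", irq,
	       REAL_IRQ_TO_SUBBUS(irq), REAL_IRQ_TO_BUS(irq),
	       REAL_IRQ_TO_IDSEL(irq), REAL_IRQ_TO_FUNC(irq));
	return 0;
}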
Jimmyk422/android_kernel_samsung_iconvmu
drivers/char/briq_panel.c
3346
5347
/*
 * Drivers for the Total Impact PPC based computer "BRIQ"
 * by Dr. Karsten Jeppesen
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/prom.h>

#define		BRIQ_PANEL_MINOR	156
#define		BRIQ_PANEL_VFD_IOPORT	0x0390
#define		BRIQ_PANEL_LED_IOPORT	0x0398
#define		BRIQ_PANEL_VER		"1.1 (04/20/2002)"
#define		BRIQ_PANEL_MSG0		"Loading Linux"

static int		vfd_is_open;
static unsigned char	vfd[40];
static int		vfd_cursor;
static unsigned char	ledpb, led;

static void update_vfd(void)
{
	int i;

	/* cursor home */
	outb(0x02, BRIQ_PANEL_VFD_IOPORT);
	for (i = 0; i < 20; i++)
		outb(vfd[i], BRIQ_PANEL_VFD_IOPORT + 1);

	/* cursor to next line */
	outb(0xc0, BRIQ_PANEL_VFD_IOPORT);
	for (i = 20; i < 40; i++)
		outb(vfd[i], BRIQ_PANEL_VFD_IOPORT + 1);
}

static void set_led(char state)
{
	if (state == 'R')
		led = 0x01;
	else if (state == 'G')
		led = 0x02;
	else if (state == 'Y')
		led = 0x03;
	else if (state == 'X')
		led = 0x00;
	outb(led, BRIQ_PANEL_LED_IOPORT);
}

static int briq_panel_open(struct inode *ino, struct file *filep)
{
	tty_lock();
	/* enforce single access, vfd_is_open is protected by BKL */
	if (vfd_is_open) {
		tty_unlock();
		return -EBUSY;
	}
	vfd_is_open = 1;

	tty_unlock();
	return 0;
}

static int briq_panel_release(struct inode *ino, struct file *filep)
{
	if (!vfd_is_open)
		return -ENODEV;

	vfd_is_open = 0;

	return 0;
}

static ssize_t briq_panel_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	unsigned short c;
	unsigned char cp;

	if (!vfd_is_open)
		return -ENODEV;

	c = (inb(BRIQ_PANEL_LED_IOPORT) & 0x000c) | (ledpb & 0x0003);
	set_led(' ');
	/* upper button released */
	if ((!(ledpb & 0x0004)) && (c & 0x0004)) {
		cp = ' ';
		ledpb = c;
		if (copy_to_user(buf, &cp, 1))
			return -EFAULT;
		return 1;
	}
	/* lower button released */
	else if ((!(ledpb & 0x0008)) && (c & 0x0008)) {
		cp = '\r';
		ledpb = c;
		if (copy_to_user(buf, &cp, 1))
			return -EFAULT;
		return 1;
	} else {
		ledpb = c;
		return 0;
	}
}

static void scroll_vfd(void)
{
	int i;

	for (i = 0; i < 20; i++) {
		vfd[i] = vfd[i + 20];
		vfd[i + 20] = ' ';
	}
	vfd_cursor = 20;
}

static ssize_t briq_panel_write(struct file *file, const char __user *buf,
				size_t len, loff_t *ppos)
{
	size_t indx = len;
	int i, esc = 0;

	if (!vfd_is_open)
		return -EBUSY;

	for (;;) {
		char c;
		if (!indx)
			break;
		if (get_user(c, buf))
			return -EFAULT;
		if (esc) {
			set_led(c);
			esc = 0;
		} else if (c == 27) {
			esc = 1;
		} else if (c == 12) {
			/* do a form feed */
			for (i = 0; i < 40; i++)
				vfd[i] = ' ';
			vfd_cursor = 0;
		} else if (c == 10) {
			if (vfd_cursor < 20)
				vfd_cursor = 20;
			else if (vfd_cursor < 40)
				vfd_cursor = 40;
			else if (vfd_cursor < 60)
				vfd_cursor = 60;
			if (vfd_cursor > 59)
				scroll_vfd();
		} else {
			/* just a character */
			if (vfd_cursor > 39)
				scroll_vfd();
			vfd[vfd_cursor++] = c;
		}
		indx--;
		buf++;
	}
	update_vfd();

	return len;
}

static const struct file_operations briq_panel_fops = {
	.owner		= THIS_MODULE,
	.read		= briq_panel_read,
	.write		= briq_panel_write,
	.open		= briq_panel_open,
	.release	= briq_panel_release,
	.llseek		= noop_llseek,
};

static struct miscdevice briq_panel_miscdev = {
	BRIQ_PANEL_MINOR,
	"briq_panel",
	&briq_panel_fops
};

static int __init briq_panel_init(void)
{
	struct device_node *root = of_find_node_by_path("/");
	const char *machine;
	int i;

	machine = of_get_property(root, "model", NULL);
	if (!machine || strncmp(machine, "TotalImpact,BRIQ-1", 18) != 0) {
		of_node_put(root);
		return -ENODEV;
	}
	of_node_put(root);

	printk(KERN_INFO
	       "briq_panel: v%s Dr. Karsten Jeppesen (kj@totalimpact.com)\n",
	       BRIQ_PANEL_VER);

	if (!request_region(BRIQ_PANEL_VFD_IOPORT, 4, "BRIQ Front Panel"))
		return -EBUSY;

	if (!request_region(BRIQ_PANEL_LED_IOPORT, 2, "BRIQ Front Panel")) {
		release_region(BRIQ_PANEL_VFD_IOPORT, 4);
		return -EBUSY;
	}
	ledpb = inb(BRIQ_PANEL_LED_IOPORT) & 0x000c;

	if (misc_register(&briq_panel_miscdev) < 0) {
		release_region(BRIQ_PANEL_VFD_IOPORT, 4);
		release_region(BRIQ_PANEL_LED_IOPORT, 2);
		return -EBUSY;
	}

	outb(0x38, BRIQ_PANEL_VFD_IOPORT);	/* Function set */
	outb(0x01, BRIQ_PANEL_VFD_IOPORT);	/* Clear display */
	outb(0x0c, BRIQ_PANEL_VFD_IOPORT);	/* Display on */
	outb(0x06, BRIQ_PANEL_VFD_IOPORT);	/* Entry normal */
	for (i = 0; i < 40; i++)
		vfd[i] = ' ';
#ifndef MODULE
	vfd[0] = 'L';
	vfd[1] = 'o';
	vfd[2] = 'a';
	vfd[3] = 'd';
	vfd[4] = 'i';
	vfd[5] = 'n';
	vfd[6] = 'g';
	vfd[7] = ' ';
	vfd[8] = '.';
	vfd[9] = '.';
	vfd[10] = '.';
#endif /* !MODULE */
	update_vfd();

	return 0;
}

static void __exit briq_panel_exit(void)
{
	misc_deregister(&briq_panel_miscdev);
	release_region(BRIQ_PANEL_VFD_IOPORT, 4);
	release_region(BRIQ_PANEL_LED_IOPORT, 2);
}

module_init(briq_panel_init);
module_exit(briq_panel_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Karsten Jeppesen <karsten@jeppesens.com>");
MODULE_DESCRIPTION("Driver for the Total Impact briQ front panel");
gpl-2.0
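briq_panel_write() above maintains a 2x20 character VFD buffer: line feeds snap the cursor to the next row boundary, and scroll_vfd() moves the bottom row up and blanks it. A runnable extract of the scroll logic with sample contents (the test strings are arbitrary):

/* Standalone check of the VFD scroll behaviour. */
#include <stdio.h>
#include <string.h>

static char vfd[40];
static int vfd_cursor;

/* same effect as the driver's copy loop: row 2 -> row 1, blank row 2 */
static void scroll_vfd(void)
{
	memcpy(vfd, vfd + 20, 20);
	memset(vfd + 20, ' ', 20);
	vfd_cursor = 20;
}

int main(void)
{
	memcpy(vfd,      "first line__________", 20);
	memcpy(vfd + 20, "second line_________", 20);
	scroll_vfd();
	/* prints: second line_________|                    | cursor=20 */
	printf("%.20s|%.20s| cursor=%d\n", vfd, vfd + 20, vfd_cursor);
	return 0;
}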
ricardon/intel-audio
drivers/video/fbdev/via/lcd.c
4626
31735
/* * Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved. * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved. * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; * either version 2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even * the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE.See the GNU General Public License * for more details. * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/via-core.h> #include <linux/via_i2c.h> #include "global.h" #define viafb_compact_res(x, y) (((x)<<16)|(y)) /* CLE266 Software Power Sequence */ /* {Mask}, {Data}, {Delay} */ static const int PowerSequenceOn[3][3] = { {0x10, 0x08, 0x06}, {0x10, 0x08, 0x06}, {0x19, 0x1FE, 0x01} }; static const int PowerSequenceOff[3][3] = { {0x06, 0x08, 0x10}, {0x00, 0x00, 0x00}, {0xD2, 0x19, 0x01} }; static struct _lcd_scaling_factor lcd_scaling_factor = { /* LCD Horizontal Scaling Factor Register */ {LCD_HOR_SCALING_FACTOR_REG_NUM, {{CR9F, 0, 1}, {CR77, 0, 7}, {CR79, 4, 5} } }, /* LCD Vertical Scaling Factor Register */ {LCD_VER_SCALING_FACTOR_REG_NUM, {{CR79, 3, 3}, {CR78, 0, 7}, {CR79, 6, 7} } } }; static struct _lcd_scaling_factor lcd_scaling_factor_CLE = { /* LCD Horizontal Scaling Factor Register */ {LCD_HOR_SCALING_FACTOR_REG_NUM_CLE, {{CR77, 0, 7}, {CR79, 4, 5} } }, /* LCD Vertical Scaling Factor Register */ {LCD_VER_SCALING_FACTOR_REG_NUM_CLE, {{CR78, 0, 7}, {CR79, 6, 7} } } }; static bool lvds_identify_integratedlvds(void); static void fp_id_to_vindex(int panel_id); static int lvds_register_read(int index); static void load_lcd_scaling(int set_hres, int set_vres, int panel_hres, int panel_vres); static void lcd_patch_skew_dvp0(struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info); static void lcd_patch_skew_dvp1(struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info); static void lcd_patch_skew(struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info); static void integrated_lvds_disable(struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info); static void integrated_lvds_enable(struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info); static void lcd_powersequence_off(void); static void lcd_powersequence_on(void); static void fill_lcd_format(void); static void check_diport_of_integrated_lvds( struct lvds_chip_information *plvds_chip_info, struct lvds_setting_information *plvds_setting_info); static inline bool check_lvds_chip(int device_id_subaddr, int device_id) { return lvds_register_read(device_id_subaddr) == device_id; } void viafb_init_lcd_size(void) { DEBUG_MSG(KERN_INFO "viafb_init_lcd_size()\n"); fp_id_to_vindex(viafb_lcd_panel_id); viaparinfo->lvds_setting_info2->lcd_panel_hres = viaparinfo->lvds_setting_info->lcd_panel_hres; viaparinfo->lvds_setting_info2->lcd_panel_vres = viaparinfo->lvds_setting_info->lcd_panel_vres; viaparinfo->lvds_setting_info2->device_lcd_dualedge = viaparinfo->lvds_setting_info->device_lcd_dualedge; 
viaparinfo->lvds_setting_info2->LCDDithering = viaparinfo->lvds_setting_info->LCDDithering; } static bool lvds_identify_integratedlvds(void) { if (viafb_display_hardware_layout == HW_LAYOUT_LCD_EXTERNAL_LCD2) { /* Two dual channel LCD (Internal LVDS + External LVDS): */ /* If we have an external LVDS, such as VT1636, we should have its chip ID already. */ if (viaparinfo->chip_info->lvds_chip_info.lvds_chip_name) { viaparinfo->chip_info->lvds_chip_info2.lvds_chip_name = INTEGRATED_LVDS; DEBUG_MSG(KERN_INFO "Support two dual channel LVDS! " "(Internal LVDS + External LVDS)\n"); } else { viaparinfo->chip_info->lvds_chip_info.lvds_chip_name = INTEGRATED_LVDS; DEBUG_MSG(KERN_INFO "Not found external LVDS, " "so can't support two dual channel LVDS!\n"); } } else if (viafb_display_hardware_layout == HW_LAYOUT_LCD1_LCD2) { /* Two single channel LCD (Internal LVDS + Internal LVDS): */ viaparinfo->chip_info->lvds_chip_info.lvds_chip_name = INTEGRATED_LVDS; viaparinfo->chip_info->lvds_chip_info2.lvds_chip_name = INTEGRATED_LVDS; DEBUG_MSG(KERN_INFO "Support two single channel LVDS! " "(Internal LVDS + Internal LVDS)\n"); } else if (viafb_display_hardware_layout != HW_LAYOUT_DVI_ONLY) { /* If we have found external LVDS, just use it, otherwise, we will use internal LVDS as default. */ if (!viaparinfo->chip_info->lvds_chip_info.lvds_chip_name) { viaparinfo->chip_info->lvds_chip_info.lvds_chip_name = INTEGRATED_LVDS; DEBUG_MSG(KERN_INFO "Found Integrated LVDS!\n"); } } else { viaparinfo->chip_info->lvds_chip_info.lvds_chip_name = NON_LVDS_TRANSMITTER; DEBUG_MSG(KERN_INFO "Do not support LVDS!\n"); return false; } return true; } bool viafb_lvds_trasmitter_identify(void) { if (viafb_lvds_identify_vt1636(VIA_PORT_31)) { viaparinfo->chip_info->lvds_chip_info.i2c_port = VIA_PORT_31; DEBUG_MSG(KERN_INFO "Found VIA VT1636 LVDS on port i2c 0x31\n"); } else { if (viafb_lvds_identify_vt1636(VIA_PORT_2C)) { viaparinfo->chip_info->lvds_chip_info.i2c_port = VIA_PORT_2C; DEBUG_MSG(KERN_INFO "Found VIA VT1636 LVDS on port gpio 0x2c\n"); } } if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CX700) lvds_identify_integratedlvds(); if (viaparinfo->chip_info->lvds_chip_info.lvds_chip_name) return true; /* Check for VT1631: */ viaparinfo->chip_info->lvds_chip_info.lvds_chip_name = VT1631_LVDS; viaparinfo->chip_info->lvds_chip_info.lvds_chip_slave_addr = VT1631_LVDS_I2C_ADDR; if (check_lvds_chip(VT1631_DEVICE_ID_REG, VT1631_DEVICE_ID)) { DEBUG_MSG(KERN_INFO "\n VT1631 LVDS ! 
\n"); DEBUG_MSG(KERN_INFO "\n %2d", viaparinfo->chip_info->lvds_chip_info.lvds_chip_name); DEBUG_MSG(KERN_INFO "\n %2d", viaparinfo->chip_info->lvds_chip_info.lvds_chip_name); return true; } viaparinfo->chip_info->lvds_chip_info.lvds_chip_name = NON_LVDS_TRANSMITTER; viaparinfo->chip_info->lvds_chip_info.lvds_chip_slave_addr = VT1631_LVDS_I2C_ADDR; return false; } static void fp_id_to_vindex(int panel_id) { DEBUG_MSG(KERN_INFO "fp_get_panel_id()\n"); if (panel_id > LCD_PANEL_ID_MAXIMUM) viafb_lcd_panel_id = panel_id = viafb_read_reg(VIACR, CR3F) & 0x0F; switch (panel_id) { case 0x0: viaparinfo->lvds_setting_info->lcd_panel_hres = 640; viaparinfo->lvds_setting_info->lcd_panel_vres = 480; viaparinfo->lvds_setting_info->device_lcd_dualedge = 0; viaparinfo->lvds_setting_info->LCDDithering = 1; break; case 0x1: viaparinfo->lvds_setting_info->lcd_panel_hres = 800; viaparinfo->lvds_setting_info->lcd_panel_vres = 600; viaparinfo->lvds_setting_info->device_lcd_dualedge = 0; viaparinfo->lvds_setting_info->LCDDithering = 1; break; case 0x2: viaparinfo->lvds_setting_info->lcd_panel_hres = 1024; viaparinfo->lvds_setting_info->lcd_panel_vres = 768; viaparinfo->lvds_setting_info->device_lcd_dualedge = 0; viaparinfo->lvds_setting_info->LCDDithering = 1; break; case 0x3: viaparinfo->lvds_setting_info->lcd_panel_hres = 1280; viaparinfo->lvds_setting_info->lcd_panel_vres = 768; viaparinfo->lvds_setting_info->device_lcd_dualedge = 0; viaparinfo->lvds_setting_info->LCDDithering = 1; break; case 0x4: viaparinfo->lvds_setting_info->lcd_panel_hres = 1280; viaparinfo->lvds_setting_info->lcd_panel_vres = 1024; viaparinfo->lvds_setting_info->device_lcd_dualedge = 1; viaparinfo->lvds_setting_info->LCDDithering = 1; break; case 0x5: viaparinfo->lvds_setting_info->lcd_panel_hres = 1400; viaparinfo->lvds_setting_info->lcd_panel_vres = 1050; viaparinfo->lvds_setting_info->device_lcd_dualedge = 1; viaparinfo->lvds_setting_info->LCDDithering = 1; break; case 0x6: viaparinfo->lvds_setting_info->lcd_panel_hres = 1600; viaparinfo->lvds_setting_info->lcd_panel_vres = 1200; viaparinfo->lvds_setting_info->device_lcd_dualedge = 1; viaparinfo->lvds_setting_info->LCDDithering = 1; break; case 0x8: viaparinfo->lvds_setting_info->lcd_panel_hres = 800; viaparinfo->lvds_setting_info->lcd_panel_vres = 480; viaparinfo->lvds_setting_info->device_lcd_dualedge = 0; viaparinfo->lvds_setting_info->LCDDithering = 1; break; case 0x9: viaparinfo->lvds_setting_info->lcd_panel_hres = 1024; viaparinfo->lvds_setting_info->lcd_panel_vres = 768; viaparinfo->lvds_setting_info->device_lcd_dualedge = 1; viaparinfo->lvds_setting_info->LCDDithering = 1; break; case 0xA: viaparinfo->lvds_setting_info->lcd_panel_hres = 1024; viaparinfo->lvds_setting_info->lcd_panel_vres = 768; viaparinfo->lvds_setting_info->device_lcd_dualedge = 0; viaparinfo->lvds_setting_info->LCDDithering = 0; break; case 0xB: viaparinfo->lvds_setting_info->lcd_panel_hres = 1024; viaparinfo->lvds_setting_info->lcd_panel_vres = 768; viaparinfo->lvds_setting_info->device_lcd_dualedge = 1; viaparinfo->lvds_setting_info->LCDDithering = 0; break; case 0xC: viaparinfo->lvds_setting_info->lcd_panel_hres = 1280; viaparinfo->lvds_setting_info->lcd_panel_vres = 768; viaparinfo->lvds_setting_info->device_lcd_dualedge = 0; viaparinfo->lvds_setting_info->LCDDithering = 0; break; case 0xD: viaparinfo->lvds_setting_info->lcd_panel_hres = 1280; viaparinfo->lvds_setting_info->lcd_panel_vres = 1024; viaparinfo->lvds_setting_info->device_lcd_dualedge = 1; viaparinfo->lvds_setting_info->LCDDithering = 0; 
break; case 0xE: viaparinfo->lvds_setting_info->lcd_panel_hres = 1400; viaparinfo->lvds_setting_info->lcd_panel_vres = 1050; viaparinfo->lvds_setting_info->device_lcd_dualedge = 1; viaparinfo->lvds_setting_info->LCDDithering = 0; break; case 0xF: viaparinfo->lvds_setting_info->lcd_panel_hres = 1600; viaparinfo->lvds_setting_info->lcd_panel_vres = 1200; viaparinfo->lvds_setting_info->device_lcd_dualedge = 1; viaparinfo->lvds_setting_info->LCDDithering = 0; break; case 0x10: viaparinfo->lvds_setting_info->lcd_panel_hres = 1366; viaparinfo->lvds_setting_info->lcd_panel_vres = 768; viaparinfo->lvds_setting_info->device_lcd_dualedge = 0; viaparinfo->lvds_setting_info->LCDDithering = 0; break; case 0x11: viaparinfo->lvds_setting_info->lcd_panel_hres = 1024; viaparinfo->lvds_setting_info->lcd_panel_vres = 600; viaparinfo->lvds_setting_info->device_lcd_dualedge = 0; viaparinfo->lvds_setting_info->LCDDithering = 1; break; case 0x12: viaparinfo->lvds_setting_info->lcd_panel_hres = 1280; viaparinfo->lvds_setting_info->lcd_panel_vres = 768; viaparinfo->lvds_setting_info->device_lcd_dualedge = 1; viaparinfo->lvds_setting_info->LCDDithering = 1; break; case 0x13: viaparinfo->lvds_setting_info->lcd_panel_hres = 1280; viaparinfo->lvds_setting_info->lcd_panel_vres = 800; viaparinfo->lvds_setting_info->device_lcd_dualedge = 0; viaparinfo->lvds_setting_info->LCDDithering = 1; break; case 0x14: viaparinfo->lvds_setting_info->lcd_panel_hres = 1360; viaparinfo->lvds_setting_info->lcd_panel_vres = 768; viaparinfo->lvds_setting_info->device_lcd_dualedge = 0; viaparinfo->lvds_setting_info->LCDDithering = 0; break; case 0x15: viaparinfo->lvds_setting_info->lcd_panel_hres = 1280; viaparinfo->lvds_setting_info->lcd_panel_vres = 768; viaparinfo->lvds_setting_info->device_lcd_dualedge = 1; viaparinfo->lvds_setting_info->LCDDithering = 0; break; case 0x16: viaparinfo->lvds_setting_info->lcd_panel_hres = 480; viaparinfo->lvds_setting_info->lcd_panel_vres = 640; viaparinfo->lvds_setting_info->device_lcd_dualedge = 0; viaparinfo->lvds_setting_info->LCDDithering = 1; break; case 0x17: /* OLPC XO-1.5 panel */ viaparinfo->lvds_setting_info->lcd_panel_hres = 1200; viaparinfo->lvds_setting_info->lcd_panel_vres = 900; viaparinfo->lvds_setting_info->device_lcd_dualedge = 0; viaparinfo->lvds_setting_info->LCDDithering = 0; break; default: viaparinfo->lvds_setting_info->lcd_panel_hres = 800; viaparinfo->lvds_setting_info->lcd_panel_vres = 600; viaparinfo->lvds_setting_info->device_lcd_dualedge = 0; viaparinfo->lvds_setting_info->LCDDithering = 1; } } static int lvds_register_read(int index) { u8 data; viafb_i2c_readbyte(VIA_PORT_2C, (u8) viaparinfo->chip_info->lvds_chip_info.lvds_chip_slave_addr, (u8) index, &data); return data; } static void load_lcd_scaling(int set_hres, int set_vres, int panel_hres, int panel_vres) { int reg_value = 0; int viafb_load_reg_num; struct io_register *reg = NULL; DEBUG_MSG(KERN_INFO "load_lcd_scaling()!!\n"); /* LCD Scaling Enable */ viafb_write_reg_mask(CR79, VIACR, 0x07, BIT0 + BIT1 + BIT2); /* Check if expansion for horizontal */ if (set_hres < panel_hres) { /* Load Horizontal Scaling Factor */ switch (viaparinfo->chip_info->gfx_chip_name) { case UNICHROME_CLE266: case UNICHROME_K400: reg_value = CLE266_LCD_HOR_SCF_FORMULA(set_hres, panel_hres); viafb_load_reg_num = lcd_scaling_factor_CLE.lcd_hor_scaling_factor. 
reg_num; reg = lcd_scaling_factor_CLE.lcd_hor_scaling_factor.reg; viafb_load_reg(reg_value, viafb_load_reg_num, reg, VIACR); break; case UNICHROME_K800: case UNICHROME_PM800: case UNICHROME_CN700: case UNICHROME_CX700: case UNICHROME_K8M890: case UNICHROME_P4M890: case UNICHROME_P4M900: case UNICHROME_CN750: case UNICHROME_VX800: case UNICHROME_VX855: case UNICHROME_VX900: reg_value = K800_LCD_HOR_SCF_FORMULA(set_hres, panel_hres); /* Horizontal scaling enabled */ viafb_write_reg_mask(CRA2, VIACR, 0xC0, BIT7 + BIT6); viafb_load_reg_num = lcd_scaling_factor.lcd_hor_scaling_factor.reg_num; reg = lcd_scaling_factor.lcd_hor_scaling_factor.reg; viafb_load_reg(reg_value, viafb_load_reg_num, reg, VIACR); break; } DEBUG_MSG(KERN_INFO "Horizontal Scaling value = %d", reg_value); } else { /* Horizontal scaling disabled */ viafb_write_reg_mask(CRA2, VIACR, 0x00, BIT7); } /* Check if expansion for vertical */ if (set_vres < panel_vres) { /* Load Vertical Scaling Factor */ switch (viaparinfo->chip_info->gfx_chip_name) { case UNICHROME_CLE266: case UNICHROME_K400: reg_value = CLE266_LCD_VER_SCF_FORMULA(set_vres, panel_vres); viafb_load_reg_num = lcd_scaling_factor_CLE.lcd_ver_scaling_factor. reg_num; reg = lcd_scaling_factor_CLE.lcd_ver_scaling_factor.reg; viafb_load_reg(reg_value, viafb_load_reg_num, reg, VIACR); break; case UNICHROME_K800: case UNICHROME_PM800: case UNICHROME_CN700: case UNICHROME_CX700: case UNICHROME_K8M890: case UNICHROME_P4M890: case UNICHROME_P4M900: case UNICHROME_CN750: case UNICHROME_VX800: case UNICHROME_VX855: case UNICHROME_VX900: reg_value = K800_LCD_VER_SCF_FORMULA(set_vres, panel_vres); /* Vertical scaling enabled */ viafb_write_reg_mask(CRA2, VIACR, 0x08, BIT3); viafb_load_reg_num = lcd_scaling_factor.lcd_ver_scaling_factor.reg_num; reg = lcd_scaling_factor.lcd_ver_scaling_factor.reg; viafb_load_reg(reg_value, viafb_load_reg_num, reg, VIACR); break; } DEBUG_MSG(KERN_INFO "Vertical Scaling value = %d", reg_value); } else { /* Vertical scaling disabled */ viafb_write_reg_mask(CRA2, VIACR, 0x00, BIT3); } } static void via_pitch_alignment_patch_lcd(int iga_path, int hres, int bpp) { unsigned char cr13, cr35, cr65, cr66, cr67; unsigned long dwScreenPitch = 0; unsigned long dwPitch; dwPitch = hres * (bpp >> 3); if (dwPitch & 0x1F) { dwScreenPitch = ((dwPitch + 31) & ~31) >> 3; if (iga_path == IGA2) { if (bpp > 8) { cr66 = (unsigned char)(dwScreenPitch & 0xFF); viafb_write_reg(CR66, VIACR, cr66); cr67 = viafb_read_reg(VIACR, CR67) & 0xFC; cr67 |= (unsigned char)((dwScreenPitch & 0x300) >> 8); viafb_write_reg(CR67, VIACR, cr67); } /* Fetch Count */ cr67 = viafb_read_reg(VIACR, CR67) & 0xF3; cr67 |= (unsigned char)((dwScreenPitch & 0x600) >> 7); viafb_write_reg(CR67, VIACR, cr67); cr65 = (unsigned char)((dwScreenPitch >> 1) & 0xFF); cr65 += 2; viafb_write_reg(CR65, VIACR, cr65); } else { if (bpp > 8) { cr13 = (unsigned char)(dwScreenPitch & 0xFF); viafb_write_reg(CR13, VIACR, cr13); cr35 = viafb_read_reg(VIACR, CR35) & 0x1F; cr35 |= (unsigned char)((dwScreenPitch & 0x700) >> 3); viafb_write_reg(CR35, VIACR, cr35); } } } } static void lcd_patch_skew_dvp0(struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info) { if (VT1636_LVDS == plvds_chip_info->lvds_chip_name) { switch (viaparinfo->chip_info->gfx_chip_name) { case UNICHROME_P4M900: viafb_vt1636_patch_skew_on_vt3364(plvds_setting_info, plvds_chip_info); break; case UNICHROME_P4M890: viafb_vt1636_patch_skew_on_vt3327(plvds_setting_info, plvds_chip_info); break; } } } static void 
lcd_patch_skew_dvp1(struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info) { if (VT1636_LVDS == plvds_chip_info->lvds_chip_name) { switch (viaparinfo->chip_info->gfx_chip_name) { case UNICHROME_CX700: viafb_vt1636_patch_skew_on_vt3324(plvds_setting_info, plvds_chip_info); break; } } } static void lcd_patch_skew(struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info) { DEBUG_MSG(KERN_INFO "lcd_patch_skew\n"); switch (plvds_chip_info->output_interface) { case INTERFACE_DVP0: lcd_patch_skew_dvp0(plvds_setting_info, plvds_chip_info); break; case INTERFACE_DVP1: lcd_patch_skew_dvp1(plvds_setting_info, plvds_chip_info); break; case INTERFACE_DFP_LOW: if (UNICHROME_P4M900 == viaparinfo->chip_info->gfx_chip_name) { viafb_write_reg_mask(CR99, VIACR, 0x08, BIT0 + BIT1 + BIT2 + BIT3); } break; } } /* LCD Set Mode */ void viafb_lcd_set_mode(const struct fb_var_screeninfo *var, u16 cxres, u16 cyres, struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info) { int set_iga = plvds_setting_info->iga_path; int mode_bpp = var->bits_per_pixel; int set_hres = cxres ? cxres : var->xres; int set_vres = cyres ? cyres : var->yres; int panel_hres = plvds_setting_info->lcd_panel_hres; int panel_vres = plvds_setting_info->lcd_panel_vres; u32 clock; struct via_display_timing timing; struct fb_var_screeninfo panel_var; const struct fb_videomode *mode_crt_table, *panel_crt_table; DEBUG_MSG(KERN_INFO "viafb_lcd_set_mode!!\n"); /* Get mode table */ mode_crt_table = viafb_get_best_mode(set_hres, set_vres, 60); /* Get panel table Pointer */ panel_crt_table = viafb_get_best_mode(panel_hres, panel_vres, 60); viafb_fill_var_timing_info(&panel_var, panel_crt_table); DEBUG_MSG(KERN_INFO "below viafb_lcd_set_mode!!\n"); if (VT1636_LVDS == plvds_chip_info->lvds_chip_name) viafb_init_lvds_vt1636(plvds_setting_info, plvds_chip_info); clock = PICOS2KHZ(panel_crt_table->pixclock) * 1000; plvds_setting_info->vclk = clock; if (set_iga == IGA2 && (set_hres < panel_hres || set_vres < panel_vres) && plvds_setting_info->display_method == LCD_EXPANDSION) { timing = var_to_timing(&panel_var, panel_hres, panel_vres); load_lcd_scaling(set_hres, set_vres, panel_hres, panel_vres); } else { timing = var_to_timing(&panel_var, set_hres, set_vres); if (set_iga == IGA2) /* disable scaling */ via_write_reg_mask(VIACR, 0x79, 0x00, BIT0 + BIT1 + BIT2); } if (set_iga == IGA1) via_set_primary_timing(&timing); else if (set_iga == IGA2) via_set_secondary_timing(&timing); /* Fetch count for IGA2 only */ viafb_load_fetch_count_reg(set_hres, mode_bpp / 8, set_iga); if ((viaparinfo->chip_info->gfx_chip_name != UNICHROME_CLE266) && (viaparinfo->chip_info->gfx_chip_name != UNICHROME_K400)) viafb_load_FIFO_reg(set_iga, set_hres, set_vres); fill_lcd_format(); viafb_set_vclock(clock, set_iga); lcd_patch_skew(plvds_setting_info, plvds_chip_info); /* If K800 or K8M890, enable LCD Prefetch Mode. 
*/ if ((viaparinfo->chip_info->gfx_chip_name == UNICHROME_K800) || (UNICHROME_K8M890 == viaparinfo->chip_info->gfx_chip_name)) viafb_write_reg_mask(CR6A, VIACR, 0x01, BIT0); /* Patch for non 32bit alignment mode */ via_pitch_alignment_patch_lcd(plvds_setting_info->iga_path, set_hres, var->bits_per_pixel); } static void integrated_lvds_disable(struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info) { bool turn_off_first_powersequence = false; bool turn_off_second_powersequence = false; if (INTERFACE_LVDS0LVDS1 == plvds_chip_info->output_interface) turn_off_first_powersequence = true; if (INTERFACE_LVDS0 == plvds_chip_info->output_interface) turn_off_first_powersequence = true; if (INTERFACE_LVDS1 == plvds_chip_info->output_interface) turn_off_second_powersequence = true; if (turn_off_second_powersequence) { /* Use second power sequence control: */ /* Turn off power sequence. */ viafb_write_reg_mask(CRD4, VIACR, 0, BIT1); /* Turn off back light. */ viafb_write_reg_mask(CRD3, VIACR, 0xC0, BIT6 + BIT7); } if (turn_off_first_powersequence) { /* Use first power sequence control: */ /* Turn off power sequence. */ viafb_write_reg_mask(CR6A, VIACR, 0, BIT3); /* Turn off back light. */ viafb_write_reg_mask(CR91, VIACR, 0xC0, BIT6 + BIT7); } /* Power off LVDS channel. */ switch (plvds_chip_info->output_interface) { case INTERFACE_LVDS0: { viafb_write_reg_mask(CRD2, VIACR, 0x80, BIT7); break; } case INTERFACE_LVDS1: { viafb_write_reg_mask(CRD2, VIACR, 0x40, BIT6); break; } case INTERFACE_LVDS0LVDS1: { viafb_write_reg_mask(CRD2, VIACR, 0xC0, BIT6 + BIT7); break; } } } static void integrated_lvds_enable(struct lvds_setting_information *plvds_setting_info, struct lvds_chip_information *plvds_chip_info) { DEBUG_MSG(KERN_INFO "integrated_lvds_enable, out_interface:%d\n", plvds_chip_info->output_interface); if (plvds_setting_info->lcd_mode == LCD_SPWG) viafb_write_reg_mask(CRD2, VIACR, 0x00, BIT0 + BIT1); else viafb_write_reg_mask(CRD2, VIACR, 0x03, BIT0 + BIT1); switch (plvds_chip_info->output_interface) { case INTERFACE_LVDS0LVDS1: case INTERFACE_LVDS0: /* Use first power sequence control: */ /* Use hardware control power sequence. */ viafb_write_reg_mask(CR91, VIACR, 0, BIT0); /* Turn on back light. */ viafb_write_reg_mask(CR91, VIACR, 0, BIT6 + BIT7); /* Turn on hardware power sequence. */ viafb_write_reg_mask(CR6A, VIACR, 0x08, BIT3); break; case INTERFACE_LVDS1: /* Use second power sequence control: */ /* Use hardware control power sequence. */ viafb_write_reg_mask(CRD3, VIACR, 0, BIT0); /* Turn on back light. */ viafb_write_reg_mask(CRD3, VIACR, 0, BIT6 + BIT7); /* Turn on hardware power sequence. */ viafb_write_reg_mask(CRD4, VIACR, 0x02, BIT1); break; } /* Power on LVDS channel. 
*/ switch (plvds_chip_info->output_interface) { case INTERFACE_LVDS0: { viafb_write_reg_mask(CRD2, VIACR, 0, BIT7); break; } case INTERFACE_LVDS1: { viafb_write_reg_mask(CRD2, VIACR, 0, BIT6); break; } case INTERFACE_LVDS0LVDS1: { viafb_write_reg_mask(CRD2, VIACR, 0, BIT6 + BIT7); break; } } } void viafb_lcd_disable(void) { if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266) { lcd_powersequence_off(); /* DI1 pad off */ viafb_write_reg_mask(SR1E, VIASR, 0x00, 0x30); } else if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CX700) { if (viafb_LCD2_ON && (INTEGRATED_LVDS == viaparinfo->chip_info->lvds_chip_info2.lvds_chip_name)) integrated_lvds_disable(viaparinfo->lvds_setting_info, &viaparinfo->chip_info->lvds_chip_info2); if (INTEGRATED_LVDS == viaparinfo->chip_info->lvds_chip_info.lvds_chip_name) integrated_lvds_disable(viaparinfo->lvds_setting_info, &viaparinfo->chip_info->lvds_chip_info); if (VT1636_LVDS == viaparinfo->chip_info-> lvds_chip_info.lvds_chip_name) viafb_disable_lvds_vt1636(viaparinfo->lvds_setting_info, &viaparinfo->chip_info->lvds_chip_info); } else if (VT1636_LVDS == viaparinfo->chip_info->lvds_chip_info.lvds_chip_name) { viafb_disable_lvds_vt1636(viaparinfo->lvds_setting_info, &viaparinfo->chip_info->lvds_chip_info); } else { /* Backlight off */ viafb_write_reg_mask(SR3D, VIASR, 0x00, 0x20); /* 24 bit DI data path off */ viafb_write_reg_mask(CR91, VIACR, 0x80, 0x80); } /* Disable expansion bit */ viafb_write_reg_mask(CR79, VIACR, 0x00, 0x01); /* Simultaneous mode disabled */ viafb_write_reg_mask(CR6B, VIACR, 0x00, 0x08); } static void set_lcd_output_path(int set_iga, int output_interface) { switch (output_interface) { case INTERFACE_DFP: if ((UNICHROME_K8M890 == viaparinfo->chip_info->gfx_chip_name) || (UNICHROME_P4M890 == viaparinfo->chip_info->gfx_chip_name)) viafb_write_reg_mask(CR97, VIACR, 0x84, BIT7 + BIT2 + BIT1 + BIT0); case INTERFACE_DVP0: case INTERFACE_DVP1: case INTERFACE_DFP_HIGH: case INTERFACE_DFP_LOW: if (set_iga == IGA2) viafb_write_reg(CR91, VIACR, 0x00); break; } } void viafb_lcd_enable(void) { viafb_write_reg_mask(CR6B, VIACR, 0x00, BIT3); viafb_write_reg_mask(CR6A, VIACR, 0x08, BIT3); set_lcd_output_path(viaparinfo->lvds_setting_info->iga_path, viaparinfo->chip_info->lvds_chip_info.output_interface); if (viafb_LCD2_ON) set_lcd_output_path(viaparinfo->lvds_setting_info2->iga_path, viaparinfo->chip_info-> lvds_chip_info2.output_interface); if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266) { /* DI1 pad on */ viafb_write_reg_mask(SR1E, VIASR, 0x30, 0x30); lcd_powersequence_on(); } else if (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CX700) { if (viafb_LCD2_ON && (INTEGRATED_LVDS == viaparinfo->chip_info->lvds_chip_info2.lvds_chip_name)) integrated_lvds_enable(viaparinfo->lvds_setting_info2, &viaparinfo->chip_info->lvds_chip_info2); if (INTEGRATED_LVDS == viaparinfo->chip_info->lvds_chip_info.lvds_chip_name) integrated_lvds_enable(viaparinfo->lvds_setting_info, &viaparinfo->chip_info->lvds_chip_info); if (VT1636_LVDS == viaparinfo->chip_info-> lvds_chip_info.lvds_chip_name) viafb_enable_lvds_vt1636(viaparinfo-> lvds_setting_info, &viaparinfo->chip_info-> lvds_chip_info); } else if (VT1636_LVDS == viaparinfo->chip_info->lvds_chip_info.lvds_chip_name) { viafb_enable_lvds_vt1636(viaparinfo->lvds_setting_info, &viaparinfo->chip_info->lvds_chip_info); } else { /* Backlight on */ viafb_write_reg_mask(SR3D, VIASR, 0x20, 0x20); /* 24 bit DI data path on */ viafb_write_reg_mask(CR91, VIACR, 0x00, 0x80); /* LCD enabled */ 
viafb_write_reg_mask(CR6A, VIACR, 0x48, 0x48); } } static void lcd_powersequence_off(void) { int i, mask, data; /* Software control power sequence */ viafb_write_reg_mask(CR91, VIACR, 0x11, 0x11); for (i = 0; i < 3; i++) { mask = PowerSequenceOff[0][i]; data = PowerSequenceOff[1][i] & mask; viafb_write_reg_mask(CR91, VIACR, (u8) data, (u8) mask); udelay(PowerSequenceOff[2][i]); } /* Disable LCD */ viafb_write_reg_mask(CR6A, VIACR, 0x00, 0x08); } static void lcd_powersequence_on(void) { int i, mask, data; /* Software control power sequence */ viafb_write_reg_mask(CR91, VIACR, 0x11, 0x11); /* Enable LCD */ viafb_write_reg_mask(CR6A, VIACR, 0x08, 0x08); for (i = 0; i < 3; i++) { mask = PowerSequenceOn[0][i]; data = PowerSequenceOn[1][i] & mask; viafb_write_reg_mask(CR91, VIACR, (u8) data, (u8) mask); udelay(PowerSequenceOn[2][i]); } udelay(1); } static void fill_lcd_format(void) { u8 bdithering = 0, bdual = 0; if (viaparinfo->lvds_setting_info->device_lcd_dualedge) bdual = BIT4; if (viaparinfo->lvds_setting_info->LCDDithering) bdithering = BIT0; /* Dual & Dithering */ viafb_write_reg_mask(CR88, VIACR, (bdithering | bdual), BIT4 + BIT0); } static void check_diport_of_integrated_lvds( struct lvds_chip_information *plvds_chip_info, struct lvds_setting_information *plvds_setting_info) { /* Determine LCD DI Port by hardware layout. */ switch (viafb_display_hardware_layout) { case HW_LAYOUT_LCD_ONLY: { if (plvds_setting_info->device_lcd_dualedge) { plvds_chip_info->output_interface = INTERFACE_LVDS0LVDS1; } else { plvds_chip_info->output_interface = INTERFACE_LVDS0; } break; } case HW_LAYOUT_DVI_ONLY: { plvds_chip_info->output_interface = INTERFACE_NONE; break; } case HW_LAYOUT_LCD1_LCD2: case HW_LAYOUT_LCD_EXTERNAL_LCD2: { plvds_chip_info->output_interface = INTERFACE_LVDS0LVDS1; break; } case HW_LAYOUT_LCD_DVI: { plvds_chip_info->output_interface = INTERFACE_LVDS1; break; } default: { plvds_chip_info->output_interface = INTERFACE_LVDS1; break; } } DEBUG_MSG(KERN_INFO "Display Hardware Layout: 0x%x, LCD DI Port: 0x%x\n", viafb_display_hardware_layout, plvds_chip_info->output_interface); } void viafb_init_lvds_output_interface(struct lvds_chip_information *plvds_chip_info, struct lvds_setting_information *plvds_setting_info) { if (INTERFACE_NONE != plvds_chip_info->output_interface) { /*Do nothing, lcd port is specified by module parameter */ return; } switch (plvds_chip_info->lvds_chip_name) { case VT1636_LVDS: switch (viaparinfo->chip_info->gfx_chip_name) { case UNICHROME_CX700: plvds_chip_info->output_interface = INTERFACE_DVP1; break; case UNICHROME_CN700: plvds_chip_info->output_interface = INTERFACE_DFP_LOW; break; default: plvds_chip_info->output_interface = INTERFACE_DVP0; break; } break; case INTEGRATED_LVDS: check_diport_of_integrated_lvds(plvds_chip_info, plvds_setting_info); break; default: switch (viaparinfo->chip_info->gfx_chip_name) { case UNICHROME_K8M890: case UNICHROME_P4M900: case UNICHROME_P4M890: plvds_chip_info->output_interface = INTERFACE_DFP_LOW; break; default: plvds_chip_info->output_interface = INTERFACE_DFP; break; } break; } } bool viafb_lcd_get_mobile_state(bool *mobile) { unsigned char __iomem *romptr, *tableptr, *biosptr; u8 core_base; /* Rom address */ const u32 romaddr = 0x000C0000; u16 start_pattern; biosptr = ioremap(romaddr, 0x10000); start_pattern = readw(biosptr); /* Compare pattern */ if (start_pattern == 0xAA55) { /* Get the start of Table */ /* 0x1B means BIOS offset position */ romptr = biosptr + 0x1B; tableptr = biosptr + readw(romptr); /* Get the start 
of biosver structure */ /* 18 means BIOS version position. */ romptr = tableptr + 18; romptr = biosptr + readw(romptr); /* The offset should be 44, but the actual image is three characters shorter. */ /* pRom += 44; */ romptr += 41; core_base = readb(romptr); if (core_base & 0x8) *mobile = false; else *mobile = true; /* release memory */ iounmap(biosptr); return true; } else { iounmap(biosptr); return false; } }
gpl-2.0
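Most of the register programming in the LCD driver above funnels through viafb_write_reg_mask(), which changes only the bits selected by a mask and preserves the rest. Below is a minimal, self-contained sketch of that read-modify-write idiom; the register file is modeled as an array so the sketch runs without hardware, and the helper name rmw_indexed_reg is hypothetical, not part of the driver (real code would do inb()/outb() on the index/data port pair instead).

#include <stdio.h>

/* Modeled CRT register file; stands in for the indexed hardware ports. */
static unsigned char crt_regs[256];

/* Hypothetical helper mirroring the viafb_write_reg_mask() pattern:
 * read the current value, change only the bits in 'mask', write back. */
static void rmw_indexed_reg(unsigned char index, unsigned char data,
			    unsigned char mask)
{
	unsigned char cur = crt_regs[index];		/* "inb" step  */

	crt_regs[index] = (cur & ~mask) | (data & mask);/* "outb" step */
}

int main(void)
{
	crt_regs[0x6A] = 0x40;		   /* pretend CR6A already has bit 6 set */
	rmw_indexed_reg(0x6A, 0x08, 0x08); /* set bit 3, leave bit 6 intact */
	printf("CR6A = 0x%02X\n", crt_regs[0x6A]);	/* prints 0x48 */
	return 0;
}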
Fusion-Devices/android_kernel_samsung_klte
drivers/net/arcnet/com20020_cs.c
5138
8111
/* * Linux ARCnet driver - COM20020 PCMCIA support * * Written 1994-1999 by Avery Pennarun, * based on an ISA version by David Woodhouse. * Derived from ibmtr_cs.c by Steve Kipisz (pcmcia-cs 3.1.4) * which was derived from pcnet_cs.c by David Hinds. * Some additional portions derived from skeleton.c by Donald Becker. * * Special thanks to Contemporary Controls, Inc. (www.ccontrols.com) * for sponsoring the further development of this driver. * * ********************** * * The original copyright of skeleton.c was as follows: * * skeleton.c Written 1993 by Donald Becker. * Copyright 1993 United States Government as represented by the * Director, National Security Agency. This software may only be used * and distributed according to the terms of the GNU General Public License as * modified by SRC, incorporated herein by reference. * * ********************** * Changes: * Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 08/08/2000 * - reorganize kmallocs in com20020_attach, checking all for failure * and releasing the previous allocations if one fails * ********************** * * For more details, see drivers/net/arcnet.c * * ********************** */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/arcdevice.h> #include <linux/com20020.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include <asm/io.h> #define VERSION "arcnet: COM20020 PCMCIA support loaded.\n" static void regdump(struct net_device *dev) { #ifdef DEBUG int ioaddr = dev->base_addr; int count; netdev_dbg(dev, "register dump:\n"); for (count = ioaddr; count < ioaddr + 16; count++) { if (!(count % 16)) pr_cont("%04X:", count); pr_cont(" %02X", inb(count)); } pr_cont("\n"); netdev_dbg(dev, "buffer0 dump:\n"); /* set up the address register */ count = 0; outb((count >> 8) | RDDATAflag | AUTOINCflag, _ADDR_HI); outb(count & 0xff, _ADDR_LO); for (count = 0; count < 256+32; count++) { if (!(count % 16)) pr_cont("%04X:", count); /* copy the data */ pr_cont(" %02X", inb(_MEMDATA)); } pr_cont("\n"); #endif } /*====================================================================*/ /* Parameters that can be set with 'insmod' */ static int node; static int timeout = 3; static int backplane; static int clockp; static int clockm; module_param(node, int, 0); module_param(timeout, int, 0); module_param(backplane, int, 0); module_param(clockp, int, 0); module_param(clockm, int, 0); MODULE_LICENSE("GPL"); /*====================================================================*/ static int com20020_config(struct pcmcia_device *link); static void com20020_release(struct pcmcia_device *link); static void com20020_detach(struct pcmcia_device *p_dev); /*====================================================================*/ typedef struct com20020_dev_t { struct net_device *dev; } com20020_dev_t; static int com20020_probe(struct pcmcia_device *p_dev) { com20020_dev_t *info; struct net_device *dev; struct arcnet_local *lp; dev_dbg(&p_dev->dev, "com20020_attach()\n"); /* Create new network device */ info = kzalloc(sizeof(struct com20020_dev_t), GFP_KERNEL); if (!info) goto fail_alloc_info; dev = alloc_arcdev(""); if (!dev) goto fail_alloc_dev; lp = netdev_priv(dev); lp->timeout = timeout; lp->backplane = backplane; lp->clockp = clockp; lp->clockm = clockm & 3; lp->hw.owner = THIS_MODULE; /* fill in our module parameters as defaults */ dev->dev_addr[0] = 
node; p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; p_dev->resource[0]->end = 16; p_dev->config_flags |= CONF_ENABLE_IRQ; info->dev = dev; p_dev->priv = info; return com20020_config(p_dev); fail_alloc_dev: kfree(info); fail_alloc_info: return -ENOMEM; } /* com20020_attach */ static void com20020_detach(struct pcmcia_device *link) { struct com20020_dev_t *info = link->priv; struct net_device *dev = info->dev; dev_dbg(&link->dev, "detach...\n"); dev_dbg(&link->dev, "com20020_detach\n"); dev_dbg(&link->dev, "unregister...\n"); unregister_netdev(dev); /* * this is necessary because we register our IRQ separately * from card services. */ if (dev->irq) free_irq(dev->irq, dev); com20020_release(link); /* Unlink device structure, free bits */ dev_dbg(&link->dev, "unlinking...\n"); if (link->priv) { dev = info->dev; if (dev) { dev_dbg(&link->dev, "kfree...\n"); free_netdev(dev); } dev_dbg(&link->dev, "kfree2...\n"); kfree(info); } } /* com20020_detach */ static int com20020_config(struct pcmcia_device *link) { struct arcnet_local *lp; com20020_dev_t *info; struct net_device *dev; int i, ret; int ioaddr; info = link->priv; dev = info->dev; dev_dbg(&link->dev, "config...\n"); dev_dbg(&link->dev, "com20020_config\n"); dev_dbg(&link->dev, "baseport1 is %Xh\n", (unsigned int) link->resource[0]->start); i = -ENODEV; link->io_lines = 16; if (!link->resource[0]->start) { for (ioaddr = 0x100; ioaddr < 0x400; ioaddr += 0x10) { link->resource[0]->start = ioaddr; i = pcmcia_request_io(link); if (i == 0) break; } } else i = pcmcia_request_io(link); if (i != 0) { dev_dbg(&link->dev, "requestIO failed totally!\n"); goto failed; } ioaddr = dev->base_addr = link->resource[0]->start; dev_dbg(&link->dev, "got ioaddr %Xh\n", ioaddr); dev_dbg(&link->dev, "request IRQ %d\n", link->irq); if (!link->irq) { dev_dbg(&link->dev, "requestIRQ failed totally!\n"); goto failed; } dev->irq = link->irq; ret = pcmcia_enable_device(link); if (ret) goto failed; if (com20020_check(dev)) { regdump(dev); goto failed; } lp = netdev_priv(dev); lp->card_name = "PCMCIA COM20020"; lp->card_flags = ARC_CAN_10MBIT; /* pretend all of them can 10Mbit */ SET_NETDEV_DEV(dev, &link->dev); i = com20020_found(dev, 0); /* calls register_netdev */ if (i != 0) { dev_notice(&link->dev, "com20020_found() failed\n"); goto failed; } netdev_dbg(dev, "port %#3lx, irq %d\n", dev->base_addr, dev->irq); return 0; failed: dev_dbg(&link->dev, "com20020_config failed...\n"); com20020_release(link); return -ENODEV; } /* com20020_config */ static void com20020_release(struct pcmcia_device *link) { dev_dbg(&link->dev, "com20020_release\n"); pcmcia_disable_device(link); } static int com20020_suspend(struct pcmcia_device *link) { com20020_dev_t *info = link->priv; struct net_device *dev = info->dev; if (link->open) netif_device_detach(dev); return 0; } static int com20020_resume(struct pcmcia_device *link) { com20020_dev_t *info = link->priv; struct net_device *dev = info->dev; if (link->open) { int ioaddr = dev->base_addr; struct arcnet_local *lp = netdev_priv(dev); ARCRESET; } return 0; } static const struct pcmcia_device_id com20020_ids[] = { PCMCIA_DEVICE_PROD_ID12("Contemporary Control Systems, Inc.", "PCM20 Arcnet Adapter", 0x59991666, 0x95dfffaf), PCMCIA_DEVICE_PROD_ID12("SoHard AG", "SH ARC PCMCIA", 0xf8991729, 0x69dff0c7), PCMCIA_DEVICE_NULL }; MODULE_DEVICE_TABLE(pcmcia, com20020_ids); static struct pcmcia_driver com20020_cs_driver = { .owner = THIS_MODULE, .name = "com20020_cs", .probe = com20020_probe, .remove = com20020_detach, .id_table = 
com20020_ids, .suspend = com20020_suspend, .resume = com20020_resume, }; static int __init init_com20020_cs(void) { return pcmcia_register_driver(&com20020_cs_driver); } static void __exit exit_com20020_cs(void) { pcmcia_unregister_driver(&com20020_cs_driver); } module_init(init_com20020_cs); module_exit(exit_com20020_cs);
gpl-2.0
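The changelog note in the header above ("reorganize kmallocs in com20020_attach, checking all for failure and releasing the previous allocations if one fails") describes the goto-unwind idiom visible in com20020_probe(). A small stand-alone sketch of the pattern follows, with hypothetical struct and function names; a failure jumps to the label that frees everything allocated so far, in reverse order.

#include <stdlib.h>

struct info { int dummy; };
struct dev  { int dummy; };

/* Hypothetical probe-like function demonstrating goto-based unwinding. */
static int probe_like(struct info **out_info, struct dev **out_dev)
{
	struct info *info;
	struct dev *dev;

	info = calloc(1, sizeof(*info));
	if (!info)
		goto fail_alloc_info;	/* nothing allocated yet */

	dev = calloc(1, sizeof(*dev));
	if (!dev)
		goto fail_alloc_dev;	/* must release 'info' first */

	*out_info = info;
	*out_dev = dev;
	return 0;

fail_alloc_dev:
	free(info);
fail_alloc_info:
	return -1;			/* -ENOMEM in the kernel version */
}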
MyAOSP/kernel_samsung_manta
net/bridge/netfilter/ebtable_nat.c
9234
3080
/* * ebtable_nat * * Authors: * Bart De Schuymer <bdschuym@pandora.be> * * April, 2002 * */ #include <linux/netfilter_bridge/ebtables.h> #include <linux/module.h> #define NAT_VALID_HOOKS ((1 << NF_BR_PRE_ROUTING) | (1 << NF_BR_LOCAL_OUT) | \ (1 << NF_BR_POST_ROUTING)) static struct ebt_entries initial_chains[] = { { .name = "PREROUTING", .policy = EBT_ACCEPT, }, { .name = "OUTPUT", .policy = EBT_ACCEPT, }, { .name = "POSTROUTING", .policy = EBT_ACCEPT, } }; static struct ebt_replace_kernel initial_table = { .name = "nat", .valid_hooks = NAT_VALID_HOOKS, .entries_size = 3 * sizeof(struct ebt_entries), .hook_entry = { [NF_BR_PRE_ROUTING] = &initial_chains[0], [NF_BR_LOCAL_OUT] = &initial_chains[1], [NF_BR_POST_ROUTING] = &initial_chains[2], }, .entries = (char *)initial_chains, }; static int check(const struct ebt_table_info *info, unsigned int valid_hooks) { if (valid_hooks & ~NAT_VALID_HOOKS) return -EINVAL; return 0; } static struct ebt_table frame_nat = { .name = "nat", .table = &initial_table, .valid_hooks = NAT_VALID_HOOKS, .check = check, .me = THIS_MODULE, }; static unsigned int ebt_nat_in(unsigned int hook, struct sk_buff *skb, const struct net_device *in , const struct net_device *out, int (*okfn)(struct sk_buff *)) { return ebt_do_table(hook, skb, in, out, dev_net(in)->xt.frame_nat); } static unsigned int ebt_nat_out(unsigned int hook, struct sk_buff *skb, const struct net_device *in , const struct net_device *out, int (*okfn)(struct sk_buff *)) { return ebt_do_table(hook, skb, in, out, dev_net(out)->xt.frame_nat); } static struct nf_hook_ops ebt_ops_nat[] __read_mostly = { { .hook = ebt_nat_out, .owner = THIS_MODULE, .pf = NFPROTO_BRIDGE, .hooknum = NF_BR_LOCAL_OUT, .priority = NF_BR_PRI_NAT_DST_OTHER, }, { .hook = ebt_nat_out, .owner = THIS_MODULE, .pf = NFPROTO_BRIDGE, .hooknum = NF_BR_POST_ROUTING, .priority = NF_BR_PRI_NAT_SRC, }, { .hook = ebt_nat_in, .owner = THIS_MODULE, .pf = NFPROTO_BRIDGE, .hooknum = NF_BR_PRE_ROUTING, .priority = NF_BR_PRI_NAT_DST_BRIDGED, }, }; static int __net_init frame_nat_net_init(struct net *net) { net->xt.frame_nat = ebt_register_table(net, &frame_nat); if (IS_ERR(net->xt.frame_nat)) return PTR_ERR(net->xt.frame_nat); return 0; } static void __net_exit frame_nat_net_exit(struct net *net) { ebt_unregister_table(net, net->xt.frame_nat); } static struct pernet_operations frame_nat_net_ops = { .init = frame_nat_net_init, .exit = frame_nat_net_exit, }; static int __init ebtable_nat_init(void) { int ret; ret = register_pernet_subsys(&frame_nat_net_ops); if (ret < 0) return ret; ret = nf_register_hooks(ebt_ops_nat, ARRAY_SIZE(ebt_ops_nat)); if (ret < 0) unregister_pernet_subsys(&frame_nat_net_ops); return ret; } static void __exit ebtable_nat_fini(void) { nf_unregister_hooks(ebt_ops_nat, ARRAY_SIZE(ebt_ops_nat)); unregister_pernet_subsys(&frame_nat_net_ops); } module_init(ebtable_nat_init); module_exit(ebtable_nat_fini); MODULE_LICENSE("GPL");
gpl-2.0
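The check() callback above reduces to a one-line bitmask test: any hook bit outside NAT_VALID_HOOKS invalidates the table. Here is a runnable sketch of the same test; the enum values are stand-ins for the kernel's NF_BR_* constants, not the real definitions.

#include <stdio.h>

/* Stand-ins for the NF_BR_* hook numbers referenced by NAT_VALID_HOOKS. */
enum { PRE_ROUTING, LOCAL_IN, FORWARD, LOCAL_OUT, POST_ROUTING };

#define VALID_HOOKS ((1u << PRE_ROUTING) | (1u << LOCAL_OUT) | \
		     (1u << POST_ROUTING))

/* Mirrors check(): reject any request touching a hook outside the set. */
static int check_hooks(unsigned int valid_hooks)
{
	return (valid_hooks & ~VALID_HOOKS) ? -1 : 0;	/* -EINVAL in-kernel */
}

int main(void)
{
	printf("%d\n", check_hooks(1u << LOCAL_OUT));	/* 0: allowed   */
	printf("%d\n", check_hooks(1u << FORWARD));	/* -1: rejected */
	return 0;
}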
somesnow/AK-OnePone
arch/mips/sibyte/swarm/rtc_xicor1241.c
9490
5504
/* * Copyright (C) 2000, 2001 Broadcom Corporation * * Copyright (C) 2002 MontaVista Software Inc. * Author: jsun@mvista.com or jsun@junsun.net * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/bcd.h> #include <linux/types.h> #include <linux/time.h> #include <asm/time.h> #include <asm/addrspace.h> #include <asm/io.h> #include <asm/sibyte/sb1250.h> #include <asm/sibyte/sb1250_regs.h> #include <asm/sibyte/sb1250_smbus.h> /* Xicor 1241 definitions */ /* * Register bits */ #define X1241REG_SR_BAT 0x80 /* currently on battery power */ #define X1241REG_SR_RWEL 0x04 /* r/w latch is enabled, can write RTC */ #define X1241REG_SR_WEL 0x02 /* r/w latch is unlocked, can enable r/w now */ #define X1241REG_SR_RTCF 0x01 /* clock failed */ #define X1241REG_BL_BP2 0x80 /* block protect 2 */ #define X1241REG_BL_BP1 0x40 /* block protect 1 */ #define X1241REG_BL_BP0 0x20 /* block protect 0 */ #define X1241REG_BL_WD1 0x10 #define X1241REG_BL_WD0 0x08 #define X1241REG_HR_MIL 0x80 /* military time format */ /* * Register numbers */ #define X1241REG_BL 0x10 /* block protect bits */ #define X1241REG_INT 0x11 /* */ #define X1241REG_SC 0x30 /* Seconds */ #define X1241REG_MN 0x31 /* Minutes */ #define X1241REG_HR 0x32 /* Hours */ #define X1241REG_DT 0x33 /* Day of month */ #define X1241REG_MO 0x34 /* Month */ #define X1241REG_YR 0x35 /* Year */ #define X1241REG_DW 0x36 /* Day of Week */ #define X1241REG_Y2K 0x37 /* Year 2K */ #define X1241REG_SR 0x3F /* Status register */ #define X1241_CCR_ADDRESS 0x6F #define SMB_CSR(reg) IOADDR(A_SMB_REGISTER(1, reg)) static int xicor_read(uint8_t addr) { while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY) ; __raw_writeq((addr >> 8) & 0x7, SMB_CSR(R_SMB_CMD)); __raw_writeq(addr & 0xff, SMB_CSR(R_SMB_DATA)); __raw_writeq(V_SMB_ADDR(X1241_CCR_ADDRESS) | V_SMB_TT_WR2BYTE, SMB_CSR(R_SMB_START)); while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY) ; __raw_writeq(V_SMB_ADDR(X1241_CCR_ADDRESS) | V_SMB_TT_RD1BYTE, SMB_CSR(R_SMB_START)); while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY) ; if (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_ERROR) { /* Clear error bit by writing a 1 */ __raw_writeq(M_SMB_ERROR, SMB_CSR(R_SMB_STATUS)); return -1; } return (__raw_readq(SMB_CSR(R_SMB_DATA)) & 0xff); } static int xicor_write(uint8_t addr, int b) { while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY) ; __raw_writeq(addr, SMB_CSR(R_SMB_CMD)); __raw_writeq((addr & 0xff) | ((b & 0xff) << 8), SMB_CSR(R_SMB_DATA)); __raw_writeq(V_SMB_ADDR(X1241_CCR_ADDRESS) | V_SMB_TT_WR3BYTE, SMB_CSR(R_SMB_START)); while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY) ; if (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_ERROR) { /* Clear error bit by writing a 1 */ __raw_writeq(M_SMB_ERROR, SMB_CSR(R_SMB_STATUS)); return -1; } else { return 0; } } int xicor_set_time(unsigned long t) { struct rtc_time tm; int tmp; unsigned long flags; rtc_time_to_tm(t, &tm); tm.tm_year += 1900; spin_lock_irqsave(&rtc_lock, flags); /* unlock writes to the CCR */ xicor_write(X1241REG_SR, X1241REG_SR_WEL); xicor_write(X1241REG_SR, X1241REG_SR_WEL | X1241REG_SR_RWEL); /* trivial ones */ tm.tm_sec = bin2bcd(tm.tm_sec); xicor_write(X1241REG_SC, tm.tm_sec); tm.tm_min = bin2bcd(tm.tm_min); xicor_write(X1241REG_MN, tm.tm_min); tm.tm_mday = bin2bcd(tm.tm_mday); xicor_write(X1241REG_DT, tm.tm_mday); /* tm_mon starts 
from 0, *ick* */ tm.tm_mon++; tm.tm_mon = bin2bcd(tm.tm_mon); xicor_write(X1241REG_MO, tm.tm_mon); /* year is split */ tmp = tm.tm_year / 100; tm.tm_year %= 100; xicor_write(X1241REG_YR, tm.tm_year); xicor_write(X1241REG_Y2K, tmp); /* hour is the most tricky one */ tmp = xicor_read(X1241REG_HR); if (tmp & X1241REG_HR_MIL) { /* 24 hour format */ tm.tm_hour = bin2bcd(tm.tm_hour); tmp = (tmp & ~0x3f) | (tm.tm_hour & 0x3f); } else { /* 12 hour format, with bit 0x20 set for pm */ tmp = tmp & ~0x3f; if (tm.tm_hour >= 12) { tmp |= 0x20; tm.tm_hour -= 12; } tm.tm_hour = bin2bcd(tm.tm_hour); tmp |= tm.tm_hour; } xicor_write(X1241REG_HR, tmp); xicor_write(X1241REG_SR, 0); spin_unlock_irqrestore(&rtc_lock, flags); return 0; } unsigned long xicor_get_time(void) { unsigned int year, mon, day, hour, min, sec, y2k; unsigned long flags; spin_lock_irqsave(&rtc_lock, flags); sec = xicor_read(X1241REG_SC); min = xicor_read(X1241REG_MN); hour = xicor_read(X1241REG_HR); if (hour & X1241REG_HR_MIL) { hour &= 0x3f; } else { if (hour & 0x20) hour = (hour & 0xf) + 0x12; } day = xicor_read(X1241REG_DT); mon = xicor_read(X1241REG_MO); year = xicor_read(X1241REG_YR); y2k = xicor_read(X1241REG_Y2K); spin_unlock_irqrestore(&rtc_lock, flags); sec = bcd2bin(sec); min = bcd2bin(min); hour = bcd2bin(hour); day = bcd2bin(day); mon = bcd2bin(mon); year = bcd2bin(year); y2k = bcd2bin(y2k); year += (y2k * 100); return mktime(year, mon, day, hour, min, sec); } int xicor_probe(void) { return (xicor_read(X1241REG_SC) != -1); }
gpl-2.0
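xicor_set_time() and xicor_get_time() above lean on the kernel's bin2bcd()/bcd2bin() helpers, because the X1241 stores each time field as binary-coded decimal (one decimal digit per nibble). A stand-alone sketch of the two conversions with a worked value; the helper names to_bcd/from_bcd are local stand-ins, not the kernel functions.

#include <stdio.h>

/* Same conversions as the kernel's bin2bcd()/bcd2bin() helpers. */
static unsigned char to_bcd(unsigned char bin)
{
	return (unsigned char)(((bin / 10) << 4) | (bin % 10));
}

static unsigned char from_bcd(unsigned char bcd)
{
	return (unsigned char)((bcd >> 4) * 10 + (bcd & 0x0f));
}

int main(void)
{
	/* 59 decimal is stored in the seconds register as 0x59. */
	printf("to_bcd(59)     = 0x%02X\n", to_bcd(59));	/* 0x59 */
	printf("from_bcd(0x59) = %u\n", from_bcd(0x59));	/* 59   */
	return 0;
}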
blackdeviant/nickless
arch/sh/kernel/cpu/sh2a/pinmux-sh7203.c
9490
51278
/* * SH7203 Pinmux * * Copyright (C) 2008 Magnus Damm * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/gpio.h> #include <cpu/sh7203.h> enum { PINMUX_RESERVED = 0, PINMUX_DATA_BEGIN, PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA, PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA, PB12_DATA, PB11_DATA, PB10_DATA, PB9_DATA, PB8_DATA, PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA, PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA, PC14_DATA, PC13_DATA, PC12_DATA, PC11_DATA, PC10_DATA, PC9_DATA, PC8_DATA, PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA, PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA, PD15_DATA, PD14_DATA, PD13_DATA, PD12_DATA, PD11_DATA, PD10_DATA, PD9_DATA, PD8_DATA, PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA, PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA, PE15_DATA, PE14_DATA, PE13_DATA, PE12_DATA, PE11_DATA, PE10_DATA, PE9_DATA, PE8_DATA, PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA, PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA, PF30_DATA, PF29_DATA, PF28_DATA, PF27_DATA, PF26_DATA, PF25_DATA, PF24_DATA, PF23_DATA, PF22_DATA, PF21_DATA, PF20_DATA, PF19_DATA, PF18_DATA, PF17_DATA, PF16_DATA, PF15_DATA, PF14_DATA, PF13_DATA, PF12_DATA, PF11_DATA, PF10_DATA, PF9_DATA, PF8_DATA, PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA, PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA, PINMUX_DATA_END, PINMUX_INPUT_BEGIN, FORCE_IN, PA7_IN, PA6_IN, PA5_IN, PA4_IN, PA3_IN, PA2_IN, PA1_IN, PA0_IN, PB11_IN, PB10_IN, PB9_IN, PB8_IN, PC14_IN, PC13_IN, PC12_IN, PC11_IN, PC10_IN, PC9_IN, PC8_IN, PC7_IN, PC6_IN, PC5_IN, PC4_IN, PC3_IN, PC2_IN, PC1_IN, PC0_IN, PD15_IN, PD14_IN, PD13_IN, PD12_IN, PD11_IN, PD10_IN, PD9_IN, PD8_IN, PD7_IN, PD6_IN, PD5_IN, PD4_IN, PD3_IN, PD2_IN, PD1_IN, PD0_IN, PE15_IN, PE14_IN, PE13_IN, PE12_IN, PE11_IN, PE10_IN, PE9_IN, PE8_IN, PE7_IN, PE6_IN, PE5_IN, PE4_IN, PE3_IN, PE2_IN, PE1_IN, PE0_IN, PF30_IN, PF29_IN, PF28_IN, PF27_IN, PF26_IN, PF25_IN, PF24_IN, PF23_IN, PF22_IN, PF21_IN, PF20_IN, PF19_IN, PF18_IN, PF17_IN, PF16_IN, PF15_IN, PF14_IN, PF13_IN, PF12_IN, PF11_IN, PF10_IN, PF9_IN, PF8_IN, PF7_IN, PF6_IN, PF5_IN, PF4_IN, PF3_IN, PF2_IN, PF1_IN, PF0_IN, PINMUX_INPUT_END, PINMUX_OUTPUT_BEGIN, FORCE_OUT, PB11_OUT, PB10_OUT, PB9_OUT, PB8_OUT, PC14_OUT, PC13_OUT, PC12_OUT, PC11_OUT, PC10_OUT, PC9_OUT, PC8_OUT, PC7_OUT, PC6_OUT, PC5_OUT, PC4_OUT, PC3_OUT, PC2_OUT, PC1_OUT, PC0_OUT, PD15_OUT, PD14_OUT, PD13_OUT, PD12_OUT, PD11_OUT, PD10_OUT, PD9_OUT, PD8_OUT, PD7_OUT, PD6_OUT, PD5_OUT, PD4_OUT, PD3_OUT, PD2_OUT, PD1_OUT, PD0_OUT, PE15_OUT, PE14_OUT, PE13_OUT, PE12_OUT, PE11_OUT, PE10_OUT, PE9_OUT, PE8_OUT, PE7_OUT, PE6_OUT, PE5_OUT, PE4_OUT, PE3_OUT, PE2_OUT, PE1_OUT, PE0_OUT, PF30_OUT, PF29_OUT, PF28_OUT, PF27_OUT, PF26_OUT, PF25_OUT, PF24_OUT, PF23_OUT, PF22_OUT, PF21_OUT, PF20_OUT, PF19_OUT, PF18_OUT, PF17_OUT, PF16_OUT, PF15_OUT, PF14_OUT, PF13_OUT, PF12_OUT, PF11_OUT, PF10_OUT, PF9_OUT, PF8_OUT, PF7_OUT, PF6_OUT, PF5_OUT, PF4_OUT, PF3_OUT, PF2_OUT, PF1_OUT, PF0_OUT, PINMUX_OUTPUT_END, PINMUX_FUNCTION_BEGIN, PB11_IOR_IN, PB11_IOR_OUT, PB10_IOR_IN, PB10_IOR_OUT, PB9_IOR_IN, PB9_IOR_OUT, PB8_IOR_IN, PB8_IOR_OUT, PB12MD_00, PB12MD_01, PB12MD_10, PB12MD_11, PB11MD_0, PB11MD_1, PB10MD_0, PB10MD_1, PB9MD_00, PB9MD_01, PB9MD_10, PB8MD_00, PB8MD_01, PB8MD_10, PB7MD_00, PB7MD_01, PB7MD_10, PB7MD_11, PB6MD_00, PB6MD_01, PB6MD_10, PB6MD_11, PB5MD_00, PB5MD_01, PB5MD_10, PB5MD_11, PB4MD_00, PB4MD_01, PB4MD_10, PB4MD_11, PB3MD_00, PB3MD_01, PB3MD_10, PB3MD_11, PB2MD_00, PB2MD_01, 
PB2MD_10, PB2MD_11, PB1MD_00, PB1MD_01, PB1MD_10, PB1MD_11, PB0MD_00, PB0MD_01, PB0MD_10, PB0MD_11, PB12IRQ_00, PB12IRQ_01, PB12IRQ_10, PC14MD_0, PC14MD_1, PC13MD_0, PC13MD_1, PC12MD_0, PC12MD_1, PC11MD_00, PC11MD_01, PC11MD_10, PC10MD_00, PC10MD_01, PC10MD_10, PC9MD_0, PC9MD_1, PC8MD_0, PC8MD_1, PC7MD_0, PC7MD_1, PC6MD_0, PC6MD_1, PC5MD_0, PC5MD_1, PC4MD_0, PC4MD_1, PC3MD_0, PC3MD_1, PC2MD_0, PC2MD_1, PC1MD_0, PC1MD_1, PC0MD_00, PC0MD_01, PC0MD_10, PD15MD_000, PD15MD_001, PD15MD_010, PD15MD_100, PD15MD_101, PD14MD_000, PD14MD_001, PD14MD_010, PD14MD_101, PD13MD_000, PD13MD_001, PD13MD_010, PD13MD_100, PD13MD_101, PD12MD_000, PD12MD_001, PD12MD_010, PD12MD_100, PD12MD_101, PD11MD_000, PD11MD_001, PD11MD_010, PD11MD_100, PD11MD_101, PD10MD_000, PD10MD_001, PD10MD_010, PD10MD_100, PD10MD_101, PD9MD_000, PD9MD_001, PD9MD_010, PD9MD_100, PD9MD_101, PD8MD_000, PD8MD_001, PD8MD_010, PD8MD_100, PD8MD_101, PD7MD_000, PD7MD_001, PD7MD_010, PD7MD_011, PD7MD_100, PD7MD_101, PD6MD_000, PD6MD_001, PD6MD_010, PD6MD_011, PD6MD_100, PD6MD_101, PD5MD_000, PD5MD_001, PD5MD_010, PD5MD_011, PD5MD_100, PD5MD_101, PD4MD_000, PD4MD_001, PD4MD_010, PD4MD_011, PD4MD_100, PD4MD_101, PD3MD_000, PD3MD_001, PD3MD_010, PD3MD_011, PD3MD_100, PD3MD_101, PD2MD_000, PD2MD_001, PD2MD_010, PD2MD_011, PD2MD_100, PD2MD_101, PD1MD_000, PD1MD_001, PD1MD_010, PD1MD_011, PD1MD_100, PD1MD_101, PD0MD_000, PD0MD_001, PD0MD_010, PD0MD_011, PD0MD_100, PD0MD_101, PE15MD_00, PE15MD_01, PE15MD_11, PE14MD_00, PE14MD_01, PE14MD_11, PE13MD_00, PE13MD_11, PE12MD_00, PE12MD_11, PE11MD_000, PE11MD_001, PE11MD_010, PE11MD_100, PE10MD_000, PE10MD_001, PE10MD_010, PE10MD_100, PE9MD_00, PE9MD_01, PE9MD_10, PE9MD_11, PE8MD_00, PE8MD_01, PE8MD_10, PE8MD_11, PE7MD_000, PE7MD_001, PE7MD_010, PE7MD_011, PE7MD_100, PE6MD_000, PE6MD_001, PE6MD_010, PE6MD_011, PE6MD_100, PE5MD_000, PE5MD_001, PE5MD_010, PE5MD_011, PE5MD_100, PE4MD_000, PE4MD_001, PE4MD_010, PE4MD_011, PE4MD_100, PE3MD_00, PE3MD_01, PE3MD_11, PE2MD_00, PE2MD_01, PE2MD_11, PE1MD_00, PE1MD_01, PE1MD_10, PE1MD_11, PE0MD_000, PE0MD_001, PE0MD_011, PE0MD_100, PF30MD_0, PF30MD_1, PF29MD_0, PF29MD_1, PF28MD_0, PF28MD_1, PF27MD_0, PF27MD_1, PF26MD_0, PF26MD_1, PF25MD_0, PF25MD_1, PF24MD_0, PF24MD_1, PF23MD_00, PF23MD_01, PF23MD_10, PF22MD_00, PF22MD_01, PF22MD_10, PF21MD_00, PF21MD_01, PF21MD_10, PF20MD_00, PF20MD_01, PF20MD_10, PF19MD_00, PF19MD_01, PF19MD_10, PF18MD_00, PF18MD_01, PF18MD_10, PF17MD_00, PF17MD_01, PF17MD_10, PF16MD_00, PF16MD_01, PF16MD_10, PF15MD_00, PF15MD_01, PF15MD_10, PF14MD_00, PF14MD_01, PF14MD_10, PF13MD_00, PF13MD_01, PF13MD_10, PF12MD_00, PF12MD_01, PF12MD_10, PF11MD_00, PF11MD_01, PF11MD_10, PF10MD_00, PF10MD_01, PF10MD_10, PF9MD_00, PF9MD_01, PF9MD_10, PF8MD_00, PF8MD_01, PF8MD_10, PF7MD_00, PF7MD_01, PF7MD_10, PF7MD_11, PF6MD_00, PF6MD_01, PF6MD_10, PF6MD_11, PF5MD_00, PF5MD_01, PF5MD_10, PF5MD_11, PF4MD_00, PF4MD_01, PF4MD_10, PF4MD_11, PF3MD_00, PF3MD_01, PF3MD_10, PF3MD_11, PF2MD_00, PF2MD_01, PF2MD_10, PF2MD_11, PF1MD_00, PF1MD_01, PF1MD_10, PF1MD_11, PF0MD_00, PF0MD_01, PF0MD_10, PF0MD_11, PINMUX_FUNCTION_END, PINMUX_MARK_BEGIN, PINT7_PB_MARK, PINT6_PB_MARK, PINT5_PB_MARK, PINT4_PB_MARK, PINT3_PB_MARK, PINT2_PB_MARK, PINT1_PB_MARK, PINT0_PB_MARK, PINT7_PD_MARK, PINT6_PD_MARK, PINT5_PD_MARK, PINT4_PD_MARK, PINT3_PD_MARK, PINT2_PD_MARK, PINT1_PD_MARK, PINT0_PD_MARK, IRQ7_PB_MARK, IRQ6_PB_MARK, IRQ5_PB_MARK, IRQ4_PB_MARK, IRQ3_PB_MARK, IRQ2_PB_MARK, IRQ1_PB_MARK, IRQ0_PB_MARK, IRQ7_PD_MARK, IRQ6_PD_MARK, IRQ5_PD_MARK, IRQ4_PD_MARK, IRQ3_PD_MARK, IRQ2_PD_MARK, 
IRQ1_PD_MARK, IRQ0_PD_MARK, IRQ7_PE_MARK, IRQ6_PE_MARK, IRQ5_PE_MARK, IRQ4_PE_MARK, IRQ3_PE_MARK, IRQ2_PE_MARK, IRQ1_PE_MARK, IRQ0_PE_MARK, WDTOVF_MARK, IRQOUT_MARK, REFOUT_MARK, IRQOUT_REFOUT_MARK, UBCTRG_MARK, CTX1_MARK, CRX1_MARK, CTX0_MARK, CTX0_CTX1_MARK, CRX0_MARK, CRX0_CRX1_MARK, SDA3_MARK, SCL3_MARK, SDA2_MARK, SCL2_MARK, SDA1_MARK, SCL1_MARK, SDA0_MARK, SCL0_MARK, TEND0_PD_MARK, TEND0_PE_MARK, DACK0_PD_MARK, DACK0_PE_MARK, DREQ0_PD_MARK, DREQ0_PE_MARK, TEND1_PD_MARK, TEND1_PE_MARK, DACK1_PD_MARK, DACK1_PE_MARK, DREQ1_PD_MARK, DREQ1_PE_MARK, DACK2_MARK, DREQ2_MARK, DACK3_MARK, DREQ3_MARK, ADTRG_PD_MARK, ADTRG_PE_MARK, D31_MARK, D30_MARK, D29_MARK, D28_MARK, D27_MARK, D26_MARK, D25_MARK, D24_MARK, D23_MARK, D22_MARK, D21_MARK, D20_MARK, D19_MARK, D18_MARK, D17_MARK, D16_MARK, A25_MARK, A24_MARK, A23_MARK, A22_MARK, A21_MARK, CS4_MARK, MRES_MARK, BS_MARK, IOIS16_MARK, CS1_MARK, CS6_CE1B_MARK, CE2B_MARK, CS5_CE1A_MARK, CE2A_MARK, FRAME_MARK, WAIT_MARK, RDWR_MARK, CKE_MARK, CASU_MARK, BREQ_MARK, RASU_MARK, BACK_MARK, CASL_MARK, RASL_MARK, WE3_DQMUU_AH_ICIO_WR_MARK, WE2_DQMUL_ICIORD_MARK, WE1_DQMLU_WE_MARK, WE0_DQMLL_MARK, CS3_MARK, CS2_MARK, A1_MARK, A0_MARK, CS7_MARK, TIOC4D_MARK, TIOC4C_MARK, TIOC4B_MARK, TIOC4A_MARK, TIOC3D_MARK, TIOC3C_MARK, TIOC3B_MARK, TIOC3A_MARK, TIOC2B_MARK, TIOC1B_MARK, TIOC2A_MARK, TIOC1A_MARK, TIOC0D_MARK, TIOC0C_MARK, TIOC0B_MARK, TIOC0A_MARK, TCLKD_PD_MARK, TCLKC_PD_MARK, TCLKB_PD_MARK, TCLKA_PD_MARK, TCLKD_PF_MARK, TCLKC_PF_MARK, TCLKB_PF_MARK, TCLKA_PF_MARK, SCS0_PD_MARK, SSO0_PD_MARK, SSI0_PD_MARK, SSCK0_PD_MARK, SCS0_PF_MARK, SSO0_PF_MARK, SSI0_PF_MARK, SSCK0_PF_MARK, SCS1_PD_MARK, SSO1_PD_MARK, SSI1_PD_MARK, SSCK1_PD_MARK, SCS1_PF_MARK, SSO1_PF_MARK, SSI1_PF_MARK, SSCK1_PF_MARK, TXD0_MARK, RXD0_MARK, SCK0_MARK, TXD1_MARK, RXD1_MARK, SCK1_MARK, TXD2_MARK, RXD2_MARK, SCK2_MARK, RTS3_MARK, CTS3_MARK, TXD3_MARK, RXD3_MARK, SCK3_MARK, AUDIO_CLK_MARK, SSIDATA3_MARK, SSIWS3_MARK, SSISCK3_MARK, SSIDATA2_MARK, SSIWS2_MARK, SSISCK2_MARK, SSIDATA1_MARK, SSIWS1_MARK, SSISCK1_MARK, SSIDATA0_MARK, SSIWS0_MARK, SSISCK0_MARK, FCE_MARK, FRB_MARK, NAF7_MARK, NAF6_MARK, NAF5_MARK, NAF4_MARK, NAF3_MARK, NAF2_MARK, NAF1_MARK, NAF0_MARK, FSC_MARK, FOE_MARK, FCDE_MARK, FWE_MARK, LCD_VEPWC_MARK, LCD_VCPWC_MARK, LCD_CLK_MARK, LCD_FLM_MARK, LCD_M_DISP_MARK, LCD_CL2_MARK, LCD_CL1_MARK, LCD_DON_MARK, LCD_DATA15_MARK, LCD_DATA14_MARK, LCD_DATA13_MARK, LCD_DATA12_MARK, LCD_DATA11_MARK, LCD_DATA10_MARK, LCD_DATA9_MARK, LCD_DATA8_MARK, LCD_DATA7_MARK, LCD_DATA6_MARK, LCD_DATA5_MARK, LCD_DATA4_MARK, LCD_DATA3_MARK, LCD_DATA2_MARK, LCD_DATA1_MARK, LCD_DATA0_MARK, PINMUX_MARK_END, }; static pinmux_enum_t pinmux_data[] = { /* PA */ PINMUX_DATA(PA7_DATA, PA7_IN), PINMUX_DATA(PA6_DATA, PA6_IN), PINMUX_DATA(PA5_DATA, PA5_IN), PINMUX_DATA(PA4_DATA, PA4_IN), PINMUX_DATA(PA3_DATA, PA3_IN), PINMUX_DATA(PA2_DATA, PA2_IN), PINMUX_DATA(PA1_DATA, PA1_IN), PINMUX_DATA(PA0_DATA, PA0_IN), /* PB */ PINMUX_DATA(PB12_DATA, PB12MD_00, FORCE_OUT), PINMUX_DATA(WDTOVF_MARK, PB12MD_01), PINMUX_DATA(IRQOUT_MARK, PB12MD_10, PB12IRQ_00), PINMUX_DATA(REFOUT_MARK, PB12MD_10, PB12IRQ_01), PINMUX_DATA(IRQOUT_REFOUT_MARK, PB12MD_10, PB12IRQ_10), PINMUX_DATA(UBCTRG_MARK, PB12MD_11), PINMUX_DATA(PB11_DATA, PB11MD_0, PB11_IN, PB11_OUT), PINMUX_DATA(CTX1_MARK, PB11MD_1), PINMUX_DATA(PB10_DATA, PB10MD_0, PB10_IN, PB10_OUT), PINMUX_DATA(CRX1_MARK, PB10MD_1), PINMUX_DATA(PB9_DATA, PB9MD_00, PB9_IN, PB9_OUT), PINMUX_DATA(CTX0_MARK, PB9MD_01), PINMUX_DATA(CTX0_CTX1_MARK, PB9MD_10), PINMUX_DATA(PB8_DATA, PB8MD_00, PB8_IN, 
PB8_OUT), PINMUX_DATA(CRX0_MARK, PB8MD_01), PINMUX_DATA(CRX0_CRX1_MARK, PB8MD_10), PINMUX_DATA(PB7_DATA, PB7MD_00, FORCE_IN), PINMUX_DATA(SDA3_MARK, PB7MD_01), PINMUX_DATA(PINT7_PB_MARK, PB7MD_10), PINMUX_DATA(IRQ7_PB_MARK, PB7MD_11), PINMUX_DATA(PB6_DATA, PB6MD_00, FORCE_IN), PINMUX_DATA(SCL3_MARK, PB6MD_01), PINMUX_DATA(PINT6_PB_MARK, PB6MD_10), PINMUX_DATA(IRQ6_PB_MARK, PB6MD_11), PINMUX_DATA(PB5_DATA, PB5MD_00, FORCE_IN), PINMUX_DATA(SDA2_MARK, PB6MD_01), PINMUX_DATA(PINT5_PB_MARK, PB6MD_10), PINMUX_DATA(IRQ5_PB_MARK, PB6MD_11), PINMUX_DATA(PB4_DATA, PB4MD_00, FORCE_IN), PINMUX_DATA(SCL2_MARK, PB4MD_01), PINMUX_DATA(PINT4_PB_MARK, PB4MD_10), PINMUX_DATA(IRQ4_PB_MARK, PB4MD_11), PINMUX_DATA(PB3_DATA, PB3MD_00, FORCE_IN), PINMUX_DATA(SDA1_MARK, PB3MD_01), PINMUX_DATA(PINT3_PB_MARK, PB3MD_10), PINMUX_DATA(IRQ3_PB_MARK, PB3MD_11), PINMUX_DATA(PB2_DATA, PB2MD_00, FORCE_IN), PINMUX_DATA(SCL1_MARK, PB2MD_01), PINMUX_DATA(PINT2_PB_MARK, PB2MD_10), PINMUX_DATA(IRQ2_PB_MARK, PB2MD_11), PINMUX_DATA(PB1_DATA, PB1MD_00, FORCE_IN), PINMUX_DATA(SDA0_MARK, PB1MD_01), PINMUX_DATA(PINT1_PB_MARK, PB1MD_10), PINMUX_DATA(IRQ1_PB_MARK, PB1MD_11), PINMUX_DATA(PB0_DATA, PB0MD_00, FORCE_IN), PINMUX_DATA(SCL0_MARK, PB0MD_01), PINMUX_DATA(PINT0_PB_MARK, PB0MD_10), PINMUX_DATA(IRQ0_PB_MARK, PB0MD_11), /* PC */ PINMUX_DATA(PC14_DATA, PC14MD_0, PC14_IN, PC14_OUT), PINMUX_DATA(WAIT_MARK, PC14MD_1), PINMUX_DATA(PC13_DATA, PC13MD_0, PC13_IN, PC13_OUT), PINMUX_DATA(RDWR_MARK, PC13MD_1), PINMUX_DATA(PC12_DATA, PC12MD_0, PC12_IN, PC12_OUT), PINMUX_DATA(CKE_MARK, PC12MD_1), PINMUX_DATA(PC11_DATA, PC11MD_00, PC11_IN, PC11_OUT), PINMUX_DATA(CASU_MARK, PC11MD_01), PINMUX_DATA(BREQ_MARK, PC11MD_10), PINMUX_DATA(PC10_DATA, PC10MD_00, PC10_IN, PC10_OUT), PINMUX_DATA(RASU_MARK, PC10MD_01), PINMUX_DATA(BACK_MARK, PC10MD_10), PINMUX_DATA(PC9_DATA, PC9MD_0, PC9_IN, PC9_OUT), PINMUX_DATA(CASL_MARK, PC9MD_1), PINMUX_DATA(PC8_DATA, PC8MD_0, PC8_IN, PC8_OUT), PINMUX_DATA(RASL_MARK, PC8MD_1), PINMUX_DATA(PC7_DATA, PC7MD_0, PC7_IN, PC7_OUT), PINMUX_DATA(WE3_DQMUU_AH_ICIO_WR_MARK, PC7MD_1), PINMUX_DATA(PC6_DATA, PC6MD_0, PC6_IN, PC6_OUT), PINMUX_DATA(WE2_DQMUL_ICIORD_MARK, PC6MD_1), PINMUX_DATA(PC5_DATA, PC5MD_0, PC5_IN, PC5_OUT), PINMUX_DATA(WE1_DQMLU_WE_MARK, PC5MD_1), PINMUX_DATA(PC4_DATA, PC4MD_0, PC4_IN, PC4_OUT), PINMUX_DATA(WE0_DQMLL_MARK, PC4MD_1), PINMUX_DATA(PC3_DATA, PC3MD_0, PC3_IN, PC3_OUT), PINMUX_DATA(CS3_MARK, PC3MD_1), PINMUX_DATA(PC2_DATA, PC2MD_0, PC2_IN, PC2_OUT), PINMUX_DATA(CS2_MARK, PC2MD_1), PINMUX_DATA(PC1_DATA, PC1MD_0, PC1_IN, PC1_OUT), PINMUX_DATA(A1_MARK, PC1MD_1), PINMUX_DATA(PC0_DATA, PC0MD_00, PC0_IN, PC0_OUT), PINMUX_DATA(A0_MARK, PC0MD_01), PINMUX_DATA(CS7_MARK, PC0MD_10), /* PD */ PINMUX_DATA(PD15_DATA, PD15MD_000, PD15_IN, PD15_OUT), PINMUX_DATA(D31_MARK, PD15MD_001), PINMUX_DATA(PINT7_PD_MARK, PD15MD_010), PINMUX_DATA(ADTRG_PD_MARK, PD15MD_100), PINMUX_DATA(TIOC4D_MARK, PD15MD_101), PINMUX_DATA(PD14_DATA, PD14MD_000, PD14_IN, PD14_OUT), PINMUX_DATA(D30_MARK, PD14MD_001), PINMUX_DATA(PINT6_PD_MARK, PD14MD_010), PINMUX_DATA(TIOC4C_MARK, PD14MD_101), PINMUX_DATA(PD13_DATA, PD13MD_000, PD13_IN, PD13_OUT), PINMUX_DATA(D29_MARK, PD13MD_001), PINMUX_DATA(PINT5_PD_MARK, PD13MD_010), PINMUX_DATA(TEND1_PD_MARK, PD13MD_100), PINMUX_DATA(TIOC4B_MARK, PD13MD_101), PINMUX_DATA(PD12_DATA, PD12MD_000, PD12_IN, PD12_OUT), PINMUX_DATA(D28_MARK, PD12MD_001), PINMUX_DATA(PINT4_PD_MARK, PD12MD_010), PINMUX_DATA(DACK1_PD_MARK, PD12MD_100), PINMUX_DATA(TIOC4A_MARK, PD12MD_101), PINMUX_DATA(PD11_DATA, PD11MD_000, PD11_IN, 
PD11_OUT), PINMUX_DATA(D27_MARK, PD11MD_001), PINMUX_DATA(PINT3_PD_MARK, PD11MD_010), PINMUX_DATA(DREQ1_PD_MARK, PD11MD_100), PINMUX_DATA(TIOC3D_MARK, PD11MD_101), PINMUX_DATA(PD10_DATA, PD10MD_000, PD10_IN, PD10_OUT), PINMUX_DATA(D26_MARK, PD10MD_001), PINMUX_DATA(PINT2_PD_MARK, PD10MD_010), PINMUX_DATA(TEND0_PD_MARK, PD10MD_100), PINMUX_DATA(TIOC3C_MARK, PD10MD_101), PINMUX_DATA(PD9_DATA, PD9MD_000, PD9_IN, PD9_OUT), PINMUX_DATA(D25_MARK, PD9MD_001), PINMUX_DATA(PINT1_PD_MARK, PD9MD_010), PINMUX_DATA(DACK0_PD_MARK, PD9MD_100), PINMUX_DATA(TIOC3B_MARK, PD9MD_101), PINMUX_DATA(PD8_DATA, PD8MD_000, PD8_IN, PD8_OUT), PINMUX_DATA(D24_MARK, PD8MD_001), PINMUX_DATA(PINT0_PD_MARK, PD8MD_010), PINMUX_DATA(DREQ0_PD_MARK, PD8MD_100), PINMUX_DATA(TIOC3A_MARK, PD8MD_101), PINMUX_DATA(PD7_DATA, PD7MD_000, PD7_IN, PD7_OUT), PINMUX_DATA(D23_MARK, PD7MD_001), PINMUX_DATA(IRQ7_PD_MARK, PD7MD_010), PINMUX_DATA(SCS1_PD_MARK, PD7MD_011), PINMUX_DATA(TCLKD_PD_MARK, PD7MD_100), PINMUX_DATA(TIOC2B_MARK, PD7MD_101), PINMUX_DATA(PD6_DATA, PD6MD_000, PD6_IN, PD6_OUT), PINMUX_DATA(D22_MARK, PD6MD_001), PINMUX_DATA(IRQ6_PD_MARK, PD6MD_010), PINMUX_DATA(SSO1_PD_MARK, PD6MD_011), PINMUX_DATA(TCLKC_PD_MARK, PD6MD_100), PINMUX_DATA(TIOC2A_MARK, PD6MD_101), PINMUX_DATA(PD5_DATA, PD5MD_000, PD5_IN, PD5_OUT), PINMUX_DATA(D21_MARK, PD5MD_001), PINMUX_DATA(IRQ5_PD_MARK, PD5MD_010), PINMUX_DATA(SSI1_PD_MARK, PD5MD_011), PINMUX_DATA(TCLKB_PD_MARK, PD5MD_100), PINMUX_DATA(TIOC1B_MARK, PD5MD_101), PINMUX_DATA(PD4_DATA, PD4MD_000, PD4_IN, PD4_OUT), PINMUX_DATA(D20_MARK, PD4MD_001), PINMUX_DATA(IRQ4_PD_MARK, PD4MD_010), PINMUX_DATA(SSCK1_PD_MARK, PD4MD_011), PINMUX_DATA(TCLKA_PD_MARK, PD4MD_100), PINMUX_DATA(TIOC1A_MARK, PD4MD_101), PINMUX_DATA(PD3_DATA, PD3MD_000, PD3_IN, PD3_OUT), PINMUX_DATA(D19_MARK, PD3MD_001), PINMUX_DATA(IRQ3_PD_MARK, PD3MD_010), PINMUX_DATA(SCS0_PD_MARK, PD3MD_011), PINMUX_DATA(DACK3_MARK, PD3MD_100), PINMUX_DATA(TIOC0D_MARK, PD3MD_101), PINMUX_DATA(PD2_DATA, PD2MD_000, PD2_IN, PD2_OUT), PINMUX_DATA(D18_MARK, PD2MD_001), PINMUX_DATA(IRQ2_PD_MARK, PD2MD_010), PINMUX_DATA(SSO0_PD_MARK, PD2MD_011), PINMUX_DATA(DREQ3_MARK, PD2MD_100), PINMUX_DATA(TIOC0C_MARK, PD2MD_101), PINMUX_DATA(PD1_DATA, PD1MD_000, PD1_IN, PD1_OUT), PINMUX_DATA(D17_MARK, PD1MD_001), PINMUX_DATA(IRQ1_PD_MARK, PD1MD_010), PINMUX_DATA(SSI0_PD_MARK, PD1MD_011), PINMUX_DATA(DACK2_MARK, PD1MD_100), PINMUX_DATA(TIOC0B_MARK, PD1MD_101), PINMUX_DATA(PD0_DATA, PD0MD_000, PD0_IN, PD0_OUT), PINMUX_DATA(D16_MARK, PD0MD_001), PINMUX_DATA(IRQ0_PD_MARK, PD0MD_010), PINMUX_DATA(SSCK0_PD_MARK, PD0MD_011), PINMUX_DATA(DREQ2_MARK, PD0MD_100), PINMUX_DATA(TIOC0A_MARK, PD0MD_101), /* PE */ PINMUX_DATA(PE15_DATA, PE15MD_00, PE15_IN, PE15_OUT), PINMUX_DATA(IOIS16_MARK, PE15MD_01), PINMUX_DATA(RTS3_MARK, PE15MD_11), PINMUX_DATA(PE14_DATA, PE14MD_00, PE14_IN, PE14_OUT), PINMUX_DATA(CS1_MARK, PE14MD_01), PINMUX_DATA(CTS3_MARK, PE14MD_11), PINMUX_DATA(PE13_DATA, PE13MD_00, PE13_IN, PE13_OUT), PINMUX_DATA(TXD3_MARK, PE13MD_11), PINMUX_DATA(PE12_DATA, PE12MD_00, PE12_IN, PE12_OUT), PINMUX_DATA(RXD3_MARK, PE12MD_11), PINMUX_DATA(PE11_DATA, PE11MD_000, PE11_IN, PE11_OUT), PINMUX_DATA(CS6_CE1B_MARK, PE11MD_001), PINMUX_DATA(IRQ7_PE_MARK, PE11MD_010), PINMUX_DATA(TEND1_PE_MARK, PE11MD_100), PINMUX_DATA(PE10_DATA, PE10MD_000, PE10_IN, PE10_OUT), PINMUX_DATA(CE2B_MARK, PE10MD_001), PINMUX_DATA(IRQ6_PE_MARK, PE10MD_010), PINMUX_DATA(TEND0_PE_MARK, PE10MD_100), PINMUX_DATA(PE9_DATA, PE9MD_00, PE9_IN, PE9_OUT), PINMUX_DATA(CS5_CE1A_MARK, PE9MD_01), PINMUX_DATA(IRQ5_PE_MARK, 
PE9MD_10), PINMUX_DATA(SCK3_MARK, PE9MD_11), PINMUX_DATA(PE8_DATA, PE8MD_00, PE8_IN, PE8_OUT), PINMUX_DATA(CE2A_MARK, PE8MD_01), PINMUX_DATA(IRQ4_PE_MARK, PE8MD_10), PINMUX_DATA(SCK2_MARK, PE8MD_11), PINMUX_DATA(PE7_DATA, PE7MD_000, PE7_IN, PE7_OUT), PINMUX_DATA(FRAME_MARK, PE7MD_001), PINMUX_DATA(IRQ3_PE_MARK, PE7MD_010), PINMUX_DATA(TXD2_MARK, PE7MD_011), PINMUX_DATA(DACK1_PE_MARK, PE7MD_100), PINMUX_DATA(PE6_DATA, PE6MD_000, PE6_IN, PE6_OUT), PINMUX_DATA(A25_MARK, PE6MD_001), PINMUX_DATA(IRQ2_PE_MARK, PE6MD_010), PINMUX_DATA(RXD2_MARK, PE6MD_011), PINMUX_DATA(DREQ1_PE_MARK, PE6MD_100), PINMUX_DATA(PE5_DATA, PE5MD_000, PE5_IN, PE5_OUT), PINMUX_DATA(A24_MARK, PE5MD_001), PINMUX_DATA(IRQ1_PE_MARK, PE5MD_010), PINMUX_DATA(TXD1_MARK, PE5MD_011), PINMUX_DATA(DACK0_PE_MARK, PE5MD_100), PINMUX_DATA(PE4_DATA, PE4MD_000, PE4_IN, PE4_OUT), PINMUX_DATA(A23_MARK, PE4MD_001), PINMUX_DATA(IRQ0_PE_MARK, PE4MD_010), PINMUX_DATA(RXD1_MARK, PE4MD_011), PINMUX_DATA(DREQ0_PE_MARK, PE4MD_100), PINMUX_DATA(PE3_DATA, PE3MD_00, PE3_IN, PE3_OUT), PINMUX_DATA(A22_MARK, PE3MD_01), PINMUX_DATA(SCK1_MARK, PE3MD_11), PINMUX_DATA(PE2_DATA, PE2MD_00, PE2_IN, PE2_OUT), PINMUX_DATA(A21_MARK, PE2MD_01), PINMUX_DATA(SCK0_MARK, PE2MD_11), PINMUX_DATA(PE1_DATA, PE1MD_00, PE1_IN, PE1_OUT), PINMUX_DATA(CS4_MARK, PE1MD_01), PINMUX_DATA(MRES_MARK, PE1MD_10), PINMUX_DATA(TXD0_MARK, PE1MD_11), PINMUX_DATA(PE0_DATA, PE0MD_000, PE0_IN, PE0_OUT), PINMUX_DATA(BS_MARK, PE0MD_001), PINMUX_DATA(RXD0_MARK, PE0MD_011), PINMUX_DATA(ADTRG_PE_MARK, PE0MD_100), /* PF */ PINMUX_DATA(PF30_DATA, PF30MD_0, PF30_IN, PF30_OUT), PINMUX_DATA(AUDIO_CLK_MARK, PF30MD_1), PINMUX_DATA(PF29_DATA, PF29MD_0, PF29_IN, PF29_OUT), PINMUX_DATA(SSIDATA3_MARK, PF29MD_1), PINMUX_DATA(PF28_DATA, PF28MD_0, PF28_IN, PF28_OUT), PINMUX_DATA(SSIWS3_MARK, PF28MD_1), PINMUX_DATA(PF27_DATA, PF27MD_0, PF27_IN, PF27_OUT), PINMUX_DATA(SSISCK3_MARK, PF27MD_1), PINMUX_DATA(PF26_DATA, PF26MD_0, PF26_IN, PF26_OUT), PINMUX_DATA(SSIDATA2_MARK, PF26MD_1), PINMUX_DATA(PF25_DATA, PF25MD_0, PF25_IN, PF25_OUT), PINMUX_DATA(SSIWS2_MARK, PF25MD_1), PINMUX_DATA(PF24_DATA, PF24MD_0, PF24_IN, PF24_OUT), PINMUX_DATA(SSISCK2_MARK, PF24MD_1), PINMUX_DATA(PF23_DATA, PF23MD_00, PF23_IN, PF23_OUT), PINMUX_DATA(SSIDATA1_MARK, PF23MD_01), PINMUX_DATA(LCD_VEPWC_MARK, PF23MD_10), PINMUX_DATA(PF22_DATA, PF22MD_00, PF22_IN, PF22_OUT), PINMUX_DATA(SSIWS1_MARK, PF22MD_01), PINMUX_DATA(LCD_VCPWC_MARK, PF22MD_10), PINMUX_DATA(PF21_DATA, PF21MD_00, PF21_IN, PF21_OUT), PINMUX_DATA(SSISCK1_MARK, PF21MD_01), PINMUX_DATA(LCD_CLK_MARK, PF21MD_10), PINMUX_DATA(PF20_DATA, PF20MD_00, PF20_IN, PF20_OUT), PINMUX_DATA(SSIDATA0_MARK, PF20MD_01), PINMUX_DATA(LCD_FLM_MARK, PF20MD_10), PINMUX_DATA(PF19_DATA, PF19MD_00, PF19_IN, PF19_OUT), PINMUX_DATA(SSIWS0_MARK, PF19MD_01), PINMUX_DATA(LCD_M_DISP_MARK, PF19MD_10), PINMUX_DATA(PF18_DATA, PF18MD_00, PF18_IN, PF18_OUT), PINMUX_DATA(SSISCK0_MARK, PF18MD_01), PINMUX_DATA(LCD_CL2_MARK, PF18MD_10), PINMUX_DATA(PF17_DATA, PF17MD_00, PF17_IN, PF17_OUT), PINMUX_DATA(FCE_MARK, PF17MD_01), PINMUX_DATA(LCD_CL1_MARK, PF17MD_10), PINMUX_DATA(PF16_DATA, PF16MD_00, PF16_IN, PF16_OUT), PINMUX_DATA(FRB_MARK, PF16MD_01), PINMUX_DATA(LCD_DON_MARK, PF16MD_10), PINMUX_DATA(PF15_DATA, PF15MD_00, PF15_IN, PF15_OUT), PINMUX_DATA(NAF7_MARK, PF15MD_01), PINMUX_DATA(LCD_DATA15_MARK, PF15MD_10), PINMUX_DATA(PF14_DATA, PF14MD_00, PF14_IN, PF14_OUT), PINMUX_DATA(NAF6_MARK, PF14MD_01), PINMUX_DATA(LCD_DATA14_MARK, PF14MD_10), PINMUX_DATA(PF13_DATA, PF13MD_00, PF13_IN, PF13_OUT), PINMUX_DATA(NAF5_MARK, 
PF13MD_01), PINMUX_DATA(LCD_DATA13_MARK, PF13MD_10), PINMUX_DATA(PF12_DATA, PF12MD_00, PF12_IN, PF12_OUT), PINMUX_DATA(NAF4_MARK, PF12MD_01), PINMUX_DATA(LCD_DATA12_MARK, PF12MD_10), PINMUX_DATA(PF11_DATA, PF11MD_00, PF11_IN, PF11_OUT), PINMUX_DATA(NAF3_MARK, PF11MD_01), PINMUX_DATA(LCD_DATA11_MARK, PF11MD_10), PINMUX_DATA(PF10_DATA, PF10MD_00, PF10_IN, PF10_OUT), PINMUX_DATA(NAF2_MARK, PF10MD_01), PINMUX_DATA(LCD_DATA10_MARK, PF10MD_10), PINMUX_DATA(PF9_DATA, PF9MD_00, PF9_IN, PF9_OUT), PINMUX_DATA(NAF1_MARK, PF9MD_01), PINMUX_DATA(LCD_DATA9_MARK, PF9MD_10), PINMUX_DATA(PF8_DATA, PF8MD_00, PF8_IN, PF8_OUT), PINMUX_DATA(NAF0_MARK, PF8MD_01), PINMUX_DATA(LCD_DATA8_MARK, PF8MD_10), PINMUX_DATA(PF7_DATA, PF7MD_00, PF7_IN, PF7_OUT), PINMUX_DATA(FSC_MARK, PF7MD_01), PINMUX_DATA(LCD_DATA7_MARK, PF7MD_10), PINMUX_DATA(SCS1_PF_MARK, PF7MD_11), PINMUX_DATA(PF6_DATA, PF6MD_00, PF6_IN, PF6_OUT), PINMUX_DATA(FOE_MARK, PF6MD_01), PINMUX_DATA(LCD_DATA6_MARK, PF6MD_10), PINMUX_DATA(SSO1_PF_MARK, PF6MD_11), PINMUX_DATA(PF5_DATA, PF5MD_00, PF5_IN, PF5_OUT), PINMUX_DATA(FCDE_MARK, PF5MD_01), PINMUX_DATA(LCD_DATA5_MARK, PF5MD_10), PINMUX_DATA(SSI1_PF_MARK, PF5MD_11), PINMUX_DATA(PF4_DATA, PF4MD_00, PF4_IN, PF4_OUT), PINMUX_DATA(FWE_MARK, PF4MD_01), PINMUX_DATA(LCD_DATA4_MARK, PF4MD_10), PINMUX_DATA(SSCK1_PF_MARK, PF4MD_11), PINMUX_DATA(PF3_DATA, PF3MD_00, PF3_IN, PF3_OUT), PINMUX_DATA(TCLKD_PF_MARK, PF3MD_01), PINMUX_DATA(LCD_DATA3_MARK, PF3MD_10), PINMUX_DATA(SCS0_PF_MARK, PF3MD_11), PINMUX_DATA(PF2_DATA, PF2MD_00, PF2_IN, PF2_OUT), PINMUX_DATA(TCLKC_PF_MARK, PF2MD_01), PINMUX_DATA(LCD_DATA2_MARK, PF2MD_10), PINMUX_DATA(SSO0_PF_MARK, PF2MD_11), PINMUX_DATA(PF1_DATA, PF1MD_00, PF1_IN, PF1_OUT), PINMUX_DATA(TCLKB_PF_MARK, PF1MD_01), PINMUX_DATA(LCD_DATA1_MARK, PF1MD_10), PINMUX_DATA(SSI0_PF_MARK, PF1MD_11), PINMUX_DATA(PF0_DATA, PF0MD_00, PF0_IN, PF0_OUT), PINMUX_DATA(TCLKA_PF_MARK, PF0MD_01), PINMUX_DATA(LCD_DATA0_MARK, PF0MD_10), PINMUX_DATA(SSCK0_PF_MARK, PF0MD_11), }; static struct pinmux_gpio pinmux_gpios[] = { /* PA */ PINMUX_GPIO(GPIO_PA7, PA7_DATA), PINMUX_GPIO(GPIO_PA6, PA6_DATA), PINMUX_GPIO(GPIO_PA5, PA5_DATA), PINMUX_GPIO(GPIO_PA4, PA4_DATA), PINMUX_GPIO(GPIO_PA3, PA3_DATA), PINMUX_GPIO(GPIO_PA2, PA2_DATA), PINMUX_GPIO(GPIO_PA1, PA1_DATA), PINMUX_GPIO(GPIO_PA0, PA0_DATA), /* PB */ PINMUX_GPIO(GPIO_PB12, PB12_DATA), PINMUX_GPIO(GPIO_PB11, PB11_DATA), PINMUX_GPIO(GPIO_PB10, PB10_DATA), PINMUX_GPIO(GPIO_PB9, PB9_DATA), PINMUX_GPIO(GPIO_PB8, PB8_DATA), PINMUX_GPIO(GPIO_PB7, PB7_DATA), PINMUX_GPIO(GPIO_PB6, PB6_DATA), PINMUX_GPIO(GPIO_PB5, PB5_DATA), PINMUX_GPIO(GPIO_PB4, PB4_DATA), PINMUX_GPIO(GPIO_PB3, PB3_DATA), PINMUX_GPIO(GPIO_PB2, PB2_DATA), PINMUX_GPIO(GPIO_PB1, PB1_DATA), PINMUX_GPIO(GPIO_PB0, PB0_DATA), /* PC */ PINMUX_GPIO(GPIO_PC14, PC14_DATA), PINMUX_GPIO(GPIO_PC13, PC13_DATA), PINMUX_GPIO(GPIO_PC12, PC12_DATA), PINMUX_GPIO(GPIO_PC11, PC11_DATA), PINMUX_GPIO(GPIO_PC10, PC10_DATA), PINMUX_GPIO(GPIO_PC9, PC9_DATA), PINMUX_GPIO(GPIO_PC8, PC8_DATA), PINMUX_GPIO(GPIO_PC7, PC7_DATA), PINMUX_GPIO(GPIO_PC6, PC6_DATA), PINMUX_GPIO(GPIO_PC5, PC5_DATA), PINMUX_GPIO(GPIO_PC4, PC4_DATA), PINMUX_GPIO(GPIO_PC3, PC3_DATA), PINMUX_GPIO(GPIO_PC2, PC2_DATA), PINMUX_GPIO(GPIO_PC1, PC1_DATA), PINMUX_GPIO(GPIO_PC0, PC0_DATA), /* PD */ PINMUX_GPIO(GPIO_PD15, PD15_DATA), PINMUX_GPIO(GPIO_PD14, PD14_DATA), PINMUX_GPIO(GPIO_PD13, PD13_DATA), PINMUX_GPIO(GPIO_PD12, PD12_DATA), PINMUX_GPIO(GPIO_PD11, PD11_DATA), PINMUX_GPIO(GPIO_PD10, PD10_DATA), PINMUX_GPIO(GPIO_PD9, PD9_DATA), PINMUX_GPIO(GPIO_PD8, PD8_DATA), 
PINMUX_GPIO(GPIO_PD7, PD7_DATA), PINMUX_GPIO(GPIO_PD6, PD6_DATA), PINMUX_GPIO(GPIO_PD5, PD5_DATA), PINMUX_GPIO(GPIO_PD4, PD4_DATA), PINMUX_GPIO(GPIO_PD3, PD3_DATA), PINMUX_GPIO(GPIO_PD2, PD2_DATA), PINMUX_GPIO(GPIO_PD1, PD1_DATA), PINMUX_GPIO(GPIO_PD0, PD0_DATA), /* PE */ PINMUX_GPIO(GPIO_PE15, PE15_DATA), PINMUX_GPIO(GPIO_PE14, PE14_DATA), PINMUX_GPIO(GPIO_PE13, PE13_DATA), PINMUX_GPIO(GPIO_PE12, PE12_DATA), PINMUX_GPIO(GPIO_PE11, PE11_DATA), PINMUX_GPIO(GPIO_PE10, PE10_DATA), PINMUX_GPIO(GPIO_PE9, PE9_DATA), PINMUX_GPIO(GPIO_PE8, PE8_DATA), PINMUX_GPIO(GPIO_PE7, PE7_DATA), PINMUX_GPIO(GPIO_PE6, PE6_DATA), PINMUX_GPIO(GPIO_PE5, PE5_DATA), PINMUX_GPIO(GPIO_PE4, PE4_DATA), PINMUX_GPIO(GPIO_PE3, PE3_DATA), PINMUX_GPIO(GPIO_PE2, PE2_DATA), PINMUX_GPIO(GPIO_PE1, PE1_DATA), PINMUX_GPIO(GPIO_PE0, PE0_DATA), /* PF */ PINMUX_GPIO(GPIO_PF30, PF30_DATA), PINMUX_GPIO(GPIO_PF29, PF29_DATA), PINMUX_GPIO(GPIO_PF28, PF28_DATA), PINMUX_GPIO(GPIO_PF27, PF27_DATA), PINMUX_GPIO(GPIO_PF26, PF26_DATA), PINMUX_GPIO(GPIO_PF25, PF25_DATA), PINMUX_GPIO(GPIO_PF24, PF24_DATA), PINMUX_GPIO(GPIO_PF23, PF23_DATA), PINMUX_GPIO(GPIO_PF22, PF22_DATA), PINMUX_GPIO(GPIO_PF21, PF21_DATA), PINMUX_GPIO(GPIO_PF20, PF20_DATA), PINMUX_GPIO(GPIO_PF19, PF19_DATA), PINMUX_GPIO(GPIO_PF18, PF18_DATA), PINMUX_GPIO(GPIO_PF17, PF17_DATA), PINMUX_GPIO(GPIO_PF16, PF16_DATA), PINMUX_GPIO(GPIO_PF15, PF15_DATA), PINMUX_GPIO(GPIO_PF14, PF14_DATA), PINMUX_GPIO(GPIO_PF13, PF13_DATA), PINMUX_GPIO(GPIO_PF12, PF12_DATA), PINMUX_GPIO(GPIO_PF11, PF11_DATA), PINMUX_GPIO(GPIO_PF10, PF10_DATA), PINMUX_GPIO(GPIO_PF9, PF9_DATA), PINMUX_GPIO(GPIO_PF8, PF8_DATA), PINMUX_GPIO(GPIO_PF7, PF7_DATA), PINMUX_GPIO(GPIO_PF6, PF6_DATA), PINMUX_GPIO(GPIO_PF5, PF5_DATA), PINMUX_GPIO(GPIO_PF4, PF4_DATA), PINMUX_GPIO(GPIO_PF3, PF3_DATA), PINMUX_GPIO(GPIO_PF2, PF2_DATA), PINMUX_GPIO(GPIO_PF1, PF1_DATA), PINMUX_GPIO(GPIO_PF0, PF0_DATA), /* INTC */ PINMUX_GPIO(GPIO_FN_PINT7_PB, PINT7_PB_MARK), PINMUX_GPIO(GPIO_FN_PINT6_PB, PINT6_PB_MARK), PINMUX_GPIO(GPIO_FN_PINT5_PB, PINT5_PB_MARK), PINMUX_GPIO(GPIO_FN_PINT4_PB, PINT4_PB_MARK), PINMUX_GPIO(GPIO_FN_PINT3_PB, PINT3_PB_MARK), PINMUX_GPIO(GPIO_FN_PINT2_PB, PINT2_PB_MARK), PINMUX_GPIO(GPIO_FN_PINT1_PB, PINT1_PB_MARK), PINMUX_GPIO(GPIO_FN_PINT0_PB, PINT0_PB_MARK), PINMUX_GPIO(GPIO_FN_PINT7_PD, PINT7_PD_MARK), PINMUX_GPIO(GPIO_FN_PINT6_PD, PINT6_PD_MARK), PINMUX_GPIO(GPIO_FN_PINT5_PD, PINT5_PD_MARK), PINMUX_GPIO(GPIO_FN_PINT4_PD, PINT4_PD_MARK), PINMUX_GPIO(GPIO_FN_PINT3_PD, PINT3_PD_MARK), PINMUX_GPIO(GPIO_FN_PINT2_PD, PINT2_PD_MARK), PINMUX_GPIO(GPIO_FN_PINT1_PD, PINT1_PD_MARK), PINMUX_GPIO(GPIO_FN_PINT0_PD, PINT0_PD_MARK), PINMUX_GPIO(GPIO_FN_IRQ7_PB, IRQ7_PB_MARK), PINMUX_GPIO(GPIO_FN_IRQ6_PB, IRQ6_PB_MARK), PINMUX_GPIO(GPIO_FN_IRQ5_PB, IRQ5_PB_MARK), PINMUX_GPIO(GPIO_FN_IRQ4_PB, IRQ4_PB_MARK), PINMUX_GPIO(GPIO_FN_IRQ3_PB, IRQ3_PB_MARK), PINMUX_GPIO(GPIO_FN_IRQ2_PB, IRQ2_PB_MARK), PINMUX_GPIO(GPIO_FN_IRQ1_PB, IRQ1_PB_MARK), PINMUX_GPIO(GPIO_FN_IRQ0_PB, IRQ0_PB_MARK), PINMUX_GPIO(GPIO_FN_IRQ7_PD, IRQ7_PD_MARK), PINMUX_GPIO(GPIO_FN_IRQ6_PD, IRQ6_PD_MARK), PINMUX_GPIO(GPIO_FN_IRQ5_PD, IRQ5_PD_MARK), PINMUX_GPIO(GPIO_FN_IRQ4_PD, IRQ4_PD_MARK), PINMUX_GPIO(GPIO_FN_IRQ3_PD, IRQ3_PD_MARK), PINMUX_GPIO(GPIO_FN_IRQ2_PD, IRQ2_PD_MARK), PINMUX_GPIO(GPIO_FN_IRQ1_PD, IRQ1_PD_MARK), PINMUX_GPIO(GPIO_FN_IRQ0_PD, IRQ0_PD_MARK), PINMUX_GPIO(GPIO_FN_IRQ7_PE, IRQ7_PE_MARK), PINMUX_GPIO(GPIO_FN_IRQ6_PE, IRQ6_PE_MARK), PINMUX_GPIO(GPIO_FN_IRQ5_PE, IRQ5_PE_MARK), PINMUX_GPIO(GPIO_FN_IRQ4_PE, IRQ4_PE_MARK), PINMUX_GPIO(GPIO_FN_IRQ3_PE, 
IRQ3_PE_MARK), PINMUX_GPIO(GPIO_FN_IRQ2_PE, IRQ2_PE_MARK), PINMUX_GPIO(GPIO_FN_IRQ1_PE, IRQ1_PE_MARK), PINMUX_GPIO(GPIO_FN_IRQ0_PE, IRQ0_PE_MARK), PINMUX_GPIO(GPIO_FN_WDTOVF, WDTOVF_MARK), PINMUX_GPIO(GPIO_FN_IRQOUT, IRQOUT_MARK), PINMUX_GPIO(GPIO_FN_REFOUT, REFOUT_MARK), PINMUX_GPIO(GPIO_FN_IRQOUT_REFOUT, IRQOUT_REFOUT_MARK), PINMUX_GPIO(GPIO_FN_UBCTRG, UBCTRG_MARK), /* CAN */ PINMUX_GPIO(GPIO_FN_CTX1, CTX1_MARK), PINMUX_GPIO(GPIO_FN_CRX1, CRX1_MARK), PINMUX_GPIO(GPIO_FN_CTX0, CTX0_MARK), PINMUX_GPIO(GPIO_FN_CTX0_CTX1, CTX0_CTX1_MARK), PINMUX_GPIO(GPIO_FN_CRX0, CRX0_MARK), PINMUX_GPIO(GPIO_FN_CRX0_CRX1, CRX0_CRX1_MARK), /* IIC3 */ PINMUX_GPIO(GPIO_FN_SDA3, SDA3_MARK), PINMUX_GPIO(GPIO_FN_SCL3, SCL3_MARK), PINMUX_GPIO(GPIO_FN_SDA2, SDA2_MARK), PINMUX_GPIO(GPIO_FN_SCL2, SCL2_MARK), PINMUX_GPIO(GPIO_FN_SDA1, SDA1_MARK), PINMUX_GPIO(GPIO_FN_SCL1, SCL1_MARK), PINMUX_GPIO(GPIO_FN_SDA0, SDA0_MARK), PINMUX_GPIO(GPIO_FN_SCL0, SCL0_MARK), /* DMAC */ PINMUX_GPIO(GPIO_FN_TEND0_PD, TEND0_PD_MARK), PINMUX_GPIO(GPIO_FN_TEND0_PE, TEND0_PE_MARK), PINMUX_GPIO(GPIO_FN_DACK0_PD, DACK0_PD_MARK), PINMUX_GPIO(GPIO_FN_DACK0_PE, DACK0_PE_MARK), PINMUX_GPIO(GPIO_FN_DREQ0_PD, DREQ0_PD_MARK), PINMUX_GPIO(GPIO_FN_DREQ0_PE, DREQ0_PE_MARK), PINMUX_GPIO(GPIO_FN_TEND1_PD, TEND1_PD_MARK), PINMUX_GPIO(GPIO_FN_TEND1_PE, TEND1_PE_MARK), PINMUX_GPIO(GPIO_FN_DACK1_PD, DACK1_PD_MARK), PINMUX_GPIO(GPIO_FN_DACK1_PE, DACK1_PE_MARK), PINMUX_GPIO(GPIO_FN_DREQ1_PD, DREQ1_PD_MARK), PINMUX_GPIO(GPIO_FN_DREQ1_PE, DREQ1_PE_MARK), PINMUX_GPIO(GPIO_FN_DACK2, DACK2_MARK), PINMUX_GPIO(GPIO_FN_DREQ2, DREQ2_MARK), PINMUX_GPIO(GPIO_FN_DACK3, DACK3_MARK), PINMUX_GPIO(GPIO_FN_DREQ3, DREQ3_MARK), /* ADC */ PINMUX_GPIO(GPIO_FN_ADTRG_PD, ADTRG_PD_MARK), PINMUX_GPIO(GPIO_FN_ADTRG_PE, ADTRG_PE_MARK), /* BSC */ PINMUX_GPIO(GPIO_FN_D31, D31_MARK), PINMUX_GPIO(GPIO_FN_D30, D30_MARK), PINMUX_GPIO(GPIO_FN_D29, D29_MARK), PINMUX_GPIO(GPIO_FN_D28, D28_MARK), PINMUX_GPIO(GPIO_FN_D27, D27_MARK), PINMUX_GPIO(GPIO_FN_D26, D26_MARK), PINMUX_GPIO(GPIO_FN_D25, D25_MARK), PINMUX_GPIO(GPIO_FN_D24, D24_MARK), PINMUX_GPIO(GPIO_FN_D23, D23_MARK), PINMUX_GPIO(GPIO_FN_D22, D22_MARK), PINMUX_GPIO(GPIO_FN_D21, D21_MARK), PINMUX_GPIO(GPIO_FN_D20, D20_MARK), PINMUX_GPIO(GPIO_FN_D19, D19_MARK), PINMUX_GPIO(GPIO_FN_D18, D18_MARK), PINMUX_GPIO(GPIO_FN_D17, D17_MARK), PINMUX_GPIO(GPIO_FN_D16, D16_MARK), PINMUX_GPIO(GPIO_FN_A25, A25_MARK), PINMUX_GPIO(GPIO_FN_A24, A24_MARK), PINMUX_GPIO(GPIO_FN_A23, A23_MARK), PINMUX_GPIO(GPIO_FN_A22, A22_MARK), PINMUX_GPIO(GPIO_FN_A21, A21_MARK), PINMUX_GPIO(GPIO_FN_CS4, CS4_MARK), PINMUX_GPIO(GPIO_FN_MRES, MRES_MARK), PINMUX_GPIO(GPIO_FN_BS, BS_MARK), PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK), PINMUX_GPIO(GPIO_FN_CS1, CS1_MARK), PINMUX_GPIO(GPIO_FN_CS6_CE1B, CS6_CE1B_MARK), PINMUX_GPIO(GPIO_FN_CE2B, CE2B_MARK), PINMUX_GPIO(GPIO_FN_CS5_CE1A, CS5_CE1A_MARK), PINMUX_GPIO(GPIO_FN_CE2A, CE2A_MARK), PINMUX_GPIO(GPIO_FN_FRAME, FRAME_MARK), PINMUX_GPIO(GPIO_FN_WAIT, WAIT_MARK), PINMUX_GPIO(GPIO_FN_RDWR, RDWR_MARK), PINMUX_GPIO(GPIO_FN_CKE, CKE_MARK), PINMUX_GPIO(GPIO_FN_CASU, CASU_MARK), PINMUX_GPIO(GPIO_FN_BREQ, BREQ_MARK), PINMUX_GPIO(GPIO_FN_RASU, RASU_MARK), PINMUX_GPIO(GPIO_FN_BACK, BACK_MARK), PINMUX_GPIO(GPIO_FN_CASL, CASL_MARK), PINMUX_GPIO(GPIO_FN_RASL, RASL_MARK), PINMUX_GPIO(GPIO_FN_WE3_DQMUU_AH_ICIO_WR, WE3_DQMUU_AH_ICIO_WR_MARK), PINMUX_GPIO(GPIO_FN_WE2_DQMUL_ICIORD, WE2_DQMUL_ICIORD_MARK), PINMUX_GPIO(GPIO_FN_WE1_DQMLU_WE, WE1_DQMLU_WE_MARK), PINMUX_GPIO(GPIO_FN_WE0_DQMLL, WE0_DQMLL_MARK), PINMUX_GPIO(GPIO_FN_CS3, CS3_MARK), 
PINMUX_GPIO(GPIO_FN_CS2, CS2_MARK), PINMUX_GPIO(GPIO_FN_A1, A1_MARK), PINMUX_GPIO(GPIO_FN_A0, A0_MARK), PINMUX_GPIO(GPIO_FN_CS7, CS7_MARK), /* TMU */ PINMUX_GPIO(GPIO_FN_TIOC4D, TIOC4D_MARK), PINMUX_GPIO(GPIO_FN_TIOC4C, TIOC4C_MARK), PINMUX_GPIO(GPIO_FN_TIOC4B, TIOC4B_MARK), PINMUX_GPIO(GPIO_FN_TIOC4A, TIOC4A_MARK), PINMUX_GPIO(GPIO_FN_TIOC3D, TIOC3D_MARK), PINMUX_GPIO(GPIO_FN_TIOC3C, TIOC3C_MARK), PINMUX_GPIO(GPIO_FN_TIOC3B, TIOC3B_MARK), PINMUX_GPIO(GPIO_FN_TIOC3A, TIOC3A_MARK), PINMUX_GPIO(GPIO_FN_TIOC2B, TIOC2B_MARK), PINMUX_GPIO(GPIO_FN_TIOC1B, TIOC1B_MARK), PINMUX_GPIO(GPIO_FN_TIOC2A, TIOC2A_MARK), PINMUX_GPIO(GPIO_FN_TIOC1A, TIOC1A_MARK), PINMUX_GPIO(GPIO_FN_TIOC0D, TIOC0D_MARK), PINMUX_GPIO(GPIO_FN_TIOC0C, TIOC0C_MARK), PINMUX_GPIO(GPIO_FN_TIOC0B, TIOC0B_MARK), PINMUX_GPIO(GPIO_FN_TIOC0A, TIOC0A_MARK), PINMUX_GPIO(GPIO_FN_TCLKD_PD, TCLKD_PD_MARK), PINMUX_GPIO(GPIO_FN_TCLKC_PD, TCLKC_PD_MARK), PINMUX_GPIO(GPIO_FN_TCLKB_PD, TCLKB_PD_MARK), PINMUX_GPIO(GPIO_FN_TCLKA_PD, TCLKA_PD_MARK), PINMUX_GPIO(GPIO_FN_TCLKD_PF, TCLKD_PF_MARK), PINMUX_GPIO(GPIO_FN_TCLKC_PF, TCLKC_PF_MARK), PINMUX_GPIO(GPIO_FN_TCLKB_PF, TCLKB_PF_MARK), PINMUX_GPIO(GPIO_FN_TCLKA_PF, TCLKA_PF_MARK), /* SSU */ PINMUX_GPIO(GPIO_FN_SCS0_PD, SCS0_PD_MARK), PINMUX_GPIO(GPIO_FN_SSO0_PD, SSO0_PD_MARK), PINMUX_GPIO(GPIO_FN_SSI0_PD, SSI0_PD_MARK), PINMUX_GPIO(GPIO_FN_SSCK0_PD, SSCK0_PD_MARK), PINMUX_GPIO(GPIO_FN_SCS0_PF, SCS0_PF_MARK), PINMUX_GPIO(GPIO_FN_SSO0_PF, SSO0_PF_MARK), PINMUX_GPIO(GPIO_FN_SSI0_PF, SSI0_PF_MARK), PINMUX_GPIO(GPIO_FN_SSCK0_PF, SSCK0_PF_MARK), PINMUX_GPIO(GPIO_FN_SCS1_PD, SCS1_PD_MARK), PINMUX_GPIO(GPIO_FN_SSO1_PD, SSO1_PD_MARK), PINMUX_GPIO(GPIO_FN_SSI1_PD, SSI1_PD_MARK), PINMUX_GPIO(GPIO_FN_SSCK1_PD, SSCK1_PD_MARK), PINMUX_GPIO(GPIO_FN_SCS1_PF, SCS1_PF_MARK), PINMUX_GPIO(GPIO_FN_SSO1_PF, SSO1_PF_MARK), PINMUX_GPIO(GPIO_FN_SSI1_PF, SSI1_PF_MARK), PINMUX_GPIO(GPIO_FN_SSCK1_PF, SSCK1_PF_MARK), /* SCIF */ PINMUX_GPIO(GPIO_FN_TXD0, TXD0_MARK), PINMUX_GPIO(GPIO_FN_RXD0, RXD0_MARK), PINMUX_GPIO(GPIO_FN_SCK0, SCK0_MARK), PINMUX_GPIO(GPIO_FN_TXD1, TXD1_MARK), PINMUX_GPIO(GPIO_FN_RXD1, RXD1_MARK), PINMUX_GPIO(GPIO_FN_SCK1, SCK1_MARK), PINMUX_GPIO(GPIO_FN_TXD2, TXD2_MARK), PINMUX_GPIO(GPIO_FN_RXD2, RXD2_MARK), PINMUX_GPIO(GPIO_FN_SCK2, SCK2_MARK), PINMUX_GPIO(GPIO_FN_RTS3, RTS3_MARK), PINMUX_GPIO(GPIO_FN_CTS3, CTS3_MARK), PINMUX_GPIO(GPIO_FN_TXD3, TXD3_MARK), PINMUX_GPIO(GPIO_FN_RXD3, RXD3_MARK), PINMUX_GPIO(GPIO_FN_SCK3, SCK3_MARK), /* SSI */ PINMUX_GPIO(GPIO_FN_AUDIO_CLK, AUDIO_CLK_MARK), PINMUX_GPIO(GPIO_FN_SSIDATA3, SSIDATA3_MARK), PINMUX_GPIO(GPIO_FN_SSIWS3, SSIWS3_MARK), PINMUX_GPIO(GPIO_FN_SSISCK3, SSISCK3_MARK), PINMUX_GPIO(GPIO_FN_SSIDATA2, SSIDATA2_MARK), PINMUX_GPIO(GPIO_FN_SSIWS2, SSIWS2_MARK), PINMUX_GPIO(GPIO_FN_SSISCK2, SSISCK2_MARK), PINMUX_GPIO(GPIO_FN_SSIDATA1, SSIDATA1_MARK), PINMUX_GPIO(GPIO_FN_SSIWS1, SSIWS1_MARK), PINMUX_GPIO(GPIO_FN_SSISCK1, SSISCK1_MARK), PINMUX_GPIO(GPIO_FN_SSIDATA0, SSIDATA0_MARK), PINMUX_GPIO(GPIO_FN_SSIWS0, SSIWS0_MARK), PINMUX_GPIO(GPIO_FN_SSISCK0, SSISCK0_MARK), /* FLCTL */ PINMUX_GPIO(GPIO_FN_FCE, FCE_MARK), PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK), PINMUX_GPIO(GPIO_FN_NAF7, NAF7_MARK), PINMUX_GPIO(GPIO_FN_NAF6, NAF6_MARK), PINMUX_GPIO(GPIO_FN_NAF5, NAF5_MARK), PINMUX_GPIO(GPIO_FN_NAF4, NAF4_MARK), PINMUX_GPIO(GPIO_FN_NAF3, NAF3_MARK), PINMUX_GPIO(GPIO_FN_NAF2, NAF2_MARK), PINMUX_GPIO(GPIO_FN_NAF1, NAF1_MARK), PINMUX_GPIO(GPIO_FN_NAF0, NAF0_MARK), PINMUX_GPIO(GPIO_FN_FSC, FSC_MARK), PINMUX_GPIO(GPIO_FN_FOE, FOE_MARK), PINMUX_GPIO(GPIO_FN_FCDE, FCDE_MARK), 
PINMUX_GPIO(GPIO_FN_FWE, FWE_MARK), /* LCDC */ PINMUX_GPIO(GPIO_FN_LCD_VEPWC, LCD_VEPWC_MARK), PINMUX_GPIO(GPIO_FN_LCD_VCPWC, LCD_VCPWC_MARK), PINMUX_GPIO(GPIO_FN_LCD_CLK, LCD_CLK_MARK), PINMUX_GPIO(GPIO_FN_LCD_FLM, LCD_FLM_MARK), PINMUX_GPIO(GPIO_FN_LCD_M_DISP, LCD_M_DISP_MARK), PINMUX_GPIO(GPIO_FN_LCD_CL2, LCD_CL2_MARK), PINMUX_GPIO(GPIO_FN_LCD_CL1, LCD_CL1_MARK), PINMUX_GPIO(GPIO_FN_LCD_DON, LCD_DON_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA15, LCD_DATA15_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA14, LCD_DATA14_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA13, LCD_DATA13_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA12, LCD_DATA12_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA11, LCD_DATA11_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA10, LCD_DATA10_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA9, LCD_DATA9_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA8, LCD_DATA8_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA7, LCD_DATA7_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA6, LCD_DATA6_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA5, LCD_DATA5_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA4, LCD_DATA4_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA3, LCD_DATA3_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA2, LCD_DATA2_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA1, LCD_DATA1_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA0, LCD_DATA0_MARK), }; static struct pinmux_cfg_reg pinmux_config_regs[] = { { PINMUX_CFG_REG("PBIORL", 0xfffe3886, 16, 1) { 0, 0, 0, 0, 0, 0, 0, 0, PB11_IN, PB11_OUT, PB10_IN, PB10_OUT, PB9_IN, PB9_OUT, PB8_IN, PB8_OUT, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PBCRL4", 0xfffe3890, 16, 4) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB12MD_00, PB12MD_01, PB12MD_10, PB12MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PBCRL3", 0xfffe3892, 16, 4) { PB11MD_0, PB11MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB10MD_0, PB10MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB9MD_00, PB9MD_01, PB9MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB8MD_00, PB8MD_01, PB8MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PBCRL2", 0xfffe3894, 16, 4) { PB7MD_00, PB7MD_01, PB7MD_10, PB7MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB6MD_00, PB6MD_01, PB6MD_10, PB6MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB5MD_00, PB5MD_01, PB5MD_10, PB5MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB4MD_00, PB4MD_01, PB4MD_10, PB4MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PBCRL1", 0xfffe3896, 16, 4) { PB3MD_00, PB3MD_01, PB3MD_10, PB3MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB2MD_00, PB2MD_01, PB2MD_10, PB2MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB1MD_00, PB1MD_01, PB1MD_10, PB1MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB0MD_00, PB0MD_01, PB0MD_10, PB0MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("IFCR", 0xfffe38a2, 16, 4) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB12IRQ_00, PB12IRQ_01, PB12IRQ_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PCIORL", 0xfffe3906, 16, 1) { 0, 0, PC14_IN, PC14_OUT, PC13_IN, PC13_OUT, PC12_IN, PC12_OUT, PC11_IN, PC11_OUT, PC10_IN, PC10_OUT, PC9_IN, PC9_OUT, PC8_IN, PC8_OUT, PC7_IN, PC7_OUT, PC6_IN, PC6_OUT, PC5_IN, PC5_OUT, PC4_IN, PC4_OUT, PC3_IN, PC3_OUT, PC2_IN, PC2_OUT, PC1_IN, PC1_OUT, PC0_IN, PC0_OUT } }, { PINMUX_CFG_REG("PCCRL4", 0xfffe3910, 16, 4) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC14MD_0, PC14MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC13MD_0, PC13MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, PC12MD_0, PC12MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PCCRL3", 0xfffe3912, 16, 4) { PC11MD_00, PC11MD_01, PC11MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC10MD_00, PC10MD_01, PC10MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC9MD_0, PC9MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC8MD_0, PC8MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PCCRL2", 0xfffe3914, 16, 4) { PC7MD_0, PC7MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC6MD_0, PC6MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC5MD_0, PC5MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC4MD_0, PC4MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PCCRL1", 0xfffe3916, 16, 4) { PC3MD_0, PC3MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC2MD_0, PC2MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC1MD_0, PC1MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC0MD_00, PC0MD_01, PC0MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PDIORL", 0xfffe3986, 16, 1) { PD15_IN, PD15_OUT, PD14_IN, PD14_OUT, PD13_IN, PD13_OUT, PD12_IN, PD12_OUT, PD11_IN, PD11_OUT, PD10_IN, PD10_OUT, PD9_IN, PD9_OUT, PD8_IN, PD8_OUT, PD7_IN, PD7_OUT, PD6_IN, PD6_OUT, PD5_IN, PD5_OUT, PD4_IN, PD4_OUT, PD3_IN, PD3_OUT, PD2_IN, PD2_OUT, PD1_IN, PD1_OUT, PD0_IN, PD0_OUT } }, { PINMUX_CFG_REG("PDCRL4", 0xfffe3990, 16, 4) { PD15MD_000, PD15MD_001, PD15MD_010, 0, PD15MD_100, PD15MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD14MD_000, PD14MD_001, PD14MD_010, 0, 0, PD14MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD13MD_000, PD13MD_001, PD13MD_010, 0, PD13MD_100, PD13MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD12MD_000, PD12MD_001, PD12MD_010, 0, PD12MD_100, PD12MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PDCRL3", 0xfffe3992, 16, 4) { PD11MD_000, PD11MD_001, PD11MD_010, 0, PD11MD_100, PD11MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD10MD_000, PD10MD_001, PD10MD_010, 0, PD10MD_100, PD10MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD9MD_000, PD9MD_001, PD9MD_010, 0, PD9MD_100, PD9MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD8MD_000, PD8MD_001, PD8MD_010, 0, PD8MD_100, PD8MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PDCRL2", 0xfffe3994, 16, 4) { PD7MD_000, PD7MD_001, PD7MD_010, PD7MD_011, PD7MD_100, PD7MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD6MD_000, PD6MD_001, PD6MD_010, PD6MD_011, PD6MD_100, PD6MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD5MD_000, PD5MD_001, PD5MD_010, PD5MD_011, PD5MD_100, PD5MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD4MD_000, PD4MD_001, PD4MD_010, PD4MD_011, PD4MD_100, PD4MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PDCRL1", 0xfffe3996, 16, 4) { PD3MD_000, PD3MD_001, PD3MD_010, PD3MD_011, PD3MD_100, PD3MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD2MD_000, PD2MD_001, PD2MD_010, PD2MD_011, PD2MD_100, PD2MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD1MD_000, PD1MD_001, PD1MD_010, PD1MD_011, PD1MD_100, PD1MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD0MD_000, PD0MD_001, PD0MD_010, PD0MD_011, PD0MD_100, PD0MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PEIORL", 0xfffe3a06, 16, 1) { PE15_IN, PE15_OUT, PE14_IN, PE14_OUT, PE13_IN, PE13_OUT, PE12_IN, PE12_OUT, PE11_IN, PE11_OUT, PE10_IN, PE10_OUT, PE9_IN, PE9_OUT, PE8_IN, PE8_OUT, PE7_IN, PE7_OUT, PE6_IN, PE6_OUT, PE5_IN, PE5_OUT, PE4_IN, PE4_OUT, PE3_IN, PE3_OUT, PE2_IN, PE2_OUT, PE1_IN, PE1_OUT, PE0_IN, PE0_OUT } }, { PINMUX_CFG_REG("PECRL4", 0xfffe3a10, 16, 4) { PE15MD_00, PE15MD_01, 0, PE15MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE14MD_00, PE14MD_01, 0, PE14MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
PE13MD_00, 0, 0, PE13MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE12MD_00, 0, 0, PE12MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PECRL3", 0xfffe3a12, 16, 4) { PE11MD_000, PE11MD_001, PE11MD_010, 0, PE11MD_100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE10MD_000, PE10MD_001, PE10MD_010, 0, PE10MD_100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE9MD_00, PE9MD_01, PE9MD_10, PE9MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE8MD_00, PE8MD_01, PE8MD_10, PE8MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PECRL2", 0xfffe3a14, 16, 4) { PE7MD_000, PE7MD_001, PE7MD_010, PE7MD_011, PE7MD_100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE6MD_000, PE6MD_001, PE6MD_010, PE6MD_011, PE6MD_100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE5MD_000, PE5MD_001, PE5MD_010, PE5MD_011, PE5MD_100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE4MD_000, PE4MD_001, PE4MD_010, PE4MD_011, PE4MD_100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PECRL1", 0xfffe3a16, 16, 4) { PE3MD_00, PE3MD_01, 0, PE3MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE2MD_00, PE2MD_01, 0, PE2MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE1MD_00, PE1MD_01, PE1MD_10, PE1MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE0MD_000, PE0MD_001, 0, PE0MD_011, PE0MD_100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PFIORH", 0xfffe3a84, 16, 1) { 0, 0, PF30_IN, PF30_OUT, PF29_IN, PF29_OUT, PF28_IN, PF28_OUT, PF27_IN, PF27_OUT, PF26_IN, PF26_OUT, PF25_IN, PF25_OUT, PF24_IN, PF24_OUT, PF23_IN, PF23_OUT, PF22_IN, PF22_OUT, PF21_IN, PF21_OUT, PF20_IN, PF20_OUT, PF19_IN, PF19_OUT, PF18_IN, PF18_OUT, PF17_IN, PF17_OUT, PF16_IN, PF16_OUT } }, { PINMUX_CFG_REG("PFIORL", 0xfffe3a86, 16, 1) { PF15_IN, PF15_OUT, PF14_IN, PF14_OUT, PF13_IN, PF13_OUT, PF12_IN, PF12_OUT, PF11_IN, PF11_OUT, PF10_IN, PF10_OUT, PF9_IN, PF9_OUT, PF8_IN, PF8_OUT, PF7_IN, PF7_OUT, PF6_IN, PF6_OUT, PF5_IN, PF5_OUT, PF4_IN, PF4_OUT, PF3_IN, PF3_OUT, PF2_IN, PF2_OUT, PF1_IN, PF1_OUT, PF0_IN, PF0_OUT } }, { PINMUX_CFG_REG("PFCRH4", 0xfffe3a88, 16, 4) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF30MD_0, PF30MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF29MD_0, PF29MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF28MD_0, PF28MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PFCRH3", 0xfffe3a8a, 16, 4) { PF27MD_0, PF27MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF26MD_0, PF26MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF25MD_0, PF25MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF24MD_0, PF24MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PFCRH2", 0xfffe3a8c, 16, 4) { PF23MD_00, PF23MD_01, PF23MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF22MD_00, PF22MD_01, PF22MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF21MD_00, PF21MD_01, PF21MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF20MD_00, PF20MD_01, PF20MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PFCRH1", 0xfffe3a8e, 16, 4) { PF19MD_00, PF19MD_01, PF19MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF18MD_00, PF18MD_01, PF18MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF17MD_00, PF17MD_01, PF17MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF16MD_00, PF16MD_01, PF16MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PFCRL4", 0xfffe3a90, 16, 4) { PF15MD_00, PF15MD_01, PF15MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF14MD_00, PF14MD_01, PF14MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF13MD_00, PF13MD_01, PF13MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF12MD_00, PF12MD_01, PF12MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 
PINMUX_CFG_REG("PFCRL3", 0xfffe3a92, 16, 4) { PF11MD_00, PF11MD_01, PF11MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF10MD_00, PF10MD_01, PF10MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF9MD_00, PF9MD_01, PF9MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF8MD_00, PF8MD_01, PF8MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PFCRL2", 0xfffe3a94, 16, 4) { PF7MD_00, PF7MD_01, PF7MD_10, PF7MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF6MD_00, PF6MD_01, PF6MD_10, PF6MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF5MD_00, PF5MD_01, PF5MD_10, PF5MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF4MD_00, PF4MD_01, PF4MD_10, PF4MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PFCRL1", 0xfffe3a96, 16, 4) { PF3MD_00, PF3MD_01, PF3MD_10, PF3MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF2MD_00, PF2MD_01, PF2MD_10, PF2MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF1MD_00, PF1MD_01, PF1MD_10, PF1MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF0MD_00, PF0MD_01, PF0MD_10, PF0MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, {} }; static struct pinmux_data_reg pinmux_data_regs[] = { { PINMUX_DATA_REG("PADRL", 0xfffe3802, 16) { 0, 0, 0, 0, 0, 0, 0, 0, PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA, PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA } }, { PINMUX_DATA_REG("PBDRL", 0xfffe3882, 16) { 0, 0, 0, PB12_DATA, PB11_DATA, PB10_DATA, PB9_DATA, PB8_DATA, PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA, PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA } }, { PINMUX_DATA_REG("PCDRL", 0xfffe3902, 16) { 0, PC14_DATA, PC13_DATA, PC12_DATA, PC11_DATA, PC10_DATA, PC9_DATA, PC8_DATA, PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA, PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA } }, { PINMUX_DATA_REG("PDDRL", 0xfffe3982, 16) { PD15_DATA, PD14_DATA, PD13_DATA, PD12_DATA, PD11_DATA, PD10_DATA, PD9_DATA, PD8_DATA, PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA, PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA } }, { PINMUX_DATA_REG("PEDRL", 0xfffe3a02, 16) { PE15_DATA, PE14_DATA, PE13_DATA, PE12_DATA, PE11_DATA, PE10_DATA, PE9_DATA, PE8_DATA, PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA, PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA } }, { PINMUX_DATA_REG("PFDRH", 0xfffe3a80, 16) { 0, PF30_DATA, PF29_DATA, PF28_DATA, PF27_DATA, PF26_DATA, PF25_DATA, PF24_DATA, PF23_DATA, PF22_DATA, PF21_DATA, PF20_DATA, PF19_DATA, PF18_DATA, PF17_DATA, PF16_DATA } }, { PINMUX_DATA_REG("PFDRL", 0xfffe3a82, 16) { PF15_DATA, PF14_DATA, PF13_DATA, PF12_DATA, PF11_DATA, PF10_DATA, PF9_DATA, PF8_DATA, PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA, PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA } }, { }, }; static struct pinmux_info sh7203_pinmux_info = { .name = "sh7203_pfc", .reserved_id = PINMUX_RESERVED, .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END }, .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END, FORCE_IN }, .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END, FORCE_OUT }, .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END }, .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END }, .first_gpio = GPIO_PA7, .last_gpio = GPIO_FN_LCD_DATA0, .gpios = pinmux_gpios, .cfg_regs = pinmux_config_regs, .data_regs = pinmux_data_regs, .gpio_data = pinmux_data, .gpio_data_size = ARRAY_SIZE(pinmux_data), }; static int __init plat_pinmux_setup(void) { return register_pinmux(&sh7203_pinmux_info); } arch_initcall(plat_pinmux_setup);
gpl-2.0
PINMUX_GPIO(GPIO_FN_FWE, FWE_MARK), /* LCDC */ PINMUX_GPIO(GPIO_FN_LCD_VEPWC, LCD_VEPWC_MARK), PINMUX_GPIO(GPIO_FN_LCD_VCPWC, LCD_VCPWC_MARK), PINMUX_GPIO(GPIO_FN_LCD_CLK, LCD_CLK_MARK), PINMUX_GPIO(GPIO_FN_LCD_FLM, LCD_FLM_MARK), PINMUX_GPIO(GPIO_FN_LCD_M_DISP, LCD_M_DISP_MARK), PINMUX_GPIO(GPIO_FN_LCD_CL2, LCD_CL2_MARK), PINMUX_GPIO(GPIO_FN_LCD_CL1, LCD_CL1_MARK), PINMUX_GPIO(GPIO_FN_LCD_DON, LCD_DON_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA15, LCD_DATA15_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA14, LCD_DATA14_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA13, LCD_DATA13_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA12, LCD_DATA12_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA11, LCD_DATA11_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA10, LCD_DATA10_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA9, LCD_DATA9_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA8, LCD_DATA8_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA7, LCD_DATA7_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA6, LCD_DATA6_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA5, LCD_DATA5_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA4, LCD_DATA4_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA3, LCD_DATA3_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA2, LCD_DATA2_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA1, LCD_DATA1_MARK), PINMUX_GPIO(GPIO_FN_LCD_DATA0, LCD_DATA0_MARK), }; static struct pinmux_cfg_reg pinmux_config_regs[] = { { PINMUX_CFG_REG("PBIORL", 0xfffe3886, 16, 1) { 0, 0, 0, 0, 0, 0, 0, 0, PB11_IN, PB11_OUT, PB10_IN, PB10_OUT, PB9_IN, PB9_OUT, PB8_IN, PB8_OUT, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PBCRL4", 0xfffe3890, 16, 4) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB12MD_00, PB12MD_01, PB12MD_10, PB12MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PBCRL3", 0xfffe3892, 16, 4) { PB11MD_0, PB11MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB10MD_0, PB10MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB9MD_00, PB9MD_01, PB9MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB8MD_00, PB8MD_01, PB8MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PBCRL2", 0xfffe3894, 16, 4) { PB7MD_00, PB7MD_01, PB7MD_10, PB7MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB6MD_00, PB6MD_01, PB6MD_10, PB6MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB5MD_00, PB5MD_01, PB5MD_10, PB5MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB4MD_00, PB4MD_01, PB4MD_10, PB4MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PBCRL1", 0xfffe3896, 16, 4) { PB3MD_00, PB3MD_01, PB3MD_10, PB3MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB2MD_00, PB2MD_01, PB2MD_10, PB2MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB1MD_00, PB1MD_01, PB1MD_10, PB1MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB0MD_00, PB0MD_01, PB0MD_10, PB0MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("IFCR", 0xfffe38a2, 16, 4) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB12IRQ_00, PB12IRQ_01, PB12IRQ_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PCIORL", 0xfffe3906, 16, 1) { 0, 0, PC14_IN, PC14_OUT, PC13_IN, PC13_OUT, PC12_IN, PC12_OUT, PC11_IN, PC11_OUT, PC10_IN, PC10_OUT, PC9_IN, PC9_OUT, PC8_IN, PC8_OUT, PC7_IN, PC7_OUT, PC6_IN, PC6_OUT, PC5_IN, PC5_OUT, PC4_IN, PC4_OUT, PC3_IN, PC3_OUT, PC2_IN, PC2_OUT, PC1_IN, PC1_OUT, PC0_IN, PC0_OUT } }, { PINMUX_CFG_REG("PCCRL4", 0xfffe3910, 16, 4) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC14MD_0, PC14MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC13MD_0, PC13MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, PC12MD_0, PC12MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PCCRL3", 0xfffe3912, 16, 4) { PC11MD_00, PC11MD_01, PC11MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC10MD_00, PC10MD_01, PC10MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC9MD_0, PC9MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC8MD_0, PC8MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PCCRL2", 0xfffe3914, 16, 4) { PC7MD_0, PC7MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC6MD_0, PC6MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC5MD_0, PC5MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC4MD_0, PC4MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PCCRL1", 0xfffe3916, 16, 4) { PC3MD_0, PC3MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC2MD_0, PC2MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC1MD_0, PC1MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PC0MD_00, PC0MD_01, PC0MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PDIORL", 0xfffe3986, 16, 1) { PD15_IN, PD15_OUT, PD14_IN, PD14_OUT, PD13_IN, PD13_OUT, PD12_IN, PD12_OUT, PD11_IN, PD11_OUT, PD10_IN, PD10_OUT, PD9_IN, PD9_OUT, PD8_IN, PD8_OUT, PD7_IN, PD7_OUT, PD6_IN, PD6_OUT, PD5_IN, PD5_OUT, PD4_IN, PD4_OUT, PD3_IN, PD3_OUT, PD2_IN, PD2_OUT, PD1_IN, PD1_OUT, PD0_IN, PD0_OUT } }, { PINMUX_CFG_REG("PDCRL4", 0xfffe3990, 16, 4) { PD15MD_000, PD15MD_001, PD15MD_010, 0, PD15MD_100, PD15MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD14MD_000, PD14MD_001, PD14MD_010, 0, 0, PD14MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD13MD_000, PD13MD_001, PD13MD_010, 0, PD13MD_100, PD13MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD12MD_000, PD12MD_001, PD12MD_010, 0, PD12MD_100, PD12MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PDCRL3", 0xfffe3992, 16, 4) { PD11MD_000, PD11MD_001, PD11MD_010, 0, PD11MD_100, PD11MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD10MD_000, PD10MD_001, PD10MD_010, 0, PD10MD_100, PD10MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD9MD_000, PD9MD_001, PD9MD_010, 0, PD9MD_100, PD9MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD8MD_000, PD8MD_001, PD8MD_010, 0, PD8MD_100, PD8MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PDCRL2", 0xfffe3994, 16, 4) { PD7MD_000, PD7MD_001, PD7MD_010, PD7MD_011, PD7MD_100, PD7MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD6MD_000, PD6MD_001, PD6MD_010, PD6MD_011, PD6MD_100, PD6MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD5MD_000, PD5MD_001, PD5MD_010, PD5MD_011, PD5MD_100, PD5MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD4MD_000, PD4MD_001, PD4MD_010, PD4MD_011, PD4MD_100, PD4MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PDCRL1", 0xfffe3996, 16, 4) { PD3MD_000, PD3MD_001, PD3MD_010, PD3MD_011, PD3MD_100, PD3MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD2MD_000, PD2MD_001, PD2MD_010, PD2MD_011, PD2MD_100, PD2MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD1MD_000, PD1MD_001, PD1MD_010, PD1MD_011, PD1MD_100, PD1MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PD0MD_000, PD0MD_001, PD0MD_010, PD0MD_011, PD0MD_100, PD0MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PEIORL", 0xfffe3a06, 16, 1) { PE15_IN, PE15_OUT, PE14_IN, PE14_OUT, PE13_IN, PE13_OUT, PE12_IN, PE12_OUT, PE11_IN, PE11_OUT, PE10_IN, PE10_OUT, PE9_IN, PE9_OUT, PE8_IN, PE8_OUT, PE7_IN, PE7_OUT, PE6_IN, PE6_OUT, PE5_IN, PE5_OUT, PE4_IN, PE4_OUT, PE3_IN, PE3_OUT, PE2_IN, PE2_OUT, PE1_IN, PE1_OUT, PE0_IN, PE0_OUT } }, { PINMUX_CFG_REG("PECRL4", 0xfffe3a10, 16, 4) { PE15MD_00, PE15MD_01, 0, PE15MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE14MD_00, PE14MD_01, 0, PE14MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
PE13MD_00, 0, 0, PE13MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE12MD_00, 0, 0, PE12MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PECRL3", 0xfffe3a12, 16, 4) { PE11MD_000, PE11MD_001, PE11MD_010, 0, PE11MD_100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE10MD_000, PE10MD_001, PE10MD_010, 0, PE10MD_100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE9MD_00, PE9MD_01, PE9MD_10, PE9MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE8MD_00, PE8MD_01, PE8MD_10, PE8MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PECRL2", 0xfffe3a14, 16, 4) { PE7MD_000, PE7MD_001, PE7MD_010, PE7MD_011, PE7MD_100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE6MD_000, PE6MD_001, PE6MD_010, PE6MD_011, PE6MD_100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE5MD_000, PE5MD_001, PE5MD_010, PE5MD_011, PE5MD_100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE4MD_000, PE4MD_001, PE4MD_010, PE4MD_011, PE4MD_100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PECRL1", 0xfffe3a16, 16, 4) { PE3MD_00, PE3MD_01, 0, PE3MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE2MD_00, PE2MD_01, 0, PE2MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE1MD_00, PE1MD_01, PE1MD_10, PE1MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE0MD_000, PE0MD_001, 0, PE0MD_011, PE0MD_100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PFIORH", 0xfffe3a84, 16, 1) { 0, 0, PF30_IN, PF30_OUT, PF29_IN, PF29_OUT, PF28_IN, PF28_OUT, PF27_IN, PF27_OUT, PF26_IN, PF26_OUT, PF25_IN, PF25_OUT, PF24_IN, PF24_OUT, PF23_IN, PF23_OUT, PF22_IN, PF22_OUT, PF21_IN, PF21_OUT, PF20_IN, PF20_OUT, PF19_IN, PF19_OUT, PF18_IN, PF18_OUT, PF17_IN, PF17_OUT, PF16_IN, PF16_OUT } }, { PINMUX_CFG_REG("PFIORL", 0xfffe3a86, 16, 1) { PF15_IN, PF15_OUT, PF14_IN, PF14_OUT, PF13_IN, PF13_OUT, PF12_IN, PF12_OUT, PF11_IN, PF11_OUT, PF10_IN, PF10_OUT, PF9_IN, PF9_OUT, PF8_IN, PF8_OUT, PF7_IN, PF7_OUT, PF6_IN, PF6_OUT, PF5_IN, PF5_OUT, PF4_IN, PF4_OUT, PF3_IN, PF3_OUT, PF2_IN, PF2_OUT, PF1_IN, PF1_OUT, PF0_IN, PF0_OUT } }, { PINMUX_CFG_REG("PFCRH4", 0xfffe3a88, 16, 4) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF30MD_0, PF30MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF29MD_0, PF29MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF28MD_0, PF28MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PFCRH3", 0xfffe3a8a, 16, 4) { PF27MD_0, PF27MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF26MD_0, PF26MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF25MD_0, PF25MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF24MD_0, PF24MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PFCRH2", 0xfffe3a8c, 16, 4) { PF23MD_00, PF23MD_01, PF23MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF22MD_00, PF22MD_01, PF22MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF21MD_00, PF21MD_01, PF21MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF20MD_00, PF20MD_01, PF20MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PFCRH1", 0xfffe3a8e, 16, 4) { PF19MD_00, PF19MD_01, PF19MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF18MD_00, PF18MD_01, PF18MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF17MD_00, PF17MD_01, PF17MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF16MD_00, PF16MD_01, PF16MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PFCRL4", 0xfffe3a90, 16, 4) { PF15MD_00, PF15MD_01, PF15MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF14MD_00, PF14MD_01, PF14MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF13MD_00, PF13MD_01, PF13MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF12MD_00, PF12MD_01, PF12MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { 
PINMUX_CFG_REG("PFCRL3", 0xfffe3a92, 16, 4) { PF11MD_00, PF11MD_01, PF11MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF10MD_00, PF10MD_01, PF10MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF9MD_00, PF9MD_01, PF9MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF8MD_00, PF8MD_01, PF8MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PFCRL2", 0xfffe3a94, 16, 4) { PF7MD_00, PF7MD_01, PF7MD_10, PF7MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF6MD_00, PF6MD_01, PF6MD_10, PF6MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF5MD_00, PF5MD_01, PF5MD_10, PF5MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF4MD_00, PF4MD_01, PF4MD_10, PF4MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_CFG_REG("PFCRL1", 0xfffe3a96, 16, 4) { PF3MD_00, PF3MD_01, PF3MD_10, PF3MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF2MD_00, PF2MD_01, PF2MD_10, PF2MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF1MD_00, PF1MD_01, PF1MD_10, PF1MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PF0MD_00, PF0MD_01, PF0MD_10, PF0MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, {} }; static struct pinmux_data_reg pinmux_data_regs[] = { { PINMUX_DATA_REG("PADRL", 0xfffe3802, 16) { 0, 0, 0, 0, 0, 0, 0, 0, PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA, PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA } }, { PINMUX_DATA_REG("PBDRL", 0xfffe3882, 16) { 0, 0, 0, PB12_DATA, PB11_DATA, PB10_DATA, PB9_DATA, PB8_DATA, PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA, PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA } }, { PINMUX_DATA_REG("PCDRL", 0xfffe3902, 16) { 0, PC14_DATA, PC13_DATA, PC12_DATA, PC11_DATA, PC10_DATA, PC9_DATA, PC8_DATA, PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA, PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA } }, { PINMUX_DATA_REG("PDDRL", 0xfffe3982, 16) { PD15_DATA, PD14_DATA, PD13_DATA, PD12_DATA, PD11_DATA, PD10_DATA, PD9_DATA, PD8_DATA, PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA, PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA } }, { PINMUX_DATA_REG("PEDRL", 0xfffe3a02, 16) { PE15_DATA, PE14_DATA, PE13_DATA, PE12_DATA, PE11_DATA, PE10_DATA, PE9_DATA, PE8_DATA, PE7_DATA, PE6_DATA, PE5_DATA, PE4_DATA, PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA } }, { PINMUX_DATA_REG("PFDRH", 0xfffe3a80, 16) { 0, PF30_DATA, PF29_DATA, PF28_DATA, PF27_DATA, PF26_DATA, PF25_DATA, PF24_DATA, PF23_DATA, PF22_DATA, PF21_DATA, PF20_DATA, PF19_DATA, PF18_DATA, PF17_DATA, PF16_DATA } }, { PINMUX_DATA_REG("PFDRL", 0xfffe3a82, 16) { PF15_DATA, PF14_DATA, PF13_DATA, PF12_DATA, PF11_DATA, PF10_DATA, PF9_DATA, PF8_DATA, PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA, PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA } }, { }, }; static struct pinmux_info sh7203_pinmux_info = { .name = "sh7203_pfc", .reserved_id = PINMUX_RESERVED, .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END }, .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END, FORCE_IN }, .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END, FORCE_OUT }, .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END }, .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END }, .first_gpio = GPIO_PA7, .last_gpio = GPIO_FN_LCD_DATA0, .gpios = pinmux_gpios, .cfg_regs = pinmux_config_regs, .data_regs = pinmux_data_regs, .gpio_data = pinmux_data, .gpio_data_size = ARRAY_SIZE(pinmux_data), }; static int __init plat_pinmux_setup(void) { return register_pinmux(&sh7203_pinmux_info); } arch_initcall(plat_pinmux_setup);
gpl-2.0
Talustus/dreamkernel_ics_sghi777
drivers/input/mouse/inport.c
14610
5446
/* * Copyright (c) 1999-2001 Vojtech Pavlik * * Based on the work of: * Teemu Rantanen Derrick Cole * Peter Cervasio Christoph Niemann * Philip Blundell Russell King * Bob Harris */ /* * Inport (ATI XL and Microsoft) busmouse driver for Linux */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <linux/module.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/input.h> #include <asm/io.h> #include <asm/irq.h> MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION("Inport (ATI XL and Microsoft) busmouse driver"); MODULE_LICENSE("GPL"); #define INPORT_BASE 0x23c #define INPORT_EXTENT 4 #define INPORT_CONTROL_PORT (INPORT_BASE + 0) #define INPORT_DATA_PORT (INPORT_BASE + 1) #define INPORT_SIGNATURE_PORT (INPORT_BASE + 2) #define INPORT_REG_BTNS 0x00 #define INPORT_REG_X 0x01 #define INPORT_REG_Y 0x02 #define INPORT_REG_MODE 0x07 #define INPORT_RESET 0x80 #ifdef CONFIG_MOUSE_ATIXL #define INPORT_NAME "ATI XL Mouse" #define INPORT_VENDOR 0x0002 #define INPORT_SPEED_30HZ 0x01 #define INPORT_SPEED_50HZ 0x02 #define INPORT_SPEED_100HZ 0x03 #define INPORT_SPEED_200HZ 0x04 #define INPORT_MODE_BASE INPORT_SPEED_100HZ #define INPORT_MODE_IRQ 0x08 #else #define INPORT_NAME "Microsoft InPort Mouse" #define INPORT_VENDOR 0x0001 #define INPORT_MODE_BASE 0x10 #define INPORT_MODE_IRQ 0x01 #endif #define INPORT_MODE_HOLD 0x20 #define INPORT_IRQ 5 static unsigned int inport_irq = INPORT_IRQ; module_param_named(irq, inport_irq, uint, 0); MODULE_PARM_DESC(irq, "IRQ number (5=default)"); static struct input_dev *inport_dev; static irqreturn_t inport_interrupt(int irq, void *dev_id) { unsigned char buttons; outb(INPORT_REG_MODE, INPORT_CONTROL_PORT); outb(INPORT_MODE_HOLD | INPORT_MODE_IRQ | INPORT_MODE_BASE, INPORT_DATA_PORT); outb(INPORT_REG_X, INPORT_CONTROL_PORT); input_report_rel(inport_dev, REL_X, inb(INPORT_DATA_PORT)); outb(INPORT_REG_Y, INPORT_CONTROL_PORT); input_report_rel(inport_dev, REL_Y, inb(INPORT_DATA_PORT)); outb(INPORT_REG_BTNS, INPORT_CONTROL_PORT); buttons = inb(INPORT_DATA_PORT); input_report_key(inport_dev, BTN_MIDDLE, buttons & 1); input_report_key(inport_dev, BTN_LEFT, buttons & 2); input_report_key(inport_dev, BTN_RIGHT, buttons & 4); outb(INPORT_REG_MODE, INPORT_CONTROL_PORT); outb(INPORT_MODE_IRQ | INPORT_MODE_BASE, INPORT_DATA_PORT); input_sync(inport_dev); return IRQ_HANDLED; } static int inport_open(struct input_dev *dev) { if (request_irq(inport_irq, inport_interrupt, 0, "inport", NULL)) return -EBUSY; outb(INPORT_REG_MODE, INPORT_CONTROL_PORT); outb(INPORT_MODE_IRQ | INPORT_MODE_BASE, INPORT_DATA_PORT); return 0; } static void inport_close(struct input_dev *dev) { outb(INPORT_REG_MODE,
INPORT_CONTROL_PORT); outb(INPORT_MODE_BASE, INPORT_DATA_PORT); free_irq(inport_irq, NULL); } static int __init inport_init(void) { unsigned char a, b, c; int err; if (!request_region(INPORT_BASE, INPORT_EXTENT, "inport")) { printk(KERN_ERR "inport.c: Can't allocate ports at %#x\n", INPORT_BASE); return -EBUSY; } a = inb(INPORT_SIGNATURE_PORT); b = inb(INPORT_SIGNATURE_PORT); c = inb(INPORT_SIGNATURE_PORT); if (a == b || a != c) { printk(KERN_INFO "inport.c: Didn't find InPort mouse at %#x\n", INPORT_BASE); err = -ENODEV; goto err_release_region; } inport_dev = input_allocate_device(); if (!inport_dev) { printk(KERN_ERR "inport.c: Not enough memory for input device\n"); err = -ENOMEM; goto err_release_region; } inport_dev->name = INPORT_NAME; inport_dev->phys = "isa023c/input0"; inport_dev->id.bustype = BUS_ISA; inport_dev->id.vendor = INPORT_VENDOR; inport_dev->id.product = 0x0001; inport_dev->id.version = 0x0100; inport_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); inport_dev->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT); inport_dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y); inport_dev->open = inport_open; inport_dev->close = inport_close; outb(INPORT_RESET, INPORT_CONTROL_PORT); outb(INPORT_REG_MODE, INPORT_CONTROL_PORT); outb(INPORT_MODE_BASE, INPORT_DATA_PORT); err = input_register_device(inport_dev); if (err) goto err_free_dev; return 0; err_free_dev: input_free_device(inport_dev); err_release_region: release_region(INPORT_BASE, INPORT_EXTENT); return err; } static void __exit inport_exit(void) { input_unregister_device(inport_dev); release_region(INPORT_BASE, INPORT_EXTENT); } module_init(inport_init); module_exit(inport_exit);
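/*
 * Illustrative sketch, not part of the original driver: inport_init() above
 * detects the card by reading the signature port three times and bailing out
 * when (a == b || a != c).  The same handshake, inverted into a "device
 * present" predicate, could read as below; the helper name is hypothetical,
 * only inb() and INPORT_SIGNATURE_PORT come from the driver.
 */
static inline bool example_inport_present(void)
{
	unsigned char a = inb(INPORT_SIGNATURE_PORT);
	unsigned char b = inb(INPORT_SIGNATURE_PORT);
	unsigned char c = inb(INPORT_SIGNATURE_PORT);

	/* Consecutive reads must differ while repeating with period two. */
	return a != b && a == c;
}
/* Typical use of the module parameter: insmod inport.ko irq=5 */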
gpl-2.0
jupiterben/shooter-player
src/filters/transform/mpcvideodec/ffmpeg/libavcodec/atrac3.c
19
32289
/* * Atrac 3 compatible decoder * Copyright (c) 2006-2008 Maxim Poliakovski * Copyright (c) 2006-2008 Benjamin Larsson * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file libavcodec/atrac3.c * Atrac 3 compatible decoder. * This decoder handles Sony's ATRAC3 data. * * Container formats used to store atrac 3 data: * RealMedia (.rm), RIFF WAV (.wav, .at3), Sony OpenMG (.oma, .aa3). * * To use this decoder, a calling application must supply the extradata * bytes provided in the containers above. */ #include <math.h> #include <stddef.h> #include <stdio.h> #include "avcodec.h" #include "get_bits.h" #include "dsputil.h" #include "bytestream.h" #include "atrac.h" #include "atrac3data.h" #define JOINT_STEREO 0x12 #define STEREO 0x2 /* These structures are needed to store the parsed gain control data. */ typedef struct { int num_gain_data; int levcode[8]; int loccode[8]; } gain_info; typedef struct { gain_info gBlock[4]; } gain_block; typedef struct { int pos; int numCoefs; float coef[8]; } tonal_component; typedef struct { int bandsCoded; int numComponents; tonal_component components[64]; float prevFrame[1024]; int gcBlkSwitch; gain_block gainBlock[2]; DECLARE_ALIGNED_16(float, spectrum[1024]); DECLARE_ALIGNED_16(float, IMDCT_buf[1024]); float delayBuf1[46]; ///<qmf delay buffers float delayBuf2[46]; float delayBuf3[46]; } channel_unit; typedef struct { GetBitContext gb; //@{ /** stream data */ int channels; int codingMode; int bit_rate; int sample_rate; int samples_per_channel; int samples_per_frame; int bits_per_frame; int bytes_per_frame; int pBs; channel_unit* pUnits; //@} //@{ /** joint-stereo related variables */ int matrix_coeff_index_prev[4]; int matrix_coeff_index_now[4]; int matrix_coeff_index_next[4]; int weighting_delay[6]; //@} //@{ /** data buffers */ float outSamples[2048]; uint8_t* decoded_bytes_buffer; float tempBuf[1070]; //@} //@{ /** extradata */ int atrac3version; int delay; int scrambled_stream; int frame_factor; //@} } ATRAC3Context; static DECLARE_ALIGNED_16(float,mdct_window[512]); static VLC spectral_coeff_tab[7]; static float gain_tab1[16]; static float gain_tab2[31]; static FFTContext mdct_ctx; static DSPContext dsp; /** * Regular 512 points IMDCT without overlapping, with the exception of the swapping of odd bands * caused by the reverse spectra of the QMF. * * @param pInput float input * @param pOutput float output * @param odd_band 1 if the band is an odd band */ static void IMLT(float *pInput, float *pOutput, int odd_band) { int i; if (odd_band) { /** * Reverse the odd bands before IMDCT, this is an effect of the QMF transform * or it gives better compression to do it this way. * FIXME: It should be possible to handle this in ff_imdct_calc * for that to happen a modification of the prerotation step of * all SIMD code and C code is needed. 
* Or fix the functions before so they generate a pre-reversed spectrum. */ for (i=0; i<128; i++) FFSWAP(float, pInput[i], pInput[255-i]); } ff_imdct_calc(&mdct_ctx,pOutput,pInput); /* Perform windowing on the output. */ dsp.vector_fmul(pOutput,mdct_window,512); } /** * Atrac 3 indata descrambling, only used for data coming from the rm container * * @param inbuffer pointer to 8 bit array of indata * @param out pointer to 8 bit array of outdata * @param bytes amount of bytes */ static int decode_bytes(const uint8_t* inbuffer, uint8_t* out, int bytes){ int i, off; uint32_t c; const uint32_t* buf; uint32_t* obuf = (uint32_t*) out; off = (intptr_t)inbuffer & 3; buf = (const uint32_t*) (inbuffer - off); c = be2me_32((0x537F6103 >> (off*8)) | (0x537F6103 << (32-(off*8)))); bytes += 3 + off; for (i = 0; i < bytes/4; i++) obuf[i] = c ^ buf[i]; if (off) av_log(NULL,AV_LOG_DEBUG,"Offset of %d not handled, post sample on ffmpeg-dev.\n",off); return off; } static av_cold void init_atrac3_transforms(ATRAC3Context *q) { float enc_window[256]; int i; /* Generate the mdct window, for details see * http://wiki.multimedia.cx/index.php?title=RealAudio_atrc#Windows */ for (i=0 ; i<256; i++) enc_window[i] = (sin(((i + 0.5) / 256.0 - 0.5) * M_PI) + 1.0) * 0.5; if (!mdct_window[0]) for (i=0 ; i<256; i++) { mdct_window[i] = enc_window[i]/(enc_window[i]*enc_window[i] + enc_window[255-i]*enc_window[255-i]); mdct_window[511-i] = mdct_window[i]; } /* Initialize the MDCT transform. */ ff_mdct_init(&mdct_ctx, 9, 1, 1.0); } /** * Atrac3 uninit, free all allocated memory */ static av_cold int atrac3_decode_close(AVCodecContext *avctx) { ATRAC3Context *q = avctx->priv_data; av_free(q->pUnits); av_free(q->decoded_bytes_buffer); return 0; } /** * Mantissa decoding * * @param gb the GetBit context * @param selector which table the output values are coded with * @param codingFlag constant length coding or variable length coding * @param mantissas mantissa output table * @param numCodes amount of values to get */ static void readQuantSpectralCoeffs (GetBitContext *gb, int selector, int codingFlag, int* mantissas, int numCodes) { int numBits, cnt, code, huffSymb; if (selector == 1) numCodes /= 2; if (codingFlag != 0) { /* constant length coding (CLC) */ numBits = CLCLengthTab[selector]; if (selector > 1) { for (cnt = 0; cnt < numCodes; cnt++) { if (numBits) code = get_sbits(gb, numBits); else code = 0; mantissas[cnt] = code; } } else { for (cnt = 0; cnt < numCodes; cnt++) { if (numBits) code = get_bits(gb, numBits); //numBits is always 4 in this case else code = 0; mantissas[cnt*2] = seTab_0[code >> 2]; mantissas[cnt*2+1] = seTab_0[code & 3]; } } } else { /* variable length coding (VLC) */ if (selector != 1) { for (cnt = 0; cnt < numCodes; cnt++) { huffSymb = get_vlc2(gb, spectral_coeff_tab[selector-1].table, spectral_coeff_tab[selector-1].bits, 3); huffSymb += 1; code = huffSymb >> 1; if (huffSymb & 1) code = -code; mantissas[cnt] = code; } } else { for (cnt = 0; cnt < numCodes; cnt++) { huffSymb = get_vlc2(gb, spectral_coeff_tab[selector-1].table, spectral_coeff_tab[selector-1].bits, 3); mantissas[cnt*2] = decTable1[huffSymb*2]; mantissas[cnt*2+1] = decTable1[huffSymb*2+1]; } } } } /** * Restore the quantized band spectrum coefficients * * @param gb the GetBit context * @param pOut decoded band spectrum * @return outSubbands subband counter, fix for broken specification/files */ static int decodeSpectrum (GetBitContext *gb, float *pOut) { int numSubbands, codingMode, cnt, first, last, subbWidth, *pIn; int subband_vlc_index[32],
SF_idxs[32]; int mantissas[128]; float SF; numSubbands = get_bits(gb, 5); // number of coded subbands codingMode = get_bits1(gb); // coding mode: 0 - VLC / 1 - CLC /* Get the VLC selector table for the subbands, 0 means not coded. */ for (cnt = 0; cnt <= numSubbands; cnt++) subband_vlc_index[cnt] = get_bits(gb, 3); /* Read the scale factor indexes from the stream. */ for (cnt = 0; cnt <= numSubbands; cnt++) { if (subband_vlc_index[cnt] != 0) SF_idxs[cnt] = get_bits(gb, 6); } for (cnt = 0; cnt <= numSubbands; cnt++) { first = subbandTab[cnt]; last = subbandTab[cnt+1]; subbWidth = last - first; if (subband_vlc_index[cnt] != 0) { /* Decode spectral coefficients for this subband. */ /* TODO: This can be done faster if several blocks share the * same VLC selector (subband_vlc_index) */ readQuantSpectralCoeffs (gb, subband_vlc_index[cnt], codingMode, mantissas, subbWidth); /* Decode the scale factor for this subband. */ SF = sf_table[SF_idxs[cnt]] * iMaxQuant[subband_vlc_index[cnt]]; /* Inverse quantize the coefficients. */ for (pIn=mantissas ; first<last; first++, pIn++) pOut[first] = *pIn * SF; } else { /* This subband was not coded, so zero the entire subband. */ memset(pOut+first, 0, subbWidth*sizeof(float)); } } /* Clear the subbands that were not coded. */ first = subbandTab[cnt]; memset(pOut+first, 0, (1024 - first) * sizeof(float)); return numSubbands; } /** * Restore the quantized tonal components * * @param gb the GetBit context * @param pComponent tonal component output table * @param numBands amount of coded bands */ static int decodeTonalComponents (GetBitContext *gb, tonal_component *pComponent, int numBands) { int i,j,k,cnt; int components, coding_mode_selector, coding_mode, coded_values_per_component; int sfIndx, coded_values, max_coded_values, quant_step_index, coded_components; int band_flags[4], mantissa[8]; float *pCoef; float scalefactor; int component_count = 0; components = get_bits(gb,5); /* no tonal components */ if (components == 0) return 0; coding_mode_selector = get_bits(gb,2); if (coding_mode_selector == 2) return -1; coding_mode = coding_mode_selector & 1; for (i = 0; i < components; i++) { for (cnt = 0; cnt <= numBands; cnt++) band_flags[cnt] = get_bits1(gb); coded_values_per_component = get_bits(gb,3); quant_step_index = get_bits(gb,3); if (quant_step_index <= 1) return -1; if (coding_mode_selector == 3) coding_mode = get_bits1(gb); for (j = 0; j < (numBands + 1) * 4; j++) { if (band_flags[j >> 2] == 0) continue; coded_components = get_bits(gb,3); for (k=0; k<coded_components; k++) { sfIndx = get_bits(gb,6); pComponent[component_count].pos = j * 64 + (get_bits(gb,6)); max_coded_values = 1024 - pComponent[component_count].pos; coded_values = coded_values_per_component + 1; coded_values = FFMIN(max_coded_values,coded_values); scalefactor = sf_table[sfIndx] * iMaxQuant[quant_step_index]; readQuantSpectralCoeffs(gb, quant_step_index, coding_mode, mantissa, coded_values); pComponent[component_count].numCoefs = coded_values; /* inverse quant */ pCoef = pComponent[component_count].coef; for (cnt = 0; cnt < coded_values; cnt++) pCoef[cnt] = mantissa[cnt] * scalefactor; component_count++; } } } return component_count; } /** * Decode gain parameters for the coded bands * * @param gb the GetBit context * @param pGb the gainblock for the current band * @param numBands amount of coded bands */ static int decodeGainControl (GetBitContext *gb, gain_block *pGb, int numBands) { int i, cf, numData; int *pLevel, *pLoc; gain_info *pGain = pGb->gBlock; for (i=0 ; i<=numBands; i++) { numData =
get_bits(gb,3); pGain[i].num_gain_data = numData; pLevel = pGain[i].levcode; pLoc = pGain[i].loccode; for (cf = 0; cf < numData; cf++){ pLevel[cf]= get_bits(gb,4); pLoc [cf]= get_bits(gb,5); if(cf && pLoc[cf] <= pLoc[cf-1]) return -1; } } /* Clear the unused blocks. */ for (; i<4 ; i++) pGain[i].num_gain_data = 0; return 0; } /** * Apply gain parameters and perform the MDCT overlapping part * * @param pIn input float buffer * @param pPrev previous float buffer to perform overlap against * @param pOut output float buffer * @param pGain1 current band gain info * @param pGain2 next band gain info */ static void gainCompensateAndOverlap (float *pIn, float *pPrev, float *pOut, gain_info *pGain1, gain_info *pGain2) { /* gain compensation function */ float gain1, gain2, gain_inc; int cnt, numdata, nsample, startLoc, endLoc; if (pGain2->num_gain_data == 0) gain1 = 1.0; else gain1 = gain_tab1[pGain2->levcode[0]]; if (pGain1->num_gain_data == 0) { for (cnt = 0; cnt < 256; cnt++) pOut[cnt] = pIn[cnt] * gain1 + pPrev[cnt]; } else { numdata = pGain1->num_gain_data; pGain1->loccode[numdata] = 32; pGain1->levcode[numdata] = 4; nsample = 0; // current sample = 0 for (cnt = 0; cnt < numdata; cnt++) { startLoc = pGain1->loccode[cnt] * 8; endLoc = startLoc + 8; gain2 = gain_tab1[pGain1->levcode[cnt]]; gain_inc = gain_tab2[(pGain1->levcode[cnt+1] - pGain1->levcode[cnt])+15]; /* interpolate */ for (; nsample < startLoc; nsample++) pOut[nsample] = (pIn[nsample] * gain1 + pPrev[nsample]) * gain2; /* interpolation is done over eight samples */ for (; nsample < endLoc; nsample++) { pOut[nsample] = (pIn[nsample] * gain1 + pPrev[nsample]) * gain2; gain2 *= gain_inc; } } for (; nsample < 256; nsample++) pOut[nsample] = (pIn[nsample] * gain1) + pPrev[nsample]; } /* Delay for the overlapping part. */ memcpy(pPrev, &pIn[256], 256*sizeof(float)); } /** * Combine the tonal band spectrum and regular band spectrum * Return position of the last tonal coefficient * * @param pSpectrum output spectrum buffer * @param numComponents amount of tonal components * @param pComponent tonal components for this band */ static int addTonalComponents (float *pSpectrum, int numComponents, tonal_component *pComponent) { int cnt, i, lastPos = -1; float *pIn, *pOut; for (cnt = 0; cnt < numComponents; cnt++){ lastPos = FFMAX(pComponent[cnt].pos + pComponent[cnt].numCoefs, lastPos); pIn = pComponent[cnt].coef; pOut = &(pSpectrum[pComponent[cnt].pos]); for (i=0 ; i<pComponent[cnt].numCoefs ; i++) pOut[i] += pIn[i]; } return lastPos; } #define INTERPOLATE(old,new,nsample) ((old) + (nsample)*0.125*((new)-(old))) static void reverseMatrixing(float *su1, float *su2, int *pPrevCode, int *pCurrCode) { int i, band, nsample, s1, s2; float c1, c2; float mc1_l, mc1_r, mc2_l, mc2_r; for (i=0,band = 0; band < 4*256; band+=256,i++) { s1 = pPrevCode[i]; s2 = pCurrCode[i]; nsample = 0; if (s1 != s2) { /* Selector value changed, interpolation needed. */ mc1_l = matrixCoeffs[s1*2]; mc1_r = matrixCoeffs[s1*2+1]; mc2_l = matrixCoeffs[s2*2]; mc2_r = matrixCoeffs[s2*2+1]; /* Interpolation is done over the first eight samples. */ for(; nsample < 8; nsample++) { c1 = su1[band+nsample]; c2 = su2[band+nsample]; c2 = c1 * INTERPOLATE(mc1_l,mc2_l,nsample) + c2 * INTERPOLATE(mc1_r,mc2_r,nsample); su1[band+nsample] = c2; su2[band+nsample] = c1 * 2.0 - c2; } } /* Apply the matrix without interpolation. 
*/ switch (s2) { case 0: /* M/S decoding */ for (; nsample < 256; nsample++) { c1 = su1[band+nsample]; c2 = su2[band+nsample]; su1[band+nsample] = c2 * 2.0; su2[band+nsample] = (c1 - c2) * 2.0; } break; case 1: for (; nsample < 256; nsample++) { c1 = su1[band+nsample]; c2 = su2[band+nsample]; su1[band+nsample] = (c1 + c2) * 2.0; su2[band+nsample] = c2 * -2.0; } break; case 2: case 3: for (; nsample < 256; nsample++) { c1 = su1[band+nsample]; c2 = su2[band+nsample]; su1[band+nsample] = c1 + c2; su2[band+nsample] = c1 - c2; } break; default: assert(0); } } } static void getChannelWeights (int indx, int flag, float ch[2]){ if (indx == 7) { ch[0] = 1.0; ch[1] = 1.0; } else { ch[0] = (float)(indx & 7) / 7.0; ch[1] = sqrt(2 - ch[0]*ch[0]); if(flag) FFSWAP(float, ch[0], ch[1]); } } static void channelWeighting (float *su1, float *su2, int *p3) { int band, nsample; /* w[x][y] y=0 is left y=1 is right */ float w[2][2]; if (p3[1] != 7 || p3[3] != 7){ getChannelWeights(p3[1], p3[0], w[0]); getChannelWeights(p3[3], p3[2], w[1]); for(band = 1; band < 4; band++) { /* scale the channels by the weights */ for(nsample = 0; nsample < 8; nsample++) { su1[band*256+nsample] *= INTERPOLATE(w[0][0], w[0][1], nsample); su2[band*256+nsample] *= INTERPOLATE(w[1][0], w[1][1], nsample); } for(; nsample < 256; nsample++) { su1[band*256+nsample] *= w[1][0]; su2[band*256+nsample] *= w[1][1]; } } } } /** * Decode a Sound Unit * * @param gb the GetBit context * @param pSnd the channel unit to be used * @param pOut the decoded samples before IQMF in float representation * @param channelNum channel number * @param codingMode the coding mode (JOINT_STEREO or regular stereo/mono) */ static int decodeChannelSoundUnit (ATRAC3Context *q, GetBitContext *gb, channel_unit *pSnd, float *pOut, int channelNum, int codingMode) { int band, result=0, numSubbands, lastTonal, numBands; if (codingMode == JOINT_STEREO && channelNum == 1) { if (get_bits(gb,2) != 3) { av_log(NULL,AV_LOG_ERROR,"JS mono Sound Unit id != 3.\n"); return -1; } } else { if (get_bits(gb,6) != 0x28) { av_log(NULL,AV_LOG_ERROR,"Sound Unit id != 0x28.\n"); return -1; } } /* number of coded QMF bands */ pSnd->bandsCoded = get_bits(gb,2); result = decodeGainControl (gb, &(pSnd->gainBlock[pSnd->gcBlkSwitch]), pSnd->bandsCoded); if (result) return result; pSnd->numComponents = decodeTonalComponents (gb, pSnd->components, pSnd->bandsCoded); if (pSnd->numComponents == -1) return -1; numSubbands = decodeSpectrum (gb, pSnd->spectrum); /* Merge the decoded spectrum and tonal components. */ lastTonal = addTonalComponents (pSnd->spectrum, pSnd->numComponents, pSnd->components); /* calculate number of used MLT/QMF bands according to the amount of coded spectral lines */ numBands = (subbandTab[numSubbands] - 1) >> 8; if (lastTonal >= 0) numBands = FFMAX((lastTonal + 256) >> 8, numBands); /* Reconstruct time domain samples. */ for (band=0; band<4; band++) { /* Perform the IMDCT step without overlapping. */ if (band <= numBands) { IMLT(&(pSnd->spectrum[band*256]), pSnd->IMDCT_buf, band&1); } else memset(pSnd->IMDCT_buf, 0, 512 * sizeof(float)); /* gain compensation and overlapping */ gainCompensateAndOverlap (pSnd->IMDCT_buf, &(pSnd->prevFrame[band*256]), &(pOut[band*256]), &((pSnd->gainBlock[1 - (pSnd->gcBlkSwitch)]).gBlock[band]), &((pSnd->gainBlock[pSnd->gcBlkSwitch]).gBlock[band])); } /* Swap the gain control buffers for the next frame. 
*/ pSnd->gcBlkSwitch ^= 1; return 0; } /** * Frame handling * * @param q Atrac3 private context * @param databuf the input data */ static int decodeFrame(ATRAC3Context *q, const uint8_t* databuf) { int result, i; float *p1, *p2, *p3, *p4; uint8_t *ptr1; if (q->codingMode == JOINT_STEREO) { /* channel coupling mode */ /* decode Sound Unit 1 */ init_get_bits(&q->gb,databuf,q->bits_per_frame); result = decodeChannelSoundUnit(q,&q->gb, q->pUnits, q->outSamples, 0, JOINT_STEREO); if (result != 0) return (result); /* Framedata of the su2 in the joint-stereo mode is encoded in * reverse byte order so we need to swap it first. */ if (databuf == q->decoded_bytes_buffer) { uint8_t *ptr2 = q->decoded_bytes_buffer+q->bytes_per_frame-1; ptr1 = q->decoded_bytes_buffer; for (i = 0; i < (q->bytes_per_frame/2); i++, ptr1++, ptr2--) { FFSWAP(uint8_t,*ptr1,*ptr2); } } else { const uint8_t *ptr2 = databuf+q->bytes_per_frame-1; for (i = 0; i < q->bytes_per_frame; i++) q->decoded_bytes_buffer[i] = *ptr2--; } /* Skip the sync codes (0xF8). */ ptr1 = q->decoded_bytes_buffer; for (i = 4; *ptr1 == 0xF8; i++, ptr1++) { if (i >= q->bytes_per_frame) return -1; } /* set the bitstream reader at the start of the second Sound Unit*/ init_get_bits(&q->gb,ptr1,q->bits_per_frame); /* Fill the Weighting coeffs delay buffer */ memmove(q->weighting_delay,&(q->weighting_delay[2]),4*sizeof(int)); q->weighting_delay[4] = get_bits1(&q->gb); q->weighting_delay[5] = get_bits(&q->gb,3); for (i = 0; i < 4; i++) { q->matrix_coeff_index_prev[i] = q->matrix_coeff_index_now[i]; q->matrix_coeff_index_now[i] = q->matrix_coeff_index_next[i]; q->matrix_coeff_index_next[i] = get_bits(&q->gb,2); } /* Decode Sound Unit 2. */ result = decodeChannelSoundUnit(q,&q->gb, &q->pUnits[1], &q->outSamples[1024], 1, JOINT_STEREO); if (result != 0) return (result); /* Reconstruct the channel coefficients. */ reverseMatrixing(q->outSamples, &q->outSamples[1024], q->matrix_coeff_index_prev, q->matrix_coeff_index_now); channelWeighting(q->outSamples, &q->outSamples[1024], q->weighting_delay); } else { /* normal stereo mode or mono */ /* Decode the channel sound units. */ for (i=0 ; i<q->channels ; i++) { /* Set the bitstream reader at the start of a channel sound unit. */ init_get_bits(&q->gb, databuf+((i*q->bytes_per_frame)/q->channels), (q->bits_per_frame)/q->channels); result = decodeChannelSoundUnit(q,&q->gb, &q->pUnits[i], &q->outSamples[i*1024], i, q->codingMode); if (result != 0) return (result); } } /* Apply the iQMF synthesis filter. */ p1= q->outSamples; for (i=0 ; i<q->channels ; i++) { p2= p1+256; p3= p2+256; p4= p3+256; atrac_iqmf (p1, p2, 256, p1, q->pUnits[i].delayBuf1, q->tempBuf); atrac_iqmf (p4, p3, 256, p3, q->pUnits[i].delayBuf2, q->tempBuf); atrac_iqmf (p1, p3, 512, p1, q->pUnits[i].delayBuf3, q->tempBuf); p1 +=1024; } return 0; } /** * Atrac frame decoding * * @param avctx pointer to the AVCodecContext */ static int atrac3_decode_frame(AVCodecContext *avctx, void *data, int *data_size, const uint8_t *buf, int buf_size) { ATRAC3Context *q = avctx->priv_data; int result = 0, i; const uint8_t* databuf; int16_t* samples = data; if (buf_size < avctx->block_align) return buf_size; /* Check if we need to descramble and what buffer to pass on. 
*/ if (q->scrambled_stream) { decode_bytes(buf, q->decoded_bytes_buffer, avctx->block_align); databuf = q->decoded_bytes_buffer; } else { databuf = buf; } result = decodeFrame(q, databuf); if (result != 0) { av_log(NULL,AV_LOG_ERROR,"Frame decoding error!\n"); return -1; } if (q->channels == 1) { /* mono */ for (i = 0; i<1024; i++) samples[i] = av_clip_int16(round(q->outSamples[i])); *data_size = 1024 * sizeof(int16_t); } else { /* stereo */ for (i = 0; i < 1024; i++) { samples[i*2] = av_clip_int16(round(q->outSamples[i])); samples[i*2+1] = av_clip_int16(round(q->outSamples[1024+i])); } *data_size = 2048 * sizeof(int16_t); } return avctx->block_align; } /** * Atrac3 initialization * * @param avctx pointer to the AVCodecContext */ static av_cold int atrac3_decode_init(AVCodecContext *avctx) { int i; const uint8_t *edata_ptr = avctx->extradata; ATRAC3Context *q = avctx->priv_data; static VLC_TYPE atrac3_vlc_table[4096][2]; static int vlcs_initialized = 0; /* Take data from the AVCodecContext (RM container). */ q->sample_rate = avctx->sample_rate; q->channels = avctx->channels; q->bit_rate = avctx->bit_rate; q->bits_per_frame = avctx->block_align * 8; q->bytes_per_frame = avctx->block_align; /* Take care of the codec-specific extradata. */ if (avctx->extradata_size == 14) { /* Parse the extradata, WAV format */ av_log(avctx,AV_LOG_DEBUG,"[0-1] %d\n",bytestream_get_le16(&edata_ptr)); //Unknown value always 1 q->samples_per_channel = bytestream_get_le32(&edata_ptr); q->codingMode = bytestream_get_le16(&edata_ptr); av_log(avctx,AV_LOG_DEBUG,"[8-9] %d\n",bytestream_get_le16(&edata_ptr)); //Dupe of coding mode q->frame_factor = bytestream_get_le16(&edata_ptr); //Unknown always 1 av_log(avctx,AV_LOG_DEBUG,"[12-13] %d\n",bytestream_get_le16(&edata_ptr)); //Unknown always 0 /* setup */ q->samples_per_frame = 1024 * q->channels; q->atrac3version = 4; q->delay = 0x88E; if (q->codingMode) q->codingMode = JOINT_STEREO; else q->codingMode = STEREO; q->scrambled_stream = 0; if ((q->bytes_per_frame == 96*q->channels*q->frame_factor) || (q->bytes_per_frame == 152*q->channels*q->frame_factor) || (q->bytes_per_frame == 192*q->channels*q->frame_factor)) { } else { av_log(avctx,AV_LOG_ERROR,"Unknown frame/channel/frame_factor configuration %d/%d/%d\n", q->bytes_per_frame, q->channels, q->frame_factor); return -1; } } else if (avctx->extradata_size == 10) { /* Parse the extradata, RM format. */ q->atrac3version = bytestream_get_be32(&edata_ptr); q->samples_per_frame = bytestream_get_be16(&edata_ptr); q->delay = bytestream_get_be16(&edata_ptr); q->codingMode = bytestream_get_be16(&edata_ptr); q->samples_per_channel = q->samples_per_frame / q->channels; q->scrambled_stream = 1; } else { av_log(NULL,AV_LOG_ERROR,"Unknown extradata size %d.\n",avctx->extradata_size); } /* Check the extradata. 
*/ if (q->atrac3version != 4) { av_log(avctx,AV_LOG_ERROR,"Version %d != 4.\n",q->atrac3version); return -1; } if (q->samples_per_frame != 1024 && q->samples_per_frame != 2048) { av_log(avctx,AV_LOG_ERROR,"Unknown amount of samples per frame %d.\n",q->samples_per_frame); return -1; } if (q->delay != 0x88E) { av_log(avctx,AV_LOG_ERROR,"Unknown amount of delay %x != 0x88E.\n",q->delay); return -1; } if (q->codingMode == STEREO) { av_log(avctx,AV_LOG_DEBUG,"Normal stereo detected.\n"); } else if (q->codingMode == JOINT_STEREO) { av_log(avctx,AV_LOG_DEBUG,"Joint stereo detected.\n"); } else { av_log(avctx,AV_LOG_ERROR,"Unknown channel coding mode %x!\n",q->codingMode); return -1; } if (avctx->channels <= 0 || avctx->channels > 2 /*|| ((avctx->channels * 1024) != q->samples_per_frame)*/) { av_log(avctx,AV_LOG_ERROR,"Channel configuration error!\n"); return -1; } if(avctx->block_align >= UINT_MAX/2) return -1; /* Pad the data buffer with FF_INPUT_BUFFER_PADDING_SIZE, * this is for the bitstream reader. */ if ((q->decoded_bytes_buffer = av_mallocz((avctx->block_align+(4-avctx->block_align%4) + FF_INPUT_BUFFER_PADDING_SIZE))) == NULL) return AVERROR(ENOMEM); /* Initialize the VLC tables. */ if (!vlcs_initialized) { for (i=0 ; i<7 ; i++) { spectral_coeff_tab[i].table = &atrac3_vlc_table[atrac3_vlc_offs[i]]; spectral_coeff_tab[i].table_allocated = atrac3_vlc_offs[i + 1] - atrac3_vlc_offs[i]; init_vlc (&spectral_coeff_tab[i], 9, huff_tab_sizes[i], huff_bits[i], 1, 1, huff_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC); } vlcs_initialized = 1; } init_atrac3_transforms(q); atrac_generate_tables(); /* Generate gain tables. */ for (i=0 ; i<16 ; i++) gain_tab1[i] = powf (2.0, (4 - i)); for (i=-15 ; i<16 ; i++) gain_tab2[i+15] = powf (2.0, i * -0.125); /* init the joint-stereo decoding data */ q->weighting_delay[0] = 0; q->weighting_delay[1] = 7; q->weighting_delay[2] = 0; q->weighting_delay[3] = 7; q->weighting_delay[4] = 0; q->weighting_delay[5] = 7; for (i=0; i<4; i++) { q->matrix_coeff_index_prev[i] = 3; q->matrix_coeff_index_now[i] = 3; q->matrix_coeff_index_next[i] = 3; } dsputil_init(&dsp, avctx); q->pUnits = av_mallocz(sizeof(channel_unit)*q->channels); if (!q->pUnits) { av_free(q->decoded_bytes_buffer); return AVERROR(ENOMEM); } avctx->sample_fmt = SAMPLE_FMT_S16; return 0; } AVCodec atrac3_decoder = { .name = "atrac3", .type = CODEC_TYPE_AUDIO, .id = CODEC_ID_ATRAC3, .priv_data_size = sizeof(ATRAC3Context), .init = atrac3_decode_init, .close = atrac3_decode_close, .decode = atrac3_decode_frame, .long_name = NULL_IF_CONFIG_SMALL("Atrac 3 (Adaptive TRansform Acoustic Coding 3)"), };
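/*
 * Illustrative sketch, not part of the decoder: the gain tables filled in by
 * atrac3_decode_init() above are plain power-of-two ramps, gain_tab1[i] =
 * 2^(4-i) and gain_tab2[i+15] = 2^(-i/8).  A self-contained program printing
 * the same values, assuming nothing beyond libm:
 */
#if 0	/* example only, kept out of the build */
#include <math.h>
#include <stdio.h>

int main(void)
{
	int i;

	for (i = 0; i < 16; i++)	/* gain_tab1[i] = 2^(4 - i) */
		printf("gain_tab1[%2d] = %g\n", i, powf(2.0f, 4 - i));
	for (i = -15; i < 16; i++)	/* gain_tab2[i + 15] = 2^(-i / 8) */
		printf("gain_tab2[%2d] = %g\n", i + 15, powf(2.0f, i * -0.125f));
	return 0;
}
#endif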
gpl-2.0
borysiasty/QGIS
tests/src/core/testqgsvectortileconnection.cpp
19
2748
/***************************************************************************
  testqgsvectortileconnection.cpp
  --------------------------------------
  Date                 : January 2022
  Copyright            : (C) 2022 by Alexander Bruy
  Email                : alexander dot bruy at gmail dot com
 ***************************************************************************
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 ***************************************************************************/
#include "qgstest.h"
#include <QObject>
#include <QString>

//qgis includes...
#include "qgsapplication.h"
#include "qgsvectortileconnection.h"

/**
 * \ingroup UnitTests
 * This is a unit test for the vector tile provider connection class
 */
class TestQgsVectorTileConnection : public QObject
{
    Q_OBJECT

  public:
    TestQgsVectorTileConnection() = default;

  private:

  private slots:
    void initTestCase();// will be called before the first testfunction is executed.
    void cleanupTestCase();// will be called after the last testfunction was executed.
    void init() {} // will be called before each testfunction is executed.
    void cleanup() {} // will be called after every testfunction.

    void test_encodedUri();
};

void TestQgsVectorTileConnection::initTestCase()
{
  // init QGIS's paths - true means that all path will be inited from prefix
  QgsApplication::init();
  QgsApplication::initQgis();
}

void TestQgsVectorTileConnection::cleanupTestCase()
{
  QgsApplication::exitQgis();
}

void TestQgsVectorTileConnection::test_encodedUri()
{
  QgsVectorTileProviderConnection::Data conn;
  conn.url = QStringLiteral( "https://api.maptiler.com/tiles/v3/{z}/{x}/{y}.pbf?key=abcdef12345" );
  conn.zMin = 0;
  conn.zMax = 18;
  QString uri = QgsVectorTileProviderConnection::encodedUri( conn );
  QCOMPARE( uri, QStringLiteral( "type=xyz&url=https://api.maptiler.com/tiles/v3/%7Bz%7D/%7Bx%7D/%7By%7D.pbf?key%3Dabcdef12345&zmax=18&zmin=0" ) );

  conn.url = QStringLiteral( "file:///home/user/tiles.mbtiles" );
  conn.zMin = 0;
  conn.zMax = 18;
  uri = QgsVectorTileProviderConnection::encodedUri( conn );
  QCOMPARE( uri, QStringLiteral( "type=mbtiles&url=file:///home/user/tiles.mbtiles&zmax=18&zmin=0" ) );
}

QGSTEST_MAIN( TestQgsVectorTileConnection )
#include "testqgsvectortileconnection.moc"
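A further encoding case would follow the same pattern as test_encodedUri() above. The sketch below is illustrative only: the slot name test_encodedUri_customZoomRange is hypothetical, it would also need a declaration among the private slots in the class above before QTest would run it, and it assumes only the url, zMin and zMax members already exercised on QgsVectorTileProviderConnection::Data.

// Hypothetical additional case; it checks only properties that the expected
// strings in test_encodedUri() above already demonstrate (the "type=xyz"
// prefix and the alphabetically ordered zmax/zmin parameters).
void TestQgsVectorTileConnection::test_encodedUri_customZoomRange()
{
  QgsVectorTileProviderConnection::Data conn;
  conn.url = QStringLiteral( "https://example.com/tiles/{z}/{x}/{y}.pbf" );
  conn.zMin = 5;
  conn.zMax = 12;

  const QString uri = QgsVectorTileProviderConnection::encodedUri( conn );
  QVERIFY( uri.startsWith( QStringLiteral( "type=xyz" ) ) );
  QVERIFY( uri.contains( QStringLiteral( "zmax=12" ) ) );
  QVERIFY( uri.contains( QStringLiteral( "zmin=5" ) ) );
}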
gpl-2.0
timshen91/gcc
gcc/config/mep/mep.c
19
194624
/* Definitions for Toshiba Media Processor Copyright (C) 2001-2015 Free Software Foundation, Inc. Contributed by Red Hat, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "backend.h" #include "cfghooks.h" #include "tree.h" #include "gimple.h" #include "rtl.h" #include "df.h" #include "alias.h" #include "fold-const.h" #include "varasm.h" #include "calls.h" #include "stringpool.h" #include "stor-layout.h" #include "regs.h" #include "insn-config.h" #include "conditions.h" #include "insn-flags.h" #include "output.h" #include "insn-attr.h" #include "flags.h" #include "recog.h" #include "expmed.h" #include "dojump.h" #include "explow.h" #include "emit-rtl.h" #include "stmt.h" #include "expr.h" #include "except.h" #include "insn-codes.h" #include "optabs.h" #include "reload.h" #include "tm_p.h" #include "diagnostic-core.h" #include "target.h" #include "langhooks.h" #include "cfgrtl.h" #include "cfganal.h" #include "lcm.h" #include "cfgbuild.h" #include "cfgcleanup.h" #include "internal-fn.h" #include "gimple-fold.h" #include "tree-eh.h" #include "gimplify.h" #include "opts.h" #include "dumpfile.h" #include "builtins.h" #include "rtl-iter.h" /* This file should be included last. */ #include "target-def.h" /* Structure of this file: + Command Line Option Support + Pattern support - constraints, predicates, expanders + Reload Support + Costs + Functions to save and restore machine-specific function data. + Frame/Epilog/Prolog Related + Operand Printing + Function args in registers + Handle pipeline hazards + Handle attributes + Trampolines + Machine-dependent Reorg + Builtins. */ /* Symbol encodings: Symbols are encoded as @ <char> . <name> where <char> is one of these: b - based t - tiny n - near f - far i - io, near I - io, far c - cb (control bus) */ struct GTY(()) machine_function { int mep_frame_pointer_needed; /* For varargs. */ int arg_regs_to_save; int regsave_filler; int frame_filler; int frame_locked; /* Records __builtin_return address. */ rtx eh_stack_adjust; int reg_save_size; int reg_save_slot[FIRST_PSEUDO_REGISTER]; unsigned char reg_saved[FIRST_PSEUDO_REGISTER]; /* 2 if the current function has an interrupt attribute, 1 if not, 0 if unknown. This is here because resource.c uses EPILOGUE_USES which needs it. */ int interrupt_handler; /* Likewise, for disinterrupt attribute. */ int disable_interrupts; /* Number of doloop tags used so far. */ int doloop_tags; /* True if the last tag was allocated to a doloop_end. */ bool doloop_tag_from_end; /* True if reload changes $TP. */ bool reload_changes_tp; /* 2 if there are asm()s without operands, 1 if not, 0 if unknown. We only set this if the function is an interrupt handler. 
*/ int asms_without_operands; }; #define MEP_CONTROL_REG(x) \ (GET_CODE (x) == REG && ANY_CONTROL_REGNO_P (REGNO (x))) static GTY(()) section * based_section; static GTY(()) section * tinybss_section; static GTY(()) section * far_section; static GTY(()) section * farbss_section; static GTY(()) section * frodata_section; static GTY(()) section * srodata_section; static GTY(()) section * vtext_section; static GTY(()) section * vftext_section; static GTY(()) section * ftext_section; static void mep_set_leaf_registers (int); static bool symbol_p (rtx); static bool symbolref_p (rtx); static void encode_pattern_1 (rtx); static void encode_pattern (rtx); static bool const_in_range (rtx, int, int); static void mep_rewrite_mult (rtx_insn *, rtx); static void mep_rewrite_mulsi3 (rtx_insn *, rtx, rtx, rtx); static void mep_rewrite_maddsi3 (rtx_insn *, rtx, rtx, rtx, rtx); static bool mep_reuse_lo_p_1 (rtx, rtx, rtx_insn *, bool); static bool move_needs_splitting (rtx, rtx, machine_mode); static bool mep_expand_setcc_1 (enum rtx_code, rtx, rtx, rtx); static bool mep_nongeneral_reg (rtx); static bool mep_general_copro_reg (rtx); static bool mep_nonregister (rtx); static struct machine_function* mep_init_machine_status (void); static rtx mep_tp_rtx (void); static rtx mep_gp_rtx (void); static bool mep_interrupt_p (void); static bool mep_disinterrupt_p (void); static bool mep_reg_set_p (rtx, rtx); static bool mep_reg_set_in_function (int); static bool mep_interrupt_saved_reg (int); static bool mep_call_saves_register (int); static rtx_insn *F (rtx_insn *); static void add_constant (int, int, int, int); static rtx_insn *maybe_dead_move (rtx, rtx, bool); static void mep_reload_pointer (int, const char *); static void mep_start_function (FILE *, HOST_WIDE_INT); static bool mep_function_ok_for_sibcall (tree, tree); static int unique_bit_in (HOST_WIDE_INT); static int bit_size_for_clip (HOST_WIDE_INT); static int bytesize (const_tree, machine_mode); static tree mep_validate_based_tiny (tree *, tree, tree, int, bool *); static tree mep_validate_near_far (tree *, tree, tree, int, bool *); static tree mep_validate_disinterrupt (tree *, tree, tree, int, bool *); static tree mep_validate_interrupt (tree *, tree, tree, int, bool *); static tree mep_validate_io_cb (tree *, tree, tree, int, bool *); static tree mep_validate_vliw (tree *, tree, tree, int, bool *); static bool mep_function_attribute_inlinable_p (const_tree); static bool mep_can_inline_p (tree, tree); static bool mep_lookup_pragma_disinterrupt (const char *); static int mep_multiple_address_regions (tree, bool); static int mep_attrlist_to_encoding (tree, tree); static void mep_insert_attributes (tree, tree *); static void mep_encode_section_info (tree, rtx, int); static section * mep_select_section (tree, int, unsigned HOST_WIDE_INT); static void mep_unique_section (tree, int); static unsigned int mep_section_type_flags (tree, const char *, int); static void mep_asm_named_section (const char *, unsigned int, tree); static bool mep_mentioned_p (rtx, rtx, int); static void mep_reorg_regmove (rtx_insn *); static rtx_insn *mep_insert_repeat_label_last (rtx_insn *, rtx_code_label *, bool, bool); static void mep_reorg_repeat (rtx_insn *); static bool mep_invertable_branch_p (rtx_insn *); static void mep_invert_branch (rtx_insn *, rtx_insn *); static void mep_reorg_erepeat (rtx_insn *); static void mep_jmp_return_reorg (rtx_insn *); static void mep_reorg_addcombine (rtx_insn *); static void mep_reorg (void); static void mep_init_intrinsics (void); static void 
mep_init_builtins (void); static void mep_intrinsic_unavailable (int); static bool mep_get_intrinsic_insn (int, const struct cgen_insn **); static bool mep_get_move_insn (int, const struct cgen_insn **); static rtx mep_convert_arg (machine_mode, rtx); static rtx mep_convert_regnum (const struct cgen_regnum_operand *, rtx); static rtx mep_legitimize_arg (const struct insn_operand_data *, rtx, int); static void mep_incompatible_arg (const struct insn_operand_data *, rtx, int, tree); static rtx mep_expand_builtin (tree, rtx, rtx, machine_mode, int); static int mep_adjust_cost (rtx_insn *, rtx, rtx_insn *, int); static int mep_issue_rate (void); static rtx_insn *mep_find_ready_insn (rtx_insn **, int, enum attr_slot, int); static void mep_move_ready_insn (rtx_insn **, int, rtx_insn *); static int mep_sched_reorder (FILE *, int, rtx_insn **, int *, int); static rtx_insn *mep_make_bundle (rtx, rtx_insn *); static void mep_bundle_insns (rtx_insn *); static bool mep_rtx_cost (rtx, machine_mode, int, int, int *, bool); static int mep_address_cost (rtx, machine_mode, addr_space_t, bool); static void mep_setup_incoming_varargs (cumulative_args_t, machine_mode, tree, int *, int); static bool mep_pass_by_reference (cumulative_args_t cum, machine_mode, const_tree, bool); static rtx mep_function_arg (cumulative_args_t, machine_mode, const_tree, bool); static void mep_function_arg_advance (cumulative_args_t, machine_mode, const_tree, bool); static bool mep_vector_mode_supported_p (machine_mode); static rtx mep_allocate_initial_value (rtx); static void mep_asm_init_sections (void); static int mep_comp_type_attributes (const_tree, const_tree); static bool mep_narrow_volatile_bitfield (void); static rtx mep_expand_builtin_saveregs (void); static tree mep_build_builtin_va_list (void); static void mep_expand_va_start (tree, rtx); static tree mep_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *); static bool mep_can_eliminate (const int, const int); static void mep_conditional_register_usage (void); static void mep_trampoline_init (rtx, tree, rtx); #define WANT_GCC_DEFINITIONS #include "mep-intrin.h" #undef WANT_GCC_DEFINITIONS /* Command Line Option Support. */ char mep_leaf_registers [FIRST_PSEUDO_REGISTER]; /* True if we can use cmov instructions to move values back and forth between core and coprocessor registers. */ bool mep_have_core_copro_moves_p; /* True if we can use cmov instructions (or a work-alike) to move values between coprocessor registers. */ bool mep_have_copro_copro_moves_p; /* A table of all coprocessor instructions that can act like a coprocessor-to-coprocessor cmov. 
*/ static const int mep_cmov_insns[] = { mep_cmov, mep_cpmov, mep_fmovs, mep_caddi3, mep_csubi3, mep_candi3, mep_cori3, mep_cxori3, mep_cand3, mep_cor3 }; static void mep_set_leaf_registers (int enable) { int i; if (mep_leaf_registers[0] != enable) for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) mep_leaf_registers[i] = enable; } static void mep_conditional_register_usage (void) { int i; if (!TARGET_OPT_MULT && !TARGET_OPT_DIV) { fixed_regs[HI_REGNO] = 1; fixed_regs[LO_REGNO] = 1; call_used_regs[HI_REGNO] = 1; call_used_regs[LO_REGNO] = 1; } for (i = FIRST_SHADOW_REGISTER; i <= LAST_SHADOW_REGISTER; i++) global_regs[i] = 1; } static void mep_option_override (void) { unsigned int i; int j; cl_deferred_option *opt; vec<cl_deferred_option> *v = (vec<cl_deferred_option> *) mep_deferred_options; if (v) FOR_EACH_VEC_ELT (*v, i, opt) { switch (opt->opt_index) { case OPT_mivc2: for (j = 0; j < 32; j++) fixed_regs[j + 48] = 0; for (j = 0; j < 32; j++) call_used_regs[j + 48] = 1; for (j = 6; j < 8; j++) call_used_regs[j + 48] = 0; #define RN(n,s) reg_names[FIRST_CCR_REGNO + n] = s RN (0, "$csar0"); RN (1, "$cc"); RN (4, "$cofr0"); RN (5, "$cofr1"); RN (6, "$cofa0"); RN (7, "$cofa1"); RN (15, "$csar1"); RN (16, "$acc0_0"); RN (17, "$acc0_1"); RN (18, "$acc0_2"); RN (19, "$acc0_3"); RN (20, "$acc0_4"); RN (21, "$acc0_5"); RN (22, "$acc0_6"); RN (23, "$acc0_7"); RN (24, "$acc1_0"); RN (25, "$acc1_1"); RN (26, "$acc1_2"); RN (27, "$acc1_3"); RN (28, "$acc1_4"); RN (29, "$acc1_5"); RN (30, "$acc1_6"); RN (31, "$acc1_7"); #undef RN break; default: gcc_unreachable (); } } if (flag_pic == 1) warning (OPT_fpic, "-fpic is not supported"); if (flag_pic == 2) warning (OPT_fPIC, "-fPIC is not supported"); if (TARGET_S && TARGET_M) error ("only one of -ms and -mm may be given"); if (TARGET_S && TARGET_L) error ("only one of -ms and -ml may be given"); if (TARGET_M && TARGET_L) error ("only one of -mm and -ml may be given"); if (TARGET_S && global_options_set.x_mep_tiny_cutoff) error ("only one of -ms and -mtiny= may be given"); if (TARGET_M && global_options_set.x_mep_tiny_cutoff) error ("only one of -mm and -mtiny= may be given"); if (TARGET_OPT_CLIP && ! TARGET_OPT_MINMAX) warning (0, "-mclip currently has no effect without -mminmax"); if (mep_const_section) { if (strcmp (mep_const_section, "tiny") != 0 && strcmp (mep_const_section, "near") != 0 && strcmp (mep_const_section, "far") != 0) error ("-mc= must be -mc=tiny, -mc=near, or -mc=far"); } if (TARGET_S) mep_tiny_cutoff = 65536; if (TARGET_M) mep_tiny_cutoff = 0; if (TARGET_L && ! global_options_set.x_mep_tiny_cutoff) mep_tiny_cutoff = 0; if (TARGET_64BIT_CR_REGS) flag_split_wide_types = 0; init_machine_status = mep_init_machine_status; mep_init_intrinsics (); } /* Pattern Support - constraints, predicates, expanders. */ /* MEP has very few instructions that can refer to the span of addresses used by symbols, so it's common to check for them. 
*/ static bool symbol_p (rtx x) { int c = GET_CODE (x); return (c == CONST_INT || c == CONST || c == SYMBOL_REF); } static bool symbolref_p (rtx x) { int c; if (GET_CODE (x) != MEM) return false; c = GET_CODE (XEXP (x, 0)); return (c == CONST_INT || c == CONST || c == SYMBOL_REF); } /* static const char *reg_class_names[] = REG_CLASS_NAMES; */ #define GEN_REG(R, STRICT) \ (GR_REGNO_P (R) \ || (!STRICT \ && ((R) == ARG_POINTER_REGNUM \ || (R) >= FIRST_PSEUDO_REGISTER))) static char pattern[12], *patternp; static GTY(()) rtx patternr[12]; #define RTX_IS(x) (strcmp (pattern, x) == 0) static void encode_pattern_1 (rtx x) { int i; if (patternp == pattern + sizeof (pattern) - 2) { patternp[-1] = '?'; return; } patternr[patternp-pattern] = x; switch (GET_CODE (x)) { case REG: *patternp++ = 'r'; break; case MEM: *patternp++ = 'm'; case CONST: encode_pattern_1 (XEXP(x, 0)); break; case PLUS: *patternp++ = '+'; encode_pattern_1 (XEXP(x, 0)); encode_pattern_1 (XEXP(x, 1)); break; case LO_SUM: *patternp++ = 'L'; encode_pattern_1 (XEXP(x, 0)); encode_pattern_1 (XEXP(x, 1)); break; case HIGH: *patternp++ = 'H'; encode_pattern_1 (XEXP(x, 0)); break; case SYMBOL_REF: *patternp++ = 's'; break; case LABEL_REF: *patternp++ = 'l'; break; case CONST_INT: case CONST_DOUBLE: *patternp++ = 'i'; break; case UNSPEC: *patternp++ = 'u'; *patternp++ = '0' + XCINT(x, 1, UNSPEC); for (i=0; i<XVECLEN (x, 0); i++) encode_pattern_1 (XVECEXP (x, 0, i)); break; case USE: *patternp++ = 'U'; break; default: *patternp++ = '?'; #if 0 fprintf (stderr, "can't encode pattern %s\n", GET_RTX_NAME(GET_CODE(x))); debug_rtx (x); gcc_unreachable (); #endif break; } } static void encode_pattern (rtx x) { patternp = pattern; encode_pattern_1 (x); *patternp = 0; } int mep_section_tag (rtx x) { const char *name; while (1) { switch (GET_CODE (x)) { case MEM: case CONST: x = XEXP (x, 0); break; case UNSPEC: x = XVECEXP (x, 0, 0); break; case PLUS: if (GET_CODE (XEXP (x, 1)) != CONST_INT) return 0; x = XEXP (x, 0); break; default: goto done; } } done: if (GET_CODE (x) != SYMBOL_REF) return 0; name = XSTR (x, 0); if (name[0] == '@' && name[2] == '.') { if (name[1] == 'i' || name[1] == 'I') { if (name[1] == 'I') return 'f'; /* near */ return 'n'; /* far */ } return name[1]; } return 0; } int mep_regno_reg_class (int regno) { switch (regno) { case SP_REGNO: return SP_REGS; case TP_REGNO: return TP_REGS; case GP_REGNO: return GP_REGS; case 0: return R0_REGS; case HI_REGNO: return HI_REGS; case LO_REGNO: return LO_REGS; case ARG_POINTER_REGNUM: return GENERAL_REGS; } if (GR_REGNO_P (regno)) return regno < FIRST_GR_REGNO + 8 ? TPREL_REGS : GENERAL_REGS; if (CONTROL_REGNO_P (regno)) return CONTROL_REGS; if (CR_REGNO_P (regno)) { int i, j; /* Search for the register amongst user-defined subclasses of the coprocessor registers. */ for (i = USER0_REGS; i <= USER3_REGS; ++i) { if (! TEST_HARD_REG_BIT (reg_class_contents[i], regno)) continue; for (j = 0; j < N_REG_CLASSES; ++j) { enum reg_class sub = reg_class_subclasses[i][j]; if (sub == LIM_REG_CLASSES) return i; if (TEST_HARD_REG_BIT (reg_class_contents[sub], regno)) break; } } return LOADABLE_CR_REGNO_P (regno) ? 
LOADABLE_CR_REGS : CR_REGS; } if (CCR_REGNO_P (regno)) return CCR_REGS; gcc_assert (regno >= FIRST_SHADOW_REGISTER && regno <= LAST_SHADOW_REGISTER); return NO_REGS; } static bool const_in_range (rtx x, int minv, int maxv) { return (GET_CODE (x) == CONST_INT && INTVAL (x) >= minv && INTVAL (x) <= maxv); } /* Given three integer registers DEST, SRC1 and SRC2, return an rtx X such that "mulr DEST,X" will calculate DEST = SRC1 * SRC2. If a move is needed, emit it before INSN if INSN is nonnull, otherwise emit it at the end of the insn stream. */ rtx mep_mulr_source (rtx_insn *insn, rtx dest, rtx src1, rtx src2) { if (rtx_equal_p (dest, src1)) return src2; else if (rtx_equal_p (dest, src2)) return src1; else { if (insn == 0) emit_insn (gen_movsi (copy_rtx (dest), src1)); else emit_insn_before (gen_movsi (copy_rtx (dest), src1), insn); return src2; } } /* Replace INSN's pattern with PATTERN, a multiplication PARALLEL. Change the last element of PATTERN from (clobber (scratch:SI)) to (clobber (reg:SI HI_REGNO)). */ static void mep_rewrite_mult (rtx_insn *insn, rtx pattern) { rtx hi_clobber; hi_clobber = XVECEXP (pattern, 0, XVECLEN (pattern, 0) - 1); XEXP (hi_clobber, 0) = gen_rtx_REG (SImode, HI_REGNO); PATTERN (insn) = pattern; INSN_CODE (insn) = -1; } /* Subroutine of mep_reuse_lo_p. Rewrite instruction INSN so that it calculates SRC1 * SRC2 and stores the result in $lo. Also make it store the result in DEST if nonnull. */ static void mep_rewrite_mulsi3 (rtx_insn *insn, rtx dest, rtx src1, rtx src2) { rtx lo, pattern; lo = gen_rtx_REG (SImode, LO_REGNO); if (dest) pattern = gen_mulsi3r (lo, dest, copy_rtx (dest), mep_mulr_source (insn, dest, src1, src2)); else pattern = gen_mulsi3_lo (lo, src1, src2); mep_rewrite_mult (insn, pattern); } /* Like mep_rewrite_mulsi3, but calculate SRC1 * SRC2 + SRC3. First copy SRC3 into $lo, then use either madd or maddr. The move into $lo will be deleted by a peephole2 if SRC3 is already in $lo. */ static void mep_rewrite_maddsi3 (rtx_insn *insn, rtx dest, rtx src1, rtx src2, rtx src3) { rtx lo, pattern; lo = gen_rtx_REG (SImode, LO_REGNO); emit_insn_before (gen_movsi (copy_rtx (lo), src3), insn); if (dest) pattern = gen_maddsi3r (lo, dest, copy_rtx (dest), mep_mulr_source (insn, dest, src1, src2), copy_rtx (lo)); else pattern = gen_maddsi3_lo (lo, src1, src2, copy_rtx (lo)); mep_rewrite_mult (insn, pattern); } /* Return true if $lo has the same value as integer register GPR when instruction INSN is reached. If necessary, rewrite the instruction that sets $lo so that it uses a proper SET, not a CLOBBER. LO is an rtx for (reg:SI LO_REGNO). This function is intended to be used by the peephole2 pass. Since that pass goes from the end of a basic block to the beginning, and propagates liveness information on the way, there is no need to update register notes here. If GPR_DEAD_P is true on entry, and this function returns true, then the caller will replace _every_ use of GPR in and after INSN with LO. This means that if the instruction that sets $lo is a mulr- or maddr-type instruction, we can rewrite it to use mul or madd instead. In combination with the copy progagation pass, this allows us to replace sequences like: mov GPR,R1 mulr GPR,R2 with: mul R1,R2 if GPR is no longer used. 
*/ static bool mep_reuse_lo_p_1 (rtx lo, rtx gpr, rtx_insn *insn, bool gpr_dead_p) { do { insn = PREV_INSN (insn); if (INSN_P (insn)) switch (recog_memoized (insn)) { case CODE_FOR_mulsi3_1: extract_insn (insn); if (rtx_equal_p (recog_data.operand[0], gpr)) { mep_rewrite_mulsi3 (insn, gpr_dead_p ? NULL : recog_data.operand[0], recog_data.operand[1], recog_data.operand[2]); return true; } return false; case CODE_FOR_maddsi3: extract_insn (insn); if (rtx_equal_p (recog_data.operand[0], gpr)) { mep_rewrite_maddsi3 (insn, gpr_dead_p ? NULL : recog_data.operand[0], recog_data.operand[1], recog_data.operand[2], recog_data.operand[3]); return true; } return false; case CODE_FOR_mulsi3r: case CODE_FOR_maddsi3r: extract_insn (insn); return rtx_equal_p (recog_data.operand[1], gpr); default: if (reg_set_p (lo, insn) || reg_set_p (gpr, insn) || volatile_insn_p (PATTERN (insn))) return false; if (gpr_dead_p && reg_referenced_p (gpr, PATTERN (insn))) gpr_dead_p = false; break; } } while (!NOTE_INSN_BASIC_BLOCK_P (insn)); return false; } /* A wrapper around mep_reuse_lo_p_1 that preserves recog_data. */ bool mep_reuse_lo_p (rtx lo, rtx gpr, rtx_insn *insn, bool gpr_dead_p) { bool result = mep_reuse_lo_p_1 (lo, gpr, insn, gpr_dead_p); extract_insn (insn); return result; } /* Return true if SET can be turned into a post-modify load or store that adds OFFSET to GPR. In other words, return true if SET can be changed into: (parallel [SET (set GPR (plus:SI GPR OFFSET))]). It's OK to change SET to an equivalent operation in order to make it match. */ static bool mep_use_post_modify_for_set_p (rtx set, rtx gpr, rtx offset) { rtx *reg, *mem; unsigned int reg_bytes, mem_bytes; machine_mode reg_mode, mem_mode; /* Only simple SETs can be converted. */ if (GET_CODE (set) != SET) return false; /* Point REG to what we hope will be the register side of the set and MEM to what we hope will be the memory side. */ if (GET_CODE (SET_DEST (set)) == MEM) { mem = &SET_DEST (set); reg = &SET_SRC (set); } else { reg = &SET_DEST (set); mem = &SET_SRC (set); if (GET_CODE (*mem) == SIGN_EXTEND) mem = &XEXP (*mem, 0); } /* Check that *REG is a suitable coprocessor register. */ if (GET_CODE (*reg) != REG || !LOADABLE_CR_REGNO_P (REGNO (*reg))) return false; /* Check that *MEM is a suitable memory reference. */ if (GET_CODE (*mem) != MEM || !rtx_equal_p (XEXP (*mem, 0), gpr)) return false; /* Get the number of bytes in each operand. */ mem_bytes = GET_MODE_SIZE (GET_MODE (*mem)); reg_bytes = GET_MODE_SIZE (GET_MODE (*reg)); /* Check that OFFSET is suitably aligned. */ if (INTVAL (offset) & (mem_bytes - 1)) return false; /* Convert *MEM to a normal integer mode. */ mem_mode = mode_for_size (mem_bytes * BITS_PER_UNIT, MODE_INT, 0); *mem = change_address (*mem, mem_mode, NULL); /* Adjust *REG as well. */ *reg = shallow_copy_rtx (*reg); if (reg == &SET_DEST (set) && reg_bytes < UNITS_PER_WORD) { /* SET is a subword load. Convert it to an explicit extension. */ PUT_MODE (*reg, SImode); *mem = gen_rtx_SIGN_EXTEND (SImode, *mem); } else { reg_mode = mode_for_size (reg_bytes * BITS_PER_UNIT, MODE_INT, 0); PUT_MODE (*reg, reg_mode); } return true; } /* Return the effect of frame-related instruction INSN. */ static rtx mep_frame_expr (rtx_insn *insn) { rtx note, expr; note = find_reg_note (insn, REG_FRAME_RELATED_EXPR, 0); expr = (note != 0 ? XEXP (note, 0) : copy_rtx (PATTERN (insn))); RTX_FRAME_RELATED_P (expr) = 1; return expr; } /* Merge instructions INSN1 and INSN2 using a PARALLEL. 
Store the new pattern in INSN1; INSN2 will be deleted by the caller. */ static void mep_make_parallel (rtx_insn *insn1, rtx_insn *insn2) { rtx expr; if (RTX_FRAME_RELATED_P (insn2)) { expr = mep_frame_expr (insn2); if (RTX_FRAME_RELATED_P (insn1)) expr = gen_rtx_SEQUENCE (VOIDmode, gen_rtvec (2, mep_frame_expr (insn1), expr)); set_unique_reg_note (insn1, REG_FRAME_RELATED_EXPR, expr); RTX_FRAME_RELATED_P (insn1) = 1; } PATTERN (insn1) = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, PATTERN (insn1), PATTERN (insn2))); INSN_CODE (insn1) = -1; } /* SET_INSN is an instruction that adds OFFSET to REG. Go back through the basic block to see if any previous load or store instruction can be persuaded to do SET_INSN as a side-effect. Return true if so. */ static bool mep_use_post_modify_p_1 (rtx_insn *set_insn, rtx reg, rtx offset) { rtx_insn *insn; insn = set_insn; do { insn = PREV_INSN (insn); if (INSN_P (insn)) { if (mep_use_post_modify_for_set_p (PATTERN (insn), reg, offset)) { mep_make_parallel (insn, set_insn); return true; } if (reg_set_p (reg, insn) || reg_referenced_p (reg, PATTERN (insn)) || volatile_insn_p (PATTERN (insn))) return false; } } while (!NOTE_INSN_BASIC_BLOCK_P (insn)); return false; } /* A wrapper around mep_use_post_modify_p_1 that preserves recog_data. */ bool mep_use_post_modify_p (rtx_insn *insn, rtx reg, rtx offset) { bool result = mep_use_post_modify_p_1 (insn, reg, offset); extract_insn (insn); return result; } bool mep_allow_clip (rtx ux, rtx lx, int s) { HOST_WIDE_INT u = INTVAL (ux); HOST_WIDE_INT l = INTVAL (lx); int i; if (!TARGET_OPT_CLIP) return false; if (s) { for (i = 0; i < 30; i ++) if ((u == ((HOST_WIDE_INT) 1 << i) - 1) && (l == - ((HOST_WIDE_INT) 1 << i))) return true; } else { if (l != 0) return false; for (i = 0; i < 30; i ++) if ((u == ((HOST_WIDE_INT) 1 << i) - 1)) return true; } return false; } bool mep_bit_position_p (rtx x, bool looking_for) { if (GET_CODE (x) != CONST_INT) return false; switch ((int) INTVAL(x) & 0xff) { case 0x01: case 0x02: case 0x04: case 0x08: case 0x10: case 0x20: case 0x40: case 0x80: return looking_for; case 0xfe: case 0xfd: case 0xfb: case 0xf7: case 0xef: case 0xdf: case 0xbf: case 0x7f: return !looking_for; } return false; } static bool move_needs_splitting (rtx dest, rtx src, machine_mode mode ATTRIBUTE_UNUSED) { int s = mep_section_tag (src); while (1) { if (GET_CODE (src) == CONST || GET_CODE (src) == MEM) src = XEXP (src, 0); else if (GET_CODE (src) == SYMBOL_REF || GET_CODE (src) == LABEL_REF || GET_CODE (src) == PLUS) break; else return false; } if (s == 'f' || (GET_CODE (src) == PLUS && GET_CODE (XEXP (src, 1)) == CONST_INT && (INTVAL (XEXP (src, 1)) < -65536 || INTVAL (XEXP (src, 1)) > 0xffffff)) || (GET_CODE (dest) == REG && REGNO (dest) > 7 && REGNO (dest) < FIRST_PSEUDO_REGISTER)) return true; return false; } bool mep_split_mov (rtx *operands, int symbolic) { if (symbolic) { if (move_needs_splitting (operands[0], operands[1], SImode)) return true; return false; } if (GET_CODE (operands[1]) != CONST_INT) return false; if (constraint_satisfied_p (operands[1], CONSTRAINT_I) || constraint_satisfied_p (operands[1], CONSTRAINT_J) || constraint_satisfied_p (operands[1], CONSTRAINT_O)) return false; if (((!reload_completed && !reload_in_progress) || (REG_P (operands[0]) && REGNO (operands[0]) < 8)) && constraint_satisfied_p (operands[1], CONSTRAINT_K)) return false; return true; } /* Irritatingly, the "jsrv" insn *toggles* PSW.OM rather than set it to one specific value. 
So the insn chosen depends on whether the source and destination modes match. */ bool mep_vliw_mode_match (rtx tgt) { bool src_vliw = mep_vliw_function_p (cfun->decl); bool tgt_vliw = INTVAL (tgt); return src_vliw == tgt_vliw; } /* Like the above, but also test for near/far mismatches. */ bool mep_vliw_jmp_match (rtx tgt) { bool src_vliw = mep_vliw_function_p (cfun->decl); bool tgt_vliw = INTVAL (tgt); if (mep_section_tag (DECL_RTL (cfun->decl)) == 'f') return false; return src_vliw == tgt_vliw; } bool mep_multi_slot (rtx_insn *x) { return get_attr_slot (x) == SLOT_MULTI; } /* Implement TARGET_LEGITIMATE_CONSTANT_P. */ static bool mep_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x) { /* We can't convert symbol values to gp- or tp-rel values after reload, as reload might have used $gp or $tp for other purposes. */ if (GET_CODE (x) == SYMBOL_REF && (reload_in_progress || reload_completed)) { char e = mep_section_tag (x); return (e != 't' && e != 'b'); } return 1; } /* Be careful not to use macros that need to be compiled one way for strict, and another way for not-strict, like REG_OK_FOR_BASE_P. */ bool mep_legitimate_address (machine_mode mode, rtx x, int strict) { int the_tag; #define DEBUG_LEGIT 0 #if DEBUG_LEGIT fprintf (stderr, "legit: mode %s strict %d ", mode_name[mode], strict); debug_rtx (x); #endif if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == REG && GEN_REG (REGNO (XEXP (x, 0)), strict) && CONSTANT_P (XEXP (x, 1))) { if (GET_MODE_SIZE (mode) > 4) { /* We will end up splitting this, and lo_sums are not offsettable for us. */ #if DEBUG_LEGIT fprintf(stderr, " - nope, %%lo(sym)[reg] not splittable\n"); #endif return false; } #if DEBUG_LEGIT fprintf (stderr, " - yup, %%lo(sym)[reg]\n"); #endif return true; } if (GET_CODE (x) == REG && GEN_REG (REGNO (x), strict)) { #if DEBUG_LEGIT fprintf (stderr, " - yup, [reg]\n"); #endif return true; } if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == REG && GEN_REG (REGNO (XEXP (x, 0)), strict) && const_in_range (XEXP (x, 1), -32768, 32767)) { #if DEBUG_LEGIT fprintf (stderr, " - yup, [reg+const]\n"); #endif return true; } if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == REG && GEN_REG (REGNO (XEXP (x, 0)), strict) && GET_CODE (XEXP (x, 1)) == CONST && (GET_CODE (XEXP (XEXP (x, 1), 0)) == UNSPEC || (GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 0)) == UNSPEC && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT))) { #if DEBUG_LEGIT fprintf (stderr, " - yup, [reg+unspec]\n"); #endif return true; } the_tag = mep_section_tag (x); if (the_tag == 'f') { #if DEBUG_LEGIT fprintf (stderr, " - nope, [far]\n"); #endif return false; } if (mode == VOIDmode && GET_CODE (x) == SYMBOL_REF) { #if DEBUG_LEGIT fprintf (stderr, " - yup, call [symbol]\n"); #endif return true; } if ((mode == SImode || mode == SFmode) && CONSTANT_P (x) && mep_legitimate_constant_p (mode, x) && the_tag != 't' && the_tag != 'b') { if (GET_CODE (x) != CONST_INT || (INTVAL (x) <= 0xfffff && INTVAL (x) >= 0 && (INTVAL (x) % 4) == 0)) { #if DEBUG_LEGIT fprintf (stderr, " - yup, [const]\n"); #endif return true; } } #if DEBUG_LEGIT fprintf (stderr, " - nope.\n"); #endif return false; } int mep_legitimize_reload_address (rtx *x, machine_mode mode, int opnum, int type_i, int ind_levels ATTRIBUTE_UNUSED) { enum reload_type type = (enum reload_type) type_i; if (GET_CODE (*x) == PLUS && GET_CODE (XEXP (*x, 0)) == MEM && GET_CODE (XEXP (*x, 1)) == REG) { /* GCC will by default copy the MEM into a REG, which results in an 
invalid address. For us, the best thing to do is move the whole expression to a REG. */ push_reload (*x, NULL_RTX, x, NULL, GENERAL_REGS, mode, VOIDmode, 0, 0, opnum, type); return 1; } if (GET_CODE (*x) == PLUS && GET_CODE (XEXP (*x, 0)) == SYMBOL_REF && GET_CODE (XEXP (*x, 1)) == CONST_INT) { char e = mep_section_tag (XEXP (*x, 0)); if (e != 't' && e != 'b') { /* GCC thinks that (sym+const) is a valid address. Well, sometimes it is, this time it isn't. The best thing to do is reload the symbol to a register, since reg+int tends to work, and we can't just add the symbol and constant anyway. */ push_reload (XEXP (*x, 0), NULL_RTX, &(XEXP(*x, 0)), NULL, GENERAL_REGS, mode, VOIDmode, 0, 0, opnum, type); return 1; } } return 0; } int mep_core_address_length (rtx_insn *insn, int opn) { rtx set = single_set (insn); rtx mem = XEXP (set, opn); rtx other = XEXP (set, 1-opn); rtx addr = XEXP (mem, 0); if (register_operand (addr, Pmode)) return 2; if (GET_CODE (addr) == PLUS) { rtx addend = XEXP (addr, 1); gcc_assert (REG_P (XEXP (addr, 0))); switch (REGNO (XEXP (addr, 0))) { case STACK_POINTER_REGNUM: if (GET_MODE_SIZE (GET_MODE (mem)) == 4 && mep_imm7a4_operand (addend, VOIDmode)) return 2; break; case 13: /* TP */ gcc_assert (REG_P (other)); if (REGNO (other) >= 8) break; if (GET_CODE (addend) == CONST && GET_CODE (XEXP (addend, 0)) == UNSPEC && XINT (XEXP (addend, 0), 1) == UNS_TPREL) return 2; if (GET_CODE (addend) == CONST_INT && INTVAL (addend) >= 0 && INTVAL (addend) <= 127 && INTVAL (addend) % GET_MODE_SIZE (GET_MODE (mem)) == 0) return 2; break; } } return 4; } int mep_cop_address_length (rtx_insn *insn, int opn) { rtx set = single_set (insn); rtx mem = XEXP (set, opn); rtx addr = XEXP (mem, 0); if (GET_CODE (mem) != MEM) return 2; if (register_operand (addr, Pmode)) return 2; if (GET_CODE (addr) == POST_INC) return 2; return 4; } #define DEBUG_EXPAND_MOV 0 bool mep_expand_mov (rtx *operands, machine_mode mode) { int i, t; int tag[2]; rtx tpsym, tpoffs; int post_reload = 0; tag[0] = mep_section_tag (operands[0]); tag[1] = mep_section_tag (operands[1]); if (!reload_in_progress && !reload_completed && GET_CODE (operands[0]) != REG && GET_CODE (operands[0]) != SUBREG && GET_CODE (operands[1]) != REG && GET_CODE (operands[1]) != SUBREG) operands[1] = copy_to_mode_reg (mode, operands[1]); #if DEBUG_EXPAND_MOV fprintf(stderr, "expand move %s %d\n", mode_name[mode], reload_in_progress || reload_completed); debug_rtx (operands[0]); debug_rtx (operands[1]); #endif if (mode == DImode || mode == DFmode) return false; if (reload_in_progress || reload_completed) { rtx r; if (GET_CODE (operands[0]) == REG && REGNO (operands[0]) == TP_REGNO) cfun->machine->reload_changes_tp = true; if (tag[0] == 't' || tag[1] == 't') { r = has_hard_reg_initial_val (Pmode, GP_REGNO); if (!r || GET_CODE (r) != REG || REGNO (r) != GP_REGNO) post_reload = 1; } if (tag[0] == 'b' || tag[1] == 'b') { r = has_hard_reg_initial_val (Pmode, TP_REGNO); if (!r || GET_CODE (r) != REG || REGNO (r) != TP_REGNO) post_reload = 1; } if (cfun->machine->reload_changes_tp == true) post_reload = 1; } if (!post_reload) { rtx n; if (symbol_p (operands[1])) { t = mep_section_tag (operands[1]); if (t == 'b' || t == 't') { if (GET_CODE (operands[1]) == SYMBOL_REF) { tpsym = operands[1]; n = gen_rtx_UNSPEC (mode, gen_rtvec (1, operands[1]), t == 'b' ? 
UNS_TPREL : UNS_GPREL); n = gen_rtx_CONST (mode, n); } else if (GET_CODE (operands[1]) == CONST && GET_CODE (XEXP (operands[1], 0)) == PLUS && GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF && GET_CODE (XEXP (XEXP (operands[1], 0), 1)) == CONST_INT) { tpsym = XEXP (XEXP (operands[1], 0), 0); tpoffs = XEXP (XEXP (operands[1], 0), 1); n = gen_rtx_UNSPEC (mode, gen_rtvec (1, tpsym), t == 'b' ? UNS_TPREL : UNS_GPREL); n = gen_rtx_PLUS (mode, n, tpoffs); n = gen_rtx_CONST (mode, n); } else if (GET_CODE (operands[1]) == CONST && GET_CODE (XEXP (operands[1], 0)) == UNSPEC) return false; else { error ("unusual TP-relative address"); return false; } n = gen_rtx_PLUS (mode, (t == 'b' ? mep_tp_rtx () : mep_gp_rtx ()), n); n = emit_insn (gen_rtx_SET (operands[0], n)); #if DEBUG_EXPAND_MOV fprintf(stderr, "mep_expand_mov emitting "); debug_rtx(n); #endif return true; } } for (i=0; i < 2; i++) { t = mep_section_tag (operands[i]); if (GET_CODE (operands[i]) == MEM && (t == 'b' || t == 't')) { rtx sym, n, r; int u; sym = XEXP (operands[i], 0); if (GET_CODE (sym) == CONST && GET_CODE (XEXP (sym, 0)) == UNSPEC) sym = XVECEXP (XEXP (sym, 0), 0, 0); if (t == 'b') { r = mep_tp_rtx (); u = UNS_TPREL; } else { r = mep_gp_rtx (); u = UNS_GPREL; } n = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, sym), u); n = gen_rtx_CONST (Pmode, n); n = gen_rtx_PLUS (Pmode, r, n); operands[i] = replace_equiv_address (operands[i], n); } } } if ((GET_CODE (operands[1]) != REG && MEP_CONTROL_REG (operands[0])) || (GET_CODE (operands[0]) != REG && MEP_CONTROL_REG (operands[1]))) { rtx temp; #if DEBUG_EXPAND_MOV fprintf (stderr, "cr-mem, forcing op1 to reg\n"); #endif temp = gen_reg_rtx (mode); emit_move_insn (temp, operands[1]); operands[1] = temp; } if (symbolref_p (operands[0]) && (mep_section_tag (XEXP (operands[0], 0)) == 'f' || (GET_MODE_SIZE (mode) != 4))) { rtx temp; gcc_assert (!reload_in_progress && !reload_completed); temp = force_reg (Pmode, XEXP (operands[0], 0)); operands[0] = replace_equiv_address (operands[0], temp); emit_move_insn (operands[0], operands[1]); return true; } if (!post_reload && (tag[1] == 't' || tag[1] == 'b')) tag[1] = 0; if (symbol_p (operands[1]) && (tag[1] == 'f' || tag[1] == 't' || tag[1] == 'b')) { emit_insn (gen_movsi_topsym_s (operands[0], operands[1])); emit_insn (gen_movsi_botsym_s (operands[0], operands[0], operands[1])); return true; } if (symbolref_p (operands[1]) && (tag[1] == 'f' || tag[1] == 't' || tag[1] == 'b')) { rtx temp; if (reload_in_progress || reload_completed) temp = operands[0]; else temp = gen_reg_rtx (Pmode); emit_insn (gen_movsi_topsym_s (temp, operands[1])); emit_insn (gen_movsi_botsym_s (temp, temp, operands[1])); emit_move_insn (operands[0], replace_equiv_address (operands[1], temp)); return true; } return false; } /* Cases where the pattern can't be made to use at all. */ bool mep_mov_ok (rtx *operands, machine_mode mode ATTRIBUTE_UNUSED) { int i; #define DEBUG_MOV_OK 0 #if DEBUG_MOV_OK fprintf (stderr, "mep_mov_ok %s %c=%c\n", mode_name[mode], mep_section_tag (operands[0]), mep_section_tag (operands[1])); debug_rtx (operands[0]); debug_rtx (operands[1]); #endif /* We want the movh patterns to get these. */ if (GET_CODE (operands[1]) == HIGH) return false; /* We can't store a register to a far variable without using a scratch register to hold the address. Using far variables should be split by mep_emit_mov anyway. 
*/ if (mep_section_tag (operands[0]) == 'f' || mep_section_tag (operands[1]) == 'f') { #if DEBUG_MOV_OK fprintf (stderr, " - no, f\n"); #endif return false; } i = mep_section_tag (operands[1]); if ((i == 'b' || i == 't') && !reload_completed && !reload_in_progress) /* These are supposed to be generated with adds of the appropriate register. During and after reload, however, we allow them to be accessed as normal symbols because adding a dependency on the base register now might cause problems. */ { #if DEBUG_MOV_OK fprintf (stderr, " - no, bt\n"); #endif return false; } /* The only moves we can allow involve at least one general register, so require it. */ for (i = 0; i < 2; i ++) { /* Allow subregs too, before reload. */ rtx x = operands[i]; if (GET_CODE (x) == SUBREG) x = XEXP (x, 0); if (GET_CODE (x) == REG && ! MEP_CONTROL_REG (x)) { #if DEBUG_MOV_OK fprintf (stderr, " - ok\n"); #endif return true; } } #if DEBUG_MOV_OK fprintf (stderr, " - no, no gen reg\n"); #endif return false; } #define DEBUG_SPLIT_WIDE_MOVE 0 void mep_split_wide_move (rtx *operands, machine_mode mode) { int i; #if DEBUG_SPLIT_WIDE_MOVE fprintf (stderr, "\n\033[34mmep_split_wide_move\033[0m mode %s\n", mode_name[mode]); debug_rtx (operands[0]); debug_rtx (operands[1]); #endif for (i = 0; i <= 1; i++) { rtx op = operands[i], hi, lo; switch (GET_CODE (op)) { case REG: { unsigned int regno = REGNO (op); if (TARGET_64BIT_CR_REGS && CR_REGNO_P (regno)) { rtx i32; lo = gen_rtx_REG (SImode, regno); i32 = GEN_INT (32); hi = gen_rtx_ZERO_EXTRACT (SImode, gen_rtx_REG (DImode, regno), i32, i32); } else { hi = gen_rtx_REG (SImode, regno + TARGET_LITTLE_ENDIAN); lo = gen_rtx_REG (SImode, regno + TARGET_BIG_ENDIAN); } } break; case CONST_INT: case CONST_DOUBLE: case MEM: hi = operand_subword (op, TARGET_LITTLE_ENDIAN, 0, mode); lo = operand_subword (op, TARGET_BIG_ENDIAN, 0, mode); break; default: gcc_unreachable (); } /* The high part of CR <- GPR moves must be done after the low part. */ operands [i + 4] = lo; operands [i + 2] = hi; } if (reg_mentioned_p (operands[2], operands[5]) || GET_CODE (operands[2]) == ZERO_EXTRACT || GET_CODE (operands[4]) == ZERO_EXTRACT) { rtx tmp; /* Overlapping register pairs -- make sure we don't early-clobber ourselves. */ tmp = operands[2]; operands[2] = operands[4]; operands[4] = tmp; tmp = operands[3]; operands[3] = operands[5]; operands[5] = tmp; } #if DEBUG_SPLIT_WIDE_MOVE fprintf(stderr, "\033[34m"); debug_rtx (operands[2]); debug_rtx (operands[3]); debug_rtx (operands[4]); debug_rtx (operands[5]); fprintf(stderr, "\033[0m"); #endif } /* Emit a setcc instruction in its entirity. 
*/ static bool mep_expand_setcc_1 (enum rtx_code code, rtx dest, rtx op1, rtx op2) { rtx tmp; switch (code) { case GT: case GTU: tmp = op1, op1 = op2, op2 = tmp; code = swap_condition (code); /* FALLTHRU */ case LT: case LTU: op1 = force_reg (SImode, op1); emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, SImode, op1, op2))); return true; case EQ: if (op2 != const0_rtx) op1 = expand_binop (SImode, sub_optab, op1, op2, NULL, 1, OPTAB_WIDEN); mep_expand_setcc_1 (LTU, dest, op1, const1_rtx); return true; case NE: /* Branchful sequence: mov dest, 0 16-bit beq op1, op2, Lover 16-bit (op2 < 16), 32-bit otherwise mov dest, 1 16-bit Branchless sequence: add3 tmp, op1, -op2 32-bit (or mov + sub) sltu3 tmp, tmp, 1 16-bit xor3 dest, tmp, 1 32-bit */ if (optimize_size && op2 != const0_rtx) return false; if (op2 != const0_rtx) op1 = expand_binop (SImode, sub_optab, op1, op2, NULL, 1, OPTAB_WIDEN); op2 = gen_reg_rtx (SImode); mep_expand_setcc_1 (LTU, op2, op1, const1_rtx); emit_insn (gen_rtx_SET (dest, gen_rtx_XOR (SImode, op2, const1_rtx))); return true; case LE: if (GET_CODE (op2) != CONST_INT || INTVAL (op2) == 0x7ffffff) return false; op2 = GEN_INT (INTVAL (op2) + 1); return mep_expand_setcc_1 (LT, dest, op1, op2); case LEU: if (GET_CODE (op2) != CONST_INT || INTVAL (op2) == -1) return false; op2 = GEN_INT (trunc_int_for_mode (INTVAL (op2) + 1, SImode)); return mep_expand_setcc_1 (LTU, dest, op1, op2); case GE: if (GET_CODE (op2) != CONST_INT || INTVAL (op2) == trunc_int_for_mode (0x80000000, SImode)) return false; op2 = GEN_INT (INTVAL (op2) - 1); return mep_expand_setcc_1 (GT, dest, op1, op2); case GEU: if (GET_CODE (op2) != CONST_INT || op2 == const0_rtx) return false; op2 = GEN_INT (trunc_int_for_mode (INTVAL (op2) - 1, SImode)); return mep_expand_setcc_1 (GTU, dest, op1, op2); default: gcc_unreachable (); } } bool mep_expand_setcc (rtx *operands) { rtx dest = operands[0]; enum rtx_code code = GET_CODE (operands[1]); rtx op0 = operands[2]; rtx op1 = operands[3]; return mep_expand_setcc_1 (code, dest, op0, op1); } rtx mep_expand_cbranch (rtx *operands) { enum rtx_code code = GET_CODE (operands[0]); rtx op0 = operands[1]; rtx op1 = operands[2]; rtx tmp; restart: switch (code) { case LT: if (mep_imm4_operand (op1, SImode)) break; tmp = gen_reg_rtx (SImode); gcc_assert (mep_expand_setcc_1 (LT, tmp, op0, op1)); code = NE; op0 = tmp; op1 = const0_rtx; break; case GE: if (mep_imm4_operand (op1, SImode)) break; tmp = gen_reg_rtx (SImode); gcc_assert (mep_expand_setcc_1 (LT, tmp, op0, op1)); code = EQ; op0 = tmp; op1 = const0_rtx; break; case EQ: case NE: if (! mep_reg_or_imm4_operand (op1, SImode)) op1 = force_reg (SImode, op1); break; case LE: case GT: if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0x7fffffff) { op1 = GEN_INT (INTVAL (op1) + 1); code = (code == LE ? LT : GE); goto restart; } tmp = gen_reg_rtx (SImode); gcc_assert (mep_expand_setcc_1 (LT, tmp, op1, op0)); code = (code == LE ? 
EQ : NE); op0 = tmp; op1 = const0_rtx; break; case LTU: if (op1 == const1_rtx) { code = EQ; op1 = const0_rtx; break; } tmp = gen_reg_rtx (SImode); gcc_assert (mep_expand_setcc_1 (LTU, tmp, op0, op1)); code = NE; op0 = tmp; op1 = const0_rtx; break; case LEU: tmp = gen_reg_rtx (SImode); if (mep_expand_setcc_1 (LEU, tmp, op0, op1)) code = NE; else if (mep_expand_setcc_1 (LTU, tmp, op1, op0)) code = EQ; else gcc_unreachable (); op0 = tmp; op1 = const0_rtx; break; case GTU: tmp = gen_reg_rtx (SImode); gcc_assert (mep_expand_setcc_1 (GTU, tmp, op0, op1) || mep_expand_setcc_1 (LTU, tmp, op1, op0)); code = NE; op0 = tmp; op1 = const0_rtx; break; case GEU: tmp = gen_reg_rtx (SImode); if (mep_expand_setcc_1 (GEU, tmp, op0, op1)) code = NE; else if (mep_expand_setcc_1 (LTU, tmp, op0, op1)) code = EQ; else gcc_unreachable (); op0 = tmp; op1 = const0_rtx; break; default: gcc_unreachable (); } return gen_rtx_fmt_ee (code, VOIDmode, op0, op1); } const char * mep_emit_cbranch (rtx *operands, int ne) { if (GET_CODE (operands[1]) == REG) return ne ? "bne\t%0, %1, %l2" : "beq\t%0, %1, %l2"; else if (INTVAL (operands[1]) == 0 && !mep_vliw_function_p(cfun->decl)) return ne ? "bnez\t%0, %l2" : "beqz\t%0, %l2"; else return ne ? "bnei\t%0, %1, %l2" : "beqi\t%0, %1, %l2"; } void mep_expand_call (rtx *operands, int returns_value) { rtx addr = operands[returns_value]; rtx tp = mep_tp_rtx (); rtx gp = mep_gp_rtx (); gcc_assert (GET_CODE (addr) == MEM); addr = XEXP (addr, 0); if (! mep_call_address_operand (addr, VOIDmode)) addr = force_reg (SImode, addr); if (! operands[returns_value+2]) operands[returns_value+2] = const0_rtx; if (returns_value) emit_call_insn (gen_call_value_internal (operands[0], addr, operands[2], operands[3], tp, gp)); else emit_call_insn (gen_call_internal (addr, operands[1], operands[2], tp, gp)); } /* Aliasing Support. */ /* If X is a machine specific address (i.e. a symbol or label being referenced as a displacement from the GOT implemented using an UNSPEC), then return the base term. Otherwise return X. */ rtx mep_find_base_term (rtx x) { rtx base, term; int unspec; if (GET_CODE (x) != PLUS) return x; base = XEXP (x, 0); term = XEXP (x, 1); if (has_hard_reg_initial_val(Pmode, TP_REGNO) && base == mep_tp_rtx ()) unspec = UNS_TPREL; else if (has_hard_reg_initial_val(Pmode, GP_REGNO) && base == mep_gp_rtx ()) unspec = UNS_GPREL; else return x; if (GET_CODE (term) != CONST) return x; term = XEXP (term, 0); if (GET_CODE (term) != UNSPEC || XINT (term, 1) != unspec) return x; return XVECEXP (term, 0, 0); } /* Reload Support. */ /* Return true if the registers in CLASS cannot represent the change from modes FROM to TO. */ bool mep_cannot_change_mode_class (machine_mode from, machine_mode to, enum reg_class regclass) { if (from == to) return false; /* 64-bit COP regs must remain 64-bit COP regs. 
*/ if (TARGET_64BIT_CR_REGS && (regclass == CR_REGS || regclass == LOADABLE_CR_REGS) && (GET_MODE_SIZE (to) < 8 || GET_MODE_SIZE (from) < 8)) return true; return false; } #define MEP_NONGENERAL_CLASS(C) (!reg_class_subset_p (C, GENERAL_REGS)) static bool mep_general_reg (rtx x) { while (GET_CODE (x) == SUBREG) x = XEXP (x, 0); return GET_CODE (x) == REG && GR_REGNO_P (REGNO (x)); } static bool mep_nongeneral_reg (rtx x) { while (GET_CODE (x) == SUBREG) x = XEXP (x, 0); return (GET_CODE (x) == REG && !GR_REGNO_P (REGNO (x)) && REGNO (x) < FIRST_PSEUDO_REGISTER); } static bool mep_general_copro_reg (rtx x) { while (GET_CODE (x) == SUBREG) x = XEXP (x, 0); return (GET_CODE (x) == REG && CR_REGNO_P (REGNO (x))); } static bool mep_nonregister (rtx x) { while (GET_CODE (x) == SUBREG) x = XEXP (x, 0); return (GET_CODE (x) != REG || REGNO (x) >= FIRST_PSEUDO_REGISTER); } #define DEBUG_RELOAD 0 /* Return the secondary reload class needed for moving value X to or from a register in coprocessor register class CLASS. */ static enum reg_class mep_secondary_copro_reload_class (enum reg_class rclass, rtx x) { if (mep_general_reg (x)) /* We can do the move directly if mep_have_core_copro_moves_p, otherwise we need to go through memory. Either way, no secondary register is needed. */ return NO_REGS; if (mep_general_copro_reg (x)) { /* We can do the move directly if mep_have_copro_copro_moves_p. */ if (mep_have_copro_copro_moves_p) return NO_REGS; /* Otherwise we can use a temporary if mep_have_core_copro_moves_p. */ if (mep_have_core_copro_moves_p) return GENERAL_REGS; /* Otherwise we need to do it through memory. No secondary register is needed. */ return NO_REGS; } if (reg_class_subset_p (rclass, LOADABLE_CR_REGS) && constraint_satisfied_p (x, CONSTRAINT_U)) /* X is a memory value that we can access directly. */ return NO_REGS; /* We have to move X into a GPR first and then copy it to the coprocessor register. The move from the GPR to the coprocessor might be done directly or through memory, depending on mep_have_core_copro_moves_p. */ return GENERAL_REGS; } /* Copying X to register in RCLASS. */ enum reg_class mep_secondary_input_reload_class (enum reg_class rclass, machine_mode mode ATTRIBUTE_UNUSED, rtx x) { int rv = NO_REGS; #if DEBUG_RELOAD fprintf (stderr, "secondary input reload copy to %s %s from ", reg_class_names[rclass], mode_name[mode]); debug_rtx (x); #endif if (reg_class_subset_p (rclass, CR_REGS)) rv = mep_secondary_copro_reload_class (rclass, x); else if (MEP_NONGENERAL_CLASS (rclass) && (mep_nonregister (x) || mep_nongeneral_reg (x))) rv = GENERAL_REGS; #if DEBUG_RELOAD fprintf (stderr, " - requires %s\n", reg_class_names[rv]); #endif return (enum reg_class) rv; } /* Copying register in RCLASS to X. */ enum reg_class mep_secondary_output_reload_class (enum reg_class rclass, machine_mode mode ATTRIBUTE_UNUSED, rtx x) { int rv = NO_REGS; #if DEBUG_RELOAD fprintf (stderr, "secondary output reload copy from %s %s to ", reg_class_names[rclass], mode_name[mode]); debug_rtx (x); #endif if (reg_class_subset_p (rclass, CR_REGS)) rv = mep_secondary_copro_reload_class (rclass, x); else if (MEP_NONGENERAL_CLASS (rclass) && (mep_nonregister (x) || mep_nongeneral_reg (x))) rv = GENERAL_REGS; #if DEBUG_RELOAD fprintf (stderr, " - requires %s\n", reg_class_names[rv]); #endif return (enum reg_class) rv; } /* Implement SECONDARY_MEMORY_NEEDED. 
*/ bool mep_secondary_memory_needed (enum reg_class rclass1, enum reg_class rclass2, machine_mode mode ATTRIBUTE_UNUSED) { if (!mep_have_core_copro_moves_p) { if (reg_classes_intersect_p (rclass1, CR_REGS) && reg_classes_intersect_p (rclass2, GENERAL_REGS)) return true; if (reg_classes_intersect_p (rclass2, CR_REGS) && reg_classes_intersect_p (rclass1, GENERAL_REGS)) return true; if (!mep_have_copro_copro_moves_p && reg_classes_intersect_p (rclass1, CR_REGS) && reg_classes_intersect_p (rclass2, CR_REGS)) return true; } return false; } void mep_expand_reload (rtx *operands, machine_mode mode) { /* There are three cases for each direction: register, farsym control, farsym control, nearsym */ int s0 = mep_section_tag (operands[0]) == 'f'; int s1 = mep_section_tag (operands[1]) == 'f'; int c0 = mep_nongeneral_reg (operands[0]); int c1 = mep_nongeneral_reg (operands[1]); int which = (s0 ? 20:0) + (c0 ? 10:0) + (s1 ? 2:0) + (c1 ? 1:0); #if DEBUG_RELOAD fprintf (stderr, "expand_reload %s\n", mode_name[mode]); debug_rtx (operands[0]); debug_rtx (operands[1]); #endif switch (which) { case 00: /* Don't know why this gets here. */ case 02: /* general = far */ emit_move_insn (operands[0], operands[1]); return; case 10: /* cr = mem */ case 11: /* cr = cr */ case 01: /* mem = cr */ case 12: /* cr = far */ emit_move_insn (operands[2], operands[1]); emit_move_insn (operands[0], operands[2]); return; case 20: /* far = general */ emit_move_insn (operands[2], XEXP (operands[1], 0)); emit_move_insn (operands[0], gen_rtx_MEM (mode, operands[2])); return; case 21: /* far = cr */ case 22: /* far = far */ default: fprintf (stderr, "unsupported expand reload case %02d for mode %s\n", which, mode_name[mode]); debug_rtx (operands[0]); debug_rtx (operands[1]); gcc_unreachable (); } } /* Implement PREFERRED_RELOAD_CLASS. See whether X is a constant that can be moved directly into registers 0 to 7, but not into the rest. If so, and if the required class includes registers 0 to 7, restrict it to those registers. */ enum reg_class mep_preferred_reload_class (rtx x, enum reg_class rclass) { switch (GET_CODE (x)) { case CONST_INT: if (INTVAL (x) >= 0x10000 && INTVAL (x) < 0x01000000 && (INTVAL (x) & 0xffff) != 0 && reg_class_subset_p (TPREL_REGS, rclass)) rclass = TPREL_REGS; break; case CONST: case SYMBOL_REF: case LABEL_REF: if (mep_section_tag (x) != 'f' && reg_class_subset_p (TPREL_REGS, rclass)) rclass = TPREL_REGS; break; default: break; } return rclass; } /* Implement REGISTER_MOVE_COST. Return 2 for direct single-register moves, 4 for direct double-register moves, and 1000 for anything that requires a temporary register or temporary stack slot. */ int mep_register_move_cost (machine_mode mode, enum reg_class from, enum reg_class to) { if (mep_have_copro_copro_moves_p && reg_class_subset_p (from, CR_REGS) && reg_class_subset_p (to, CR_REGS)) { if (TARGET_32BIT_CR_REGS && GET_MODE_SIZE (mode) > UNITS_PER_WORD) return 4; return 2; } if (reg_class_subset_p (from, CR_REGS) && reg_class_subset_p (to, CR_REGS)) { if (TARGET_32BIT_CR_REGS && GET_MODE_SIZE (mode) > UNITS_PER_WORD) return 8; return 4; } if (reg_class_subset_p (from, CR_REGS) || reg_class_subset_p (to, CR_REGS)) { if (GET_MODE_SIZE (mode) > UNITS_PER_WORD) return 4; return 2; } if (mep_secondary_memory_needed (from, to, mode)) return 1000; if (MEP_NONGENERAL_CLASS (from) && MEP_NONGENERAL_CLASS (to)) return 1000; if (GET_MODE_SIZE (mode) > 4) return 4; return 2; } /* Functions to save and restore machine-specific function data. 
*/ static struct machine_function * mep_init_machine_status (void) { return ggc_cleared_alloc<machine_function> (); } static rtx mep_allocate_initial_value (rtx reg) { int rss; if (GET_CODE (reg) != REG) return NULL_RTX; if (REGNO (reg) >= FIRST_PSEUDO_REGISTER) return NULL_RTX; /* In interrupt functions, the "initial" values of $gp and $tp are provided by the prologue. They are not necessarily the same as the values that the caller was using. */ if (REGNO (reg) == TP_REGNO || REGNO (reg) == GP_REGNO) if (mep_interrupt_p ()) return NULL_RTX; if (! cfun->machine->reg_save_slot[REGNO(reg)]) { cfun->machine->reg_save_size += 4; cfun->machine->reg_save_slot[REGNO(reg)] = cfun->machine->reg_save_size; } rss = cfun->machine->reg_save_slot[REGNO(reg)]; return gen_rtx_MEM (SImode, plus_constant (Pmode, arg_pointer_rtx, -rss)); } rtx mep_return_addr_rtx (int count) { if (count != 0) return const0_rtx; return get_hard_reg_initial_val (Pmode, LP_REGNO); } static rtx mep_tp_rtx (void) { return get_hard_reg_initial_val (Pmode, TP_REGNO); } static rtx mep_gp_rtx (void) { return get_hard_reg_initial_val (Pmode, GP_REGNO); } static bool mep_interrupt_p (void) { if (cfun->machine->interrupt_handler == 0) { int interrupt_handler = (lookup_attribute ("interrupt", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE); cfun->machine->interrupt_handler = interrupt_handler ? 2 : 1; } return cfun->machine->interrupt_handler == 2; } static bool mep_disinterrupt_p (void) { if (cfun->machine->disable_interrupts == 0) { int disable_interrupts = (lookup_attribute ("disinterrupt", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE); cfun->machine->disable_interrupts = disable_interrupts ? 2 : 1; } return cfun->machine->disable_interrupts == 2; } /* Frame/Epilog/Prolog Related. */ static bool mep_reg_set_p (rtx reg, rtx insn) { /* Similar to reg_set_p in rtlanal.c, but we ignore calls */ if (INSN_P (insn)) { if (FIND_REG_INC_NOTE (insn, reg)) return true; insn = PATTERN (insn); } if (GET_CODE (insn) == SET && GET_CODE (XEXP (insn, 0)) == REG && GET_CODE (XEXP (insn, 1)) == REG && REGNO (XEXP (insn, 0)) == REGNO (XEXP (insn, 1))) return false; return set_of (reg, insn) != NULL_RTX; } #define MEP_SAVES_UNKNOWN 0 #define MEP_SAVES_YES 1 #define MEP_SAVES_MAYBE 2 #define MEP_SAVES_NO 3 static bool mep_reg_set_in_function (int regno) { rtx reg; rtx_insn *insn; if (mep_interrupt_p () && df_regs_ever_live_p(regno)) return true; if (regno == LP_REGNO && (profile_arc_flag > 0 || profile_flag > 0)) return true; push_topmost_sequence (); insn = get_insns (); pop_topmost_sequence (); if (!insn) return false; reg = gen_rtx_REG (SImode, regno); for (insn = NEXT_INSN (insn); insn; insn = NEXT_INSN (insn)) if (INSN_P (insn) && mep_reg_set_p (reg, insn)) return true; return false; } static bool mep_asm_without_operands_p (void) { if (cfun->machine->asms_without_operands == 0) { rtx_insn *insn; push_topmost_sequence (); insn = get_insns (); pop_topmost_sequence (); cfun->machine->asms_without_operands = 1; while (insn) { if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == ASM_INPUT) { cfun->machine->asms_without_operands = 2; break; } insn = NEXT_INSN (insn); } } return cfun->machine->asms_without_operands == 2; } /* Interrupt functions save/restore every call-preserved register, and any call-used register it uses (or all if it calls any function, since they may get clobbered there too). Here we check to see which call-used registers need saving. 
*/ #define IVC2_ISAVED_REG(r) (TARGET_IVC2 \ && (r == FIRST_CCR_REGNO + 1 \ || (r >= FIRST_CCR_REGNO + 8 && r <= FIRST_CCR_REGNO + 11) \ || (r >= FIRST_CCR_REGNO + 16 && r <= FIRST_CCR_REGNO + 31))) static bool mep_interrupt_saved_reg (int r) { if (!mep_interrupt_p ()) return false; if (r == REGSAVE_CONTROL_TEMP || (TARGET_64BIT_CR_REGS && TARGET_COP && r == REGSAVE_CONTROL_TEMP+1)) return true; if (mep_asm_without_operands_p () && (!fixed_regs[r] || (r == RPB_REGNO || r == RPE_REGNO || r == RPC_REGNO || r == LP_REGNO) || IVC2_ISAVED_REG (r))) return true; if (!crtl->is_leaf) /* Function calls mean we need to save $lp. */ if (r == LP_REGNO || IVC2_ISAVED_REG (r)) return true; if (!crtl->is_leaf || cfun->machine->doloop_tags > 0) /* The interrupt handler might use these registers for repeat blocks, or it might call a function that does so. */ if (r == RPB_REGNO || r == RPE_REGNO || r == RPC_REGNO) return true; if (crtl->is_leaf && call_used_regs[r] && !df_regs_ever_live_p(r)) return false; /* Functions we call might clobber these. */ if (call_used_regs[r] && !fixed_regs[r]) return true; /* Additional registers that need to be saved for IVC2. */ if (IVC2_ISAVED_REG (r)) return true; return false; } static bool mep_call_saves_register (int r) { if (! cfun->machine->frame_locked) { int rv = MEP_SAVES_NO; if (cfun->machine->reg_save_slot[r]) rv = MEP_SAVES_YES; else if (r == LP_REGNO && (profile_arc_flag > 0 || profile_flag > 0)) rv = MEP_SAVES_YES; else if (r == FRAME_POINTER_REGNUM && frame_pointer_needed) rv = MEP_SAVES_YES; else if ((!call_used_regs[r] || r == LP_REGNO) && df_regs_ever_live_p(r)) rv = MEP_SAVES_YES; else if (crtl->calls_eh_return && (r == 10 || r == 11)) /* We need these to have stack slots so that they can be set during unwinding. */ rv = MEP_SAVES_YES; else if (mep_interrupt_saved_reg (r)) rv = MEP_SAVES_YES; cfun->machine->reg_saved[r] = rv; } return cfun->machine->reg_saved[r] == MEP_SAVES_YES; } /* Return true if epilogue uses register REGNO. */ bool mep_epilogue_uses (int regno) { /* Since $lp is a call-saved register, the generic code will normally mark it used in the epilogue if it needs to be saved and restored. However, when profiling is enabled, the profiling code will implicitly clobber $11. This case has to be handled specially both here and in mep_call_saves_register. */ if (regno == LP_REGNO && (profile_arc_flag > 0 || profile_flag > 0)) return true; /* Interrupt functions save/restore pretty much everything. */ return (reload_completed && mep_interrupt_saved_reg (regno)); } static int mep_reg_size (int regno) { if (CR_REGNO_P (regno) && TARGET_64BIT_CR_REGS) return 8; return 4; } /* Worker function for TARGET_CAN_ELIMINATE. */ bool mep_can_eliminate (const int from, const int to) { return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM ? ! frame_pointer_needed : true); } int mep_elimination_offset (int from, int to) { int reg_save_size; int i; int frame_size = get_frame_size () + crtl->outgoing_args_size; int total_size; if (!cfun->machine->frame_locked) memset (cfun->machine->reg_saved, 0, sizeof (cfun->machine->reg_saved)); /* We don't count arg_regs_to_save in the arg pointer offset, because gcc thinks the arg pointer has moved along with the saved regs. However, we do count it when we adjust $sp in the prologue. 
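
   A worked example with made-up numbers: if three 4-byte registers are
   saved (reg_save_size == 12), regsave_filler below comes out as 4 to
   round the save area up to a multiple of 8; with frame_size == 20,
   total_size is 12 + 20 + 4 == 36, so frame_filler is another 4 and
   the arg pointer eliminates to the stack pointer at
   12 + 4 + 4 + 20 == 40 bytes.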
*/ reg_save_size = 0; for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) if (mep_call_saves_register (i)) reg_save_size += mep_reg_size (i); if (reg_save_size % 8) cfun->machine->regsave_filler = 8 - (reg_save_size % 8); else cfun->machine->regsave_filler = 0; /* This is what our total stack adjustment looks like. */ total_size = (reg_save_size + frame_size + cfun->machine->regsave_filler); if (total_size % 8) cfun->machine->frame_filler = 8 - (total_size % 8); else cfun->machine->frame_filler = 0; if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM) return reg_save_size + cfun->machine->regsave_filler; if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM) return cfun->machine->frame_filler + frame_size; if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM) return reg_save_size + cfun->machine->regsave_filler + cfun->machine->frame_filler + frame_size; gcc_unreachable (); } static rtx_insn * F (rtx_insn *x) { RTX_FRAME_RELATED_P (x) = 1; return x; } /* Since the prologue/epilogue code is generated after optimization, we can't rely on gcc to split constants for us. So, this code captures all the ways to add a constant to a register in one logic chunk, including optimizing away insns we just don't need. This makes the prolog/epilog code easier to follow. */ static void add_constant (int dest, int src, int value, int mark_frame) { rtx_insn *insn; int hi, lo; if (src == dest && value == 0) return; if (value == 0) { insn = emit_move_insn (gen_rtx_REG (SImode, dest), gen_rtx_REG (SImode, src)); if (mark_frame) RTX_FRAME_RELATED_P(insn) = 1; return; } if (value >= -32768 && value <= 32767) { insn = emit_insn (gen_addsi3 (gen_rtx_REG (SImode, dest), gen_rtx_REG (SImode, src), GEN_INT (value))); if (mark_frame) RTX_FRAME_RELATED_P(insn) = 1; return; } /* Big constant, need to use a temp register. We use REGSAVE_CONTROL_TEMP because it's call clobberable (the reg save area is always small enough to directly add to). */ hi = trunc_int_for_mode (value & 0xffff0000, SImode); lo = value & 0xffff; insn = emit_move_insn (gen_rtx_REG (SImode, REGSAVE_CONTROL_TEMP), GEN_INT (hi)); if (lo) { insn = emit_insn (gen_iorsi3 (gen_rtx_REG (SImode, REGSAVE_CONTROL_TEMP), gen_rtx_REG (SImode, REGSAVE_CONTROL_TEMP), GEN_INT (lo))); } insn = emit_insn (gen_addsi3 (gen_rtx_REG (SImode, dest), gen_rtx_REG (SImode, src), gen_rtx_REG (SImode, REGSAVE_CONTROL_TEMP))); if (mark_frame) { RTX_FRAME_RELATED_P(insn) = 1; add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_SET (gen_rtx_REG (SImode, dest), gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, dest), GEN_INT (value)))); } } /* Move SRC to DEST. Mark the move as being potentially dead if MAYBE_DEAD_P. */ static rtx_insn * maybe_dead_move (rtx dest, rtx src, bool ATTRIBUTE_UNUSED maybe_dead_p) { rtx_insn *insn = emit_move_insn (dest, src); #if 0 if (maybe_dead_p) REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, NULL); #endif return insn; } /* Used for interrupt functions, which can't assume that $tp and $gp contain the correct pointers. */ static void mep_reload_pointer (int regno, const char *symbol) { rtx reg, sym; if (!df_regs_ever_live_p(regno) && crtl->is_leaf) return; reg = gen_rtx_REG (SImode, regno); sym = gen_rtx_SYMBOL_REF (SImode, symbol); emit_insn (gen_movsi_topsym_s (reg, sym)); emit_insn (gen_movsi_botsym_s (reg, reg, sym)); } /* Assign save slots for any register not already saved. DImode registers go at the end of the reg save area; the rest go at the beginning. This is for alignment purposes. 
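
   For instance (illustrative): if $fp and $lp (4 bytes each) and one
   64-bit coprocessor register are saved with reg_save_size == 16, the
   SImode registers get slots 4 and 8 at the start of the area while
   the DImode register is placed at slot 16 from the top, keeping it
   8-byte aligned.
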
Returns true if a frame is really needed. */ static bool mep_assign_save_slots (int reg_save_size) { bool really_need_stack_frame = false; int di_ofs = 0; int i; for (i=0; i<FIRST_PSEUDO_REGISTER; i++) if (mep_call_saves_register(i)) { int regsize = mep_reg_size (i); if ((i != TP_REGNO && i != GP_REGNO && i != LP_REGNO) || mep_reg_set_in_function (i)) really_need_stack_frame = true; if (cfun->machine->reg_save_slot[i]) continue; if (regsize < 8) { cfun->machine->reg_save_size += regsize; cfun->machine->reg_save_slot[i] = cfun->machine->reg_save_size; } else { cfun->machine->reg_save_slot[i] = reg_save_size - di_ofs; di_ofs += 8; } } cfun->machine->frame_locked = 1; return really_need_stack_frame; } void mep_expand_prologue (void) { int i, rss, sp_offset = 0; int reg_save_size; int frame_size; int really_need_stack_frame; /* We must not allow register renaming in interrupt functions, because that invalidates the correctness of the set of call-used registers we're going to save/restore. */ mep_set_leaf_registers (mep_interrupt_p () ? 0 : 1); if (mep_disinterrupt_p ()) emit_insn (gen_mep_disable_int ()); cfun->machine->mep_frame_pointer_needed = frame_pointer_needed; reg_save_size = mep_elimination_offset (ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM); frame_size = mep_elimination_offset (FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM); really_need_stack_frame = frame_size; really_need_stack_frame |= mep_assign_save_slots (reg_save_size); sp_offset = reg_save_size; if (sp_offset + frame_size < 128) sp_offset += frame_size ; add_constant (SP_REGNO, SP_REGNO, -sp_offset, 1); for (i=0; i<FIRST_PSEUDO_REGISTER; i++) if (mep_call_saves_register(i)) { rtx mem; bool maybe_dead_p; machine_mode rmode; rss = cfun->machine->reg_save_slot[i]; if ((i == TP_REGNO || i == GP_REGNO || i == LP_REGNO) && (!mep_reg_set_in_function (i) && !mep_interrupt_p ())) continue; if (mep_reg_size (i) == 8) rmode = DImode; else rmode = SImode; /* If there is a pseudo associated with this register's initial value, reload might have already spilt it to the stack slot suggested by ALLOCATE_INITIAL_VALUE. The moves emitted here can then be safely deleted as dead. */ mem = gen_rtx_MEM (rmode, plus_constant (Pmode, stack_pointer_rtx, sp_offset - rss)); maybe_dead_p = rtx_equal_p (mem, has_hard_reg_initial_val (rmode, i)); if (GR_REGNO_P (i) || LOADABLE_CR_REGNO_P (i)) F(maybe_dead_move (mem, gen_rtx_REG (rmode, i), maybe_dead_p)); else if (rmode == DImode) { rtx_insn *insn; int be = TARGET_BIG_ENDIAN ? 
4 : 0; mem = gen_rtx_MEM (SImode, plus_constant (Pmode, stack_pointer_rtx, sp_offset - rss + be)); maybe_dead_move (gen_rtx_REG (SImode, REGSAVE_CONTROL_TEMP), gen_rtx_REG (SImode, i), maybe_dead_p); maybe_dead_move (gen_rtx_REG (SImode, REGSAVE_CONTROL_TEMP+1), gen_rtx_ZERO_EXTRACT (SImode, gen_rtx_REG (DImode, i), GEN_INT (32), GEN_INT (32)), maybe_dead_p); insn = maybe_dead_move (mem, gen_rtx_REG (SImode, REGSAVE_CONTROL_TEMP), maybe_dead_p); RTX_FRAME_RELATED_P (insn) = 1; add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_SET (copy_rtx (mem), gen_rtx_REG (rmode, i))); mem = gen_rtx_MEM (SImode, plus_constant (Pmode, stack_pointer_rtx, sp_offset - rss + (4-be))); insn = maybe_dead_move (mem, gen_rtx_REG (SImode, REGSAVE_CONTROL_TEMP+1), maybe_dead_p); } else { rtx_insn *insn; maybe_dead_move (gen_rtx_REG (rmode, REGSAVE_CONTROL_TEMP), gen_rtx_REG (rmode, i), maybe_dead_p); insn = maybe_dead_move (mem, gen_rtx_REG (rmode, REGSAVE_CONTROL_TEMP), maybe_dead_p); RTX_FRAME_RELATED_P (insn) = 1; add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_SET (copy_rtx (mem), gen_rtx_REG (rmode, i))); } } if (frame_pointer_needed) { /* We've already adjusted down by sp_offset. Total $sp change is reg_save_size + frame_size. We want a net change here of just reg_save_size. */ add_constant (FP_REGNO, SP_REGNO, sp_offset - reg_save_size, 1); } add_constant (SP_REGNO, SP_REGNO, sp_offset-(reg_save_size+frame_size), 1); if (mep_interrupt_p ()) { mep_reload_pointer(GP_REGNO, "__sdabase"); mep_reload_pointer(TP_REGNO, "__tpbase"); } } static void mep_start_function (FILE *file, HOST_WIDE_INT hwi_local) { int local = hwi_local; int frame_size = local + crtl->outgoing_args_size; int reg_save_size; int ffill; int i, sp, skip; int sp_offset; int slot_map[FIRST_PSEUDO_REGISTER], si, sj; reg_save_size = mep_elimination_offset (ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM); frame_size = mep_elimination_offset (FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM); sp_offset = reg_save_size + frame_size; ffill = cfun->machine->frame_filler; if (cfun->machine->mep_frame_pointer_needed) reg_names[FP_REGNO] = "$fp"; else reg_names[FP_REGNO] = "$8"; if (sp_offset == 0) return; if (debug_info_level == DINFO_LEVEL_NONE) { fprintf (file, "\t# frame: %d", sp_offset); if (reg_save_size) fprintf (file, " %d regs", reg_save_size); if (local) fprintf (file, " %d locals", local); if (crtl->outgoing_args_size) fprintf (file, " %d args", crtl->outgoing_args_size); fprintf (file, "\n"); return; } fprintf (file, "\t#\n"); fprintf (file, "\t# Initial Frame Information:\n"); if (sp_offset || !frame_pointer_needed) fprintf (file, "\t# Entry ---------- 0\n"); /* Sort registers by save slots, so they're printed in the order they appear in memory, not the order they're saved in. 
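
   As an illustration (hypothetical frame, not real compiler output),
   the annotation emitted below might read:

	#
	# Initial Frame Information:
	# Entry ---------- 0
	#   4 bytes for saved $fp  12($sp)
	#   4 bytes for saved $lp   8($sp)
	# FP ---> ---------- 8 (sp-8)
	#   8 bytes for local vars
	# SP ---> ---------- 16
	#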
*/ for (si=0; si<FIRST_PSEUDO_REGISTER; si++) slot_map[si] = si; for (si=0; si<FIRST_PSEUDO_REGISTER-1; si++) for (sj=si+1; sj<FIRST_PSEUDO_REGISTER; sj++) if (cfun->machine->reg_save_slot[slot_map[si]] > cfun->machine->reg_save_slot[slot_map[sj]]) { int t = slot_map[si]; slot_map[si] = slot_map[sj]; slot_map[sj] = t; } sp = 0; for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) { int rsize; int r = slot_map[i]; int rss = cfun->machine->reg_save_slot[r]; if (!mep_call_saves_register (r)) continue; if ((r == TP_REGNO || r == GP_REGNO || r == LP_REGNO) && (!mep_reg_set_in_function (r) && !mep_interrupt_p ())) continue; rsize = mep_reg_size(r); skip = rss - (sp+rsize); if (skip) fprintf (file, "\t# %3d bytes for alignment\n", skip); fprintf (file, "\t# %3d bytes for saved %-3s %3d($sp)\n", rsize, reg_names[r], sp_offset - rss); sp = rss; } skip = reg_save_size - sp; if (skip) fprintf (file, "\t# %3d bytes for alignment\n", skip); if (frame_pointer_needed) fprintf (file, "\t# FP ---> ---------- %d (sp-%d)\n", reg_save_size, sp_offset-reg_save_size); if (local) fprintf (file, "\t# %3d bytes for local vars\n", local); if (ffill) fprintf (file, "\t# %3d bytes for alignment\n", ffill); if (crtl->outgoing_args_size) fprintf (file, "\t# %3d bytes for outgoing args\n", crtl->outgoing_args_size); fprintf (file, "\t# SP ---> ---------- %d\n", sp_offset); fprintf (file, "\t#\n"); } static int mep_prevent_lp_restore = 0; static int mep_sibcall_epilogue = 0; void mep_expand_epilogue (void) { int i, sp_offset = 0; int reg_save_size = 0; int frame_size; int lp_temp = LP_REGNO, lp_slot = -1; int really_need_stack_frame = get_frame_size() + crtl->outgoing_args_size; int interrupt_handler = mep_interrupt_p (); if (profile_arc_flag == 2) emit_insn (gen_mep_bb_trace_ret ()); reg_save_size = mep_elimination_offset (ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM); frame_size = mep_elimination_offset (FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM); really_need_stack_frame |= mep_assign_save_slots (reg_save_size); if (frame_pointer_needed) { /* If we have a frame pointer, we won't have a reliable stack pointer (alloca, you know), so rebase SP from FP */ emit_move_insn (gen_rtx_REG (SImode, SP_REGNO), gen_rtx_REG (SImode, FP_REGNO)); sp_offset = reg_save_size; } else { /* SP is right under our local variable space. Adjust it if needed. */ sp_offset = reg_save_size + frame_size; if (sp_offset >= 128) { add_constant (SP_REGNO, SP_REGNO, frame_size, 0); sp_offset -= frame_size; } } /* This is backwards so that we restore the control and coprocessor registers before the temporary registers we use to restore them. */ for (i=FIRST_PSEUDO_REGISTER-1; i>=1; i--) if (mep_call_saves_register (i)) { machine_mode rmode; int rss = cfun->machine->reg_save_slot[i]; if (mep_reg_size (i) == 8) rmode = DImode; else rmode = SImode; if ((i == TP_REGNO || i == GP_REGNO || i == LP_REGNO) && !(mep_reg_set_in_function (i) || interrupt_handler)) continue; if (mep_prevent_lp_restore && i == LP_REGNO) continue; if (!mep_prevent_lp_restore && !interrupt_handler && (i == 10 || i == 11)) continue; if (GR_REGNO_P (i) || LOADABLE_CR_REGNO_P (i)) emit_move_insn (gen_rtx_REG (rmode, i), gen_rtx_MEM (rmode, plus_constant (Pmode, stack_pointer_rtx, sp_offset - rss))); else { if (i == LP_REGNO && !mep_sibcall_epilogue && !interrupt_handler) /* Defer this one so we can jump indirect rather than copying the RA to $lp and "ret". EH epilogues automatically skip this anyway. 
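
   That is (illustrative; $12 stands in for REGSAVE_CONTROL_TEMP, whose
   actual number isn't shown here): rather than ending with

	lw	$12, 8($sp)
	stc	$12, $lp
	ret

   we load the saved return address into the temporary and return with
   an indirect jump through it.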
*/ lp_slot = sp_offset-rss; else { emit_move_insn (gen_rtx_REG (rmode, REGSAVE_CONTROL_TEMP), gen_rtx_MEM (rmode, plus_constant (Pmode, stack_pointer_rtx, sp_offset-rss))); emit_move_insn (gen_rtx_REG (rmode, i), gen_rtx_REG (rmode, REGSAVE_CONTROL_TEMP)); } } } if (lp_slot != -1) { /* Restore this one last so we know it will be in the temp register when we return by jumping indirectly via the temp. */ emit_move_insn (gen_rtx_REG (SImode, REGSAVE_CONTROL_TEMP), gen_rtx_MEM (SImode, plus_constant (Pmode, stack_pointer_rtx, lp_slot))); lp_temp = REGSAVE_CONTROL_TEMP; } add_constant (SP_REGNO, SP_REGNO, sp_offset, 0); if (crtl->calls_eh_return && mep_prevent_lp_restore) emit_insn (gen_addsi3 (gen_rtx_REG (SImode, SP_REGNO), gen_rtx_REG (SImode, SP_REGNO), cfun->machine->eh_stack_adjust)); if (mep_sibcall_epilogue) return; if (mep_disinterrupt_p ()) emit_insn (gen_mep_enable_int ()); if (mep_prevent_lp_restore) { emit_jump_insn (gen_eh_return_internal ()); emit_barrier (); } else if (interrupt_handler) emit_jump_insn (gen_mep_reti ()); else emit_jump_insn (gen_return_internal (gen_rtx_REG (SImode, lp_temp))); } void mep_expand_eh_return (rtx *operands) { if (GET_CODE (operands[0]) != REG || REGNO (operands[0]) != LP_REGNO) { rtx ra = gen_rtx_REG (Pmode, LP_REGNO); emit_move_insn (ra, operands[0]); operands[0] = ra; } emit_insn (gen_eh_epilogue (operands[0])); } void mep_emit_eh_epilogue (rtx *operands ATTRIBUTE_UNUSED) { cfun->machine->eh_stack_adjust = gen_rtx_REG (Pmode, 0); mep_prevent_lp_restore = 1; mep_expand_epilogue (); mep_prevent_lp_restore = 0; } void mep_expand_sibcall_epilogue (void) { mep_sibcall_epilogue = 1; mep_expand_epilogue (); mep_sibcall_epilogue = 0; } static bool mep_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED) { if (decl == NULL) return false; if (mep_section_tag (DECL_RTL (decl)) == 'f') return false; /* Can't call to a sibcall from an interrupt or disinterrupt function. */ if (mep_interrupt_p () || mep_disinterrupt_p ()) return false; return true; } rtx mep_return_stackadj_rtx (void) { return gen_rtx_REG (SImode, 10); } rtx mep_return_handler_rtx (void) { return gen_rtx_REG (SImode, LP_REGNO); } void mep_function_profiler (FILE *file) { /* Always right at the beginning of the function. */ fprintf (file, "\t# mep function profiler\n"); fprintf (file, "\tadd\t$sp, -8\n"); fprintf (file, "\tsw\t$0, ($sp)\n"); fprintf (file, "\tldc\t$0, $lp\n"); fprintf (file, "\tsw\t$0, 4($sp)\n"); fprintf (file, "\tbsr\t__mep_mcount\n"); fprintf (file, "\tlw\t$0, 4($sp)\n"); fprintf (file, "\tstc\t$0, $lp\n"); fprintf (file, "\tlw\t$0, ($sp)\n"); fprintf (file, "\tadd\t$sp, 8\n\n"); } const char * mep_emit_bb_trace_ret (void) { fprintf (asm_out_file, "\t# end of block profiling\n"); fprintf (asm_out_file, "\tadd\t$sp, -8\n"); fprintf (asm_out_file, "\tsw\t$0, ($sp)\n"); fprintf (asm_out_file, "\tldc\t$0, $lp\n"); fprintf (asm_out_file, "\tsw\t$0, 4($sp)\n"); fprintf (asm_out_file, "\tbsr\t__bb_trace_ret\n"); fprintf (asm_out_file, "\tlw\t$0, 4($sp)\n"); fprintf (asm_out_file, "\tstc\t$0, $lp\n"); fprintf (asm_out_file, "\tlw\t$0, ($sp)\n"); fprintf (asm_out_file, "\tadd\t$sp, 8\n\n"); return ""; } #undef SAVE #undef RESTORE /* Operand Printing. */ void mep_print_operand_address (FILE *stream, rtx address) { if (GET_CODE (address) == MEM) address = XEXP (address, 0); else /* cf: gcc.dg/asm-4.c. 
*/ gcc_assert (GET_CODE (address) == REG); mep_print_operand (stream, address, 0); } static struct { char code; const char *pattern; const char *format; } const conversions[] = { { 0, "r", "0" }, { 0, "m+ri", "3(2)" }, { 0, "mr", "(1)" }, { 0, "ms", "(1)" }, { 0, "ml", "(1)" }, { 0, "mLrs", "%lo(3)(2)" }, { 0, "mLr+si", "%lo(4+5)(2)" }, { 0, "m+ru2s", "%tpoff(5)(2)" }, { 0, "m+ru3s", "%sdaoff(5)(2)" }, { 0, "m+r+u2si", "%tpoff(6+7)(2)" }, { 0, "m+ru2+si", "%tpoff(6+7)(2)" }, { 0, "m+r+u3si", "%sdaoff(6+7)(2)" }, { 0, "m+ru3+si", "%sdaoff(6+7)(2)" }, { 0, "mi", "(1)" }, { 0, "m+si", "(2+3)" }, { 0, "m+li", "(2+3)" }, { 0, "i", "0" }, { 0, "s", "0" }, { 0, "+si", "1+2" }, { 0, "+u2si", "%tpoff(3+4)" }, { 0, "+u3si", "%sdaoff(3+4)" }, { 0, "l", "0" }, { 'b', "i", "0" }, { 'B', "i", "0" }, { 'U', "i", "0" }, { 'h', "i", "0" }, { 'h', "Hs", "%hi(1)" }, { 'I', "i", "0" }, { 'I', "u2s", "%tpoff(2)" }, { 'I', "u3s", "%sdaoff(2)" }, { 'I', "+u2si", "%tpoff(3+4)" }, { 'I', "+u3si", "%sdaoff(3+4)" }, { 'J', "i", "0" }, { 'P', "mr", "(1\\+),\\0" }, { 'x', "i", "0" }, { 0, 0, 0 } }; static int unique_bit_in (HOST_WIDE_INT i) { switch (i & 0xff) { case 0x01: case 0xfe: return 0; case 0x02: case 0xfd: return 1; case 0x04: case 0xfb: return 2; case 0x08: case 0xf7: return 3; case 0x10: case 0x7f: return 4; case 0x20: case 0xbf: return 5; case 0x40: case 0xdf: return 6; case 0x80: case 0xef: return 7; default: gcc_unreachable (); } } static int bit_size_for_clip (HOST_WIDE_INT i) { int rv; for (rv = 0; rv < 31; rv ++) if (((HOST_WIDE_INT) 1 << rv) > i) return rv + 1; gcc_unreachable (); } /* Print an operand to an assembler instruction. */ void mep_print_operand (FILE *file, rtx x, int code) { int i, j; const char *real_name; if (code == '<') { /* Print a mnemonic to do CR <- CR moves. Find out which intrinsic we're using, then skip over the "mep_" part of its name. */ const struct cgen_insn *insn; if (mep_get_move_insn (mep_cmov, &insn)) fputs (cgen_intrinsics[insn->intrinsic] + 4, file); else mep_intrinsic_unavailable (mep_cmov); return; } if (code == 'L') { switch (GET_CODE (x)) { case AND: fputs ("clr", file); return; case IOR: fputs ("set", file); return; case XOR: fputs ("not", file); return; default: output_operand_lossage ("invalid %%L code"); } } if (code == 'M') { /* Print the second operand of a CR <- CR move. If we're using a two-operand instruction (i.e., a real cmov), then just print the operand normally. If we're using a "reg, reg, immediate" instruction such as caddi3, print the operand followed by a zero field. If we're using a three-register instruction, print the operand twice.
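
   So, illustratively, for a coprocessor operand $c1: a true cmov
   prints just "$c1", a caddi3-style insn prints "$c1, 0", and a
   three-register insn prints "$c1, $c1".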
*/ const struct cgen_insn *insn; mep_print_operand (file, x, 0); if (mep_get_move_insn (mep_cmov, &insn) && insn_data[insn->icode].n_operands == 3) { fputs (", ", file); if (insn_data[insn->icode].operand[2].predicate (x, VOIDmode)) mep_print_operand (file, x, 0); else mep_print_operand (file, const0_rtx, 0); } return; } encode_pattern (x); for (i = 0; conversions[i].pattern; i++) if (conversions[i].code == code && strcmp(conversions[i].pattern, pattern) == 0) { for (j = 0; conversions[i].format[j]; j++) if (conversions[i].format[j] == '\\') { fputc (conversions[i].format[j+1], file); j++; } else if (ISDIGIT(conversions[i].format[j])) { rtx r = patternr[conversions[i].format[j] - '0']; switch (GET_CODE (r)) { case REG: fprintf (file, "%s", reg_names [REGNO (r)]); break; case CONST_INT: switch (code) { case 'b': fprintf (file, "%d", unique_bit_in (INTVAL (r))); break; case 'B': fprintf (file, "%d", bit_size_for_clip (INTVAL (r))); break; case 'h': fprintf (file, "0x%x", ((int) INTVAL (r) >> 16) & 0xffff); break; case 'U': fprintf (file, "%d", bit_size_for_clip (INTVAL (r)) - 1); break; case 'J': fprintf (file, "0x%x", (int) INTVAL (r) & 0xffff); break; case 'x': if (INTVAL (r) & ~(HOST_WIDE_INT)0xff && !(INTVAL (r) & 0xff)) fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL(r)); else fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL(r)); break; case 'I': if (INTVAL (r) & ~(HOST_WIDE_INT)0xff && conversions[i].format[j+1] == 0) { fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (r)); fprintf (file, " # 0x%x", (int) INTVAL(r) & 0xffff); } else fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL(r)); break; default: fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL(r)); break; } break; case CONST_DOUBLE: fprintf(file, "[const_double 0x%lx]", (unsigned long) CONST_DOUBLE_HIGH(r)); break; case SYMBOL_REF: real_name = targetm.strip_name_encoding (XSTR (r, 0)); assemble_name (file, real_name); break; case LABEL_REF: output_asm_label (r); break; default: fprintf (stderr, "don't know how to print this operand:"); debug_rtx (r); gcc_unreachable (); } } else { if (conversions[i].format[j] == '+' && (!code || code == 'I') && ISDIGIT (conversions[i].format[j+1]) && GET_CODE (patternr[conversions[i].format[j+1] - '0']) == CONST_INT && INTVAL (patternr[conversions[i].format[j+1] - '0']) < 0) continue; fputc(conversions[i].format[j], file); } break; } if (!conversions[i].pattern) { error ("unconvertible operand %c %qs", code?code:'-', pattern); debug_rtx(x); } return; } void mep_final_prescan_insn (rtx_insn *insn, rtx *operands ATTRIBUTE_UNUSED, int noperands ATTRIBUTE_UNUSED) { /* Despite the fact that MeP is perfectly capable of branching and doing something else in the same bundle, gcc does jump optimization *after* scheduling, so we cannot trust the bundling flags on jump instructions. */ if (GET_MODE (insn) == BImode && get_attr_slots (insn) != SLOTS_CORE) fputc ('+', asm_out_file); } /* Function args in registers. 
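
   A varargs illustration, derived from the code below: if nsave works
   out to 2, move_block_from_reg (5 - nsave, ...) dumps registers $3
   and $4 into the register save buffer and *pretend_size becomes 8.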
*/ static void mep_setup_incoming_varargs (cumulative_args_t cum, machine_mode mode ATTRIBUTE_UNUSED, tree type ATTRIBUTE_UNUSED, int *pretend_size, int second_time ATTRIBUTE_UNUSED) { int nsave = 4 - (get_cumulative_args (cum)->nregs + 1); if (nsave > 0) cfun->machine->arg_regs_to_save = nsave; *pretend_size = nsave * 4; } static int bytesize (const_tree type, machine_mode mode) { if (mode == BLKmode) return int_size_in_bytes (type); return GET_MODE_SIZE (mode); } static rtx mep_expand_builtin_saveregs (void) { int bufsize, i, ns; rtx regbuf; ns = cfun->machine->arg_regs_to_save; if (TARGET_IVC2) { bufsize = 8 * ((ns + 1) / 2) + 8 * ns; regbuf = assign_stack_local (SImode, bufsize, 64); } else { bufsize = ns * 4; regbuf = assign_stack_local (SImode, bufsize, 32); } move_block_from_reg (5-ns, regbuf, ns); if (TARGET_IVC2) { rtx tmp = gen_rtx_MEM (DImode, XEXP (regbuf, 0)); int ofs = 8 * ((ns+1)/2); for (i=0; i<ns; i++) { int rn = (4-ns) + i + 49; rtx ptr; ptr = offset_address (tmp, GEN_INT (ofs), 2); emit_move_insn (ptr, gen_rtx_REG (DImode, rn)); ofs += 8; } } return XEXP (regbuf, 0); } static tree mep_build_builtin_va_list (void) { tree f_next_gp, f_next_gp_limit, f_next_cop, f_next_stack; tree record; record = (*lang_hooks.types.make_type) (RECORD_TYPE); f_next_gp = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("__va_next_gp"), ptr_type_node); f_next_gp_limit = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("__va_next_gp_limit"), ptr_type_node); f_next_cop = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("__va_next_cop"), ptr_type_node); f_next_stack = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("__va_next_stack"), ptr_type_node); DECL_FIELD_CONTEXT (f_next_gp) = record; DECL_FIELD_CONTEXT (f_next_gp_limit) = record; DECL_FIELD_CONTEXT (f_next_cop) = record; DECL_FIELD_CONTEXT (f_next_stack) = record; TYPE_FIELDS (record) = f_next_gp; DECL_CHAIN (f_next_gp) = f_next_gp_limit; DECL_CHAIN (f_next_gp_limit) = f_next_cop; DECL_CHAIN (f_next_cop) = f_next_stack; layout_type (record); return record; } static void mep_expand_va_start (tree valist, rtx nextarg) { tree f_next_gp, f_next_gp_limit, f_next_cop, f_next_stack; tree next_gp, next_gp_limit, next_cop, next_stack; tree t, u; int ns; ns = cfun->machine->arg_regs_to_save; f_next_gp = TYPE_FIELDS (va_list_type_node); f_next_gp_limit = DECL_CHAIN (f_next_gp); f_next_cop = DECL_CHAIN (f_next_gp_limit); f_next_stack = DECL_CHAIN (f_next_cop); next_gp = build3 (COMPONENT_REF, TREE_TYPE (f_next_gp), valist, f_next_gp, NULL_TREE); next_gp_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_gp_limit), valist, f_next_gp_limit, NULL_TREE); next_cop = build3 (COMPONENT_REF, TREE_TYPE (f_next_cop), valist, f_next_cop, NULL_TREE); next_stack = build3 (COMPONENT_REF, TREE_TYPE (f_next_stack), valist, f_next_stack, NULL_TREE); /* va_list.next_gp = expand_builtin_saveregs (); */ u = make_tree (sizetype, expand_builtin_saveregs ()); u = fold_convert (ptr_type_node, u); t = build2 (MODIFY_EXPR, ptr_type_node, next_gp, u); TREE_SIDE_EFFECTS (t) = 1; expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); /* va_list.next_gp_limit = va_list.next_gp + 4 * ns; */ u = fold_build_pointer_plus_hwi (u, 4 * ns); t = build2 (MODIFY_EXPR, ptr_type_node, next_gp_limit, u); TREE_SIDE_EFFECTS (t) = 1; expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); u = fold_build_pointer_plus_hwi (u, 8 * ((ns+1)/2)); /* va_list.next_cop = ROUND_UP(va_list.next_gp_limit,8); */ t = build2 (MODIFY_EXPR, ptr_type_node, next_cop, u); 
TREE_SIDE_EFFECTS (t) = 1; expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); /* va_list.next_stack = nextarg; */ u = make_tree (ptr_type_node, nextarg); t = build2 (MODIFY_EXPR, ptr_type_node, next_stack, u); TREE_SIDE_EFFECTS (t) = 1; expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); } static tree mep_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p, gimple_seq *post_p ATTRIBUTE_UNUSED) { HOST_WIDE_INT size, rsize; bool by_reference, ivc2_vec; tree f_next_gp, f_next_gp_limit, f_next_cop, f_next_stack; tree next_gp, next_gp_limit, next_cop, next_stack; tree label_sover, label_selse; tree tmp, res_addr; ivc2_vec = TARGET_IVC2 && VECTOR_TYPE_P (type); size = int_size_in_bytes (type); by_reference = (size > (ivc2_vec ? 8 : 4)) || (size <= 0); if (by_reference) { type = build_pointer_type (type); size = 4; } rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD; f_next_gp = TYPE_FIELDS (va_list_type_node); f_next_gp_limit = DECL_CHAIN (f_next_gp); f_next_cop = DECL_CHAIN (f_next_gp_limit); f_next_stack = DECL_CHAIN (f_next_cop); next_gp = build3 (COMPONENT_REF, TREE_TYPE (f_next_gp), valist, f_next_gp, NULL_TREE); next_gp_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_gp_limit), valist, f_next_gp_limit, NULL_TREE); next_cop = build3 (COMPONENT_REF, TREE_TYPE (f_next_cop), valist, f_next_cop, NULL_TREE); next_stack = build3 (COMPONENT_REF, TREE_TYPE (f_next_stack), valist, f_next_stack, NULL_TREE); /* if f_next_gp < f_next_gp_limit
	  IF (VECTOR_P && IVC2)
	    val = *f_next_cop;
	  ELSE
	    val = *f_next_gp;
	  f_next_gp += 4;
	  f_next_cop += 8;
	else
	  label_selse:
	  val = *f_next_stack;
	  f_next_stack += rsize;
	label_sover:
   */ label_sover = create_artificial_label (UNKNOWN_LOCATION); label_selse = create_artificial_label (UNKNOWN_LOCATION); res_addr = create_tmp_var (ptr_type_node); tmp = build2 (GE_EXPR, boolean_type_node, next_gp, unshare_expr (next_gp_limit)); tmp = build3 (COND_EXPR, void_type_node, tmp, build1 (GOTO_EXPR, void_type_node, unshare_expr (label_selse)), NULL_TREE); gimplify_and_add (tmp, pre_p); if (ivc2_vec) { tmp = build2 (MODIFY_EXPR, void_type_node, res_addr, next_cop); gimplify_and_add (tmp, pre_p); } else { tmp = build2 (MODIFY_EXPR, void_type_node, res_addr, next_gp); gimplify_and_add (tmp, pre_p); } tmp = fold_build_pointer_plus_hwi (unshare_expr (next_gp), 4); gimplify_assign (unshare_expr (next_gp), tmp, pre_p); tmp = fold_build_pointer_plus_hwi (unshare_expr (next_cop), 8); gimplify_assign (unshare_expr (next_cop), tmp, pre_p); tmp = build1 (GOTO_EXPR, void_type_node, unshare_expr (label_sover)); gimplify_and_add (tmp, pre_p); /* - - */ tmp = build1 (LABEL_EXPR, void_type_node, unshare_expr (label_selse)); gimplify_and_add (tmp, pre_p); tmp = build2 (MODIFY_EXPR, void_type_node, res_addr, unshare_expr (next_stack)); gimplify_and_add (tmp, pre_p); tmp = fold_build_pointer_plus_hwi (unshare_expr (next_stack), rsize); gimplify_assign (unshare_expr (next_stack), tmp, pre_p); /* - - */ tmp = build1 (LABEL_EXPR, void_type_node, unshare_expr (label_sover)); gimplify_and_add (tmp, pre_p); res_addr = fold_convert (build_pointer_type (type), res_addr); if (by_reference) res_addr = build_va_arg_indirect_ref (res_addr); return build_va_arg_indirect_ref (res_addr); } void mep_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype, rtx libname ATTRIBUTE_UNUSED, tree fndecl ATTRIBUTE_UNUSED) { pcum->nregs = 0; if (fntype && lookup_attribute ("vliw", TYPE_ATTRIBUTES (fntype))) pcum->vliw = 1; else pcum->vliw = 0; } /* The ABI is thus: Arguments are in $1, $2, $3, $4, stack.
Arguments larger than 4 bytes are passed indirectly. Return value in $0, unless bigger than 4 bytes, then the caller passes a pointer as the first arg. For varargs, we copy $1..$4 to the stack. */ static rtx mep_function_arg (cumulative_args_t cum_v, machine_mode mode, const_tree type ATTRIBUTE_UNUSED, bool named ATTRIBUTE_UNUSED) { CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v); /* VOIDmode is a signal for the backend to pass data to the call expander via the second operand to the call pattern. We use this to determine whether to use "jsr" or "jsrv". */ if (mode == VOIDmode) return GEN_INT (cum->vliw); /* If we haven't run out of argument registers, return the next. */ if (cum->nregs < 4) { if (type && TARGET_IVC2 && VECTOR_TYPE_P (type)) return gen_rtx_REG (mode, cum->nregs + 49); else return gen_rtx_REG (mode, cum->nregs + 1); } /* Otherwise the argument goes on the stack. */ return NULL_RTX; } static bool mep_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED, machine_mode mode, const_tree type, bool named ATTRIBUTE_UNUSED) { int size = bytesize (type, mode); /* This is non-obvious, but yes, large values passed after we've run out of registers are *still* passed by reference - we put the address of the parameter on the stack, as well as putting the parameter itself elsewhere on the stack. */ if (size <= 0 || size > 8) return true; if (size <= 4) return false; if (TARGET_IVC2 && get_cumulative_args (cum)->nregs < 4 && type != NULL_TREE && VECTOR_TYPE_P (type)) return false; return true; } static void mep_function_arg_advance (cumulative_args_t pcum, machine_mode mode ATTRIBUTE_UNUSED, const_tree type ATTRIBUTE_UNUSED, bool named ATTRIBUTE_UNUSED) { get_cumulative_args (pcum)->nregs += 1; } bool mep_return_in_memory (const_tree type, const_tree decl ATTRIBUTE_UNUSED) { int size = bytesize (type, BLKmode); if (TARGET_IVC2 && VECTOR_TYPE_P (type)) return size > 0 && size <= 8 ? 0 : 1; return size > 0 && size <= 4 ? 0 : 1; } static bool mep_narrow_volatile_bitfield (void) { return true; } /* Implement FUNCTION_VALUE. All values are returned in $0. */ rtx mep_function_value (const_tree type, const_tree func ATTRIBUTE_UNUSED) { if (TARGET_IVC2 && VECTOR_TYPE_P (type)) return gen_rtx_REG (TYPE_MODE (type), 48); return gen_rtx_REG (TYPE_MODE (type), RETURN_VALUE_REGNUM); } /* Implement LIBCALL_VALUE, using the same rules as mep_function_value. */ rtx mep_libcall_value (machine_mode mode) { return gen_rtx_REG (mode, RETURN_VALUE_REGNUM); } /* Handle pipeline hazards. */ typedef enum { op_none, op_stc, op_fsft, op_ret } op_num; static const char *opnames[] = { "", "stc", "fsft", "ret" }; static int prev_opcode = 0; /* This isn't as optimal as it could be, because we don't know what control register the STC opcode is storing in. We only need to add the nop if it's the relevant register, but we add it for irrelevant registers also.
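
   For example, a "stc ...; ret" pair comes out as (illustrative):

	stc	$0, $lp
	nop	# stc-ret hazard
	ret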
*/ void mep_asm_output_opcode (FILE *file, const char *ptr) { int this_opcode = op_none; const char *hazard = 0; switch (*ptr) { case 'f': if (strncmp (ptr, "fsft", 4) == 0 && !ISGRAPH (ptr[4])) this_opcode = op_fsft; break; case 'r': if (strncmp (ptr, "ret", 3) == 0 && !ISGRAPH (ptr[3])) this_opcode = op_ret; break; case 's': if (strncmp (ptr, "stc", 3) == 0 && !ISGRAPH (ptr[3])) this_opcode = op_stc; break; } if (prev_opcode == op_stc && this_opcode == op_fsft) hazard = "nop"; if (prev_opcode == op_stc && this_opcode == op_ret) hazard = "nop"; if (hazard) fprintf(file, "%s\t# %s-%s hazard\n\t", hazard, opnames[prev_opcode], opnames[this_opcode]); prev_opcode = this_opcode; } /* Handle attributes. */ static tree mep_validate_based_tiny (tree *node, tree name, tree args, int flags ATTRIBUTE_UNUSED, bool *no_add) { if (TREE_CODE (*node) != VAR_DECL && TREE_CODE (*node) != POINTER_TYPE && TREE_CODE (*node) != TYPE_DECL) { warning (0, "%qE attribute only applies to variables", name); *no_add = true; } else if (args == NULL_TREE && TREE_CODE (*node) == VAR_DECL) { if (! (TREE_PUBLIC (*node) || TREE_STATIC (*node))) { warning (0, "address region attributes not allowed with auto storage class"); *no_add = true; } /* Ignore storage attribute of pointed to variable: char __far * x; */ if (TREE_TYPE (*node) && TREE_CODE (TREE_TYPE (*node)) == POINTER_TYPE) { warning (0, "address region attributes on pointed-to types ignored"); *no_add = true; } } return NULL_TREE; } static int mep_multiple_address_regions (tree list, bool check_section_attr) { tree a; int count_sections = 0; int section_attr_count = 0; for (a = list; a; a = TREE_CHAIN (a)) { if (is_attribute_p ("based", TREE_PURPOSE (a)) || is_attribute_p ("tiny", TREE_PURPOSE (a)) || is_attribute_p ("near", TREE_PURPOSE (a)) || is_attribute_p ("far", TREE_PURPOSE (a)) || is_attribute_p ("io", TREE_PURPOSE (a))) count_sections ++; if (check_section_attr) section_attr_count += is_attribute_p ("section", TREE_PURPOSE (a)); } if (check_section_attr) return section_attr_count; else return count_sections; } #define MEP_ATTRIBUTES(decl) \ (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \ : DECL_ATTRIBUTES (decl) \ ? (DECL_ATTRIBUTES (decl)) \ : TYPE_ATTRIBUTES (TREE_TYPE (decl)) static tree mep_validate_near_far (tree *node, tree name, tree args, int flags ATTRIBUTE_UNUSED, bool *no_add) { if (TREE_CODE (*node) != VAR_DECL && TREE_CODE (*node) != FUNCTION_DECL && TREE_CODE (*node) != METHOD_TYPE && TREE_CODE (*node) != POINTER_TYPE && TREE_CODE (*node) != TYPE_DECL) { warning (0, "%qE attribute only applies to variables and functions", name); *no_add = true; } else if (args == NULL_TREE && TREE_CODE (*node) == VAR_DECL) { if (! 
(TREE_PUBLIC (*node) || TREE_STATIC (*node))) { warning (0, "address region attributes not allowed with auto storage class"); *no_add = true; } /* Ignore storage attribute of pointed to variable: char __far * x; */ if (TREE_TYPE (*node) && TREE_CODE (TREE_TYPE (*node)) == POINTER_TYPE) { warning (0, "address region attributes on pointed-to types ignored"); *no_add = true; } } else if (mep_multiple_address_regions (MEP_ATTRIBUTES (*node), false) > 0) { warning (0, "duplicate address region attribute %qE in declaration of %qE on line %d", name, DECL_NAME (*node), DECL_SOURCE_LINE (*node)); DECL_ATTRIBUTES (*node) = NULL_TREE; } return NULL_TREE; } static tree mep_validate_disinterrupt (tree *node, tree name, tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED, bool *no_add) { if (TREE_CODE (*node) != FUNCTION_DECL && TREE_CODE (*node) != METHOD_TYPE) { warning (0, "%qE attribute only applies to functions", name); *no_add = true; } return NULL_TREE; } static tree mep_validate_interrupt (tree *node, tree name, tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED, bool *no_add) { tree function_type; if (TREE_CODE (*node) != FUNCTION_DECL) { warning (0, "%qE attribute only applies to functions", name); *no_add = true; return NULL_TREE; } if (DECL_DECLARED_INLINE_P (*node)) error ("cannot inline interrupt function %qE", DECL_NAME (*node)); DECL_UNINLINABLE (*node) = 1; function_type = TREE_TYPE (*node); if (TREE_TYPE (function_type) != void_type_node) error ("interrupt function must have return type of void"); if (prototype_p (function_type) && (TREE_VALUE (TYPE_ARG_TYPES (function_type)) != void_type_node || TREE_CHAIN (TYPE_ARG_TYPES (function_type)) != NULL_TREE)) error ("interrupt function must have no arguments"); return NULL_TREE; } static tree mep_validate_io_cb (tree *node, tree name, tree args, int flags ATTRIBUTE_UNUSED, bool *no_add) { if (TREE_CODE (*node) != VAR_DECL) { warning (0, "%qE attribute only applies to variables", name); *no_add = true; } if (args != NULL_TREE) { if (TREE_CODE (TREE_VALUE (args)) == NON_LVALUE_EXPR) TREE_VALUE (args) = TREE_OPERAND (TREE_VALUE (args), 0); if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST) { warning (0, "%qE attribute allows only an integer constant argument", name); *no_add = true; } } if (*no_add == false && !TARGET_IO_NO_VOLATILE) TREE_THIS_VOLATILE (*node) = 1; return NULL_TREE; } static tree mep_validate_vliw (tree *node, tree name, tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED, bool *no_add) { if (TREE_CODE (*node) != FUNCTION_TYPE && TREE_CODE (*node) != FUNCTION_DECL && TREE_CODE (*node) != METHOD_TYPE && TREE_CODE (*node) != FIELD_DECL && TREE_CODE (*node) != TYPE_DECL) { static int gave_pointer_note = 0; static int gave_array_note = 0; static const char * given_type = NULL; given_type = get_tree_code_name (TREE_CODE (*node)); if (TREE_CODE (*node) == POINTER_TYPE) given_type = "pointers"; if (TREE_CODE (*node) == ARRAY_TYPE) given_type = "arrays"; if (given_type) warning (0, "%qE attribute only applies to functions, not %s", name, given_type); else warning (0, "%qE attribute only applies to functions", name); *no_add = true; if (TREE_CODE (*node) == POINTER_TYPE && !gave_pointer_note) { inform (input_location, "to describe a pointer to a VLIW function, use syntax like this:\n%s", " typedef int (__vliw *vfuncptr) ();"); gave_pointer_note = 1; } if (TREE_CODE (*node) == ARRAY_TYPE && !gave_array_note) { inform (input_location, "to describe an array of VLIW function pointers, use syntax like this:\n%s", " typedef int 
(__vliw *vfuncptr[]) ();"); gave_array_note = 1; } } if (!TARGET_VLIW) error ("VLIW functions are not allowed without a VLIW configuration"); return NULL_TREE; } static const struct attribute_spec mep_attribute_table[11] = { /* name min max decl type func handler affects_type_identity */ { "based", 0, 0, false, false, false, mep_validate_based_tiny, false }, { "tiny", 0, 0, false, false, false, mep_validate_based_tiny, false }, { "near", 0, 0, false, false, false, mep_validate_near_far, false }, { "far", 0, 0, false, false, false, mep_validate_near_far, false }, { "disinterrupt", 0, 0, false, false, false, mep_validate_disinterrupt, false }, { "interrupt", 0, 0, false, false, false, mep_validate_interrupt, false }, { "io", 0, 1, false, false, false, mep_validate_io_cb, false }, { "cb", 0, 1, false, false, false, mep_validate_io_cb, false }, { "vliw", 0, 0, false, true, false, mep_validate_vliw, false }, { NULL, 0, 0, false, false, false, NULL, false } }; static bool mep_function_attribute_inlinable_p (const_tree callee) { tree attrs = TYPE_ATTRIBUTES (TREE_TYPE (callee)); if (!attrs) attrs = DECL_ATTRIBUTES (callee); return (lookup_attribute ("disinterrupt", attrs) == 0 && lookup_attribute ("interrupt", attrs) == 0); } static bool mep_can_inline_p (tree caller, tree callee) { if (TREE_CODE (callee) == ADDR_EXPR) callee = TREE_OPERAND (callee, 0); if (!mep_vliw_function_p (caller) && mep_vliw_function_p (callee)) { return false; } return true; } #define FUNC_CALL 1 #define FUNC_DISINTERRUPT 2 struct GTY(()) pragma_entry { int used; int flag; }; /* Hash table of farcall-tagged sections. */ static GTY(()) hash_map<nofree_string_hash, pragma_entry> *pragma_htab; static void mep_note_pragma_flag (const char *funcname, int flag) { if (!pragma_htab) pragma_htab = hash_map<nofree_string_hash, pragma_entry>::create_ggc (31); bool existed; const char *name = ggc_strdup (funcname); pragma_entry *slot = &pragma_htab->get_or_insert (name, &existed); if (!existed) { slot->flag = 0; slot->used = 0; } slot->flag |= flag; } static bool mep_lookup_pragma_flag (const char *funcname, int flag) { if (!pragma_htab) return false; if (funcname[0] == '@' && funcname[2] == '.') funcname += 3; pragma_entry *slot = pragma_htab->get (funcname); if (slot && (slot->flag & flag)) { slot->used |= flag; return true; } return false; } bool mep_lookup_pragma_call (const char *funcname) { return mep_lookup_pragma_flag (funcname, FUNC_CALL); } void mep_note_pragma_call (const char *funcname) { mep_note_pragma_flag (funcname, FUNC_CALL); } bool mep_lookup_pragma_disinterrupt (const char *funcname) { return mep_lookup_pragma_flag (funcname, FUNC_DISINTERRUPT); } void mep_note_pragma_disinterrupt (const char *funcname) { mep_note_pragma_flag (funcname, FUNC_DISINTERRUPT); } bool note_unused_pragma_disinterrupt (const char *const &s, const pragma_entry &e, void *) { if ((e.flag & FUNC_DISINTERRUPT) && !(e.used & FUNC_DISINTERRUPT)) warning (0, "\"#pragma disinterrupt %s\" not used", s); return 1; } void mep_file_cleanups (void) { if (pragma_htab) pragma_htab->traverse<void *, note_unused_pragma_disinterrupt> (NULL); } /* These three functions provide a bridge between the pramgas that affect register classes, and the functions that maintain them. We can't call those functions directly as pragma handling is part of the front end and doesn't have direct access to them. 
*/ void mep_save_register_info (void) { save_register_info (); } void mep_reinit_regs (void) { reinit_regs (); } void mep_init_regs (void) { init_regs (); } static int mep_attrlist_to_encoding (tree list, tree decl) { if (mep_multiple_address_regions (list, false) > 1) { warning (0, "duplicate address region attribute %qE in declaration of %qE on line %d", TREE_PURPOSE (TREE_CHAIN (list)), DECL_NAME (decl), DECL_SOURCE_LINE (decl)); TREE_CHAIN (list) = NULL_TREE; } while (list) { if (is_attribute_p ("based", TREE_PURPOSE (list))) return 'b'; if (is_attribute_p ("tiny", TREE_PURPOSE (list))) return 't'; if (is_attribute_p ("near", TREE_PURPOSE (list))) return 'n'; if (is_attribute_p ("far", TREE_PURPOSE (list))) return 'f'; if (is_attribute_p ("io", TREE_PURPOSE (list))) { if (TREE_VALUE (list) && TREE_VALUE (TREE_VALUE (list)) && TREE_CODE (TREE_VALUE (TREE_VALUE (list))) == INTEGER_CST) { int location = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE(list))); if (location >= 0 && location <= 0x1000000) return 'i'; } return 'I'; } if (is_attribute_p ("cb", TREE_PURPOSE (list))) return 'c'; list = TREE_CHAIN (list); } if (TARGET_TF && TREE_CODE (decl) == FUNCTION_DECL && DECL_SECTION_NAME (decl) == 0) return 'f'; return 0; } static int mep_comp_type_attributes (const_tree t1, const_tree t2) { int vliw1, vliw2; vliw1 = (lookup_attribute ("vliw", TYPE_ATTRIBUTES (t1)) != 0); vliw2 = (lookup_attribute ("vliw", TYPE_ATTRIBUTES (t2)) != 0); if (vliw1 != vliw2) return 0; return 1; } static void mep_insert_attributes (tree decl, tree *attributes) { int size; const char *secname = 0; tree attrib, attrlist; char encoding; if (TREE_CODE (decl) == FUNCTION_DECL) { const char *funcname = IDENTIFIER_POINTER (DECL_NAME (decl)); if (mep_lookup_pragma_disinterrupt (funcname)) { attrib = build_tree_list (get_identifier ("disinterrupt"), NULL_TREE); *attributes = chainon (*attributes, attrib); } } if (TREE_CODE (decl) != VAR_DECL || ! (TREE_PUBLIC (decl) || TREE_STATIC (decl) || DECL_EXTERNAL (decl))) return; if (TREE_READONLY (decl) && TARGET_DC) /* -mdc means that const variables default to the near section, regardless of the size cutoff. */ return; /* User specified an attribute, so override the default. Ignore storage attribute of pointed to variable. char __far * x; */ if (! (TREE_TYPE (decl) && TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE)) { if (TYPE_P (decl) && TYPE_ATTRIBUTES (decl) && *attributes) TYPE_ATTRIBUTES (decl) = NULL_TREE; else if (DECL_ATTRIBUTES (decl) && *attributes) DECL_ATTRIBUTES (decl) = NULL_TREE; } attrlist = *attributes ? *attributes : DECL_ATTRIBUTES (decl); encoding = mep_attrlist_to_encoding (attrlist, decl); if (!encoding && TYPE_P (TREE_TYPE (decl))) { attrlist = TYPE_ATTRIBUTES (TREE_TYPE (decl)); encoding = mep_attrlist_to_encoding (attrlist, decl); } if (encoding) { /* This means that the declaration has a specific section attribute, so we should not apply the default rules. */ if (encoding == 'i' || encoding == 'I') { tree attr = lookup_attribute ("io", attrlist); if (attr && TREE_VALUE (attr) && TREE_VALUE (TREE_VALUE(attr))) { int location = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE(attr))); static tree previous_value = 0; static int previous_location = 0; static tree previous_name = 0; /* We take advantage of the fact that gcc will reuse the same tree pointer when applying an attribute to a list of decls, but produce a new tree for attributes on separate source lines, even when they're textually identical. This is the behavior we want. 
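
   Concretely (hypothetical example): given

	__attribute__((io (0x100))) int a, b;

   both decls share one attribute tree, so the check below warns that
   the __io address 0x100 is the same for a and b; two textually
   identical attributes on separate declarations get distinct trees
   and are left alone.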
*/ if (TREE_VALUE (attr) == previous_value && location == previous_location) { warning(0, "__io address 0x%x is the same for %qE and %qE", location, previous_name, DECL_NAME (decl)); } previous_name = DECL_NAME (decl); previous_location = location; previous_value = TREE_VALUE (attr); } } return; } /* Declarations of arrays can change size. Don't trust them. */ if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE) size = 0; else size = int_size_in_bytes (TREE_TYPE (decl)); if (TARGET_RAND_TPGP && size <= 4 && size > 0) { if (TREE_PUBLIC (decl) || DECL_EXTERNAL (decl) || TREE_STATIC (decl)) { const char *name = IDENTIFIER_POINTER (DECL_NAME (decl)); int key = 0; while (*name) key += *name++; switch (key & 3) { case 0: secname = "based"; break; case 1: secname = "tiny"; break; case 2: secname = "far"; break; default: ; } } } else { if (size <= mep_based_cutoff && size > 0) secname = "based"; else if (size <= mep_tiny_cutoff && size > 0) secname = "tiny"; else if (TARGET_L) secname = "far"; } if (mep_const_section && TREE_READONLY (decl)) { if (strcmp (mep_const_section, "tiny") == 0) secname = "tiny"; else if (strcmp (mep_const_section, "near") == 0) return; else if (strcmp (mep_const_section, "far") == 0) secname = "far"; } if (!secname) return; if (!mep_multiple_address_regions (*attributes, true) && !mep_multiple_address_regions (DECL_ATTRIBUTES (decl), false)) { attrib = build_tree_list (get_identifier (secname), NULL_TREE); /* Chain the attribute directly onto the variable's DECL_ATTRIBUTES in order to avoid the POINTER_TYPE bypasses in mep_validate_near_far and mep_validate_based_tiny. */ DECL_ATTRIBUTES (decl) = chainon (DECL_ATTRIBUTES (decl), attrib); } } static void mep_encode_section_info (tree decl, rtx rtl, int first) { rtx rtlname; const char *oldname; const char *secname; char encoding; char *newname; tree idp; int maxsize; tree type; tree mep_attributes; if (! 
first) return; if (TREE_CODE (decl) != VAR_DECL && TREE_CODE (decl) != FUNCTION_DECL) return; rtlname = XEXP (rtl, 0); if (GET_CODE (rtlname) == SYMBOL_REF) oldname = XSTR (rtlname, 0); else if (GET_CODE (rtlname) == MEM && GET_CODE (XEXP (rtlname, 0)) == SYMBOL_REF) oldname = XSTR (XEXP (rtlname, 0), 0); else gcc_unreachable (); type = TREE_TYPE (decl); if (type == error_mark_node) return; mep_attributes = MEP_ATTRIBUTES (decl); encoding = mep_attrlist_to_encoding (mep_attributes, decl); if (encoding) { newname = (char *) alloca (strlen (oldname) + 4); sprintf (newname, "@%c.%s", encoding, oldname); idp = get_identifier (newname); XEXP (rtl, 0) = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp)); SYMBOL_REF_WEAK (XEXP (rtl, 0)) = DECL_WEAK (decl); SET_SYMBOL_REF_DECL (XEXP (rtl, 0), decl); switch (encoding) { case 'b': maxsize = 128; secname = "based"; break; case 't': maxsize = 65536; secname = "tiny"; break; case 'n': maxsize = 0x1000000; secname = "near"; break; default: maxsize = 0; secname = 0; break; } if (maxsize && int_size_in_bytes (TREE_TYPE (decl)) > maxsize) { warning (0, "variable %s (%ld bytes) is too large for the %s section (%d bytes)", oldname, (long) int_size_in_bytes (TREE_TYPE (decl)), secname, maxsize); } } } const char * mep_strip_name_encoding (const char *sym) { while (1) { if (*sym == '*') sym++; else if (*sym == '@' && sym[2] == '.') sym += 3; else return sym; } } static section * mep_select_section (tree decl, int reloc ATTRIBUTE_UNUSED, unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED) { int readonly = 1; int encoding; switch (TREE_CODE (decl)) { case VAR_DECL: if (!TREE_READONLY (decl) || TREE_SIDE_EFFECTS (decl) || !DECL_INITIAL (decl) || (DECL_INITIAL (decl) != error_mark_node && !TREE_CONSTANT (DECL_INITIAL (decl)))) readonly = 0; break; case CONSTRUCTOR: if (! TREE_CONSTANT (decl)) readonly = 0; break; default: break; } if (TREE_CODE (decl) == FUNCTION_DECL) { const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0); if (name[0] == '@' && name[2] == '.') encoding = name[1]; else encoding = 0; if (flag_function_sections || DECL_COMDAT_GROUP (decl)) mep_unique_section (decl, 0); else if (lookup_attribute ("vliw", TYPE_ATTRIBUTES (TREE_TYPE (decl)))) { if (encoding == 'f') return vftext_section; else return vtext_section; } else if (encoding == 'f') return ftext_section; else return text_section; } if (TREE_CODE (decl) == VAR_DECL) { const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0); if (name[0] == '@' && name[2] == '.') switch (name[1]) { case 'b': return based_section; case 't': if (readonly) return srodata_section; if (DECL_INITIAL (decl)) return sdata_section; return tinybss_section; case 'f': if (readonly) return frodata_section; return far_section; case 'i': case 'I': error_at (DECL_SOURCE_LOCATION (decl), "variable %D of type %<io%> must be uninitialized", decl); return data_section; case 'c': error_at (DECL_SOURCE_LOCATION (decl), "variable %D of type %<cb%> must be uninitialized", decl); return data_section; } } if (readonly) return readonly_data_section; return data_section; } static void mep_unique_section (tree decl, int reloc) { static const char *prefixes[][2] = { { ".text.", ".gnu.linkonce.t." }, { ".rodata.", ".gnu.linkonce.r." }, { ".data.", ".gnu.linkonce.d." }, { ".based.", ".gnu.linkonce.based." }, { ".sdata.", ".gnu.linkonce.s." }, { ".far.", ".gnu.linkonce.far." }, { ".ftext.", ".gnu.linkonce.ft." }, { ".frodata.", ".gnu.linkonce.frd." }, { ".srodata.", ".gnu.linkonce.srd." }, { ".vtext.", ".gnu.linkonce.v." 
}, { ".vftext.", ".gnu.linkonce.vf." } }; int sec = 2; /* .data */ int len; const char *name, *prefix; char *string; name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)); if (DECL_RTL (decl)) name = XSTR (XEXP (DECL_RTL (decl), 0), 0); if (TREE_CODE (decl) == FUNCTION_DECL) { if (lookup_attribute ("vliw", TYPE_ATTRIBUTES (TREE_TYPE (decl)))) sec = 9; /* .vtext */ else sec = 0; /* .text */ } else if (decl_readonly_section (decl, reloc)) sec = 1; /* .rodata */ if (name[0] == '@' && name[2] == '.') { switch (name[1]) { case 'b': sec = 3; /* .based */ break; case 't': if (sec == 1) sec = 8; /* .srodata */ else sec = 4; /* .sdata */ break; case 'f': if (sec == 0) sec = 6; /* .ftext */ else if (sec == 9) sec = 10; /* .vftext */ else if (sec == 1) sec = 7; /* .frodata */ else sec = 5; /* .far. */ break; } name += 3; } prefix = prefixes[sec][DECL_COMDAT_GROUP(decl) != NULL]; len = strlen (name) + strlen (prefix); string = (char *) alloca (len + 1); sprintf (string, "%s%s", prefix, name); set_decl_section_name (decl, string); } /* Given a decl, a section name, and whether the decl initializer has relocs, choose attributes for the section. */ #define SECTION_MEP_VLIW SECTION_MACH_DEP static unsigned int mep_section_type_flags (tree decl, const char *name, int reloc) { unsigned int flags = default_section_type_flags (decl, name, reloc); if (decl && TREE_CODE (decl) == FUNCTION_DECL && lookup_attribute ("vliw", TYPE_ATTRIBUTES (TREE_TYPE (decl)))) flags |= SECTION_MEP_VLIW; return flags; } /* Switch to an arbitrary section NAME with attributes as specified by FLAGS. ALIGN specifies any known alignment requirements for the section; 0 if the default should be used. Differs from the standard ELF version only in support of VLIW mode. */ static void mep_asm_named_section (const char *name, unsigned int flags, tree decl ATTRIBUTE_UNUSED) { char flagchars[8], *f = flagchars; const char *type; if (!(flags & SECTION_DEBUG)) *f++ = 'a'; if (flags & SECTION_WRITE) *f++ = 'w'; if (flags & SECTION_CODE) *f++ = 'x'; if (flags & SECTION_SMALL) *f++ = 's'; if (flags & SECTION_MEP_VLIW) *f++ = 'v'; *f = '\0'; if (flags & SECTION_BSS) type = "nobits"; else type = "progbits"; fprintf (asm_out_file, "\t.section\t%s,\"%s\",@%s\n", name, flagchars, type); if (flags & SECTION_CODE) fputs ((flags & SECTION_MEP_VLIW ? "\t.vliw\n" : "\t.core\n"), asm_out_file); } void mep_output_aligned_common (FILE *stream, tree decl, const char *name, int size, int align, int global) { /* We intentionally don't use mep_section_tag() here. */ if (name[0] == '@' && (name[1] == 'i' || name[1] == 'I' || name[1] == 'c') && name[2] == '.') { int location = -1; tree attr = lookup_attribute ((name[1] == 'c' ? 
"cb" : "io"), DECL_ATTRIBUTES (decl)); if (attr && TREE_VALUE (attr) && TREE_VALUE (TREE_VALUE(attr))) location = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE(attr))); if (location == -1) return; if (global) { fprintf (stream, "\t.globl\t"); assemble_name (stream, name); fprintf (stream, "\n"); } assemble_name (stream, name); fprintf (stream, " = %d\n", location); return; } if (name[0] == '@' && name[2] == '.') { const char *sec = 0; switch (name[1]) { case 'b': switch_to_section (based_section); sec = ".based"; break; case 't': switch_to_section (tinybss_section); sec = ".sbss"; break; case 'f': switch_to_section (farbss_section); sec = ".farbss"; break; } if (sec) { const char *name2; int p2align = 0; while (align > BITS_PER_UNIT) { align /= 2; p2align ++; } name2 = targetm.strip_name_encoding (name); if (global) fprintf (stream, "\t.globl\t%s\n", name2); fprintf (stream, "\t.p2align %d\n", p2align); fprintf (stream, "\t.type\t%s,@object\n", name2); fprintf (stream, "\t.size\t%s,%d\n", name2, size); fprintf (stream, "%s:\n\t.zero\t%d\n", name2, size); return; } } if (!global) { fprintf (stream, "\t.local\t"); assemble_name (stream, name); fprintf (stream, "\n"); } fprintf (stream, "\t.comm\t"); assemble_name (stream, name); fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT); } /* Trampolines. */ static void mep_trampoline_init (rtx m_tramp, tree fndecl, rtx static_chain) { rtx addr = XEXP (m_tramp, 0); rtx fnaddr = XEXP (DECL_RTL (fndecl), 0); emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__mep_trampoline_helper"), LCT_NORMAL, VOIDmode, 3, addr, Pmode, fnaddr, Pmode, static_chain, Pmode); } /* Experimental Reorg. */ static bool mep_mentioned_p (rtx in, rtx reg, /* NULL for mem */ int modes_too) /* if nonzero, modes must match also. */ { const char *fmt; int i; enum rtx_code code; if (in == 0) return false; if (reg && GET_CODE (reg) != REG) return false; if (GET_CODE (in) == LABEL_REF) return (reg == 0); code = GET_CODE (in); switch (code) { case MEM: if (reg) return mep_mentioned_p (XEXP (in, 0), reg, modes_too); return true; case REG: if (!reg) return false; if (modes_too && (GET_MODE (in) != GET_MODE (reg))) return false; return (REGNO (in) == REGNO (reg)); case SCRATCH: case CC0: case PC: case CONST_INT: case CONST_DOUBLE: return false; default: break; } /* Set's source should be read-only. */ if (code == SET && !reg) return mep_mentioned_p (SET_DEST (in), reg, modes_too); fmt = GET_RTX_FORMAT (code); for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) { if (fmt[i] == 'E') { register int j; for (j = XVECLEN (in, i) - 1; j >= 0; j--) if (mep_mentioned_p (XVECEXP (in, i, j), reg, modes_too)) return true; } else if (fmt[i] == 'e' && mep_mentioned_p (XEXP (in, i), reg, modes_too)) return true; } return false; } #define EXPERIMENTAL_REGMOVE_REORG 1 #if EXPERIMENTAL_REGMOVE_REORG static int mep_compatible_reg_class (int r1, int r2) { if (GR_REGNO_P (r1) && GR_REGNO_P (r2)) return 1; if (CR_REGNO_P (r1) && CR_REGNO_P (r2)) return 1; return 0; } static void mep_reorg_regmove (rtx_insn *insns) { rtx_insn *insn, *next, *follow; rtx pat, *where; int count = 0, done = 0, replace, before = 0; if (dump_file) for (insn = insns; insn; insn = NEXT_INSN (insn)) if (NONJUMP_INSN_P (insn)) before++; /* We're looking for (set r2 r1) moves where r1 dies, followed by a set that uses the r2 and r2 dies there. We replace r2 with r1 and see if it's still a valid insn. If so, delete the first set. Copied from reorg.c. 
*/ while (!done) { done = 1; for (insn = insns; insn; insn = next) { next = next_nonnote_nondebug_insn (insn); if (! NONJUMP_INSN_P (insn)) continue; pat = PATTERN (insn); replace = 0; if (GET_CODE (pat) == SET && GET_CODE (SET_SRC (pat)) == REG && GET_CODE (SET_DEST (pat)) == REG && find_regno_note (insn, REG_DEAD, REGNO (SET_SRC (pat))) && mep_compatible_reg_class (REGNO (SET_SRC (pat)), REGNO (SET_DEST (pat)))) { follow = next_nonnote_nondebug_insn (insn); if (dump_file) fprintf (dump_file, "superfluous moves: considering %d\n", INSN_UID (insn)); while (follow && NONJUMP_INSN_P (follow) && GET_CODE (PATTERN (follow)) == SET && !dead_or_set_p (follow, SET_SRC (pat)) && !mep_mentioned_p (PATTERN (follow), SET_SRC (pat), 0) && !mep_mentioned_p (PATTERN (follow), SET_DEST (pat), 0)) { if (dump_file) fprintf (dump_file, "\tskipping %d\n", INSN_UID (follow)); follow = next_nonnote_insn (follow); } if (dump_file) fprintf (dump_file, "\tfollow is %d\n", INSN_UID (follow)); if (follow && NONJUMP_INSN_P (follow) && GET_CODE (PATTERN (follow)) == SET && find_regno_note (follow, REG_DEAD, REGNO (SET_DEST (pat)))) { if (GET_CODE (SET_DEST (PATTERN (follow))) == REG) { if (mep_mentioned_p (SET_SRC (PATTERN (follow)), SET_DEST (pat), 1)) { replace = 1; where = & SET_SRC (PATTERN (follow)); } } else if (GET_CODE (SET_DEST (PATTERN (follow))) == MEM) { if (mep_mentioned_p (PATTERN (follow), SET_DEST (pat), 1)) { replace = 1; where = & PATTERN (follow); } } } } /* If so, follow is the corresponding insn */ if (replace) { if (dump_file) { rtx_insn *x; fprintf (dump_file, "----- Candidate for superfluous move deletion:\n\n"); for (x = insn; x ;x = NEXT_INSN (x)) { print_rtl_single (dump_file, x); if (x == follow) break; fprintf (dump_file, "\n"); } } if (validate_replace_rtx_subexp (SET_DEST (pat), SET_SRC (pat), follow, where)) { count ++; delete_insn (insn); if (dump_file) { fprintf (dump_file, "\n----- Success! new insn:\n\n"); print_rtl_single (dump_file, follow); } done = 0; } } } } if (dump_file) { fprintf (dump_file, "\n%d insn%s deleted out of %d.\n\n", count, count == 1 ? "" : "s", before); fprintf (dump_file, "=====\n"); } } #endif /* Figure out where to put LABEL, which is the label for a repeat loop. If INCLUDING, LAST_INSN is the last instruction in the loop, otherwise the loop ends just before LAST_INSN. If SHARED, insns other than the "repeat" might use LABEL to jump to the loop's continuation point. Return the last instruction in the adjusted loop. */ static rtx_insn * mep_insert_repeat_label_last (rtx_insn *last_insn, rtx_code_label *label, bool including, bool shared) { rtx_insn *next, *prev; int count = 0, code, icode; if (dump_file) fprintf (dump_file, "considering end of repeat loop at insn %d\n", INSN_UID (last_insn)); /* Set PREV to the last insn in the loop. */ prev = last_insn; if (!including) prev = PREV_INSN (prev); /* Set NEXT to the next insn after the repeat label. */ next = last_insn; if (!shared) while (prev != 0) { code = GET_CODE (prev); if (code == CALL_INSN || code == CODE_LABEL || code == BARRIER) break; if (INSN_P (prev)) { if (GET_CODE (PATTERN (prev)) == SEQUENCE) prev = as_a <rtx_insn *> (XVECEXP (PATTERN (prev), 0, 1)); /* Other insns that should not be in the last two opcodes. */ icode = recog_memoized (prev); if (icode < 0 || icode == CODE_FOR_repeat || icode == CODE_FOR_erepeat || get_attr_may_trap (prev) == MAY_TRAP_YES) break; /* That leaves JUMP_INSN and INSN. It will have BImode if it is the second instruction in a VLIW bundle. 
In that case, loop again: if the first instruction also satisfies the conditions above then we will reach here again and put both of them into the repeat epilogue. Otherwise both should remain outside. */ if (GET_MODE (prev) != BImode) { count++; next = prev; if (dump_file) print_rtl_single (dump_file, next); if (count == 2) break; } } prev = PREV_INSN (prev); } /* See if we're adding the label immediately after the repeat insn. If so, we need to separate them with a nop. */ prev = prev_real_insn (next); if (prev) switch (recog_memoized (prev)) { case CODE_FOR_repeat: case CODE_FOR_erepeat: if (dump_file) fprintf (dump_file, "Adding nop inside loop\n"); emit_insn_before (gen_nop (), next); break; default: break; } /* Insert the label. */ emit_label_before (label, next); /* Insert the nops. */ if (dump_file && count < 2) fprintf (dump_file, "Adding %d nop%s\n\n", 2 - count, count == 1 ? "" : "s"); for (; count < 2; count++) if (including) last_insn = emit_insn_after (gen_nop (), last_insn); else emit_insn_before (gen_nop (), last_insn); return last_insn; } void mep_emit_doloop (rtx *operands, int is_end) { rtx tag; if (cfun->machine->doloop_tags == 0 || cfun->machine->doloop_tag_from_end == is_end) { cfun->machine->doloop_tags++; cfun->machine->doloop_tag_from_end = is_end; } tag = GEN_INT (cfun->machine->doloop_tags - 1); if (is_end) emit_jump_insn (gen_doloop_end_internal (operands[0], operands[1], tag)); else emit_insn (gen_doloop_begin_internal (operands[0], operands[0], tag)); } /* Code for converting doloop_begins and doloop_ends into valid MeP instructions. A doloop_begin is just a placeholder: $count = unspec ($count) where $count is initially the number of iterations - 1. doloop_end has the form: if ($count-- == 0) goto label The counter variable is private to the doloop insns, nothing else relies on its value. There are three cases, in decreasing order of preference: 1. A loop has exactly one doloop_begin and one doloop_end. The doloop_end branches to the first instruction after the doloop_begin. In this case we can replace the doloop_begin with a repeat instruction and remove the doloop_end. I.e.: $count1 = unspec ($count1) label: ... insn1 insn2 if ($count2-- == 0) goto label becomes: repeat $count1,repeat_label label: ... repeat_label: insn1 insn2 # end repeat 2. As for (1), except there are several doloop_ends. One of them (call it X) falls through to a label L. All the others fall through to branches to L. In this case, we remove X and replace the other doloop_ends with branches to the repeat label. For example: $count1 = unspec ($count1) start: ... if ($count2-- == 0) goto label end: ... if ($count3-- == 0) goto label goto end becomes: repeat $count1,repeat_label start: ... repeat_label: nop nop # end repeat end: ... goto repeat_label 3. The fallback case. Replace doloop_begins with: $count = $count + 1 Replace doloop_ends with the equivalent of: $count = $count - 1 if ($count == 0) goto label Note that this might need a scratch register if $count is stored in memory. */ /* A structure describing one doloop_begin. */ struct mep_doloop_begin { /* The next doloop_begin with the same tag. */ struct mep_doloop_begin *next; /* The instruction itself. */ rtx_insn *insn; /* The initial counter value. This is known to be a general register. */ rtx counter; }; /* A structure describing a doloop_end. */ struct mep_doloop_end { /* The next doloop_end with the same loop tag. */ struct mep_doloop_end *next; /* The instruction itself. 
*/ rtx_insn *insn; /* The first instruction after INSN when the branch isn't taken. */ rtx_insn *fallthrough; /* The location of the counter value. Since doloop_end_internal is a jump instruction, it has to allow the counter to be stored anywhere (any non-fixed register or memory location). */ rtx counter; /* The target label (the place where the insn branches when the counter isn't zero). */ rtx label; /* A scratch register. Only available when COUNTER isn't stored in a general register. */ rtx scratch; }; /* One do-while loop. */ struct mep_doloop { /* All the doloop_begins for this loop (in no particular order). */ struct mep_doloop_begin *begin; /* All the doloop_ends. When there is more than one, arrange things so that the first one is the most likely to be X in case (2) above. */ struct mep_doloop_end *end; }; /* Return true if LOOP can be converted into repeat/repeat_end form (that is, if it matches cases (1) or (2) above). */ static bool mep_repeat_loop_p (struct mep_doloop *loop) { struct mep_doloop_end *end; rtx fallthrough; /* There must be exactly one doloop_begin and at least one doloop_end. */ if (loop->begin == 0 || loop->end == 0 || loop->begin->next != 0) return false; /* The first doloop_end (X) must branch back to the insn after the doloop_begin. */ if (prev_real_insn (loop->end->label) != loop->begin->insn) return false; /* All the other doloop_ends must branch to the same place as X. When the branch isn't taken, they must jump to the instruction after X. */ fallthrough = loop->end->fallthrough; for (end = loop->end->next; end != 0; end = end->next) if (end->label != loop->end->label || !simplejump_p (end->fallthrough) || next_real_insn (JUMP_LABEL (end->fallthrough)) != fallthrough) return false; return true; } /* The main repeat reorg function. See comment above for details. */ static void mep_reorg_repeat (rtx_insn *insns) { rtx_insn *insn; struct mep_doloop *loops, *loop; struct mep_doloop_begin *begin; struct mep_doloop_end *end; /* Quick exit if we haven't created any loops. */ if (cfun->machine->doloop_tags == 0) return; /* Create an array of mep_doloop structures. */ loops = (struct mep_doloop *) alloca (sizeof (loops[0]) * cfun->machine->doloop_tags); memset (loops, 0, sizeof (loops[0]) * cfun->machine->doloop_tags); /* Search the function for do-while insns and group them by loop tag. */ for (insn = insns; insn; insn = NEXT_INSN (insn)) if (INSN_P (insn)) switch (recog_memoized (insn)) { case CODE_FOR_doloop_begin_internal: insn_extract (insn); loop = &loops[INTVAL (recog_data.operand[2])]; begin = (struct mep_doloop_begin *) alloca (sizeof (struct mep_doloop_begin)); begin->next = loop->begin; begin->insn = insn; begin->counter = recog_data.operand[0]; loop->begin = begin; break; case CODE_FOR_doloop_end_internal: insn_extract (insn); loop = &loops[INTVAL (recog_data.operand[2])]; end = (struct mep_doloop_end *) alloca (sizeof (struct mep_doloop_end)); end->insn = insn; end->fallthrough = next_real_insn (insn); end->counter = recog_data.operand[0]; end->label = recog_data.operand[1]; end->scratch = recog_data.operand[3]; /* If this insn falls through to an unconditional jump, give it a lower priority than the others. */ if (loop->end != 0 && simplejump_p (end->fallthrough)) { end->next = loop->end->next; loop->end->next = end; } else { end->next = loop->end; loop->end = end; } break; } /* Convert the insns for each loop in turn. */ for (loop = loops; loop < loops + cfun->machine->doloop_tags; loop++) if (mep_repeat_loop_p (loop)) { /* Case (1) or (2). 
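(That is, the loop has exactly one doloop_begin and all of its
     doloop_ends branch back to the label after it, so it can be
     rewritten into repeat/repeat_end form as described in the long
     comment above.)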
*/ rtx_code_label *repeat_label; rtx label_ref; /* Create a new label for the repeat insn. */ repeat_label = gen_label_rtx (); /* Replace the doloop_begin with a repeat. */ label_ref = gen_rtx_LABEL_REF (VOIDmode, repeat_label); emit_insn_before (gen_repeat (loop->begin->counter, label_ref), loop->begin->insn); delete_insn (loop->begin->insn); /* Insert the repeat label before the first doloop_end. Fill the gap with nops if there are other doloop_ends. */ mep_insert_repeat_label_last (loop->end->insn, repeat_label, false, loop->end->next != 0); /* Emit a repeat_end (to improve the readability of the output). */ emit_insn_before (gen_repeat_end (), loop->end->insn); /* Delete the first doloop_end. */ delete_insn (loop->end->insn); /* Replace the others with branches to REPEAT_LABEL. */ for (end = loop->end->next; end != 0; end = end->next) { emit_jump_insn_before (gen_jump (repeat_label), end->insn); delete_insn (end->insn); delete_insn (end->fallthrough); } } else { /* Case (3). First replace all the doloop_begins with increment instructions. */ for (begin = loop->begin; begin != 0; begin = begin->next) { emit_insn_before (gen_add3_insn (copy_rtx (begin->counter), begin->counter, const1_rtx), begin->insn); delete_insn (begin->insn); } /* Replace all the doloop_ends with decrement-and-branch sequences. */ for (end = loop->end; end != 0; end = end->next) { rtx reg; start_sequence (); /* Load the counter value into a general register. */ reg = end->counter; if (!REG_P (reg) || REGNO (reg) > 15) { reg = end->scratch; emit_move_insn (copy_rtx (reg), copy_rtx (end->counter)); } /* Decrement the counter. */ emit_insn (gen_add3_insn (copy_rtx (reg), copy_rtx (reg), constm1_rtx)); /* Copy it back to its original location. */ if (reg != end->counter) emit_move_insn (copy_rtx (end->counter), copy_rtx (reg)); /* Jump back to the start label. */ insn = emit_jump_insn (gen_mep_bne_true (reg, const0_rtx, end->label)); JUMP_LABEL (insn) = end->label; LABEL_NUSES (end->label)++; /* Emit the whole sequence before the doloop_end. */ insn = get_insns (); end_sequence (); emit_insn_before (insn, end->insn); /* Delete the doloop_end. 
*/ delete_insn (end->insn); } } } static bool mep_invertable_branch_p (rtx_insn *insn) { rtx cond, set; enum rtx_code old_code; int i; set = PATTERN (insn); if (GET_CODE (set) != SET) return false; if (GET_CODE (XEXP (set, 1)) != IF_THEN_ELSE) return false; cond = XEXP (XEXP (set, 1), 0); old_code = GET_CODE (cond); switch (old_code) { case EQ: PUT_CODE (cond, NE); break; case NE: PUT_CODE (cond, EQ); break; case LT: PUT_CODE (cond, GE); break; case GE: PUT_CODE (cond, LT); break; default: return false; } INSN_CODE (insn) = -1; i = recog_memoized (insn); PUT_CODE (cond, old_code); INSN_CODE (insn) = -1; return i >= 0; } static void mep_invert_branch (rtx_insn *insn, rtx_insn *after) { rtx cond, set, label; int i; set = PATTERN (insn); gcc_assert (GET_CODE (set) == SET); gcc_assert (GET_CODE (XEXP (set, 1)) == IF_THEN_ELSE); cond = XEXP (XEXP (set, 1), 0); switch (GET_CODE (cond)) { case EQ: PUT_CODE (cond, NE); break; case NE: PUT_CODE (cond, EQ); break; case LT: PUT_CODE (cond, GE); break; case GE: PUT_CODE (cond, LT); break; default: gcc_unreachable (); } label = gen_label_rtx (); emit_label_after (label, after); for (i=1; i<=2; i++) if (GET_CODE (XEXP (XEXP (set, 1), i)) == LABEL_REF) { rtx ref = XEXP (XEXP (set, 1), i); if (LABEL_NUSES (XEXP (ref, 0)) == 1) delete_insn (XEXP (ref, 0)); XEXP (ref, 0) = label; LABEL_NUSES (label) ++; JUMP_LABEL (insn) = label; } INSN_CODE (insn) = -1; i = recog_memoized (insn); gcc_assert (i >= 0); } static void mep_reorg_erepeat (rtx_insn *insns) { rtx_insn *insn, *prev; rtx_code_label *l; rtx x; int count; for (insn = insns; insn; insn = NEXT_INSN (insn)) if (JUMP_P (insn) && mep_invertable_branch_p (insn)) { if (dump_file) { fprintf (dump_file, "\n------------------------------\n"); fprintf (dump_file, "erepeat: considering this jump:\n"); print_rtl_single (dump_file, insn); } count = simplejump_p (insn) ? 0 : 1; for (prev = PREV_INSN (insn); prev; prev = PREV_INSN (prev)) { if (CALL_P (prev) || BARRIER_P (prev)) break; if (prev == JUMP_LABEL (insn)) { rtx_insn *newlast; if (dump_file) fprintf (dump_file, "found loop top, %d insns\n", count); if (LABEL_NUSES (prev) == 1) /* We're the only user, always safe */ ; else if (LABEL_NUSES (prev) == 2) { /* See if there's a barrier before this label. If so, we know nobody inside the loop uses it. But we must be careful to put the erepeat *after* the label. */ rtx_insn *barrier; for (barrier = PREV_INSN (prev); barrier && NOTE_P (barrier); barrier = PREV_INSN (barrier)) ; if (barrier && ! BARRIER_P (barrier)) break; } else { /* We don't know who else, within or without our loop, uses this */ if (dump_file) fprintf (dump_file, "... but there are multiple users, too risky.\n"); break; } /* Generate a label to be used by the erepeat insn. */ l = gen_label_rtx (); /* Insert the erepeat after INSN's target label. */ x = gen_erepeat (gen_rtx_LABEL_REF (VOIDmode, l)); LABEL_NUSES (l)++; emit_insn_after (x, prev); /* Insert the erepeat label. */ newlast = (mep_insert_repeat_label_last (insn, l, !simplejump_p (insn), false)); if (simplejump_p (insn)) { emit_insn_before (gen_erepeat_end (), insn); delete_insn (insn); } else { mep_invert_branch (insn, newlast); emit_insn_after (gen_erepeat_end (), newlast); } break; } if (LABEL_P (prev)) { /* A label is OK if there is exactly one user, and we can find that user before the next label.
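In other words, we accept a label only when its single user is a
     jump we have already scanned past; if we cannot prove that,
     control might enter the loop without executing the erepeat insn,
     so we conservatively give up.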
*/ rtx_insn *user = 0; int safe = 0; if (LABEL_NUSES (prev) == 1) { for (user = PREV_INSN (prev); user && (INSN_P (user) || NOTE_P (user)); user = PREV_INSN (user)) if (JUMP_P (user) && JUMP_LABEL (user) == prev) { safe = INSN_UID (user); break; } } if (!safe) break; if (dump_file) fprintf (dump_file, "... ignoring jump from insn %d to %d\n", safe, INSN_UID (prev)); } if (INSN_P (prev)) { count ++; } } } if (dump_file) fprintf (dump_file, "\n==============================\n"); } /* Replace a jump to a return with a copy of the return. GCC doesn't always do this on its own. */ static void mep_jmp_return_reorg (rtx_insn *insns) { rtx_insn *insn, *label, *ret; int ret_code; for (insn = insns; insn; insn = NEXT_INSN (insn)) if (simplejump_p (insn)) { /* Find the first real insn the jump jumps to. */ label = ret = safe_as_a <rtx_insn *> (JUMP_LABEL (insn)); while (ret && (NOTE_P (ret) || LABEL_P (ret) || GET_CODE (PATTERN (ret)) == USE)) ret = NEXT_INSN (ret); if (ret) { /* Is it a return? */ ret_code = recog_memoized (ret); if (ret_code == CODE_FOR_return_internal || ret_code == CODE_FOR_eh_return_internal) { /* It is. Replace the jump with a return. */ LABEL_NUSES (label) --; if (LABEL_NUSES (label) == 0) delete_insn (label); PATTERN (insn) = copy_rtx (PATTERN (ret)); INSN_CODE (insn) = -1; } } } } static void mep_reorg_addcombine (rtx_insn *insns) { rtx_insn *i, *n; for (i = insns; i; i = NEXT_INSN (i)) if (INSN_P (i) && INSN_CODE (i) == CODE_FOR_addsi3 && GET_CODE (SET_DEST (PATTERN (i))) == REG && GET_CODE (XEXP (SET_SRC (PATTERN (i)), 0)) == REG && REGNO (SET_DEST (PATTERN (i))) == REGNO (XEXP (SET_SRC (PATTERN (i)), 0)) && GET_CODE (XEXP (SET_SRC (PATTERN (i)), 1)) == CONST_INT) { n = NEXT_INSN (i); if (INSN_P (n) && INSN_CODE (n) == CODE_FOR_addsi3 && GET_CODE (SET_DEST (PATTERN (n))) == REG && GET_CODE (XEXP (SET_SRC (PATTERN (n)), 0)) == REG && REGNO (SET_DEST (PATTERN (n))) == REGNO (XEXP (SET_SRC (PATTERN (n)), 0)) && GET_CODE (XEXP (SET_SRC (PATTERN (n)), 1)) == CONST_INT) { int ic = INTVAL (XEXP (SET_SRC (PATTERN (i)), 1)); int nc = INTVAL (XEXP (SET_SRC (PATTERN (n)), 1)); if (REGNO (SET_DEST (PATTERN (i))) == REGNO (SET_DEST (PATTERN (n))) && ic + nc < 32767 && ic + nc > -32768) { XEXP (SET_SRC (PATTERN (i)), 1) = GEN_INT (ic + nc); SET_NEXT_INSN (i) = NEXT_INSN (n); if (NEXT_INSN (i)) SET_PREV_INSN (NEXT_INSN (i)) = i; } } } } /* If this insn adjusts the stack, return the adjustment, else return zero. */ static int add_sp_insn_p (rtx_insn *insn) { rtx pat; if (! single_set (insn)) return 0; pat = PATTERN (insn); if (GET_CODE (SET_DEST (pat)) != REG) return 0; if (REGNO (SET_DEST (pat)) != SP_REGNO) return 0; if (GET_CODE (SET_SRC (pat)) != PLUS) return 0; if (GET_CODE (XEXP (SET_SRC (pat), 0)) != REG) return 0; if (REGNO (XEXP (SET_SRC (pat), 0)) != SP_REGNO) return 0; if (GET_CODE (XEXP (SET_SRC (pat), 1)) != CONST_INT) return 0; return INTVAL (XEXP (SET_SRC (pat), 1)); } /* Check for trivial functions that set up an unneeded stack frame. */ static void mep_reorg_noframe (rtx_insn *insns) { rtx_insn *start_frame_insn; rtx_insn *end_frame_insn = 0; int sp_adjust, sp2; rtx sp; /* The first insn should be $sp = $sp + N */ while (insns && !
INSN_P (insns)) insns = NEXT_INSN (insns); if (!insns) return; sp_adjust = add_sp_insn_p (insns); if (sp_adjust == 0) return; start_frame_insn = insns; sp = SET_DEST (PATTERN (start_frame_insn)); insns = next_real_insn (insns); while (insns) { rtx_insn *next = next_real_insn (insns); if (!next) break; sp2 = add_sp_insn_p (insns); if (sp2) { if (end_frame_insn) return; end_frame_insn = insns; if (sp2 != -sp_adjust) return; } else if (mep_mentioned_p (insns, sp, 0)) return; else if (CALL_P (insns)) return; insns = next; } if (end_frame_insn) { delete_insn (start_frame_insn); delete_insn (end_frame_insn); } } static void mep_reorg (void) { rtx_insn *insns = get_insns (); /* We require accurate REG_DEAD notes. */ compute_bb_for_insn (); df_note_add_problem (); df_analyze (); mep_reorg_addcombine (insns); #if EXPERIMENTAL_REGMOVE_REORG /* VLIW packing has been done already, so we can't just delete things. */ if (!mep_vliw_function_p (cfun->decl)) mep_reorg_regmove (insns); #endif mep_jmp_return_reorg (insns); mep_bundle_insns (insns); mep_reorg_repeat (insns); if (optimize && !profile_flag && !profile_arc_flag && TARGET_OPT_REPEAT && (!mep_interrupt_p () || mep_interrupt_saved_reg (RPB_REGNO))) mep_reorg_erepeat (insns); /* This may delete *insns so make sure it's last. */ mep_reorg_noframe (insns); df_finish_pass (false); } /*----------------------------------------------------------------------*/ /* Builtins */ /*----------------------------------------------------------------------*/ /* Element X gives the index into cgen_insns[] of the most general implementation of intrinsic X. Unimplemented intrinsics are mapped to -1. */ int mep_intrinsic_insn[ARRAY_SIZE (cgen_intrinsics)]; /* Element X gives the index of another instruction that is mapped to the same intrinsic as cgen_insns[X]. It is -1 when there is no other instruction. Things are set up so that mep_intrinsic_chain[X] < X. */ static int mep_intrinsic_chain[ARRAY_SIZE (cgen_insns)]; /* The bitmask for the current ISA. The ISA masks are declared in mep-intrin.h. */ unsigned int mep_selected_isa; struct mep_config { const char *config_name; unsigned int isa; }; static struct mep_config mep_configs[] = { #ifdef COPROC_SELECTION_TABLE COPROC_SELECTION_TABLE, #endif { 0, 0 } }; /* Initialize the global intrinsics variables above. */ static void mep_init_intrinsics (void) { size_t i; /* Set MEP_SELECTED_ISA to the ISA flag for this configuration. */ mep_selected_isa = mep_configs[0].isa; if (mep_config_string != 0) for (i = 0; mep_configs[i].config_name; i++) if (strcmp (mep_config_string, mep_configs[i].config_name) == 0) { mep_selected_isa = mep_configs[i].isa; break; } /* Assume all intrinsics are unavailable. */ for (i = 0; i < ARRAY_SIZE (mep_intrinsic_insn); i++) mep_intrinsic_insn[i] = -1; /* Build up the global intrinsic tables. */ for (i = 0; i < ARRAY_SIZE (cgen_insns); i++) if ((cgen_insns[i].isas & mep_selected_isa) != 0) { mep_intrinsic_chain[i] = mep_intrinsic_insn[cgen_insns[i].intrinsic]; mep_intrinsic_insn[cgen_insns[i].intrinsic] = i; } /* See whether we can directly move values between one coprocessor register and another. */ for (i = 0; i < ARRAY_SIZE (mep_cmov_insns); i++) if (MEP_INTRINSIC_AVAILABLE_P (mep_cmov_insns[i])) mep_have_copro_copro_moves_p = true; /* See whether we can directly move values between core and coprocessor registers. 
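Both the cmov1 and cmov2 intrinsics must be available (presumably
     one per direction) before the flag below is enabled; note that
     the unconditional assignment that follows then forces the flag
     on regardless.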
*/ mep_have_core_copro_moves_p = (MEP_INTRINSIC_AVAILABLE_P (mep_cmov1) && MEP_INTRINSIC_AVAILABLE_P (mep_cmov2)); mep_have_core_copro_moves_p = 1; } /* Declare all available intrinsic functions. Called once only. */ static tree cp_data_bus_int_type_node; static tree opaque_vector_type_node; static tree v8qi_type_node; static tree v4hi_type_node; static tree v2si_type_node; static tree v8uqi_type_node; static tree v4uhi_type_node; static tree v2usi_type_node; static tree mep_cgen_regnum_to_type (enum cgen_regnum_operand_type cr) { switch (cr) { case cgen_regnum_operand_type_POINTER: return ptr_type_node; case cgen_regnum_operand_type_LONG: return long_integer_type_node; case cgen_regnum_operand_type_ULONG: return long_unsigned_type_node; case cgen_regnum_operand_type_SHORT: return short_integer_type_node; case cgen_regnum_operand_type_USHORT: return short_unsigned_type_node; case cgen_regnum_operand_type_CHAR: return char_type_node; case cgen_regnum_operand_type_UCHAR: return unsigned_char_type_node; case cgen_regnum_operand_type_SI: return intSI_type_node; case cgen_regnum_operand_type_DI: return intDI_type_node; case cgen_regnum_operand_type_VECTOR: return opaque_vector_type_node; case cgen_regnum_operand_type_V8QI: return v8qi_type_node; case cgen_regnum_operand_type_V4HI: return v4hi_type_node; case cgen_regnum_operand_type_V2SI: return v2si_type_node; case cgen_regnum_operand_type_V8UQI: return v8uqi_type_node; case cgen_regnum_operand_type_V4UHI: return v4uhi_type_node; case cgen_regnum_operand_type_V2USI: return v2usi_type_node; case cgen_regnum_operand_type_CP_DATA_BUS_INT: return cp_data_bus_int_type_node; default: return void_type_node; } } static void mep_init_builtins (void) { size_t i; if (TARGET_64BIT_CR_REGS) cp_data_bus_int_type_node = long_long_integer_type_node; else cp_data_bus_int_type_node = long_integer_type_node; opaque_vector_type_node = build_opaque_vector_type (intQI_type_node, 8); v8qi_type_node = build_vector_type (intQI_type_node, 8); v4hi_type_node = build_vector_type (intHI_type_node, 4); v2si_type_node = build_vector_type (intSI_type_node, 2); v8uqi_type_node = build_vector_type (unsigned_intQI_type_node, 8); v4uhi_type_node = build_vector_type (unsigned_intHI_type_node, 4); v2usi_type_node = build_vector_type (unsigned_intSI_type_node, 2); add_builtin_type ("cp_data_bus_int", cp_data_bus_int_type_node); add_builtin_type ("cp_vector", opaque_vector_type_node); add_builtin_type ("cp_v8qi", v8qi_type_node); add_builtin_type ("cp_v4hi", v4hi_type_node); add_builtin_type ("cp_v2si", v2si_type_node); add_builtin_type ("cp_v8uqi", v8uqi_type_node); add_builtin_type ("cp_v4uhi", v4uhi_type_node); add_builtin_type ("cp_v2usi", v2usi_type_node); /* Intrinsics like mep_cadd3 are implemented with two groups of instructions, one which uses UNSPECs and one which uses a specific rtl code such as PLUS. Instructions in the latter group belong to GROUP_KNOWN_CODE. In such cases, the intrinsic will have two entries in the global tables above. The unspec form is accessed using builtin functions while the specific form is accessed using the mep_* enum in mep-intrin.h. The idea is that __cop arithmetic and builtin functions have different optimization requirements. If mep_cadd3() appears in the source code, the user will surely expect gcc to use cadd3 rather than a work-alike such as add3. However, if the user just writes "a + b", where a or b are __cop variables, it is reasonable for gcc to choose a core instruction rather than cadd3 if it believes that is more optimal.
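For example (the operand shapes here are hypothetical, not the
     documented intrinsic signature):

       b = mep_cadd3 (x, y);   <- builtin form: always expands to cadd3
       c = x + y;              <- __cop arithmetic: gcc may choose add3
                                  or cadd3, whichever it considers cheaper

     The builtin form is reached through the UNSPEC entry, while plain
     arithmetic matches the GROUP_KNOWN_CODE entry.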
*/ for (i = 0; i < ARRAY_SIZE (cgen_insns); i++) if ((cgen_insns[i].groups & GROUP_KNOWN_CODE) == 0 && mep_intrinsic_insn[cgen_insns[i].intrinsic] >= 0) { tree ret_type = void_type_node; tree bi_type; if (i > 0 && cgen_insns[i].intrinsic == cgen_insns[i-1].intrinsic) continue; if (cgen_insns[i].cret_p) ret_type = mep_cgen_regnum_to_type (cgen_insns[i].regnums[0].type); bi_type = build_function_type_list (ret_type, NULL_TREE); add_builtin_function (cgen_intrinsics[cgen_insns[i].intrinsic], bi_type, cgen_insns[i].intrinsic, BUILT_IN_MD, NULL, NULL); } } /* Report the unavailability of the given intrinsic. */ #if 1 static void mep_intrinsic_unavailable (int intrinsic) { static int already_reported_p[ARRAY_SIZE (cgen_intrinsics)]; if (already_reported_p[intrinsic]) return; if (mep_intrinsic_insn[intrinsic] < 0) error ("coprocessor intrinsic %qs is not available in this configuration", cgen_intrinsics[intrinsic]); else if (CGEN_CURRENT_GROUP == GROUP_VLIW) error ("%qs is not available in VLIW functions", cgen_intrinsics[intrinsic]); else error ("%qs is not available in non-VLIW functions", cgen_intrinsics[intrinsic]); already_reported_p[intrinsic] = 1; } #endif /* See if any implementation of INTRINSIC is available to the current function. If so, store the most general implementation in *INSN_PTR and return true. Return false otherwise. */ static bool mep_get_intrinsic_insn (int intrinsic ATTRIBUTE_UNUSED, const struct cgen_insn **insn_ptr ATTRIBUTE_UNUSED) { int i; i = mep_intrinsic_insn[intrinsic]; while (i >= 0 && !CGEN_ENABLE_INSN_P (i)) i = mep_intrinsic_chain[i]; if (i >= 0) { *insn_ptr = &cgen_insns[i]; return true; } return false; } /* Like mep_get_intrinsic_insn, but with extra handling for moves. If INTRINSIC is mep_cmov, but there is no pure CR <- CR move insn, try using a work-alike instead. In this case, the returned insn may have three operands rather than two. */ static bool mep_get_move_insn (int intrinsic, const struct cgen_insn **cgen_insn) { size_t i; if (intrinsic == mep_cmov) { for (i = 0; i < ARRAY_SIZE (mep_cmov_insns); i++) if (mep_get_intrinsic_insn (mep_cmov_insns[i], cgen_insn)) return true; return false; } return mep_get_intrinsic_insn (intrinsic, cgen_insn); } /* If ARG is a register operand that is the same size as MODE, convert it to MODE using a subreg. Otherwise return ARG as-is. */ static rtx mep_convert_arg (machine_mode mode, rtx arg) { if (GET_MODE (arg) != mode && register_operand (arg, VOIDmode) && GET_MODE_SIZE (GET_MODE (arg)) == GET_MODE_SIZE (mode)) return simplify_gen_subreg (mode, arg, GET_MODE (arg), 0); return arg; } /* Apply regnum conversions to ARG using the description given by REGNUM. Return the new argument on success and null on failure. */ static rtx mep_convert_regnum (const struct cgen_regnum_operand *regnum, rtx arg) { if (regnum->count == 0) return arg; if (GET_CODE (arg) != CONST_INT || INTVAL (arg) < 0 || INTVAL (arg) >= regnum->count) return 0; return gen_rtx_REG (SImode, INTVAL (arg) + regnum->base); } /* Try to make intrinsic argument ARG match the given operand. UNSIGNED_P is true if the argument has an unsigned type. */ static rtx mep_legitimize_arg (const struct insn_operand_data *operand, rtx arg, int unsigned_p) { if (GET_CODE (arg) == CONST_INT) { /* CONST_INTs can only be bound to integer operands. */ if (GET_MODE_CLASS (operand->mode) != MODE_INT) return 0; } else if (GET_CODE (arg) == CONST_DOUBLE) /* These hold vector constants.
*/; else if (GET_MODE_SIZE (GET_MODE (arg)) != GET_MODE_SIZE (operand->mode)) { /* If the argument is a different size from what's expected, we must have a value in the right mode class in order to convert it. */ if (GET_MODE_CLASS (operand->mode) != GET_MODE_CLASS (GET_MODE (arg))) return 0; /* If the operand is an rvalue, promote or demote it to match the operand's size. This might not need extra instructions when ARG is a register value. */ if (operand->constraint[0] != '=') arg = convert_to_mode (operand->mode, arg, unsigned_p); } /* If the operand is an lvalue, bind the operand to a new register. The caller will copy this value into ARG after the main instruction. By doing this always, we produce slightly more optimal code. */ /* But not for control registers. */ if (operand->constraint[0] == '=' && (! REG_P (arg) || ! (CONTROL_REGNO_P (REGNO (arg)) || CCR_REGNO_P (REGNO (arg)) || CR_REGNO_P (REGNO (arg))) )) return gen_reg_rtx (operand->mode); /* Try simple mode punning. */ arg = mep_convert_arg (operand->mode, arg); if (operand->predicate (arg, operand->mode)) return arg; /* See if forcing the argument into a register will make it match. */ if (GET_CODE (arg) == CONST_INT || GET_CODE (arg) == CONST_DOUBLE) arg = force_reg (operand->mode, arg); else arg = mep_convert_arg (operand->mode, force_reg (GET_MODE (arg), arg)); if (operand->predicate (arg, operand->mode)) return arg; return 0; } /* Report that ARG cannot be passed to argument ARGNUM of intrinsic function FNNAME. OPERAND describes the operand to which ARGNUM is mapped. */ static void mep_incompatible_arg (const struct insn_operand_data *operand, rtx arg, int argnum, tree fnname) { size_t i; if (GET_CODE (arg) == CONST_INT) for (i = 0; i < ARRAY_SIZE (cgen_immediate_predicates); i++) if (operand->predicate == cgen_immediate_predicates[i].predicate) { const struct cgen_immediate_predicate *predicate; HOST_WIDE_INT argval; predicate = &cgen_immediate_predicates[i]; argval = INTVAL (arg); if (argval < predicate->lower || argval >= predicate->upper) error ("argument %d of %qE must be in the range %d...%d", argnum, fnname, predicate->lower, predicate->upper - 1); else error ("argument %d of %qE must be a multiple of %d", argnum, fnname, predicate->align); return; } error ("incompatible type for argument %d of %qE", argnum, fnname); } static rtx mep_expand_builtin (tree exp, rtx target ATTRIBUTE_UNUSED, rtx subtarget ATTRIBUTE_UNUSED, machine_mode mode ATTRIBUTE_UNUSED, int ignore ATTRIBUTE_UNUSED) { rtx pat, op[10], arg[10]; unsigned int a; int opindex, unsigned_p[10]; tree fndecl, args; unsigned int n_args; tree fnname; const struct cgen_insn *cgen_insn; const struct insn_data_d *idata; unsigned int first_arg = 0; unsigned int builtin_n_args; fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0); fnname = DECL_NAME (fndecl); /* Find out which instruction we should emit. Note that some coprocessor intrinsics may only be available in VLIW mode, or only in normal mode. */ if (!mep_get_intrinsic_insn (DECL_FUNCTION_CODE (fndecl), &cgen_insn)) { mep_intrinsic_unavailable (DECL_FUNCTION_CODE (fndecl)); return NULL_RTX; } idata = &insn_data[cgen_insn->icode]; builtin_n_args = cgen_insn->num_args; if (cgen_insn->cret_p) { if (cgen_insn->cret_p > 1) builtin_n_args ++; first_arg = 1; mep_cgen_regnum_to_type (cgen_insn->regnums[0].type); builtin_n_args --; } /* Evaluate each argument. 
*/ n_args = call_expr_nargs (exp); if (n_args < builtin_n_args) { error ("too few arguments to %qE", fnname); return NULL_RTX; } if (n_args > builtin_n_args) { error ("too many arguments to %qE", fnname); return NULL_RTX; } for (a = first_arg; a < builtin_n_args + first_arg; a++) { tree value; args = CALL_EXPR_ARG (exp, a - first_arg); value = args; #if 0 if (cgen_insn->regnums[a].reference_p) { if (TREE_CODE (value) != ADDR_EXPR) { debug_tree(value); error ("argument %d of %qE must be an address", a+1, fnname); return NULL_RTX; } value = TREE_OPERAND (value, 0); } #endif /* If the argument has been promoted to int, get the unpromoted value. This is necessary when sub-int memory values are bound to reference parameters. */ if (TREE_CODE (value) == NOP_EXPR && TREE_TYPE (value) == integer_type_node && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (value, 0))) && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (value, 0))) < TYPE_PRECISION (TREE_TYPE (value)))) value = TREE_OPERAND (value, 0); /* If the argument has been promoted to double, get the unpromoted SFmode value. This is necessary for FMAX support, for example. */ if (TREE_CODE (value) == NOP_EXPR && SCALAR_FLOAT_TYPE_P (TREE_TYPE (value)) && SCALAR_FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (value, 0))) && TYPE_MODE (TREE_TYPE (value)) == DFmode && TYPE_MODE (TREE_TYPE (TREE_OPERAND (value, 0))) == SFmode) value = TREE_OPERAND (value, 0); unsigned_p[a] = TYPE_UNSIGNED (TREE_TYPE (value)); arg[a] = expand_expr (value, NULL, VOIDmode, EXPAND_NORMAL); arg[a] = mep_convert_regnum (&cgen_insn->regnums[a], arg[a]); if (cgen_insn->regnums[a].reference_p) { tree pointed_to = TREE_TYPE (TREE_TYPE (value)); machine_mode pointed_mode = TYPE_MODE (pointed_to); arg[a] = gen_rtx_MEM (pointed_mode, arg[a]); } if (arg[a] == 0) { error ("argument %d of %qE must be in the range %d...%d", a + 1, fnname, 0, cgen_insn->regnums[a].count - 1); return NULL_RTX; } } for (a = 0; a < first_arg; a++) { if (a == 0 && target && GET_MODE (target) == idata->operand[0].mode) arg[a] = target; else arg[a] = gen_reg_rtx (idata->operand[0].mode); } /* Convert the arguments into a form suitable for the intrinsic. Report an error if this isn't possible. */ for (opindex = 0; opindex < idata->n_operands; opindex++) { a = cgen_insn->op_mapping[opindex]; op[opindex] = mep_legitimize_arg (&idata->operand[opindex], arg[a], unsigned_p[a]); if (op[opindex] == 0) { mep_incompatible_arg (&idata->operand[opindex], arg[a], a + 1 - first_arg, fnname); return NULL_RTX; } } /* Emit the instruction. */ pat = idata->genfun (op[0], op[1], op[2], op[3], op[4], op[5], op[6], op[7], op[8], op[9]); if (GET_CODE (pat) == SET && GET_CODE (SET_DEST (pat)) == PC && GET_CODE (SET_SRC (pat)) == IF_THEN_ELSE) emit_jump_insn (pat); else emit_insn (pat); /* Copy lvalues back to their final locations. */ for (opindex = 0; opindex < idata->n_operands; opindex++) if (idata->operand[opindex].constraint[0] == '=') { a = cgen_insn->op_mapping[opindex]; if (a >= first_arg) { if (GET_MODE_CLASS (GET_MODE (arg[a])) != GET_MODE_CLASS (GET_MODE (op[opindex]))) emit_move_insn (arg[a], gen_lowpart (GET_MODE (arg[a]), op[opindex])); else { /* First convert the operand to the right mode, then copy it into the destination. Doing the conversion as a separate step (rather than using convert_move) means that we can avoid creating no-op moves when ARG[A] and OP[OPINDEX] refer to the same register. 
*/ op[opindex] = convert_to_mode (GET_MODE (arg[a]), op[opindex], unsigned_p[a]); if (!rtx_equal_p (arg[a], op[opindex])) emit_move_insn (arg[a], op[opindex]); } } } if (first_arg > 0 && target && target != op[0]) { emit_move_insn (target, op[0]); } return target; } static bool mep_vector_mode_supported_p (machine_mode mode ATTRIBUTE_UNUSED) { return false; } /* A subroutine of global_reg_mentioned_p, returns 1 if *LOC mentions a global register. */ static bool global_reg_mentioned_p_1 (const_rtx x) { int regno; switch (GET_CODE (x)) { case SUBREG: if (REG_P (SUBREG_REG (x))) { if (REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER && global_regs[subreg_regno (x)]) return true; return false; } break; case REG: regno = REGNO (x); if (regno < FIRST_PSEUDO_REGISTER && global_regs[regno]) return true; return false; case CALL: /* A non-constant call might use a global register. */ return true; default: break; } return false; } /* Returns nonzero if X mentions a global register. */ static bool global_reg_mentioned_p (rtx x) { if (INSN_P (x)) { if (CALL_P (x)) { if (! RTL_CONST_OR_PURE_CALL_P (x)) return true; x = CALL_INSN_FUNCTION_USAGE (x); if (x == 0) return false; } else x = PATTERN (x); } subrtx_iterator::array_type array; FOR_EACH_SUBRTX (iter, array, x, NONCONST) if (global_reg_mentioned_p_1 (*iter)) return true; return false; } /* Scheduling hooks for VLIW mode. Conceptually this is very simple: we have a two-pack architecture that takes one core insn and one coprocessor insn to make up either a 32- or 64-bit instruction word (depending on the option bit set in the chip). I.e. in VL32 mode, we can pack one 16-bit core insn and one 16-bit cop insn; in VL64 mode we can pack one 16-bit core insn and one 48-bit cop insn or two 32-bit core/cop insns. In practice, instruction selection will be a bear. Consider in VL64 mode the following insns add $1, 1 cmov $cr0, $0 these cannot pack, since the add is a 16-bit core insn and cmov is a 32-bit cop insn. However, add3 $1, $1, 1 cmov $cr0, $0 packs just fine. For good VLIW code generation in VL64 mode, we will have to have 32-bit alternatives for many of the common core insns. Not implemented. */ static int mep_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost) { int cost_specified; if (REG_NOTE_KIND (link) != 0) { /* See whether INSN and DEP_INSN are intrinsics that set the same hard register. If so, it is more important to free up DEP_INSN than it is to free up INSN. Note that intrinsics like mep_mulr are handled differently from the equivalent mep.md patterns. In mep.md, if we don't care about the value of $lo and $hi, the pattern will just clobber the registers, not set them. Since clobbers don't count as output dependencies, it is often possible to reorder two mulrs, even after reload. In contrast, mep_mulr() sets both $lo and $hi to specific values, so any pair of mep_mulr()s will be inter-dependent. We should therefore give the first mep_mulr() a higher priority. */ if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT && global_reg_mentioned_p (PATTERN (insn)) && global_reg_mentioned_p (PATTERN (dep_insn))) return 1; /* If the dependence is an anti or output dependence, assume it has no cost. */ return 0; } /* If we can't recognize the insns, we can't really do anything. */ if (recog_memoized (dep_insn) < 0) return cost; /* The latency attribute doesn't apply to MeP-h1: we use the stall attribute instead. 
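(get_attr_latency reads the latency attribute from the machine
     description; a value of zero is taken to mean that no specific
     latency was given, in which case we fall back to the cost the
     scheduler computed.)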
*/ if (!TARGET_H1) { cost_specified = get_attr_latency (dep_insn); if (cost_specified != 0) return cost_specified; } return cost; } /* ??? We don't properly compute the length of a load/store insn, taking into account the addressing mode. */ static int mep_issue_rate (void) { return TARGET_IVC2 ? 3 : 2; } /* Return true if function DECL was declared with the vliw attribute. */ bool mep_vliw_function_p (tree decl) { return lookup_attribute ("vliw", TYPE_ATTRIBUTES (TREE_TYPE (decl))) != 0; } static rtx_insn * mep_find_ready_insn (rtx_insn **ready, int nready, enum attr_slot slot, int length) { int i; for (i = nready - 1; i >= 0; --i) { rtx_insn *insn = ready[i]; if (recog_memoized (insn) >= 0 && get_attr_slot (insn) == slot && get_attr_length (insn) == length) return insn; } return NULL; } static void mep_move_ready_insn (rtx_insn **ready, int nready, rtx_insn *insn) { int i; for (i = 0; i < nready; ++i) if (ready[i] == insn) { for (; i < nready - 1; ++i) ready[i] = ready[i + 1]; ready[i] = insn; return; } gcc_unreachable (); } static void mep_print_sched_insn (FILE *dump, rtx_insn *insn) { const char *slots = "none"; const char *name = NULL; int code; char buf[30]; if (GET_CODE (PATTERN (insn)) == SET || GET_CODE (PATTERN (insn)) == PARALLEL) { switch (get_attr_slots (insn)) { case SLOTS_CORE: slots = "core"; break; case SLOTS_C3: slots = "c3"; break; case SLOTS_P0: slots = "p0"; break; case SLOTS_P0_P0S: slots = "p0,p0s"; break; case SLOTS_P0_P1: slots = "p0,p1"; break; case SLOTS_P0S: slots = "p0s"; break; case SLOTS_P0S_P1: slots = "p0s,p1"; break; case SLOTS_P1: slots = "p1"; break; default: sprintf(buf, "%d", get_attr_slots (insn)); slots = buf; break; } } if (GET_CODE (PATTERN (insn)) == USE) slots = "use"; code = INSN_CODE (insn); if (code >= 0) name = get_insn_name (code); if (!name) name = "{unknown}"; fprintf (dump, "insn %4d %4d %8s %s\n", code, INSN_UID (insn), name, slots); } static int mep_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose ATTRIBUTE_UNUSED, rtx_insn **ready, int *pnready, int clock ATTRIBUTE_UNUSED) { int nready = *pnready; rtx_insn *core_insn, *cop_insn; int i; if (dump && sched_verbose > 1) { fprintf (dump, "\nsched_reorder: clock %d nready %d\n", clock, nready); for (i=0; i<nready; i++) mep_print_sched_insn (dump, ready[i]); fprintf (dump, "\n"); } if (!mep_vliw_function_p (cfun->decl)) return 1; if (nready < 2) return 1; /* IVC2 uses a DFA to determine what's ready and what's not. */ if (TARGET_IVC2) return nready; /* We can issue either a core or coprocessor instruction. Look for a matched pair of insns to reorder. If we don't find any, don't second-guess the scheduler's priorities. */ if ((core_insn = mep_find_ready_insn (ready, nready, SLOT_CORE, 2)) && (cop_insn = mep_find_ready_insn (ready, nready, SLOT_COP, TARGET_OPT_VL64 ? 6 : 2))) ; else if (TARGET_OPT_VL64 && (core_insn = mep_find_ready_insn (ready, nready, SLOT_CORE, 4)) && (cop_insn = mep_find_ready_insn (ready, nready, SLOT_COP, 4))) ; else /* We didn't find a pair. Issue the single insn at the head of the ready list. */ return 1; /* Reorder the two insns first. */ mep_move_ready_insn (ready, nready, core_insn); mep_move_ready_insn (ready, nready - 1, cop_insn); return 2; } /* Return true if X contains a register that is set by insn PREV. 
*/ static bool mep_store_find_set (const_rtx x, const rtx_insn *prev) { subrtx_iterator::array_type array; FOR_EACH_SUBRTX (iter, array, x, NONCONST) if (REG_P (*iter) && reg_set_p (*iter, prev)) return true; return false; } /* Like mep_store_data_bypass_p, but takes a pattern as the second argument, not the containing insn. */ static bool mep_store_data_bypass_1 (rtx_insn *prev, rtx pat) { /* Cope with intrinsics like swcpa. */ if (GET_CODE (pat) == PARALLEL) { int i; for (i = 0; i < XVECLEN (pat, 0); i++) if (mep_store_data_bypass_p (prev, as_a <rtx_insn *> (XVECEXP (pat, 0, i)))) return true; return false; } /* Check for some sort of store. */ if (GET_CODE (pat) != SET || GET_CODE (SET_DEST (pat)) != MEM) return false; /* Intrinsics use patterns of the form (set (mem (scratch)) (unspec ...)). The first operand to the unspec is the store data and the other operands are used to calculate the address. */ if (GET_CODE (SET_SRC (pat)) == UNSPEC) { rtx src; int i; src = SET_SRC (pat); for (i = 1; i < XVECLEN (src, 0); i++) if (mep_store_find_set (XVECEXP (src, 0, i), prev)) return false; return true; } /* Otherwise just check that PREV doesn't modify any register mentioned in the memory destination. */ return !mep_store_find_set (SET_DEST (pat), prev); } /* Return true if INSN is a store instruction and if the store address has no true dependence on PREV. */ bool mep_store_data_bypass_p (rtx_insn *prev, rtx_insn *insn) { return INSN_P (insn) ? mep_store_data_bypass_1 (prev, PATTERN (insn)) : false; } /* Return true if, apart from HI/LO, there are no true dependencies between multiplication instructions PREV and INSN. */ bool mep_mul_hilo_bypass_p (rtx_insn *prev, rtx_insn *insn) { rtx pat; pat = PATTERN (insn); if (GET_CODE (pat) == PARALLEL) pat = XVECEXP (pat, 0, 0); if (GET_CODE (pat) != SET) return false; subrtx_iterator::array_type array; FOR_EACH_SUBRTX (iter, array, SET_SRC (pat), NONCONST) { const_rtx x = *iter; if (REG_P (x) && REGNO (x) != LO_REGNO && REGNO (x) != HI_REGNO && reg_set_p (x, prev)) return false; } return true; } /* Return true if INSN is an ldc instruction that issues to the MeP-h1 integer pipeline. This is true for instructions that read from PSW, LP, SAR, HI and LO. */ bool mep_ipipe_ldc_p (rtx_insn *insn) { rtx pat, src; pat = PATTERN (insn); /* Cope with intrinsics that set both a hard register and its shadow. The set of the hard register comes first. */ if (GET_CODE (pat) == PARALLEL) pat = XVECEXP (pat, 0, 0); if (GET_CODE (pat) == SET) { src = SET_SRC (pat); /* Cope with intrinsics. The first operand to the unspec is the source register. */ if (GET_CODE (src) == UNSPEC || GET_CODE (src) == UNSPEC_VOLATILE) src = XVECEXP (src, 0, 0); if (REG_P (src)) switch (REGNO (src)) { case PSW_REGNO: case LP_REGNO: case SAR_REGNO: case HI_REGNO: case LO_REGNO: return true; } } return false; } /* Create a VLIW bundle from core instruction CORE and coprocessor instruction COP. COP always satisfies INSN_P, but CORE can be either a new pattern or an existing instruction. Emit the bundle in place of COP and return it. */ static rtx_insn * mep_make_bundle (rtx core_insn_or_pat, rtx_insn *cop) { rtx seq; rtx_insn *core_insn; rtx_insn *insn; /* If CORE is an existing instruction, remove it, otherwise put the new pattern in an INSN harness. */ if (INSN_P (core_insn_or_pat)) { core_insn = as_a <rtx_insn *> (core_insn_or_pat); remove_insn (core_insn); } else core_insn = make_insn_raw (core_insn_or_pat); /* Generate the bundle sequence and replace COP with it.
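The result is a single insn wrapping a SEQUENCE rtx, roughly

       (insn (sequence [core_insn cop]))

     with the NEXT/PREV chain threaded through the two elements, as
     set up just below.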
*/ seq = gen_rtx_SEQUENCE (VOIDmode, gen_rtvec (2, core_insn, cop)); insn = emit_insn_after (seq, cop); remove_insn (cop); /* Set up the links of the insns inside the SEQUENCE. */ SET_PREV_INSN (core_insn) = PREV_INSN (insn); SET_NEXT_INSN (core_insn) = cop; SET_PREV_INSN (cop) = core_insn; SET_NEXT_INSN (cop) = NEXT_INSN (insn); /* Set the VLIW flag for the coprocessor instruction. */ PUT_MODE (core_insn, VOIDmode); PUT_MODE (cop, BImode); /* Derive a location for the bundle. Individual instructions cannot have their own location because there can be no assembler labels between CORE_INSN and COP. */ INSN_LOCATION (insn) = INSN_LOCATION (INSN_LOCATION (core_insn) ? core_insn : cop); INSN_LOCATION (core_insn) = 0; INSN_LOCATION (cop) = 0; return insn; } /* A helper routine for mep_insn_dependent_p called through note_stores. */ static void mep_insn_dependent_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data) { rtx * pinsn = (rtx *) data; if (*pinsn && reg_mentioned_p (x, *pinsn)) *pinsn = NULL_RTX; } /* Return true if anything in insn X is (anti,output,true) dependent on anything in insn Y. */ static int mep_insn_dependent_p (rtx x, rtx y) { rtx tmp; gcc_assert (INSN_P (x)); gcc_assert (INSN_P (y)); tmp = PATTERN (y); note_stores (PATTERN (x), mep_insn_dependent_p_1, &tmp); if (tmp == NULL_RTX) return 1; tmp = PATTERN (x); note_stores (PATTERN (y), mep_insn_dependent_p_1, &tmp); if (tmp == NULL_RTX) return 1; return 0; } static int core_insn_p (rtx_insn *insn) { if (GET_CODE (PATTERN (insn)) == USE) return 0; if (get_attr_slot (insn) == SLOT_CORE) return 1; return 0; } /* Mark coprocessor instructions that can be bundled together with the immediately preceding core instruction. This is later used to emit the "+" that tells the assembler to create a VLIW insn. For unbundled insns, the assembler will automatically add coprocessor nops, and 16-bit core nops. Due to an apparent oversight in the spec, the assembler will _not_ automatically add 32-bit core nops, so we have to emit those here. Called from mep_reorg. */ static void mep_bundle_insns (rtx_insn *insns) { rtx_insn *insn, *last = NULL, *first = NULL; int saw_scheduling = 0; /* Only do bundling if we're in vliw mode. */ if (!mep_vliw_function_p (cfun->decl)) return; /* The first insn in a bundle is TImode, the remainder are VOIDmode. After this function, the first has VOIDmode and the rest have BImode. */ /* Note: this doesn't appear to be true for JUMP_INSNs. */ /* First, move any NOTEs that are within a bundle, to the beginning of the bundle. */ for (insn = insns; insn ; insn = NEXT_INSN (insn)) { if (NOTE_P (insn) && first) /* Don't clear FIRST. */; else if (NONJUMP_INSN_P (insn) && GET_MODE (insn) == TImode) first = insn; else if (NONJUMP_INSN_P (insn) && GET_MODE (insn) == VOIDmode && first) { rtx_insn *note, *prev; /* INSN is part of a bundle; FIRST is the first insn in that bundle. Move all intervening notes out of the bundle. In addition, since the debug pass may insert a label whenever the current line changes, set the location info for INSN to match FIRST. */ INSN_LOCATION (insn) = INSN_LOCATION (first); note = PREV_INSN (insn); while (note && note != first) { prev = PREV_INSN (note); if (NOTE_P (note)) { /* Remove NOTE from here... */ SET_PREV_INSN (NEXT_INSN (note)) = PREV_INSN (note); SET_NEXT_INSN (PREV_INSN (note)) = NEXT_INSN (note); /* ...and put it in here.
*/ SET_NEXT_INSN (note) = first; SET_PREV_INSN (note) = PREV_INSN (first); SET_NEXT_INSN (PREV_INSN (note)) = note; SET_PREV_INSN (NEXT_INSN (note)) = note; } note = prev; } } else if (!NONJUMP_INSN_P (insn)) first = 0; } /* Now fix up the bundles. */ for (insn = insns; insn ; insn = NEXT_INSN (insn)) { if (NOTE_P (insn)) continue; if (!NONJUMP_INSN_P (insn)) { last = 0; continue; } /* If we're not optimizing enough, there won't be scheduling info. We detect that here. */ if (GET_MODE (insn) == TImode) saw_scheduling = 1; if (!saw_scheduling) continue; if (TARGET_IVC2) { rtx_insn *core_insn = NULL; /* IVC2 slots are scheduled by DFA, so we just accept whatever the scheduler gives us. However, we must make sure the core insn (if any) is the first in the bundle. The IVC2 assembler can insert whatever NOPs are needed, and allows a COP insn to be first. */ if (NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) != USE && GET_MODE (insn) == TImode) { for (last = insn; NEXT_INSN (last) && GET_MODE (NEXT_INSN (last)) == VOIDmode && NONJUMP_INSN_P (NEXT_INSN (last)); last = NEXT_INSN (last)) { if (core_insn_p (last)) core_insn = last; } if (core_insn_p (last)) core_insn = last; if (core_insn && core_insn != insn) { /* Swap core insn to first in the bundle. */ /* Remove core insn. */ if (PREV_INSN (core_insn)) SET_NEXT_INSN (PREV_INSN (core_insn)) = NEXT_INSN (core_insn); if (NEXT_INSN (core_insn)) SET_PREV_INSN (NEXT_INSN (core_insn)) = PREV_INSN (core_insn); /* Re-insert core insn. */ SET_PREV_INSN (core_insn) = PREV_INSN (insn); SET_NEXT_INSN (core_insn) = insn; if (PREV_INSN (core_insn)) SET_NEXT_INSN (PREV_INSN (core_insn)) = core_insn; SET_PREV_INSN (insn) = core_insn; PUT_MODE (core_insn, TImode); PUT_MODE (insn, VOIDmode); } } /* The first insn has TImode, the rest have VOIDmode */ if (GET_MODE (insn) == TImode) PUT_MODE (insn, VOIDmode); else PUT_MODE (insn, BImode); continue; } PUT_MODE (insn, VOIDmode); if (recog_memoized (insn) >= 0 && get_attr_slot (insn) == SLOT_COP) { if (JUMP_P (insn) || ! last || recog_memoized (last) < 0 || get_attr_slot (last) != SLOT_CORE || (get_attr_length (insn) != (TARGET_OPT_VL64 ? 8 : 4) - get_attr_length (last)) || mep_insn_dependent_p (insn, last)) { switch (get_attr_length (insn)) { case 8: break; case 6: insn = mep_make_bundle (gen_nop (), insn); break; case 4: if (TARGET_OPT_VL64) insn = mep_make_bundle (gen_nop32 (), insn); break; case 2: if (TARGET_OPT_VL64) error ("2 byte cop instructions are" " not allowed in 64-bit VLIW mode"); else insn = mep_make_bundle (gen_nop (), insn); break; default: error ("unexpected %d byte cop instruction", get_attr_length (insn)); break; } } else insn = mep_make_bundle (last, insn); } last = insn; } } /* Try to instantiate INTRINSIC with the operands given in OPERANDS. Return true on success. This function can fail if the intrinsic is unavailable or if the operands don't satisfy their predicates. 
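A typical caller would look something like this (illustrative
     sketch, not a quote from mep.md):

       if (mep_emit_intrinsic (mep_cmov, operands))
         DONE;

     falling through to the generic expansion when the intrinsic form
     is unavailable.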
*/ bool mep_emit_intrinsic (int intrinsic, const rtx *operands) { const struct cgen_insn *cgen_insn; const struct insn_data_d *idata; rtx newop[10]; int i; if (!mep_get_intrinsic_insn (intrinsic, &cgen_insn)) return false; idata = &insn_data[cgen_insn->icode]; for (i = 0; i < idata->n_operands; i++) { newop[i] = mep_convert_arg (idata->operand[i].mode, operands[i]); if (!idata->operand[i].predicate (newop[i], idata->operand[i].mode)) return false; } emit_insn (idata->genfun (newop[0], newop[1], newop[2], newop[3], newop[4], newop[5], newop[6], newop[7], newop[8])); return true; } /* Apply the given unary intrinsic to OPERANDS[1] and store it on OPERANDS[0]. Report an error if the instruction could not be synthesized. OPERANDS[1] is a register_operand. For sign and zero extensions, it may be smaller than SImode. */ bool mep_expand_unary_intrinsic (int ATTRIBUTE_UNUSED intrinsic, rtx * operands ATTRIBUTE_UNUSED) { return false; } /* Likewise, but apply a binary operation to OPERANDS[1] and OPERANDS[2]. OPERANDS[1] is a register_operand, OPERANDS[2] can be a general_operand. IMMEDIATE and IMMEDIATE3 are intrinsics that take an immediate third operand. REG and REG3 take register operands only. */ bool mep_expand_binary_intrinsic (int ATTRIBUTE_UNUSED immediate, int ATTRIBUTE_UNUSED immediate3, int ATTRIBUTE_UNUSED reg, int ATTRIBUTE_UNUSED reg3, rtx * operands ATTRIBUTE_UNUSED) { return false; } static bool mep_rtx_cost (rtx x, machine_mode mode ATTRIBUTE_UNUSED, int outer_code ATTRIBUTE_UNUSED, int opno ATTRIBUTE_UNUSED, int *total, bool ATTRIBUTE_UNUSED speed_t) { int code = GET_CODE (x); switch (code) { case CONST_INT: if (INTVAL (x) >= -128 && INTVAL (x) < 127) *total = 0; else if (INTVAL (x) >= -32768 && INTVAL (x) < 65536) *total = 1; else *total = 3; return true; case SYMBOL_REF: *total = optimize_size ? COSTS_N_INSNS (0) : COSTS_N_INSNS (1); return true; case MULT: *total = (GET_CODE (XEXP (x, 1)) == CONST_INT ? COSTS_N_INSNS (3) : COSTS_N_INSNS (2)); return true; } return false; } static int mep_address_cost (rtx addr ATTRIBUTE_UNUSED, machine_mode mode ATTRIBUTE_UNUSED, addr_space_t as ATTRIBUTE_UNUSED, bool ATTRIBUTE_UNUSED speed_p) { return 1; } static void mep_asm_init_sections (void) { based_section = get_unnamed_section (SECTION_WRITE, output_section_asm_op, "\t.section .based,\"aw\""); tinybss_section = get_unnamed_section (SECTION_WRITE | SECTION_BSS, output_section_asm_op, "\t.section .sbss,\"aw\""); sdata_section = get_unnamed_section (SECTION_WRITE, output_section_asm_op, "\t.section .sdata,\"aw\",@progbits"); far_section = get_unnamed_section (SECTION_WRITE, output_section_asm_op, "\t.section .far,\"aw\""); farbss_section = get_unnamed_section (SECTION_WRITE | SECTION_BSS, output_section_asm_op, "\t.section .farbss,\"aw\""); frodata_section = get_unnamed_section (0, output_section_asm_op, "\t.section .frodata,\"a\""); srodata_section = get_unnamed_section (0, output_section_asm_op, "\t.section .srodata,\"a\""); vtext_section = get_unnamed_section (SECTION_CODE | SECTION_MEP_VLIW, output_section_asm_op, "\t.section .vtext,\"axv\"\n\t.vliw"); vftext_section = get_unnamed_section (SECTION_CODE | SECTION_MEP_VLIW, output_section_asm_op, "\t.section .vftext,\"axv\"\n\t.vliw"); ftext_section = get_unnamed_section (SECTION_CODE, output_section_asm_op, "\t.section .ftext,\"ax\"\n\t.core"); } /* Initialize the GCC target structure. 
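Each hook is #undef'd first so that the default can be replaced;
     TARGET_INITIALIZER (from target-def.h) then expands using these
     macros to populate targetm at the end of the list.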
*/ #undef TARGET_ASM_FUNCTION_PROLOGUE #define TARGET_ASM_FUNCTION_PROLOGUE mep_start_function #undef TARGET_ATTRIBUTE_TABLE #define TARGET_ATTRIBUTE_TABLE mep_attribute_table #undef TARGET_COMP_TYPE_ATTRIBUTES #define TARGET_COMP_TYPE_ATTRIBUTES mep_comp_type_attributes #undef TARGET_INSERT_ATTRIBUTES #define TARGET_INSERT_ATTRIBUTES mep_insert_attributes #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P mep_function_attribute_inlinable_p #undef TARGET_CAN_INLINE_P #define TARGET_CAN_INLINE_P mep_can_inline_p #undef TARGET_SECTION_TYPE_FLAGS #define TARGET_SECTION_TYPE_FLAGS mep_section_type_flags #undef TARGET_ASM_NAMED_SECTION #define TARGET_ASM_NAMED_SECTION mep_asm_named_section #undef TARGET_INIT_BUILTINS #define TARGET_INIT_BUILTINS mep_init_builtins #undef TARGET_EXPAND_BUILTIN #define TARGET_EXPAND_BUILTIN mep_expand_builtin #undef TARGET_SCHED_ADJUST_COST #define TARGET_SCHED_ADJUST_COST mep_adjust_cost #undef TARGET_SCHED_ISSUE_RATE #define TARGET_SCHED_ISSUE_RATE mep_issue_rate #undef TARGET_SCHED_REORDER #define TARGET_SCHED_REORDER mep_sched_reorder #undef TARGET_STRIP_NAME_ENCODING #define TARGET_STRIP_NAME_ENCODING mep_strip_name_encoding #undef TARGET_ASM_SELECT_SECTION #define TARGET_ASM_SELECT_SECTION mep_select_section #undef TARGET_ASM_UNIQUE_SECTION #define TARGET_ASM_UNIQUE_SECTION mep_unique_section #undef TARGET_ENCODE_SECTION_INFO #define TARGET_ENCODE_SECTION_INFO mep_encode_section_info #undef TARGET_FUNCTION_OK_FOR_SIBCALL #define TARGET_FUNCTION_OK_FOR_SIBCALL mep_function_ok_for_sibcall #undef TARGET_RTX_COSTS #define TARGET_RTX_COSTS mep_rtx_cost #undef TARGET_ADDRESS_COST #define TARGET_ADDRESS_COST mep_address_cost #undef TARGET_MACHINE_DEPENDENT_REORG #define TARGET_MACHINE_DEPENDENT_REORG mep_reorg #undef TARGET_SETUP_INCOMING_VARARGS #define TARGET_SETUP_INCOMING_VARARGS mep_setup_incoming_varargs #undef TARGET_PASS_BY_REFERENCE #define TARGET_PASS_BY_REFERENCE mep_pass_by_reference #undef TARGET_FUNCTION_ARG #define TARGET_FUNCTION_ARG mep_function_arg #undef TARGET_FUNCTION_ARG_ADVANCE #define TARGET_FUNCTION_ARG_ADVANCE mep_function_arg_advance #undef TARGET_VECTOR_MODE_SUPPORTED_P #define TARGET_VECTOR_MODE_SUPPORTED_P mep_vector_mode_supported_p #undef TARGET_OPTION_OVERRIDE #define TARGET_OPTION_OVERRIDE mep_option_override #undef TARGET_ALLOCATE_INITIAL_VALUE #define TARGET_ALLOCATE_INITIAL_VALUE mep_allocate_initial_value #undef TARGET_ASM_INIT_SECTIONS #define TARGET_ASM_INIT_SECTIONS mep_asm_init_sections #undef TARGET_RETURN_IN_MEMORY #define TARGET_RETURN_IN_MEMORY mep_return_in_memory #undef TARGET_NARROW_VOLATILE_BITFIELD #define TARGET_NARROW_VOLATILE_BITFIELD mep_narrow_volatile_bitfield #undef TARGET_EXPAND_BUILTIN_SAVEREGS #define TARGET_EXPAND_BUILTIN_SAVEREGS mep_expand_builtin_saveregs #undef TARGET_BUILD_BUILTIN_VA_LIST #define TARGET_BUILD_BUILTIN_VA_LIST mep_build_builtin_va_list #undef TARGET_EXPAND_BUILTIN_VA_START #define TARGET_EXPAND_BUILTIN_VA_START mep_expand_va_start #undef TARGET_GIMPLIFY_VA_ARG_EXPR #define TARGET_GIMPLIFY_VA_ARG_EXPR mep_gimplify_va_arg_expr #undef TARGET_CAN_ELIMINATE #define TARGET_CAN_ELIMINATE mep_can_eliminate #undef TARGET_CONDITIONAL_REGISTER_USAGE #define TARGET_CONDITIONAL_REGISTER_USAGE mep_conditional_register_usage #undef TARGET_TRAMPOLINE_INIT #define TARGET_TRAMPOLINE_INIT mep_trampoline_init #undef TARGET_LEGITIMATE_CONSTANT_P #define TARGET_LEGITIMATE_CONSTANT_P mep_legitimate_constant_p #undef TARGET_CAN_USE_DOLOOP_P #define 
TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost struct gcc_target targetm = TARGET_INITIALIZER; #include "gt-mep.h"
gpl-2.0
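The CONST_INT case of mep_rtx_cost() above encodes a three-tier cost model: values that fit a signed byte are free, halfword-range values cost one unit, and anything wider costs three (a multi-instruction constant load). Below is a minimal standalone sketch of those tiers in plain C, not GCC internals; the helper name is hypothetical:

#include <stdio.h>

/* Mirrors the CONST_INT tiers in mep_rtx_cost() above. */
static int mep_const_int_cost(long v)
{
	if (v >= -128 && v < 127)
		return 0;	/* fits a byte immediate */
	if (v >= -32768 && v < 65536)
		return 1;	/* fits a 16-bit immediate form */
	return 3;		/* needs a multi-insn constant load */
}

int main(void)
{
	long samples[] = { 0, 100, 1000, 70000, -40000 };
	for (unsigned int i = 0; i < sizeof samples / sizeof samples[0]; i++)
		printf("%ld -> cost %d\n", samples[i], mep_const_int_cost(samples[i]));
	return 0;
}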
liaoqingwei/ltp
testcases/kernel/syscalls/ipc/shmctl/shmctl01.c
19
11025
/* * Copyright (c) International Business Machines Corp., 2001 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /* * NAME * shmctl01.c * * DESCRIPTION * shmctl01 - test the IPC_STAT, IPC_SET and IPC_RMID commands as * they are used with shmctl() * * ALGORITHM * loop if that option was specified * create a shared memory segment with read and write permission * set up any test case specific conditions * call shmctl() using the TEST macro * check the return code * if failure, issue a FAIL message. * otherwise, * if doing functionality testing * call the correct test function * if the conditions are correct, * issue a PASS message * otherwise * issue a FAIL message * otherwise * issue a PASS message * call cleanup */ #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #include "ipcshm.h" char *TCID = "shmctl01"; static int shm_id_1 = -1; static int shm_index; static struct shmid_ds buf; static struct shminfo info; static long save_time; #define FIRST 0 #define SECOND 1 static int stat_time; static void *set_shared; #define N_ATTACH 4 static pid_t pid_arr[N_ATTACH]; /* Setup, cleanup and check routines for IPC_STAT */ static void stat_setup(void), func_istat(int ret); static void stat_cleanup(void); /* Setup and check routines for IPC_SET */ static void set_setup(void), func_set(int ret); /* Check routine for IPC_INFO */ static void func_info(int ret); /* Check routine for SHM_STAT */ static void func_sstat(int ret); /* Check routine for SHM_LOCK */ static void func_lock(int ret); /* Check routine for SHM_UNLOCK */ static void func_unlock(int ret); /* Check routine for IPC_RMID */ static void func_rmid(int ret); /* Child function */ static void do_child(void); static struct test_case_t { int *shmid; int cmd; struct shmid_ds *arg; void (*func_test) (int); void (*func_setup) (void); } TC[] = { {&shm_id_1, IPC_STAT, &buf, func_istat, stat_setup}, #ifndef UCLINUX /* * The second test is not applicable to uClinux; * shared memory segments are detached on exec(), * so cannot be passed to uClinux children. */ {&shm_id_1, IPC_STAT, &buf, func_istat, stat_setup}, #endif {&shm_id_1, IPC_SET, &buf, func_set, set_setup}, {&shm_id_1, IPC_INFO, (struct shmid_ds *) &info, func_info, NULL}, {&shm_index, SHM_STAT, &buf, func_sstat, NULL}, {&shm_id_1, SHM_LOCK, NULL, func_lock, NULL}, {&shm_id_1, SHM_UNLOCK, NULL, func_unlock, NULL}, {&shm_id_1, IPC_RMID, NULL, func_rmid, NULL}, }; static int TST_TOTAL = ARRAY_SIZE(TC); #define NEWMODE 0066 #ifdef UCLINUX #define PIPE_NAME "shmctl01" static char *argv0; #endif static int stat_i; int main(int argc, char *argv[]) { int lc; int i; tst_parse_opts(argc, argv, NULL, NULL); #ifdef UCLINUX argv0 = argv[0]; maybe_run_child(do_child, "ddd", &stat_i, &stat_time, &shm_id_1); #endif setup(); for (lc = 0; TEST_LOOPING(lc); lc++) { tst_count = 0; stat_time = FIRST; /* * Create a shared memory segment with read and write * permissions. 
Do this here instead of in setup() * so that looping (-i) will work correctly. */ shm_id_1 = shmget(shmkey, SHM_SIZE, IPC_CREAT | IPC_EXCL | SHM_RW); if (shm_id_1 == -1) tst_brkm(TBROK, cleanup, "couldn't create the shared" " memory segment"); for (i = 0; i < TST_TOTAL; i++) { /* * if needed, set up any required conditions by * calling the appropriate setup function */ if (TC[i].func_setup != NULL) (*TC[i].func_setup) (); TEST(shmctl(*(TC[i].shmid), TC[i].cmd, TC[i].arg)); if (TEST_RETURN == -1) { tst_resm(TFAIL, "%s call failed - errno " "= %d : %s", TCID, TEST_ERRNO, strerror(TEST_ERRNO)); continue; } (*TC[i].func_test) (TEST_RETURN); } } cleanup(); tst_exit(); } /* * set_shmat() - Attach the shared memory and return the pointer. Use * this separate routine to avoid code duplication in * stat_setup() below. */ void *set_shmat(void) { void *rval; /* attach the shared memory */ rval = shmat(shm_id_1, 0, 0); /* * if shmat() fails, the only thing we can do is * print a message to that effect. */ if (rval == (void *)-1) { tst_resm(TBROK, "shmat() failed - %s", strerror(errno)); cleanup(); } return rval; } /* * stat_setup() - Set up for the IPC_STAT command with shmctl(). * Make things interesting by forking some children * that will either attach or inherit the shared memory. */ void stat_setup(void) { void *set_shmat(); pid_t pid; /* * The first time through, let the children attach the memory. * The second time through, attach the memory first and let * the children inherit the memory. */ if (stat_time == SECOND) /* * use the global "set_shared" variable here so that * it can be removed in the stat_func() routine. */ set_shared = set_shmat(); tst_flush(); for (stat_i = 0; stat_i < N_ATTACH; stat_i++) { pid = FORK_OR_VFORK(); if (pid == -1) tst_brkm(TBROK, cleanup, "could not fork"); if (pid == 0) { #ifdef UCLINUX if (self_exec(argv0, "ddd", stat_i, stat_time, shm_id_1) < 0) tst_brkm(TBROK, cleanup, "could not self_exec"); #else do_child(); #endif } else { /* save the child's pid for cleanup later */ pid_arr[stat_i] = pid; TST_PROCESS_STATE_WAIT(cleanup, pid, 'S'); } } } void do_child(void) { void *test; if (stat_time == FIRST) test = set_shmat(); else test = set_shared; memcpy(test, &stat_i, sizeof(stat_i)); /* pause until we get a signal from stat_cleanup() */ pause(); /* now we're back - detach the memory and exit */ if (shmdt(test) == -1) tst_resm(TBROK, "shmdt() failed - %d", errno); tst_exit(); } /* * func_istat() - check the functionality of the IPC_STAT command with shmctl() * by looking at the pid of the creator, the segment size, * the number of attaches and the mode. */ void func_istat(int ret) { int fail = 0; pid_t pid; /* check perm, pid, nattach and size */ pid = getpid(); if (buf.shm_cpid != pid) { tst_resm(TFAIL, "creator pid is incorrect"); fail = 1; } if (!fail && buf.shm_segsz != SHM_SIZE) { tst_resm(TFAIL, "segment size is incorrect"); fail = 1; } /* * The first time through, only the children attach the memory, so * the attaches equal N_ATTACH + stat_time (0). The second time * through, the parent attaches the memory and the children inherit * that memory so the attaches equal N_ATTACH + stat_time (1). 
*/ if (!fail && buf.shm_nattch != N_ATTACH + stat_time) { tst_resm(TFAIL, "# of attaches is incorrect - %ld", buf.shm_nattch); fail = 1; } /* use MODE_MASK to make sure we are comparing the last 9 bits */ if (!fail && (buf.shm_perm.mode & MODE_MASK) != ((SHM_RW) & MODE_MASK)) { tst_resm(TFAIL, "segment mode is incorrect"); fail = 1; } stat_cleanup(); /* save the change time for use in the next test */ save_time = buf.shm_ctime; if (fail) return; tst_resm(TPASS, "pid, size, # of attaches and mode are correct " "- pass #%d", stat_time); } /* * stat_cleanup() - signal the children to clean up after themselves and * have the parent make dessert, er, um, make that remove * the shared memory that is no longer needed. */ void stat_cleanup(void) { int i; /* wake up the children so they can detach the memory and exit */ for (i = 0; i < N_ATTACH; i++) { if (kill(pid_arr[i], SIGUSR1) == -1) tst_brkm(TBROK, cleanup, "kill failed"); } /* remove the parent's shared memory the second time through */ if (stat_time == SECOND) { if (shmdt(set_shared) == -1) tst_resm(TINFO, "shmdt() failed"); } for (i = 0; i < N_ATTACH; i++) { if (waitpid(pid_arr[i], NULL, 0) == -1) tst_brkm(TBROK, cleanup, "waitpid failed"); } stat_time++; } /* * set_setup() - set up for the IPC_SET command with shmctl() */ void set_setup(void) { /* set up a new mode for the shared memory segment */ buf.shm_perm.mode = SHM_RW | NEWMODE; /* sleep for one second to get a different shm_ctime value */ sleep(1); } /* * func_set() - check the functionality of the IPC_SET command with shmctl() */ void func_set(int ret) { int fail = 0; /* first stat the shared memory to get the new data */ if (shmctl(shm_id_1, IPC_STAT, &buf) == -1) { tst_resm(TBROK, "stat failed in func_set()"); return; } if ((buf.shm_perm.mode & MODE_MASK) != ((SHM_RW | NEWMODE) & MODE_MASK)) { tst_resm(TFAIL, "new mode is incorrect"); fail = 1; } if (!fail && save_time >= buf.shm_ctime) { tst_resm(TFAIL, "change time is incorrect"); fail = 1; } if (fail) return; tst_resm(TPASS, "new mode and change time are correct"); } static void func_info(int ret) { if (info.shmmin != 1) tst_resm(TFAIL, "value of shmmin is incorrect"); else tst_resm(TPASS, "get correct shared memory limits"); } static void func_sstat(int ret) { if (ret >= 0) tst_resm(TPASS, "get correct shared memory id"); else tst_resm(TFAIL, "shared memory id is incorrect"); } static void func_lock(int ret) { if (shmctl(shm_id_1, IPC_STAT, &buf) == -1) { tst_resm(TBROK, "stat failed in func_lock()"); return; } if (buf.shm_perm.mode & SHM_LOCKED) tst_resm(TPASS, "SHM_LOCK is set"); else tst_resm(TFAIL, "SHM_LOCK is cleared"); } static void func_unlock(int ret) { if (shmctl(shm_id_1, IPC_STAT, &buf) == -1) { tst_resm(TBROK, "stat failed in func_unlock()"); return; } if (buf.shm_perm.mode & SHM_LOCKED) tst_resm(TFAIL, "SHM_LOCK is set"); else tst_resm(TPASS, "SHM_LOCK is cleared"); } /* * func_rmid() - check the functionality of the IPC_RMID command with shmctl() */ void func_rmid(int ret) { /* Do another shmctl() - we should get EINVAL */ if (shmctl(shm_id_1, IPC_STAT, &buf) != -1) tst_brkm(TBROK, cleanup, "shmctl succeeded on expected fail"); if (errno != EINVAL) tst_resm(TFAIL, "returned unexpected errno %d", errno); else tst_resm(TPASS, "shared memory appears to be removed"); shm_id_1 = -1; } /* * sighandler() - handle signals, in this case SIGUSR1 is the only one expected */ void sighandler(int sig) { if (sig != SIGUSR1) tst_resm(TBROK, "received unexpected signal %d", sig); } void setup(void) { tst_sig(FORK, sighandler, 
cleanup); TEST_PAUSE; tst_tmpdir(); shmkey = getipckey(); } void cleanup(void) { rm_shm(shm_id_1); tst_rmdir(); }
gpl-2.0
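For reference, the shmctl() calls that shmctl01.c above drives through its TC[] table look like this in a minimal, self-contained userspace program (segment size and permissions here are arbitrary choices for the sketch, not values from the test):

#include <stdio.h>
#include <stdlib.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	struct shmid_ds ds;

	/* Create and attach a private segment, as the test's setup does. */
	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	if (id == -1) { perror("shmget"); return EXIT_FAILURE; }
	void *p = shmat(id, NULL, 0);
	if (p == (void *) -1) { perror("shmat"); return EXIT_FAILURE; }

	/* IPC_STAT fills shmid_ds; func_istat() checks these same fields. */
	if (shmctl(id, IPC_STAT, &ds) == -1) { perror("shmctl"); return EXIT_FAILURE; }
	printf("cpid=%d segsz=%zu nattch=%lu\n", (int) ds.shm_cpid,
	       (size_t) ds.shm_segsz, (unsigned long) ds.shm_nattch);

	/* Detach and remove; a later IPC_STAT on the removed id fails,
	 * which is what func_rmid() verifies. */
	shmdt(p);
	shmctl(id, IPC_RMID, NULL);
	return 0;
}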
stevezilla/amherst-linux-devel
arch/arm/mach-imx/imx31-dt.c
275
1141
/* * Copyright 2012 Sascha Hauer, Pengutronix * * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License * Version 2 or later at the following locations: * * http://www.opensource.org/licenses/gpl-license.html * http://www.gnu.org/copyleft/gpl.html */ #include <linux/irq.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> #include "common.h" #include "mx31.h" static void __init imx31_dt_init(void) { mxc_arch_reset_init_dt(); of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); } static const char *imx31_dt_board_compat[] __initdata = { "fsl,imx31", NULL }; static void __init imx31_dt_timer_init(void) { mx31_clocks_init_dt(); } DT_MACHINE_START(IMX31_DT, "Freescale i.MX31 (Device Tree Support)") .map_io = mx31_map_io, .init_early = imx31_init_early, .init_irq = mx31_init_irq, .handle_irq = imx31_handle_irq, .init_time = imx31_dt_timer_init, .init_machine = imx31_dt_init, .dt_compat = imx31_dt_board_compat, .restart = mxc_restart, MACHINE_END
gpl-2.0
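The only board-specific data in imx31-dt.c above is the dt_compat list. As a rough sketch of what that list is for, the plain-C fragment below (hypothetical helper name, not kernel code) shows how a NULL-terminated compatible list is matched against a board's root "compatible" string:

#include <stdio.h>
#include <string.h>

/* Walk a NULL-terminated compatible list, as device-tree machine
 * matching conceptually does with the root node's "compatible". */
static int board_matches(const char *const *dt_compat, const char *root)
{
	for (; *dt_compat; dt_compat++)
		if (strcmp(*dt_compat, root) == 0)
			return 1;
	return 0;
}

int main(void)
{
	static const char *imx31_compat[] = { "fsl,imx31", NULL };
	printf("%d\n", board_matches(imx31_compat, "fsl,imx31"));	/* 1 */
	printf("%d\n", board_matches(imx31_compat, "fsl,imx51"));	/* 0 */
	return 0;
}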
BobZhome/Samsung_SPH-M930_kernel
arch/powerpc/sysdev/fsl_rio.c
531
42998
/* * Freescale MPC85xx/MPC86xx RapidIO support * * Copyright 2009 Sysgo AG * Thomas Moll <thomas.moll@sysgo.com> * - fixed maintenance access routines, check for aligned access * * Copyright 2009 Integrated Device Technology, Inc. * Alex Bounine <alexandre.bounine@idt.com> * - Added Port-Write message handling * - Added Machine Check exception handling * * Copyright (C) 2007, 2008 Freescale Semiconductor, Inc. * Zhang Wei <wei.zhang@freescale.com> * * Copyright 2005 MontaVista Software, Inc. * Matt Porter <mporter@kernel.crashing.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/init.h> #include <linux/module.h> #include <linux/types.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/rio.h> #include <linux/rio_drv.h> #include <linux/of_platform.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/kfifo.h> #include <asm/io.h> #include <asm/machdep.h> #include <asm/uaccess.h> #undef DEBUG_PW /* Port-Write debugging */ /* RapidIO definition irq, which read from OF-tree */ #define IRQ_RIO_BELL(m) (((struct rio_priv *)(m->priv))->bellirq) #define IRQ_RIO_TX(m) (((struct rio_priv *)(m->priv))->txirq) #define IRQ_RIO_RX(m) (((struct rio_priv *)(m->priv))->rxirq) #define IRQ_RIO_PW(m) (((struct rio_priv *)(m->priv))->pwirq) #define RIO_ATMU_REGS_OFFSET 0x10c00 #define RIO_P_MSG_REGS_OFFSET 0x11000 #define RIO_S_MSG_REGS_OFFSET 0x13000 #define RIO_ESCSR 0x158 #define RIO_CCSR 0x15c #define RIO_LTLEDCSR 0x0608 #define RIO_LTLEDCSR_IER 0x80000000 #define RIO_LTLEDCSR_PRT 0x01000000 #define RIO_LTLEECSR 0x060c #define RIO_EPWISR 0x10010 #define RIO_ISR_AACR 0x10120 #define RIO_ISR_AACR_AA 0x1 /* Accept All ID */ #define RIO_MAINT_WIN_SIZE 0x400000 #define RIO_DBELL_WIN_SIZE 0x1000 #define RIO_MSG_OMR_MUI 0x00000002 #define RIO_MSG_OSR_TE 0x00000080 #define RIO_MSG_OSR_QOI 0x00000020 #define RIO_MSG_OSR_QFI 0x00000010 #define RIO_MSG_OSR_MUB 0x00000004 #define RIO_MSG_OSR_EOMI 0x00000002 #define RIO_MSG_OSR_QEI 0x00000001 #define RIO_MSG_IMR_MI 0x00000002 #define RIO_MSG_ISR_TE 0x00000080 #define RIO_MSG_ISR_QFI 0x00000010 #define RIO_MSG_ISR_DIQI 0x00000001 #define RIO_IPWMR_SEN 0x00100000 #define RIO_IPWMR_QFIE 0x00000100 #define RIO_IPWMR_EIE 0x00000020 #define RIO_IPWMR_CQ 0x00000002 #define RIO_IPWMR_PWE 0x00000001 #define RIO_IPWSR_QF 0x00100000 #define RIO_IPWSR_TE 0x00000080 #define RIO_IPWSR_QFI 0x00000010 #define RIO_IPWSR_PWD 0x00000008 #define RIO_IPWSR_PWB 0x00000004 #define RIO_MSG_DESC_SIZE 32 #define RIO_MSG_BUFFER_SIZE 4096 #define RIO_MIN_TX_RING_SIZE 2 #define RIO_MAX_TX_RING_SIZE 2048 #define RIO_MIN_RX_RING_SIZE 2 #define RIO_MAX_RX_RING_SIZE 2048 #define DOORBELL_DMR_DI 0x00000002 #define DOORBELL_DSR_TE 0x00000080 #define DOORBELL_DSR_QFI 0x00000010 #define DOORBELL_DSR_DIQI 0x00000001 #define DOORBELL_TID_OFFSET 0x02 #define DOORBELL_SID_OFFSET 0x04 #define DOORBELL_INFO_OFFSET 0x06 #define DOORBELL_MESSAGE_SIZE 0x08 #define DBELL_SID(x) (*(u16 *)(x + DOORBELL_SID_OFFSET)) #define DBELL_TID(x) (*(u16 *)(x + DOORBELL_TID_OFFSET)) #define DBELL_INF(x) (*(u16 *)(x + DOORBELL_INFO_OFFSET)) struct rio_atmu_regs { u32 rowtar; u32 rowtear; u32 rowbar; u32 pad2; u32 rowar; u32 pad3[3]; }; struct rio_msg_regs { u32 omr; u32 osr; u32 pad1; u32 odqdpar; u32 pad2; u32 osar; u32 odpr; u32 odatr; u32 
odcr; u32 pad3; u32 odqepar; u32 pad4[13]; u32 imr; u32 isr; u32 pad5; u32 ifqdpar; u32 pad6; u32 ifqepar; u32 pad7[226]; u32 odmr; u32 odsr; u32 res0[4]; u32 oddpr; u32 oddatr; u32 res1[3]; u32 odretcr; u32 res2[12]; u32 dmr; u32 dsr; u32 pad8; u32 dqdpar; u32 pad9; u32 dqepar; u32 pad10[26]; u32 pwmr; u32 pwsr; u32 epwqbar; u32 pwqbar; }; struct rio_tx_desc { u32 res1; u32 saddr; u32 dport; u32 dattr; u32 res2; u32 res3; u32 dwcnt; u32 res4; }; struct rio_dbell_ring { void *virt; dma_addr_t phys; }; struct rio_msg_tx_ring { void *virt; dma_addr_t phys; void *virt_buffer[RIO_MAX_TX_RING_SIZE]; dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE]; int tx_slot; int size; void *dev_id; }; struct rio_msg_rx_ring { void *virt; dma_addr_t phys; void *virt_buffer[RIO_MAX_RX_RING_SIZE]; int rx_slot; int size; void *dev_id; }; struct rio_port_write_msg { void *virt; dma_addr_t phys; u32 msg_count; u32 err_count; u32 discard_count; }; struct rio_priv { struct device *dev; void __iomem *regs_win; struct rio_atmu_regs __iomem *atmu_regs; struct rio_atmu_regs __iomem *maint_atmu_regs; struct rio_atmu_regs __iomem *dbell_atmu_regs; void __iomem *dbell_win; void __iomem *maint_win; struct rio_msg_regs __iomem *msg_regs; struct rio_dbell_ring dbell_ring; struct rio_msg_tx_ring msg_tx_ring; struct rio_msg_rx_ring msg_rx_ring; struct rio_port_write_msg port_write_msg; int bellirq; int txirq; int rxirq; int pwirq; struct work_struct pw_work; struct kfifo pw_fifo; spinlock_t pw_fifo_lock; }; #define __fsl_read_rio_config(x, addr, err, op) \ __asm__ __volatile__( \ "1: "op" %1,0(%2)\n" \ " eieio\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3: li %1,-1\n" \ " li %0,%3\n" \ " b 2b\n" \ ".section __ex_table,\"a\"\n" \ " .align 2\n" \ " .long 1b,3b\n" \ ".text" \ : "=r" (err), "=r" (x) \ : "b" (addr), "i" (-EFAULT), "0" (err)) static void __iomem *rio_regs_win; static int (*saved_mcheck_exception)(struct pt_regs *regs); static int fsl_rio_mcheck_exception(struct pt_regs *regs) { const struct exception_table_entry *entry = NULL; unsigned long reason = (mfspr(SPRN_MCSR) & MCSR_MASK); if (reason & MCSR_BUS_RBERR) { reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR)); if (reason & (RIO_LTLEDCSR_IER | RIO_LTLEDCSR_PRT)) { /* Check if we are prepared to handle this fault */ entry = search_exception_tables(regs->nip); if (entry) { pr_debug("RIO: %s - MC Exception handled\n", __func__); out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0); regs->msr |= MSR_RI; regs->nip = entry->fixup; return 1; } } } if (saved_mcheck_exception) return saved_mcheck_exception(regs); else return cur_cpu_spec->machine_check(regs); } /** * fsl_rio_doorbell_send - Send a MPC85xx doorbell message * @mport: RapidIO master port info * @index: ID of RapidIO interface * @destid: Destination ID of target device * @data: 16-bit info field of RapidIO doorbell message * * Sends a MPC85xx doorbell message. Returns %0 on success or * %-EINVAL on failure. */ static int fsl_rio_doorbell_send(struct rio_mport *mport, int index, u16 destid, u16 data) { struct rio_priv *priv = mport->priv; pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n", index, destid, data); switch (mport->phy_type) { case RIO_PHY_PARALLEL: out_be32(&priv->dbell_atmu_regs->rowtar, destid << 22); out_be16(priv->dbell_win, data); break; case RIO_PHY_SERIAL: /* In the serial version silicons, such as MPC8548, MPC8641, * below operations is must be. 
*/ out_be32(&priv->msg_regs->odmr, 0x00000000); out_be32(&priv->msg_regs->odretcr, 0x00000004); out_be32(&priv->msg_regs->oddpr, destid << 16); out_be32(&priv->msg_regs->oddatr, data); out_be32(&priv->msg_regs->odmr, 0x00000001); break; } return 0; } /** * fsl_local_config_read - Generate a MPC85xx local config space read * @mport: RapidIO master port info * @index: ID of RapidIO interface * @offset: Offset into configuration space * @len: Length (in bytes) of the maintenance transaction * @data: Value to be read into * * Generates a MPC85xx local configuration space read. Returns %0 on * success or %-EINVAL on failure. */ static int fsl_local_config_read(struct rio_mport *mport, int index, u32 offset, int len, u32 *data) { struct rio_priv *priv = mport->priv; pr_debug("fsl_local_config_read: index %d offset %8.8x\n", index, offset); *data = in_be32(priv->regs_win + offset); return 0; } /** * fsl_local_config_write - Generate a MPC85xx local config space write * @mport: RapidIO master port info * @index: ID of RapidIO interface * @offset: Offset into configuration space * @len: Length (in bytes) of the maintenance transaction * @data: Value to be written * * Generates a MPC85xx local configuration space write. Returns %0 on * success or %-EINVAL on failure. */ static int fsl_local_config_write(struct rio_mport *mport, int index, u32 offset, int len, u32 data) { struct rio_priv *priv = mport->priv; pr_debug ("fsl_local_config_write: index %d offset %8.8x data %8.8x\n", index, offset, data); out_be32(priv->regs_win + offset, data); return 0; } /** * fsl_rio_config_read - Generate a MPC85xx read maintenance transaction * @mport: RapidIO master port info * @index: ID of RapidIO interface * @destid: Destination ID of transaction * @hopcount: Number of hops to target device * @offset: Offset into configuration space * @len: Length (in bytes) of the maintenance transaction * @val: Location to be read into * * Generates a MPC85xx read maintenance transaction. Returns %0 on * success or %-EINVAL on failure. */ static int fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid, u8 hopcount, u32 offset, int len, u32 *val) { struct rio_priv *priv = mport->priv; u8 *data; u32 rval, err = 0; pr_debug ("fsl_rio_config_read: index %d destid %d hopcount %d offset %8.8x len %d\n", index, destid, hopcount, offset, len); /* 16MB maintenance window possible */ /* allow only aligned access to maintenance registers */ if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len)) return -EINVAL; out_be32(&priv->maint_atmu_regs->rowtar, (destid << 22) | (hopcount << 12) | (offset >> 12)); out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1)); switch (len) { case 1: __fsl_read_rio_config(rval, data, err, "lbz"); break; case 2: __fsl_read_rio_config(rval, data, err, "lhz"); break; case 4: __fsl_read_rio_config(rval, data, err, "lwz"); break; default: return -EINVAL; } if (err) { pr_debug("RIO: cfg_read error %d for %x:%x:%x\n", err, destid, hopcount, offset); } *val = rval; return err; } /** * fsl_rio_config_write - Generate a MPC85xx write maintenance transaction * @mport: RapidIO master port info * @index: ID of RapidIO interface * @destid: Destination ID of transaction * @hopcount: Number of hops to target device * @offset: Offset into configuration space * @len: Length (in bytes) of the maintenance transaction * @val: Value to be written * * Generates an MPC85xx write maintenance transaction. 
Returns %0 on * success or %-EINVAL on failure. */ static int fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid, u8 hopcount, u32 offset, int len, u32 val) { struct rio_priv *priv = mport->priv; u8 *data; pr_debug ("fsl_rio_config_write: index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n", index, destid, hopcount, offset, len, val); /* 16MB maintenance windows possible */ /* allow only aligned access to maintenance registers */ if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len)) return -EINVAL; out_be32(&priv->maint_atmu_regs->rowtar, (destid << 22) | (hopcount << 12) | (offset >> 12)); out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1)); switch (len) { case 1: out_8((u8 *) data, val); break; case 2: out_be16((u16 *) data, val); break; case 4: out_be32((u32 *) data, val); break; default: return -EINVAL; } return 0; } /** * rio_hw_add_outb_message - Add message to the MPC85xx outbound message queue * @mport: Master port with outbound message queue * @rdev: Target of outbound message * @mbox: Outbound mailbox * @buffer: Message to add to outbound queue * @len: Length of message * * Adds the @buffer message to the MPC85xx outbound message queue. Returns * %0 on success or %-EINVAL on failure. */ int rio_hw_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox, void *buffer, size_t len) { struct rio_priv *priv = mport->priv; u32 omr; struct rio_tx_desc *desc = (struct rio_tx_desc *)priv->msg_tx_ring.virt + priv->msg_tx_ring.tx_slot; int ret = 0; pr_debug ("RIO: rio_hw_add_outb_message(): destid %4.4x mbox %d buffer %8.8x len %8.8x\n", rdev->destid, mbox, (int)buffer, len); if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) { ret = -EINVAL; goto out; } /* Copy and clear rest of buffer */ memcpy(priv->msg_tx_ring.virt_buffer[priv->msg_tx_ring.tx_slot], buffer, len); if (len < (RIO_MAX_MSG_SIZE - 4)) memset(priv->msg_tx_ring.virt_buffer[priv->msg_tx_ring.tx_slot] + len, 0, RIO_MAX_MSG_SIZE - len); switch (mport->phy_type) { case RIO_PHY_PARALLEL: /* Set mbox field for message */ desc->dport = mbox & 0x3; /* Enable EOMI interrupt, set priority, and set destid */ desc->dattr = 0x28000000 | (rdev->destid << 2); break; case RIO_PHY_SERIAL: /* Set mbox field for message, and set destid */ desc->dport = (rdev->destid << 16) | (mbox & 0x3); /* Enable EOMI interrupt and priority */ desc->dattr = 0x28000000; break; } /* Set transfer size aligned to next power of 2 (in double words) */ desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len); /* Set snooping and source buffer address */ desc->saddr = 0x00000004 | priv->msg_tx_ring.phys_buffer[priv->msg_tx_ring.tx_slot]; /* Increment enqueue pointer */ omr = in_be32(&priv->msg_regs->omr); out_be32(&priv->msg_regs->omr, omr | RIO_MSG_OMR_MUI); /* Go to next descriptor */ if (++priv->msg_tx_ring.tx_slot == priv->msg_tx_ring.size) priv->msg_tx_ring.tx_slot = 0; out: return ret; } EXPORT_SYMBOL_GPL(rio_hw_add_outb_message); /** * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler * @irq: Linux interrupt number * @dev_instance: Pointer to interrupt-specific data * * Handles outbound message interrupts. Executes a register outbound * mailbox event handler and acks the interrupt occurrence. 
*/ static irqreturn_t fsl_rio_tx_handler(int irq, void *dev_instance) { int osr; struct rio_mport *port = (struct rio_mport *)dev_instance; struct rio_priv *priv = port->priv; osr = in_be32(&priv->msg_regs->osr); if (osr & RIO_MSG_OSR_TE) { pr_info("RIO: outbound message transmission error\n"); out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_TE); goto out; } if (osr & RIO_MSG_OSR_QOI) { pr_info("RIO: outbound message queue overflow\n"); out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_QOI); goto out; } if (osr & RIO_MSG_OSR_EOMI) { u32 dqp = in_be32(&priv->msg_regs->odqdpar); int slot = (dqp - priv->msg_tx_ring.phys) >> 5; port->outb_msg[0].mcback(port, priv->msg_tx_ring.dev_id, -1, slot); /* Ack the end-of-message interrupt */ out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_EOMI); } out: return IRQ_HANDLED; } /** * rio_open_outb_mbox - Initialize MPC85xx outbound mailbox * @mport: Master port implementing the outbound message unit * @dev_id: Device specific pointer to pass on event * @mbox: Mailbox to open * @entries: Number of entries in the outbound mailbox ring * * Initializes buffer ring, request the outbound message interrupt, * and enables the outbound message unit. Returns %0 on success and * %-EINVAL or %-ENOMEM on failure. */ int rio_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries) { int i, j, rc = 0; struct rio_priv *priv = mport->priv; if ((entries < RIO_MIN_TX_RING_SIZE) || (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) { rc = -EINVAL; goto out; } /* Initialize shadow copy ring */ priv->msg_tx_ring.dev_id = dev_id; priv->msg_tx_ring.size = entries; for (i = 0; i < priv->msg_tx_ring.size; i++) { priv->msg_tx_ring.virt_buffer[i] = dma_alloc_coherent(priv->dev, RIO_MSG_BUFFER_SIZE, &priv->msg_tx_ring.phys_buffer[i], GFP_KERNEL); if (!priv->msg_tx_ring.virt_buffer[i]) { rc = -ENOMEM; for (j = 0; j < priv->msg_tx_ring.size; j++) if (priv->msg_tx_ring.virt_buffer[j]) dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE, priv->msg_tx_ring. virt_buffer[j], priv->msg_tx_ring. 
phys_buffer[j]); goto out; } } /* Initialize outbound message descriptor ring */ priv->msg_tx_ring.virt = dma_alloc_coherent(priv->dev, priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE, &priv->msg_tx_ring.phys, GFP_KERNEL); if (!priv->msg_tx_ring.virt) { rc = -ENOMEM; goto out_dma; } memset(priv->msg_tx_ring.virt, 0, priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE); priv->msg_tx_ring.tx_slot = 0; /* Point dequeue/enqueue pointers at first entry in ring */ out_be32(&priv->msg_regs->odqdpar, priv->msg_tx_ring.phys); out_be32(&priv->msg_regs->odqepar, priv->msg_tx_ring.phys); /* Configure for snooping */ out_be32(&priv->msg_regs->osar, 0x00000004); /* Clear interrupt status */ out_be32(&priv->msg_regs->osr, 0x000000b3); /* Hook up outbound message handler */ rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0, "msg_tx", (void *)mport); if (rc < 0) goto out_irq; /* * Configure outbound message unit * Snooping * Interrupts (all enabled, except QEIE) * Chaining mode * Disable */ out_be32(&priv->msg_regs->omr, 0x00100220); /* Set number of entries */ out_be32(&priv->msg_regs->omr, in_be32(&priv->msg_regs->omr) | ((get_bitmask_order(entries) - 2) << 12)); /* Now enable the unit */ out_be32(&priv->msg_regs->omr, in_be32(&priv->msg_regs->omr) | 0x1); out: return rc; out_irq: dma_free_coherent(priv->dev, priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE, priv->msg_tx_ring.virt, priv->msg_tx_ring.phys); out_dma: for (i = 0; i < priv->msg_tx_ring.size; i++) dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE, priv->msg_tx_ring.virt_buffer[i], priv->msg_tx_ring.phys_buffer[i]); return rc; } /** * rio_close_outb_mbox - Shut down MPC85xx outbound mailbox * @mport: Master port implementing the outbound message unit * @mbox: Mailbox to close * * Disables the outbound message unit, free all buffers, and * frees the outbound message interrupt. */ void rio_close_outb_mbox(struct rio_mport *mport, int mbox) { struct rio_priv *priv = mport->priv; /* Disable outbound message unit */ out_be32(&priv->msg_regs->omr, 0); /* Free ring */ dma_free_coherent(priv->dev, priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE, priv->msg_tx_ring.virt, priv->msg_tx_ring.phys); /* Free interrupt */ free_irq(IRQ_RIO_TX(mport), (void *)mport); } /** * fsl_rio_rx_handler - MPC85xx inbound message interrupt handler * @irq: Linux interrupt number * @dev_instance: Pointer to interrupt-specific data * * Handles inbound message interrupts. Executes a registered inbound * mailbox event handler and acks the interrupt occurrence. */ static irqreturn_t fsl_rio_rx_handler(int irq, void *dev_instance) { int isr; struct rio_mport *port = (struct rio_mport *)dev_instance; struct rio_priv *priv = port->priv; isr = in_be32(&priv->msg_regs->isr); if (isr & RIO_MSG_ISR_TE) { pr_info("RIO: inbound message reception error\n"); out_be32((void *)&priv->msg_regs->isr, RIO_MSG_ISR_TE); goto out; } /* XXX Need to check/dispatch until queue empty */ if (isr & RIO_MSG_ISR_DIQI) { /* * We implement *only* mailbox 0, but can receive messages * for any mailbox/letter to that mailbox destination. So, * make the callback with an unknown/invalid mailbox number * argument. 
*/ port->inb_msg[0].mcback(port, priv->msg_rx_ring.dev_id, -1, -1); /* Ack the queueing interrupt */ out_be32(&priv->msg_regs->isr, RIO_MSG_ISR_DIQI); } out: return IRQ_HANDLED; } /** * rio_open_inb_mbox - Initialize MPC85xx inbound mailbox * @mport: Master port implementing the inbound message unit * @dev_id: Device specific pointer to pass on event * @mbox: Mailbox to open * @entries: Number of entries in the inbound mailbox ring * * Initializes buffer ring, request the inbound message interrupt, * and enables the inbound message unit. Returns %0 on success * and %-EINVAL or %-ENOMEM on failure. */ int rio_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries) { int i, rc = 0; struct rio_priv *priv = mport->priv; if ((entries < RIO_MIN_RX_RING_SIZE) || (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) { rc = -EINVAL; goto out; } /* Initialize client buffer ring */ priv->msg_rx_ring.dev_id = dev_id; priv->msg_rx_ring.size = entries; priv->msg_rx_ring.rx_slot = 0; for (i = 0; i < priv->msg_rx_ring.size; i++) priv->msg_rx_ring.virt_buffer[i] = NULL; /* Initialize inbound message ring */ priv->msg_rx_ring.virt = dma_alloc_coherent(priv->dev, priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE, &priv->msg_rx_ring.phys, GFP_KERNEL); if (!priv->msg_rx_ring.virt) { rc = -ENOMEM; goto out; } /* Point dequeue/enqueue pointers at first entry in ring */ out_be32(&priv->msg_regs->ifqdpar, (u32) priv->msg_rx_ring.phys); out_be32(&priv->msg_regs->ifqepar, (u32) priv->msg_rx_ring.phys); /* Clear interrupt status */ out_be32(&priv->msg_regs->isr, 0x00000091); /* Hook up inbound message handler */ rc = request_irq(IRQ_RIO_RX(mport), fsl_rio_rx_handler, 0, "msg_rx", (void *)mport); if (rc < 0) { dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE, priv->msg_tx_ring.virt_buffer[i], priv->msg_tx_ring.phys_buffer[i]); goto out; } /* * Configure inbound message unit: * Snooping * 4KB max message size * Unmask all interrupt sources * Disable */ out_be32(&priv->msg_regs->imr, 0x001b0060); /* Set number of queue entries */ setbits32(&priv->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12); /* Now enable the unit */ setbits32(&priv->msg_regs->imr, 0x1); out: return rc; } /** * rio_close_inb_mbox - Shut down MPC85xx inbound mailbox * @mport: Master port implementing the inbound message unit * @mbox: Mailbox to close * * Disables the inbound message unit, free all buffers, and * frees the inbound message interrupt. */ void rio_close_inb_mbox(struct rio_mport *mport, int mbox) { struct rio_priv *priv = mport->priv; /* Disable inbound message unit */ out_be32(&priv->msg_regs->imr, 0); /* Free ring */ dma_free_coherent(priv->dev, priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE, priv->msg_rx_ring.virt, priv->msg_rx_ring.phys); /* Free interrupt */ free_irq(IRQ_RIO_RX(mport), (void *)mport); } /** * rio_hw_add_inb_buffer - Add buffer to the MPC85xx inbound message queue * @mport: Master port implementing the inbound message unit * @mbox: Inbound mailbox number * @buf: Buffer to add to inbound queue * * Adds the @buf buffer to the MPC85xx inbound message queue. Returns * %0 on success or %-EINVAL on failure. 
*/ int rio_hw_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf) { int rc = 0; struct rio_priv *priv = mport->priv; pr_debug("RIO: rio_hw_add_inb_buffer(), msg_rx_ring.rx_slot %d\n", priv->msg_rx_ring.rx_slot); if (priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot]) { printk(KERN_ERR "RIO: error adding inbound buffer %d, buffer exists\n", priv->msg_rx_ring.rx_slot); rc = -EINVAL; goto out; } priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot] = buf; if (++priv->msg_rx_ring.rx_slot == priv->msg_rx_ring.size) priv->msg_rx_ring.rx_slot = 0; out: return rc; } EXPORT_SYMBOL_GPL(rio_hw_add_inb_buffer); /** * rio_hw_get_inb_message - Fetch inbound message from the MPC85xx message unit * @mport: Master port implementing the inbound message unit * @mbox: Inbound mailbox number * * Gets the next available inbound message from the inbound message queue. * A pointer to the message is returned on success or NULL on failure. */ void *rio_hw_get_inb_message(struct rio_mport *mport, int mbox) { struct rio_priv *priv = mport->priv; u32 phys_buf, virt_buf; void *buf = NULL; int buf_idx; phys_buf = in_be32(&priv->msg_regs->ifqdpar); /* If no more messages, then bail out */ if (phys_buf == in_be32(&priv->msg_regs->ifqepar)) goto out2; virt_buf = (u32) priv->msg_rx_ring.virt + (phys_buf - priv->msg_rx_ring.phys); buf_idx = (phys_buf - priv->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE; buf = priv->msg_rx_ring.virt_buffer[buf_idx]; if (!buf) { printk(KERN_ERR "RIO: inbound message copy failed, no buffers\n"); goto out1; } /* Copy max message size, caller is expected to allocate that big */ memcpy(buf, (void *)virt_buf, RIO_MAX_MSG_SIZE); /* Clear the available buffer */ priv->msg_rx_ring.virt_buffer[buf_idx] = NULL; out1: setbits32(&priv->msg_regs->imr, RIO_MSG_IMR_MI); out2: return buf; } EXPORT_SYMBOL_GPL(rio_hw_get_inb_message); /** * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler * @irq: Linux interrupt number * @dev_instance: Pointer to interrupt-specific data * * Handles doorbell interrupts. Parses a list of registered * doorbell event handlers and executes a matching event handler. 
*/ static irqreturn_t fsl_rio_dbell_handler(int irq, void *dev_instance) { int dsr; struct rio_mport *port = (struct rio_mport *)dev_instance; struct rio_priv *priv = port->priv; dsr = in_be32(&priv->msg_regs->dsr); if (dsr & DOORBELL_DSR_TE) { pr_info("RIO: doorbell reception error\n"); out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_TE); goto out; } if (dsr & DOORBELL_DSR_QFI) { pr_info("RIO: doorbell queue full\n"); out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI); goto out; } /* XXX Need to check/dispatch until queue empty */ if (dsr & DOORBELL_DSR_DIQI) { u32 dmsg = (u32) priv->dbell_ring.virt + (in_be32(&priv->msg_regs->dqdpar) & 0xfff); struct rio_dbell *dbell; int found = 0; pr_debug ("RIO: processing doorbell, sid %2.2x tid %2.2x info %4.4x\n", DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg)); list_for_each_entry(dbell, &port->dbells, node) { if ((dbell->res->start <= DBELL_INF(dmsg)) && (dbell->res->end >= DBELL_INF(dmsg))) { found = 1; break; } } if (found) { dbell->dinb(port, dbell->dev_id, DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg)); } else { pr_debug ("RIO: spurious doorbell, sid %2.2x tid %2.2x info %4.4x\n", DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg)); } setbits32(&priv->msg_regs->dmr, DOORBELL_DMR_DI); out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_DIQI); } out: return IRQ_HANDLED; } /** * fsl_rio_doorbell_init - MPC85xx doorbell interface init * @mport: Master port implementing the inbound doorbell unit * * Initializes doorbell unit hardware and inbound DMA buffer * ring. Called from fsl_rio_setup(). Returns %0 on success * or %-ENOMEM on failure. */ static int fsl_rio_doorbell_init(struct rio_mport *mport) { struct rio_priv *priv = mport->priv; int rc = 0; /* Map outbound doorbell window immediately after maintenance window */ priv->dbell_win = ioremap(mport->iores.start + RIO_MAINT_WIN_SIZE, RIO_DBELL_WIN_SIZE); if (!priv->dbell_win) { printk(KERN_ERR "RIO: unable to map outbound doorbell window\n"); rc = -ENOMEM; goto out; } /* Initialize inbound doorbells */ priv->dbell_ring.virt = dma_alloc_coherent(priv->dev, 512 * DOORBELL_MESSAGE_SIZE, &priv->dbell_ring.phys, GFP_KERNEL); if (!priv->dbell_ring.virt) { printk(KERN_ERR "RIO: unable to allocate inbound doorbell ring\n"); rc = -ENOMEM; iounmap(priv->dbell_win); goto out; } /* Point dequeue/enqueue pointers at first entry in ring */ out_be32(&priv->msg_regs->dqdpar, (u32) priv->dbell_ring.phys); out_be32(&priv->msg_regs->dqepar, (u32) priv->dbell_ring.phys); /* Clear interrupt status */ out_be32(&priv->msg_regs->dsr, 0x00000091); /* Hook up doorbell handler */ rc = request_irq(IRQ_RIO_BELL(mport), fsl_rio_dbell_handler, 0, "dbell_rx", (void *)mport); if (rc < 0) { iounmap(priv->dbell_win); dma_free_coherent(priv->dev, 512 * DOORBELL_MESSAGE_SIZE, priv->dbell_ring.virt, priv->dbell_ring.phys); printk(KERN_ERR "MPC85xx RIO: unable to request inbound doorbell irq"); goto out; } /* Configure doorbells for snooping, 512 entries, and enable */ out_be32(&priv->msg_regs->dmr, 0x00108161); out: return rc; } /** * fsl_rio_port_write_handler - MPC85xx port write interrupt handler * @irq: Linux interrupt number * @dev_instance: Pointer to interrupt-specific data * * Handles port write interrupts. Parses a list of registered * port write event handlers and executes a matching event handler. 
*/ static irqreturn_t fsl_rio_port_write_handler(int irq, void *dev_instance) { u32 ipwmr, ipwsr; struct rio_mport *port = (struct rio_mport *)dev_instance; struct rio_priv *priv = port->priv; u32 epwisr, tmp; ipwmr = in_be32(&priv->msg_regs->pwmr); ipwsr = in_be32(&priv->msg_regs->pwsr); epwisr = in_be32(priv->regs_win + RIO_EPWISR); if (epwisr & 0x80000000) { tmp = in_be32(priv->regs_win + RIO_LTLEDCSR); pr_info("RIO_LTLEDCSR = 0x%x\n", tmp); out_be32(priv->regs_win + RIO_LTLEDCSR, 0); } if (!(epwisr & 0x00000001)) return IRQ_HANDLED; #ifdef DEBUG_PW pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr); if (ipwsr & RIO_IPWSR_QF) pr_debug(" QF"); if (ipwsr & RIO_IPWSR_TE) pr_debug(" TE"); if (ipwsr & RIO_IPWSR_QFI) pr_debug(" QFI"); if (ipwsr & RIO_IPWSR_PWD) pr_debug(" PWD"); if (ipwsr & RIO_IPWSR_PWB) pr_debug(" PWB"); pr_debug(" )\n"); #endif out_be32(&priv->msg_regs->pwsr, ipwsr & (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD)); if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) { priv->port_write_msg.err_count++; pr_info("RIO: Port-Write Transaction Err (%d)\n", priv->port_write_msg.err_count); } if (ipwsr & RIO_IPWSR_PWD) { priv->port_write_msg.discard_count++; pr_info("RIO: Port Discarded Port-Write Msg(s) (%d)\n", priv->port_write_msg.discard_count); } /* Schedule deferred processing if PW was received */ if (ipwsr & RIO_IPWSR_QFI) { /* Save PW message (if there is room in FIFO), * otherwise discard it. */ if (kfifo_avail(&priv->pw_fifo) >= RIO_PW_MSG_SIZE) { priv->port_write_msg.msg_count++; kfifo_in(&priv->pw_fifo, priv->port_write_msg.virt, RIO_PW_MSG_SIZE); } else { priv->port_write_msg.discard_count++; pr_info("RIO: ISR Discarded Port-Write Msg(s) (%d)\n", priv->port_write_msg.discard_count); } schedule_work(&priv->pw_work); } /* Issue Clear Queue command. This allows another * port-write to be received. */ out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ); return IRQ_HANDLED; } static void fsl_pw_dpc(struct work_struct *work) { struct rio_priv *priv = container_of(work, struct rio_priv, pw_work); unsigned long flags; u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)]; /* * Process port-write messages */ spin_lock_irqsave(&priv->pw_fifo_lock, flags); while (kfifo_out(&priv->pw_fifo, (unsigned char *)msg_buffer, RIO_PW_MSG_SIZE)) { /* Process one message */ spin_unlock_irqrestore(&priv->pw_fifo_lock, flags); #ifdef DEBUG_PW { u32 i; pr_debug("%s : Port-Write Message:", __func__); for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) { if ((i%4) == 0) pr_debug("\n0x%02x: 0x%08x", i*4, msg_buffer[i]); else pr_debug(" 0x%08x", msg_buffer[i]); } pr_debug("\n"); } #endif /* Pass the port-write message to RIO core for processing */ rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer); spin_lock_irqsave(&priv->pw_fifo_lock, flags); } spin_unlock_irqrestore(&priv->pw_fifo_lock, flags); } /** * fsl_rio_pw_enable - enable/disable port-write interface init * @mport: Master port implementing the port write unit * @enable: 1=enable; 0=disable port-write message handling */ static int fsl_rio_pw_enable(struct rio_mport *mport, int enable) { struct rio_priv *priv = mport->priv; u32 rval; rval = in_be32(&priv->msg_regs->pwmr); if (enable) rval |= RIO_IPWMR_PWE; else rval &= ~RIO_IPWMR_PWE; out_be32(&priv->msg_regs->pwmr, rval); return 0; } /** * fsl_rio_port_write_init - MPC85xx port write interface init * @mport: Master port implementing the port write unit * * Initializes port write unit hardware and DMA buffer * ring. Called from fsl_rio_setup(). 
Returns %0 on success * or %-ENOMEM on failure. */ static int fsl_rio_port_write_init(struct rio_mport *mport) { struct rio_priv *priv = mport->priv; int rc = 0; /* Following configurations require a disabled port write controller */ out_be32(&priv->msg_regs->pwmr, in_be32(&priv->msg_regs->pwmr) & ~RIO_IPWMR_PWE); /* Initialize port write */ priv->port_write_msg.virt = dma_alloc_coherent(priv->dev, RIO_PW_MSG_SIZE, &priv->port_write_msg.phys, GFP_KERNEL); if (!priv->port_write_msg.virt) { pr_err("RIO: unable allocate port write queue\n"); return -ENOMEM; } priv->port_write_msg.err_count = 0; priv->port_write_msg.discard_count = 0; /* Point dequeue/enqueue pointers at first entry */ out_be32(&priv->msg_regs->epwqbar, 0); out_be32(&priv->msg_regs->pwqbar, (u32) priv->port_write_msg.phys); pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n", in_be32(&priv->msg_regs->epwqbar), in_be32(&priv->msg_regs->pwqbar)); /* Clear interrupt status IPWSR */ out_be32(&priv->msg_regs->pwsr, (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD)); /* Configure port write contoller for snooping enable all reporting, clear queue full */ out_be32(&priv->msg_regs->pwmr, RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ); /* Hook up port-write handler */ rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler, 0, "port-write", (void *)mport); if (rc < 0) { pr_err("MPC85xx RIO: unable to request inbound doorbell irq"); goto err_out; } INIT_WORK(&priv->pw_work, fsl_pw_dpc); spin_lock_init(&priv->pw_fifo_lock); if (kfifo_alloc(&priv->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) { pr_err("FIFO allocation failed\n"); rc = -ENOMEM; goto err_out_irq; } pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n", in_be32(&priv->msg_regs->pwmr), in_be32(&priv->msg_regs->pwsr)); return rc; err_out_irq: free_irq(IRQ_RIO_PW(mport), (void *)mport); err_out: dma_free_coherent(priv->dev, RIO_PW_MSG_SIZE, priv->port_write_msg.virt, priv->port_write_msg.phys); return rc; } static char *cmdline = NULL; static int fsl_rio_get_hdid(int index) { /* XXX Need to parse multiple entries in some format */ if (!cmdline) return -1; return simple_strtol(cmdline, NULL, 0); } static int fsl_rio_get_cmdline(char *s) { if (!s) return 0; cmdline = s; return 1; } __setup("riohdid=", fsl_rio_get_cmdline); static inline void fsl_rio_info(struct device *dev, u32 ccsr) { const char *str; if (ccsr & 1) { /* Serial phy */ switch (ccsr >> 30) { case 0: str = "1"; break; case 1: str = "4"; break; default: str = "Unknown"; break; } dev_info(dev, "Hardware port width: %s\n", str); switch ((ccsr >> 27) & 7) { case 0: str = "Single-lane 0"; break; case 1: str = "Single-lane 2"; break; case 2: str = "Four-lane"; break; default: str = "Unknown"; break; } dev_info(dev, "Training connection status: %s\n", str); } else { /* Parallel phy */ if (!(ccsr & 0x80000000)) dev_info(dev, "Output port operating in 8-bit mode\n"); if (!(ccsr & 0x08000000)) dev_info(dev, "Input port operating in 8-bit mode\n"); } } /** * fsl_rio_setup - Setup Freescale PowerPC RapidIO interface * @dev: of_device pointer * * Initializes MPC85xx RapidIO hardware interface, configures * master port with system-specific info, and registers the * master port with the RapidIO subsystem. 
*/ int fsl_rio_setup(struct of_device *dev) { struct rio_ops *ops; struct rio_mport *port; struct rio_priv *priv; int rc = 0; const u32 *dt_range, *cell; struct resource regs; int rlen; u32 ccsr; u64 law_start, law_size; int paw, aw, sw; if (!dev->dev.of_node) { dev_err(&dev->dev, "Device OF-Node is NULL"); return -EFAULT; } rc = of_address_to_resource(dev->dev.of_node, 0, &regs); if (rc) { dev_err(&dev->dev, "Can't get %s property 'reg'\n", dev->dev.of_node->full_name); return -EFAULT; } dev_info(&dev->dev, "Of-device full name %s\n", dev->dev.of_node->full_name); dev_info(&dev->dev, "Regs: %pR\n", &regs); dt_range = of_get_property(dev->dev.of_node, "ranges", &rlen); if (!dt_range) { dev_err(&dev->dev, "Can't get %s property 'ranges'\n", dev->dev.of_node->full_name); return -EFAULT; } /* Get node address width */ cell = of_get_property(dev->dev.of_node, "#address-cells", NULL); if (cell) aw = *cell; else aw = of_n_addr_cells(dev->dev.of_node); /* Get node size width */ cell = of_get_property(dev->dev.of_node, "#size-cells", NULL); if (cell) sw = *cell; else sw = of_n_size_cells(dev->dev.of_node); /* Get parent address width */ paw = of_n_addr_cells(dev->dev.of_node); law_start = of_read_number(dt_range + aw, paw); law_size = of_read_number(dt_range + aw + paw, sw); dev_info(&dev->dev, "LAW start 0x%016llx, size 0x%016llx.\n", law_start, law_size); ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL); if (!ops) { rc = -ENOMEM; goto err_ops; } ops->lcread = fsl_local_config_read; ops->lcwrite = fsl_local_config_write; ops->cread = fsl_rio_config_read; ops->cwrite = fsl_rio_config_write; ops->dsend = fsl_rio_doorbell_send; ops->pwenable = fsl_rio_pw_enable; port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL); if (!port) { rc = -ENOMEM; goto err_port; } port->id = 0; port->index = 0; priv = kzalloc(sizeof(struct rio_priv), GFP_KERNEL); if (!priv) { printk(KERN_ERR "Can't alloc memory for 'priv'\n"); rc = -ENOMEM; goto err_priv; } INIT_LIST_HEAD(&port->dbells); port->iores.start = law_start; port->iores.end = law_start + law_size - 1; port->iores.flags = IORESOURCE_MEM; port->iores.name = "rio_io_win"; priv->pwirq = irq_of_parse_and_map(dev->dev.of_node, 0); priv->bellirq = irq_of_parse_and_map(dev->dev.of_node, 2); priv->txirq = irq_of_parse_and_map(dev->dev.of_node, 3); priv->rxirq = irq_of_parse_and_map(dev->dev.of_node, 4); dev_info(&dev->dev, "pwirq: %d, bellirq: %d, txirq: %d, rxirq %d\n", priv->pwirq, priv->bellirq, priv->txirq, priv->rxirq); rio_init_dbell_res(&port->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff); rio_init_mbox_res(&port->riores[RIO_INB_MBOX_RESOURCE], 0, 0); rio_init_mbox_res(&port->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0); strcpy(port->name, "RIO0 mport"); priv->dev = &dev->dev; port->ops = ops; port->host_deviceid = fsl_rio_get_hdid(port->id); port->priv = priv; rio_register_mport(port); priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1); rio_regs_win = priv->regs_win; /* Probe the master port phy type */ ccsr = in_be32(priv->regs_win + RIO_CCSR); port->phy_type = (ccsr & 1) ? RIO_PHY_SERIAL : RIO_PHY_PARALLEL; dev_info(&dev->dev, "RapidIO PHY type: %s\n", (port->phy_type == RIO_PHY_PARALLEL) ? "parallel" : ((port->phy_type == RIO_PHY_SERIAL) ? "serial" : "unknown")); /* Checking the port training status */ if (in_be32((priv->regs_win + RIO_ESCSR)) & 1) { dev_err(&dev->dev, "Port is not ready. 
" "Try to restart connection...\n"); switch (port->phy_type) { case RIO_PHY_SERIAL: /* Disable ports */ out_be32(priv->regs_win + RIO_CCSR, 0); /* Set 1x lane */ setbits32(priv->regs_win + RIO_CCSR, 0x02000000); /* Enable ports */ setbits32(priv->regs_win + RIO_CCSR, 0x00600000); break; case RIO_PHY_PARALLEL: /* Disable ports */ out_be32(priv->regs_win + RIO_CCSR, 0x22000000); /* Enable ports */ out_be32(priv->regs_win + RIO_CCSR, 0x44000000); break; } msleep(100); if (in_be32((priv->regs_win + RIO_ESCSR)) & 1) { dev_err(&dev->dev, "Port restart failed.\n"); rc = -ENOLINK; goto err; } dev_info(&dev->dev, "Port restart success!\n"); } fsl_rio_info(&dev->dev, ccsr); port->sys_size = (in_be32((priv->regs_win + RIO_PEF_CAR)) & RIO_PEF_CTLS) >> 4; dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n", port->sys_size ? 65536 : 256); priv->atmu_regs = (struct rio_atmu_regs *)(priv->regs_win + RIO_ATMU_REGS_OFFSET); priv->maint_atmu_regs = priv->atmu_regs + 1; priv->dbell_atmu_regs = priv->atmu_regs + 2; priv->msg_regs = (struct rio_msg_regs *)(priv->regs_win + ((port->phy_type == RIO_PHY_SERIAL) ? RIO_S_MSG_REGS_OFFSET : RIO_P_MSG_REGS_OFFSET)); /* Set to receive any dist ID for serial RapidIO controller. */ if (port->phy_type == RIO_PHY_SERIAL) out_be32((priv->regs_win + RIO_ISR_AACR), RIO_ISR_AACR_AA); /* Configure maintenance transaction window */ out_be32(&priv->maint_atmu_regs->rowbar, law_start >> 12); out_be32(&priv->maint_atmu_regs->rowar, 0x80077000 | (ilog2(RIO_MAINT_WIN_SIZE) - 1)); priv->maint_win = ioremap(law_start, RIO_MAINT_WIN_SIZE); /* Configure outbound doorbell window */ out_be32(&priv->dbell_atmu_regs->rowbar, (law_start + RIO_MAINT_WIN_SIZE) >> 12); out_be32(&priv->dbell_atmu_regs->rowar, 0x8004200b); /* 4k */ fsl_rio_doorbell_init(port); fsl_rio_port_write_init(port); saved_mcheck_exception = ppc_md.machine_check_exception; ppc_md.machine_check_exception = fsl_rio_mcheck_exception; /* Ensure that RFXE is set */ mtspr(SPRN_HID1, (mfspr(SPRN_HID1) | 0x20000)); return 0; err: iounmap(priv->regs_win); kfree(priv); err_priv: kfree(port); err_port: kfree(ops); err_ops: return rc; } /* The probe function for RapidIO peer-to-peer network. */ static int __devinit fsl_of_rio_rpn_probe(struct of_device *dev, const struct of_device_id *match) { int rc; printk(KERN_INFO "Setting up RapidIO peer-to-peer network %s\n", dev->dev.of_node->full_name); rc = fsl_rio_setup(dev); if (rc) goto out; /* Enumerate all registered ports */ rc = rio_init_mports(); out: return rc; }; static const struct of_device_id fsl_of_rio_rpn_ids[] = { { .compatible = "fsl,rapidio-delta", }, {}, }; static struct of_platform_driver fsl_of_rio_rpn_driver = { .driver = { .name = "fsl-of-rio", .owner = THIS_MODULE, .of_match_table = fsl_of_rio_rpn_ids, }, .probe = fsl_of_rio_rpn_probe, }; static __init int fsl_of_rio_rpn_init(void) { return of_register_platform_driver(&fsl_of_rio_rpn_driver); } subsys_initcall(fsl_of_rio_rpn_init);
gpl-2.0
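A detail worth calling out in fsl_rio.c above: fsl_rio_config_read() and fsl_rio_config_write() both gate maintenance accesses on the same bounds-and-alignment test (the "check for aligned access" fix credited in the file header). Here is a standalone plain-C sketch of that predicate, with IS_ALIGNED reproduced the way the kernel defines it; maint_access_ok is a hypothetical name:

#include <stdio.h>
#include <stdint.h>

#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)
#define MAINT_SPACE_SIZE	0x1000000u	/* 16MB maintenance config space */

/* Mirrors the check in fsl_rio_config_read/write: a 1/2/4-byte access
 * that stays inside the 16MB window and is naturally aligned. */
static int maint_access_ok(uint32_t offset, uint32_t len)
{
	if (len != 1 && len != 2 && len != 4)
		return 0;
	if (offset > MAINT_SPACE_SIZE - len)
		return 0;
	if (!IS_ALIGNED(offset, len))
		return 0;
	return 1;
}

int main(void)
{
	printf("%d %d %d\n",
	       maint_access_ok(0x100, 4),	/* 1: aligned, in range */
	       maint_access_ok(0x101, 4),	/* 0: misaligned */
	       maint_access_ok(0xfffffe, 4));	/* 0: runs past the window */
	return 0;
}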
smipi1/bbb_kernel
fs/proc/inode.c
531
11909
/* * linux/fs/proc/inode.c * * Copyright (C) 1991, 1992 Linus Torvalds */ #include <linux/time.h> #include <linux/proc_fs.h> #include <linux/kernel.h> #include <linux/pid_namespace.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/stat.h> #include <linux/completion.h> #include <linux/poll.h> #include <linux/printk.h> #include <linux/file.h> #include <linux/limits.h> #include <linux/init.h> #include <linux/module.h> #include <linux/sysctl.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/mount.h> #include <linux/magic.h> #include <asm/uaccess.h> #include "internal.h" static void proc_evict_inode(struct inode *inode) { struct proc_dir_entry *de; struct ctl_table_header *head; truncate_inode_pages_final(&inode->i_data); clear_inode(inode); /* Stop tracking associated processes */ put_pid(PROC_I(inode)->pid); /* Let go of any associated proc directory entry */ de = PDE(inode); if (de) pde_put(de); head = PROC_I(inode)->sysctl; if (head) { RCU_INIT_POINTER(PROC_I(inode)->sysctl, NULL); sysctl_head_put(head); } } static struct kmem_cache * proc_inode_cachep; static struct inode *proc_alloc_inode(struct super_block *sb) { struct proc_inode *ei; struct inode *inode; ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, GFP_KERNEL); if (!ei) return NULL; ei->pid = NULL; ei->fd = 0; ei->op.proc_get_link = NULL; ei->pde = NULL; ei->sysctl = NULL; ei->sysctl_entry = NULL; ei->ns_ops = NULL; inode = &ei->vfs_inode; inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; return inode; } static void proc_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); kmem_cache_free(proc_inode_cachep, PROC_I(inode)); } static void proc_destroy_inode(struct inode *inode) { call_rcu(&inode->i_rcu, proc_i_callback); } static void init_once(void *foo) { struct proc_inode *ei = (struct proc_inode *) foo; inode_init_once(&ei->vfs_inode); } void __init proc_init_inodecache(void) { proc_inode_cachep = kmem_cache_create("proc_inode_cache", sizeof(struct proc_inode), 0, (SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD|SLAB_PANIC), init_once); } static int proc_show_options(struct seq_file *seq, struct dentry *root) { struct super_block *sb = root->d_sb; struct pid_namespace *pid = sb->s_fs_info; if (!gid_eq(pid->pid_gid, GLOBAL_ROOT_GID)) seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, pid->pid_gid)); if (pid->hide_pid != 0) seq_printf(seq, ",hidepid=%u", pid->hide_pid); return 0; } static const struct super_operations proc_sops = { .alloc_inode = proc_alloc_inode, .destroy_inode = proc_destroy_inode, .drop_inode = generic_delete_inode, .evict_inode = proc_evict_inode, .statfs = simple_statfs, .remount_fs = proc_remount, .show_options = proc_show_options, }; enum {BIAS = -1U<<31}; static inline int use_pde(struct proc_dir_entry *pde) { return atomic_inc_unless_negative(&pde->in_use); } static void unuse_pde(struct proc_dir_entry *pde) { if (atomic_dec_return(&pde->in_use) == BIAS) complete(pde->pde_unload_completion); } /* pde is locked */ static void close_pdeo(struct proc_dir_entry *pde, struct pde_opener *pdeo) { if (pdeo->closing) { /* somebody else is doing that, just wait */ DECLARE_COMPLETION_ONSTACK(c); pdeo->c = &c; spin_unlock(&pde->pde_unload_lock); wait_for_completion(&c); spin_lock(&pde->pde_unload_lock); } else { struct file *file; pdeo->closing = 1; spin_unlock(&pde->pde_unload_lock); file = pdeo->file; pde->proc_fops->release(file_inode(file), file); spin_lock(&pde->pde_unload_lock); list_del_init(&pdeo->lh); 
if (pdeo->c) complete(pdeo->c); kfree(pdeo); } } void proc_entry_rundown(struct proc_dir_entry *de) { DECLARE_COMPLETION_ONSTACK(c); /* Wait until all existing callers into module are done. */ de->pde_unload_completion = &c; if (atomic_add_return(BIAS, &de->in_use) != BIAS) wait_for_completion(&c); spin_lock(&de->pde_unload_lock); while (!list_empty(&de->pde_openers)) { struct pde_opener *pdeo; pdeo = list_first_entry(&de->pde_openers, struct pde_opener, lh); close_pdeo(de, pdeo); } spin_unlock(&de->pde_unload_lock); } static loff_t proc_reg_llseek(struct file *file, loff_t offset, int whence) { struct proc_dir_entry *pde = PDE(file_inode(file)); loff_t rv = -EINVAL; if (use_pde(pde)) { loff_t (*llseek)(struct file *, loff_t, int); llseek = pde->proc_fops->llseek; if (!llseek) llseek = default_llseek; rv = llseek(file, offset, whence); unuse_pde(pde); } return rv; } static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { ssize_t (*read)(struct file *, char __user *, size_t, loff_t *); struct proc_dir_entry *pde = PDE(file_inode(file)); ssize_t rv = -EIO; if (use_pde(pde)) { read = pde->proc_fops->read; if (read) rv = read(file, buf, count, ppos); unuse_pde(pde); } return rv; } static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *); struct proc_dir_entry *pde = PDE(file_inode(file)); ssize_t rv = -EIO; if (use_pde(pde)) { write = pde->proc_fops->write; if (write) rv = write(file, buf, count, ppos); unuse_pde(pde); } return rv; } static unsigned int proc_reg_poll(struct file *file, struct poll_table_struct *pts) { struct proc_dir_entry *pde = PDE(file_inode(file)); unsigned int rv = DEFAULT_POLLMASK; unsigned int (*poll)(struct file *, struct poll_table_struct *); if (use_pde(pde)) { poll = pde->proc_fops->poll; if (poll) rv = poll(file, pts); unuse_pde(pde); } return rv; } static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct proc_dir_entry *pde = PDE(file_inode(file)); long rv = -ENOTTY; long (*ioctl)(struct file *, unsigned int, unsigned long); if (use_pde(pde)) { ioctl = pde->proc_fops->unlocked_ioctl; if (ioctl) rv = ioctl(file, cmd, arg); unuse_pde(pde); } return rv; } #ifdef CONFIG_COMPAT static long proc_reg_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct proc_dir_entry *pde = PDE(file_inode(file)); long rv = -ENOTTY; long (*compat_ioctl)(struct file *, unsigned int, unsigned long); if (use_pde(pde)) { compat_ioctl = pde->proc_fops->compat_ioctl; if (compat_ioctl) rv = compat_ioctl(file, cmd, arg); unuse_pde(pde); } return rv; } #endif static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma) { struct proc_dir_entry *pde = PDE(file_inode(file)); int rv = -EIO; int (*mmap)(struct file *, struct vm_area_struct *); if (use_pde(pde)) { mmap = pde->proc_fops->mmap; if (mmap) rv = mmap(file, vma); unuse_pde(pde); } return rv; } static unsigned long proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct proc_dir_entry *pde = PDE(file_inode(file)); unsigned long rv = -EIO; if (use_pde(pde)) { typeof(proc_reg_get_unmapped_area) *get_area; get_area = pde->proc_fops->get_unmapped_area; #ifdef CONFIG_MMU if (!get_area) get_area = current->mm->get_unmapped_area; #endif if (get_area) rv = get_area(file, orig_addr, len, pgoff, flags); else rv = orig_addr; 
unuse_pde(pde); } return rv; } static int proc_reg_open(struct inode *inode, struct file *file) { struct proc_dir_entry *pde = PDE(inode); int rv = 0; int (*open)(struct inode *, struct file *); int (*release)(struct inode *, struct file *); struct pde_opener *pdeo; /* * What for, you ask? Well, we can have open, rmmod, remove_proc_entry * sequence. ->release won't be called because ->proc_fops will be * cleared. Depending on complexity of ->release, consequences vary. * * We can't wait for mercy when close will be done for real, it's * deadlockable: rmmod foo </proc/foo . So, we're going to do ->release * by hand in remove_proc_entry(). For this, save opener's credentials * for later. */ pdeo = kzalloc(sizeof(struct pde_opener), GFP_KERNEL); if (!pdeo) return -ENOMEM; if (!use_pde(pde)) { kfree(pdeo); return -ENOENT; } open = pde->proc_fops->open; release = pde->proc_fops->release; if (open) rv = open(inode, file); if (rv == 0 && release) { /* To know what to release. */ pdeo->file = file; /* Strictly for "too late" ->release in proc_reg_release(). */ spin_lock(&pde->pde_unload_lock); list_add(&pdeo->lh, &pde->pde_openers); spin_unlock(&pde->pde_unload_lock); } else kfree(pdeo); unuse_pde(pde); return rv; } static int proc_reg_release(struct inode *inode, struct file *file) { struct proc_dir_entry *pde = PDE(inode); struct pde_opener *pdeo; spin_lock(&pde->pde_unload_lock); list_for_each_entry(pdeo, &pde->pde_openers, lh) { if (pdeo->file == file) { close_pdeo(pde, pdeo); break; } } spin_unlock(&pde->pde_unload_lock); return 0; } static const struct file_operations proc_reg_file_ops = { .llseek = proc_reg_llseek, .read = proc_reg_read, .write = proc_reg_write, .poll = proc_reg_poll, .unlocked_ioctl = proc_reg_unlocked_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = proc_reg_compat_ioctl, #endif .mmap = proc_reg_mmap, .get_unmapped_area = proc_reg_get_unmapped_area, .open = proc_reg_open, .release = proc_reg_release, }; #ifdef CONFIG_COMPAT static const struct file_operations proc_reg_file_ops_no_compat = { .llseek = proc_reg_llseek, .read = proc_reg_read, .write = proc_reg_write, .poll = proc_reg_poll, .unlocked_ioctl = proc_reg_unlocked_ioctl, .mmap = proc_reg_mmap, .get_unmapped_area = proc_reg_get_unmapped_area, .open = proc_reg_open, .release = proc_reg_release, }; #endif static const char *proc_follow_link(struct dentry *dentry, void **cookie) { struct proc_dir_entry *pde = PDE(d_inode(dentry)); if (unlikely(!use_pde(pde))) return ERR_PTR(-EINVAL); *cookie = pde; return pde->data; } static void proc_put_link(struct inode *unused, void *p) { unuse_pde(p); } const struct inode_operations proc_link_inode_operations = { .readlink = generic_readlink, .follow_link = proc_follow_link, .put_link = proc_put_link, }; struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) { struct inode *inode = new_inode_pseudo(sb); if (inode) { inode->i_ino = de->low_ino; inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; PROC_I(inode)->pde = de; if (is_empty_pde(de)) { make_empty_dir_inode(inode); return inode; } if (de->mode) { inode->i_mode = de->mode; inode->i_uid = de->uid; inode->i_gid = de->gid; } if (de->size) inode->i_size = de->size; if (de->nlink) set_nlink(inode, de->nlink); WARN_ON(!de->proc_iops); inode->i_op = de->proc_iops; if (de->proc_fops) { if (S_ISREG(inode->i_mode)) { #ifdef CONFIG_COMPAT if (!de->proc_fops->compat_ioctl) inode->i_fop = &proc_reg_file_ops_no_compat; else #endif inode->i_fop = &proc_reg_file_ops; } else { inode->i_fop = de->proc_fops; } 
} } else pde_put(de); return inode; } int proc_fill_super(struct super_block *s) { struct inode *root_inode; int ret; s->s_flags |= MS_NODIRATIME | MS_NOSUID | MS_NOEXEC; s->s_blocksize = 1024; s->s_blocksize_bits = 10; s->s_magic = PROC_SUPER_MAGIC; s->s_op = &proc_sops; s->s_time_gran = 1; pde_get(&proc_root); root_inode = proc_get_inode(s, &proc_root); if (!root_inode) { pr_err("proc_fill_super: get root inode failed\n"); return -ENOMEM; } s->s_root = d_make_root(root_inode); if (!s->s_root) { pr_err("proc_fill_super: allocate dentry failed\n"); return -ENOMEM; } ret = proc_setup_self(s); if (ret) { return ret; } return proc_setup_thread_self(s); }
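/* * Example: how a minimal out-of-tree module would exercise the wrappers * above. A sketch only - the entry name and functions are hypothetical; * a read of /proc/example travels proc_reg_open() -> proc_reg_read() -> * seq_read() via the proc_reg_file_ops installed by proc_get_inode(). */
#if 0
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello from procfs\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, NULL);
}

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.open		= example_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init example_init(void)
{
	/* proc_get_inode() wraps these fops in proc_reg_file_ops */
	return proc_create("example", 0444, NULL, &example_fops) ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	remove_proc_entry("example", NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
#endif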
gpl-2.0
jurgel/maratis
3rdparty/freetype/raster.c
787
1386
/***************************************************************************/ /* */ /* raster.c */ /* */ /* FreeType monochrome rasterer module component (body only). */ /* */ /* Copyright 1996-2001 by */ /* David Turner, Robert Wilhelm, and Werner Lemberg. */ /* */ /* This file is part of the FreeType project, and may only be used, */ /* modified, and distributed under the terms of the FreeType project */ /* license, LICENSE.TXT. By continuing to use, modify, or distribute */ /* this file you indicate that you have read the license and */ /* understand and accept it fully. */ /* */ /***************************************************************************/ #define FT_MAKE_OPTION_SINGLE_OBJECT #include <ft2build.h> #include "rastpic.c" #include "ftraster.c" #include "ftrend1.c" /* END */
gpl-2.0
spacecaker/CM7_Space_Kernel_Cooper
ipc/msg.c
1043
21347
/* * linux/ipc/msg.c * Copyright (C) 1992 Krishna Balasubramanian * * Removed all the remaining kerneld mess * Catch the -EFAULT stuff properly * Use GFP_KERNEL for messages as in 1.2 * Fixed up the unchecked user space derefs * Copyright (C) 1998 Alan Cox & Andi Kleen * * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com> * * mostly rewritten, threaded and wake-one semantics added * MSGMAX limit removed, sysctl's added * (c) 1999 Manfred Spraul <manfred@colorfullife.com> * * support for audit of ipc object properties and permission changes * Dustin Kirkland <dustin.kirkland@us.ibm.com> * * namespaces support * OpenVZ, SWsoft Inc. * Pavel Emelianov <xemul@openvz.org> */ #include <linux/capability.h> #include <linux/msg.h> #include <linux/spinlock.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/proc_fs.h> #include <linux/list.h> #include <linux/security.h> #include <linux/sched.h> #include <linux/syscalls.h> #include <linux/audit.h> #include <linux/seq_file.h> #include <linux/rwsem.h> #include <linux/nsproxy.h> #include <linux/ipc_namespace.h> #include <asm/current.h> #include <asm/uaccess.h> #include "util.h" /* * one msg_receiver structure for each sleeping receiver: */ struct msg_receiver { struct list_head r_list; struct task_struct *r_tsk; int r_mode; long r_msgtype; long r_maxsize; struct msg_msg *volatile r_msg; }; /* one msg_sender for each sleeping sender */ struct msg_sender { struct list_head list; struct task_struct *tsk; }; #define SEARCH_ANY 1 #define SEARCH_EQUAL 2 #define SEARCH_NOTEQUAL 3 #define SEARCH_LESSEQUAL 4 #define msg_ids(ns) ((ns)->ids[IPC_MSG_IDS]) #define msg_unlock(msq) ipc_unlock(&(msq)->q_perm) static void freeque(struct ipc_namespace *, struct kern_ipc_perm *); static int newque(struct ipc_namespace *, struct ipc_params *); #ifdef CONFIG_PROC_FS static int sysvipc_msg_proc_show(struct seq_file *s, void *it); #endif /* * Scale msgmni with the available lowmem size: the memory dedicated to msg * queues should occupy at most 1/MSG_MEM_SCALE of lowmem. * Also take into account the number of nsproxies created so far. * This should be done staying within the (MSGMNI , IPCMNI/nr_ipc_ns) range. */ void recompute_msgmni(struct ipc_namespace *ns) { struct sysinfo i; unsigned long allowed; int nb_ns; si_meminfo(&i); allowed = (((i.totalram - i.totalhigh) / MSG_MEM_SCALE) * i.mem_unit) / MSGMNB; nb_ns = atomic_read(&nr_ipc_ns); allowed /= nb_ns; if (allowed < MSGMNI) { ns->msg_ctlmni = MSGMNI; return; } if (allowed > IPCMNI / nb_ns) { ns->msg_ctlmni = IPCMNI / nb_ns; return; } ns->msg_ctlmni = allowed; } void msg_init_ns(struct ipc_namespace *ns) { ns->msg_ctlmax = MSGMAX; ns->msg_ctlmnb = MSGMNB; recompute_msgmni(ns); atomic_set(&ns->msg_bytes, 0); atomic_set(&ns->msg_hdrs, 0); ipc_init_ids(&ns->ids[IPC_MSG_IDS]); } #ifdef CONFIG_IPC_NS void msg_exit_ns(struct ipc_namespace *ns) { free_ipcs(ns, &msg_ids(ns), freeque); idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr); } #endif void __init msg_init(void) { msg_init_ns(&init_ipc_ns); printk(KERN_INFO "msgmni has been set to %d\n", init_ipc_ns.msg_ctlmni); ipc_init_proc_interface("sysvipc/msg", " key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n", IPC_MSG_IDS, sysvipc_msg_proc_show); } /* * msg_lock_(check_) routines are called in the paths where the rw_mutex * is not held. 
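 * They return the queue with its per-queue spinlock held (or an ERR_PTR on failure), so every successful caller must pair them with msg_unlock().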
*/ static inline struct msg_queue *msg_lock(struct ipc_namespace *ns, int id) { struct kern_ipc_perm *ipcp = ipc_lock(&msg_ids(ns), id); if (IS_ERR(ipcp)) return (struct msg_queue *)ipcp; return container_of(ipcp, struct msg_queue, q_perm); } static inline struct msg_queue *msg_lock_check(struct ipc_namespace *ns, int id) { struct kern_ipc_perm *ipcp = ipc_lock_check(&msg_ids(ns), id); if (IS_ERR(ipcp)) return (struct msg_queue *)ipcp; return container_of(ipcp, struct msg_queue, q_perm); } static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s) { ipc_rmid(&msg_ids(ns), &s->q_perm); } /** * newque - Create a new msg queue * @ns: namespace * @params: ptr to the structure that contains the key and msgflg * * Called with msg_ids.rw_mutex held (writer) */ static int newque(struct ipc_namespace *ns, struct ipc_params *params) { struct msg_queue *msq; int id, retval; key_t key = params->key; int msgflg = params->flg; msq = ipc_rcu_alloc(sizeof(*msq)); if (!msq) return -ENOMEM; msq->q_perm.mode = msgflg & S_IRWXUGO; msq->q_perm.key = key; msq->q_perm.security = NULL; retval = security_msg_queue_alloc(msq); if (retval) { ipc_rcu_putref(msq); return retval; } /* * ipc_addid() locks msq */ id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); if (id < 0) { security_msg_queue_free(msq); ipc_rcu_putref(msq); return id; } msq->q_stime = msq->q_rtime = 0; msq->q_ctime = get_seconds(); msq->q_cbytes = msq->q_qnum = 0; msq->q_qbytes = ns->msg_ctlmnb; msq->q_lspid = msq->q_lrpid = 0; INIT_LIST_HEAD(&msq->q_messages); INIT_LIST_HEAD(&msq->q_receivers); INIT_LIST_HEAD(&msq->q_senders); msg_unlock(msq); return msq->q_perm.id; } static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss) { mss->tsk = current; current->state = TASK_INTERRUPTIBLE; list_add_tail(&mss->list, &msq->q_senders); } static inline void ss_del(struct msg_sender *mss) { if (mss->list.next != NULL) list_del(&mss->list); } static void ss_wakeup(struct list_head *h, int kill) { struct list_head *tmp; tmp = h->next; while (tmp != h) { struct msg_sender *mss; mss = list_entry(tmp, struct msg_sender, list); tmp = tmp->next; if (kill) mss->list.next = NULL; wake_up_process(mss->tsk); } } static void expunge_all(struct msg_queue *msq, int res) { struct list_head *tmp; tmp = msq->q_receivers.next; while (tmp != &msq->q_receivers) { struct msg_receiver *msr; msr = list_entry(tmp, struct msg_receiver, r_list); tmp = tmp->next; msr->r_msg = NULL; wake_up_process(msr->r_tsk); smp_mb(); msr->r_msg = ERR_PTR(res); } } /* * freeque() wakes up waiters on the sender and receiver waiting queue, * removes the message queue from message queue ID IDR, and cleans up all the * messages associated with this queue. * * msg_ids.rw_mutex (writer) and the spinlock for this message queue are held * before freeque() is called. msg_ids.rw_mutex remains locked on exit. */ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) { struct list_head *tmp; struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm); expunge_all(msq, -EIDRM); ss_wakeup(&msq->q_senders, 1); msg_rmid(ns, msq); msg_unlock(msq); tmp = msq->q_messages.next; while (tmp != &msq->q_messages) { struct msg_msg *msg = list_entry(tmp, struct msg_msg, m_list); tmp = tmp->next; atomic_dec(&ns->msg_hdrs); free_msg(msg); } atomic_sub(msq->q_cbytes, &ns->msg_bytes); security_msg_queue_free(msq); ipc_rcu_putref(msq); } /* * Called with msg_ids.rw_mutex and ipcp locked. 
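 * This is the ipc_ops->associate callback used by msgget(); it simply delegates the permission check to the LSM hook.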
*/ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg) { struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm); return security_msg_queue_associate(msq, msgflg); } SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg) { struct ipc_namespace *ns; struct ipc_ops msg_ops; struct ipc_params msg_params; ns = current->nsproxy->ipc_ns; msg_ops.getnew = newque; msg_ops.associate = msg_security; msg_ops.more_checks = NULL; msg_params.key = key; msg_params.flg = msgflg; return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params); } static inline unsigned long copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version) { switch(version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: { struct msqid_ds out; memset(&out, 0, sizeof(out)); ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm); out.msg_stime = in->msg_stime; out.msg_rtime = in->msg_rtime; out.msg_ctime = in->msg_ctime; if (in->msg_cbytes > USHRT_MAX) out.msg_cbytes = USHRT_MAX; else out.msg_cbytes = in->msg_cbytes; out.msg_lcbytes = in->msg_cbytes; if (in->msg_qnum > USHRT_MAX) out.msg_qnum = USHRT_MAX; else out.msg_qnum = in->msg_qnum; if (in->msg_qbytes > USHRT_MAX) out.msg_qbytes = USHRT_MAX; else out.msg_qbytes = in->msg_qbytes; out.msg_lqbytes = in->msg_qbytes; out.msg_lspid = in->msg_lspid; out.msg_lrpid = in->msg_lrpid; return copy_to_user(buf, &out, sizeof(out)); } default: return -EINVAL; } } static inline unsigned long copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version) { switch(version) { case IPC_64: if (copy_from_user(out, buf, sizeof(*out))) return -EFAULT; return 0; case IPC_OLD: { struct msqid_ds tbuf_old; if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) return -EFAULT; out->msg_perm.uid = tbuf_old.msg_perm.uid; out->msg_perm.gid = tbuf_old.msg_perm.gid; out->msg_perm.mode = tbuf_old.msg_perm.mode; if (tbuf_old.msg_qbytes == 0) out->msg_qbytes = tbuf_old.msg_lqbytes; else out->msg_qbytes = tbuf_old.msg_qbytes; return 0; } default: return -EINVAL; } } /* * This function handles some msgctl commands which require the rw_mutex * to be held in write mode. * NOTE: no locks must be held, the rw_mutex is taken inside this function. */ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd, struct msqid_ds __user *buf, int version) { struct kern_ipc_perm *ipcp; struct msqid64_ds uninitialized_var(msqid64); struct msg_queue *msq; int err; if (cmd == IPC_SET) { if (copy_msqid_from_user(&msqid64, buf, version)) return -EFAULT; } ipcp = ipcctl_pre_down(&msg_ids(ns), msqid, cmd, &msqid64.msg_perm, msqid64.msg_qbytes); if (IS_ERR(ipcp)) return PTR_ERR(ipcp); msq = container_of(ipcp, struct msg_queue, q_perm); err = security_msg_queue_msgctl(msq, cmd); if (err) goto out_unlock; switch (cmd) { case IPC_RMID: freeque(ns, ipcp); goto out_up; case IPC_SET: if (msqid64.msg_qbytes > ns->msg_ctlmnb && !capable(CAP_SYS_RESOURCE)) { err = -EPERM; goto out_unlock; } msq->q_qbytes = msqid64.msg_qbytes; ipc_update_perm(&msqid64.msg_perm, ipcp); msq->q_ctime = get_seconds(); /* sleeping receivers might be excluded by * stricter permissions. */ expunge_all(msq, -EAGAIN); /* sleeping senders might be able to send * due to a larger queue size. 
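 * (wake them all: each sender re-checks the limits under the queue lock and goes back to sleep if its message still does not fit)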
*/ ss_wakeup(&msq->q_senders, 0); break; default: err = -EINVAL; } out_unlock: msg_unlock(msq); out_up: up_write(&msg_ids(ns).rw_mutex); return err; } SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf) { struct msg_queue *msq; int err, version; struct ipc_namespace *ns; if (msqid < 0 || cmd < 0) return -EINVAL; version = ipc_parse_version(&cmd); ns = current->nsproxy->ipc_ns; switch (cmd) { case IPC_INFO: case MSG_INFO: { struct msginfo msginfo; int max_id; if (!buf) return -EFAULT; /* * We must not return kernel stack data. * due to padding, it's not enough * to set all member fields. */ err = security_msg_queue_msgctl(NULL, cmd); if (err) return err; memset(&msginfo, 0, sizeof(msginfo)); msginfo.msgmni = ns->msg_ctlmni; msginfo.msgmax = ns->msg_ctlmax; msginfo.msgmnb = ns->msg_ctlmnb; msginfo.msgssz = MSGSSZ; msginfo.msgseg = MSGSEG; down_read(&msg_ids(ns).rw_mutex); if (cmd == MSG_INFO) { msginfo.msgpool = msg_ids(ns).in_use; msginfo.msgmap = atomic_read(&ns->msg_hdrs); msginfo.msgtql = atomic_read(&ns->msg_bytes); } else { msginfo.msgmap = MSGMAP; msginfo.msgpool = MSGPOOL; msginfo.msgtql = MSGTQL; } max_id = ipc_get_maxid(&msg_ids(ns)); up_read(&msg_ids(ns).rw_mutex); if (copy_to_user(buf, &msginfo, sizeof(struct msginfo))) return -EFAULT; return (max_id < 0) ? 0 : max_id; } case MSG_STAT: /* msqid is an index rather than a msg queue id */ case IPC_STAT: { struct msqid64_ds tbuf; int success_return; if (!buf) return -EFAULT; if (cmd == MSG_STAT) { msq = msg_lock(ns, msqid); if (IS_ERR(msq)) return PTR_ERR(msq); success_return = msq->q_perm.id; } else { msq = msg_lock_check(ns, msqid); if (IS_ERR(msq)) return PTR_ERR(msq); success_return = 0; } err = -EACCES; if (ipcperms(&msq->q_perm, S_IRUGO)) goto out_unlock; err = security_msg_queue_msgctl(msq, cmd); if (err) goto out_unlock; memset(&tbuf, 0, sizeof(tbuf)); kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm); tbuf.msg_stime = msq->q_stime; tbuf.msg_rtime = msq->q_rtime; tbuf.msg_ctime = msq->q_ctime; tbuf.msg_cbytes = msq->q_cbytes; tbuf.msg_qnum = msq->q_qnum; tbuf.msg_qbytes = msq->q_qbytes; tbuf.msg_lspid = msq->q_lspid; tbuf.msg_lrpid = msq->q_lrpid; msg_unlock(msq); if (copy_msqid_to_user(buf, &tbuf, version)) return -EFAULT; return success_return; } case IPC_SET: case IPC_RMID: err = msgctl_down(ns, msqid, cmd, buf, version); return err; default: return -EINVAL; } out_unlock: msg_unlock(msq); return err; } static int testmsg(struct msg_msg *msg, long type, int mode) { switch(mode) { case SEARCH_ANY: return 1; case SEARCH_LESSEQUAL: if (msg->m_type <=type) return 1; break; case SEARCH_EQUAL: if (msg->m_type == type) return 1; break; case SEARCH_NOTEQUAL: if (msg->m_type != type) return 1; break; } return 0; } static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg) { struct list_head *tmp; tmp = msq->q_receivers.next; while (tmp != &msq->q_receivers) { struct msg_receiver *msr; msr = list_entry(tmp, struct msg_receiver, r_list); tmp = tmp->next; if (testmsg(msg, msr->r_msgtype, msr->r_mode) && !security_msg_queue_msgrcv(msq, msg, msr->r_tsk, msr->r_msgtype, msr->r_mode)) { list_del(&msr->r_list); if (msr->r_maxsize < msg->m_ts) { msr->r_msg = NULL; wake_up_process(msr->r_tsk); smp_mb(); msr->r_msg = ERR_PTR(-E2BIG); } else { msr->r_msg = NULL; msq->q_lrpid = task_pid_vnr(msr->r_tsk); msq->q_rtime = get_seconds(); wake_up_process(msr->r_tsk); smp_mb(); msr->r_msg = msg; return 1; } } } return 0; } long do_msgsnd(int msqid, long mtype, void __user *mtext, size_t msgsz, int msgflg) { 
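/* Copy the message in from user space, then either hand it straight to a sleeping receiver (pipelined_send()) or append it to the queue; while the queue is full we sleep in the loop below unless IPC_NOWAIT is set. */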
struct msg_queue *msq; struct msg_msg *msg; int err; struct ipc_namespace *ns; ns = current->nsproxy->ipc_ns; if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0) return -EINVAL; if (mtype < 1) return -EINVAL; msg = load_msg(mtext, msgsz); if (IS_ERR(msg)) return PTR_ERR(msg); msg->m_type = mtype; msg->m_ts = msgsz; msq = msg_lock_check(ns, msqid); if (IS_ERR(msq)) { err = PTR_ERR(msq); goto out_free; } for (;;) { struct msg_sender s; err = -EACCES; if (ipcperms(&msq->q_perm, S_IWUGO)) goto out_unlock_free; err = security_msg_queue_msgsnd(msq, msg, msgflg); if (err) goto out_unlock_free; if (msgsz + msq->q_cbytes <= msq->q_qbytes && 1 + msq->q_qnum <= msq->q_qbytes) { break; } /* queue full, wait: */ if (msgflg & IPC_NOWAIT) { err = -EAGAIN; goto out_unlock_free; } ss_add(msq, &s); ipc_rcu_getref(msq); msg_unlock(msq); schedule(); ipc_lock_by_ptr(&msq->q_perm); ipc_rcu_putref(msq); if (msq->q_perm.deleted) { err = -EIDRM; goto out_unlock_free; } ss_del(&s); if (signal_pending(current)) { err = -ERESTARTNOHAND; goto out_unlock_free; } } msq->q_lspid = task_tgid_vnr(current); msq->q_stime = get_seconds(); if (!pipelined_send(msq, msg)) { /* no one is waiting for this message, enqueue it */ list_add_tail(&msg->m_list, &msq->q_messages); msq->q_cbytes += msgsz; msq->q_qnum++; atomic_add(msgsz, &ns->msg_bytes); atomic_inc(&ns->msg_hdrs); } err = 0; msg = NULL; out_unlock_free: msg_unlock(msq); out_free: if (msg != NULL) free_msg(msg); return err; } SYSCALL_DEFINE4(msgsnd, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz, int, msgflg) { long mtype; if (get_user(mtype, &msgp->mtype)) return -EFAULT; return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg); } static inline int convert_mode(long *msgtyp, int msgflg) { /* * find message of correct type. * msgtyp = 0 => get first. * msgtyp > 0 => get first message of matching type. * msgtyp < 0 => get the message with the lowest type that is <= abs(msgtyp). */ if (*msgtyp == 0) return SEARCH_ANY; if (*msgtyp < 0) { *msgtyp = -*msgtyp; return SEARCH_LESSEQUAL; } if (msgflg & MSG_EXCEPT) return SEARCH_NOTEQUAL; return SEARCH_EQUAL; } long do_msgrcv(int msqid, long *pmtype, void __user *mtext, size_t msgsz, long msgtyp, int msgflg) { struct msg_queue *msq; struct msg_msg *msg; int mode; struct ipc_namespace *ns; if (msqid < 0 || (long) msgsz < 0) return -EINVAL; mode = convert_mode(&msgtyp, msgflg); ns = current->nsproxy->ipc_ns; msq = msg_lock_check(ns, msqid); if (IS_ERR(msq)) return PTR_ERR(msq); for (;;) { struct msg_receiver msr_d; struct list_head *tmp; msg = ERR_PTR(-EACCES); if (ipcperms(&msq->q_perm, S_IRUGO)) goto out_unlock; msg = ERR_PTR(-EAGAIN); tmp = msq->q_messages.next; while (tmp != &msq->q_messages) { struct msg_msg *walk_msg; walk_msg = list_entry(tmp, struct msg_msg, m_list); if (testmsg(walk_msg, msgtyp, mode) && !security_msg_queue_msgrcv(msq, walk_msg, current, msgtyp, mode)) { msg = walk_msg; if (mode == SEARCH_LESSEQUAL && walk_msg->m_type != 1) { msg = walk_msg; msgtyp = walk_msg->m_type - 1; } else { msg = walk_msg; break; } } tmp = tmp->next; } if (!IS_ERR(msg)) { /* * Found a suitable message. * Unlink it from the queue. */ if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) { msg = ERR_PTR(-E2BIG); goto out_unlock; } list_del(&msg->m_list); msq->q_qnum--; msq->q_rtime = get_seconds(); msq->q_lrpid = task_tgid_vnr(current); msq->q_cbytes -= msg->m_ts; atomic_sub(msg->m_ts, &ns->msg_bytes); atomic_dec(&ns->msg_hdrs); ss_wakeup(&msq->q_senders, 0); msg_unlock(msq); break; } /* No message waiting.
Wait for a message */ if (msgflg & IPC_NOWAIT) { msg = ERR_PTR(-ENOMSG); goto out_unlock; } list_add_tail(&msr_d.r_list, &msq->q_receivers); msr_d.r_tsk = current; msr_d.r_msgtype = msgtyp; msr_d.r_mode = mode; if (msgflg & MSG_NOERROR) msr_d.r_maxsize = INT_MAX; else msr_d.r_maxsize = msgsz; msr_d.r_msg = ERR_PTR(-EAGAIN); current->state = TASK_INTERRUPTIBLE; msg_unlock(msq); schedule(); /* Lockless receive, part 1: * Disable preemption. We don't hold a reference to the queue * and getting a reference would defeat the idea of a lockless * operation, thus the code relies on rcu to guarantee the * existence of msq: * Prior to destruction, expunge_all(-EIDRM) changes r_msg. * Thus if r_msg is -EAGAIN, then the queue has not been * destroyed yet. * rcu_read_lock() prevents preemption between reading r_msg * and the spin_lock() inside ipc_lock_by_ptr(). */ rcu_read_lock(); /* Lockless receive, part 2: * Wait until pipelined_send or expunge_all are outside of * wake_up_process(). There is a race with exit(), see * ipc/mqueue.c for the details. */ msg = (struct msg_msg*)msr_d.r_msg; while (msg == NULL) { cpu_relax(); msg = (struct msg_msg *)msr_d.r_msg; } /* Lockless receive, part 3: * If there is a message or an error then accept it without * locking. */ if (msg != ERR_PTR(-EAGAIN)) { rcu_read_unlock(); break; } /* Lockless receive, part 4: * Acquire the queue spinlock. */ ipc_lock_by_ptr(&msq->q_perm); rcu_read_unlock(); /* Lockless receive, part 5: * Repeat the test after acquiring the spinlock. */ msg = (struct msg_msg*)msr_d.r_msg; if (msg != ERR_PTR(-EAGAIN)) goto out_unlock; list_del(&msr_d.r_list); if (signal_pending(current)) { msg = ERR_PTR(-ERESTARTNOHAND); out_unlock: msg_unlock(msq); break; } } if (IS_ERR(msg)) return PTR_ERR(msg); msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz; *pmtype = msg->m_type; if (store_msg(mtext, msg, msgsz)) msgsz = -EFAULT; free_msg(msg); return msgsz; } SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz, long, msgtyp, int, msgflg) { long err, mtype; err = do_msgrcv(msqid, &mtype, msgp->mtext, msgsz, msgtyp, msgflg); if (err < 0) goto out; if (put_user(mtype, &msgp->mtype)) err = -EFAULT; out: return err; } #ifdef CONFIG_PROC_FS static int sysvipc_msg_proc_show(struct seq_file *s, void *it) { struct msg_queue *msq = it; return seq_printf(s, "%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n", msq->q_perm.key, msq->q_perm.id, msq->q_perm.mode, msq->q_cbytes, msq->q_qnum, msq->q_lspid, msq->q_lrpid, msq->q_perm.uid, msq->q_perm.gid, msq->q_perm.cuid, msq->q_perm.cgid, msq->q_stime, msq->q_rtime, msq->q_ctime); } #endif
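/* * Example (userspace, illustrative): the syscalls implemented above as * seen from a program. Error handling is omitted and names are arbitrary; * msgsnd()/msgrcv() below land in do_msgsnd()/do_msgrcv(). */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/msg.h>

struct my_msg {
	long mtype;
	char mtext[64];
};

int main(void)
{
	int id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);	/* -> SYSCALL msgget */
	struct my_msg m = { .mtype = 1 };

	strcpy(m.mtext, "ping");
	msgsnd(id, &m, sizeof(m.mtext), 0);		/* -> do_msgsnd() */
	msgrcv(id, &m, sizeof(m.mtext), 1, 0);		/* -> do_msgrcv(), type 1 */
	printf("%s\n", m.mtext);
	msgctl(id, IPC_RMID, NULL);			/* -> freeque() */
	return 0;
}
#endif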
gpl-2.0
XileForce/Vindicator-S6-Test
net/ieee802154/6lowpan.c
1555
38506
/* * Copyright 2011, Siemens AG * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com> */ /* * Based on patches from Jon Smirl <jonsmirl@gmail.com> * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ /* Jon's code is based on 6lowpan implementation for Contiki which is: * Copyright (c) 2008, Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the Institute nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include <linux/bitops.h> #include <linux/if_arp.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/netdevice.h> #include <net/af_ieee802154.h> #include <net/ieee802154.h> #include <net/ieee802154_netdev.h> #include <net/ipv6.h> #include "6lowpan.h" /* TTL uncompression values */ static const u8 lowpan_ttl_values[] = {0, 1, 64, 255}; static LIST_HEAD(lowpan_devices); /* * Uncompression of linklocal: * 0 -> 16 bytes from packet * 1 -> 2 bytes from prefix - bunch of zeroes and 8 from packet * 2 -> 2 bytes from prefix - zeroes + 2 from packet * 3 -> 2 bytes from prefix - infer 8 bytes from lladdr * * NOTE: => the uncompress function does change 0xf to 0x10 * NOTE: 0x00 => no-autoconfig => unspecified */ static const u8 lowpan_unc_llconf[] = {0x0f, 0x28, 0x22, 0x20}; /* * Uncompression of ctx-based: * 0 -> 0 bits from packet [unspecified / reserved] * 1 -> 8 bytes from prefix - bunch of zeroes and 8 from packet * 2 -> 8 bytes from prefix - zeroes + 2 from packet * 3 -> 8 bytes from prefix - infer 8 bytes from lladdr */ static const u8 lowpan_unc_ctxconf[] = {0x00, 0x88, 0x82, 0x80}; /* * Uncompression of multicast addresses: * 0 -> 0 bits from packet * 1 -> 2 bytes from prefix - bunch of zeroes and 5 from packet * 2 -> 2 bytes from prefix - zeroes + 3 from packet * 3 -> 2 bytes from prefix - infer 1 byte from lladdr */ static const u8 lowpan_unc_mxconf[] = {0x0f, 0x25, 0x23, 0x21}; /* Link local prefix */ static const u8 lowpan_llprefix[] = {0xfe, 0x80}; /* private device info */ struct lowpan_dev_info { struct net_device *real_dev; /* real WPAN device ptr */ struct mutex dev_list_mtx; /* mutex for list ops */ unsigned short fragment_tag; }; struct lowpan_dev_record { struct net_device *ldev; struct list_head list; }; struct lowpan_fragment { struct sk_buff *skb; /* skb to be assembled */ u16 length; /* length to be assembled */ u32 bytes_rcv; /* bytes received */ u16 tag; /* current fragment tag */ struct timer_list timer; /* assembling timer */ struct list_head list; /* fragments list */ }; static LIST_HEAD(lowpan_fragments); static DEFINE_SPINLOCK(flist_lock); static inline struct lowpan_dev_info *lowpan_dev_info(const struct net_device *dev) { return netdev_priv(dev); } static inline void lowpan_address_flip(u8 *src, u8 *dest) { int i; for (i = 0; i < IEEE802154_ADDR_LEN; i++) (dest)[IEEE802154_ADDR_LEN - i - 1] = (src)[i]; } /* list of all 6lowpan devices, used for packet delivery */ /* print data in line */ static inline void lowpan_raw_dump_inline(const char *caller, char *msg, unsigned char *buf, int len) { #ifdef DEBUG if (msg) pr_debug("(%s) %s: ", caller, msg); print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 16, 1, buf, len, false); #endif /* DEBUG */ } /* * print data in a table format: * * addr: xx xx xx xx xx xx * addr: xx xx xx xx xx xx * ...
*/ static inline void lowpan_raw_dump_table(const char *caller, char *msg, unsigned char *buf, int len) { #ifdef DEBUG if (msg) pr_debug("(%s) %s:\n", caller, msg); print_hex_dump(KERN_DEBUG, "\t", DUMP_PREFIX_OFFSET, 16, 1, buf, len, false); #endif /* DEBUG */ } static u8 lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift, const struct in6_addr *ipaddr, const unsigned char *lladdr) { u8 val = 0; if (is_addr_mac_addr_based(ipaddr, lladdr)) val = 3; /* 0-bits */ else if (lowpan_is_iid_16_bit_compressable(ipaddr)) { /* compress IID to 16 bits xxxx::XXXX */ memcpy(*hc06_ptr, &ipaddr->s6_addr16[7], 2); *hc06_ptr += 2; val = 2; /* 16-bits */ } else { /* do not compress IID => xxxx::IID */ memcpy(*hc06_ptr, &ipaddr->s6_addr16[4], 8); *hc06_ptr += 8; val = 1; /* 64-bits */ } return rol8(val, shift); } static void lowpan_uip_ds6_set_addr_iid(struct in6_addr *ipaddr, unsigned char *lladdr) { memcpy(&ipaddr->s6_addr[8], lladdr, IEEE802154_ADDR_LEN); /* second bit-flip (Universe/Local) is done according RFC2464 */ ipaddr->s6_addr[8] ^= 0x02; } /* * Uncompress addresses based on a prefix and a postfix with zeroes in * between. If the postfix is zero in length it will use the link address * to configure the IP address (autoconf style). * pref_post_count takes a byte where the first nibble specify prefix count * and the second postfix count (NOTE: 15/0xf => 16 bytes copy). */ static int lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr, u8 const *prefix, u8 pref_post_count, unsigned char *lladdr) { u8 prefcount = pref_post_count >> 4; u8 postcount = pref_post_count & 0x0f; /* full nibble 15 => 16 */ prefcount = (prefcount == 15 ? 16 : prefcount); postcount = (postcount == 15 ? 16 : postcount); if (lladdr) lowpan_raw_dump_inline(__func__, "linklocal address", lladdr, IEEE802154_ADDR_LEN); if (prefcount > 0) memcpy(ipaddr, prefix, prefcount); if (prefcount + postcount < 16) memset(&ipaddr->s6_addr[prefcount], 0, 16 - (prefcount + postcount)); if (postcount > 0) { memcpy(&ipaddr->s6_addr[16 - postcount], skb->data, postcount); skb_pull(skb, postcount); } else if (prefcount > 0) { if (lladdr == NULL) return -EINVAL; /* no IID based configuration if no prefix and no data */ lowpan_uip_ds6_set_addr_iid(ipaddr, lladdr); } pr_debug("uncompressing %d + %d => ", prefcount, postcount); lowpan_raw_dump_inline(NULL, NULL, ipaddr->s6_addr, 16); return 0; } static void lowpan_compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb) { struct udphdr *uh = udp_hdr(skb); if (((uh->source & LOWPAN_NHC_UDP_4BIT_MASK) == LOWPAN_NHC_UDP_4BIT_PORT) && ((uh->dest & LOWPAN_NHC_UDP_4BIT_MASK) == LOWPAN_NHC_UDP_4BIT_PORT)) { pr_debug("UDP header: both ports compression to 4 bits\n"); **hc06_ptr = LOWPAN_NHC_UDP_CS_P_11; **(hc06_ptr + 1) = /* subtraction is faster */ (u8)((uh->dest - LOWPAN_NHC_UDP_4BIT_PORT) + ((uh->source & LOWPAN_NHC_UDP_4BIT_PORT) << 4)); *hc06_ptr += 2; } else if ((uh->dest & LOWPAN_NHC_UDP_8BIT_MASK) == LOWPAN_NHC_UDP_8BIT_PORT) { pr_debug("UDP header: remove 8 bits of dest\n"); **hc06_ptr = LOWPAN_NHC_UDP_CS_P_01; memcpy(*hc06_ptr + 1, &uh->source, 2); **(hc06_ptr + 3) = (u8)(uh->dest - LOWPAN_NHC_UDP_8BIT_PORT); *hc06_ptr += 4; } else if ((uh->source & LOWPAN_NHC_UDP_8BIT_MASK) == LOWPAN_NHC_UDP_8BIT_PORT) { pr_debug("UDP header: remove 8 bits of source\n"); **hc06_ptr = LOWPAN_NHC_UDP_CS_P_10; memcpy(*hc06_ptr + 1, &uh->dest, 2); **(hc06_ptr + 3) = (u8)(uh->source - LOWPAN_NHC_UDP_8BIT_PORT); *hc06_ptr += 4; } else { pr_debug("UDP header: can't compress\n"); **hc06_ptr = 
LOWPAN_NHC_UDP_CS_P_00; memcpy(*hc06_ptr + 1, &uh->source, 2); memcpy(*hc06_ptr + 3, &uh->dest, 2); *hc06_ptr += 5; } /* checksum is always inline */ memcpy(*hc06_ptr, &uh->check, 2); *hc06_ptr += 2; /* skip the UDP header */ skb_pull(skb, sizeof(struct udphdr)); } static inline int lowpan_fetch_skb_u8(struct sk_buff *skb, u8 *val) { if (unlikely(!pskb_may_pull(skb, 1))) return -EINVAL; *val = skb->data[0]; skb_pull(skb, 1); return 0; } static inline int lowpan_fetch_skb_u16(struct sk_buff *skb, u16 *val) { if (unlikely(!pskb_may_pull(skb, 2))) return -EINVAL; *val = (skb->data[0] << 8) | skb->data[1]; skb_pull(skb, 2); return 0; } static int lowpan_uncompress_udp_header(struct sk_buff *skb, struct udphdr *uh) { u8 tmp; if (!uh) goto err; if (lowpan_fetch_skb_u8(skb, &tmp)) goto err; if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) { pr_debug("UDP header uncompression\n"); switch (tmp & LOWPAN_NHC_UDP_CS_P_11) { case LOWPAN_NHC_UDP_CS_P_00: memcpy(&uh->source, &skb->data[0], 2); memcpy(&uh->dest, &skb->data[2], 2); skb_pull(skb, 4); break; case LOWPAN_NHC_UDP_CS_P_01: memcpy(&uh->source, &skb->data[0], 2); uh->dest = skb->data[2] + LOWPAN_NHC_UDP_8BIT_PORT; skb_pull(skb, 3); break; case LOWPAN_NHC_UDP_CS_P_10: uh->source = skb->data[0] + LOWPAN_NHC_UDP_8BIT_PORT; memcpy(&uh->dest, &skb->data[1], 2); skb_pull(skb, 3); break; case LOWPAN_NHC_UDP_CS_P_11: uh->source = LOWPAN_NHC_UDP_4BIT_PORT + (skb->data[0] >> 4); uh->dest = LOWPAN_NHC_UDP_4BIT_PORT + (skb->data[0] & 0x0f); skb_pull(skb, 1); break; default: pr_debug("ERROR: unknown UDP format\n"); goto err; break; } pr_debug("uncompressed UDP ports: src = %d, dst = %d\n", uh->source, uh->dest); /* copy checksum */ memcpy(&uh->check, &skb->data[0], 2); skb_pull(skb, 2); /* * The UDP length needs to be inferred from the lower layers * here; we obtain the hint from the remaining size of the * frame */ uh->len = htons(skb->len + sizeof(struct udphdr)); pr_debug("uncompressed UDP length: %d", uh->len); } else { pr_debug("ERROR: unsupported NH format\n"); goto err; } return 0; err: return -EINVAL; } static int lowpan_header_create(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *_daddr, const void *_saddr, unsigned int len) { u8 tmp, iphc0, iphc1, *hc06_ptr; struct ipv6hdr *hdr; const u8 *saddr = _saddr; const u8 *daddr = _daddr; u8 head[100]; struct ieee802154_addr sa, da; /* TODO: * if this packet isn't an IPv6 one, where should it be routed? */ if (type != ETH_P_IPV6) return 0; hdr = ipv6_hdr(skb); hc06_ptr = head + 2; pr_debug("IPv6 header dump:\n\tversion = %d\n\tlength = %d\n" "\tnexthdr = 0x%02x\n\thop_lim = %d\n", hdr->version, ntohs(hdr->payload_len), hdr->nexthdr, hdr->hop_limit); lowpan_raw_dump_table(__func__, "raw skb network header dump", skb_network_header(skb), sizeof(struct ipv6hdr)); if (!saddr) saddr = dev->dev_addr; lowpan_raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8); /* * As we copy some bit-length fields in the IPHC encoding bytes, * we sometimes use |=. * If the field is 0, and the current bit value in memory is 1, * this does not work. We therefore reset the IPHC encoding here. */ iphc0 = LOWPAN_DISPATCH_IPHC; iphc1 = 0; /* TODO: context lookup */ lowpan_raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8); /* * Traffic class, flow label * If flow label is 0, compress it.
If traffic class is 0, compress it. * We have to process both at the same time as the offset of traffic * class depends on the presence of version and flow label */ /* hc06 format of TC is ECN | DSCP , original one is DSCP | ECN */ tmp = (hdr->priority << 4) | (hdr->flow_lbl[0] >> 4); tmp = ((tmp & 0x03) << 6) | (tmp >> 2); if (((hdr->flow_lbl[0] & 0x0F) == 0) && (hdr->flow_lbl[1] == 0) && (hdr->flow_lbl[2] == 0)) { /* flow label can be compressed */ iphc0 |= LOWPAN_IPHC_FL_C; if ((hdr->priority == 0) && ((hdr->flow_lbl[0] & 0xF0) == 0)) { /* compress (elide) all */ iphc0 |= LOWPAN_IPHC_TC_C; } else { /* compress only the flow label */ *hc06_ptr = tmp; hc06_ptr += 1; } } else { /* Flow label cannot be compressed */ if ((hdr->priority == 0) && ((hdr->flow_lbl[0] & 0xF0) == 0)) { /* compress only traffic class */ iphc0 |= LOWPAN_IPHC_TC_C; *hc06_ptr = (tmp & 0xc0) | (hdr->flow_lbl[0] & 0x0F); memcpy(hc06_ptr + 1, &hdr->flow_lbl[1], 2); hc06_ptr += 3; } else { /* compress nothing: carry the first 4 header bytes in-line */ memcpy(hc06_ptr, hdr, 4); /* replace the top byte with new ECN | DSCP format */ *hc06_ptr = tmp; hc06_ptr += 4; } } /* NOTE: payload length is always compressed */ /* Next Header is compressed if it is UDP */ if (hdr->nexthdr == UIP_PROTO_UDP) iphc0 |= LOWPAN_IPHC_NH_C; if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) { *hc06_ptr = hdr->nexthdr; hc06_ptr += 1; } /* * Hop limit * if 1: compress, encoding is 01 * if 64: compress, encoding is 10 * if 255: compress, encoding is 11 * else do not compress */ switch (hdr->hop_limit) { case 1: iphc0 |= LOWPAN_IPHC_TTL_1; break; case 64: iphc0 |= LOWPAN_IPHC_TTL_64; break; case 255: iphc0 |= LOWPAN_IPHC_TTL_255; break; default: *hc06_ptr = hdr->hop_limit; hc06_ptr += 1; break; } /* source address compression */ if (is_addr_unspecified(&hdr->saddr)) { pr_debug("source address is unspecified, setting SAC\n"); iphc1 |= LOWPAN_IPHC_SAC; /* TODO: context lookup */ } else if (is_addr_link_local(&hdr->saddr)) { pr_debug("source address is link-local\n"); iphc1 |= lowpan_compress_addr_64(&hc06_ptr, LOWPAN_IPHC_SAM_BIT, &hdr->saddr, saddr); } else { pr_debug("send the full source address\n"); memcpy(hc06_ptr, &hdr->saddr.s6_addr16[0], 16); hc06_ptr += 16; } /* destination address compression */ if (is_addr_mcast(&hdr->daddr)) { pr_debug("destination address is multicast: "); iphc1 |= LOWPAN_IPHC_M; if (lowpan_is_mcast_addr_compressable8(&hdr->daddr)) { pr_debug("compressed to 1 octet\n"); iphc1 |= LOWPAN_IPHC_DAM_11; /* use last byte */ *hc06_ptr = hdr->daddr.s6_addr[15]; hc06_ptr += 1; } else if (lowpan_is_mcast_addr_compressable32(&hdr->daddr)) { pr_debug("compressed to 4 octets\n"); iphc1 |= LOWPAN_IPHC_DAM_10; /* second byte + the last three */ *hc06_ptr = hdr->daddr.s6_addr[1]; memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[13], 3); hc06_ptr += 4; } else if (lowpan_is_mcast_addr_compressable48(&hdr->daddr)) { pr_debug("compressed to 6 octets\n"); iphc1 |= LOWPAN_IPHC_DAM_01; /* second byte + the last five */ *hc06_ptr = hdr->daddr.s6_addr[1]; memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[11], 5); hc06_ptr += 6; } else { pr_debug("using full address\n"); iphc1 |= LOWPAN_IPHC_DAM_00; memcpy(hc06_ptr, &hdr->daddr.s6_addr[0], 16); hc06_ptr += 16; } } else { /* TODO: context lookup */ if (is_addr_link_local(&hdr->daddr)) { pr_debug("dest address is unicast and link-local\n"); iphc1 |= lowpan_compress_addr_64(&hc06_ptr, LOWPAN_IPHC_DAM_BIT, &hdr->daddr, daddr); } else { pr_debug("dest address is unicast: using full one\n"); memcpy(hc06_ptr, &hdr->daddr.s6_addr16[0], 16); hc06_ptr += 16; } } /* UDP header
compression */ if (hdr->nexthdr == UIP_PROTO_UDP) lowpan_compress_udp_header(&hc06_ptr, skb); head[0] = iphc0; head[1] = iphc1; skb_pull(skb, sizeof(struct ipv6hdr)); memcpy(skb_push(skb, hc06_ptr - head), head, hc06_ptr - head); lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len); /* * NOTE1: I'm still unsure about the fact that compression and WPAN * header are created here and not later in the xmit. So wait for * an opinion of net maintainers. */ /* * NOTE2: to be absolutely correct, we must derive PANid information * from MAC subif of the 'dev' and 'real_dev' network devices, but * this isn't implemented in mainline yet, so currently we assign 0xff */ { mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA; mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev); /* prepare wpan address data */ sa.addr_type = IEEE802154_ADDR_LONG; sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev); memcpy(&(sa.hwaddr), saddr, 8); /* intra-PAN communications */ da.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev); /* * if the destination address is the broadcast address, use the * corresponding short address */ if (lowpan_is_addr_broadcast(daddr)) { da.addr_type = IEEE802154_ADDR_SHORT; da.short_addr = IEEE802154_ADDR_BROADCAST; } else { da.addr_type = IEEE802154_ADDR_LONG; memcpy(&(da.hwaddr), daddr, IEEE802154_ADDR_LEN); /* request acknowledgment */ mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ; } return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev, type, (void *)&da, (void *)&sa, skb->len); } } static int lowpan_give_skb_to_devices(struct sk_buff *skb) { struct lowpan_dev_record *entry; struct sk_buff *skb_cp; int stat = NET_RX_SUCCESS; rcu_read_lock(); list_for_each_entry_rcu(entry, &lowpan_devices, list) if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) { skb_cp = skb_copy(skb, GFP_ATOMIC); if (!skb_cp) { stat = -ENOMEM; break; } skb_cp->dev = entry->ldev; stat = netif_rx(skb_cp); } rcu_read_unlock(); return stat; } static int lowpan_skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr) { struct sk_buff *new; int stat = NET_RX_SUCCESS; new = skb_copy_expand(skb, sizeof(struct ipv6hdr), skb_tailroom(skb), GFP_ATOMIC); kfree_skb(skb); if (!new) return -ENOMEM; skb_push(new, sizeof(struct ipv6hdr)); skb_reset_network_header(new); skb_copy_to_linear_data(new, hdr, sizeof(struct ipv6hdr)); new->protocol = htons(ETH_P_IPV6); new->pkt_type = PACKET_HOST; stat = lowpan_give_skb_to_devices(new); kfree_skb(new); return stat; } static void lowpan_fragment_timer_expired(unsigned long entry_addr) { struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr; pr_debug("timer expired for frame with tag %d\n", entry->tag); list_del(&entry->list); dev_kfree_skb(entry->skb); kfree(entry); } static struct lowpan_fragment * lowpan_alloc_new_frame(struct sk_buff *skb, u16 len, u16 tag) { struct lowpan_fragment *frame; frame = kzalloc(sizeof(struct lowpan_fragment), GFP_ATOMIC); if (!frame) goto frame_err; INIT_LIST_HEAD(&frame->list); frame->length = len; frame->tag = tag; /* allocate buffer for frame assembling */ frame->skb = netdev_alloc_skb_ip_align(skb->dev, frame->length + sizeof(struct ipv6hdr)); if (!frame->skb) goto skb_err; frame->skb->priority = skb->priority; frame->skb->dev = skb->dev; /* reserve headroom for uncompressed ipv6 header */ skb_reserve(frame->skb, sizeof(struct ipv6hdr)); skb_put(frame->skb, frame->length); init_timer(&frame->timer); /* time out is the same as for ipv6 - 60 sec */ frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT; frame->timer.data = 
(unsigned long)frame; frame->timer.function = lowpan_fragment_timer_expired; add_timer(&frame->timer); list_add_tail(&frame->list, &lowpan_fragments); return frame; skb_err: kfree(frame); frame_err: return NULL; } static int lowpan_process_data(struct sk_buff *skb) { struct ipv6hdr hdr; u8 tmp, iphc0, iphc1, num_context = 0; u8 *_saddr, *_daddr; int err; lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len); /* at least two bytes will be used for the encoding */ if (skb->len < 2) goto drop; if (lowpan_fetch_skb_u8(skb, &iphc0)) goto drop; /* fragments assembling */ switch (iphc0 & LOWPAN_DISPATCH_MASK) { case LOWPAN_DISPATCH_FRAG1: case LOWPAN_DISPATCH_FRAGN: { struct lowpan_fragment *frame; /* slen stores the rightmost 8 bits of the 11-bit length */ u8 slen, offset = 0; u16 len, tag; bool found = false; if (lowpan_fetch_skb_u8(skb, &slen) || /* frame length */ lowpan_fetch_skb_u16(skb, &tag)) /* fragment tag */ goto drop; /* adds the 3 MSB to the 8 LSB to retrieve the 11-bit length */ len = ((iphc0 & 7) << 8) | slen; if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1) { pr_debug("%s received a FRAG1 packet (tag: %d, " "size of the entire IP packet: %d)", __func__, tag, len); } else { /* FRAGN */ if (lowpan_fetch_skb_u8(skb, &offset)) goto unlock_and_drop; pr_debug("%s received a FRAGN packet (tag: %d, " "size of the entire IP packet: %d, " "offset: %d)", __func__, tag, len, offset * 8); } /* * check if frame assembling with the same tag is * already in progress */ spin_lock_bh(&flist_lock); list_for_each_entry(frame, &lowpan_fragments, list) if (frame->tag == tag) { found = true; break; } /* alloc new frame structure */ if (!found) { pr_debug("%s first fragment received for tag %d, " "begin packet reassembly", __func__, tag); frame = lowpan_alloc_new_frame(skb, len, tag); if (!frame) goto unlock_and_drop; } /* if payload fits buffer, copy it */ if (likely((offset * 8 + skb->len) <= frame->length)) skb_copy_to_linear_data_offset(frame->skb, offset * 8, skb->data, skb->len); else goto unlock_and_drop; frame->bytes_rcv += skb->len; /* frame assembling complete */ if ((frame->bytes_rcv == frame->length) && frame->timer.expires > jiffies) { /* the timer hasn't expired yet - delete it first */ del_timer_sync(&frame->timer); list_del(&frame->list); spin_unlock_bh(&flist_lock); pr_debug("%s successfully reassembled fragment " "(tag %d)", __func__, tag); dev_kfree_skb(skb); skb = frame->skb; kfree(frame); if (lowpan_fetch_skb_u8(skb, &iphc0)) goto drop; break; } spin_unlock_bh(&flist_lock); return kfree_skb(skb), 0; } default: break; } if (lowpan_fetch_skb_u8(skb, &iphc1)) goto drop; _saddr = mac_cb(skb)->sa.hwaddr; _daddr = mac_cb(skb)->da.hwaddr; pr_debug("iphc0 = %02x, iphc1 = %02x\n", iphc0, iphc1); /* one more context byte follows if the CID flag is set */ if (iphc1 & LOWPAN_IPHC_CID) { pr_debug("CID flag is set, header is one byte longer\n"); if (lowpan_fetch_skb_u8(skb, &num_context)) goto drop; } hdr.version = 6; /* Traffic Class and Flow Label */ switch ((iphc0 & LOWPAN_IPHC_TF) >> 3) { /* * Traffic Class and Flow Label carried in-line * ECN + DSCP + 4-bit Pad + Flow Label (4 bytes) */ case 0: /* 00b */ if (lowpan_fetch_skb_u8(skb, &tmp)) goto drop; memcpy(&hdr.flow_lbl, &skb->data[0], 3); skb_pull(skb, 3); hdr.priority = ((tmp >> 2) & 0x0f); hdr.flow_lbl[0] = ((tmp >> 2) & 0x30) | (tmp << 6) | (hdr.flow_lbl[0] & 0x0f); break; /* * Traffic class carried in-line * ECN + DSCP (1 byte), Flow Label is elided */ case 2: /* 10b */ if (lowpan_fetch_skb_u8(skb, &tmp)) goto drop;
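/* tmp holds the inline ECN|DSCP byte; fold it back into the IPv6 priority / flow_lbl[0] layout (DSCP|ECN) below */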
hdr.priority = ((tmp >> 2) & 0x0f); hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30); hdr.flow_lbl[1] = 0; hdr.flow_lbl[2] = 0; break; /* * Flow Label carried in-line * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided */ case 1: /* 01b */ if (lowpan_fetch_skb_u8(skb, &tmp)) goto drop; hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30); memcpy(&hdr.flow_lbl[1], &skb->data[0], 2); skb_pull(skb, 2); break; /* Traffic Class and Flow Label are elided */ case 3: /* 11b */ hdr.priority = 0; hdr.flow_lbl[0] = 0; hdr.flow_lbl[1] = 0; hdr.flow_lbl[2] = 0; break; default: break; } /* Next Header */ if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) { /* Next header is carried inline */ if (lowpan_fetch_skb_u8(skb, &(hdr.nexthdr))) goto drop; pr_debug("NH flag is set, next header carried inline: %02x\n", hdr.nexthdr); } /* Hop Limit */ if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I) hdr.hop_limit = lowpan_ttl_values[iphc0 & 0x03]; else { if (lowpan_fetch_skb_u8(skb, &(hdr.hop_limit))) goto drop; } /* Extract SAM to the tmp variable */ tmp = ((iphc1 & LOWPAN_IPHC_SAM) >> LOWPAN_IPHC_SAM_BIT) & 0x03; /* Source address uncompression */ pr_debug("source address stateless compression\n"); err = lowpan_uncompress_addr(skb, &hdr.saddr, lowpan_llprefix, lowpan_unc_llconf[tmp], skb->data); if (err) goto drop; /* Extract DAM to the tmp variable */ tmp = ((iphc1 & LOWPAN_IPHC_DAM_11) >> LOWPAN_IPHC_DAM_BIT) & 0x03; /* check for Multicast Compression */ if (iphc1 & LOWPAN_IPHC_M) { if (iphc1 & LOWPAN_IPHC_DAC) { pr_debug("dest: context-based mcast compression\n"); /* TODO: implement this */ } else { u8 prefix[] = {0xff, 0x02}; pr_debug("dest: non context-based mcast compression\n"); if (0 < tmp && tmp < 3) { if (lowpan_fetch_skb_u8(skb, &prefix[1])) goto drop; } err = lowpan_uncompress_addr(skb, &hdr.daddr, prefix, lowpan_unc_mxconf[tmp], NULL); if (err) goto drop; } } else { pr_debug("dest: stateless compression\n"); err = lowpan_uncompress_addr(skb, &hdr.daddr, lowpan_llprefix, lowpan_unc_llconf[tmp], skb->data); if (err) goto drop; } /* UDP data uncompression */ if (iphc0 & LOWPAN_IPHC_NH_C) { struct udphdr uh; struct sk_buff *new; if (lowpan_uncompress_udp_header(skb, &uh)) goto drop; /* * replace the compressed UDP head by the uncompressed UDP * header */ new = skb_copy_expand(skb, sizeof(struct udphdr), skb_tailroom(skb), GFP_ATOMIC); kfree_skb(skb); if (!new) return -ENOMEM; skb = new; skb_push(skb, sizeof(struct udphdr)); skb_reset_transport_header(skb); skb_copy_to_linear_data(skb, &uh, sizeof(struct udphdr)); lowpan_raw_dump_table(__func__, "raw UDP header dump", (u8 *)&uh, sizeof(uh)); hdr.nexthdr = UIP_PROTO_UDP; } /* Not fragmented package */ hdr.payload_len = htons(skb->len); pr_debug("skb headroom size = %d, data length = %d\n", skb_headroom(skb), skb->len); pr_debug("IPv6 header dump:\n\tversion = %d\n\tlength = %d\n\t" "nexthdr = 0x%02x\n\thop_lim = %d\n", hdr.version, ntohs(hdr.payload_len), hdr.nexthdr, hdr.hop_limit); lowpan_raw_dump_table(__func__, "raw header dump", (u8 *)&hdr, sizeof(hdr)); return lowpan_skb_deliver(skb, &hdr); unlock_and_drop: spin_unlock_bh(&flist_lock); drop: kfree_skb(skb); return -EINVAL; } static int lowpan_set_address(struct net_device *dev, void *p) { struct sockaddr *sa = p; if (netif_running(dev)) return -EBUSY; /* TODO: validate addr */ memcpy(dev->dev_addr, sa->sa_data, dev->addr_len); return 0; } static int lowpan_get_mac_header_length(struct sk_buff *skb) { /* * Currently long addressing mode is supported only, so the overall * header size is 21: * FC 
SeqNum DPAN DA SA Sec * 2 + 1 + 2 + 8 + 8 + 0 = 21 */ return 21; } static int lowpan_fragment_xmit(struct sk_buff *skb, u8 *head, int mlen, int plen, int offset, int type) { struct sk_buff *frag; int hlen, ret; hlen = (type == LOWPAN_DISPATCH_FRAG1) ? LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE; lowpan_raw_dump_inline(__func__, "6lowpan fragment header", head, hlen); frag = dev_alloc_skb(hlen + mlen + plen + IEEE802154_MFR_SIZE); if (!frag) return -ENOMEM; frag->priority = skb->priority; frag->dev = skb->dev; /* copy header, MFR and payload */ memcpy(skb_put(frag, mlen), skb->data, mlen); memcpy(skb_put(frag, hlen), head, hlen); if (plen) skb_copy_from_linear_data_offset(skb, offset + mlen, skb_put(frag, plen), plen); lowpan_raw_dump_table(__func__, " raw fragment dump", frag->data, frag->len); ret = dev_queue_xmit(frag); return ret; } static int lowpan_skb_fragmentation(struct sk_buff *skb, struct net_device *dev) { int err, header_length, payload_length, tag, offset = 0; u8 head[5]; header_length = lowpan_get_mac_header_length(skb); payload_length = skb->len - header_length; tag = lowpan_dev_info(dev)->fragment_tag++; /* first fragment header */ head[0] = LOWPAN_DISPATCH_FRAG1 | ((payload_length >> 8) & 0x7); head[1] = payload_length & 0xff; head[2] = tag >> 8; head[3] = tag & 0xff; err = lowpan_fragment_xmit(skb, head, header_length, LOWPAN_FRAG_SIZE, 0, LOWPAN_DISPATCH_FRAG1); if (err) { pr_debug("%s unable to send FRAG1 packet (tag: %d)", __func__, tag); goto exit; } offset = LOWPAN_FRAG_SIZE; /* next fragment header */ head[0] &= ~LOWPAN_DISPATCH_FRAG1; head[0] |= LOWPAN_DISPATCH_FRAGN; while ((payload_length - offset > 0) && (err >= 0)) { int len = LOWPAN_FRAG_SIZE; head[4] = offset / 8; if (payload_length - offset < len) len = payload_length - offset; err = lowpan_fragment_xmit(skb, head, header_length, len, offset, LOWPAN_DISPATCH_FRAGN); if (err) { pr_debug("%s unable to send a subsequent FRAGN packet " "(tag: %d, offset: %d)", __func__, tag, offset); goto exit; } offset += len; } exit: return err; } static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev) { int err = -1; pr_debug("packet xmit\n"); skb->dev = lowpan_dev_info(dev)->real_dev; if (skb->dev == NULL) { pr_debug("ERROR: no real wpan device found\n"); goto error; } /* Send directly if less than the MTU minus the 2 checksum bytes. */ if (skb->len <= IEEE802154_MTU - IEEE802154_MFR_SIZE) { err = dev_queue_xmit(skb); goto out; } pr_debug("frame is too big, fragmentation is needed\n"); err = lowpan_skb_fragmentation(skb, dev); error: dev_kfree_skb(skb); out: if (err) pr_debug("ERROR: xmit failed\n"); return (err < 0) ?
NET_XMIT_DROP : err; } static struct wpan_phy *lowpan_get_phy(const struct net_device *dev) { struct net_device *real_dev = lowpan_dev_info(dev)->real_dev; return ieee802154_mlme_ops(real_dev)->get_phy(real_dev); } static u16 lowpan_get_pan_id(const struct net_device *dev) { struct net_device *real_dev = lowpan_dev_info(dev)->real_dev; return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev); } static u16 lowpan_get_short_addr(const struct net_device *dev) { struct net_device *real_dev = lowpan_dev_info(dev)->real_dev; return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev); } static u8 lowpan_get_dsn(const struct net_device *dev) { struct net_device *real_dev = lowpan_dev_info(dev)->real_dev; return ieee802154_mlme_ops(real_dev)->get_dsn(real_dev); } static struct header_ops lowpan_header_ops = { .create = lowpan_header_create, }; static struct lock_class_key lowpan_tx_busylock; static struct lock_class_key lowpan_netdev_xmit_lock_key; static void lowpan_set_lockdep_class_one(struct net_device *dev, struct netdev_queue *txq, void *_unused) { lockdep_set_class(&txq->_xmit_lock, &lowpan_netdev_xmit_lock_key); } static int lowpan_dev_init(struct net_device *dev) { netdev_for_each_tx_queue(dev, lowpan_set_lockdep_class_one, NULL); dev->qdisc_tx_busylock = &lowpan_tx_busylock; return 0; } static const struct net_device_ops lowpan_netdev_ops = { .ndo_init = lowpan_dev_init, .ndo_start_xmit = lowpan_xmit, .ndo_set_mac_address = lowpan_set_address, }; static struct ieee802154_mlme_ops lowpan_mlme = { .get_pan_id = lowpan_get_pan_id, .get_phy = lowpan_get_phy, .get_short_addr = lowpan_get_short_addr, .get_dsn = lowpan_get_dsn, }; static void lowpan_setup(struct net_device *dev) { dev->addr_len = IEEE802154_ADDR_LEN; memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN); dev->type = ARPHRD_IEEE802154; /* Frame Control + Sequence Number + Address fields + Security Header */ dev->hard_header_len = 2 + 1 + 20 + 14; dev->needed_tailroom = 2; /* FCS */ dev->mtu = 1281; dev->tx_queue_len = 0; dev->flags = IFF_BROADCAST | IFF_MULTICAST; dev->watchdog_timeo = 0; dev->netdev_ops = &lowpan_netdev_ops; dev->header_ops = &lowpan_header_ops; dev->ml_priv = &lowpan_mlme; dev->destructor = free_netdev; } static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[]) { if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN) return -EINVAL; } return 0; } static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct sk_buff *local_skb; if (!netif_running(dev)) goto drop; if (dev->type != ARPHRD_IEEE802154) goto drop; /* check that it's our buffer */ if (skb->data[0] == LOWPAN_DISPATCH_IPV6) { /* Copy the packet so that the IPv6 header is * properly aligned. */ local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1, skb_tailroom(skb), GFP_ATOMIC); if (!local_skb) goto drop; local_skb->protocol = htons(ETH_P_IPV6); local_skb->pkt_type = PACKET_HOST; /* Pull off the 1-byte of 6lowpan header. 
*/ skb_pull(local_skb, 1); skb_reset_network_header(local_skb); skb_set_transport_header(local_skb, sizeof(struct ipv6hdr)); lowpan_give_skb_to_devices(local_skb); kfree_skb(local_skb); kfree_skb(skb); } else { switch (skb->data[0] & 0xe0) { case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */ case LOWPAN_DISPATCH_FRAG1: /* first fragment header */ case LOWPAN_DISPATCH_FRAGN: /* next fragments headers */ local_skb = skb_clone(skb, GFP_ATOMIC); if (!local_skb) goto drop; lowpan_process_data(local_skb); kfree_skb(skb); break; default: break; } } return NET_RX_SUCCESS; drop: kfree_skb(skb); return NET_RX_DROP; } static int lowpan_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct net_device *real_dev; struct lowpan_dev_record *entry; pr_debug("adding new link\n"); if (!tb[IFLA_LINK]) return -EINVAL; /* find and hold real wpan device */ real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); if (!real_dev) return -ENODEV; lowpan_dev_info(dev)->real_dev = real_dev; lowpan_dev_info(dev)->fragment_tag = 0; mutex_init(&lowpan_dev_info(dev)->dev_list_mtx); entry = kzalloc(sizeof(struct lowpan_dev_record), GFP_KERNEL); if (!entry) { dev_put(real_dev); lowpan_dev_info(dev)->real_dev = NULL; return -ENOMEM; } entry->ldev = dev; mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx); INIT_LIST_HEAD(&entry->list); list_add_tail(&entry->list, &lowpan_devices); mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx); register_netdevice(dev); return 0; } static void lowpan_dellink(struct net_device *dev, struct list_head *head) { struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev); struct net_device *real_dev = lowpan_dev->real_dev; struct lowpan_dev_record *entry, *tmp; ASSERT_RTNL(); mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx); list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) { if (entry->ldev == dev) { list_del(&entry->list); kfree(entry); } } mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx); mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx); unregister_netdevice_queue(dev, head); dev_put(real_dev); } static struct rtnl_link_ops lowpan_link_ops __read_mostly = { .kind = "lowpan", .priv_size = sizeof(struct lowpan_dev_info), .setup = lowpan_setup, .newlink = lowpan_newlink, .dellink = lowpan_dellink, .validate = lowpan_validate, }; static inline int __init lowpan_netlink_init(void) { return rtnl_link_register(&lowpan_link_ops); } static inline void lowpan_netlink_fini(void) { rtnl_link_unregister(&lowpan_link_ops); } static int lowpan_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = ptr; LIST_HEAD(del_list); struct lowpan_dev_record *entry, *tmp; if (dev->type != ARPHRD_IEEE802154) goto out; if (event == NETDEV_UNREGISTER) { list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) { if (lowpan_dev_info(entry->ldev)->real_dev == dev) lowpan_dellink(entry->ldev, &del_list); } unregister_netdevice_many(&del_list); } out: return NOTIFY_DONE; } static struct notifier_block lowpan_dev_notifier = { .notifier_call = lowpan_device_event, }; static struct packet_type lowpan_packet_type = { .type = __constant_htons(ETH_P_IEEE802154), .func = lowpan_rcv, }; static int __init lowpan_init_module(void) { int err = 0; err = lowpan_netlink_init(); if (err < 0) goto out; dev_add_pack(&lowpan_packet_type); err = register_netdevice_notifier(&lowpan_dev_notifier); if (err < 0) { dev_remove_pack(&lowpan_packet_type); lowpan_netlink_fini(); } out: return err; } static void __exit 
lowpan_cleanup_module(void) { struct lowpan_fragment *frame, *tframe; lowpan_netlink_fini(); dev_remove_pack(&lowpan_packet_type); unregister_netdevice_notifier(&lowpan_dev_notifier); /* Now 6lowpan packet_type is removed, so no new fragments are * expected on RX, therefore that's the time to clean incomplete * fragments. */ spin_lock_bh(&flist_lock); list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) { del_timer_sync(&frame->timer); list_del(&frame->list); dev_kfree_skb(frame->skb); kfree(frame); } spin_unlock_bh(&flist_lock); } module_init(lowpan_init_module); module_exit(lowpan_cleanup_module); MODULE_LICENSE("GPL"); MODULE_ALIAS_RTNL_LINK("lowpan");
gpl-2.0
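The fragment headers built by lowpan_skb_fragmentation() above are worth seeing in isolation: FRAG1 packs a 5-bit dispatch plus the 11-bit datagram size into two bytes and appends a 16-bit tag, while FRAGN reuses those four bytes and adds a fifth carrying the offset in 8-octet units (head[4] = offset / 8). Below is a minimal, self-contained user-space sketch of that packing, assuming the RFC 4944 dispatch values 0xc0 and 0xe0; the helper names are illustrative, not the kernel's.

#include <stdint.h>
#include <stdio.h>

#define DISPATCH_FRAG1 0xc0	/* 11000xxx: first fragment (assumed RFC 4944 value) */
#define DISPATCH_FRAGN 0xe0	/* 11100xxx: subsequent fragments */

/* FRAG1: dispatch bits plus the high 3 bits of the 11-bit datagram size,
 * then the low size byte and a 16-bit tag -- the same packing as head[0..3]
 * in lowpan_skb_fragmentation(). */
static void pack_frag1(uint8_t head[4], uint16_t dgram_size, uint16_t tag)
{
	head[0] = DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
	head[1] = dgram_size & 0xff;
	head[2] = tag >> 8;
	head[3] = tag & 0xff;
}

/* FRAGN swaps the dispatch bits and appends the offset in 8-octet units. */
static void pack_fragn(uint8_t head[5], uint16_t dgram_size, uint16_t tag,
		       uint16_t offset)
{
	pack_frag1(head, dgram_size, tag);
	head[0] = (head[0] & ~DISPATCH_FRAG1) | DISPATCH_FRAGN;
	head[4] = offset / 8;
}

int main(void)
{
	uint8_t h1[4], hn[5];

	pack_frag1(h1, 1280, 0x0001);		/* e.g. a full IPv6 MTU datagram */
	pack_fragn(hn, 1280, 0x0001, 96);	/* second fragment at offset 96 */

	printf("FRAG1: %02x %02x %02x %02x\n", h1[0], h1[1], h1[2], h1[3]);
	printf("FRAGN: %02x %02x %02x %02x %02x\n",
	       hn[0], hn[1], hn[2], hn[3], hn[4]);
	return 0;
}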
y12uc231/linux
arch/mips/kernel/rtlx.c
1555
8895
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. * Copyright (C) 2005, 06 Ralf Baechle (ralf@linux-mips.org) * Copyright (C) 2013 Imagination Technologies Ltd. */ #include <linux/kernel.h> #include <linux/fs.h> #include <linux/syscalls.h> #include <linux/moduleloader.h> #include <linux/atomic.h> #include <asm/mipsmtregs.h> #include <asm/mips_mt.h> #include <asm/processor.h> #include <asm/rtlx.h> #include <asm/setup.h> #include <asm/vpe.h> static int sp_stopping; struct rtlx_info *rtlx; struct chan_waitqueues channel_wqs[RTLX_CHANNELS]; struct vpe_notifications rtlx_notify; void (*aprp_hook)(void) = NULL; EXPORT_SYMBOL(aprp_hook); static void __used dump_rtlx(void) { int i; pr_info("id 0x%lx state %d\n", rtlx->id, rtlx->state); for (i = 0; i < RTLX_CHANNELS; i++) { struct rtlx_channel *chan = &rtlx->channel[i]; pr_info(" rt_state %d lx_state %d buffer_size %d\n", chan->rt_state, chan->lx_state, chan->buffer_size); pr_info(" rt_read %d rt_write %d\n", chan->rt_read, chan->rt_write); pr_info(" lx_read %d lx_write %d\n", chan->lx_read, chan->lx_write); pr_info(" rt_buffer <%s>\n", chan->rt_buffer); pr_info(" lx_buffer <%s>\n", chan->lx_buffer); } } /* call when we have the address of the shared structure from the SP side. */ static int rtlx_init(struct rtlx_info *rtlxi) { if (rtlxi->id != RTLX_ID) { pr_err("no valid RTLX id at 0x%p 0x%lx\n", rtlxi, rtlxi->id); return -ENOEXEC; } rtlx = rtlxi; return 0; } /* notifications */ void rtlx_starting(int vpe) { int i; sp_stopping = 0; /* force a reload of rtlx */ rtlx = NULL; /* wake up any sleeping rtlx_open's */ for (i = 0; i < RTLX_CHANNELS; i++) wake_up_interruptible(&channel_wqs[i].lx_queue); } void rtlx_stopping(int vpe) { int i; sp_stopping = 1; for (i = 0; i < RTLX_CHANNELS; i++) wake_up_interruptible(&channel_wqs[i].lx_queue); } int rtlx_open(int index, int can_sleep) { struct rtlx_info **p; struct rtlx_channel *chan; enum rtlx_state state; int ret = 0; if (index >= RTLX_CHANNELS) { pr_debug("rtlx_open index out of range\n"); return -ENOSYS; } if (atomic_inc_return(&channel_wqs[index].in_open) > 1) { pr_debug("rtlx_open channel %d already opened\n", index); ret = -EBUSY; goto out_fail; } if (rtlx == NULL) { p = vpe_get_shared(aprp_cpu_index()); if (p == NULL) { if (can_sleep) { ret = __wait_event_interruptible( channel_wqs[index].lx_queue, (p = vpe_get_shared(aprp_cpu_index()))); if (ret) goto out_fail; } else { pr_debug("No SP program loaded, and device opened with O_NONBLOCK\n"); ret = -ENOSYS; goto out_fail; } } smp_rmb(); if (*p == NULL) { if (can_sleep) { DEFINE_WAIT(wait); for (;;) { prepare_to_wait( &channel_wqs[index].lx_queue, &wait, TASK_INTERRUPTIBLE); smp_rmb(); if (*p != NULL) break; if (!signal_pending(current)) { schedule(); continue; } ret = -ERESTARTSYS; goto out_fail; } finish_wait(&channel_wqs[index].lx_queue, &wait); } else { pr_err(" *vpe_get_shared is NULL. 
Has an SP program been loaded?\n"); ret = -ENOSYS; goto out_fail; } } if ((unsigned int)*p < KSEG0) { pr_warn("vpe_get_shared returned an invalid pointer maybe an error code %d\n", (int)*p); ret = -ENOSYS; goto out_fail; } ret = rtlx_init(*p); if (ret < 0) goto out_ret; } chan = &rtlx->channel[index]; state = xchg(&chan->lx_state, RTLX_STATE_OPENED); if (state == RTLX_STATE_OPENED) { ret = -EBUSY; goto out_fail; } out_fail: smp_mb(); atomic_dec(&channel_wqs[index].in_open); smp_mb(); out_ret: return ret; } int rtlx_release(int index) { if (rtlx == NULL) { pr_err("rtlx_release() with null rtlx\n"); return 0; } rtlx->channel[index].lx_state = RTLX_STATE_UNUSED; return 0; } unsigned int rtlx_read_poll(int index, int can_sleep) { struct rtlx_channel *chan; if (rtlx == NULL) return 0; chan = &rtlx->channel[index]; /* data available to read? */ if (chan->lx_read == chan->lx_write) { if (can_sleep) { int ret = __wait_event_interruptible( channel_wqs[index].lx_queue, (chan->lx_read != chan->lx_write) || sp_stopping); if (ret) return ret; if (sp_stopping) return 0; } else return 0; } return (chan->lx_write + chan->buffer_size - chan->lx_read) % chan->buffer_size; } static inline int write_spacefree(int read, int write, int size) { if (read == write) { /* * Never fill the buffer completely, so indexes are always * equal if empty and only empty, or !equal if data available */ return size - 1; } return ((read + size - write) % size) - 1; } unsigned int rtlx_write_poll(int index) { struct rtlx_channel *chan = &rtlx->channel[index]; return write_spacefree(chan->rt_read, chan->rt_write, chan->buffer_size); } ssize_t rtlx_read(int index, void __user *buff, size_t count) { size_t lx_write, fl = 0L; struct rtlx_channel *lx; unsigned long failed; if (rtlx == NULL) return -ENOSYS; lx = &rtlx->channel[index]; mutex_lock(&channel_wqs[index].mutex); smp_rmb(); lx_write = lx->lx_write; /* find out how much in total */ count = min(count, (size_t)(lx_write + lx->buffer_size - lx->lx_read) % lx->buffer_size); /* then how much from the read pointer onwards */ fl = min(count, (size_t)lx->buffer_size - lx->lx_read); failed = copy_to_user(buff, lx->lx_buffer + lx->lx_read, fl); if (failed) goto out; /* and if there is anything left at the beginning of the buffer */ if (count - fl) failed = copy_to_user(buff + fl, lx->lx_buffer, count - fl); out: count -= failed; smp_wmb(); lx->lx_read = (lx->lx_read + count) % lx->buffer_size; smp_wmb(); mutex_unlock(&channel_wqs[index].mutex); return count; } ssize_t rtlx_write(int index, const void __user *buffer, size_t count) { struct rtlx_channel *rt; unsigned long failed; size_t rt_read; size_t fl; if (rtlx == NULL) return -ENOSYS; rt = &rtlx->channel[index]; mutex_lock(&channel_wqs[index].mutex); smp_rmb(); rt_read = rt->rt_read; /* total number of bytes to copy */ count = min_t(size_t, count, write_spacefree(rt_read, rt->rt_write, rt->buffer_size)); /* first bit from write pointer to the end of the buffer, or count */ fl = min(count, (size_t) rt->buffer_size - rt->rt_write); failed = copy_from_user(rt->rt_buffer + rt->rt_write, buffer, fl); if (failed) goto out; /* if there's any left copy to the beginning of the buffer */ if (count - fl) failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl); out: count -= failed; smp_wmb(); rt->rt_write = (rt->rt_write + count) % rt->buffer_size; smp_wmb(); mutex_unlock(&channel_wqs[index].mutex); _interrupt_sp(); return count; } static int file_open(struct inode *inode, struct file *filp) { return rtlx_open(iminor(inode), 
(filp->f_flags & O_NONBLOCK) ? 0 : 1); } static int file_release(struct inode *inode, struct file *filp) { return rtlx_release(iminor(inode)); } static unsigned int file_poll(struct file *file, poll_table *wait) { int minor = iminor(file_inode(file)); unsigned int mask = 0; poll_wait(file, &channel_wqs[minor].rt_queue, wait); poll_wait(file, &channel_wqs[minor].lx_queue, wait); if (rtlx == NULL) return 0; /* data available to read? */ if (rtlx_read_poll(minor, 0)) mask |= POLLIN | POLLRDNORM; /* space to write */ if (rtlx_write_poll(minor)) mask |= POLLOUT | POLLWRNORM; return mask; } static ssize_t file_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { int minor = iminor(file_inode(file)); /* data available? */ if (!rtlx_read_poll(minor, (file->f_flags & O_NONBLOCK) ? 0 : 1)) return 0; /* -EAGAIN makes 'cat' whine */ return rtlx_read(minor, buffer, count); } static ssize_t file_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { int minor = iminor(file_inode(file)); /* any space left... */ if (!rtlx_write_poll(minor)) { int ret; if (file->f_flags & O_NONBLOCK) return -EAGAIN; ret = __wait_event_interruptible(channel_wqs[minor].rt_queue, rtlx_write_poll(minor)); if (ret) return ret; } return rtlx_write(minor, buffer, count); } const struct file_operations rtlx_fops = { .owner = THIS_MODULE, .open = file_open, .release = file_release, .write = file_write, .read = file_read, .poll = file_poll, .llseek = noop_llseek, }; module_init(rtlx_module_init); module_exit(rtlx_module_exit); MODULE_DESCRIPTION("MIPS RTLX"); MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc."); MODULE_LICENSE("GPL");
gpl-2.0
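The ring-buffer arithmetic in rtlx.c's write_spacefree() deserves a worked example: one slot is deliberately kept unused so that read == write always means "empty" and never "full", which caps the usable capacity at size - 1. Here is a small stand-alone sketch of the same arithmetic; the function name is illustrative, not the kernel's.

#include <assert.h>
#include <stdio.h>

/* One slot is always left free so the indexes stay unambiguous:
 * read == write means "empty", never "full". */
static int spacefree(int read, int write, int size)
{
	if (read == write)
		return size - 1;
	return ((read + size - write) % size) - 1;
}

int main(void)
{
	assert(spacefree(0, 0, 8) == 7);	/* empty: all but the guard slot */
	assert(spacefree(0, 7, 8) == 0);	/* full: writer stopped one short of reader */
	assert(spacefree(3, 5, 8) == 5);	/* reader ahead of writer, no wrap */
	assert(spacefree(5, 3, 8) == 1);	/* writer has wrapped past the end */
	printf("ring-buffer invariants hold\n");
	return 0;
}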
djmatt604/android_kernel_T989D_JB
drivers/atm/solos-pci.c
1811
35129
/* * Driver for the Solos PCI ADSL2+ card, designed to support Linux by * Traverse Technologies -- http://www.traverse.com.au/ * Xrio Limited -- http://www.xrio.com/ * * * Copyright © 2008 Traverse Technologies * Copyright © 2008 Intel Corporation * * Authors: Nathan Williams <nathan@traverse.com.au> * David Woodhouse <dwmw2@infradead.org> * Treker Chen <treker@xrio.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define DEBUG #define VERBOSE_DEBUG #include <linux/interrupt.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/atm.h> #include <linux/atmdev.h> #include <linux/skbuff.h> #include <linux/sysfs.h> #include <linux/device.h> #include <linux/kobject.h> #include <linux/firmware.h> #include <linux/ctype.h> #include <linux/swab.h> #include <linux/slab.h> #define VERSION "0.07" #define PTAG "solos-pci" #define CONFIG_RAM_SIZE 128 #define FLAGS_ADDR 0x7C #define IRQ_EN_ADDR 0x78 #define FPGA_VER 0x74 #define IRQ_CLEAR 0x70 #define WRITE_FLASH 0x6C #define PORTS 0x68 #define FLASH_BLOCK 0x64 #define FLASH_BUSY 0x60 #define FPGA_MODE 0x5C #define FLASH_MODE 0x58 #define TX_DMA_ADDR(port) (0x40 + (4 * (port))) #define RX_DMA_ADDR(port) (0x30 + (4 * (port))) #define DATA_RAM_SIZE 32768 #define BUF_SIZE 2048 #define OLD_BUF_SIZE 4096 /* For FPGA versions <= 2*/ #define FPGA_PAGE 528 /* FPGA flash page size*/ #define SOLOS_PAGE 512 /* Solos flash page size*/ #define FPGA_BLOCK (FPGA_PAGE * 8) /* FPGA flash block size*/ #define SOLOS_BLOCK (SOLOS_PAGE * 8) /* Solos flash block size*/ #define RX_BUF(card, nr) ((card->buffers) + (nr)*(card->buffer_size)*2) #define TX_BUF(card, nr) ((card->buffers) + (nr)*(card->buffer_size)*2 + (card->buffer_size)) #define FLASH_BUF ((card->buffers) + 4*(card->buffer_size)*2) #define RX_DMA_SIZE 2048 #define FPGA_VERSION(a,b) (((a) << 8) + (b)) #define LEGACY_BUFFERS 2 #define DMA_SUPPORTED 4 static int reset = 0; static int atmdebug = 0; static int firmware_upgrade = 0; static int fpga_upgrade = 0; static int db_firmware_upgrade = 0; static int db_fpga_upgrade = 0; struct pkt_hdr { __le16 size; __le16 vpi; __le16 vci; __le16 type; }; struct solos_skb_cb { struct atm_vcc *vcc; uint32_t dma_addr; }; #define SKB_CB(skb) ((struct solos_skb_cb *)skb->cb) #define PKT_DATA 0 #define PKT_COMMAND 1 #define PKT_POPEN 3 #define PKT_PCLOSE 4 #define PKT_STATUS 5 struct solos_card { void __iomem *config_regs; void __iomem *buffers; int nr_ports; int tx_mask; struct pci_dev *dev; struct atm_dev *atmdev[4]; struct tasklet_struct tlet; spinlock_t tx_lock; spinlock_t tx_queue_lock; spinlock_t cli_queue_lock; spinlock_t param_queue_lock; struct list_head param_queue; struct sk_buff_head tx_queue[4]; struct sk_buff_head cli_queue[4]; struct sk_buff *tx_skb[4]; struct sk_buff *rx_skb[4]; wait_queue_head_t param_wq; wait_queue_head_t fw_wq; int using_dma; int fpga_version; int buffer_size; }; struct solos_param { struct list_head list; pid_t pid; int port; struct sk_buff *response; }; #define SOLOS_CHAN(atmdev) ((int)(unsigned long)(atmdev)->phy_data) MODULE_AUTHOR("Traverse 
Technologies <support@traverse.com.au>"); MODULE_DESCRIPTION("Solos PCI driver"); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("solos-FPGA.bin"); MODULE_FIRMWARE("solos-Firmware.bin"); MODULE_FIRMWARE("solos-db-FPGA.bin"); MODULE_PARM_DESC(reset, "Reset Solos chips on startup"); MODULE_PARM_DESC(atmdebug, "Print ATM data"); MODULE_PARM_DESC(firmware_upgrade, "Initiate Solos firmware upgrade"); MODULE_PARM_DESC(fpga_upgrade, "Initiate FPGA upgrade"); MODULE_PARM_DESC(db_firmware_upgrade, "Initiate daughter board Solos firmware upgrade"); MODULE_PARM_DESC(db_fpga_upgrade, "Initiate daughter board FPGA upgrade"); module_param(reset, int, 0444); module_param(atmdebug, int, 0644); module_param(firmware_upgrade, int, 0444); module_param(fpga_upgrade, int, 0444); module_param(db_firmware_upgrade, int, 0444); module_param(db_fpga_upgrade, int, 0444); static void fpga_queue(struct solos_card *card, int port, struct sk_buff *skb, struct atm_vcc *vcc); static uint32_t fpga_tx(struct solos_card *); static irqreturn_t solos_irq(int irq, void *dev_id); static struct atm_vcc* find_vcc(struct atm_dev *dev, short vpi, int vci); static int list_vccs(int vci); static int atm_init(struct solos_card *, struct device *); static void atm_remove(struct solos_card *); static int send_command(struct solos_card *card, int dev, const char *buf, size_t size); static void solos_bh(unsigned long); static int print_buffer(struct sk_buff *buf); static inline void solos_pop(struct atm_vcc *vcc, struct sk_buff *skb) { if (vcc->pop) vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); } static ssize_t solos_param_show(struct device *dev, struct device_attribute *attr, char *buf) { struct atm_dev *atmdev = container_of(dev, struct atm_dev, class_dev); struct solos_card *card = atmdev->dev_data; struct solos_param prm; struct sk_buff *skb; struct pkt_hdr *header; int buflen; buflen = strlen(attr->attr.name) + 10; skb = alloc_skb(sizeof(*header) + buflen, GFP_KERNEL); if (!skb) { dev_warn(&card->dev->dev, "Failed to allocate sk_buff in solos_param_show()\n"); return -ENOMEM; } header = (void *)skb_put(skb, sizeof(*header)); buflen = snprintf((void *)&header[1], buflen - 1, "L%05d\n%s\n", current->pid, attr->attr.name); skb_put(skb, buflen); header->size = cpu_to_le16(buflen); header->vpi = cpu_to_le16(0); header->vci = cpu_to_le16(0); header->type = cpu_to_le16(PKT_COMMAND); prm.pid = current->pid; prm.response = NULL; prm.port = SOLOS_CHAN(atmdev); spin_lock_irq(&card->param_queue_lock); list_add(&prm.list, &card->param_queue); spin_unlock_irq(&card->param_queue_lock); fpga_queue(card, prm.port, skb, NULL); wait_event_timeout(card->param_wq, prm.response, 5 * HZ); spin_lock_irq(&card->param_queue_lock); list_del(&prm.list); spin_unlock_irq(&card->param_queue_lock); if (!prm.response) return -EIO; buflen = prm.response->len; memcpy(buf, prm.response->data, buflen); kfree_skb(prm.response); return buflen; } static ssize_t solos_param_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct atm_dev *atmdev = container_of(dev, struct atm_dev, class_dev); struct solos_card *card = atmdev->dev_data; struct solos_param prm; struct sk_buff *skb; struct pkt_hdr *header; int buflen; ssize_t ret; buflen = strlen(attr->attr.name) + 11 + count; skb = alloc_skb(sizeof(*header) + buflen, GFP_KERNEL); if (!skb) { dev_warn(&card->dev->dev, "Failed to allocate sk_buff in solos_param_store()\n"); return -ENOMEM; } header = (void *)skb_put(skb, sizeof(*header)); buflen = snprintf((void 
*)&header[1], buflen - 1, "L%05d\n%s\n%s\n", current->pid, attr->attr.name, buf); skb_put(skb, buflen); header->size = cpu_to_le16(buflen); header->vpi = cpu_to_le16(0); header->vci = cpu_to_le16(0); header->type = cpu_to_le16(PKT_COMMAND); prm.pid = current->pid; prm.response = NULL; prm.port = SOLOS_CHAN(atmdev); spin_lock_irq(&card->param_queue_lock); list_add(&prm.list, &card->param_queue); spin_unlock_irq(&card->param_queue_lock); fpga_queue(card, prm.port, skb, NULL); wait_event_timeout(card->param_wq, prm.response, 5 * HZ); spin_lock_irq(&card->param_queue_lock); list_del(&prm.list); spin_unlock_irq(&card->param_queue_lock); skb = prm.response; if (!skb) return -EIO; buflen = skb->len; /* Sometimes it has a newline, sometimes it doesn't. */ if (skb->data[buflen - 1] == '\n') buflen--; if (buflen == 2 && !strncmp(skb->data, "OK", 2)) ret = count; else if (buflen == 5 && !strncmp(skb->data, "ERROR", 5)) ret = -EIO; else { /* We know we have enough space allocated for this; we allocated it ourselves */ skb->data[buflen] = 0; dev_warn(&card->dev->dev, "Unexpected parameter response: '%s'\n", skb->data); ret = -EIO; } kfree_skb(skb); return ret; } static char *next_string(struct sk_buff *skb) { int i = 0; char *this = skb->data; for (i = 0; i < skb->len; i++) { if (this[i] == '\n') { this[i] = 0; skb_pull(skb, i + 1); return this; } if (!isprint(this[i])) return NULL; } return NULL; } /* * Status packet has fields separated by \n, starting with a version number * for the information therein. Fields are.... * * packet version * RxBitRate (version >= 1) * TxBitRate (version >= 1) * State (version >= 1) * LocalSNRMargin (version >= 1) * LocalLineAttn (version >= 1) */ static int process_status(struct solos_card *card, int port, struct sk_buff *skb) { char *str, *end, *state_str, *snr, *attn; int ver, rate_up, rate_down; if (!card->atmdev[port]) return -ENODEV; str = next_string(skb); if (!str) return -EIO; ver = simple_strtol(str, NULL, 10); if (ver < 1) { dev_warn(&card->dev->dev, "Unexpected status interrupt version %d\n", ver); return -EIO; } str = next_string(skb); if (!str) return -EIO; if (!strcmp(str, "ERROR")) { dev_dbg(&card->dev->dev, "Status packet indicated Solos error on port %d (starting up?)\n", port); return 0; } rate_down = simple_strtol(str, &end, 10); if (*end) return -EIO; str = next_string(skb); if (!str) return -EIO; rate_up = simple_strtol(str, &end, 10); if (*end) return -EIO; state_str = next_string(skb); if (!state_str) return -EIO; /* Anything but 'Showtime' is down */ if (strcmp(state_str, "Showtime")) { atm_dev_signal_change(card->atmdev[port], ATM_PHY_SIG_LOST); dev_info(&card->dev->dev, "Port %d: %s\n", port, state_str); return 0; } snr = next_string(skb); if (!snr) return -EIO; attn = next_string(skb); if (!attn) return -EIO; dev_info(&card->dev->dev, "Port %d: %s @%d/%d kb/s%s%s%s%s\n", port, state_str, rate_down/1000, rate_up/1000, snr[0]?", SNR ":"", snr, attn[0]?", Attn ":"", attn); card->atmdev[port]->link_rate = rate_down / 424; atm_dev_signal_change(card->atmdev[port], ATM_PHY_SIG_FOUND); return 0; } static int process_command(struct solos_card *card, int port, struct sk_buff *skb) { struct solos_param *prm; unsigned long flags; int cmdpid; int found = 0; if (skb->len < 7) return 0; if (skb->data[0] != 'L' || !isdigit(skb->data[1]) || !isdigit(skb->data[2]) || !isdigit(skb->data[3]) || !isdigit(skb->data[4]) || !isdigit(skb->data[5]) || skb->data[6] != '\n') return 0; cmdpid = simple_strtol(&skb->data[1], NULL, 10); 
spin_lock_irqsave(&card->param_queue_lock, flags); list_for_each_entry(prm, &card->param_queue, list) { if (prm->port == port && prm->pid == cmdpid) { prm->response = skb; skb_pull(skb, 7); wake_up(&card->param_wq); found = 1; break; } } spin_unlock_irqrestore(&card->param_queue_lock, flags); return found; } static ssize_t console_show(struct device *dev, struct device_attribute *attr, char *buf) { struct atm_dev *atmdev = container_of(dev, struct atm_dev, class_dev); struct solos_card *card = atmdev->dev_data; struct sk_buff *skb; unsigned int len; spin_lock(&card->cli_queue_lock); skb = skb_dequeue(&card->cli_queue[SOLOS_CHAN(atmdev)]); spin_unlock(&card->cli_queue_lock); if(skb == NULL) return sprintf(buf, "No data.\n"); len = skb->len; memcpy(buf, skb->data, len); dev_dbg(&card->dev->dev, "len: %d\n", len); kfree_skb(skb); return len; } static int send_command(struct solos_card *card, int dev, const char *buf, size_t size) { struct sk_buff *skb; struct pkt_hdr *header; if (size > (BUF_SIZE - sizeof(*header))) { dev_dbg(&card->dev->dev, "Command is too big. Dropping request\n"); return 0; } skb = alloc_skb(size + sizeof(*header), GFP_ATOMIC); if (!skb) { dev_warn(&card->dev->dev, "Failed to allocate sk_buff in send_command()\n"); return 0; } header = (void *)skb_put(skb, sizeof(*header)); header->size = cpu_to_le16(size); header->vpi = cpu_to_le16(0); header->vci = cpu_to_le16(0); header->type = cpu_to_le16(PKT_COMMAND); memcpy(skb_put(skb, size), buf, size); fpga_queue(card, dev, skb, NULL); return 0; } static ssize_t console_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct atm_dev *atmdev = container_of(dev, struct atm_dev, class_dev); struct solos_card *card = atmdev->dev_data; int err; err = send_command(card, SOLOS_CHAN(atmdev), buf, count); return err?:count; } static DEVICE_ATTR(console, 0644, console_show, console_store); #define SOLOS_ATTR_RO(x) static DEVICE_ATTR(x, 0444, solos_param_show, NULL); #define SOLOS_ATTR_RW(x) static DEVICE_ATTR(x, 0644, solos_param_show, solos_param_store); #include "solos-attrlist.c" #undef SOLOS_ATTR_RO #undef SOLOS_ATTR_RW #define SOLOS_ATTR_RO(x) &dev_attr_##x.attr, #define SOLOS_ATTR_RW(x) &dev_attr_##x.attr, static struct attribute *solos_attrs[] = { #include "solos-attrlist.c" NULL }; static struct attribute_group solos_attr_group = { .attrs = solos_attrs, .name = "parameters", }; static int flash_upgrade(struct solos_card *card, int chip) { const struct firmware *fw; const char *fw_name; int blocksize = 0; int numblocks = 0; int offset; switch (chip) { case 0: fw_name = "solos-FPGA.bin"; blocksize = FPGA_BLOCK; break; case 1: fw_name = "solos-Firmware.bin"; blocksize = SOLOS_BLOCK; break; case 2: if (card->fpga_version > LEGACY_BUFFERS){ fw_name = "solos-db-FPGA.bin"; blocksize = FPGA_BLOCK; } else { dev_info(&card->dev->dev, "FPGA version doesn't support" " daughter board upgrades\n"); return -EPERM; } break; case 3: if (card->fpga_version > LEGACY_BUFFERS){ fw_name = "solos-Firmware.bin"; blocksize = SOLOS_BLOCK; } else { dev_info(&card->dev->dev, "FPGA version doesn't support" " daughter board upgrades\n"); return -EPERM; } break; default: return -ENODEV; } if (request_firmware(&fw, fw_name, &card->dev->dev)) return -ENOENT; dev_info(&card->dev->dev, "Flash upgrade starting\n"); numblocks = fw->size / blocksize; dev_info(&card->dev->dev, "Firmware size: %zd\n", fw->size); dev_info(&card->dev->dev, "Number of blocks: %d\n", numblocks); dev_info(&card->dev->dev, "Changing FPGA to Update 
mode\n"); iowrite32(1, card->config_regs + FPGA_MODE); (void) ioread32(card->config_regs + FPGA_MODE); /* Set mode to Chip Erase */ if(chip == 0 || chip == 2) dev_info(&card->dev->dev, "Set FPGA Flash mode to FPGA Chip Erase\n"); if(chip == 1 || chip == 3) dev_info(&card->dev->dev, "Set FPGA Flash mode to Solos Chip Erase\n"); iowrite32((chip * 2), card->config_regs + FLASH_MODE); iowrite32(1, card->config_regs + WRITE_FLASH); wait_event(card->fw_wq, !ioread32(card->config_regs + FLASH_BUSY)); for (offset = 0; offset < fw->size; offset += blocksize) { int i; /* Clear write flag */ iowrite32(0, card->config_regs + WRITE_FLASH); /* Set mode to Block Write */ /* dev_info(&card->dev->dev, "Set FPGA Flash mode to Block Write\n"); */ iowrite32(((chip * 2) + 1), card->config_regs + FLASH_MODE); /* Copy block to buffer, swapping each 16 bits */ for(i = 0; i < blocksize; i += 4) { uint32_t word = swahb32p((uint32_t *)(fw->data + offset + i)); if(card->fpga_version > LEGACY_BUFFERS) iowrite32(word, FLASH_BUF + i); else iowrite32(word, RX_BUF(card, 3) + i); } /* Specify block number and then trigger flash write */ iowrite32(offset / blocksize, card->config_regs + FLASH_BLOCK); iowrite32(1, card->config_regs + WRITE_FLASH); wait_event(card->fw_wq, !ioread32(card->config_regs + FLASH_BUSY)); } release_firmware(fw); iowrite32(0, card->config_regs + WRITE_FLASH); iowrite32(0, card->config_regs + FPGA_MODE); iowrite32(0, card->config_regs + FLASH_MODE); dev_info(&card->dev->dev, "Returning FPGA to Data mode\n"); return 0; } static irqreturn_t solos_irq(int irq, void *dev_id) { struct solos_card *card = dev_id; int handled = 1; iowrite32(0, card->config_regs + IRQ_CLEAR); /* If we're up and running, just kick the tasklet to process TX/RX */ if (card->atmdev[0]) tasklet_schedule(&card->tlet); else wake_up(&card->fw_wq); return IRQ_RETVAL(handled); } void solos_bh(unsigned long card_arg) { struct solos_card *card = (void *)card_arg; uint32_t card_flags; uint32_t rx_done = 0; int port; /* * Since fpga_tx() is going to need to read the flags under its lock, * it can return them to us so that we don't have to hit PCI MMIO * again for the same information */ card_flags = fpga_tx(card); for (port = 0; port < card->nr_ports; port++) { if (card_flags & (0x10 << port)) { struct pkt_hdr _hdr, *header; struct sk_buff *skb; struct atm_vcc *vcc; int size; if (card->using_dma) { skb = card->rx_skb[port]; card->rx_skb[port] = NULL; pci_unmap_single(card->dev, SKB_CB(skb)->dma_addr, RX_DMA_SIZE, PCI_DMA_FROMDEVICE); header = (void *)skb->data; size = le16_to_cpu(header->size); skb_put(skb, size + sizeof(*header)); skb_pull(skb, sizeof(*header)); } else { header = &_hdr; rx_done |= 0x10 << port; memcpy_fromio(header, RX_BUF(card, port), sizeof(*header)); size = le16_to_cpu(header->size); if (size > (card->buffer_size - sizeof(*header))){ dev_warn(&card->dev->dev, "Invalid buffer size\n"); continue; } skb = alloc_skb(size + 1, GFP_ATOMIC); if (!skb) { if (net_ratelimit()) dev_warn(&card->dev->dev, "Failed to allocate sk_buff for RX\n"); continue; } memcpy_fromio(skb_put(skb, size), RX_BUF(card, port) + sizeof(*header), size); } if (atmdebug) { dev_info(&card->dev->dev, "Received: port %d\n", port); dev_info(&card->dev->dev, "size: %d VPI: %d VCI: %d\n", size, le16_to_cpu(header->vpi), le16_to_cpu(header->vci)); print_buffer(skb); } switch (le16_to_cpu(header->type)) { case PKT_DATA: vcc = find_vcc(card->atmdev[port], le16_to_cpu(header->vpi), le16_to_cpu(header->vci)); if (!vcc) { if (net_ratelimit()) 
dev_warn(&card->dev->dev, "Received packet for unknown VPI.VCI %d.%d on port %d\n", le16_to_cpu(header->vpi), le16_to_cpu(header->vci), port); continue; } atm_charge(vcc, skb->truesize); vcc->push(vcc, skb); atomic_inc(&vcc->stats->rx); break; case PKT_STATUS: if (process_status(card, port, skb) && net_ratelimit()) { dev_warn(&card->dev->dev, "Bad status packet of %d bytes on port %d:\n", skb->len, port); print_buffer(skb); } dev_kfree_skb_any(skb); break; case PKT_COMMAND: default: /* FIXME: Not really, surely? */ if (process_command(card, port, skb)) break; spin_lock(&card->cli_queue_lock); if (skb_queue_len(&card->cli_queue[port]) > 10) { if (net_ratelimit()) dev_warn(&card->dev->dev, "Dropping console response on port %d\n", port); dev_kfree_skb_any(skb); } else skb_queue_tail(&card->cli_queue[port], skb); spin_unlock(&card->cli_queue_lock); break; } } /* Allocate RX skbs for any ports which need them */ if (card->using_dma && card->atmdev[port] && !card->rx_skb[port]) { struct sk_buff *skb = alloc_skb(RX_DMA_SIZE, GFP_ATOMIC); if (skb) { SKB_CB(skb)->dma_addr = pci_map_single(card->dev, skb->data, RX_DMA_SIZE, PCI_DMA_FROMDEVICE); iowrite32(SKB_CB(skb)->dma_addr, card->config_regs + RX_DMA_ADDR(port)); card->rx_skb[port] = skb; } else { if (net_ratelimit()) dev_warn(&card->dev->dev, "Failed to allocate RX skb"); /* We'll have to try again later */ tasklet_schedule(&card->tlet); } } } if (rx_done) iowrite32(rx_done, card->config_regs + FLAGS_ADDR); return; } static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci) { struct hlist_head *head; struct atm_vcc *vcc = NULL; struct hlist_node *node; struct sock *s; read_lock(&vcc_sklist_lock); head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)]; sk_for_each(s, node, head) { vcc = atm_sk(s); if (vcc->dev == dev && vcc->vci == vci && vcc->vpi == vpi && vcc->qos.rxtp.traffic_class != ATM_NONE && test_bit(ATM_VF_READY, &vcc->flags)) goto out; } vcc = NULL; out: read_unlock(&vcc_sklist_lock); return vcc; } static int list_vccs(int vci) { struct hlist_head *head; struct atm_vcc *vcc; struct hlist_node *node; struct sock *s; int num_found = 0; int i; read_lock(&vcc_sklist_lock); if (vci != 0){ head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)]; sk_for_each(s, node, head) { num_found ++; vcc = atm_sk(s); printk(KERN_DEBUG "Device: %d Vpi: %d Vci: %d\n", vcc->dev->number, vcc->vpi, vcc->vci); } } else { for(i = 0; i < VCC_HTABLE_SIZE; i++){ head = &vcc_hash[i]; sk_for_each(s, node, head) { num_found ++; vcc = atm_sk(s); printk(KERN_DEBUG "Device: %d Vpi: %d Vci: %d\n", vcc->dev->number, vcc->vpi, vcc->vci); } } } read_unlock(&vcc_sklist_lock); return num_found; } static int popen(struct atm_vcc *vcc) { struct solos_card *card = vcc->dev->dev_data; struct sk_buff *skb; struct pkt_hdr *header; if (vcc->qos.aal != ATM_AAL5) { dev_warn(&card->dev->dev, "Unsupported ATM type %d\n", vcc->qos.aal); return -EINVAL; } skb = alloc_skb(sizeof(*header), GFP_ATOMIC); if (!skb) { if (net_ratelimit()) dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n"); return -ENOMEM; } header = (void *)skb_put(skb, sizeof(*header)); header->size = cpu_to_le16(0); header->vpi = cpu_to_le16(vcc->vpi); header->vci = cpu_to_le16(vcc->vci); header->type = cpu_to_le16(PKT_POPEN); fpga_queue(card, SOLOS_CHAN(vcc->dev), skb, NULL); set_bit(ATM_VF_ADDR, &vcc->flags); set_bit(ATM_VF_READY, &vcc->flags); list_vccs(0); return 0; } static void pclose(struct atm_vcc *vcc) { struct solos_card *card = vcc->dev->dev_data; struct sk_buff *skb; struct pkt_hdr *header; skb = 
alloc_skb(sizeof(*header), GFP_ATOMIC); if (!skb) { dev_warn(&card->dev->dev, "Failed to allocate sk_buff in pclose()\n"); return; } header = (void *)skb_put(skb, sizeof(*header)); header->size = cpu_to_le16(0); header->vpi = cpu_to_le16(vcc->vpi); header->vci = cpu_to_le16(vcc->vci); header->type = cpu_to_le16(PKT_PCLOSE); fpga_queue(card, SOLOS_CHAN(vcc->dev), skb, NULL); clear_bit(ATM_VF_ADDR, &vcc->flags); clear_bit(ATM_VF_READY, &vcc->flags); /* Hold up vcc_destroy_socket() (our caller) until solos_bh() in the tasklet has finished processing any incoming packets (and, more to the point, using the vcc pointer). */ tasklet_unlock_wait(&card->tlet); return; } static int print_buffer(struct sk_buff *buf) { int len,i; char msg[500]; char item[10]; len = buf->len; for (i = 0; i < len; i++){ if(i % 8 == 0) sprintf(msg, "%02X: ", i); sprintf(item,"%02X ",*(buf->data + i)); strcat(msg, item); if(i % 8 == 7) { sprintf(item, "\n"); strcat(msg, item); printk(KERN_DEBUG "%s", msg); } } if (i % 8 != 0) { sprintf(item, "\n"); strcat(msg, item); printk(KERN_DEBUG "%s", msg); } printk(KERN_DEBUG "\n"); return 0; } static void fpga_queue(struct solos_card *card, int port, struct sk_buff *skb, struct atm_vcc *vcc) { int old_len; unsigned long flags; SKB_CB(skb)->vcc = vcc; spin_lock_irqsave(&card->tx_queue_lock, flags); old_len = skb_queue_len(&card->tx_queue[port]); skb_queue_tail(&card->tx_queue[port], skb); if (!old_len) card->tx_mask |= (1 << port); spin_unlock_irqrestore(&card->tx_queue_lock, flags); /* Theoretically we could just schedule the tasklet here, but that introduces latency we don't want -- it's noticeable */ if (!old_len) fpga_tx(card); } static uint32_t fpga_tx(struct solos_card *card) { uint32_t tx_pending, card_flags; uint32_t tx_started = 0; struct sk_buff *skb; struct atm_vcc *vcc; unsigned char port; unsigned long flags; spin_lock_irqsave(&card->tx_lock, flags); card_flags = ioread32(card->config_regs + FLAGS_ADDR); /* * The queue lock is required for _writing_ to tx_mask, but we're * OK to read it here without locking. The only potential update * that we could race with is in fpga_queue() where it sets a bit * for a new port... but it's going to call this function again if * it's doing that, anyway. 
*/ tx_pending = card->tx_mask & ~card_flags; for (port = 0; tx_pending; tx_pending >>= 1, port++) { if (tx_pending & 1) { struct sk_buff *oldskb = card->tx_skb[port]; if (oldskb) pci_unmap_single(card->dev, SKB_CB(oldskb)->dma_addr, oldskb->len, PCI_DMA_TODEVICE); spin_lock(&card->tx_queue_lock); skb = skb_dequeue(&card->tx_queue[port]); if (!skb) card->tx_mask &= ~(1 << port); spin_unlock(&card->tx_queue_lock); if (skb && !card->using_dma) { memcpy_toio(TX_BUF(card, port), skb->data, skb->len); tx_started |= 1 << port; oldskb = skb; /* We're done with this skb already */ } else if (skb && card->using_dma) { SKB_CB(skb)->dma_addr = pci_map_single(card->dev, skb->data, skb->len, PCI_DMA_TODEVICE); iowrite32(SKB_CB(skb)->dma_addr, card->config_regs + TX_DMA_ADDR(port)); } if (!oldskb) continue; /* Clean up and free oldskb now it's gone */ if (atmdebug) { struct pkt_hdr *header = (void *)oldskb->data; int size = le16_to_cpu(header->size); skb_pull(oldskb, sizeof(*header)); dev_info(&card->dev->dev, "Transmitted: port %d\n", port); dev_info(&card->dev->dev, "size: %d VPI: %d VCI: %d\n", size, le16_to_cpu(header->vpi), le16_to_cpu(header->vci)); print_buffer(oldskb); } vcc = SKB_CB(oldskb)->vcc; if (vcc) { atomic_inc(&vcc->stats->tx); solos_pop(vcc, oldskb); } else dev_kfree_skb_irq(oldskb); } } /* For non-DMA TX, write the 'TX start' bit for all four ports simultaneously */ if (tx_started) iowrite32(tx_started, card->config_regs + FLAGS_ADDR); spin_unlock_irqrestore(&card->tx_lock, flags); return card_flags; } static int psend(struct atm_vcc *vcc, struct sk_buff *skb) { struct solos_card *card = vcc->dev->dev_data; struct pkt_hdr *header; int pktlen; pktlen = skb->len; if (pktlen > (BUF_SIZE - sizeof(*header))) { dev_warn(&card->dev->dev, "Length of PDU is too large. 
Dropping PDU.\n"); solos_pop(vcc, skb); return 0; } if (!skb_clone_writable(skb, sizeof(*header))) { int expand_by = 0; int ret; if (skb_headroom(skb) < sizeof(*header)) expand_by = sizeof(*header) - skb_headroom(skb); ret = pskb_expand_head(skb, expand_by, 0, GFP_ATOMIC); if (ret) { dev_warn(&card->dev->dev, "pskb_expand_head failed.\n"); solos_pop(vcc, skb); return ret; } } header = (void *)skb_push(skb, sizeof(*header)); /* This does _not_ include the size of the header */ header->size = cpu_to_le16(pktlen); header->vpi = cpu_to_le16(vcc->vpi); header->vci = cpu_to_le16(vcc->vci); header->type = cpu_to_le16(PKT_DATA); fpga_queue(card, SOLOS_CHAN(vcc->dev), skb, vcc); return 0; } static struct atmdev_ops fpga_ops = { .open = popen, .close = pclose, .ioctl = NULL, .getsockopt = NULL, .setsockopt = NULL, .send = psend, .send_oam = NULL, .phy_put = NULL, .phy_get = NULL, .change_qos = NULL, .proc_read = NULL, .owner = THIS_MODULE }; static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id) { int err; uint16_t fpga_ver; uint8_t major_ver, minor_ver; uint32_t data32; struct solos_card *card; card = kzalloc(sizeof(*card), GFP_KERNEL); if (!card) return -ENOMEM; card->dev = dev; init_waitqueue_head(&card->fw_wq); init_waitqueue_head(&card->param_wq); err = pci_enable_device(dev); if (err) { dev_warn(&dev->dev, "Failed to enable PCI device\n"); goto out; } err = pci_set_dma_mask(dev, DMA_BIT_MASK(32)); if (err) { dev_warn(&dev->dev, "Failed to set 32-bit DMA mask\n"); goto out; } err = pci_request_regions(dev, "solos"); if (err) { dev_warn(&dev->dev, "Failed to request regions\n"); goto out; } card->config_regs = pci_iomap(dev, 0, CONFIG_RAM_SIZE); if (!card->config_regs) { dev_warn(&dev->dev, "Failed to ioremap config registers\n"); goto out_release_regions; } card->buffers = pci_iomap(dev, 1, DATA_RAM_SIZE); if (!card->buffers) { dev_warn(&dev->dev, "Failed to ioremap data buffers\n"); goto out_unmap_config; } if (reset) { iowrite32(1, card->config_regs + FPGA_MODE); data32 = ioread32(card->config_regs + FPGA_MODE); iowrite32(0, card->config_regs + FPGA_MODE); data32 = ioread32(card->config_regs + FPGA_MODE); } data32 = ioread32(card->config_regs + FPGA_VER); fpga_ver = (data32 & 0x0000FFFF); major_ver = ((data32 & 0xFF000000) >> 24); minor_ver = ((data32 & 0x00FF0000) >> 16); card->fpga_version = FPGA_VERSION(major_ver,minor_ver); if (card->fpga_version > LEGACY_BUFFERS) card->buffer_size = BUF_SIZE; else card->buffer_size = OLD_BUF_SIZE; dev_info(&dev->dev, "Solos FPGA Version %d.%02d svn-%d\n", major_ver, minor_ver, fpga_ver); if (fpga_ver < 37 && (fpga_upgrade || firmware_upgrade || db_fpga_upgrade || db_firmware_upgrade)) { dev_warn(&dev->dev, "FPGA too old; cannot upgrade flash. 
Use JTAG.\n"); fpga_upgrade = firmware_upgrade = 0; db_fpga_upgrade = db_firmware_upgrade = 0; } if (card->fpga_version >= DMA_SUPPORTED){ card->using_dma = 1; } else { card->using_dma = 0; /* Set RX empty flag for all ports */ iowrite32(0xF0, card->config_regs + FLAGS_ADDR); } data32 = ioread32(card->config_regs + PORTS); card->nr_ports = (data32 & 0x000000FF); pci_set_drvdata(dev, card); tasklet_init(&card->tlet, solos_bh, (unsigned long)card); spin_lock_init(&card->tx_lock); spin_lock_init(&card->tx_queue_lock); spin_lock_init(&card->cli_queue_lock); spin_lock_init(&card->param_queue_lock); INIT_LIST_HEAD(&card->param_queue); err = request_irq(dev->irq, solos_irq, IRQF_SHARED, "solos-pci", card); if (err) { dev_dbg(&card->dev->dev, "Failed to request interrupt IRQ: %d\n", dev->irq); goto out_unmap_both; } iowrite32(1, card->config_regs + IRQ_EN_ADDR); if (fpga_upgrade) flash_upgrade(card, 0); if (firmware_upgrade) flash_upgrade(card, 1); if (db_fpga_upgrade) flash_upgrade(card, 2); if (db_firmware_upgrade) flash_upgrade(card, 3); err = atm_init(card, &dev->dev); if (err) goto out_free_irq; return 0; out_free_irq: iowrite32(0, card->config_regs + IRQ_EN_ADDR); free_irq(dev->irq, card); tasklet_kill(&card->tlet); out_unmap_both: pci_set_drvdata(dev, NULL); pci_iounmap(dev, card->config_regs); out_unmap_config: pci_iounmap(dev, card->buffers); out_release_regions: pci_release_regions(dev); out: kfree(card); return err; } static int atm_init(struct solos_card *card, struct device *parent) { int i; for (i = 0; i < card->nr_ports; i++) { struct sk_buff *skb; struct pkt_hdr *header; skb_queue_head_init(&card->tx_queue[i]); skb_queue_head_init(&card->cli_queue[i]); card->atmdev[i] = atm_dev_register("solos-pci", parent, &fpga_ops, -1, NULL); if (!card->atmdev[i]) { dev_err(&card->dev->dev, "Could not register ATM device %d\n", i); atm_remove(card); return -ENODEV; } if (device_create_file(&card->atmdev[i]->class_dev, &dev_attr_console)) dev_err(&card->dev->dev, "Could not register console for ATM device %d\n", i); if (sysfs_create_group(&card->atmdev[i]->class_dev.kobj, &solos_attr_group)) dev_err(&card->dev->dev, "Could not register parameter group for ATM device %d\n", i); dev_info(&card->dev->dev, "Registered ATM device %d\n", card->atmdev[i]->number); card->atmdev[i]->ci_range.vpi_bits = 8; card->atmdev[i]->ci_range.vci_bits = 16; card->atmdev[i]->dev_data = card; card->atmdev[i]->phy_data = (void *)(unsigned long)i; atm_dev_signal_change(card->atmdev[i], ATM_PHY_SIG_FOUND); skb = alloc_skb(sizeof(*header), GFP_ATOMIC); if (!skb) { dev_warn(&card->dev->dev, "Failed to allocate sk_buff in atm_init()\n"); continue; } header = (void *)skb_put(skb, sizeof(*header)); header->size = cpu_to_le16(0); header->vpi = cpu_to_le16(0); header->vci = cpu_to_le16(0); header->type = cpu_to_le16(PKT_STATUS); fpga_queue(card, i, skb, NULL); } return 0; } static void atm_remove(struct solos_card *card) { int i; for (i = 0; i < card->nr_ports; i++) { if (card->atmdev[i]) { struct sk_buff *skb; dev_info(&card->dev->dev, "Unregistering ATM device %d\n", card->atmdev[i]->number); sysfs_remove_group(&card->atmdev[i]->class_dev.kobj, &solos_attr_group); atm_dev_deregister(card->atmdev[i]); skb = card->rx_skb[i]; if (skb) { pci_unmap_single(card->dev, SKB_CB(skb)->dma_addr, RX_DMA_SIZE, PCI_DMA_FROMDEVICE); dev_kfree_skb(skb); } skb = card->tx_skb[i]; if (skb) { pci_unmap_single(card->dev, SKB_CB(skb)->dma_addr, skb->len, PCI_DMA_TODEVICE); dev_kfree_skb(skb); } while ((skb = skb_dequeue(&card->tx_queue[i]))) 
dev_kfree_skb(skb); } } } static void fpga_remove(struct pci_dev *dev) { struct solos_card *card = pci_get_drvdata(dev); /* Disable IRQs */ iowrite32(0, card->config_regs + IRQ_EN_ADDR); /* Reset FPGA */ iowrite32(1, card->config_regs + FPGA_MODE); (void)ioread32(card->config_regs + FPGA_MODE); atm_remove(card); free_irq(dev->irq, card); tasklet_kill(&card->tlet); /* Release device from reset */ iowrite32(0, card->config_regs + FPGA_MODE); (void)ioread32(card->config_regs + FPGA_MODE); pci_iounmap(dev, card->buffers); pci_iounmap(dev, card->config_regs); pci_release_regions(dev); pci_disable_device(dev); pci_set_drvdata(dev, NULL); kfree(card); } static struct pci_device_id fpga_pci_tbl[] __devinitdata = { { 0x10ee, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0, } }; MODULE_DEVICE_TABLE(pci,fpga_pci_tbl); static struct pci_driver fpga_driver = { .name = "solos", .id_table = fpga_pci_tbl, .probe = fpga_probe, .remove = fpga_remove, }; static int __init solos_pci_init(void) { printk(KERN_INFO "Solos PCI Driver Version %s\n", VERSION); return pci_register_driver(&fpga_driver); } static void __exit solos_pci_exit(void) { pci_unregister_driver(&fpga_driver); printk(KERN_INFO "Solos PCI Driver %s Unloaded\n", VERSION); } module_init(solos_pci_init); module_exit(solos_pci_exit);
gpl-2.0
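A note on the framing used throughout solos-pci.c above: every buffer queued to the FPGA begins with an 8-byte header of four little-endian 16-bit fields (size, vpi, vci, type), and as the comment in psend() points out, size counts only the payload, not the header itself. The following is a hedged user-space sketch of that framing; PKT_DATA and the field order mirror the driver, while put_le16() and build_frame() are illustrative helpers of my own.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PKT_DATA 0	/* same type code as the driver uses for data PDUs */

/* Store a 16-bit value low byte first, matching cpu_to_le16() on the wire. */
static void put_le16(uint8_t *p, uint16_t v)
{
	p[0] = v & 0xff;
	p[1] = v >> 8;
}

/* Build header + payload into buf; returns the total frame length.
 * The size field excludes the 8-byte header, as in psend(). */
static size_t build_frame(uint8_t *buf, uint16_t vpi, uint16_t vci,
			  const uint8_t *payload, uint16_t len)
{
	put_le16(buf + 0, len);		/* size: payload bytes only */
	put_le16(buf + 2, vpi);
	put_le16(buf + 4, vci);
	put_le16(buf + 6, PKT_DATA);
	memcpy(buf + 8, payload, len);
	return 8 + (size_t)len;
}

int main(void)
{
	uint8_t frame[64];
	const uint8_t pdu[] = { 0xde, 0xad, 0xbe, 0xef };
	size_t i, n = build_frame(frame, 0, 35, pdu, sizeof(pdu));

	for (i = 0; i < n; i++)
		printf("%02x ", frame[i]);
	printf("\n");
	return 0;
}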
civato/Note8.0-StormBorn
drivers/net/wireless/libertas/cfg.c
2323
54598
/* * Implement cfg80211 ("iw") support. * * Copyright (C) 2009 M&N Solutions GmbH, 61191 Rosbach, Germany * Holger Schurig <hs4233@mail.mn-solutions.de> * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/sched.h> #include <linux/wait.h> #include <linux/slab.h> #include <linux/ieee80211.h> #include <net/cfg80211.h> #include <asm/unaligned.h> #include "decl.h" #include "cfg.h" #include "cmd.h" #define CHAN2G(_channel, _freq, _flags) { \ .band = IEEE80211_BAND_2GHZ, \ .center_freq = (_freq), \ .hw_value = (_channel), \ .flags = (_flags), \ .max_antenna_gain = 0, \ .max_power = 30, \ } static struct ieee80211_channel lbs_2ghz_channels[] = { CHAN2G(1, 2412, 0), CHAN2G(2, 2417, 0), CHAN2G(3, 2422, 0), CHAN2G(4, 2427, 0), CHAN2G(5, 2432, 0), CHAN2G(6, 2437, 0), CHAN2G(7, 2442, 0), CHAN2G(8, 2447, 0), CHAN2G(9, 2452, 0), CHAN2G(10, 2457, 0), CHAN2G(11, 2462, 0), CHAN2G(12, 2467, 0), CHAN2G(13, 2472, 0), CHAN2G(14, 2484, 0), }; #define RATETAB_ENT(_rate, _hw_value, _flags) { \ .bitrate = (_rate), \ .hw_value = (_hw_value), \ .flags = (_flags), \ } /* Table 6 in section 3.2.1.1 */ static struct ieee80211_rate lbs_rates[] = { RATETAB_ENT(10, 0, 0), RATETAB_ENT(20, 1, 0), RATETAB_ENT(55, 2, 0), RATETAB_ENT(110, 3, 0), RATETAB_ENT(60, 9, 0), RATETAB_ENT(90, 6, 0), RATETAB_ENT(120, 7, 0), RATETAB_ENT(180, 8, 0), RATETAB_ENT(240, 9, 0), RATETAB_ENT(360, 10, 0), RATETAB_ENT(480, 11, 0), RATETAB_ENT(540, 12, 0), }; static struct ieee80211_supported_band lbs_band_2ghz = { .channels = lbs_2ghz_channels, .n_channels = ARRAY_SIZE(lbs_2ghz_channels), .bitrates = lbs_rates, .n_bitrates = ARRAY_SIZE(lbs_rates), }; static const u32 cipher_suites[] = { WLAN_CIPHER_SUITE_WEP40, WLAN_CIPHER_SUITE_WEP104, WLAN_CIPHER_SUITE_TKIP, WLAN_CIPHER_SUITE_CCMP, }; /* Time to stay on the channel */ #define LBS_DWELL_PASSIVE 100 #define LBS_DWELL_ACTIVE 40 /*************************************************************************** * Misc utility functions * * TLVs are Marvell specific. They are very similar to IEs, they have the * same structure: type, length, data*. The only difference: for IEs, the * type and length are u8, but for TLVs they're __le16. */ /* * Convert NL80211's auth_type to the one from Libertas, see chapter 5.9.1 * in the firmware spec */ static u8 lbs_auth_to_authtype(enum nl80211_auth_type auth_type) { int ret = -ENOTSUPP; switch (auth_type) { case NL80211_AUTHTYPE_OPEN_SYSTEM: case NL80211_AUTHTYPE_SHARED_KEY: ret = auth_type; break; case NL80211_AUTHTYPE_AUTOMATIC: ret = NL80211_AUTHTYPE_OPEN_SYSTEM; break; case NL80211_AUTHTYPE_NETWORK_EAP: ret = 0x80; break; default: /* silence compiler */ break; } return ret; } /* * Various firmware commands need the list of supported rates, but with * the high bit set for basic rates */ static int lbs_add_rates(u8 *rates) { size_t i; for (i = 0; i < ARRAY_SIZE(lbs_rates); i++) { u8 rate = lbs_rates[i].bitrate / 5; if (rate == 0x02 || rate == 0x04 || rate == 0x0b || rate == 0x16) rate |= 0x80; rates[i] = rate; } return ARRAY_SIZE(lbs_rates); } /*************************************************************************** * TLV utility functions * * TLVs are Marvell specific. They are very similar to IEs, they have the * same structure: type, length, data*. The only difference: for IEs, the * type and length are u8, but for TLVs they're __le16.
*/ /* * Add ssid TLV */ #define LBS_MAX_SSID_TLV_SIZE \ (sizeof(struct mrvl_ie_header) \ + IEEE80211_MAX_SSID_LEN) static int lbs_add_ssid_tlv(u8 *tlv, const u8 *ssid, int ssid_len) { struct mrvl_ie_ssid_param_set *ssid_tlv = (void *)tlv; /* * TLV-ID SSID 00 00 * length 06 00 * ssid 4d 4e 54 45 53 54 */ ssid_tlv->header.type = cpu_to_le16(TLV_TYPE_SSID); ssid_tlv->header.len = cpu_to_le16(ssid_len); memcpy(ssid_tlv->ssid, ssid, ssid_len); return sizeof(ssid_tlv->header) + ssid_len; } /* * Add channel list TLV (section 8.4.2) * * Actual channel data comes from priv->wdev->wiphy->channels. */ #define LBS_MAX_CHANNEL_LIST_TLV_SIZE \ (sizeof(struct mrvl_ie_header) \ + (LBS_SCAN_BEFORE_NAP * sizeof(struct chanscanparamset))) static int lbs_add_channel_list_tlv(struct lbs_private *priv, u8 *tlv, int last_channel, int active_scan) { int chanscanparamsize = sizeof(struct chanscanparamset) * (last_channel - priv->scan_channel); struct mrvl_ie_header *header = (void *) tlv; /* * TLV-ID CHANLIST 01 01 * length 0e 00 * channel 00 01 00 00 00 64 00 * radio type 00 * channel 01 * scan type 00 * min scan time 00 00 * max scan time 64 00 * channel 2 00 02 00 00 00 64 00 * */ header->type = cpu_to_le16(TLV_TYPE_CHANLIST); header->len = cpu_to_le16(chanscanparamsize); tlv += sizeof(struct mrvl_ie_header); /* lbs_deb_scan("scan: channels %d to %d\n", priv->scan_channel, last_channel); */ memset(tlv, 0, chanscanparamsize); while (priv->scan_channel < last_channel) { struct chanscanparamset *param = (void *) tlv; param->radiotype = CMD_SCAN_RADIO_TYPE_BG; param->channumber = priv->scan_req->channels[priv->scan_channel]->hw_value; if (active_scan) { param->maxscantime = cpu_to_le16(LBS_DWELL_ACTIVE); } else { param->chanscanmode.passivescan = 1; param->maxscantime = cpu_to_le16(LBS_DWELL_PASSIVE); } tlv += sizeof(struct chanscanparamset); priv->scan_channel++; } return sizeof(struct mrvl_ie_header) + chanscanparamsize; } /* * Add rates TLV * * The rates are in lbs_bg_rates[], but for the 802.11b * rates the high bit is set. We add this TLV only because * there's a firmware which otherwise doesn't report all * APs in range. */ #define LBS_MAX_RATES_TLV_SIZE \ (sizeof(struct mrvl_ie_header) \ + (ARRAY_SIZE(lbs_rates))) /* Adds a TLV with all rates the hardware supports */ static int lbs_add_supported_rates_tlv(u8 *tlv) { size_t i; struct mrvl_ie_rates_param_set *rate_tlv = (void *)tlv; /* * TLV-ID RATES 01 00 * length 0e 00 * rates 82 84 8b 96 0c 12 18 24 30 48 60 6c */ rate_tlv->header.type = cpu_to_le16(TLV_TYPE_RATES); tlv += sizeof(rate_tlv->header); i = lbs_add_rates(tlv); tlv += i; rate_tlv->header.len = cpu_to_le16(i); return sizeof(rate_tlv->header) + i; } /* Add common rates from a TLV and return the new end of the TLV */ static u8 * add_ie_rates(u8 *tlv, const u8 *ie, int *nrates) { int hw, ap, ap_max = ie[1]; u8 hw_rate; /* Advance past IE header */ ie += 2; lbs_deb_hex(LBS_DEB_ASSOC, "AP IE Rates", (u8 *) ie, ap_max); for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) { hw_rate = lbs_rates[hw].bitrate / 5; for (ap = 0; ap < ap_max; ap++) { if (hw_rate == (ie[ap] & 0x7f)) { *tlv++ = ie[ap]; *nrates = *nrates + 1; } } } return tlv; } /* * Adds a TLV with all rates the hardware *and* BSS supports. 
*/ static int lbs_add_common_rates_tlv(u8 *tlv, struct cfg80211_bss *bss) { struct mrvl_ie_rates_param_set *rate_tlv = (void *)tlv; const u8 *rates_eid, *ext_rates_eid; int n = 0; rates_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SUPP_RATES); ext_rates_eid = ieee80211_bss_get_ie(bss, WLAN_EID_EXT_SUPP_RATES); /* * 01 00 TLV_TYPE_RATES * 04 00 len * 82 84 8b 96 rates */ rate_tlv->header.type = cpu_to_le16(TLV_TYPE_RATES); tlv += sizeof(rate_tlv->header); /* Add basic rates */ if (rates_eid) { tlv = add_ie_rates(tlv, rates_eid, &n); /* Add extended rates, if any */ if (ext_rates_eid) tlv = add_ie_rates(tlv, ext_rates_eid, &n); } else { lbs_deb_assoc("assoc: bss had no basic rate IE\n"); /* Fallback: add basic 802.11b rates */ *tlv++ = 0x82; *tlv++ = 0x84; *tlv++ = 0x8b; *tlv++ = 0x96; n = 4; } rate_tlv->header.len = cpu_to_le16(n); return sizeof(rate_tlv->header) + n; } /* * Add auth type TLV. * * This is only needed for newer firmware (V9 and up). */ #define LBS_MAX_AUTH_TYPE_TLV_SIZE \ sizeof(struct mrvl_ie_auth_type) static int lbs_add_auth_type_tlv(u8 *tlv, enum nl80211_auth_type auth_type) { struct mrvl_ie_auth_type *auth = (void *) tlv; /* * 1f 01 TLV_TYPE_AUTH_TYPE * 01 00 len * 01 auth type */ auth->header.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE); auth->header.len = cpu_to_le16(sizeof(*auth)-sizeof(auth->header)); auth->auth = cpu_to_le16(lbs_auth_to_authtype(auth_type)); return sizeof(*auth); } /* * Add channel (phy ds) TLV */ #define LBS_MAX_CHANNEL_TLV_SIZE \ sizeof(struct mrvl_ie_header) static int lbs_add_channel_tlv(u8 *tlv, u8 channel) { struct mrvl_ie_ds_param_set *ds = (void *) tlv; /* * 03 00 TLV_TYPE_PHY_DS * 01 00 len * 06 channel */ ds->header.type = cpu_to_le16(TLV_TYPE_PHY_DS); ds->header.len = cpu_to_le16(sizeof(*ds)-sizeof(ds->header)); ds->channel = channel; return sizeof(*ds); } /* * Add (empty) CF param TLV of the form: */ #define LBS_MAX_CF_PARAM_TLV_SIZE \ sizeof(struct mrvl_ie_header) static int lbs_add_cf_param_tlv(u8 *tlv) { struct mrvl_ie_cf_param_set *cf = (void *)tlv; /* * 04 00 TLV_TYPE_CF * 06 00 len * 00 cfpcnt * 00 cfpperiod * 00 00 cfpmaxduration * 00 00 cfpdurationremaining */ cf->header.type = cpu_to_le16(TLV_TYPE_CF); cf->header.len = cpu_to_le16(sizeof(*cf)-sizeof(cf->header)); return sizeof(*cf); } /* * Add WPA TLV */ #define LBS_MAX_WPA_TLV_SIZE \ (sizeof(struct mrvl_ie_header) \ + 128 /* TODO: I guessed the size */) static int lbs_add_wpa_tlv(u8 *tlv, const u8 *ie, u8 ie_len) { size_t tlv_len; /* * We just need to convert an IE to a TLV. IEs use u8 for the header, * u8 type * u8 len * u8[] data * but TLVs use __le16 instead: * __le16 type * __le16 len * u8[] data */ *tlv++ = *ie++; *tlv++ = 0; tlv_len = *tlv++ = *ie++; *tlv++ = 0; while (tlv_len--) *tlv++ = *ie++; /* the TLV is two bytes larger than the IE */ return ie_len + 2; } /* * Set Channel */ static int lbs_cfg_set_channel(struct wiphy *wiphy, struct net_device *netdev, struct ieee80211_channel *channel, enum nl80211_channel_type channel_type) { struct lbs_private *priv = wiphy_priv(wiphy); int ret = -ENOTSUPP; lbs_deb_enter_args(LBS_DEB_CFG80211, "freq %d, type %d", channel->center_freq, channel_type); if (channel_type != NL80211_CHAN_NO_HT) goto out; ret = lbs_set_channel(priv, channel->hw_value); out: lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); return ret; } /* * Scanning */ /* * When scanning, the firmware doesn't send a null packet with the power-save * bit to the AP. So we cannot stay away from our current channel too long, * otherwise we lose data.
So take a "nap" while scanning every other * while. */ #define LBS_SCAN_BEFORE_NAP 4 /* * When the firmware reports back a scan-result, it gives us an "u8 rssi", * which isn't really an RSSI, as it becomes larger when moving away from * the AP. Anyway, we need to convert that into mBm. */ #define LBS_SCAN_RSSI_TO_MBM(rssi) \ ((-(int)rssi + 3)*100) static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy, struct cmd_header *resp) { struct cmd_ds_802_11_scan_rsp *scanresp = (void *)resp; int bsssize; const u8 *pos; const u8 *tsfdesc; int tsfsize; int i; int ret = -EILSEQ; lbs_deb_enter(LBS_DEB_CFG80211); bsssize = get_unaligned_le16(&scanresp->bssdescriptsize); lbs_deb_scan("scan response: %d BSSs (%d bytes); resp size %d bytes\n", scanresp->nr_sets, bsssize, le16_to_cpu(resp->size)); if (scanresp->nr_sets == 0) { ret = 0; goto done; } /* * The general layout of the scan response is described in chapter * 5.7.1. Basically we have a common part, then any number of BSS * descriptor sections. Finally we have section with the same number * of TSFs. * * cmd_ds_802_11_scan_rsp * cmd_header * pos_size * nr_sets * bssdesc 1 * bssid * rssi * timestamp * intvl * capa * IEs * bssdesc 2 * bssdesc n * MrvlIEtypes_TsfFimestamp_t * TSF for BSS 1 * TSF for BSS 2 * TSF for BSS n */ pos = scanresp->bssdesc_and_tlvbuffer; lbs_deb_hex(LBS_DEB_SCAN, "SCAN_RSP", scanresp->bssdesc_and_tlvbuffer, scanresp->bssdescriptsize); tsfdesc = pos + bsssize; tsfsize = 4 + 8 * scanresp->nr_sets; lbs_deb_hex(LBS_DEB_SCAN, "SCAN_TSF", (u8 *) tsfdesc, tsfsize); /* Validity check: we expect a Marvell-Local TLV */ i = get_unaligned_le16(tsfdesc); tsfdesc += 2; if (i != TLV_TYPE_TSFTIMESTAMP) { lbs_deb_scan("scan response: invalid TSF Timestamp %d\n", i); goto done; } /* * Validity check: the TLV holds TSF values with 8 bytes each, so * the size in the TLV must match the nr_sets value */ i = get_unaligned_le16(tsfdesc); tsfdesc += 2; if (i / 8 != scanresp->nr_sets) { lbs_deb_scan("scan response: invalid number of TSF timestamp " "sets (expected %d got %d)\n", scanresp->nr_sets, i / 8); goto done; } for (i = 0; i < scanresp->nr_sets; i++) { const u8 *bssid; const u8 *ie; int left; int ielen; int rssi; u16 intvl; u16 capa; int chan_no = -1; const u8 *ssid = NULL; u8 ssid_len = 0; DECLARE_SSID_BUF(ssid_buf); int len = get_unaligned_le16(pos); pos += 2; /* BSSID */ bssid = pos; pos += ETH_ALEN; /* RSSI */ rssi = *pos++; /* Packet time stamp */ pos += 8; /* Beacon interval */ intvl = get_unaligned_le16(pos); pos += 2; /* Capabilities */ capa = get_unaligned_le16(pos); pos += 2; /* To find out the channel, we must parse the IEs */ ie = pos; /* * 6+1+8+2+2: size of BSSID, RSSI, time stamp, beacon * interval, capabilities */ ielen = left = len - (6 + 1 + 8 + 2 + 2); while (left >= 2) { u8 id, elen; id = *pos++; elen = *pos++; left -= 2; if (elen > left || elen == 0) { lbs_deb_scan("scan response: invalid IE fmt\n"); goto done; } if (id == WLAN_EID_DS_PARAMS) chan_no = *pos; if (id == WLAN_EID_SSID) { ssid = pos; ssid_len = elen; } left -= elen; pos += elen; } /* No channel, no luck */ if (chan_no != -1) { struct wiphy *wiphy = priv->wdev->wiphy; int freq = ieee80211_channel_to_frequency(chan_no, IEEE80211_BAND_2GHZ); struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq); lbs_deb_scan("scan: %pM, capa %04x, chan %2d, %s, " "%d dBm\n", bssid, capa, chan_no, print_ssid(ssid_buf, ssid, ssid_len), LBS_SCAN_RSSI_TO_MBM(rssi)/100); if (channel && !(channel->flags & IEEE80211_CHAN_DISABLED)) cfg80211_inform_bss(wiphy, 
channel, bssid, le64_to_cpu(*(__le64 *)tsfdesc), capa, intvl, ie, ielen, LBS_SCAN_RSSI_TO_MBM(rssi), GFP_KERNEL); } else lbs_deb_scan("scan response: missing BSS channel IE\n"); tsfdesc += 8; } ret = 0; done: lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret); return ret; } /* * Our scan command contains a TLV, consisting of an SSID TLV, a channel list * TLV and a rates TLV. Determine the maximum size of them: */ #define LBS_SCAN_MAX_CMD_SIZE \ (sizeof(struct cmd_ds_802_11_scan) \ + LBS_MAX_SSID_TLV_SIZE \ + LBS_MAX_CHANNEL_LIST_TLV_SIZE \ + LBS_MAX_RATES_TLV_SIZE) /* * Assumes priv->scan_req is initialized and valid * Assumes priv->scan_channel is initialized */ static void lbs_scan_worker(struct work_struct *work) { struct lbs_private *priv = container_of(work, struct lbs_private, scan_work.work); struct cmd_ds_802_11_scan *scan_cmd; u8 *tlv; /* pointer into our current, growing TLV storage area */ int last_channel; int running, carrier; lbs_deb_enter(LBS_DEB_SCAN); scan_cmd = kzalloc(LBS_SCAN_MAX_CMD_SIZE, GFP_KERNEL); if (scan_cmd == NULL) goto out_no_scan_cmd; /* prepare fixed part of scan command */ scan_cmd->bsstype = CMD_BSS_TYPE_ANY; /* stop network while we're away from our main channel */ running = !netif_queue_stopped(priv->dev); carrier = netif_carrier_ok(priv->dev); if (running) netif_stop_queue(priv->dev); if (carrier) netif_carrier_off(priv->dev); /* prepare the variable TLV part of the scan command */ tlv = scan_cmd->tlvbuffer; /* add SSID TLV */ if (priv->scan_req->n_ssids) tlv += lbs_add_ssid_tlv(tlv, priv->scan_req->ssids[0].ssid, priv->scan_req->ssids[0].ssid_len); /* add channel TLVs */ last_channel = priv->scan_channel + LBS_SCAN_BEFORE_NAP; if (last_channel > priv->scan_req->n_channels) last_channel = priv->scan_req->n_channels; tlv += lbs_add_channel_list_tlv(priv, tlv, last_channel, priv->scan_req->n_ssids); /* add rates TLV */ tlv += lbs_add_supported_rates_tlv(tlv); if (priv->scan_channel < priv->scan_req->n_channels) { cancel_delayed_work(&priv->scan_work); if (!priv->stopping) queue_delayed_work(priv->work_thread, &priv->scan_work, msecs_to_jiffies(300)); } /* This is the final data we are about to send */ scan_cmd->hdr.size = cpu_to_le16(tlv - (u8 *)scan_cmd); lbs_deb_hex(LBS_DEB_SCAN, "SCAN_CMD", (void *)scan_cmd, sizeof(*scan_cmd)); lbs_deb_hex(LBS_DEB_SCAN, "SCAN_TLV", scan_cmd->tlvbuffer, tlv - scan_cmd->tlvbuffer); __lbs_cmd(priv, CMD_802_11_SCAN, &scan_cmd->hdr, le16_to_cpu(scan_cmd->hdr.size), lbs_ret_scan, 0); if (priv->scan_channel >= priv->scan_req->n_channels) { /* Mark scan done */ if (priv->internal_scan) kfree(priv->scan_req); else cfg80211_scan_done(priv->scan_req, false); priv->scan_req = NULL; priv->last_scan = jiffies; } /* Restart network */ if (carrier) netif_carrier_on(priv->dev); if (running && !priv->tx_pending_len) netif_wake_queue(priv->dev); kfree(scan_cmd); /* Wake up anything waiting on scan completion */ if (priv->scan_req == NULL) { lbs_deb_scan("scan: waking up waiters\n"); wake_up_all(&priv->scan_q); } out_no_scan_cmd: lbs_deb_leave(LBS_DEB_SCAN); } static void _internal_start_scan(struct lbs_private *priv, bool internal, struct cfg80211_scan_request *request) { lbs_deb_enter(LBS_DEB_CFG80211); lbs_deb_scan("scan: ssids %d, channels %d, ie_len %zd\n", request->n_ssids, request->n_channels, request->ie_len); priv->scan_channel = 0; queue_delayed_work(priv->work_thread, &priv->scan_work, msecs_to_jiffies(50)); priv->scan_req = request; priv->internal_scan = internal; lbs_deb_leave(LBS_DEB_CFG80211); } static int lbs_cfg_scan(struct wiphy *wiphy, 
struct net_device *dev, struct cfg80211_scan_request *request) { struct lbs_private *priv = wiphy_priv(wiphy); int ret = 0; lbs_deb_enter(LBS_DEB_CFG80211); if (priv->scan_req || delayed_work_pending(&priv->scan_work)) { /* old scan request not yet processed */ ret = -EAGAIN; goto out; } _internal_start_scan(priv, false, request); if (priv->surpriseremoved) ret = -EIO; out: lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); return ret; } /* * Events */ void lbs_send_disconnect_notification(struct lbs_private *priv) { lbs_deb_enter(LBS_DEB_CFG80211); cfg80211_disconnected(priv->dev, 0, NULL, 0, GFP_KERNEL); lbs_deb_leave(LBS_DEB_CFG80211); } void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event) { lbs_deb_enter(LBS_DEB_CFG80211); cfg80211_michael_mic_failure(priv->dev, priv->assoc_bss, event == MACREG_INT_CODE_MIC_ERR_MULTICAST ? NL80211_KEYTYPE_GROUP : NL80211_KEYTYPE_PAIRWISE, -1, NULL, GFP_KERNEL); lbs_deb_leave(LBS_DEB_CFG80211); } /* * Connect/disconnect */ /* * This removes all WEP keys */ static int lbs_remove_wep_keys(struct lbs_private *priv) { struct cmd_ds_802_11_set_wep cmd; int ret; lbs_deb_enter(LBS_DEB_CFG80211); memset(&cmd, 0, sizeof(cmd)); cmd.hdr.size = cpu_to_le16(sizeof(cmd)); cmd.keyindex = cpu_to_le16(priv->wep_tx_key); cmd.action = cpu_to_le16(CMD_ACT_REMOVE); ret = lbs_cmd_with_response(priv, CMD_802_11_SET_WEP, &cmd); lbs_deb_leave(LBS_DEB_CFG80211); return ret; } /* * Set WEP keys */ static int lbs_set_wep_keys(struct lbs_private *priv) { struct cmd_ds_802_11_set_wep cmd; int i; int ret; lbs_deb_enter(LBS_DEB_CFG80211); /* * command 13 00 * size 50 00 * sequence xx xx * result 00 00 * action 02 00 ACT_ADD * transmit key 00 00 * type for key 1 01 WEP40 * type for key 2 00 * type for key 3 00 * type for key 4 00 * key 1 39 39 39 39 39 00 00 00 * 00 00 00 00 00 00 00 00 * key 2 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 * key 3 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 * key 4 00 00 00 00 00 00 00 00 */ if (priv->wep_key_len[0] || priv->wep_key_len[1] || priv->wep_key_len[2] || priv->wep_key_len[3]) { /* Only set wep keys if we have at least one of them */ memset(&cmd, 0, sizeof(cmd)); cmd.hdr.size = cpu_to_le16(sizeof(cmd)); cmd.keyindex = cpu_to_le16(priv->wep_tx_key); cmd.action = cpu_to_le16(CMD_ACT_ADD); for (i = 0; i < 4; i++) { switch (priv->wep_key_len[i]) { case WLAN_KEY_LEN_WEP40: cmd.keytype[i] = CMD_TYPE_WEP_40_BIT; break; case WLAN_KEY_LEN_WEP104: cmd.keytype[i] = CMD_TYPE_WEP_104_BIT; break; default: cmd.keytype[i] = 0; break; } memcpy(cmd.keymaterial[i], priv->wep_key[i], priv->wep_key_len[i]); } ret = lbs_cmd_with_response(priv, CMD_802_11_SET_WEP, &cmd); } else { /* Otherwise remove all wep keys */ ret = lbs_remove_wep_keys(priv); } lbs_deb_leave(LBS_DEB_CFG80211); return ret; } /* * Enable/Disable RSN status */ static int lbs_enable_rsn(struct lbs_private *priv, int enable) { struct cmd_ds_802_11_enable_rsn cmd; int ret; lbs_deb_enter_args(LBS_DEB_CFG80211, "%d", enable); /* * cmd 2f 00 * size 0c 00 * sequence xx xx * result 00 00 * action 01 00 ACT_SET * enable 01 00 */ memset(&cmd, 0, sizeof(cmd)); cmd.hdr.size = cpu_to_le16(sizeof(cmd)); cmd.action = cpu_to_le16(CMD_ACT_SET); cmd.enable = cpu_to_le16(enable); ret = lbs_cmd_with_response(priv, CMD_802_11_ENABLE_RSN, &cmd); lbs_deb_leave(LBS_DEB_CFG80211); return ret; } /* * Set WPA/WPA2 key material */ /* * like "struct cmd_ds_802_11_key_material", but with cmd_header. 
Once we * get rid of WEXT, this should go into host.h */ struct cmd_key_material { struct cmd_header hdr; __le16 action; struct MrvlIEtype_keyParamSet param; } __packed; static int lbs_set_key_material(struct lbs_private *priv, int key_type, int key_info, u8 *key, u16 key_len) { struct cmd_key_material cmd; int ret; lbs_deb_enter(LBS_DEB_CFG80211); /* * Example for WPA (TKIP): * * cmd 5e 00 * size 34 00 * sequence xx xx * result 00 00 * action 01 00 * TLV type 00 01 key param * length 00 26 * key type 01 00 TKIP * key info 06 00 UNICAST | ENABLED * key len 20 00 * key 32 bytes */ memset(&cmd, 0, sizeof(cmd)); cmd.hdr.size = cpu_to_le16(sizeof(cmd)); cmd.action = cpu_to_le16(CMD_ACT_SET); cmd.param.type = cpu_to_le16(TLV_TYPE_KEY_MATERIAL); cmd.param.length = cpu_to_le16(sizeof(cmd.param) - 4); cmd.param.keytypeid = cpu_to_le16(key_type); cmd.param.keyinfo = cpu_to_le16(key_info); cmd.param.keylen = cpu_to_le16(key_len); if (key && key_len) memcpy(cmd.param.key, key, key_len); ret = lbs_cmd_with_response(priv, CMD_802_11_KEY_MATERIAL, &cmd); lbs_deb_leave(LBS_DEB_CFG80211); return ret; } /* * Sets the auth type (open, shared, etc) in the firmware. That * we use CMD_802_11_AUTHENTICATE is misleading, this firmware * command doesn't send an authentication frame at all, it just * stores the auth_type. */ static int lbs_set_authtype(struct lbs_private *priv, struct cfg80211_connect_params *sme) { struct cmd_ds_802_11_authenticate cmd; int ret; lbs_deb_enter_args(LBS_DEB_CFG80211, "%d", sme->auth_type); /* * cmd 11 00 * size 19 00 * sequence xx xx * result 00 00 * BSS id 00 13 19 80 da 30 * auth type 00 * reserved 00 00 00 00 00 00 00 00 00 00 */ memset(&cmd, 0, sizeof(cmd)); cmd.hdr.size = cpu_to_le16(sizeof(cmd)); if (sme->bssid) memcpy(cmd.bssid, sme->bssid, ETH_ALEN); /* convert auth_type */ ret = lbs_auth_to_authtype(sme->auth_type); if (ret < 0) goto done; cmd.authtype = ret; ret = lbs_cmd_with_response(priv, CMD_802_11_AUTHENTICATE, &cmd); done: lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); return ret; } /* * Create association request */ #define LBS_ASSOC_MAX_CMD_SIZE \ (sizeof(struct cmd_ds_802_11_associate) \ - 512 /* cmd_ds_802_11_associate.iebuf */ \ + LBS_MAX_SSID_TLV_SIZE \ + LBS_MAX_CHANNEL_TLV_SIZE \ + LBS_MAX_CF_PARAM_TLV_SIZE \ + LBS_MAX_AUTH_TYPE_TLV_SIZE \ + LBS_MAX_WPA_TLV_SIZE) static int lbs_associate(struct lbs_private *priv, struct cfg80211_bss *bss, struct cfg80211_connect_params *sme) { struct cmd_ds_802_11_associate_response *resp; struct cmd_ds_802_11_associate *cmd = kzalloc(LBS_ASSOC_MAX_CMD_SIZE, GFP_KERNEL); const u8 *ssid_eid; size_t len, resp_ie_len; int status; int ret; u8 *pos = &(cmd->iebuf[0]); u8 *tmp; lbs_deb_enter(LBS_DEB_CFG80211); if (!cmd) { ret = -ENOMEM; goto done; } /* * cmd 50 00 * length 34 00 * sequence xx xx * result 00 00 * BSS id 00 13 19 80 da 30 * capabilities 11 00 * listen interval 0a 00 * beacon interval 00 00 * DTIM period 00 * TLVs xx (up to 512 bytes) */ cmd->hdr.command = cpu_to_le16(CMD_802_11_ASSOCIATE); /* Fill in static fields */ memcpy(cmd->bssid, bss->bssid, ETH_ALEN); cmd->listeninterval = cpu_to_le16(MRVDRV_DEFAULT_LISTEN_INTERVAL); cmd->capability = cpu_to_le16(bss->capability); /* add SSID TLV */ ssid_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SSID); if (ssid_eid) pos += lbs_add_ssid_tlv(pos, ssid_eid + 2, ssid_eid[1]); else lbs_deb_assoc("no SSID\n"); /* add DS param TLV */ if (bss->channel) pos += lbs_add_channel_tlv(pos, bss->channel->hw_value); else lbs_deb_assoc("no channel\n"); /* add (empty) CF param TLV */ pos 
+= lbs_add_cf_param_tlv(pos); /* add rates TLV */ tmp = pos + 4; /* skip Marvell IE header */ pos += lbs_add_common_rates_tlv(pos, bss); lbs_deb_hex(LBS_DEB_ASSOC, "Common Rates", tmp, pos - tmp); /* add auth type TLV */ if (MRVL_FW_MAJOR_REV(priv->fwrelease) >= 9) pos += lbs_add_auth_type_tlv(pos, sme->auth_type); /* add WPA/WPA2 TLV */ if (sme->ie && sme->ie_len) pos += lbs_add_wpa_tlv(pos, sme->ie, sme->ie_len); len = (sizeof(*cmd) - sizeof(cmd->iebuf)) + (u16)(pos - (u8 *) &cmd->iebuf); cmd->hdr.size = cpu_to_le16(len); lbs_deb_hex(LBS_DEB_ASSOC, "ASSOC_CMD", (u8 *) cmd, le16_to_cpu(cmd->hdr.size)); /* store for later use */ memcpy(priv->assoc_bss, bss->bssid, ETH_ALEN); ret = lbs_cmd_with_response(priv, CMD_802_11_ASSOCIATE, cmd); if (ret) goto done; /* generate connect message to cfg80211 */ resp = (void *) cmd; /* recast for easier field access */ status = le16_to_cpu(resp->statuscode); /* Older FW versions map the IEEE 802.11 Status Code in the association * response to the following values returned in resp->statuscode: * * IEEE Status Code Marvell Status Code * 0 -> 0x0000 ASSOC_RESULT_SUCCESS * 13 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED * 14 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED * 15 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED * 16 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED * others -> 0x0003 ASSOC_RESULT_REFUSED * * Other response codes: * 0x0001 -> ASSOC_RESULT_INVALID_PARAMETERS (unused) * 0x0002 -> ASSOC_RESULT_TIMEOUT (internal timer expired waiting for * association response from the AP) */ if (MRVL_FW_MAJOR_REV(priv->fwrelease) <= 8) { switch (status) { case 0: break; case 1: lbs_deb_assoc("invalid association parameters\n"); status = WLAN_STATUS_CAPS_UNSUPPORTED; break; case 2: lbs_deb_assoc("timer expired while waiting for AP\n"); status = WLAN_STATUS_AUTH_TIMEOUT; break; case 3: lbs_deb_assoc("association refused by AP\n"); status = WLAN_STATUS_ASSOC_DENIED_UNSPEC; break; case 4: lbs_deb_assoc("authentication refused by AP\n"); status = WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION; break; default: lbs_deb_assoc("association failure %d\n", status); /* v5 OLPC firmware does return the AP status code if * it's not one of the values above. Let that through. 
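* (e.g. a v5 firmware may hand us the raw IEEE status code 17,
* "AP unable to handle additional stations", which cfg80211 can
* report to userspace unchanged)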
*/ break; } } lbs_deb_assoc("status %d, statuscode 0x%04x, capability 0x%04x, " "aid 0x%04x\n", status, le16_to_cpu(resp->statuscode), le16_to_cpu(resp->capability), le16_to_cpu(resp->aid)); resp_ie_len = le16_to_cpu(resp->hdr.size) - sizeof(resp->hdr) - 6; cfg80211_connect_result(priv->dev, priv->assoc_bss, sme->ie, sme->ie_len, resp->iebuf, resp_ie_len, status, GFP_KERNEL); if (status == 0) { /* TODO: get rid of priv->connect_status */ priv->connect_status = LBS_CONNECTED; netif_carrier_on(priv->dev); if (!priv->tx_pending_len) netif_tx_wake_all_queues(priv->dev); } done: lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); return ret; } static struct cfg80211_scan_request * _new_connect_scan_req(struct wiphy *wiphy, struct cfg80211_connect_params *sme) { struct cfg80211_scan_request *creq = NULL; int i, n_channels = 0; enum ieee80211_band band; for (band = 0; band < IEEE80211_NUM_BANDS; band++) { if (wiphy->bands[band]) n_channels += wiphy->bands[band]->n_channels; } creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) + n_channels * sizeof(void *), GFP_ATOMIC); if (!creq) return NULL; /* SSIDs come after channels */ creq->ssids = (void *)&creq->channels[n_channels]; creq->n_channels = n_channels; creq->n_ssids = 1; /* Scan all available channels */ i = 0; for (band = 0; band < IEEE80211_NUM_BANDS; band++) { int j; if (!wiphy->bands[band]) continue; for (j = 0; j < wiphy->bands[band]->n_channels; j++) { /* ignore disabled channels */ if (wiphy->bands[band]->channels[j].flags & IEEE80211_CHAN_DISABLED) continue; creq->channels[i] = &wiphy->bands[band]->channels[j]; i++; } } if (i) { /* Set real number of channels specified in creq->channels[] */ creq->n_channels = i; /* Scan for the SSID we're going to connect to */ memcpy(creq->ssids[0].ssid, sme->ssid, sme->ssid_len); creq->ssids[0].ssid_len = sme->ssid_len; } else { /* No channels found... */ kfree(creq); creq = NULL; } return creq; } static int lbs_cfg_connect(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_connect_params *sme) { struct lbs_private *priv = wiphy_priv(wiphy); struct cfg80211_bss *bss = NULL; int ret = 0; u8 preamble = RADIO_PREAMBLE_SHORT; lbs_deb_enter(LBS_DEB_CFG80211); if (!sme->bssid) { /* Run a scan if one isn't in-progress already and if the last * scan was done more than 2 seconds ago. 
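* (concretely: the check below treats results younger than
* priv->last_scan + 2 * HZ jiffies as fresh enough to associate
* from without rescanning)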
*/ if (priv->scan_req == NULL && time_after(jiffies, priv->last_scan + (2 * HZ))) { struct cfg80211_scan_request *creq; creq = _new_connect_scan_req(wiphy, sme); if (!creq) { ret = -EINVAL; goto done; } lbs_deb_assoc("assoc: scanning for compatible AP\n"); _internal_start_scan(priv, true, creq); } /* Wait for any in-progress scan to complete */ lbs_deb_assoc("assoc: waiting for scan to complete\n"); wait_event_interruptible_timeout(priv->scan_q, (priv->scan_req == NULL), (15 * HZ)); lbs_deb_assoc("assoc: scanning completed\n"); } /* Find the BSS we want using available scan results */ bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid, sme->ssid, sme->ssid_len, WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); if (!bss) { wiphy_err(wiphy, "assoc: bss %pM not in scan results\n", sme->bssid); ret = -ENOENT; goto done; } lbs_deb_assoc("trying %pM\n", bss->bssid); lbs_deb_assoc("cipher 0x%x, key index %d, key len %d\n", sme->crypto.cipher_group, sme->key_idx, sme->key_len); /* As this is a new connection, clear locally stored WEP keys */ priv->wep_tx_key = 0; memset(priv->wep_key, 0, sizeof(priv->wep_key)); memset(priv->wep_key_len, 0, sizeof(priv->wep_key_len)); /* set/remove WEP keys */ switch (sme->crypto.cipher_group) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: /* Store provided WEP keys in priv-> */ priv->wep_tx_key = sme->key_idx; priv->wep_key_len[sme->key_idx] = sme->key_len; memcpy(priv->wep_key[sme->key_idx], sme->key, sme->key_len); /* Set WEP keys and WEP mode */ lbs_set_wep_keys(priv); priv->mac_control |= CMD_ACT_MAC_WEP_ENABLE; lbs_set_mac_control(priv); /* No RSN mode for WEP */ lbs_enable_rsn(priv, 0); break; case 0: /* there's no WLAN_CIPHER_SUITE_NONE definition */ /* * If we have no WEP, no WPA and no WPA2, * we remove all keys like in the WPA/WPA2 setup, * we just don't set RSN. * * Therefore: fall-through */ case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_CCMP: /* Remove WEP keys and WEP mode */ lbs_remove_wep_keys(priv); priv->mac_control &= ~CMD_ACT_MAC_WEP_ENABLE; lbs_set_mac_control(priv); /* clear the WPA/WPA2 keys */ lbs_set_key_material(priv, KEY_TYPE_ID_WEP, /* doesn't matter */ KEY_INFO_WPA_UNICAST, NULL, 0); lbs_set_key_material(priv, KEY_TYPE_ID_WEP, /* doesn't matter */ KEY_INFO_WPA_MCAST, NULL, 0); /* RSN mode for WPA/WPA2 */ lbs_enable_rsn(priv, sme->crypto.cipher_group != 0); break; default: wiphy_err(wiphy, "unsupported cipher group 0x%x\n", sme->crypto.cipher_group); ret = -ENOTSUPP; goto done; } lbs_set_authtype(priv, sme); lbs_set_radio(priv, preamble, 1); /* Do the actual association */ ret = lbs_associate(priv, bss, sme); done: if (bss) cfg80211_put_bss(bss); lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); return ret; } static int lbs_cfg_disconnect(struct wiphy *wiphy, struct net_device *dev, u16 reason_code) { struct lbs_private *priv = wiphy_priv(wiphy); struct cmd_ds_802_11_deauthenticate cmd; lbs_deb_enter_args(LBS_DEB_CFG80211, "reason_code %d", reason_code); /* store for lbs_cfg_ret_disconnect() */ priv->disassoc_reason = reason_code; memset(&cmd, 0, sizeof(cmd)); cmd.hdr.size = cpu_to_le16(sizeof(cmd)); /* Mildly ugly to use a locally stored copy of my own BSSID ... 
*/ memcpy(cmd.macaddr, &priv->assoc_bss, ETH_ALEN); cmd.reasoncode = cpu_to_le16(reason_code); if (lbs_cmd_with_response(priv, CMD_802_11_DEAUTHENTICATE, &cmd)) return -EFAULT; cfg80211_disconnected(priv->dev, priv->disassoc_reason, NULL, 0, GFP_KERNEL); priv->connect_status = LBS_DISCONNECTED; return 0; } static int lbs_cfg_set_default_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, bool unicast, bool multicast) { struct lbs_private *priv = wiphy_priv(wiphy); lbs_deb_enter(LBS_DEB_CFG80211); if (key_index != priv->wep_tx_key) { lbs_deb_assoc("set_default_key: to %d\n", key_index); priv->wep_tx_key = key_index; lbs_set_wep_keys(priv); } return 0; } static int lbs_cfg_add_key(struct wiphy *wiphy, struct net_device *netdev, u8 idx, bool pairwise, const u8 *mac_addr, struct key_params *params) { struct lbs_private *priv = wiphy_priv(wiphy); u16 key_info; u16 key_type; int ret = 0; lbs_deb_enter(LBS_DEB_CFG80211); lbs_deb_assoc("add_key: cipher 0x%x, mac_addr %pM\n", params->cipher, mac_addr); lbs_deb_assoc("add_key: key index %d, key len %d\n", idx, params->key_len); if (params->key_len) lbs_deb_hex(LBS_DEB_CFG80211, "KEY", params->key, params->key_len); lbs_deb_assoc("add_key: seq len %d\n", params->seq_len); if (params->seq_len) lbs_deb_hex(LBS_DEB_CFG80211, "SEQ", params->seq, params->seq_len); switch (params->cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: /* actually compare if something has changed ... */ if ((priv->wep_key_len[idx] != params->key_len) || memcmp(priv->wep_key[idx], params->key, params->key_len) != 0) { priv->wep_key_len[idx] = params->key_len; memcpy(priv->wep_key[idx], params->key, params->key_len); lbs_set_wep_keys(priv); } break; case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_CCMP: key_info = KEY_INFO_WPA_ENABLED | ((idx == 0) ? KEY_INFO_WPA_UNICAST : KEY_INFO_WPA_MCAST); key_type = (params->cipher == WLAN_CIPHER_SUITE_TKIP) ? KEY_TYPE_ID_TKIP : KEY_TYPE_ID_AES; lbs_set_key_material(priv, key_type, key_info, params->key, params->key_len); break; default: wiphy_err(wiphy, "unhandled cipher 0x%x\n", params->cipher); ret = -ENOTSUPP; break; } return ret; } static int lbs_cfg_del_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, bool pairwise, const u8 *mac_addr) { lbs_deb_enter(LBS_DEB_CFG80211); lbs_deb_assoc("del_key: key_idx %d, mac_addr %pM\n", key_index, mac_addr); #ifdef TODO struct lbs_private *priv = wiphy_priv(wiphy); /* * I think we can keep this a NO-OP, because: * - we clear all keys whenever we do lbs_cfg_connect() anyway * - neither "iw" nor "wpa_supplicant" will call this during * an ongoing connection * - TODO: but I have to check if this is still true when * I set the AP to periodic re-keying * - we've not kzalloc()ed anything when we added a key at * lbs_cfg_connect() or lbs_cfg_add_key(). * * This means lbs_cfg_del_key() only gets called at disconnect time, * where we'd just waste time deleting a key that is not going * to be used anyway. 
*/ if (key_index < 3 && priv->wep_key_len[key_index]) { priv->wep_key_len[key_index] = 0; lbs_set_wep_keys(priv); } #endif return 0; } /* * Get station */ static int lbs_cfg_get_station(struct wiphy *wiphy, struct net_device *dev, u8 *mac, struct station_info *sinfo) { struct lbs_private *priv = wiphy_priv(wiphy); s8 signal, noise; int ret; size_t i; lbs_deb_enter(LBS_DEB_CFG80211); sinfo->filled |= STATION_INFO_TX_BYTES | STATION_INFO_TX_PACKETS | STATION_INFO_RX_BYTES | STATION_INFO_RX_PACKETS; sinfo->tx_bytes = priv->dev->stats.tx_bytes; sinfo->tx_packets = priv->dev->stats.tx_packets; sinfo->rx_bytes = priv->dev->stats.rx_bytes; sinfo->rx_packets = priv->dev->stats.rx_packets; /* Get current RSSI */ ret = lbs_get_rssi(priv, &signal, &noise); if (ret == 0) { sinfo->signal = signal; sinfo->filled |= STATION_INFO_SIGNAL; } /* Convert priv->cur_rate from hw_value to NL80211 value */ for (i = 0; i < ARRAY_SIZE(lbs_rates); i++) { if (priv->cur_rate == lbs_rates[i].hw_value) { sinfo->txrate.legacy = lbs_rates[i].bitrate; sinfo->filled |= STATION_INFO_TX_BITRATE; break; } } return 0; } /* * "Site survey", here just current channel and noise level */ static int lbs_get_survey(struct wiphy *wiphy, struct net_device *dev, int idx, struct survey_info *survey) { struct lbs_private *priv = wiphy_priv(wiphy); s8 signal, noise; int ret; if (idx != 0) return -ENOENT; lbs_deb_enter(LBS_DEB_CFG80211); survey->channel = ieee80211_get_channel(wiphy, ieee80211_channel_to_frequency(priv->channel, IEEE80211_BAND_2GHZ)); ret = lbs_get_rssi(priv, &signal, &noise); if (ret == 0) { survey->filled = SURVEY_INFO_NOISE_DBM; survey->noise = noise; } lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); return ret; } /* * Change interface */ static int lbs_change_intf(struct wiphy *wiphy, struct net_device *dev, enum nl80211_iftype type, u32 *flags, struct vif_params *params) { struct lbs_private *priv = wiphy_priv(wiphy); int ret = 0; lbs_deb_enter(LBS_DEB_CFG80211); switch (type) { case NL80211_IFTYPE_MONITOR: ret = lbs_set_monitor_mode(priv, 1); break; case NL80211_IFTYPE_STATION: if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) ret = lbs_set_monitor_mode(priv, 0); if (!ret) ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE, 1); break; case NL80211_IFTYPE_ADHOC: if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) ret = lbs_set_monitor_mode(priv, 0); if (!ret) ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE, 2); break; default: ret = -ENOTSUPP; } if (!ret) priv->wdev->iftype = type; lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); return ret; } /* * IBSS (Ad-Hoc) */ /* * The firmware needs the following bits masked out of the beacon-derived * capability field when associating/joining to a BSS: * 9 (QoS), 11 (APSD), 12 (unused), 14 (unused), 15 (unused) */ #define CAPINFO_MASK (~(0xda00)) static void lbs_join_post(struct lbs_private *priv, struct cfg80211_ibss_params *params, u8 *bssid, u16 capability) { u8 fake_ie[2 + IEEE80211_MAX_SSID_LEN + /* ssid */ 2 + 4 + /* basic rates */ 2 + 1 + /* DS parameter */ 2 + 2 + /* atim */ 2 + 8]; /* extended rates */ u8 *fake = fake_ie; lbs_deb_enter(LBS_DEB_CFG80211); /* * For cfg80211_inform_bss, we'll need a fake IE, as we can't get * the real IE from the firmware. So we fabricate a fake IE based on * what the firmware actually sends (sniffed with wireshark). 
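* As a worked example (hypothetical SSID "test", channel 1, ATIM
* window 0), the buffer built below would contain:
* 00 04 74 65 73 74 SSID "test"
* 01 04 82 84 8b 96 basic rates 1, 2, 5.5, 11 MBit
* 03 01 01 DS parameter set, channel 1
* 06 02 00 00 IBSS parameter set, ATIM = 0
* 32 08 0c 12 18 24 30 48 60 6c extended rates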
*/ /* Fake SSID IE */ *fake++ = WLAN_EID_SSID; *fake++ = params->ssid_len; memcpy(fake, params->ssid, params->ssid_len); fake += params->ssid_len; /* Fake supported basic rates IE */ *fake++ = WLAN_EID_SUPP_RATES; *fake++ = 4; *fake++ = 0x82; *fake++ = 0x84; *fake++ = 0x8b; *fake++ = 0x96; /* Fake DS channel IE */ *fake++ = WLAN_EID_DS_PARAMS; *fake++ = 1; *fake++ = params->channel->hw_value; /* Fake IBSS params IE */ *fake++ = WLAN_EID_IBSS_PARAMS; *fake++ = 2; *fake++ = 0; /* ATIM=0 */ *fake++ = 0; /* Fake extended rates IE, TODO: don't add this for 802.11b only, * but I don't know how this could be checked */ *fake++ = WLAN_EID_EXT_SUPP_RATES; *fake++ = 8; *fake++ = 0x0c; *fake++ = 0x12; *fake++ = 0x18; *fake++ = 0x24; *fake++ = 0x30; *fake++ = 0x48; *fake++ = 0x60; *fake++ = 0x6c; lbs_deb_hex(LBS_DEB_CFG80211, "IE", fake_ie, fake - fake_ie); cfg80211_inform_bss(priv->wdev->wiphy, params->channel, bssid, 0, capability, params->beacon_interval, fake_ie, fake - fake_ie, 0, GFP_KERNEL); memcpy(priv->wdev->ssid, params->ssid, params->ssid_len); priv->wdev->ssid_len = params->ssid_len; cfg80211_ibss_joined(priv->dev, bssid, GFP_KERNEL); /* TODO: consider doing this at MACREG_INT_CODE_LINK_SENSED time */ priv->connect_status = LBS_CONNECTED; netif_carrier_on(priv->dev); if (!priv->tx_pending_len) netif_wake_queue(priv->dev); lbs_deb_leave(LBS_DEB_CFG80211); } static int lbs_ibss_join_existing(struct lbs_private *priv, struct cfg80211_ibss_params *params, struct cfg80211_bss *bss) { const u8 *rates_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SUPP_RATES); struct cmd_ds_802_11_ad_hoc_join cmd; u8 preamble = RADIO_PREAMBLE_SHORT; int ret = 0; lbs_deb_enter(LBS_DEB_CFG80211); /* TODO: set preamble based on scan result */ ret = lbs_set_radio(priv, preamble, 1); if (ret) goto out; /* * Example CMD_802_11_AD_HOC_JOIN command: * * command 2c 00 CMD_802_11_AD_HOC_JOIN * size 65 00 * sequence xx xx * result 00 00 * bssid 02 27 27 97 2f 96 * ssid 49 42 53 53 00 00 00 00 * 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 * type 02 CMD_BSS_TYPE_IBSS * beacon period 64 00 * dtim period 00 * timestamp 00 00 00 00 00 00 00 00 * localtime 00 00 00 00 00 00 00 00 * IE DS 03 * IE DS len 01 * IE DS channel 01 * reserved 00 00 00 00 * IE IBSS 06 * IE IBSS len 02 * IE IBSS atim 00 00 * reserved 00 00 00 00 * capability 02 00 * rates 82 84 8b 96 0c 12 18 24 30 48 60 6c 00 * fail timeout ff 00 * probe delay 00 00 */ memset(&cmd, 0, sizeof(cmd)); cmd.hdr.size = cpu_to_le16(sizeof(cmd)); memcpy(cmd.bss.bssid, bss->bssid, ETH_ALEN); memcpy(cmd.bss.ssid, params->ssid, params->ssid_len); cmd.bss.type = CMD_BSS_TYPE_IBSS; cmd.bss.beaconperiod = cpu_to_le16(params->beacon_interval); cmd.bss.ds.header.id = WLAN_EID_DS_PARAMS; cmd.bss.ds.header.len = 1; cmd.bss.ds.channel = params->channel->hw_value; cmd.bss.ibss.header.id = WLAN_EID_IBSS_PARAMS; cmd.bss.ibss.header.len = 2; cmd.bss.ibss.atimwindow = 0; cmd.bss.capability = cpu_to_le16(bss->capability & CAPINFO_MASK); /* set rates to the intersection of our rates and the rates in the bss */ if (!rates_eid) { lbs_add_rates(cmd.bss.rates); } else { int hw, i; u8 rates_max = rates_eid[1]; u8 *rates = cmd.bss.rates; for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) { u8 hw_rate = lbs_rates[hw].bitrate / 5; for (i = 0; i < rates_max; i++) { if (hw_rate == (rates_eid[i+2] & 0x7f)) { u8 rate = rates_eid[i+2]; if (rate == 0x02 || rate == 0x04 || rate == 0x0b || rate == 0x16) rate |= 0x80; *rates++ = rate; } } } } /* Only v8 and below support setting this */ if 
(MRVL_FW_MAJOR_REV(priv->fwrelease) <= 8) { cmd.failtimeout = cpu_to_le16(MRVDRV_ASSOCIATION_TIME_OUT); cmd.probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME); } ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_JOIN, &cmd); if (ret) goto out; /* * This is a sample response to CMD_802_11_AD_HOC_JOIN: * * response 2c 80 * size 09 00 * sequence xx xx * result 00 00 * reserved 00 */ lbs_join_post(priv, params, bss->bssid, bss->capability); out: lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); return ret; } static int lbs_ibss_start_new(struct lbs_private *priv, struct cfg80211_ibss_params *params) { struct cmd_ds_802_11_ad_hoc_start cmd; struct cmd_ds_802_11_ad_hoc_result *resp = (struct cmd_ds_802_11_ad_hoc_result *) &cmd; u8 preamble = RADIO_PREAMBLE_SHORT; int ret = 0; u16 capability; lbs_deb_enter(LBS_DEB_CFG80211); ret = lbs_set_radio(priv, preamble, 1); if (ret) goto out; /* * Example CMD_802_11_AD_HOC_START command: * * command 2b 00 CMD_802_11_AD_HOC_START * size b1 00 * sequence xx xx * result 00 00 * ssid 54 45 53 54 00 00 00 00 * 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 * bss type 02 * beacon period 64 00 * dtim period 00 * IE IBSS 06 * IE IBSS len 02 * IE IBSS atim 00 00 * reserved 00 00 00 00 * IE DS 03 * IE DS len 01 * IE DS channel 01 * reserved 00 00 00 00 * probe delay 00 00 * capability 02 00 * rates 82 84 8b 96 (basic rates which have bit 7 set) * 0c 12 18 24 30 48 60 6c * padding 100 bytes */ memset(&cmd, 0, sizeof(cmd)); cmd.hdr.size = cpu_to_le16(sizeof(cmd)); memcpy(cmd.ssid, params->ssid, params->ssid_len); cmd.bsstype = CMD_BSS_TYPE_IBSS; cmd.beaconperiod = cpu_to_le16(params->beacon_interval); cmd.ibss.header.id = WLAN_EID_IBSS_PARAMS; cmd.ibss.header.len = 2; cmd.ibss.atimwindow = 0; cmd.ds.header.id = WLAN_EID_DS_PARAMS; cmd.ds.header.len = 1; cmd.ds.channel = params->channel->hw_value; /* Only v8 and below support setting probe delay */ if (MRVL_FW_MAJOR_REV(priv->fwrelease) <= 8) cmd.probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME); /* TODO: mix in WLAN_CAPABILITY_PRIVACY */ capability = WLAN_CAPABILITY_IBSS; cmd.capability = cpu_to_le16(capability); lbs_add_rates(cmd.rates); ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_START, &cmd); if (ret) goto out; /* * This is a sample response to CMD_802_11_AD_HOC_START: * * response 2b 80 * size 14 00 * sequence xx xx * result 00 00 * reserved 00 * bssid 02 2b 7b 0f 86 0e */ lbs_join_post(priv, params, resp->bssid, capability); out: lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); return ret; } static int lbs_join_ibss(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ibss_params *params) { struct lbs_private *priv = wiphy_priv(wiphy); int ret = 0; struct cfg80211_bss *bss; DECLARE_SSID_BUF(ssid_buf); lbs_deb_enter(LBS_DEB_CFG80211); if (!params->channel) { ret = -ENOTSUPP; goto out; } ret = lbs_set_channel(priv, params->channel->hw_value); if (ret) goto out; /* Search if someone is beaconing. 
This assumes that the * bss list is populated already */ bss = cfg80211_get_bss(wiphy, params->channel, params->bssid, params->ssid, params->ssid_len, WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS); if (bss) { ret = lbs_ibss_join_existing(priv, params, bss); cfg80211_put_bss(bss); } else ret = lbs_ibss_start_new(priv, params); out: lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); return ret; } static int lbs_leave_ibss(struct wiphy *wiphy, struct net_device *dev) { struct lbs_private *priv = wiphy_priv(wiphy); struct cmd_ds_802_11_ad_hoc_stop cmd; int ret = 0; lbs_deb_enter(LBS_DEB_CFG80211); memset(&cmd, 0, sizeof(cmd)); cmd.hdr.size = cpu_to_le16(sizeof(cmd)); ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_STOP, &cmd); /* TODO: consider doing this at MACREG_INT_CODE_ADHOC_BCN_LOST time */ lbs_mac_event_disconnected(priv); lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); return ret; } /* * Initialization */ static struct cfg80211_ops lbs_cfg80211_ops = { .set_channel = lbs_cfg_set_channel, .scan = lbs_cfg_scan, .connect = lbs_cfg_connect, .disconnect = lbs_cfg_disconnect, .add_key = lbs_cfg_add_key, .del_key = lbs_cfg_del_key, .set_default_key = lbs_cfg_set_default_key, .get_station = lbs_cfg_get_station, .dump_survey = lbs_get_survey, .change_virtual_intf = lbs_change_intf, .join_ibss = lbs_join_ibss, .leave_ibss = lbs_leave_ibss, }; /* * At this time lbs_private *priv doesn't even exist, so we just allocate * memory and don't initialize the wiphy further. This is postponed until we * can talk to the firmware and happens at registration time in * lbs_cfg_wiphy_register(). */ struct wireless_dev *lbs_cfg_alloc(struct device *dev) { int ret = 0; struct wireless_dev *wdev; lbs_deb_enter(LBS_DEB_CFG80211); wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL); if (!wdev) { dev_err(dev, "cannot allocate wireless device\n"); return ERR_PTR(-ENOMEM); } wdev->wiphy = wiphy_new(&lbs_cfg80211_ops, sizeof(struct lbs_private)); if (!wdev->wiphy) { dev_err(dev, "cannot allocate wiphy\n"); ret = -ENOMEM; goto err_wiphy_new; } lbs_deb_leave(LBS_DEB_CFG80211); return wdev; err_wiphy_new: kfree(wdev); lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); return ERR_PTR(ret); } static void lbs_cfg_set_regulatory_hint(struct lbs_private *priv) { struct region_code_mapping { const char *cn; int code; }; /* Section 5.17.2 */ static const struct region_code_mapping regmap[] = { {"US ", 0x10}, /* US FCC */ {"CA ", 0x20}, /* Canada */ {"EU ", 0x30}, /* ETSI */ {"ES ", 0x31}, /* Spain */ {"FR ", 0x32}, /* France */ {"JP ", 0x40}, /* Japan */ }; size_t i; lbs_deb_enter(LBS_DEB_CFG80211); for (i = 0; i < ARRAY_SIZE(regmap); i++) if (regmap[i].code == priv->regioncode) { regulatory_hint(priv->wdev->wiphy, regmap[i].cn); break; } lbs_deb_leave(LBS_DEB_CFG80211); } /* * This function gets called after lbs_setup_firmware() has determined the * firmware capabilities. So we can set up the wiphy according to our * hardware/firmware. 
*/ int lbs_cfg_register(struct lbs_private *priv) { struct wireless_dev *wdev = priv->wdev; int ret; lbs_deb_enter(LBS_DEB_CFG80211); wdev->wiphy->max_scan_ssids = 1; wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC); if (lbs_rtap_supported(priv)) wdev->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR); wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &lbs_band_2ghz; /* * We could check priv->fwcapinfo && FW_CAPINFO_WPA, but I have * never seen a firmware without WPA */ wdev->wiphy->cipher_suites = cipher_suites; wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); wdev->wiphy->reg_notifier = lbs_reg_notifier; ret = wiphy_register(wdev->wiphy); if (ret < 0) pr_err("cannot register wiphy device\n"); priv->wiphy_registered = true; ret = register_netdev(priv->dev); if (ret) pr_err("cannot register network device\n"); INIT_DELAYED_WORK(&priv->scan_work, lbs_scan_worker); lbs_cfg_set_regulatory_hint(priv); lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); return ret; } int lbs_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) { struct lbs_private *priv = wiphy_priv(wiphy); int ret; lbs_deb_enter_args(LBS_DEB_CFG80211, "cfg80211 regulatory domain " "callback for domain %c%c\n", request->alpha2[0], request->alpha2[1]); ret = lbs_set_11d_domain_info(priv, request, wiphy->bands); lbs_deb_leave(LBS_DEB_CFG80211); return ret; } void lbs_scan_deinit(struct lbs_private *priv) { lbs_deb_enter(LBS_DEB_CFG80211); cancel_delayed_work_sync(&priv->scan_work); } void lbs_cfg_free(struct lbs_private *priv) { struct wireless_dev *wdev = priv->wdev; lbs_deb_enter(LBS_DEB_CFG80211); if (!wdev) return; if (priv->wiphy_registered) wiphy_unregister(wdev->wiphy); if (wdev->wiphy) wiphy_free(wdev->wiphy); kfree(wdev); }
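/*
 * A rough usage sketch of the lifecycle helpers above (not part of this
 * file; "card_probe"/"card_remove" and the SDIO bus are hypothetical,
 * and error handling is elided):
 *
 *	static int card_probe(struct sdio_func *func)
 *	{
 *		struct wireless_dev *wdev = lbs_cfg_alloc(&func->dev);
 *		struct lbs_private *priv = wiphy_priv(wdev->wiphy);
 *
 *		... download firmware, fill priv->fwrelease etc. ...
 *		return lbs_cfg_register(priv);	 (wiphy + netdev)
 *	}
 *
 *	static void card_remove(struct sdio_func *func)
 *	{
 *		lbs_scan_deinit(priv);	 (cancel pending scan work)
 *		lbs_cfg_free(priv);	 (unregister and free the wiphy)
 *	}
 */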
gpl-2.0
DJNoXD/rockchip-kernel-rk2918
drivers/media/common/saa7146_vbi.c
3091
14248
#include <media/saa7146_vv.h> static int vbi_pixel_to_capture = 720 * 2; static int vbi_workaround(struct saa7146_dev *dev) { struct saa7146_vv *vv = dev->vv_data; u32 *cpu; dma_addr_t dma_addr; int count = 0; int i; DECLARE_WAITQUEUE(wait, current); DEB_VBI(("dev:%p\n",dev)); /* once again, a bug in the saa7146: the brs acquisition is buggy and especially the BXO-counter does not work as specified. there is this workaround, but please don't let me explain it. ;-) */ cpu = pci_alloc_consistent(dev->pci, 4096, &dma_addr); if (NULL == cpu) return -ENOMEM; /* setup some basic programming, just for the workaround */ saa7146_write(dev, BASE_EVEN3, dma_addr); saa7146_write(dev, BASE_ODD3, dma_addr+vbi_pixel_to_capture); saa7146_write(dev, PROT_ADDR3, dma_addr+4096); saa7146_write(dev, PITCH3, vbi_pixel_to_capture); saa7146_write(dev, BASE_PAGE3, 0x0); saa7146_write(dev, NUM_LINE_BYTE3, (2<<16)|((vbi_pixel_to_capture)<<0)); saa7146_write(dev, MC2, MASK_04|MASK_20); /* load brs-control register */ WRITE_RPS1(CMD_WR_REG | (1 << 8) | (BRS_CTRL/4)); /* BXO = 1h, BRS to outbound */ WRITE_RPS1(0xc000008c); /* wait for vbi_a or vbi_b*/ if ( 0 != (SAA7146_USE_PORT_B_FOR_VBI & dev->ext_vv_data->flags)) { DEB_D(("...using port b\n")); WRITE_RPS1(CMD_PAUSE | CMD_OAN | CMD_SIG1 | CMD_E_FID_B); WRITE_RPS1(CMD_PAUSE | CMD_OAN | CMD_SIG1 | CMD_O_FID_B); /* WRITE_RPS1(CMD_PAUSE | MASK_09); */ } else { DEB_D(("...using port a\n")); WRITE_RPS1(CMD_PAUSE | MASK_10); } /* upload brs */ WRITE_RPS1(CMD_UPLOAD | MASK_08); /* load brs-control register */ WRITE_RPS1(CMD_WR_REG | (1 << 8) | (BRS_CTRL/4)); /* BYO = 1, BXO = NQBIL (=1728 for PAL, for NTSC this is 858*2) - NumByte3 (=1440) = 288 */ WRITE_RPS1(((1728-(vbi_pixel_to_capture)) << 7) | MASK_19); /* wait for brs_done */ WRITE_RPS1(CMD_PAUSE | MASK_08); /* upload brs */ WRITE_RPS1(CMD_UPLOAD | MASK_08); /* load video-dma3 NumLines3 and NumBytes3 */ WRITE_RPS1(CMD_WR_REG | (1 << 8) | (NUM_LINE_BYTE3/4)); /* dev->vbi_count*2 lines, 720 pixel (= 1440 Bytes) */ WRITE_RPS1((2 << 16) | (vbi_pixel_to_capture)); /* load brs-control register */ WRITE_RPS1(CMD_WR_REG | (1 << 8) | (BRS_CTRL/4)); /* Set BRS right: note: this is an experimental value for BXO (=> PAL!) 
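* The write below therefore encodes BXO = 540 in the << 7 field and
* BYO = 5 in the << 19 field (hence the "5 == vbi_start" remark):
* capturing starts at VBI line 5 with a 540-pixel horizontal offset.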
*/ WRITE_RPS1((540 << 7) | (5 << 19)); // 5 == vbi_start /* wait for brs_done */ WRITE_RPS1(CMD_PAUSE | MASK_08); /* upload brs and video-dma3*/ WRITE_RPS1(CMD_UPLOAD | MASK_08 | MASK_04); /* load mc2 register: enable dma3 */ WRITE_RPS1(CMD_WR_REG | (1 << 8) | (MC1/4)); WRITE_RPS1(MASK_20 | MASK_04); /* generate interrupt */ WRITE_RPS1(CMD_INTERRUPT); /* stop rps1 */ WRITE_RPS1(CMD_STOP); /* we have to do the workaround twice to be sure that everything is ok */ for(i = 0; i < 2; i++) { /* indicate to the irq handler that we do the workaround */ saa7146_write(dev, MC2, MASK_31|MASK_15); saa7146_write(dev, NUM_LINE_BYTE3, (1<<16)|(2<<0)); saa7146_write(dev, MC2, MASK_04|MASK_20); /* enable rps1 irqs */ SAA7146_IER_ENABLE(dev,MASK_28); /* prepare to wait to be woken up by the irq-handler */ add_wait_queue(&vv->vbi_wq, &wait); current->state = TASK_INTERRUPTIBLE; /* start rps1 to enable workaround */ saa7146_write(dev, RPS_ADDR1, dev->d_rps1.dma_handle); saa7146_write(dev, MC1, (MASK_13 | MASK_29)); schedule(); DEB_VBI(("brs bug workaround %d/1.\n",i)); remove_wait_queue(&vv->vbi_wq, &wait); current->state = TASK_RUNNING; /* disable rps1 irqs */ SAA7146_IER_DISABLE(dev,MASK_28); /* stop video-dma3 */ saa7146_write(dev, MC1, MASK_20); if(signal_pending(current)) { DEB_VBI(("aborted (rps:0x%08x).\n",saa7146_read(dev,RPS_ADDR1))); /* stop rps1 for sure */ saa7146_write(dev, MC1, MASK_29); pci_free_consistent(dev->pci, 4096, cpu, dma_addr); return -EINTR; } } pci_free_consistent(dev->pci, 4096, cpu, dma_addr); return 0; } static void saa7146_set_vbi_capture(struct saa7146_dev *dev, struct saa7146_buf *buf, struct saa7146_buf *next) { struct saa7146_vv *vv = dev->vv_data; struct saa7146_video_dma vdma3; int count = 0; unsigned long e_wait = vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? CMD_E_FID_A : CMD_E_FID_B; unsigned long o_wait = vv->current_hps_sync == SAA7146_HPS_SYNC_PORT_A ? CMD_O_FID_A : CMD_O_FID_B; /* vdma3.base_even = 0xc8000000+2560*70; vdma3.base_odd = 0xc8000000; vdma3.prot_addr = 0xc8000000+2560*164; vdma3.pitch = 2560; vdma3.base_page = 0; vdma3.num_line_byte = (64<<16)|((vbi_pixel_to_capture)<<0); // set above! */ vdma3.base_even = buf->pt[2].offset; vdma3.base_odd = buf->pt[2].offset + 16 * vbi_pixel_to_capture; vdma3.prot_addr = buf->pt[2].offset + 16 * 2 * vbi_pixel_to_capture; vdma3.pitch = vbi_pixel_to_capture; vdma3.base_page = buf->pt[2].dma | ME1; vdma3.num_line_byte = (16 << 16) | vbi_pixel_to_capture; saa7146_write_out_dma(dev, 3, &vdma3); /* write beginning of rps-program */ count = 0; /* wait for o_fid_a/b / e_fid_a/b toggle only if bit 1 is not set */ /* we don't wait here for the first field anymore. this is different from the video capture and might cause that the first buffer is only half filled (with only one field). but since this is some sort of streaming data, this is not that negative. but by doing this, we can use the whole engine from videobuf-dma-sg.c... 
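* (the two commented-out CMD_PAUSE writes just below are exactly the
* per-field waits that were dropped for this reason)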
*/ /* WRITE_RPS1(CMD_PAUSE | CMD_OAN | CMD_SIG1 | e_wait); WRITE_RPS1(CMD_PAUSE | CMD_OAN | CMD_SIG1 | o_wait); */ /* set bit 1 */ WRITE_RPS1(CMD_WR_REG | (1 << 8) | (MC2/4)); WRITE_RPS1(MASK_28 | MASK_12); /* turn on video-dma3 */ WRITE_RPS1(CMD_WR_REG_MASK | (MC1/4)); WRITE_RPS1(MASK_04 | MASK_20); /* => mask */ WRITE_RPS1(MASK_04 | MASK_20); /* => values */ /* wait for o_fid_a/b / e_fid_a/b toggle */ WRITE_RPS1(CMD_PAUSE | o_wait); WRITE_RPS1(CMD_PAUSE | e_wait); /* generate interrupt */ WRITE_RPS1(CMD_INTERRUPT); /* stop */ WRITE_RPS1(CMD_STOP); /* enable rps1 irqs */ SAA7146_IER_ENABLE(dev, MASK_28); /* write the address of the rps-program */ saa7146_write(dev, RPS_ADDR1, dev->d_rps1.dma_handle); /* turn on rps */ saa7146_write(dev, MC1, (MASK_13 | MASK_29)); } static int buffer_activate(struct saa7146_dev *dev, struct saa7146_buf *buf, struct saa7146_buf *next) { struct saa7146_vv *vv = dev->vv_data; buf->vb.state = VIDEOBUF_ACTIVE; DEB_VBI(("dev:%p, buf:%p, next:%p\n",dev,buf,next)); saa7146_set_vbi_capture(dev,buf,next); mod_timer(&vv->vbi_q.timeout, jiffies+BUFFER_TIMEOUT); return 0; } static int buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,enum v4l2_field field) { struct file *file = q->priv_data; struct saa7146_fh *fh = file->private_data; struct saa7146_dev *dev = fh->dev; struct saa7146_buf *buf = (struct saa7146_buf *)vb; int err = 0; int lines, llength, size; lines = 16 * 2 ; /* 2 fields */ llength = vbi_pixel_to_capture; size = lines * llength; DEB_VBI(("vb:%p\n",vb)); if (0 != buf->vb.baddr && buf->vb.bsize < size) { DEB_VBI(("size mismatch.\n")); return -EINVAL; } if (buf->vb.size != size) saa7146_dma_free(dev,q,buf); if (VIDEOBUF_NEEDS_INIT == buf->vb.state) { struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb); buf->vb.width = llength; buf->vb.height = lines; buf->vb.size = size; buf->vb.field = field; // FIXME: check this saa7146_pgtable_free(dev->pci, &buf->pt[2]); saa7146_pgtable_alloc(dev->pci, &buf->pt[2]); err = videobuf_iolock(q,&buf->vb, NULL); if (err) goto oops; err = saa7146_pgtable_build_single(dev->pci, &buf->pt[2], dma->sglist, dma->sglen); if (0 != err) return err; } buf->vb.state = VIDEOBUF_PREPARED; buf->activate = buffer_activate; return 0; oops: DEB_VBI(("error out.\n")); saa7146_dma_free(dev,q,buf); return err; } static int buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size) { int llength,lines; lines = 16 * 2 ; /* 2 fields */ llength = vbi_pixel_to_capture; *size = lines * llength; *count = 2; DEB_VBI(("count:%d, size:%d\n",*count,*size)); return 0; } static void buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb) { struct file *file = q->priv_data; struct saa7146_fh *fh = file->private_data; struct saa7146_dev *dev = fh->dev; struct saa7146_vv *vv = dev->vv_data; struct saa7146_buf *buf = (struct saa7146_buf *)vb; DEB_VBI(("vb:%p\n",vb)); saa7146_buffer_queue(dev,&vv->vbi_q,buf); } static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb) { struct file *file = q->priv_data; struct saa7146_fh *fh = file->private_data; struct saa7146_dev *dev = fh->dev; struct saa7146_buf *buf = (struct saa7146_buf *)vb; DEB_VBI(("vb:%p\n",vb)); saa7146_dma_free(dev,q,buf); } static struct videobuf_queue_ops vbi_qops = { .buf_setup = buffer_setup, .buf_prepare = buffer_prepare, .buf_queue = buffer_queue, .buf_release = buffer_release, }; /* ------------------------------------------------------------------ */ static void vbi_stop(struct saa7146_fh *fh, struct file *file) { 
struct saa7146_dev *dev = fh->dev; struct saa7146_vv *vv = dev->vv_data; unsigned long flags; DEB_VBI(("dev:%p, fh:%p\n",dev, fh)); spin_lock_irqsave(&dev->slock,flags); /* disable rps1 */ saa7146_write(dev, MC1, MASK_29); /* disable rps1 irqs */ SAA7146_IER_DISABLE(dev, MASK_28); /* shut down dma 3 transfers */ saa7146_write(dev, MC1, MASK_20); if (vv->vbi_q.curr) { saa7146_buffer_finish(dev,&vv->vbi_q,VIDEOBUF_DONE); } videobuf_queue_cancel(&fh->vbi_q); vv->vbi_streaming = NULL; del_timer(&vv->vbi_q.timeout); del_timer(&fh->vbi_read_timeout); spin_unlock_irqrestore(&dev->slock, flags); } static void vbi_read_timeout(unsigned long data) { struct file *file = (struct file*)data; struct saa7146_fh *fh = file->private_data; struct saa7146_dev *dev = fh->dev; DEB_VBI(("dev:%p, fh:%p\n",dev, fh)); vbi_stop(fh, file); } static void vbi_init(struct saa7146_dev *dev, struct saa7146_vv *vv) { DEB_VBI(("dev:%p\n",dev)); INIT_LIST_HEAD(&vv->vbi_q.queue); init_timer(&vv->vbi_q.timeout); vv->vbi_q.timeout.function = saa7146_buffer_timeout; vv->vbi_q.timeout.data = (unsigned long)(&vv->vbi_q); vv->vbi_q.dev = dev; init_waitqueue_head(&vv->vbi_wq); } static int vbi_open(struct saa7146_dev *dev, struct file *file) { struct saa7146_fh *fh = file->private_data; u32 arbtr_ctrl = saa7146_read(dev, PCI_BT_V1); int ret = 0; DEB_VBI(("dev:%p, fh:%p\n",dev,fh)); ret = saa7146_res_get(fh, RESOURCE_DMA3_BRS); if (0 == ret) { DEB_S(("cannot get vbi RESOURCE_DMA3_BRS resource\n")); return -EBUSY; } /* adjust arbitration control for video dma 3 */ arbtr_ctrl &= ~0x1f0000; arbtr_ctrl |= 0x1d0000; saa7146_write(dev, PCI_BT_V1, arbtr_ctrl); saa7146_write(dev, MC2, (MASK_04|MASK_20)); memset(&fh->vbi_fmt,0,sizeof(fh->vbi_fmt)); fh->vbi_fmt.sampling_rate = 27000000; fh->vbi_fmt.offset = 248; /* todo */ fh->vbi_fmt.samples_per_line = vbi_pixel_to_capture; fh->vbi_fmt.sample_format = V4L2_PIX_FMT_GREY; /* fixme: this only works for PAL */ fh->vbi_fmt.start[0] = 5; fh->vbi_fmt.count[0] = 16; fh->vbi_fmt.start[1] = 312; fh->vbi_fmt.count[1] = 16; videobuf_queue_sg_init(&fh->vbi_q, &vbi_qops, &dev->pci->dev, &dev->slock, V4L2_BUF_TYPE_VBI_CAPTURE, V4L2_FIELD_SEQ_TB, // FIXME: does this really work? 
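/* SEQ_TB: both fields of a frame end up sequentially in one buffer, which presumably matches the even/odd DMA split programmed in saa7146_set_vbi_capture() above */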
sizeof(struct saa7146_buf), file, &dev->v4l2_lock); init_timer(&fh->vbi_read_timeout); fh->vbi_read_timeout.function = vbi_read_timeout; fh->vbi_read_timeout.data = (unsigned long)file; /* initialize the brs */ if ( 0 != (SAA7146_USE_PORT_B_FOR_VBI & dev->ext_vv_data->flags)) { saa7146_write(dev, BRS_CTRL, MASK_30|MASK_29 | (7 << 19)); } else { saa7146_write(dev, BRS_CTRL, 0x00000001); if (0 != (ret = vbi_workaround(dev))) { DEB_VBI(("vbi workaround failed!\n")); /* return ret;*/ } } /* upload brs register */ saa7146_write(dev, MC2, (MASK_08|MASK_24)); return 0; } static void vbi_close(struct saa7146_dev *dev, struct file *file) { struct saa7146_fh *fh = file->private_data; struct saa7146_vv *vv = dev->vv_data; DEB_VBI(("dev:%p, fh:%p\n",dev,fh)); if( fh == vv->vbi_streaming ) { vbi_stop(fh, file); } saa7146_res_free(fh, RESOURCE_DMA3_BRS); } static void vbi_irq_done(struct saa7146_dev *dev, unsigned long status) { struct saa7146_vv *vv = dev->vv_data; spin_lock(&dev->slock); if (vv->vbi_q.curr) { DEB_VBI(("dev:%p, curr:%p\n",dev,vv->vbi_q.curr)); /* this must be += 2, one count for each field */ vv->vbi_fieldcount+=2; vv->vbi_q.curr->vb.field_count = vv->vbi_fieldcount; saa7146_buffer_finish(dev,&vv->vbi_q,VIDEOBUF_DONE); } else { DEB_VBI(("dev:%p\n",dev)); } saa7146_buffer_next(dev,&vv->vbi_q,1); spin_unlock(&dev->slock); } static ssize_t vbi_read(struct file *file, char __user *data, size_t count, loff_t *ppos) { struct saa7146_fh *fh = file->private_data; struct saa7146_dev *dev = fh->dev; struct saa7146_vv *vv = dev->vv_data; ssize_t ret = 0; DEB_VBI(("dev:%p, fh:%p\n",dev,fh)); if( NULL == vv->vbi_streaming ) { // fixme: check if dma3 is available // fixme: activate vbi engine here if necessary. (really?) vv->vbi_streaming = fh; } if( fh != vv->vbi_streaming ) { DEB_VBI(("open %p is already using vbi capture.",vv->vbi_streaming)); return -EBUSY; } mod_timer(&fh->vbi_read_timeout, jiffies+BUFFER_TIMEOUT); ret = videobuf_read_stream(&fh->vbi_q, data, count, ppos, 1, file->f_flags & O_NONBLOCK); /* printk("BASE_ODD3: 0x%08x\n", saa7146_read(dev, BASE_ODD3)); printk("BASE_EVEN3: 0x%08x\n", saa7146_read(dev, BASE_EVEN3)); printk("PROT_ADDR3: 0x%08x\n", saa7146_read(dev, PROT_ADDR3)); printk("PITCH3: 0x%08x\n", saa7146_read(dev, PITCH3)); printk("BASE_PAGE3: 0x%08x\n", saa7146_read(dev, BASE_PAGE3)); printk("NUM_LINE_BYTE3: 0x%08x\n", saa7146_read(dev, NUM_LINE_BYTE3)); printk("BRS_CTRL: 0x%08x\n", saa7146_read(dev, BRS_CTRL)); */ return ret; } struct saa7146_use_ops saa7146_vbi_uops = { .init = vbi_init, .open = vbi_open, .release = vbi_close, .irq_done = vbi_irq_done, .read = vbi_read, };
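/*
 * Buffer geometry implied by the code above, as a worked example
 * (a sketch, not driver code): with vbi_pixel_to_capture =
 * 720 * 2 = 1440 bytes per line and 16 lines per field,
 *
 *	one field	= 16 * 1440	= 23040 bytes
 *	one buffer	= 2 fields	= 46080 bytes (buffer_setup())
 *	base_odd	= offset + 16 * 1440
 *	prot_addr	= offset + 2 * 16 * 1440
 *
 * which is exactly the video-dma3 programming done in
 * saa7146_set_vbi_capture().
 */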
gpl-2.0
weizhenwei/mi1_kernel
drivers/usb/serial/visor.c
3603
21364
/* * USB HandSpring Visor, Palm m50x, and Sony Clie driver * (supports all of the Palm OS USB devices) * * Copyright (C) 1999 - 2004 * Greg Kroah-Hartman (greg@kroah.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * See Documentation/usb/usb-serial.txt for more information on using this * driver * */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/spinlock.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include <linux/usb/cdc.h> #include "visor.h" /* * Version Information */ #define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>" #define DRIVER_DESC "USB HandSpring Visor / Palm OS driver" /* function prototypes for a handspring visor */ static int visor_open(struct tty_struct *tty, struct usb_serial_port *port); static void visor_close(struct usb_serial_port *port); static int visor_probe(struct usb_serial *serial, const struct usb_device_id *id); static int visor_calc_num_ports(struct usb_serial *serial); static void visor_read_int_callback(struct urb *urb); static int clie_3_5_startup(struct usb_serial *serial); static int treo_attach(struct usb_serial *serial); static int clie_5_attach(struct usb_serial *serial); static int palm_os_3_probe(struct usb_serial *serial, const struct usb_device_id *id); static int palm_os_4_probe(struct usb_serial *serial, const struct usb_device_id *id); /* Parameters that may be passed into the module. */ static bool debug; static __u16 vendor; static __u16 product; static struct usb_device_id id_table [] = { { USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_VISOR_ID), .driver_info = (kernel_ulong_t)&palm_os_3_probe }, { USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_TREO_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_TREO600_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(GSPDA_VENDOR_ID, GSPDA_XPLORE_M68_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(PALM_VENDOR_ID, PALM_M500_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(PALM_VENDOR_ID, PALM_M505_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(PALM_VENDOR_ID, PALM_M515_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(PALM_VENDOR_ID, PALM_I705_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(PALM_VENDOR_ID, PALM_M100_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(PALM_VENDOR_ID, PALM_M125_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(PALM_VENDOR_ID, PALM_M130_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_T_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(PALM_VENDOR_ID, PALM_TREO_650), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_Z_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(PALM_VENDOR_ID, PALM_ZIRE_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_4_0_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_S360_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { 
USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_4_1_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_NX60_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_NZ90V_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_TJ25_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(ACER_VENDOR_ID, ACER_S10_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SCH_I330_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SPH_I500_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(TAPWAVE_VENDOR_ID, TAPWAVE_ZODIAC_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(GARMIN_VENDOR_ID, GARMIN_IQUE_3600_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(ACEECA_VENDOR_ID, ACEECA_MEZ1000_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_7135_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { USB_DEVICE(FOSSIL_VENDOR_ID, FOSSIL_ABACUS_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { }, /* optional parameter entry */ { } /* Terminating entry */ }; static struct usb_device_id clie_id_5_table [] = { { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_UX50_ID), .driver_info = (kernel_ulong_t)&palm_os_4_probe }, { }, /* optional parameter entry */ { } /* Terminating entry */ }; static struct usb_device_id clie_id_3_5_table [] = { { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_3_5_ID) }, { } /* Terminating entry */ }; static struct usb_device_id id_table_combined [] = { { USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_VISOR_ID) }, { USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_TREO_ID) }, { USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_TREO600_ID) }, { USB_DEVICE(GSPDA_VENDOR_ID, GSPDA_XPLORE_M68_ID) }, { USB_DEVICE(PALM_VENDOR_ID, PALM_M500_ID) }, { USB_DEVICE(PALM_VENDOR_ID, PALM_M505_ID) }, { USB_DEVICE(PALM_VENDOR_ID, PALM_M515_ID) }, { USB_DEVICE(PALM_VENDOR_ID, PALM_I705_ID) }, { USB_DEVICE(PALM_VENDOR_ID, PALM_M100_ID) }, { USB_DEVICE(PALM_VENDOR_ID, PALM_M125_ID) }, { USB_DEVICE(PALM_VENDOR_ID, PALM_M130_ID) }, { USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_T_ID) }, { USB_DEVICE(PALM_VENDOR_ID, PALM_TREO_650) }, { USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_Z_ID) }, { USB_DEVICE(PALM_VENDOR_ID, PALM_ZIRE_ID) }, { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_3_5_ID) }, { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_4_0_ID) }, { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_S360_ID) }, { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_4_1_ID) }, { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_NX60_ID) }, { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_NZ90V_ID) }, { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_UX50_ID) }, { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_TJ25_ID) }, { USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SCH_I330_ID) }, { USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SPH_I500_ID) }, { USB_DEVICE(TAPWAVE_VENDOR_ID, TAPWAVE_ZODIAC_ID) }, { USB_DEVICE(GARMIN_VENDOR_ID, GARMIN_IQUE_3600_ID) }, { USB_DEVICE(ACEECA_VENDOR_ID, ACEECA_MEZ1000_ID) }, { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_7135_ID) }, { USB_DEVICE(FOSSIL_VENDOR_ID, FOSSIL_ABACUS_ID) }, { }, /* optional parameter entry */ { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, id_table_combined); static struct usb_driver visor_driver = { .name = "visor", .probe = usb_serial_probe, .disconnect = usb_serial_disconnect, .id_table = id_table_combined, }; /* All of the device info needed for the Handspring Visor, and 
Palm 4.0 devices */ static struct usb_serial_driver handspring_device = { .driver = { .owner = THIS_MODULE, .name = "visor", }, .description = "Handspring Visor / Palm OS", .id_table = id_table, .num_ports = 2, .bulk_out_size = 256, .open = visor_open, .close = visor_close, .throttle = usb_serial_generic_throttle, .unthrottle = usb_serial_generic_unthrottle, .attach = treo_attach, .probe = visor_probe, .calc_num_ports = visor_calc_num_ports, .read_int_callback = visor_read_int_callback, }; /* All of the device info needed for the Clie UX50, TH55 Palm 5.0 devices */ static struct usb_serial_driver clie_5_device = { .driver = { .owner = THIS_MODULE, .name = "clie_5", }, .description = "Sony Clie 5.0", .id_table = clie_id_5_table, .num_ports = 2, .bulk_out_size = 256, .open = visor_open, .close = visor_close, .throttle = usb_serial_generic_throttle, .unthrottle = usb_serial_generic_unthrottle, .attach = clie_5_attach, .probe = visor_probe, .calc_num_ports = visor_calc_num_ports, .read_int_callback = visor_read_int_callback, }; /* device info for the Sony Clie OS version 3.5 */ static struct usb_serial_driver clie_3_5_device = { .driver = { .owner = THIS_MODULE, .name = "clie_3.5", }, .description = "Sony Clie 3.5", .id_table = clie_id_3_5_table, .num_ports = 1, .bulk_out_size = 256, .open = visor_open, .close = visor_close, .throttle = usb_serial_generic_throttle, .unthrottle = usb_serial_generic_unthrottle, .attach = clie_3_5_startup, }; static struct usb_serial_driver * const serial_drivers[] = { &handspring_device, &clie_5_device, &clie_3_5_device, NULL }; /****************************************************************************** * Handspring Visor specific driver functions ******************************************************************************/ static int visor_open(struct tty_struct *tty, struct usb_serial_port *port) { int result = 0; dbg("%s - port %d", __func__, port->number); if (!port->read_urb) { /* this is needed for some brain dead Sony devices */ dev_err(&port->dev, "Device lied about number of ports, please use a lower one.\n"); return -ENODEV; } /* Start reading from the device */ result = usb_serial_generic_open(tty, port); if (result) goto exit; if (port->interrupt_in_urb) { dbg("%s - adding interrupt input for treo", __func__); result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); if (result) dev_err(&port->dev, "%s - failed submitting interrupt urb, error %d\n", __func__, result); } exit: return result; } static void visor_close(struct usb_serial_port *port) { unsigned char *transfer_buffer; dbg("%s - port %d", __func__, port->number); /* shutdown our urbs */ usb_serial_generic_close(port); usb_kill_urb(port->interrupt_in_urb); mutex_lock(&port->serial->disc_mutex); if (!port->serial->disconnected) { /* Try to send shutdown message, unless the device is gone */ transfer_buffer = kmalloc(0x12, GFP_KERNEL); if (transfer_buffer) { usb_control_msg(port->serial->dev, usb_rcvctrlpipe(port->serial->dev, 0), VISOR_CLOSE_NOTIFICATION, 0xc2, 0x0000, 0x0000, transfer_buffer, 0x12, 300); kfree(transfer_buffer); } } mutex_unlock(&port->serial->disc_mutex); } static void visor_read_int_callback(struct urb *urb) { struct usb_serial_port *port = urb->context; int status = urb->status; int result; switch (status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dbg("%s - urb shutting down with status: %d", __func__, status); return; default: dbg("%s - nonzero urb status received: %d", __func__, 
status); goto exit; } /* * This information is still unknown what it can be used for. * If anyone has an idea, please let the author know... * * Rumor has it this endpoint is used to notify when data * is ready to be read from the bulk ones. */ usb_serial_debug_data(debug, &port->dev, __func__, urb->actual_length, urb->transfer_buffer); exit: result = usb_submit_urb(urb, GFP_ATOMIC); if (result) dev_err(&urb->dev->dev, "%s - Error %d submitting interrupt urb\n", __func__, result); } static int palm_os_3_probe(struct usb_serial *serial, const struct usb_device_id *id) { struct device *dev = &serial->dev->dev; struct visor_connection_info *connection_info; unsigned char *transfer_buffer; char *string; int retval = 0; int i; int num_ports = 0; dbg("%s", __func__); transfer_buffer = kmalloc(sizeof(*connection_info), GFP_KERNEL); if (!transfer_buffer) { dev_err(dev, "%s - kmalloc(%Zd) failed.\n", __func__, sizeof(*connection_info)); return -ENOMEM; } /* send a get connection info request */ retval = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), VISOR_GET_CONNECTION_INFORMATION, 0xc2, 0x0000, 0x0000, transfer_buffer, sizeof(*connection_info), 300); if (retval < 0) { dev_err(dev, "%s - error %d getting connection information\n", __func__, retval); goto exit; } if (retval == sizeof(*connection_info)) { connection_info = (struct visor_connection_info *) transfer_buffer; num_ports = le16_to_cpu(connection_info->num_ports); for (i = 0; i < num_ports; ++i) { switch ( connection_info->connections[i].port_function_id) { case VISOR_FUNCTION_GENERIC: string = "Generic"; break; case VISOR_FUNCTION_DEBUGGER: string = "Debugger"; break; case VISOR_FUNCTION_HOTSYNC: string = "HotSync"; break; case VISOR_FUNCTION_CONSOLE: string = "Console"; break; case VISOR_FUNCTION_REMOTE_FILE_SYS: string = "Remote File System"; break; default: string = "unknown"; break; } dev_info(dev, "%s: port %d, is for %s use\n", serial->type->description, connection_info->connections[i].port, string); } } /* * Handle devices that report invalid stuff here. 
*/ if (num_ports == 0 || num_ports > 2) { dev_warn(dev, "%s: No valid connect info available\n", serial->type->description); num_ports = 2; } dev_info(dev, "%s: Number of ports: %d\n", serial->type->description, num_ports); /* * save off our num_ports info so that we can use it in the * calc_num_ports callback */ usb_set_serial_data(serial, (void *)(long)num_ports); /* ask for the number of bytes available, but ignore the response as it is broken */ retval = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), VISOR_REQUEST_BYTES_AVAILABLE, 0xc2, 0x0000, 0x0005, transfer_buffer, 0x02, 300); if (retval < 0) dev_err(dev, "%s - error %d getting bytes available request\n", __func__, retval); retval = 0; exit: kfree(transfer_buffer); return retval; } static int palm_os_4_probe(struct usb_serial *serial, const struct usb_device_id *id) { struct device *dev = &serial->dev->dev; struct palm_ext_connection_info *connection_info; unsigned char *transfer_buffer; int retval; dbg("%s", __func__); transfer_buffer = kmalloc(sizeof(*connection_info), GFP_KERNEL); if (!transfer_buffer) { dev_err(dev, "%s - kmalloc(%Zd) failed.\n", __func__, sizeof(*connection_info)); return -ENOMEM; } retval = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), PALM_GET_EXT_CONNECTION_INFORMATION, 0xc2, 0x0000, 0x0000, transfer_buffer, sizeof(*connection_info), 300); if (retval < 0) dev_err(dev, "%s - error %d getting connection info\n", __func__, retval); else usb_serial_debug_data(debug, &serial->dev->dev, __func__, retval, transfer_buffer); kfree(transfer_buffer); return 0; } static int visor_probe(struct usb_serial *serial, const struct usb_device_id *id) { int retval = 0; int (*startup)(struct usb_serial *serial, const struct usb_device_id *id); dbg("%s", __func__); /* * some Samsung Android phones in modem mode have the same ID * as SPH-I500, but they are ACM devices, so dont bind to them */ if (id->idVendor == SAMSUNG_VENDOR_ID && id->idProduct == SAMSUNG_SPH_I500_ID && serial->dev->descriptor.bDeviceClass == USB_CLASS_COMM && serial->dev->descriptor.bDeviceSubClass == USB_CDC_SUBCLASS_ACM) return -ENODEV; if (serial->dev->actconfig->desc.bConfigurationValue != 1) { dev_err(&serial->dev->dev, "active config #%d != 1 ??\n", serial->dev->actconfig->desc.bConfigurationValue); return -ENODEV; } if (id->driver_info) { startup = (void *)id->driver_info; retval = startup(serial, id); } return retval; } static int visor_calc_num_ports(struct usb_serial *serial) { int num_ports = (int)(long)(usb_get_serial_data(serial)); if (num_ports) usb_set_serial_data(serial, NULL); return num_ports; } static int clie_3_5_startup(struct usb_serial *serial) { struct device *dev = &serial->dev->dev; int result; u8 *data; dbg("%s", __func__); data = kmalloc(1, GFP_KERNEL); if (!data) return -ENOMEM; /* * Note that PEG-300 series devices expect the following two calls. 
*/ /* get the config number */ result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), USB_REQ_GET_CONFIGURATION, USB_DIR_IN, 0, 0, data, 1, 3000); if (result < 0) { dev_err(dev, "%s: get config number failed: %d\n", __func__, result); goto out; } if (result != 1) { dev_err(dev, "%s: get config number bad return length: %d\n", __func__, result); result = -EIO; goto out; } /* get the interface number */ result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), USB_REQ_GET_INTERFACE, USB_DIR_IN | USB_RECIP_INTERFACE, 0, 0, data, 1, 3000); if (result < 0) { dev_err(dev, "%s: get interface number failed: %d\n", __func__, result); goto out; } if (result != 1) { dev_err(dev, "%s: get interface number bad return length: %d\n", __func__, result); result = -EIO; goto out; } result = 0; out: kfree(data); return result; } static int treo_attach(struct usb_serial *serial) { struct usb_serial_port *swap_port; /* Only do this endpoint hack for the Handspring devices with * interrupt in endpoints, which for now are the Treo devices. */ if (!((le16_to_cpu(serial->dev->descriptor.idVendor) == HANDSPRING_VENDOR_ID) || (le16_to_cpu(serial->dev->descriptor.idVendor) == KYOCERA_VENDOR_ID)) || (serial->num_interrupt_in == 0)) return 0; dbg("%s", __func__); /* * It appears that Treos and Kyoceras want to use the * 1st bulk in endpoint to communicate with the 2nd bulk out endpoint, * so let's swap the 1st and 2nd bulk in and interrupt endpoints. * Note that swapping the bulk out endpoints would break lots of * apps that want to communicate on the second port. */ #define COPY_PORT(dest, src) \ do { \ dest->read_urb = src->read_urb; \ dest->bulk_in_endpointAddress = src->bulk_in_endpointAddress;\ dest->bulk_in_buffer = src->bulk_in_buffer; \ dest->interrupt_in_urb = src->interrupt_in_urb; \ dest->interrupt_in_endpointAddress = \ src->interrupt_in_endpointAddress;\ dest->interrupt_in_buffer = src->interrupt_in_buffer; \ } while (0); swap_port = kmalloc(sizeof(*swap_port), GFP_KERNEL); if (!swap_port) return -ENOMEM; COPY_PORT(swap_port, serial->port[0]); COPY_PORT(serial->port[0], serial->port[1]); COPY_PORT(serial->port[1], swap_port); kfree(swap_port); return 0; } static int clie_5_attach(struct usb_serial *serial) { struct usb_serial_port *port; unsigned int pipe; int j; dbg("%s", __func__); /* TH55 registers 2 ports. Communication in from the UX50/TH55 uses bulk_in_endpointAddress from port 0. 
Communication out to the UX50/TH55 uses bulk_out_endpointAddress from port 1 Lets do a quick and dirty mapping */ /* some sanity check */ if (serial->num_ports < 2) return -1; /* port 0 now uses the modified endpoint Address */ port = serial->port[0]; port->bulk_out_endpointAddress = serial->port[1]->bulk_out_endpointAddress; pipe = usb_sndbulkpipe(serial->dev, port->bulk_out_endpointAddress); for (j = 0; j < ARRAY_SIZE(port->write_urbs); ++j) port->write_urbs[j]->pipe = pipe; return 0; } static int __init visor_init(void) { int i, retval; /* Only if parameters were passed to us */ if (vendor > 0 && product > 0) { struct usb_device_id usb_dev_temp[] = { { USB_DEVICE(vendor, product), .driver_info = (kernel_ulong_t) &palm_os_4_probe } }; /* Find the last entry in id_table */ for (i = 0;; i++) { if (id_table[i].idVendor == 0) { id_table[i] = usb_dev_temp[0]; break; } } /* Find the last entry in id_table_combined */ for (i = 0;; i++) { if (id_table_combined[i].idVendor == 0) { id_table_combined[i] = usb_dev_temp[0]; break; } } printk(KERN_INFO KBUILD_MODNAME ": Untested USB device specified at time of module insertion\n"); printk(KERN_INFO KBUILD_MODNAME ": Warning: This is not guaranteed to work\n"); printk(KERN_INFO KBUILD_MODNAME ": Using a newer kernel is preferred to this method\n"); printk(KERN_INFO KBUILD_MODNAME ": Adding Palm OS protocol 4.x support for unknown device: 0x%x/0x%x\n", vendor, product); } retval = usb_serial_register_drivers(&visor_driver, serial_drivers); if (retval == 0) printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_DESC "\n"); return retval; } static void __exit visor_exit (void) { usb_serial_deregister_drivers(&visor_driver, serial_drivers); } module_init(visor_init); module_exit(visor_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug enabled or not"); module_param(vendor, ushort, 0); MODULE_PARM_DESC(vendor, "User specified vendor ID"); module_param(product, ushort, 0); MODULE_PARM_DESC(product, "User specified product ID");
gpl-2.0
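The ID tables above carry a per-device setup routine in the .driver_info member, and visor_probe() later casts that value back to a function pointer and calls it. Below is a minimal user-space sketch of that match-table dispatch pattern; the IDs, struct, and function names are hypothetical, not taken from the driver (the void*-to-function-pointer cast mirrors the driver's kernel_ulong_t cast and is a common POSIX-sanctioned extension):

/* Hypothetical stand-in for one id_table entry plus visor_probe()'s
 * cast-and-call of .driver_info. Not the driver's real types. */
#include <stdio.h>
#include <stdint.h>

struct dev_id {
	uint16_t vendor, product;
	void *driver_info;		/* opaque per-entry data; here a callback */
};

static int palm_os_4_setup(uint16_t v, uint16_t p)
{
	printf("Palm OS 4.x style setup for %04x:%04x\n", v, p);
	return 0;
}

static const struct dev_id table[] = {
	{ 0x054c, 0x0144, (void *)palm_os_4_setup },	/* hypothetical IDs */
	{ 0 }						/* terminating entry */
};

int main(void)
{
	uint16_t v = 0x054c, p = 0x0144;
	const struct dev_id *id;

	for (id = table; id->vendor; id++) {
		if (id->vendor != v || id->product != p)
			continue;
		/* the same cast-and-call that visor_probe() performs */
		int (*setup)(uint16_t, uint16_t) =
			(int (*)(uint16_t, uint16_t))id->driver_info;
		return setup(v, p);
	}
	return 1;
}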
lupohirp/Acer_Liquid_Glow_Kernel
net/ipv4/netfilter/nf_nat_pptp.c
3603
9954
/* * nf_nat_pptp.c * * NAT support for PPTP (Point to Point Tunneling Protocol). * PPTP is a a protocol for creating virtual private networks. * It is a specification defined by Microsoft and some vendors * working with Microsoft. PPTP is built on top of a modified * version of the Internet Generic Routing Encapsulation Protocol. * GRE is defined in RFC 1701 and RFC 1702. Documentation of * PPTP can be found in RFC 2637 * * (C) 2000-2005 by Harald Welte <laforge@gnumonks.org> * * Development of this code funded by Astaro AG (http://www.astaro.com/) * * TODO: - NAT to a unique tuple, not to TCP source port * (needs netfilter tuple reservation) */ #include <linux/module.h> #include <linux/tcp.h> #include <net/netfilter/nf_nat.h> #include <net/netfilter/nf_nat_helper.h> #include <net/netfilter/nf_nat_rule.h> #include <net/netfilter/nf_conntrack_helper.h> #include <net/netfilter/nf_conntrack_expect.h> #include <net/netfilter/nf_conntrack_zones.h> #include <linux/netfilter/nf_conntrack_proto_gre.h> #include <linux/netfilter/nf_conntrack_pptp.h> #define NF_NAT_PPTP_VERSION "3.0" #define REQ_CID(req, off) (*(__be16 *)((char *)(req) + (off))) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>"); MODULE_DESCRIPTION("Netfilter NAT helper module for PPTP"); MODULE_ALIAS("ip_nat_pptp"); static void pptp_nat_expected(struct nf_conn *ct, struct nf_conntrack_expect *exp) { struct net *net = nf_ct_net(ct); const struct nf_conn *master = ct->master; struct nf_conntrack_expect *other_exp; struct nf_conntrack_tuple t; const struct nf_ct_pptp_master *ct_pptp_info; const struct nf_nat_pptp *nat_pptp_info; struct nf_nat_range range; ct_pptp_info = &nfct_help(master)->help.ct_pptp_info; nat_pptp_info = &nfct_nat(master)->help.nat_pptp_info; /* And here goes the grand finale of corrosion... */ if (exp->dir == IP_CT_DIR_ORIGINAL) { pr_debug("we are PNS->PAC\n"); /* therefore, build tuple for PAC->PNS */ t.src.l3num = AF_INET; t.src.u3.ip = master->tuplehash[!exp->dir].tuple.src.u3.ip; t.src.u.gre.key = ct_pptp_info->pac_call_id; t.dst.u3.ip = master->tuplehash[!exp->dir].tuple.dst.u3.ip; t.dst.u.gre.key = ct_pptp_info->pns_call_id; t.dst.protonum = IPPROTO_GRE; } else { pr_debug("we are PAC->PNS\n"); /* build tuple for PNS->PAC */ t.src.l3num = AF_INET; t.src.u3.ip = master->tuplehash[!exp->dir].tuple.src.u3.ip; t.src.u.gre.key = nat_pptp_info->pns_call_id; t.dst.u3.ip = master->tuplehash[!exp->dir].tuple.dst.u3.ip; t.dst.u.gre.key = nat_pptp_info->pac_call_id; t.dst.protonum = IPPROTO_GRE; } pr_debug("trying to unexpect other dir: "); nf_ct_dump_tuple_ip(&t); other_exp = nf_ct_expect_find_get(net, nf_ct_zone(ct), &t); if (other_exp) { nf_ct_unexpect_related(other_exp); nf_ct_expect_put(other_exp); pr_debug("success\n"); } else { pr_debug("not found!\n"); } /* This must be a fresh one. */ BUG_ON(ct->status & IPS_NAT_DONE_MASK); /* Change src to where master sends to */ range.flags = IP_NAT_RANGE_MAP_IPS; range.min_ip = range.max_ip = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip; if (exp->dir == IP_CT_DIR_ORIGINAL) { range.flags |= IP_NAT_RANGE_PROTO_SPECIFIED; range.min = range.max = exp->saved_proto; } nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC); /* For DST manip, map port here to where it's expected. 
*/ range.flags = IP_NAT_RANGE_MAP_IPS; range.min_ip = range.max_ip = ct->master->tuplehash[!exp->dir].tuple.src.u3.ip; if (exp->dir == IP_CT_DIR_REPLY) { range.flags |= IP_NAT_RANGE_PROTO_SPECIFIED; range.min = range.max = exp->saved_proto; } nf_nat_setup_info(ct, &range, IP_NAT_MANIP_DST); } /* outbound packets == from PNS to PAC */ static int pptp_outbound_pkt(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, struct PptpControlHeader *ctlh, union pptp_ctrl_union *pptpReq) { struct nf_ct_pptp_master *ct_pptp_info; struct nf_nat_pptp *nat_pptp_info; u_int16_t msg; __be16 new_callid; unsigned int cid_off; ct_pptp_info = &nfct_help(ct)->help.ct_pptp_info; nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info; new_callid = ct_pptp_info->pns_call_id; switch (msg = ntohs(ctlh->messageType)) { case PPTP_OUT_CALL_REQUEST: cid_off = offsetof(union pptp_ctrl_union, ocreq.callID); /* FIXME: ideally we would want to reserve a call ID * here. current netfilter NAT core is not able to do * this :( For now we use TCP source port. This breaks * multiple calls within one control session */ /* save original call ID in nat_info */ nat_pptp_info->pns_call_id = ct_pptp_info->pns_call_id; /* don't use tcph->source since we are at a DSTmanip * hook (e.g. PREROUTING) and pkt is not mangled yet */ new_callid = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.tcp.port; /* save new call ID in ct info */ ct_pptp_info->pns_call_id = new_callid; break; case PPTP_IN_CALL_REPLY: cid_off = offsetof(union pptp_ctrl_union, icack.callID); break; case PPTP_CALL_CLEAR_REQUEST: cid_off = offsetof(union pptp_ctrl_union, clrreq.callID); break; default: pr_debug("unknown outbound packet 0x%04x:%s\n", msg, msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0]); /* fall through */ case PPTP_SET_LINK_INFO: /* only need to NAT in case PAC is behind NAT box */ case PPTP_START_SESSION_REQUEST: case PPTP_START_SESSION_REPLY: case PPTP_STOP_SESSION_REQUEST: case PPTP_STOP_SESSION_REPLY: case PPTP_ECHO_REQUEST: case PPTP_ECHO_REPLY: /* no need to alter packet */ return NF_ACCEPT; } /* only OUT_CALL_REQUEST, IN_CALL_REPLY, CALL_CLEAR_REQUEST pass * down to here */ pr_debug("altering call id from 0x%04x to 0x%04x\n", ntohs(REQ_CID(pptpReq, cid_off)), ntohs(new_callid)); /* mangle packet */ if (nf_nat_mangle_tcp_packet(skb, ct, ctinfo, cid_off + sizeof(struct pptp_pkt_hdr) + sizeof(struct PptpControlHeader), sizeof(new_callid), (char *)&new_callid, sizeof(new_callid)) == 0) return NF_DROP; return NF_ACCEPT; } static void pptp_exp_gre(struct nf_conntrack_expect *expect_orig, struct nf_conntrack_expect *expect_reply) { const struct nf_conn *ct = expect_orig->master; struct nf_ct_pptp_master *ct_pptp_info; struct nf_nat_pptp *nat_pptp_info; ct_pptp_info = &nfct_help(ct)->help.ct_pptp_info; nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info; /* save original PAC call ID in nat_info */ nat_pptp_info->pac_call_id = ct_pptp_info->pac_call_id; /* alter expectation for PNS->PAC direction */ expect_orig->saved_proto.gre.key = ct_pptp_info->pns_call_id; expect_orig->tuple.src.u.gre.key = nat_pptp_info->pns_call_id; expect_orig->tuple.dst.u.gre.key = ct_pptp_info->pac_call_id; expect_orig->dir = IP_CT_DIR_ORIGINAL; /* alter expectation for PAC->PNS direction */ expect_reply->saved_proto.gre.key = nat_pptp_info->pns_call_id; expect_reply->tuple.src.u.gre.key = nat_pptp_info->pac_call_id; expect_reply->tuple.dst.u.gre.key = ct_pptp_info->pns_call_id; expect_reply->dir = IP_CT_DIR_REPLY; } /* inbound packets == from PAC to PNS */ static 
int pptp_inbound_pkt(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, struct PptpControlHeader *ctlh, union pptp_ctrl_union *pptpReq) { const struct nf_nat_pptp *nat_pptp_info; u_int16_t msg; __be16 new_pcid; unsigned int pcid_off; nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info; new_pcid = nat_pptp_info->pns_call_id; switch (msg = ntohs(ctlh->messageType)) { case PPTP_OUT_CALL_REPLY: pcid_off = offsetof(union pptp_ctrl_union, ocack.peersCallID); break; case PPTP_IN_CALL_CONNECT: pcid_off = offsetof(union pptp_ctrl_union, iccon.peersCallID); break; case PPTP_IN_CALL_REQUEST: /* only need to nat in case PAC is behind NAT box */ return NF_ACCEPT; case PPTP_WAN_ERROR_NOTIFY: pcid_off = offsetof(union pptp_ctrl_union, wanerr.peersCallID); break; case PPTP_CALL_DISCONNECT_NOTIFY: pcid_off = offsetof(union pptp_ctrl_union, disc.callID); break; case PPTP_SET_LINK_INFO: pcid_off = offsetof(union pptp_ctrl_union, setlink.peersCallID); break; default: pr_debug("unknown inbound packet %s\n", msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0]); /* fall through */ case PPTP_START_SESSION_REQUEST: case PPTP_START_SESSION_REPLY: case PPTP_STOP_SESSION_REQUEST: case PPTP_STOP_SESSION_REPLY: case PPTP_ECHO_REQUEST: case PPTP_ECHO_REPLY: /* no need to alter packet */ return NF_ACCEPT; } /* only OUT_CALL_REPLY, IN_CALL_CONNECT, IN_CALL_REQUEST, * WAN_ERROR_NOTIFY, CALL_DISCONNECT_NOTIFY pass down here */ /* mangle packet */ pr_debug("altering peer call id from 0x%04x to 0x%04x\n", ntohs(REQ_CID(pptpReq, pcid_off)), ntohs(new_pcid)); if (nf_nat_mangle_tcp_packet(skb, ct, ctinfo, pcid_off + sizeof(struct pptp_pkt_hdr) + sizeof(struct PptpControlHeader), sizeof(new_pcid), (char *)&new_pcid, sizeof(new_pcid)) == 0) return NF_DROP; return NF_ACCEPT; } static int __init nf_nat_helper_pptp_init(void) { nf_nat_need_gre(); BUG_ON(nf_nat_pptp_hook_outbound != NULL); rcu_assign_pointer(nf_nat_pptp_hook_outbound, pptp_outbound_pkt); BUG_ON(nf_nat_pptp_hook_inbound != NULL); rcu_assign_pointer(nf_nat_pptp_hook_inbound, pptp_inbound_pkt); BUG_ON(nf_nat_pptp_hook_exp_gre != NULL); rcu_assign_pointer(nf_nat_pptp_hook_exp_gre, pptp_exp_gre); BUG_ON(nf_nat_pptp_hook_expectfn != NULL); rcu_assign_pointer(nf_nat_pptp_hook_expectfn, pptp_nat_expected); return 0; } static void __exit nf_nat_helper_pptp_fini(void) { rcu_assign_pointer(nf_nat_pptp_hook_expectfn, NULL); rcu_assign_pointer(nf_nat_pptp_hook_exp_gre, NULL); rcu_assign_pointer(nf_nat_pptp_hook_inbound, NULL); rcu_assign_pointer(nf_nat_pptp_hook_outbound, NULL); synchronize_rcu(); } module_init(nf_nat_helper_pptp_init); module_exit(nf_nat_helper_pptp_fini);
gpl-2.0
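Both pptp_outbound_pkt() and pptp_inbound_pkt() reduce to the same operation: overwrite one big-endian 16-bit call-ID field at a message-type-specific offset, which nf_nat_mangle_tcp_packet() then splices into the TCP stream. Here is a self-contained sketch of just that rewrite; the buffer layout, offset, and values are illustrative, not real PPTP framing:

/* Patch a big-endian u16 call ID at a given offset, as the helper above
 * asks nf_nat_mangle_tcp_packet() to do. Layout here is invented. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

static void mangle_callid(unsigned char *pkt, size_t cid_off, uint16_t new_cid)
{
	uint16_t old, be;

	memcpy(&old, pkt + cid_off, sizeof(old));
	printf("altering call id from 0x%04x to 0x%04x\n", ntohs(old), new_cid);

	be = htons(new_cid);		/* on-wire call IDs are big endian */
	memcpy(pkt + cid_off, &be, sizeof(be));
}

int main(void)
{
	unsigned char pkt[32] = { 0 };
	size_t cid_off = 12;		/* hypothetical callID offset */

	pkt[cid_off] = 0x00;
	pkt[cid_off + 1] = 0x2a;	/* pretend the old call ID is 42 */
	mangle_callid(pkt, cid_off, 0x1234);
	printf("new bytes: %02x %02x\n", pkt[cid_off], pkt[cid_off + 1]);
	return 0;
}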
JudsonWilson/CS244_RC3_Kernel
net/llc/llc_station.c
4883
3301
/* * llc_station.c - station component of LLC * * Copyright (c) 1997 by Procom Technology, Inc. * 2001-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br> * * This program can be redistributed or modified under the terms of the * GNU General Public License as published by the Free Software Foundation. * This program is distributed without any warranty or implied warranty * of merchantability or fitness for a particular purpose. * * See the GNU General Public License for more details. */ #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <net/llc.h> #include <net/llc_sap.h> #include <net/llc_conn.h> #include <net/llc_c_ac.h> #include <net/llc_s_ac.h> #include <net/llc_c_ev.h> #include <net/llc_c_st.h> #include <net/llc_s_ev.h> #include <net/llc_s_st.h> #include <net/llc_pdu.h> static int llc_stat_ev_rx_null_dsap_xid_c(struct sk_buff *skb) { struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); return LLC_PDU_IS_CMD(pdu) && /* command PDU */ LLC_PDU_TYPE_IS_U(pdu) && /* U type PDU */ LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_XID && !pdu->dsap ? 0 : 1; /* NULL DSAP value */ } static int llc_stat_ev_rx_null_dsap_test_c(struct sk_buff *skb) { struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); return LLC_PDU_IS_CMD(pdu) && /* command PDU */ LLC_PDU_TYPE_IS_U(pdu) && /* U type PDU */ LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_TEST && !pdu->dsap ? 0 : 1; /* NULL DSAP */ } static int llc_station_ac_send_xid_r(struct sk_buff *skb) { u8 mac_da[ETH_ALEN], dsap; int rc = 1; struct sk_buff *nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, sizeof(struct llc_xid_info)); if (!nskb) goto out; rc = 0; llc_pdu_decode_sa(skb, mac_da); llc_pdu_decode_ssap(skb, &dsap); llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, 0, dsap, LLC_PDU_RSP); llc_pdu_init_as_xid_rsp(nskb, LLC_XID_NULL_CLASS_2, 127); rc = llc_mac_hdr_init(nskb, skb->dev->dev_addr, mac_da); if (unlikely(rc)) goto free; dev_queue_xmit(nskb); out: return rc; free: kfree_skb(nskb); goto out; } static int llc_station_ac_send_test_r(struct sk_buff *skb) { u8 mac_da[ETH_ALEN], dsap; int rc = 1; u32 data_size; struct sk_buff *nskb; /* The test request command is type U (llc_len = 3) */ data_size = ntohs(eth_hdr(skb)->h_proto) - 3; nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, data_size); if (!nskb) goto out; rc = 0; llc_pdu_decode_sa(skb, mac_da); llc_pdu_decode_ssap(skb, &dsap); llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, 0, dsap, LLC_PDU_RSP); llc_pdu_init_as_test_rsp(nskb, skb); rc = llc_mac_hdr_init(nskb, skb->dev->dev_addr, mac_da); if (unlikely(rc)) goto free; dev_queue_xmit(nskb); out: return rc; free: kfree_skb(nskb); goto out; } /** * llc_station_rcv - send received pdu to the station state machine * @skb: received frame. * * Sends data unit to station state machine. */ static void llc_station_rcv(struct sk_buff *skb) { if (llc_stat_ev_rx_null_dsap_xid_c(skb)) llc_station_ac_send_xid_r(skb); else if (llc_stat_ev_rx_null_dsap_test_c(skb)) llc_station_ac_send_test_r(skb); kfree_skb(skb); } void __init llc_station_init(void) { llc_set_station_handler(llc_station_rcv); } void llc_station_exit(void) { llc_set_station_handler(NULL); }
gpl-2.0
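The two event predicates at the top of llc_station.c test the same three properties of an 802.2 LLC header: command PDU (C/R bit of the SSAP clear), U-format control field, and a NULL DSAP, plus the specific XID or TEST command code. A simplified stand-alone version of the XID check follows; the field layout is per IEEE 802.2 as I read it, and the sample bytes are made up:

/* Simplified model of llc_stat_ev_rx_null_dsap_xid_c(): classify a raw
 * 3-byte LLC header as a NULL-DSAP XID command. */
#include <stdio.h>
#include <stdint.h>

#define LLC_XID_CTRL	0xaf	/* XID with the P/F bit (0x10) cleared */

struct llc_hdr {
	uint8_t dsap, ssap, ctrl;
};

static int is_null_dsap_xid_cmd(const struct llc_hdr *h)
{
	int is_cmd = !(h->ssap & 0x01);		/* C/R bit clear => command */
	int is_u   = (h->ctrl & 0x03) == 0x03;	/* U format: low ctrl bits 11 */
	int is_xid = (h->ctrl & ~0x10) == LLC_XID_CTRL;	/* ignore P/F bit */

	return is_cmd && is_u && is_xid && h->dsap == 0x00;
}

int main(void)
{
	struct llc_hdr h = { .dsap = 0x00, .ssap = 0x02, .ctrl = 0xbf };

	printf("NULL-DSAP XID command: %s\n",
	       is_null_dsap_xid_cmd(&h) ? "yes" : "no");
	return 0;
}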
sktjdgns1189/android_kernel_tgnco_phx
sound/pci/emu10k1/io.c
7955
16252
/* * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * Creative Labs, Inc. * Routines for control of EMU10K1 chips * * BUGS: * -- * * TODO: * -- * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/time.h> #include <sound/core.h> #include <sound/emu10k1.h> #include <linux/delay.h> #include <linux/export.h> #include "p17v.h" unsigned int snd_emu10k1_ptr_read(struct snd_emu10k1 * emu, unsigned int reg, unsigned int chn) { unsigned long flags; unsigned int regptr, val; unsigned int mask; mask = emu->audigy ? A_PTR_ADDRESS_MASK : PTR_ADDRESS_MASK; regptr = ((reg << 16) & mask) | (chn & PTR_CHANNELNUM_MASK); if (reg & 0xff000000) { unsigned char size, offset; size = (reg >> 24) & 0x3f; offset = (reg >> 16) & 0x1f; mask = ((1 << size) - 1) << offset; spin_lock_irqsave(&emu->emu_lock, flags); outl(regptr, emu->port + PTR); val = inl(emu->port + DATA); spin_unlock_irqrestore(&emu->emu_lock, flags); return (val & mask) >> offset; } else { spin_lock_irqsave(&emu->emu_lock, flags); outl(regptr, emu->port + PTR); val = inl(emu->port + DATA); spin_unlock_irqrestore(&emu->emu_lock, flags); return val; } } EXPORT_SYMBOL(snd_emu10k1_ptr_read); void snd_emu10k1_ptr_write(struct snd_emu10k1 *emu, unsigned int reg, unsigned int chn, unsigned int data) { unsigned int regptr; unsigned long flags; unsigned int mask; if (!emu) { snd_printk(KERN_ERR "ptr_write: emu is null!\n"); dump_stack(); return; } mask = emu->audigy ? 
A_PTR_ADDRESS_MASK : PTR_ADDRESS_MASK; regptr = ((reg << 16) & mask) | (chn & PTR_CHANNELNUM_MASK); if (reg & 0xff000000) { unsigned char size, offset; size = (reg >> 24) & 0x3f; offset = (reg >> 16) & 0x1f; mask = ((1 << size) - 1) << offset; data = (data << offset) & mask; spin_lock_irqsave(&emu->emu_lock, flags); outl(regptr, emu->port + PTR); data |= inl(emu->port + DATA) & ~mask; outl(data, emu->port + DATA); spin_unlock_irqrestore(&emu->emu_lock, flags); } else { spin_lock_irqsave(&emu->emu_lock, flags); outl(regptr, emu->port + PTR); outl(data, emu->port + DATA); spin_unlock_irqrestore(&emu->emu_lock, flags); } } EXPORT_SYMBOL(snd_emu10k1_ptr_write); unsigned int snd_emu10k1_ptr20_read(struct snd_emu10k1 * emu, unsigned int reg, unsigned int chn) { unsigned long flags; unsigned int regptr, val; regptr = (reg << 16) | chn; spin_lock_irqsave(&emu->emu_lock, flags); outl(regptr, emu->port + 0x20 + PTR); val = inl(emu->port + 0x20 + DATA); spin_unlock_irqrestore(&emu->emu_lock, flags); return val; } void snd_emu10k1_ptr20_write(struct snd_emu10k1 *emu, unsigned int reg, unsigned int chn, unsigned int data) { unsigned int regptr; unsigned long flags; regptr = (reg << 16) | chn; spin_lock_irqsave(&emu->emu_lock, flags); outl(regptr, emu->port + 0x20 + PTR); outl(data, emu->port + 0x20 + DATA); spin_unlock_irqrestore(&emu->emu_lock, flags); } int snd_emu10k1_spi_write(struct snd_emu10k1 * emu, unsigned int data) { unsigned int reset, set; unsigned int reg, tmp; int n, result; int err = 0; /* This function is not re-entrant, so protect against it. */ spin_lock(&emu->spi_lock); if (emu->card_capabilities->ca0108_chip) reg = 0x3c; /* PTR20, reg 0x3c */ else { /* For other chip types the SPI register * is currently unknown. */ err = 1; goto spi_write_exit; } if (data > 0xffff) { /* Only 16bit values allowed */ err = 1; goto spi_write_exit; } tmp = snd_emu10k1_ptr20_read(emu, reg, 0); reset = (tmp & ~0x3ffff) | 0x20000; /* Set xxx20000 */ set = reset | 0x10000; /* Set xxx1xxxx */ snd_emu10k1_ptr20_write(emu, reg, 0, reset | data); tmp = snd_emu10k1_ptr20_read(emu, reg, 0); /* write post */ snd_emu10k1_ptr20_write(emu, reg, 0, set | data); result = 1; /* Wait for status bit to return to 0 */ for (n = 0; n < 100; n++) { udelay(10); tmp = snd_emu10k1_ptr20_read(emu, reg, 0); if (!(tmp & 0x10000)) { result = 0; break; } } if (result) { /* Timed out */ err = 1; goto spi_write_exit; } snd_emu10k1_ptr20_write(emu, reg, 0, reset | data); tmp = snd_emu10k1_ptr20_read(emu, reg, 0); /* Write post */ err = 0; spi_write_exit: spin_unlock(&emu->spi_lock); return err; } /* The ADC does not support i2c read, so only write is implemented */ int snd_emu10k1_i2c_write(struct snd_emu10k1 *emu, u32 reg, u32 value) { u32 tmp; int timeout = 0; int status; int retry; int err = 0; if ((reg > 0x7f) || (value > 0x1ff)) { snd_printk(KERN_ERR "i2c_write: invalid values.\n"); return -EINVAL; } /* This function is not re-entrant, so protect against it. 
*/ spin_lock(&emu->i2c_lock); tmp = reg << 25 | value << 16; /* This controls the I2C connected to the WM8775 ADC Codec */ snd_emu10k1_ptr20_write(emu, P17V_I2C_1, 0, tmp); tmp = snd_emu10k1_ptr20_read(emu, P17V_I2C_1, 0); /* write post */ for (retry = 0; retry < 10; retry++) { /* Send the data to i2c */ tmp = 0; tmp = tmp | (I2C_A_ADC_LAST|I2C_A_ADC_START|I2C_A_ADC_ADD); snd_emu10k1_ptr20_write(emu, P17V_I2C_ADDR, 0, tmp); /* Wait till the transaction ends */ while (1) { mdelay(1); status = snd_emu10k1_ptr20_read(emu, P17V_I2C_ADDR, 0); timeout++; if ((status & I2C_A_ADC_START) == 0) break; if (timeout > 1000) { snd_printk(KERN_WARNING "emu10k1:I2C:timeout status=0x%x\n", status); break; } } //Read back and see if the transaction is successful if ((status & I2C_A_ADC_ABORT) == 0) break; } if (retry == 10) { snd_printk(KERN_ERR "Writing to ADC failed!\n"); snd_printk(KERN_ERR "status=0x%x, reg=%d, value=%d\n", status, reg, value); /* dump_stack(); */ err = -EINVAL; } spin_unlock(&emu->i2c_lock); return err; } int snd_emu1010_fpga_write(struct snd_emu10k1 * emu, u32 reg, u32 value) { unsigned long flags; if (reg > 0x3f) return 1; reg += 0x40; /* 0x40 upwards are registers. */ if (value > 0x3f) /* 0 to 0x3f are values */ return 1; spin_lock_irqsave(&emu->emu_lock, flags); outl(reg, emu->port + A_IOCFG); udelay(10); outl(reg | 0x80, emu->port + A_IOCFG); /* High bit clocks the value into the fpga. */ udelay(10); outl(value, emu->port + A_IOCFG); udelay(10); outl(value | 0x80 , emu->port + A_IOCFG); /* High bit clocks the value into the fpga. */ spin_unlock_irqrestore(&emu->emu_lock, flags); return 0; } int snd_emu1010_fpga_read(struct snd_emu10k1 * emu, u32 reg, u32 *value) { unsigned long flags; if (reg > 0x3f) return 1; reg += 0x40; /* 0x40 upwards are registers. */ spin_lock_irqsave(&emu->emu_lock, flags); outl(reg, emu->port + A_IOCFG); udelay(10); outl(reg | 0x80, emu->port + A_IOCFG); /* High bit clocks the value into the fpga. */ udelay(10); *value = ((inl(emu->port + A_IOCFG) >> 8) & 0x7f); spin_unlock_irqrestore(&emu->emu_lock, flags); return 0; } /* Each Destination has one and only one Source, * but one Source can feed any number of Destinations simultaneously. 
*/ int snd_emu1010_fpga_link_dst_src_write(struct snd_emu10k1 * emu, u32 dst, u32 src) { snd_emu1010_fpga_write(emu, 0x00, ((dst >> 8) & 0x3f) ); snd_emu1010_fpga_write(emu, 0x01, (dst & 0x3f) ); snd_emu1010_fpga_write(emu, 0x02, ((src >> 8) & 0x3f) ); snd_emu1010_fpga_write(emu, 0x03, (src & 0x3f) ); return 0; } void snd_emu10k1_intr_enable(struct snd_emu10k1 *emu, unsigned int intrenb) { unsigned long flags; unsigned int enable; spin_lock_irqsave(&emu->emu_lock, flags); enable = inl(emu->port + INTE) | intrenb; outl(enable, emu->port + INTE); spin_unlock_irqrestore(&emu->emu_lock, flags); } void snd_emu10k1_intr_disable(struct snd_emu10k1 *emu, unsigned int intrenb) { unsigned long flags; unsigned int enable; spin_lock_irqsave(&emu->emu_lock, flags); enable = inl(emu->port + INTE) & ~intrenb; outl(enable, emu->port + INTE); spin_unlock_irqrestore(&emu->emu_lock, flags); } void snd_emu10k1_voice_intr_enable(struct snd_emu10k1 *emu, unsigned int voicenum) { unsigned long flags; unsigned int val; spin_lock_irqsave(&emu->emu_lock, flags); /* voice interrupt */ if (voicenum >= 32) { outl(CLIEH << 16, emu->port + PTR); val = inl(emu->port + DATA); val |= 1 << (voicenum - 32); } else { outl(CLIEL << 16, emu->port + PTR); val = inl(emu->port + DATA); val |= 1 << voicenum; } outl(val, emu->port + DATA); spin_unlock_irqrestore(&emu->emu_lock, flags); } void snd_emu10k1_voice_intr_disable(struct snd_emu10k1 *emu, unsigned int voicenum) { unsigned long flags; unsigned int val; spin_lock_irqsave(&emu->emu_lock, flags); /* voice interrupt */ if (voicenum >= 32) { outl(CLIEH << 16, emu->port + PTR); val = inl(emu->port + DATA); val &= ~(1 << (voicenum - 32)); } else { outl(CLIEL << 16, emu->port + PTR); val = inl(emu->port + DATA); val &= ~(1 << voicenum); } outl(val, emu->port + DATA); spin_unlock_irqrestore(&emu->emu_lock, flags); } void snd_emu10k1_voice_intr_ack(struct snd_emu10k1 *emu, unsigned int voicenum) { unsigned long flags; spin_lock_irqsave(&emu->emu_lock, flags); /* voice interrupt */ if (voicenum >= 32) { outl(CLIPH << 16, emu->port + PTR); voicenum = 1 << (voicenum - 32); } else { outl(CLIPL << 16, emu->port + PTR); voicenum = 1 << voicenum; } outl(voicenum, emu->port + DATA); spin_unlock_irqrestore(&emu->emu_lock, flags); } void snd_emu10k1_voice_half_loop_intr_enable(struct snd_emu10k1 *emu, unsigned int voicenum) { unsigned long flags; unsigned int val; spin_lock_irqsave(&emu->emu_lock, flags); /* voice interrupt */ if (voicenum >= 32) { outl(HLIEH << 16, emu->port + PTR); val = inl(emu->port + DATA); val |= 1 << (voicenum - 32); } else { outl(HLIEL << 16, emu->port + PTR); val = inl(emu->port + DATA); val |= 1 << voicenum; } outl(val, emu->port + DATA); spin_unlock_irqrestore(&emu->emu_lock, flags); } void snd_emu10k1_voice_half_loop_intr_disable(struct snd_emu10k1 *emu, unsigned int voicenum) { unsigned long flags; unsigned int val; spin_lock_irqsave(&emu->emu_lock, flags); /* voice interrupt */ if (voicenum >= 32) { outl(HLIEH << 16, emu->port + PTR); val = inl(emu->port + DATA); val &= ~(1 << (voicenum - 32)); } else { outl(HLIEL << 16, emu->port + PTR); val = inl(emu->port + DATA); val &= ~(1 << voicenum); } outl(val, emu->port + DATA); spin_unlock_irqrestore(&emu->emu_lock, flags); } void snd_emu10k1_voice_half_loop_intr_ack(struct snd_emu10k1 *emu, unsigned int voicenum) { unsigned long flags; spin_lock_irqsave(&emu->emu_lock, flags); /* voice interrupt */ if (voicenum >= 32) { outl(HLIPH << 16, emu->port + PTR); voicenum = 1 << (voicenum - 32); } else { outl(HLIPL << 16, 
emu->port + PTR); voicenum = 1 << voicenum; } outl(voicenum, emu->port + DATA); spin_unlock_irqrestore(&emu->emu_lock, flags); } void snd_emu10k1_voice_set_loop_stop(struct snd_emu10k1 *emu, unsigned int voicenum) { unsigned long flags; unsigned int sol; spin_lock_irqsave(&emu->emu_lock, flags); /* voice interrupt */ if (voicenum >= 32) { outl(SOLEH << 16, emu->port + PTR); sol = inl(emu->port + DATA); sol |= 1 << (voicenum - 32); } else { outl(SOLEL << 16, emu->port + PTR); sol = inl(emu->port + DATA); sol |= 1 << voicenum; } outl(sol, emu->port + DATA); spin_unlock_irqrestore(&emu->emu_lock, flags); } void snd_emu10k1_voice_clear_loop_stop(struct snd_emu10k1 *emu, unsigned int voicenum) { unsigned long flags; unsigned int sol; spin_lock_irqsave(&emu->emu_lock, flags); /* voice interrupt */ if (voicenum >= 32) { outl(SOLEH << 16, emu->port + PTR); sol = inl(emu->port + DATA); sol &= ~(1 << (voicenum - 32)); } else { outl(SOLEL << 16, emu->port + PTR); sol = inl(emu->port + DATA); sol &= ~(1 << voicenum); } outl(sol, emu->port + DATA); spin_unlock_irqrestore(&emu->emu_lock, flags); } void snd_emu10k1_wait(struct snd_emu10k1 *emu, unsigned int wait) { volatile unsigned count; unsigned int newtime = 0, curtime; curtime = inl(emu->port + WC) >> 6; while (wait-- > 0) { count = 0; while (count++ < 16384) { newtime = inl(emu->port + WC) >> 6; if (newtime != curtime) break; } if (count > 16384) break; curtime = newtime; } } unsigned short snd_emu10k1_ac97_read(struct snd_ac97 *ac97, unsigned short reg) { struct snd_emu10k1 *emu = ac97->private_data; unsigned long flags; unsigned short val; spin_lock_irqsave(&emu->emu_lock, flags); outb(reg, emu->port + AC97ADDRESS); val = inw(emu->port + AC97DATA); spin_unlock_irqrestore(&emu->emu_lock, flags); return val; } void snd_emu10k1_ac97_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short data) { struct snd_emu10k1 *emu = ac97->private_data; unsigned long flags; spin_lock_irqsave(&emu->emu_lock, flags); outb(reg, emu->port + AC97ADDRESS); outw(data, emu->port + AC97DATA); spin_unlock_irqrestore(&emu->emu_lock, flags); } /* * convert rate to pitch */ unsigned int snd_emu10k1_rate_to_pitch(unsigned int rate) { static u32 logMagTable[128] = { 0x00000, 0x02dfc, 0x05b9e, 0x088e6, 0x0b5d6, 0x0e26f, 0x10eb3, 0x13aa2, 0x1663f, 0x1918a, 0x1bc84, 0x1e72e, 0x2118b, 0x23b9a, 0x2655d, 0x28ed5, 0x2b803, 0x2e0e8, 0x30985, 0x331db, 0x359eb, 0x381b6, 0x3a93d, 0x3d081, 0x3f782, 0x41e42, 0x444c1, 0x46b01, 0x49101, 0x4b6c4, 0x4dc49, 0x50191, 0x5269e, 0x54b6f, 0x57006, 0x59463, 0x5b888, 0x5dc74, 0x60029, 0x623a7, 0x646ee, 0x66a00, 0x68cdd, 0x6af86, 0x6d1fa, 0x6f43c, 0x7164b, 0x73829, 0x759d4, 0x77b4f, 0x79c9a, 0x7bdb5, 0x7dea1, 0x7ff5e, 0x81fed, 0x8404e, 0x86082, 0x88089, 0x8a064, 0x8c014, 0x8df98, 0x8fef1, 0x91e20, 0x93d26, 0x95c01, 0x97ab4, 0x9993e, 0x9b79f, 0x9d5d9, 0x9f3ec, 0xa11d8, 0xa2f9d, 0xa4d3c, 0xa6ab5, 0xa8808, 0xaa537, 0xac241, 0xadf26, 0xafbe7, 0xb1885, 0xb3500, 0xb5157, 0xb6d8c, 0xb899f, 0xba58f, 0xbc15e, 0xbdd0c, 0xbf899, 0xc1404, 0xc2f50, 0xc4a7b, 0xc6587, 0xc8073, 0xc9b3f, 0xcb5ed, 0xcd07c, 0xceaec, 0xd053f, 0xd1f73, 0xd398a, 0xd5384, 0xd6d60, 0xd8720, 0xda0c3, 0xdba4a, 0xdd3b4, 0xded03, 0xe0636, 0xe1f4e, 0xe384a, 0xe512c, 0xe69f3, 0xe829f, 0xe9b31, 0xeb3a9, 0xecc08, 0xee44c, 0xefc78, 0xf148a, 0xf2c83, 0xf4463, 0xf5c2a, 0xf73da, 0xf8b71, 0xfa2f0, 0xfba57, 0xfd1a7, 0xfe8df }; static char logSlopeTable[128] = { 0x5c, 0x5c, 0x5b, 0x5a, 0x5a, 0x59, 0x58, 0x58, 0x57, 0x56, 0x56, 0x55, 0x55, 0x54, 0x53, 0x53, 0x52, 0x52, 0x51, 0x51, 0x50, 0x50, 0x4f, 
0x4f, 0x4e, 0x4d, 0x4d, 0x4d, 0x4c, 0x4c, 0x4b, 0x4b, 0x4a, 0x4a, 0x49, 0x49, 0x48, 0x48, 0x47, 0x47, 0x47, 0x46, 0x46, 0x45, 0x45, 0x45, 0x44, 0x44, 0x43, 0x43, 0x43, 0x42, 0x42, 0x42, 0x41, 0x41, 0x41, 0x40, 0x40, 0x40, 0x3f, 0x3f, 0x3f, 0x3e, 0x3e, 0x3e, 0x3d, 0x3d, 0x3d, 0x3c, 0x3c, 0x3c, 0x3b, 0x3b, 0x3b, 0x3b, 0x3a, 0x3a, 0x3a, 0x39, 0x39, 0x39, 0x39, 0x38, 0x38, 0x38, 0x38, 0x37, 0x37, 0x37, 0x37, 0x36, 0x36, 0x36, 0x36, 0x35, 0x35, 0x35, 0x35, 0x34, 0x34, 0x34, 0x34, 0x34, 0x33, 0x33, 0x33, 0x33, 0x32, 0x32, 0x32, 0x32, 0x32, 0x31, 0x31, 0x31, 0x31, 0x31, 0x30, 0x30, 0x30, 0x30, 0x30, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f }; int i; if (rate == 0) return 0; /* Bail out if no leading "1" */ rate *= 11185; /* Scale 48000 to 0x20002380 */ for (i = 31; i > 0; i--) { if (rate & 0x80000000) { /* Detect leading "1" */ return (((unsigned int) (i - 15) << 20) + logMagTable[0x7f & (rate >> 24)] + (0x7f & (rate >> 17)) * logSlopeTable[0x7f & (rate >> 24)]); } rate <<= 1; } return 0; /* Should never reach this point */ }
gpl-2.0
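snd_emu10k1_ptr_read()/..._write() support packed sub-registers: when the high byte of the register word is non-zero, bits 24-29 give a bitfield width and bits 16-20 its offset, and a write becomes a masked read-modify-write of the DATA port. A user-space model of that encoding, with the hardware register replaced by a plain variable:

/* Model of the sub-register write path in snd_emu10k1_ptr_write(); the
 * 'hw_reg' variable stands in for inl()/outl() on the DATA port. */
#include <stdio.h>
#include <stdint.h>

static uint32_t hw_reg = 0xdeadbeef;

static void ptr_write(uint32_t reg, uint32_t data)
{
	if (reg & 0xff000000) {
		unsigned size   = (reg >> 24) & 0x3f;
		unsigned offset = (reg >> 16) & 0x1f;
		uint32_t mask   = ((1u << size) - 1) << offset;

		/* read-modify-write just the selected field */
		hw_reg = (hw_reg & ~mask) | ((data << offset) & mask);
	} else {
		hw_reg = data;	/* plain full-width write */
	}
}

int main(void)
{
	/* hypothetical descriptor: a 4-bit field at bit offset 8 */
	uint32_t reg = (4u << 24) | (8u << 16);

	ptr_write(reg, 0x5);
	printf("register after field write: 0x%08x\n", hw_reg); /* 0xdeadb5ef */
	return 0;
}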
lollipop-og/kernel_google
drivers/s390/cio/device_id.c
9235
5853
/* * CCW device SENSE ID I/O handling. * * Copyright IBM Corp. 2002,2009 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/types.h> #include <linux/errno.h> #include <asm/ccwdev.h> #include <asm/setup.h> #include <asm/cio.h> #include <asm/diag.h> #include "cio.h" #include "cio_debug.h" #include "device.h" #include "io_sch.h" #define SENSE_ID_RETRIES 256 #define SENSE_ID_TIMEOUT (10 * HZ) #define SENSE_ID_MIN_LEN 4 #define SENSE_ID_BASIC_LEN 7 /** * diag210_to_senseid - convert diag 0x210 data to sense id information * @senseid: sense id * @diag: diag 0x210 data * * Return 0 on success, non-zero otherwise. */ static int diag210_to_senseid(struct senseid *senseid, struct diag210 *diag) { static struct { int class, type, cu_type; } vm_devices[] = { { 0x08, 0x01, 0x3480 }, { 0x08, 0x02, 0x3430 }, { 0x08, 0x10, 0x3420 }, { 0x08, 0x42, 0x3424 }, { 0x08, 0x44, 0x9348 }, { 0x08, 0x81, 0x3490 }, { 0x08, 0x82, 0x3422 }, { 0x10, 0x41, 0x1403 }, { 0x10, 0x42, 0x3211 }, { 0x10, 0x43, 0x3203 }, { 0x10, 0x45, 0x3800 }, { 0x10, 0x47, 0x3262 }, { 0x10, 0x48, 0x3820 }, { 0x10, 0x49, 0x3800 }, { 0x10, 0x4a, 0x4245 }, { 0x10, 0x4b, 0x4248 }, { 0x10, 0x4d, 0x3800 }, { 0x10, 0x4e, 0x3820 }, { 0x10, 0x4f, 0x3820 }, { 0x10, 0x82, 0x2540 }, { 0x10, 0x84, 0x3525 }, { 0x20, 0x81, 0x2501 }, { 0x20, 0x82, 0x2540 }, { 0x20, 0x84, 0x3505 }, { 0x40, 0x01, 0x3278 }, { 0x40, 0x04, 0x3277 }, { 0x40, 0x80, 0x2250 }, { 0x40, 0xc0, 0x5080 }, { 0x80, 0x00, 0x3215 }, }; int i; /* Special case for osa devices. */ if (diag->vrdcvcla == 0x02 && diag->vrdcvtyp == 0x20) { senseid->cu_type = 0x3088; senseid->cu_model = 0x60; senseid->reserved = 0xff; return 0; } for (i = 0; i < ARRAY_SIZE(vm_devices); i++) { if (diag->vrdcvcla == vm_devices[i].class && diag->vrdcvtyp == vm_devices[i].type) { senseid->cu_type = vm_devices[i].cu_type; senseid->reserved = 0xff; return 0; } } return -ENODEV; } /** * diag_get_dev_info - retrieve device information via diag 0x210 * @cdev: ccw device * * Returns zero on success, non-zero otherwise. */ static int diag210_get_dev_info(struct ccw_device *cdev) { struct ccw_dev_id *dev_id = &cdev->private->dev_id; struct senseid *senseid = &cdev->private->senseid; struct diag210 diag_data; int rc; if (dev_id->ssid != 0) return -ENODEV; memset(&diag_data, 0, sizeof(diag_data)); diag_data.vrdcdvno = dev_id->devno; diag_data.vrdclen = sizeof(diag_data); rc = diag210(&diag_data); CIO_TRACE_EVENT(4, "diag210"); CIO_HEX_EVENT(4, &rc, sizeof(rc)); CIO_HEX_EVENT(4, &diag_data, sizeof(diag_data)); if (rc != 0 && rc != 2) goto err_failed; if (diag210_to_senseid(senseid, &diag_data)) goto err_unknown; return 0; err_unknown: CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: unknown diag210 data\n", dev_id->ssid, dev_id->devno); return -ENODEV; err_failed: CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: diag210 failed (rc=%d)\n", dev_id->ssid, dev_id->devno, rc); return -ENODEV; } /* * Initialize SENSE ID data. */ static void snsid_init(struct ccw_device *cdev) { cdev->private->flags.esid = 0; memset(&cdev->private->senseid, 0, sizeof(cdev->private->senseid)); cdev->private->senseid.cu_type = 0xffff; } /* * Check for complete SENSE ID data. */ static int snsid_check(struct ccw_device *cdev, void *data) { struct cmd_scsw *scsw = &cdev->private->irb.scsw.cmd; int len = sizeof(struct senseid) - scsw->count; /* Check for incomplete SENSE ID data. 
*/ if (len < SENSE_ID_MIN_LEN) goto out_restart; if (cdev->private->senseid.cu_type == 0xffff) goto out_restart; /* Check for incompatible SENSE ID data. */ if (cdev->private->senseid.reserved != 0xff) return -EOPNOTSUPP; /* Check for extended-identification information. */ if (len > SENSE_ID_BASIC_LEN) cdev->private->flags.esid = 1; return 0; out_restart: snsid_init(cdev); return -EAGAIN; } /* * Process SENSE ID request result. */ static void snsid_callback(struct ccw_device *cdev, void *data, int rc) { struct ccw_dev_id *id = &cdev->private->dev_id; struct senseid *senseid = &cdev->private->senseid; int vm = 0; if (rc && MACHINE_IS_VM) { /* Try diag 0x210 fallback on z/VM. */ snsid_init(cdev); if (diag210_get_dev_info(cdev) == 0) { rc = 0; vm = 1; } } CIO_MSG_EVENT(2, "snsid: device 0.%x.%04x: rc=%d %04x/%02x " "%04x/%02x%s\n", id->ssid, id->devno, rc, senseid->cu_type, senseid->cu_model, senseid->dev_type, senseid->dev_model, vm ? " (diag210)" : ""); ccw_device_sense_id_done(cdev, rc); } /** * ccw_device_sense_id_start - perform SENSE ID * @cdev: ccw device * * Execute a SENSE ID channel program on @cdev to update its sense id * information. When finished, call ccw_device_sense_id_done with a * return code specifying the result. */ void ccw_device_sense_id_start(struct ccw_device *cdev) { struct subchannel *sch = to_subchannel(cdev->dev.parent); struct ccw_request *req = &cdev->private->req; struct ccw1 *cp = cdev->private->iccws; CIO_TRACE_EVENT(4, "snsid"); CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id)); /* Data setup. */ snsid_init(cdev); /* Channel program setup. */ cp->cmd_code = CCW_CMD_SENSE_ID; cp->cda = (u32) (addr_t) &cdev->private->senseid; cp->count = sizeof(struct senseid); cp->flags = CCW_FLAG_SLI; /* Request setup. */ memset(req, 0, sizeof(*req)); req->cp = cp; req->timeout = SENSE_ID_TIMEOUT; req->maxretries = SENSE_ID_RETRIES; req->lpm = sch->schib.pmcw.pam & sch->opm; req->check = snsid_check; req->callback = snsid_callback; ccw_request_start(cdev); }
gpl-2.0
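diag210_to_senseid() is essentially a linear table lookup from the (class, type) pair reported by z/VM's DIAG 0x210 to a control-unit type, with OSA devices special-cased first. The walk itself, extracted into a stand-alone program with a few rows copied from the driver's table:

/* Stand-alone mirror of diag210_to_senseid()'s table walk. Rows copied
 * from the vm_devices[] table above (subset only). */
#include <stdio.h>

static const struct {
	int class, type, cu_type;
} vm_devices[] = {
	{ 0x08, 0x01, 0x3480 },
	{ 0x10, 0x41, 0x1403 },
	{ 0x40, 0x01, 0x3278 },
	{ 0x80, 0x00, 0x3215 },
};

static int lookup_cu_type(int class, int type)
{
	unsigned i;

	for (i = 0; i < sizeof(vm_devices) / sizeof(vm_devices[0]); i++)
		if (vm_devices[i].class == class && vm_devices[i].type == type)
			return vm_devices[i].cu_type;
	return -1;	/* the driver returns -ENODEV here */
}

int main(void)
{
	printf("cu_type for class 0x40, type 0x01: 0x%x\n",
	       lookup_cu_type(0x40, 0x01));
	return 0;
}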
dheerajjamwal/Backport_ltsi_3.14.22
arch/powerpc/sysdev/cpm2.c
12819
8753
/* * General Purpose functions for the global management of the * 8260 Communication Processor Module. * Copyright (c) 1999-2001 Dan Malek <dan@embeddedalley.com> * Copyright (c) 2000 MontaVista Software, Inc (source@mvista.com) * 2.3.99 Updates * * 2006 (c) MontaVista Software, Inc. * Vitaly Bordug <vbordug@ru.mvista.com> * Merged to arch/powerpc from arch/ppc/syslib/cpm2_common.c * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ /* * * In addition to the individual control of the communication * channels, there are a few functions that globally affect the * communication processor. * * Buffer descriptors must be allocated from the dual ported memory * space. The allocator for that is here. When the communication * process is reset, we reclaim the memory available. There is * currently no deallocator for this memory. */ #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/param.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/of.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/mpc8260.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/cpm2.h> #include <asm/rheap.h> #include <asm/fs_pd.h> #include <sysdev/fsl_soc.h> cpm_cpm2_t __iomem *cpmp; /* Pointer to comm processor space */ /* We allocate this here because it is used almost exclusively for * the communication processor devices. */ cpm2_map_t __iomem *cpm2_immr; EXPORT_SYMBOL(cpm2_immr); #define CPM_MAP_SIZE (0x40000) /* 256k - the PQ3 reserve this amount of space for CPM as it is larger than on PQ2 */ void __init cpm2_reset(void) { #ifdef CONFIG_PPC_85xx cpm2_immr = ioremap(get_immrbase() + 0x80000, CPM_MAP_SIZE); #else cpm2_immr = ioremap(get_immrbase(), CPM_MAP_SIZE); #endif /* Reclaim the DP memory for our use. */ cpm_muram_init(); /* Tell everyone where the comm processor resides. */ cpmp = &cpm2_immr->im_cpm; #ifndef CONFIG_PPC_EARLY_DEBUG_CPM /* Reset the CPM. */ cpm_command(CPM_CR_RST, 0); #endif } static DEFINE_SPINLOCK(cmd_lock); #define MAX_CR_CMD_LOOPS 10000 int cpm_command(u32 command, u8 opcode) { int i, ret; unsigned long flags; spin_lock_irqsave(&cmd_lock, flags); ret = 0; out_be32(&cpmp->cp_cpcr, command | opcode | CPM_CR_FLG); for (i = 0; i < MAX_CR_CMD_LOOPS; i++) if ((in_be32(&cpmp->cp_cpcr) & CPM_CR_FLG) == 0) goto out; printk(KERN_ERR "%s(): Not able to issue CPM command\n", __func__); ret = -EIO; out: spin_unlock_irqrestore(&cmd_lock, flags); return ret; } EXPORT_SYMBOL(cpm_command); /* Set a baud rate generator. This needs lots of work. There are * eight BRGs, which can be connected to the CPM channels or output * as clocks. The BRGs are in two different block of internal * memory mapped space. * The baud rate clock is the system clock divided by something. * It was set up long ago during the initial boot phase and is * is given to us. * Baud rate clocks are zero-based in the driver code (as that maps * to port numbers). Documentation uses 1-based numbering. */ void __cpm2_setbrg(uint brg, uint rate, uint clk, int div16, int src) { u32 __iomem *bp; u32 val; /* This is good enough to get SMCs running..... */ if (brg < 4) { bp = cpm2_map_size(im_brgc1, 16); } else { bp = cpm2_map_size(im_brgc5, 16); brg -= 4; } bp += brg; /* Round the clock divider to the nearest integer. 
*/ val = (((clk * 2 / rate) - 1) & ~1) | CPM_BRG_EN | src; if (div16) val |= CPM_BRG_DIV16; out_be32(bp, val); cpm2_unmap(bp); } EXPORT_SYMBOL(__cpm2_setbrg); int cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode) { int ret = 0; int shift; int i, bits = 0; cpmux_t __iomem *im_cpmux; u32 __iomem *reg; u32 mask = 7; u8 clk_map[][3] = { {CPM_CLK_FCC1, CPM_BRG5, 0}, {CPM_CLK_FCC1, CPM_BRG6, 1}, {CPM_CLK_FCC1, CPM_BRG7, 2}, {CPM_CLK_FCC1, CPM_BRG8, 3}, {CPM_CLK_FCC1, CPM_CLK9, 4}, {CPM_CLK_FCC1, CPM_CLK10, 5}, {CPM_CLK_FCC1, CPM_CLK11, 6}, {CPM_CLK_FCC1, CPM_CLK12, 7}, {CPM_CLK_FCC2, CPM_BRG5, 0}, {CPM_CLK_FCC2, CPM_BRG6, 1}, {CPM_CLK_FCC2, CPM_BRG7, 2}, {CPM_CLK_FCC2, CPM_BRG8, 3}, {CPM_CLK_FCC2, CPM_CLK13, 4}, {CPM_CLK_FCC2, CPM_CLK14, 5}, {CPM_CLK_FCC2, CPM_CLK15, 6}, {CPM_CLK_FCC2, CPM_CLK16, 7}, {CPM_CLK_FCC3, CPM_BRG5, 0}, {CPM_CLK_FCC3, CPM_BRG6, 1}, {CPM_CLK_FCC3, CPM_BRG7, 2}, {CPM_CLK_FCC3, CPM_BRG8, 3}, {CPM_CLK_FCC3, CPM_CLK13, 4}, {CPM_CLK_FCC3, CPM_CLK14, 5}, {CPM_CLK_FCC3, CPM_CLK15, 6}, {CPM_CLK_FCC3, CPM_CLK16, 7}, {CPM_CLK_SCC1, CPM_BRG1, 0}, {CPM_CLK_SCC1, CPM_BRG2, 1}, {CPM_CLK_SCC1, CPM_BRG3, 2}, {CPM_CLK_SCC1, CPM_BRG4, 3}, {CPM_CLK_SCC1, CPM_CLK11, 4}, {CPM_CLK_SCC1, CPM_CLK12, 5}, {CPM_CLK_SCC1, CPM_CLK3, 6}, {CPM_CLK_SCC1, CPM_CLK4, 7}, {CPM_CLK_SCC2, CPM_BRG1, 0}, {CPM_CLK_SCC2, CPM_BRG2, 1}, {CPM_CLK_SCC2, CPM_BRG3, 2}, {CPM_CLK_SCC2, CPM_BRG4, 3}, {CPM_CLK_SCC2, CPM_CLK11, 4}, {CPM_CLK_SCC2, CPM_CLK12, 5}, {CPM_CLK_SCC2, CPM_CLK3, 6}, {CPM_CLK_SCC2, CPM_CLK4, 7}, {CPM_CLK_SCC3, CPM_BRG1, 0}, {CPM_CLK_SCC3, CPM_BRG2, 1}, {CPM_CLK_SCC3, CPM_BRG3, 2}, {CPM_CLK_SCC3, CPM_BRG4, 3}, {CPM_CLK_SCC3, CPM_CLK5, 4}, {CPM_CLK_SCC3, CPM_CLK6, 5}, {CPM_CLK_SCC3, CPM_CLK7, 6}, {CPM_CLK_SCC3, CPM_CLK8, 7}, {CPM_CLK_SCC4, CPM_BRG1, 0}, {CPM_CLK_SCC4, CPM_BRG2, 1}, {CPM_CLK_SCC4, CPM_BRG3, 2}, {CPM_CLK_SCC4, CPM_BRG4, 3}, {CPM_CLK_SCC4, CPM_CLK5, 4}, {CPM_CLK_SCC4, CPM_CLK6, 5}, {CPM_CLK_SCC4, CPM_CLK7, 6}, {CPM_CLK_SCC4, CPM_CLK8, 7}, }; im_cpmux = cpm2_map(im_cpmux); switch (target) { case CPM_CLK_SCC1: reg = &im_cpmux->cmx_scr; shift = 24; break; case CPM_CLK_SCC2: reg = &im_cpmux->cmx_scr; shift = 16; break; case CPM_CLK_SCC3: reg = &im_cpmux->cmx_scr; shift = 8; break; case CPM_CLK_SCC4: reg = &im_cpmux->cmx_scr; shift = 0; break; case CPM_CLK_FCC1: reg = &im_cpmux->cmx_fcr; shift = 24; break; case CPM_CLK_FCC2: reg = &im_cpmux->cmx_fcr; shift = 16; break; case CPM_CLK_FCC3: reg = &im_cpmux->cmx_fcr; shift = 8; break; default: printk(KERN_ERR "cpm2_clock_setup: invalid clock target\n"); return -EINVAL; } for (i = 0; i < ARRAY_SIZE(clk_map); i++) { if (clk_map[i][0] == target && clk_map[i][1] == clock) { bits = clk_map[i][2]; break; } } if (i == ARRAY_SIZE(clk_map)) ret = -EINVAL; bits <<= shift; mask <<= shift; if (mode == CPM_CLK_RTX) { bits |= bits << 3; mask |= mask << 3; } else if (mode == CPM_CLK_RX) { bits <<= 3; mask <<= 3; } out_be32(reg, (in_be32(reg) & ~mask) | bits); cpm2_unmap(im_cpmux); return ret; } int cpm2_smc_clk_setup(enum cpm_clk_target target, int clock) { int ret = 0; int shift; int i, bits = 0; cpmux_t __iomem *im_cpmux; u8 __iomem *reg; u8 mask = 3; u8 clk_map[][3] = { {CPM_CLK_SMC1, CPM_BRG1, 0}, {CPM_CLK_SMC1, CPM_BRG7, 1}, {CPM_CLK_SMC1, CPM_CLK7, 2}, {CPM_CLK_SMC1, CPM_CLK9, 3}, {CPM_CLK_SMC2, CPM_BRG2, 0}, {CPM_CLK_SMC2, CPM_BRG8, 1}, {CPM_CLK_SMC2, CPM_CLK4, 2}, {CPM_CLK_SMC2, CPM_CLK15, 3}, }; im_cpmux = cpm2_map(im_cpmux); switch (target) { case CPM_CLK_SMC1: reg = &im_cpmux->cmx_smr; mask = 3; shift = 4; break; case CPM_CLK_SMC2: reg = 
&im_cpmux->cmx_smr; mask = 3; shift = 0; break; default: printk(KERN_ERR "cpm2_smc_clock_setup: invalid clock target\n"); return -EINVAL; } for (i = 0; i < ARRAY_SIZE(clk_map); i++) { if (clk_map[i][0] == target && clk_map[i][1] == clock) { bits = clk_map[i][2]; break; } } if (i == ARRAY_SIZE(clk_map)) ret = -EINVAL; bits <<= shift; mask <<= shift; out_8(reg, (in_8(reg) & ~mask) | bits); cpm2_unmap(im_cpmux); return ret; } struct cpm2_ioports { u32 dir, par, sor, odr, dat; u32 res[3]; }; void cpm2_set_pin(int port, int pin, int flags) { struct cpm2_ioports __iomem *iop = (struct cpm2_ioports __iomem *)&cpm2_immr->im_ioport; pin = 1 << (31 - pin); if (flags & CPM_PIN_OUTPUT) setbits32(&iop[port].dir, pin); else clrbits32(&iop[port].dir, pin); if (!(flags & CPM_PIN_GPIO)) setbits32(&iop[port].par, pin); else clrbits32(&iop[port].par, pin); if (flags & CPM_PIN_SECONDARY) setbits32(&iop[port].sor, pin); else clrbits32(&iop[port].sor, pin); if (flags & CPM_PIN_OPENDRAIN) setbits32(&iop[port].odr, pin); else clrbits32(&iop[port].odr, pin); } static int cpm_init_par_io(void) { struct device_node *np; for_each_compatible_node(np, NULL, "fsl,cpm2-pario-bank") cpm2_gpiochip_add32(np); return 0; } arch_initcall(cpm_init_par_io);
gpl-2.0
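For orientation, here is a minimal sketch of how a hypothetical board-setup file might combine the three helpers above. The SCC1/BRG1 routing, the assumed BRG input-clock frequency, and the port-D pin choice are all illustrative, not taken from any real board port; CPM_BRG_EXTC_INT is the internal-clock source selector from the cpm headers.

/* Hypothetical board init (illustrative only): route BRG1 to SCC1
 * for both RX and TX, program the BRG for 115200 baud, and claim
 * one port-D pin.  BRCLK_HZ and the pin number are assumptions. */
#define BRCLK_HZ	16666666	/* assumed BRG input clock */

static void __init example_cpm2_setup(void)
{
	/* SCC1 RX and TX both clocked from BRG1 (see clk_map above) */
	cpm2_clk_setup(CPM_CLK_SCC1, CPM_BRG1, CPM_CLK_RTX);

	/* BRG1 (index 0): 115200 baud, no /16 prescale, internal clock */
	__cpm2_setbrg(0, 115200, BRCLK_HZ, 0, CPM_BRG_EXTC_INT);

	/* Port D (port index 3), pin 9: primary function, output */
	cpm2_set_pin(3, 9, CPM_PIN_OUTPUT);
}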
TroNit/BlackDome_Kernel_JB
drivers/char/scx200_gpio.c
12819
3128
/* linux/drivers/char/scx200_gpio.c

   National Semiconductor SCx200 GPIO driver.  Allows a user space
   process to play with the GPIO pins.

   Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> */

#include <linux/device.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <asm/uaccess.h>
#include <asm/io.h>

#include <linux/types.h>
#include <linux/cdev.h>

#include <linux/scx200_gpio.h>
#include <linux/nsc_gpio.h>

#define DRVNAME "scx200_gpio"

static struct platform_device *pdev;

MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>");
MODULE_DESCRIPTION("NatSemi/AMD SCx200 GPIO Pin Driver");
MODULE_LICENSE("GPL");

static int major = 0;		/* default to dynamic major */
module_param(major, int, 0);
MODULE_PARM_DESC(major, "Major device number");

#define MAX_PINS 32		/* 64 later, when known ok */

struct nsc_gpio_ops scx200_gpio_ops = {
	.owner		= THIS_MODULE,
	.gpio_config	= scx200_gpio_configure,
	.gpio_dump	= nsc_gpio_dump,
	.gpio_get	= scx200_gpio_get,
	.gpio_set	= scx200_gpio_set,
	.gpio_change	= scx200_gpio_change,
	.gpio_current	= scx200_gpio_current
};
EXPORT_SYMBOL_GPL(scx200_gpio_ops);

static int scx200_gpio_open(struct inode *inode, struct file *file)
{
	unsigned m = iminor(inode);
	file->private_data = &scx200_gpio_ops;

	if (m >= MAX_PINS)
		return -EINVAL;
	return nonseekable_open(inode, file);
}

static int scx200_gpio_release(struct inode *inode, struct file *file)
{
	return 0;
}

static const struct file_operations scx200_gpio_fileops = {
	.owner   = THIS_MODULE,
	.write   = nsc_gpio_write,
	.read    = nsc_gpio_read,
	.open    = scx200_gpio_open,
	.release = scx200_gpio_release,
	.llseek  = no_llseek,
};

static struct cdev scx200_gpio_cdev;  /* use 1 cdev for all pins */

static int __init scx200_gpio_init(void)
{
	int rc;
	dev_t devid;

	if (!scx200_gpio_present()) {
		printk(KERN_ERR DRVNAME ": no SCx200 gpio present\n");
		return -ENODEV;
	}

	/* support dev_dbg() with pdev->dev */
	pdev = platform_device_alloc(DRVNAME, 0);
	if (!pdev)
		return -ENOMEM;

	rc = platform_device_add(pdev);
	if (rc)
		goto undo_malloc;

	/* nsc_gpio uses dev_dbg(), so needs this */
	scx200_gpio_ops.dev = &pdev->dev;

	if (major) {
		devid = MKDEV(major, 0);
		rc = register_chrdev_region(devid, MAX_PINS, "scx200_gpio");
	} else {
		rc = alloc_chrdev_region(&devid, 0, MAX_PINS, "scx200_gpio");
		major = MAJOR(devid);
	}
	if (rc < 0) {
		dev_err(&pdev->dev, "SCx200 chrdev_region err: %d\n", rc);
		goto undo_platform_device_add;
	}

	cdev_init(&scx200_gpio_cdev, &scx200_gpio_fileops);
	cdev_add(&scx200_gpio_cdev, devid, MAX_PINS);

	return 0; /* succeed */

undo_platform_device_add:
	platform_device_del(pdev);
undo_malloc:
	platform_device_put(pdev);

	return rc;
}

static void __exit scx200_gpio_cleanup(void)
{
	cdev_del(&scx200_gpio_cdev);
	/* cdev_put(&scx200_gpio_cdev); */

	unregister_chrdev_region(MKDEV(major, 0), MAX_PINS);
	platform_device_unregister(pdev);
}

module_init(scx200_gpio_init);
module_exit(scx200_gpio_cleanup);
gpl-2.0
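For orientation, a small hypothetical user-space test against the per-pin character devices this driver registers (the minor number selects the pin). The device-node path is an assumption — with a dynamic major the node must be created by hand or by udev — and the '1'/'0' command bytes follow the nsc_gpio read/write convention wired into the fileops above; treat both as assumptions rather than documented API.

/* Hypothetical test: toggle pin 7 through its char device.  Assumes a
 * node such as /dev/scx200_gpio7 exists (e.g. mknod with the driver's
 * major and minor 7).  nsc_gpio_read returns one '0'/'1' byte per read. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/scx200_gpio7", O_RDWR);
	char state;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	write(fd, "1", 1);		/* drive the pin high */
	read(fd, &state, 1);		/* read back the current level */
	printf("pin 7 reads %c\n", state);
	close(fd);
	return 0;
}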
zefie/nxt_andx86_kernel
tools/perf/util/quote.c
13587
1265
#include "cache.h" #include "quote.h" /* Help to copy the thing properly quoted for the shell safety. * any single quote is replaced with '\'', any exclamation point * is replaced with '\!', and the whole thing is enclosed in a * * E.g. * original sq_quote result * name ==> name ==> 'name' * a b ==> a b ==> 'a b' * a'b ==> a'\''b ==> 'a'\''b' * a!b ==> a'\!'b ==> 'a'\!'b' */ static inline int need_bs_quote(char c) { return (c == '\'' || c == '!'); } static void sq_quote_buf(struct strbuf *dst, const char *src) { char *to_free = NULL; if (dst->buf == src) to_free = strbuf_detach(dst, NULL); strbuf_addch(dst, '\''); while (*src) { size_t len = strcspn(src, "'!"); strbuf_add(dst, src, len); src += len; while (need_bs_quote(*src)) { strbuf_addstr(dst, "'\\"); strbuf_addch(dst, *src++); strbuf_addch(dst, '\''); } } strbuf_addch(dst, '\''); free(to_free); } void sq_quote_argv(struct strbuf *dst, const char** argv, size_t maxlen) { int i; /* Copy into destination buffer. */ strbuf_grow(dst, 255); for (i = 0; argv[i]; ++i) { strbuf_addch(dst, ' '); sq_quote_buf(dst, argv[i]); if (maxlen && dst->len > maxlen) die("Too many or long arguments"); } }
gpl-2.0
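For orientation, a minimal sketch of how a caller inside tools/perf might use sq_quote_argv() to log a command line in shell-safe form. log_cmdline() is hypothetical; the sketch assumes perf's internal strbuf API (STRBUF_INIT, strbuf_release) as used elsewhere in the tree.

/* Hypothetical helper: quote and log an argv.  Passing maxlen == 0
 * disables the length check (see the 'if (maxlen && ...)' test above);
 * sq_quote_argv prefixes each quoted argument with a space. */
#include <stdio.h>
#include "cache.h"
#include "quote.h"

static void log_cmdline(const char **argv)
{
	struct strbuf cmd = STRBUF_INIT;

	sq_quote_argv(&cmd, argv, 0);
	fprintf(stderr, "running:%s\n", cmd.buf);
	strbuf_release(&cmd);
}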