repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string)
---|---|---|---|---|---
AICP/kernel_motorola_msm8960dt-common | drivers/staging/speakup/speakup_audptr.c | 3141 | 5964 | /*
* originally written by: Kirk Reiser <kirk@braille.uwo.ca>
* this version considerably modified by David Borowski, david575@rogers.com
*
* Copyright (C) 1998-99 Kirk Reiser.
* Copyright (C) 2003 David Borowski.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* specifically written as a driver for the speakup screen review package;
* it is not a general device driver.
*/
#include "spk_priv.h"
#include "speakup.h"
#include "serialio.h"
#define DRV_VERSION "2.11"
#define SYNTH_CLEAR 0x18 /* flush synth buffer */
#define PROCSPEECH '\r' /* start synth processing speech char */
static int synth_probe(struct spk_synth *synth);
static void synth_flush(struct spk_synth *synth);
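/*
* Var table for the Audapter. For numeric (.u.n) entries the
* initializer order follows speakup's struct num_var_t: synth format
* string, default value, low, high, offset, multiplier, out_str.
*/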
static struct var_t vars[] = {
{ CAPS_START, .u.s = {"\x05[f99]" } },
{ CAPS_STOP, .u.s = {"\x05[f80]" } },
{ RATE, .u.n = {"\x05[r%d]", 10, 0, 20, 100, -10, NULL } },
{ PITCH, .u.n = {"\x05[f%d]", 80, 39, 4500, 0, 0, NULL } },
{ VOL, .u.n = {"\x05[g%d]", 21, 0, 40, 0, 0, NULL } },
{ TONE, .u.n = {"\x05[s%d]", 9, 0, 63, 0, 0, 0 } },
{ PUNCT, .u.n = {"\x05[A%c]", 0, 0, 3, 0, 0, "nmsa" } },
{ DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
V_LAST_VAR
};
/*
* These attributes will appear in /sys/accessibility/speakup/audptr.
*/
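/*
* For example, "echo 15 > /sys/accessibility/speakup/audptr/rate"
* should store a new rate through spk_var_store and emit the matching
* "\x05[r%d]" command to the synth.
*/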
static struct kobj_attribute caps_start_attribute =
__ATTR(caps_start, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
__ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
__ATTR(pitch, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
__ATTR(punct, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
__ATTR(rate, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
__ATTR(tone, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
__ATTR(vol, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
__ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
__ATTR(direct, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
__ATTR(full_time, ROOT_W, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
__ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
__ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
* at once.
*/
static struct attribute *synth_attrs[] = {
&caps_start_attribute.attr,
&caps_stop_attribute.attr,
&pitch_attribute.attr,
&punct_attribute.attr,
&rate_attribute.attr,
&tone_attribute.attr,
&vol_attribute.attr,
&delay_time_attribute.attr,
&direct_attribute.attr,
&full_time_attribute.attr,
&jiffy_delta_attribute.attr,
&trigger_time_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
};
static struct spk_synth synth_audptr = {
.name = "audptr",
.version = DRV_VERSION,
.long_name = "Audapter",
.init = "\x05[D1]\x05[Ol]",
.procspeech = PROCSPEECH,
.clear = SYNTH_CLEAR,
.delay = 400,
.trigger = 50,
.jiffies = 30,
.full = 18000,
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
.probe = synth_probe,
.release = spk_serial_release,
.synth_immediate = spk_synth_immediate,
.catch_up = spk_do_catch_up,
.flush = synth_flush,
.is_alive = spk_synth_is_alive_restart,
.synth_adjust = NULL,
.read_buff_add = NULL,
.get_index = NULL,
.indexing = {
.command = NULL,
.lowindex = 0,
.highindex = 0,
.currindex = 0,
},
.attributes = {
.attrs = synth_attrs,
.name = "audptr",
},
};
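/*
* Wait (bounded busy-wait) for the UART transmitter to drain, then
* write the clear byte straight to the port and restart speech
* processing.
*/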
static void synth_flush(struct spk_synth *synth)
{
int timeout = SPK_XMITR_TIMEOUT;
while (spk_serial_tx_busy()) {
if (!--timeout)
break;
udelay(1);
}
outb(SYNTH_CLEAR, speakup_info.port_tts);
spk_serial_out(PROCSPEECH);
}
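/*
* Query the synth with "\x05[Q]"; an Audapter is expected to answer
* with a version string that starts with 'A' and ends in a newline.
* Anything else is silently ignored.
*/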
static void synth_version(struct spk_synth *synth)
{
unsigned char test = 0;
char synth_id[40] = "";
spk_synth_immediate(synth, "\x05[Q]");
synth_id[test] = spk_serial_in();
if (synth_id[test] == 'A') {
do {
/* read version string from synth */
synth_id[++test] = spk_serial_in();
} while (synth_id[test] != '\n' && test < 32);
synth_id[++test] = 0x00;
}
if (synth_id[0] == 'A')
pr_info("%s version: %s", synth->long_name, synth_id);
}
static int synth_probe(struct spk_synth *synth)
{
int failed = 0;
failed = spk_serial_synth_probe(synth);
if (failed == 0)
synth_version(synth);
synth->alive = !failed;
return 0;
}
module_param_named(ser, synth_audptr.ser, int, S_IRUGO);
module_param_named(start, synth_audptr.startup, short, S_IRUGO);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
static int __init audptr_init(void)
{
return synth_add(&synth_audptr);
}
static void __exit audptr_exit(void)
{
synth_remove(&synth_audptr);
}
module_init(audptr_init);
module_exit(audptr_exit);
MODULE_AUTHOR("Kirk Reiser <kirk@braille.uwo.ca>");
MODULE_AUTHOR("David Borowski");
MODULE_DESCRIPTION("Speakup support for Audapter synthesizer");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| gpl-2.0 |
Kernel-Saram/LG-SU760-Kernel | drivers/usb/serial/bus.c | 4165 | 4183 | /*
* USB Serial Converter Bus specific functions
*
* Copyright (C) 2002 Greg Kroah-Hartman (greg@kroah.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
static int usb_serial_device_match(struct device *dev,
struct device_driver *drv)
{
struct usb_serial_driver *driver;
const struct usb_serial_port *port;
/*
* drivers are already assigned to ports in serial_probe so it's
* a simple check here.
*/
port = to_usb_serial_port(dev);
if (!port)
return 0;
driver = to_usb_serial_driver(drv);
if (driver == port->serial->type)
return 1;
return 0;
}
static ssize_t show_port_number(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_serial_port *port = to_usb_serial_port(dev);
return sprintf(buf, "%d\n", port->number - port->serial->minor);
}
static DEVICE_ATTR(port_number, S_IRUGO, show_port_number, NULL);
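/*
* Bus probe: run the type driver's port_probe() hook, expose the
* port_number attribute, then register the tty device so userspace
* gets its ttyUSBn node.
*/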
static int usb_serial_device_probe(struct device *dev)
{
struct usb_serial_driver *driver;
struct usb_serial_port *port;
int retval = 0;
int minor;
port = to_usb_serial_port(dev);
if (!port) {
retval = -ENODEV;
goto exit;
}
if (port->dev_state != PORT_REGISTERING)
goto exit;
driver = port->serial->type;
if (driver->port_probe) {
retval = driver->port_probe(port);
if (retval)
goto exit;
}
retval = device_create_file(dev, &dev_attr_port_number);
if (retval) {
if (driver->port_remove)
retval = driver->port_remove(port);
goto exit;
}
minor = port->number;
tty_register_device(usb_serial_tty_driver, minor, dev);
dev_info(&port->serial->dev->dev,
"%s converter now attached to ttyUSB%d\n",
driver->description, minor);
exit:
return retval;
}
static int usb_serial_device_remove(struct device *dev)
{
struct usb_serial_driver *driver;
struct usb_serial_port *port;
int retval = 0;
int minor;
port = to_usb_serial_port(dev);
if (!port)
return -ENODEV;
if (port->dev_state != PORT_UNREGISTERING)
return retval;
device_remove_file(&port->dev, &dev_attr_port_number);
driver = port->serial->type;
if (driver->port_remove)
retval = driver->port_remove(port);
minor = port->number;
tty_unregister_device(usb_serial_tty_driver, minor);
dev_info(dev, "%s converter now disconnected from ttyUSB%d\n",
driver->description, minor);
return retval;
}
#ifdef CONFIG_HOTPLUG
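/*
* IDs written to the driver's sysfs new_id file are also forwarded to
* the underlying usb_driver, so the USB core matches the new device
* as well.
*/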
static ssize_t store_new_id(struct device_driver *driver,
const char *buf, size_t count)
{
struct usb_serial_driver *usb_drv = to_usb_serial_driver(driver);
ssize_t retval = usb_store_new_id(&usb_drv->dynids, driver, buf, count);
if (retval >= 0 && usb_drv->usb_driver != NULL)
retval = usb_store_new_id(&usb_drv->usb_driver->dynids,
&usb_drv->usb_driver->drvwrap.driver,
buf, count);
return retval;
}
static struct driver_attribute drv_attrs[] = {
__ATTR(new_id, S_IWUSR, NULL, store_new_id),
__ATTR_NULL,
};
static void free_dynids(struct usb_serial_driver *drv)
{
struct usb_dynid *dynid, *n;
spin_lock(&drv->dynids.lock);
list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
list_del(&dynid->node);
kfree(dynid);
}
spin_unlock(&drv->dynids.lock);
}
#else
static struct driver_attribute drv_attrs[] = {
__ATTR_NULL,
};
static inline void free_dynids(struct usb_serial_driver *drv)
{
}
#endif
struct bus_type usb_serial_bus_type = {
.name = "usb-serial",
.match = usb_serial_device_match,
.probe = usb_serial_device_probe,
.remove = usb_serial_device_remove,
.drv_attrs = drv_attrs,
};
int usb_serial_bus_register(struct usb_serial_driver *driver)
{
int retval;
driver->driver.bus = &usb_serial_bus_type;
spin_lock_init(&driver->dynids.lock);
INIT_LIST_HEAD(&driver->dynids.list);
retval = driver_register(&driver->driver);
return retval;
}
void usb_serial_bus_deregister(struct usb_serial_driver *driver)
{
free_dynids(driver);
driver_unregister(&driver->driver);
}
| gpl-2.0 |
aospcus/android_kernel_htc_flounder | arch/blackfin/mach-bf609/dma.c | 4677 | 4996 | /*
* The simple DMA implementation for Blackfin
*
* Copyright 2007-2009 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/module.h>
#include <asm/blackfin.h>
#include <asm/dma.h>
struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = {
(struct dma_register *) DMA0_NEXT_DESC_PTR,
(struct dma_register *) DMA1_NEXT_DESC_PTR,
(struct dma_register *) DMA2_NEXT_DESC_PTR,
(struct dma_register *) DMA3_NEXT_DESC_PTR,
(struct dma_register *) DMA4_NEXT_DESC_PTR,
(struct dma_register *) DMA5_NEXT_DESC_PTR,
(struct dma_register *) DMA6_NEXT_DESC_PTR,
(struct dma_register *) DMA7_NEXT_DESC_PTR,
(struct dma_register *) DMA8_NEXT_DESC_PTR,
(struct dma_register *) DMA9_NEXT_DESC_PTR,
(struct dma_register *) DMA10_NEXT_DESC_PTR,
(struct dma_register *) DMA11_NEXT_DESC_PTR,
(struct dma_register *) DMA12_NEXT_DESC_PTR,
(struct dma_register *) DMA13_NEXT_DESC_PTR,
(struct dma_register *) DMA14_NEXT_DESC_PTR,
(struct dma_register *) DMA15_NEXT_DESC_PTR,
(struct dma_register *) DMA16_NEXT_DESC_PTR,
(struct dma_register *) DMA17_NEXT_DESC_PTR,
(struct dma_register *) DMA18_NEXT_DESC_PTR,
(struct dma_register *) DMA19_NEXT_DESC_PTR,
(struct dma_register *) DMA20_NEXT_DESC_PTR,
(struct dma_register *) MDMA0_SRC_CRC0_NEXT_DESC_PTR,
(struct dma_register *) MDMA0_DEST_CRC0_NEXT_DESC_PTR,
(struct dma_register *) MDMA1_SRC_CRC1_NEXT_DESC_PTR,
(struct dma_register *) MDMA1_DEST_CRC1_NEXT_DESC_PTR,
(struct dma_register *) MDMA2_SRC_NEXT_DESC_PTR,
(struct dma_register *) MDMA2_DEST_NEXT_DESC_PTR,
(struct dma_register *) MDMA3_SRC_NEXT_DESC_PTR,
(struct dma_register *) MDMA3_DEST_NEXT_DESC_PTR,
(struct dma_register *) DMA29_NEXT_DESC_PTR,
(struct dma_register *) DMA30_NEXT_DESC_PTR,
(struct dma_register *) DMA31_NEXT_DESC_PTR,
(struct dma_register *) DMA32_NEXT_DESC_PTR,
(struct dma_register *) DMA33_NEXT_DESC_PTR,
(struct dma_register *) DMA34_NEXT_DESC_PTR,
(struct dma_register *) DMA35_NEXT_DESC_PTR,
(struct dma_register *) DMA36_NEXT_DESC_PTR,
(struct dma_register *) DMA37_NEXT_DESC_PTR,
(struct dma_register *) DMA38_NEXT_DESC_PTR,
(struct dma_register *) DMA39_NEXT_DESC_PTR,
(struct dma_register *) DMA40_NEXT_DESC_PTR,
(struct dma_register *) DMA41_NEXT_DESC_PTR,
(struct dma_register *) DMA42_NEXT_DESC_PTR,
(struct dma_register *) DMA43_NEXT_DESC_PTR,
(struct dma_register *) DMA44_NEXT_DESC_PTR,
(struct dma_register *) DMA45_NEXT_DESC_PTR,
(struct dma_register *) DMA46_NEXT_DESC_PTR,
};
EXPORT_SYMBOL(dma_io_base_addr);
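/*
* Map a peripheral DMA channel number to its interrupt line; returns
* -1 for channels that have no dedicated IRQ.
*/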
int channel2irq(unsigned int channel)
{
int ret_irq = -1;
switch (channel) {
case CH_SPORT0_RX:
ret_irq = IRQ_SPORT0_RX;
break;
case CH_SPORT0_TX:
ret_irq = IRQ_SPORT0_TX;
break;
case CH_SPORT1_RX:
ret_irq = IRQ_SPORT1_RX;
break;
case CH_SPORT1_TX:
ret_irq = IRQ_SPORT1_TX;
break;
case CH_SPORT2_RX:
ret_irq = IRQ_SPORT2_RX;
break;
case CH_SPORT2_TX:
ret_irq = IRQ_SPORT2_TX;
break;
case CH_SPI0_TX:
ret_irq = IRQ_SPI0_TX;
break;
case CH_SPI0_RX:
ret_irq = IRQ_SPI0_RX;
break;
case CH_SPI1_TX:
ret_irq = IRQ_SPI1_TX;
break;
case CH_SPI1_RX:
ret_irq = IRQ_SPI1_RX;
break;
case CH_RSI:
ret_irq = IRQ_RSI;
break;
case CH_SDU:
ret_irq = IRQ_SDU;
break;
case CH_LP0:
ret_irq = IRQ_LP0;
break;
case CH_LP1:
ret_irq = IRQ_LP1;
break;
case CH_LP2:
ret_irq = IRQ_LP2;
break;
case CH_LP3:
ret_irq = IRQ_LP3;
break;
case CH_UART0_RX:
ret_irq = IRQ_UART0_RX;
break;
case CH_UART0_TX:
ret_irq = IRQ_UART0_TX;
break;
case CH_UART1_RX:
ret_irq = IRQ_UART1_RX;
break;
case CH_UART1_TX:
ret_irq = IRQ_UART1_TX;
break;
case CH_EPPI0_CH0:
ret_irq = IRQ_EPPI0_CH0;
break;
case CH_EPPI0_CH1:
ret_irq = IRQ_EPPI0_CH1;
break;
case CH_EPPI1_CH0:
ret_irq = IRQ_EPPI1_CH0;
break;
case CH_EPPI1_CH1:
ret_irq = IRQ_EPPI1_CH1;
break;
case CH_EPPI2_CH0:
ret_irq = IRQ_EPPI2_CH0;
break;
case CH_EPPI2_CH1:
ret_irq = IRQ_EPPI2_CH1;
break;
case CH_PIXC_CH0:
ret_irq = IRQ_PIXC_CH0;
break;
case CH_PIXC_CH1:
ret_irq = IRQ_PIXC_CH1;
break;
case CH_PIXC_CH2:
ret_irq = IRQ_PIXC_CH2;
break;
case CH_PVP_CPDOB:
ret_irq = IRQ_PVP_CPDOB;
break;
case CH_PVP_CPDOC:
ret_irq = IRQ_PVP_CPDOC;
break;
case CH_PVP_CPSTAT:
ret_irq = IRQ_PVP_CPSTAT;
break;
case CH_PVP_CPCI:
ret_irq = IRQ_PVP_CPCI;
break;
case CH_PVP_MPDO:
ret_irq = IRQ_PVP_MPDO;
break;
case CH_PVP_MPDI:
ret_irq = IRQ_PVP_MPDI;
break;
case CH_PVP_MPSTAT:
ret_irq = IRQ_PVP_MPSTAT;
break;
case CH_PVP_MPCI:
ret_irq = IRQ_PVP_MPCI;
break;
case CH_PVP_CPDOA:
ret_irq = IRQ_PVP_CPDOA;
break;
case CH_MEM_STREAM0_SRC:
case CH_MEM_STREAM0_DEST:
ret_irq = IRQ_MDMAS0;
break;
case CH_MEM_STREAM1_SRC:
case CH_MEM_STREAM1_DEST:
ret_irq = IRQ_MDMAS1;
break;
case CH_MEM_STREAM2_SRC:
case CH_MEM_STREAM2_DEST:
ret_irq = IRQ_MDMAS2;
break;
case CH_MEM_STREAM3_SRC:
case CH_MEM_STREAM3_DEST:
ret_irq = IRQ_MDMAS3;
break;
}
return ret_irq;
}
| gpl-2.0 |
eugenesan/android_kernel_lge_hammerhead | arch/mips/netlogic/xlp/wakeup.c | 4677 | 3377 | /*
* Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
* reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the NetLogic
* license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/threads.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/mipsregs.h>
#include <asm/addrspace.h>
#include <asm/string.h>
#include <asm/netlogic/haldefs.h>
#include <asm/netlogic/common.h>
#include <asm/netlogic/mips-extns.h>
#include <asm/netlogic/xlp-hal/iomap.h>
#include <asm/netlogic/xlp-hal/pic.h>
#include <asm/netlogic/xlp-hal/xlp.h>
#include <asm/netlogic/xlp-hal/sys.h>
static void xlp_enable_secondary_cores(void)
{
uint32_t core, value, coremask, syscoremask;
int count;
/* read cores in reset from SYS block */
syscoremask = nlm_read_sys_reg(nlm_sys_base, SYS_CPU_RESET);
/* update user specified */
nlm_coremask = nlm_coremask & (syscoremask | 1);
for (core = 1; core < 8; core++) {
coremask = 1 << core;
if ((nlm_coremask & coremask) == 0)
continue;
/* Enable CPU clock */
value = nlm_read_sys_reg(nlm_sys_base, SYS_CORE_DFS_DIS_CTRL);
value &= ~coremask;
nlm_write_sys_reg(nlm_sys_base, SYS_CORE_DFS_DIS_CTRL, value);
/* Remove CPU Reset */
value = nlm_read_sys_reg(nlm_sys_base, SYS_CPU_RESET);
value &= ~coremask;
nlm_write_sys_reg(nlm_sys_base, SYS_CPU_RESET, value);
/* Poll for CPU to mark itself coherent */
count = 100000;
do {
value = nlm_read_sys_reg(nlm_sys_base,
SYS_CPU_NONCOHERENT_MODE);
} while ((value & coremask) != 0 && --count > 0);
if (count == 0)
pr_err("Failed to enable core %d\n", core);
}
}
void xlp_wakeup_secondary_cpus(void)
{
/*
* In case of u-boot, the secondaries are held in reset;
* first wake up the core 0 threads
*/
xlp_boot_core0_siblings();
/* now get other cores out of reset */
xlp_enable_secondary_cores();
}
| gpl-2.0 |
Alucard24/Dorimanx-SG2-I9100-Kernel | tools/perf/util/color.c | 5701 | 6753 | #include <linux/kernel.h>
#include "cache.h"
#include "color.h"
int perf_use_color_default = -1;
static int parse_color(const char *name, int len)
{
static const char * const color_names[] = {
"normal", "black", "red", "green", "yellow",
"blue", "magenta", "cyan", "white"
};
char *end;
int i;
for (i = 0; i < (int)ARRAY_SIZE(color_names); i++) {
const char *str = color_names[i];
if (!strncasecmp(name, str, len) && !str[len])
return i - 1;
}
i = strtol(name, &end, 10);
if (end - name == len && i >= -1 && i <= 255)
return i;
return -2;
}
static int parse_attr(const char *name, int len)
{
static const int attr_values[] = { 1, 2, 4, 5, 7 };
static const char * const attr_names[] = {
"bold", "dim", "ul", "blink", "reverse"
};
unsigned int i;
for (i = 0; i < ARRAY_SIZE(attr_names); i++) {
const char *str = attr_names[i];
if (!strncasecmp(name, str, len) && !str[len])
return attr_values[i];
}
return -1;
}
void color_parse(const char *value, const char *var, char *dst)
{
color_parse_mem(value, strlen(value), var, dst);
}
void color_parse_mem(const char *value, int value_len, const char *var,
char *dst)
{
const char *ptr = value;
int len = value_len;
int attr = -1;
int fg = -2;
int bg = -2;
if (!strncasecmp(value, "reset", len)) {
strcpy(dst, PERF_COLOR_RESET);
return;
}
/* [fg [bg]] [attr] */
while (len > 0) {
const char *word = ptr;
int val, wordlen = 0;
while (len > 0 && !isspace(word[wordlen])) {
wordlen++;
len--;
}
ptr = word + wordlen;
while (len > 0 && isspace(*ptr)) {
ptr++;
len--;
}
val = parse_color(word, wordlen);
if (val >= -1) {
if (fg == -2) {
fg = val;
continue;
}
if (bg == -2) {
bg = val;
continue;
}
goto bad;
}
val = parse_attr(word, wordlen);
if (val < 0 || attr != -1)
goto bad;
attr = val;
}
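/* Emit an ANSI SGR escape sequence: ESC [ attr ; fg ; bg m, using the
* 256-color forms 38;5;n and 48;5;n for color values 8 and above. */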
if (attr >= 0 || fg >= 0 || bg >= 0) {
int sep = 0;
*dst++ = '\033';
*dst++ = '[';
if (attr >= 0) {
*dst++ = '0' + attr;
sep++;
}
if (fg >= 0) {
if (sep++)
*dst++ = ';';
if (fg < 8) {
*dst++ = '3';
*dst++ = '0' + fg;
} else {
dst += sprintf(dst, "38;5;%d", fg);
}
}
if (bg >= 0) {
if (sep++)
*dst++ = ';';
if (bg < 8) {
*dst++ = '4';
*dst++ = '0' + bg;
} else {
dst += sprintf(dst, "48;5;%d", bg);
}
}
*dst++ = 'm';
}
*dst = 0;
return;
bad:
die("bad color value '%.*s' for variable '%s'", value_len, value, var);
}
int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty)
{
if (value) {
if (!strcasecmp(value, "never"))
return 0;
if (!strcasecmp(value, "always"))
return 1;
if (!strcasecmp(value, "auto"))
goto auto_color;
}
/* Missing or explicit false to turn off colorization */
if (!perf_config_bool(var, value))
return 0;
/* any normal truth value defaults to 'auto' */
auto_color:
if (stdout_is_tty < 0)
stdout_is_tty = isatty(1);
if (stdout_is_tty || (pager_in_use() && pager_use_color)) {
char *term = getenv("TERM");
if (term && strcmp(term, "dumb"))
return 1;
}
return 0;
}
int perf_color_default_config(const char *var, const char *value, void *cb)
{
if (!strcmp(var, "color.ui")) {
perf_use_color_default = perf_config_colorbool(var, value, -1);
return 0;
}
return perf_default_config(var, value, cb);
}
static int __color_vsnprintf(char *bf, size_t size, const char *color,
const char *fmt, va_list args, const char *trail)
{
int r = 0;
/*
* Auto-detect:
*/
if (perf_use_color_default < 0) {
if (isatty(1) || pager_in_use())
perf_use_color_default = 1;
else
perf_use_color_default = 0;
}
if (perf_use_color_default && *color)
r += scnprintf(bf, size, "%s", color);
r += vscnprintf(bf + r, size - r, fmt, args);
if (perf_use_color_default && *color)
r += scnprintf(bf + r, size - r, "%s", PERF_COLOR_RESET);
if (trail)
r += scnprintf(bf + r, size - r, "%s", trail);
return r;
}
static int __color_vfprintf(FILE *fp, const char *color, const char *fmt,
va_list args, const char *trail)
{
int r = 0;
/*
* Auto-detect:
*/
if (perf_use_color_default < 0) {
if (isatty(fileno(fp)) || pager_in_use())
perf_use_color_default = 1;
else
perf_use_color_default = 0;
}
if (perf_use_color_default && *color)
r += fprintf(fp, "%s", color);
r += vfprintf(fp, fmt, args);
if (perf_use_color_default && *color)
r += fprintf(fp, "%s", PERF_COLOR_RESET);
if (trail)
r += fprintf(fp, "%s", trail);
return r;
}
int color_vsnprintf(char *bf, size_t size, const char *color,
const char *fmt, va_list args)
{
return __color_vsnprintf(bf, size, color, fmt, args, NULL);
}
int color_vfprintf(FILE *fp, const char *color, const char *fmt, va_list args)
{
return __color_vfprintf(fp, color, fmt, args, NULL);
}
int color_snprintf(char *bf, size_t size, const char *color,
const char *fmt, ...)
{
va_list args;
int r;
va_start(args, fmt);
r = color_vsnprintf(bf, size, color, fmt, args);
va_end(args);
return r;
}
int color_fprintf(FILE *fp, const char *color, const char *fmt, ...)
{
va_list args;
int r;
va_start(args, fmt);
r = color_vfprintf(fp, color, fmt, args);
va_end(args);
return r;
}
int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...)
{
va_list args;
int r;
va_start(args, fmt);
r = __color_vfprintf(fp, color, fmt, args, "\n");
va_end(args);
return r;
}
/*
* This function splits the buffer by newlines and colors the lines individually.
*
* Returns 0 on success.
*/
int color_fwrite_lines(FILE *fp, const char *color,
size_t count, const char *buf)
{
if (!*color)
return fwrite(buf, count, 1, fp) != 1;
while (count) {
char *p = memchr(buf, '\n', count);
if (p != buf && (fputs(color, fp) < 0 ||
fwrite(buf, p ? (size_t)(p - buf) : count, 1, fp) != 1 ||
fputs(PERF_COLOR_RESET, fp) < 0))
return -1;
if (!p)
return 0;
if (fputc('\n', fp) < 0)
return -1;
count -= p + 1 - buf;
buf = p + 1;
}
return 0;
}
const char *get_percent_color(double percent)
{
const char *color = PERF_COLOR_NORMAL;
/*
* We color high-overhead entries in red, mid-overhead
* entries in green - and keep the low overhead places
* normal:
*/
if (percent >= MIN_RED)
color = PERF_COLOR_RED;
else {
if (percent > MIN_GREEN)
color = PERF_COLOR_GREEN;
}
return color;
}
int percent_color_fprintf(FILE *fp, const char *fmt, double percent)
{
int r;
const char *color;
color = get_percent_color(percent);
r = color_fprintf(fp, color, fmt, percent);
return r;
}
int percent_color_snprintf(char *bf, size_t size, const char *fmt, double percent)
{
const char *color = get_percent_color(percent);
return color_snprintf(bf, size, color, fmt, percent);
}
| gpl-2.0 |
sminki/android_kernel_sony_u8500 | arch/mips/lasat/sysctl.c | 8773 | 6089 | /*
* Thomas Horsten <thh@lasat.com>
* Copyright (C) 2000 LASAT Networks A/S.
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Routines specific to the LASAT boards
*/
#include <linux/types.h>
#include <asm/lasat/lasat.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/net.h>
#include <linux/inet.h>
#include <linux/uaccess.h>
#include <asm/time.h>
#ifdef CONFIG_DS1603
#include "ds1603.h"
#endif
/* proc function to write EEPROM after changing string entry */
int proc_dolasatstring(ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int r;
r = proc_dostring(table, write, buffer, lenp, ppos);
if ((!write) || r)
return r;
lasat_write_eeprom_info();
return 0;
}
/* proc function to write EEPROM after changing int entry */
int proc_dolasatint(ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int r;
r = proc_dointvec(table, write, buffer, lenp, ppos);
if ((!write) || r)
return r;
lasat_write_eeprom_info();
return 0;
}
#ifdef CONFIG_DS1603
static int rtctmp;
/* proc function to read/write RealTime Clock */
int proc_dolasatrtc(ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct timespec ts;
int r;
if (!write) {
read_persistent_clock(&ts);
rtctmp = ts.tv_sec;
/* check for time < 0 and set to 0 */
if (rtctmp < 0)
rtctmp = 0;
}
r = proc_dointvec(table, write, buffer, lenp, ppos);
if (r)
return r;
if (write)
rtc_mips_set_mmss(rtctmp);
return 0;
}
#endif
#ifdef CONFIG_INET
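/*
* proc handler for the IP address entries: accepts a dotted-quad
* string on write (stored via in_aton) and prints the stored address
* in the same dotted-quad form on read.
*/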
int proc_lasat_ip(ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
unsigned int ip;
char *p, c;
int len;
char ipbuf[32];
if (!table->data || !table->maxlen || !*lenp ||
(*ppos && !write)) {
*lenp = 0;
return 0;
}
if (write) {
len = 0;
p = buffer;
while (len < *lenp) {
if (get_user(c, p++))
return -EFAULT;
if (c == 0 || c == '\n')
break;
len++;
}
if (len >= sizeof(ipbuf)-1)
len = sizeof(ipbuf) - 1;
if (copy_from_user(ipbuf, buffer, len))
return -EFAULT;
ipbuf[len] = 0;
*ppos += *lenp;
/* Now see if we can convert it to a valid IP */
ip = in_aton(ipbuf);
*(unsigned int *)(table->data) = ip;
lasat_write_eeprom_info();
} else {
ip = *(unsigned int *)(table->data);
sprintf(ipbuf, "%d.%d.%d.%d",
(ip) & 0xff,
(ip >> 8) & 0xff,
(ip >> 16) & 0xff,
(ip >> 24) & 0xff);
len = strlen(ipbuf);
if (len > *lenp)
len = *lenp;
if (len)
if (copy_to_user(buffer, ipbuf, len))
return -EFAULT;
if (len < *lenp) {
if (put_user('\n', ((char *) buffer) + len))
return -EFAULT;
len++;
}
*lenp = len;
*ppos += len;
}
return 0;
}
#endif
int proc_lasat_prid(ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int r;
r = proc_dointvec(table, write, buffer, lenp, ppos);
if (r < 0)
return r;
if (write) {
lasat_board_info.li_eeprom_info.prid =
lasat_board_info.li_prid;
lasat_write_eeprom_info();
lasat_init_board_info();
}
return 0;
}
extern int lasat_boot_to_service;
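/* The entries below appear under /proc/sys/lasat/ (see lasat_root_table). */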
static ctl_table lasat_table[] = {
{
.procname = "cpu-hz",
.data = &lasat_board_info.li_cpu_hz,
.maxlen = sizeof(int),
.mode = 0444,
.proc_handler = proc_dointvec,
},
{
.procname = "bus-hz",
.data = &lasat_board_info.li_bus_hz,
.maxlen = sizeof(int),
.mode = 0444,
.proc_handler = proc_dointvec,
},
{
.procname = "bmid",
.data = &lasat_board_info.li_bmid,
.maxlen = sizeof(int),
.mode = 0444,
.proc_handler = proc_dointvec,
},
{
.procname = "prid",
.data = &lasat_board_info.li_prid,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_lasat_prid,
},
#ifdef CONFIG_INET
{
.procname = "ipaddr",
.data = &lasat_board_info.li_eeprom_info.ipaddr,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_lasat_ip,
},
{
.procname = "netmask",
.data = &lasat_board_info.li_eeprom_info.netmask,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_lasat_ip,
},
#endif
{
.procname = "passwd_hash",
.data = &lasat_board_info.li_eeprom_info.passwd_hash,
.maxlen =
sizeof(lasat_board_info.li_eeprom_info.passwd_hash),
.mode = 0600,
.proc_handler = proc_dolasatstring,
},
{
.procname = "boot-service",
.data = &lasat_boot_to_service,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
#ifdef CONFIG_DS1603
{
.procname = "rtc",
.data = &rtctmp,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dolasatrtc,
},
#endif
{
.procname = "namestr",
.data = &lasat_board_info.li_namestr,
.maxlen = sizeof(lasat_board_info.li_namestr),
.mode = 0444,
.proc_handler = proc_dostring,
},
{
.procname = "typestr",
.data = &lasat_board_info.li_typestr,
.maxlen = sizeof(lasat_board_info.li_typestr),
.mode = 0444,
.proc_handler = proc_dostring,
},
{}
};
static ctl_table lasat_root_table[] = {
{
.procname = "lasat",
.mode = 0555,
.child = lasat_table
},
{}
};
static int __init lasat_register_sysctl(void)
{
struct ctl_table_header *lasat_table_header;
lasat_table_header =
register_sysctl_table(lasat_root_table);
if (!lasat_table_header) {
printk(KERN_ERR "Unable to register LASAT sysctl\n");
return -ENOMEM;
}
return 0;
}
__initcall(lasat_register_sysctl);
| gpl-2.0 |
MIPS/kernel-linux-mti | net/atm/mpoa_proc.c | 9029 | 7322 | #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#ifdef CONFIG_PROC_FS
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/atmmpc.h>
#include <linux/atm.h>
#include <linux/gfp.h>
#include "mpc.h"
#include "mpoa_caches.h"
/*
* mpoa_proc.c: implementation of the MPOA client's proc
* file system statistics
*/
#if 1
#define dprintk(format, args...) \
printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */
#else
#define dprintk(format, args...) \
do { if (0) \
printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\
} while (0)
#endif
#if 0
#define ddprintk(format, args...) \
printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */
#else
#define ddprintk(format, args...) \
do { if (0) \
printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\
} while (0)
#endif
#define STAT_FILE_NAME "mpc" /* Our statistic file's name */
extern struct mpoa_client *mpcs;
extern struct proc_dir_entry *atm_proc_root; /* from proc.c. */
static int proc_mpc_open(struct inode *inode, struct file *file);
static ssize_t proc_mpc_write(struct file *file, const char __user *buff,
size_t nbytes, loff_t *ppos);
static int parse_qos(const char *buff);
/*
* Define allowed FILE OPERATIONS
*/
static const struct file_operations mpc_file_operations = {
.owner = THIS_MODULE,
.open = proc_mpc_open,
.read = seq_read,
.llseek = seq_lseek,
.write = proc_mpc_write,
.release = seq_release,
};
/*
* Returns the state of an ingress cache entry as a string
*/
static const char *ingress_state_string(int state)
{
switch (state) {
case INGRESS_RESOLVING:
return "resolving ";
case INGRESS_RESOLVED:
return "resolved ";
case INGRESS_INVALID:
return "invalid ";
case INGRESS_REFRESHING:
return "refreshing ";
}
return "";
}
/*
* Returns the state of an egress cache entry as a string
*/
static const char *egress_state_string(int state)
{
switch (state) {
case EGRESS_RESOLVED:
return "resolved ";
case EGRESS_PURGE:
return "purge ";
case EGRESS_INVALID:
return "invalid ";
}
return "";
}
/*
* FIXME: mpcs (and per-mpc lists) have no locking whatsoever.
*/
static void *mpc_start(struct seq_file *m, loff_t *pos)
{
loff_t l = *pos;
struct mpoa_client *mpc;
if (!l--)
return SEQ_START_TOKEN;
for (mpc = mpcs; mpc; mpc = mpc->next)
if (!l--)
return mpc;
return NULL;
}
static void *mpc_next(struct seq_file *m, void *v, loff_t *pos)
{
struct mpoa_client *p = v;
(*pos)++;
return v == SEQ_START_TOKEN ? mpcs : p->next;
}
static void mpc_stop(struct seq_file *m, void *v)
{
}
/*
* READING function - called when the /proc/atm/mpc file is read from.
*/
static int mpc_show(struct seq_file *m, void *v)
{
struct mpoa_client *mpc = v;
int i;
in_cache_entry *in_entry;
eg_cache_entry *eg_entry;
struct timeval now;
unsigned char ip_string[16];
if (v == SEQ_START_TOKEN) {
atm_mpoa_disp_qos(m);
return 0;
}
seq_printf(m, "\nInterface %d:\n\n", mpc->dev_num);
seq_printf(m, "Ingress Entries:\nIP address State Holding time Packets fwded VPI VCI\n");
do_gettimeofday(&now);
for (in_entry = mpc->in_cache; in_entry; in_entry = in_entry->next) {
sprintf(ip_string, "%pI4", &in_entry->ctrl_info.in_dst_ip);
seq_printf(m, "%-16s%s%-14lu%-12u",
ip_string,
ingress_state_string(in_entry->entry_state),
in_entry->ctrl_info.holding_time -
(now.tv_sec-in_entry->tv.tv_sec),
in_entry->packets_fwded);
if (in_entry->shortcut)
seq_printf(m, " %-3d %-3d",
in_entry->shortcut->vpi,
in_entry->shortcut->vci);
seq_printf(m, "\n");
}
seq_printf(m, "\n");
seq_printf(m, "Egress Entries:\nIngress MPC ATM addr\nCache-id State Holding time Packets recvd Latest IP addr VPI VCI\n");
for (eg_entry = mpc->eg_cache; eg_entry; eg_entry = eg_entry->next) {
unsigned char *p = eg_entry->ctrl_info.in_MPC_data_ATM_addr;
for (i = 0; i < ATM_ESA_LEN; i++)
seq_printf(m, "%02x", p[i]);
seq_printf(m, "\n%-16lu%s%-14lu%-15u",
(unsigned long)ntohl(eg_entry->ctrl_info.cache_id),
egress_state_string(eg_entry->entry_state),
(eg_entry->ctrl_info.holding_time -
(now.tv_sec-eg_entry->tv.tv_sec)),
eg_entry->packets_rcvd);
/* latest IP address */
sprintf(ip_string, "%pI4", &eg_entry->latest_ip_addr);
seq_printf(m, "%-16s", ip_string);
if (eg_entry->shortcut)
seq_printf(m, " %-3d %-3d",
eg_entry->shortcut->vpi,
eg_entry->shortcut->vci);
seq_printf(m, "\n");
}
seq_printf(m, "\n");
return 0;
}
static const struct seq_operations mpc_op = {
.start = mpc_start,
.next = mpc_next,
.stop = mpc_stop,
.show = mpc_show
};
static int proc_mpc_open(struct inode *inode, struct file *file)
{
return seq_open(file, &mpc_op);
}
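/*
* WRITING function - called when the /proc/atm/mpc file is written to.
* The input line is copied into a kernel page and handed to parse_qos().
*/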
static ssize_t proc_mpc_write(struct file *file, const char __user *buff,
size_t nbytes, loff_t *ppos)
{
char *page, *p;
unsigned len;
if (nbytes == 0)
return 0;
if (nbytes >= PAGE_SIZE)
nbytes = PAGE_SIZE-1;
page = (char *)__get_free_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
for (p = page, len = 0; len < nbytes; p++, len++) {
if (get_user(*p, buff++)) {
free_page((unsigned long)page);
return -EFAULT;
}
if (*p == '\0' || *p == '\n')
break;
}
*p = '\0';
if (!parse_qos(page))
printk("mpoa: proc_mpc_write: could not parse '%s'\n", page);
free_page((unsigned long)page);
return len;
}
static int parse_qos(const char *buff)
{
/* possible lines look like this
* add 130.230.54.142 tx=max_pcr,max_sdu rx=max_pcr,max_sdu
*/
unsigned char ip[4];
int tx_pcr, tx_sdu, rx_pcr, rx_sdu;
__be32 ipaddr;
struct atm_qos qos;
memset(&qos, 0, sizeof(struct atm_qos));
if (sscanf(buff, "del %hhu.%hhu.%hhu.%hhu",
ip, ip+1, ip+2, ip+3) == 4) {
ipaddr = *(__be32 *)ip;
return atm_mpoa_delete_qos(atm_mpoa_search_qos(ipaddr));
}
if (sscanf(buff, "add %hhu.%hhu.%hhu.%hhu tx=%d,%d rx=tx",
ip, ip+1, ip+2, ip+3, &tx_pcr, &tx_sdu) == 6) {
rx_pcr = tx_pcr;
rx_sdu = tx_sdu;
} else if (sscanf(buff, "add %hhu.%hhu.%hhu.%hhu tx=%d,%d rx=%d,%d",
ip, ip+1, ip+2, ip+3, &tx_pcr, &tx_sdu, &rx_pcr, &rx_sdu) != 8)
return 0;
ipaddr = *(__be32 *)ip;
qos.txtp.traffic_class = ATM_CBR;
qos.txtp.max_pcr = tx_pcr;
qos.txtp.max_sdu = tx_sdu;
qos.rxtp.traffic_class = ATM_CBR;
qos.rxtp.max_pcr = rx_pcr;
qos.rxtp.max_sdu = rx_sdu;
qos.aal = ATM_AAL5;
dprintk("parse_qos(): setting qos paramameters to tx=%d,%d rx=%d,%d\n",
qos.txtp.max_pcr, qos.txtp.max_sdu,
qos.rxtp.max_pcr, qos.rxtp.max_sdu);
atm_mpoa_add_qos(ipaddr, &qos);
return 1;
}
/*
* INITIALIZATION function - called when module is initialized/loaded.
*/
int mpc_proc_init(void)
{
struct proc_dir_entry *p;
p = proc_create(STAT_FILE_NAME, 0, atm_proc_root, &mpc_file_operations);
if (!p) {
pr_err("Unable to initialize /proc/atm/%s\n", STAT_FILE_NAME);
return -ENOMEM;
}
return 0;
}
/*
* DELETING function - called when module is removed.
*/
void mpc_proc_clean(void)
{
remove_proc_entry(STAT_FILE_NAME, atm_proc_root);
}
#endif /* CONFIG_PROC_FS */
| gpl-2.0 |
SmokyBob/android_kernel_asus_padfone2 | drivers/ide/ide-cd_ioctl.c | 11845 | 11821 | /*
* cdrom.c IOCTLs handling for ide-cd driver.
*
* Copyright (C) 1994-1996 Scott Snyder <snyder@fnald0.fnal.gov>
* Copyright (C) 1996-1998 Erik Andersen <andersee@debian.org>
* Copyright (C) 1998-2000 Jens Axboe <axboe@suse.de>
*/
#include <linux/kernel.h>
#include <linux/cdrom.h>
#include <linux/gfp.h>
#include <linux/ide.h>
#include <scsi/scsi.h>
#include "ide-cd.h"
/****************************************************************************
* Other driver requests (open, close, check media change).
*/
int ide_cdrom_open_real(struct cdrom_device_info *cdi, int purpose)
{
return 0;
}
/*
* Close down the device. Invalidate all cached blocks.
*/
void ide_cdrom_release_real(struct cdrom_device_info *cdi)
{
ide_drive_t *drive = cdi->handle;
if (!cdi->use_count)
drive->atapi_flags &= ~IDE_AFLAG_TOC_VALID;
}
/*
* Add logic to try the GET_EVENT command first to check for media and
* tray status; this should be supported by newer CD-R/W and all DVD
* etc. drives.
*/
int ide_cdrom_drive_status(struct cdrom_device_info *cdi, int slot_nr)
{
ide_drive_t *drive = cdi->handle;
struct media_event_desc med;
struct request_sense sense;
int stat;
if (slot_nr != CDSL_CURRENT)
return -EINVAL;
stat = cdrom_check_status(drive, &sense);
if (!stat || sense.sense_key == UNIT_ATTENTION)
return CDS_DISC_OK;
if (!cdrom_get_media_event(cdi, &med)) {
if (med.media_present)
return CDS_DISC_OK;
else if (med.door_open)
return CDS_TRAY_OPEN;
else
return CDS_NO_DISC;
}
if (sense.sense_key == NOT_READY && sense.asc == 0x04
&& sense.ascq == 0x04)
return CDS_DISC_OK;
/*
* If not using Mt Fuji extended media tray reports,
* just return TRAY_OPEN since ATAPI doesn't provide
* any other way to detect this...
*/
if (sense.sense_key == NOT_READY) {
if (sense.asc == 0x3a && sense.ascq == 1)
return CDS_NO_DISC;
else
return CDS_TRAY_OPEN;
}
return CDS_DRIVE_NOT_READY;
}
/*
* ide-cd always generates media changed event if media is missing, which
* makes it impossible to use for proper event reporting, so disk->events
* is cleared to 0 and the following function is used only to trigger
* revalidation and never propagated to userland.
*/
unsigned int ide_cdrom_check_events_real(struct cdrom_device_info *cdi,
unsigned int clearing, int slot_nr)
{
ide_drive_t *drive = cdi->handle;
int retval;
if (slot_nr == CDSL_CURRENT) {
(void) cdrom_check_status(drive, NULL);
retval = (drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED) ? 1 : 0;
drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED;
return retval ? DISK_EVENT_MEDIA_CHANGE : 0;
} else {
return 0;
}
}
/* Eject the disk if EJECTFLAG is 0.
If EJECTFLAG is 1, try to reload the disk. */
static
int cdrom_eject(ide_drive_t *drive, int ejectflag,
struct request_sense *sense)
{
struct cdrom_info *cd = drive->driver_data;
struct cdrom_device_info *cdi = &cd->devinfo;
char loej = 0x02;
unsigned char cmd[BLK_MAX_CDB];
if ((drive->atapi_flags & IDE_AFLAG_NO_EJECT) && !ejectflag)
return -EDRIVE_CANT_DO_THIS;
/* reload fails on some drives, if the tray is locked */
if ((drive->atapi_flags & IDE_AFLAG_DOOR_LOCKED) && ejectflag)
return 0;
/* only tell drive to close tray if open, if it can do that */
if (ejectflag && (cdi->mask & CDC_CLOSE_TRAY))
loej = 0;
memset(cmd, 0, BLK_MAX_CDB);
cmd[0] = GPCMD_START_STOP_UNIT;
cmd[4] = loej | (ejectflag != 0);
return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, 0);
}
/* Lock the door if LOCKFLAG is nonzero; unlock it otherwise. */
static
int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
struct request_sense *sense)
{
struct request_sense my_sense;
int stat;
if (sense == NULL)
sense = &my_sense;
/* If the drive cannot lock the door, just pretend. */
if ((drive->dev_flags & IDE_DFLAG_DOORLOCKING) == 0) {
stat = 0;
} else {
unsigned char cmd[BLK_MAX_CDB];
memset(cmd, 0, BLK_MAX_CDB);
cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
cmd[4] = lockflag ? 1 : 0;
stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL,
sense, 0, 0);
}
/* If we got an illegal field error, the drive
probably cannot lock the door. */
if (stat != 0 &&
sense->sense_key == ILLEGAL_REQUEST &&
(sense->asc == 0x24 || sense->asc == 0x20)) {
printk(KERN_ERR "%s: door locking not supported\n",
drive->name);
drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
stat = 0;
}
/* no medium, that's alright. */
if (stat != 0 && sense->sense_key == NOT_READY && sense->asc == 0x3a)
stat = 0;
if (stat == 0) {
if (lockflag)
drive->atapi_flags |= IDE_AFLAG_DOOR_LOCKED;
else
drive->atapi_flags &= ~IDE_AFLAG_DOOR_LOCKED;
}
return stat;
}
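/*
* Backend for the tray-move ioctl: unlock the door before ejecting,
* then issue START_STOP_UNIT with the appropriate load/eject bits.
*/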
int ide_cdrom_tray_move(struct cdrom_device_info *cdi, int position)
{
ide_drive_t *drive = cdi->handle;
struct request_sense sense;
if (position) {
int stat = ide_cd_lockdoor(drive, 0, &sense);
if (stat)
return stat;
}
return cdrom_eject(drive, !position, &sense);
}
int ide_cdrom_lock_door(struct cdrom_device_info *cdi, int lock)
{
ide_drive_t *drive = cdi->handle;
return ide_cd_lockdoor(drive, lock, NULL);
}
/*
* ATAPI devices are free to select the speed you request or any slower
* rate. :-( Requesting too fast a speed will _not_ produce an error.
*/
int ide_cdrom_select_speed(struct cdrom_device_info *cdi, int speed)
{
ide_drive_t *drive = cdi->handle;
struct cdrom_info *cd = drive->driver_data;
struct request_sense sense;
u8 buf[ATAPI_CAPABILITIES_PAGE_SIZE];
int stat;
unsigned char cmd[BLK_MAX_CDB];
if (speed == 0)
speed = 0xffff; /* set to max */
else
speed *= 177; /* Nx to kbytes/s */
memset(cmd, 0, BLK_MAX_CDB);
cmd[0] = GPCMD_SET_SPEED;
/* Read Drive speed in kbytes/second MSB/LSB */
cmd[2] = (speed >> 8) & 0xff;
cmd[3] = speed & 0xff;
if ((cdi->mask & (CDC_CD_R | CDC_CD_RW | CDC_DVD_R)) !=
(CDC_CD_R | CDC_CD_RW | CDC_DVD_R)) {
/* Write Drive speed in kbytes/second MSB/LSB */
cmd[4] = (speed >> 8) & 0xff;
cmd[5] = speed & 0xff;
}
stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, &sense, 0, 0);
if (!ide_cdrom_get_capabilities(drive, buf)) {
ide_cdrom_update_speed(drive, buf);
cdi->speed = cd->current_speed;
}
return 0;
}
int ide_cdrom_get_last_session(struct cdrom_device_info *cdi,
struct cdrom_multisession *ms_info)
{
struct atapi_toc *toc;
ide_drive_t *drive = cdi->handle;
struct cdrom_info *info = drive->driver_data;
struct request_sense sense;
int ret;
if ((drive->atapi_flags & IDE_AFLAG_TOC_VALID) == 0 || !info->toc) {
ret = ide_cd_read_toc(drive, &sense);
if (ret)
return ret;
}
toc = info->toc;
ms_info->addr.lba = toc->last_session_lba;
ms_info->xa_flag = toc->xa_flag;
return 0;
}
int ide_cdrom_get_mcn(struct cdrom_device_info *cdi,
struct cdrom_mcn *mcn_info)
{
ide_drive_t *drive = cdi->handle;
int stat, mcnlen;
char buf[24];
unsigned char cmd[BLK_MAX_CDB];
unsigned len = sizeof(buf);
memset(cmd, 0, BLK_MAX_CDB);
cmd[0] = GPCMD_READ_SUBCHANNEL;
cmd[1] = 2; /* MSF addressing */
cmd[2] = 0x40; /* request subQ data */
cmd[3] = 2; /* format */
cmd[8] = len;
stat = ide_cd_queue_pc(drive, cmd, 0, buf, &len, NULL, 0, 0);
if (stat)
return stat;
mcnlen = sizeof(mcn_info->medium_catalog_number) - 1;
memcpy(mcn_info->medium_catalog_number, buf + 9, mcnlen);
mcn_info->medium_catalog_number[mcnlen] = '\0';
return 0;
}
int ide_cdrom_reset(struct cdrom_device_info *cdi)
{
ide_drive_t *drive = cdi->handle;
struct cdrom_info *cd = drive->driver_data;
struct request_sense sense;
struct request *rq;
int ret;
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_SPECIAL;
rq->cmd_flags = REQ_QUIET;
ret = blk_execute_rq(drive->queue, cd->disk, rq, 0);
blk_put_request(rq);
/*
* A reset will unlock the door. If it was previously locked,
* lock it again.
*/
if (drive->atapi_flags & IDE_AFLAG_DOOR_LOCKED)
(void)ide_cd_lockdoor(drive, 1, &sense);
return ret;
}
static int ide_cd_get_toc_entry(ide_drive_t *drive, int track,
struct atapi_toc_entry **ent)
{
struct cdrom_info *info = drive->driver_data;
struct atapi_toc *toc = info->toc;
int ntracks;
/*
* don't serve cached data, if the toc isn't valid
*/
if ((drive->atapi_flags & IDE_AFLAG_TOC_VALID) == 0)
return -EINVAL;
/* Check validity of requested track number. */
ntracks = toc->hdr.last_track - toc->hdr.first_track + 1;
if (toc->hdr.first_track == CDROM_LEADOUT)
ntracks = 0;
if (track == CDROM_LEADOUT)
*ent = &toc->ent[ntracks];
else if (track < toc->hdr.first_track || track > toc->hdr.last_track)
return -EINVAL;
else
*ent = &toc->ent[track - toc->hdr.first_track];
return 0;
}
static int ide_cd_fake_play_trkind(ide_drive_t *drive, void *arg)
{
struct cdrom_ti *ti = arg;
struct atapi_toc_entry *first_toc, *last_toc;
unsigned long lba_start, lba_end;
int stat;
struct request_sense sense;
unsigned char cmd[BLK_MAX_CDB];
stat = ide_cd_get_toc_entry(drive, ti->cdti_trk0, &first_toc);
if (stat)
return stat;
stat = ide_cd_get_toc_entry(drive, ti->cdti_trk1, &last_toc);
if (stat)
return stat;
if (ti->cdti_trk1 != CDROM_LEADOUT)
++last_toc;
lba_start = first_toc->addr.lba;
lba_end = last_toc->addr.lba;
if (lba_end <= lba_start)
return -EINVAL;
memset(cmd, 0, BLK_MAX_CDB);
cmd[0] = GPCMD_PLAY_AUDIO_MSF;
lba_to_msf(lba_start, &cmd[3], &cmd[4], &cmd[5]);
lba_to_msf(lba_end - 1, &cmd[6], &cmd[7], &cmd[8]);
return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, &sense, 0, 0);
}
static int ide_cd_read_tochdr(ide_drive_t *drive, void *arg)
{
struct cdrom_info *cd = drive->driver_data;
struct cdrom_tochdr *tochdr = arg;
struct atapi_toc *toc;
int stat;
/* Make sure our saved TOC is valid. */
stat = ide_cd_read_toc(drive, NULL);
if (stat)
return stat;
toc = cd->toc;
tochdr->cdth_trk0 = toc->hdr.first_track;
tochdr->cdth_trk1 = toc->hdr.last_track;
return 0;
}
static int ide_cd_read_tocentry(ide_drive_t *drive, void *arg)
{
struct cdrom_tocentry *tocentry = arg;
struct atapi_toc_entry *toce;
int stat;
stat = ide_cd_get_toc_entry(drive, tocentry->cdte_track, &toce);
if (stat)
return stat;
tocentry->cdte_ctrl = toce->control;
tocentry->cdte_adr = toce->adr;
if (tocentry->cdte_format == CDROM_MSF) {
lba_to_msf(toce->addr.lba,
&tocentry->cdte_addr.msf.minute,
&tocentry->cdte_addr.msf.second,
&tocentry->cdte_addr.msf.frame);
} else
tocentry->cdte_addr.lba = toce->addr.lba;
return 0;
}
int ide_cdrom_audio_ioctl(struct cdrom_device_info *cdi,
unsigned int cmd, void *arg)
{
ide_drive_t *drive = cdi->handle;
switch (cmd) {
/*
* emulate PLAY_AUDIO_TI command with PLAY_AUDIO_10, since
* atapi doesn't support it
*/
case CDROMPLAYTRKIND:
return ide_cd_fake_play_trkind(drive, arg);
case CDROMREADTOCHDR:
return ide_cd_read_tochdr(drive, arg);
case CDROMREADTOCENTRY:
return ide_cd_read_tocentry(drive, arg);
default:
return -EINVAL;
}
}
/* the generic packet interface to cdrom.c */
int ide_cdrom_packet(struct cdrom_device_info *cdi,
struct packet_command *cgc)
{
ide_drive_t *drive = cdi->handle;
unsigned int flags = 0;
unsigned len = cgc->buflen;
if (cgc->timeout <= 0)
cgc->timeout = ATAPI_WAIT_PC;
/* here we queue the commands from the uniform CD-ROM
layer. the packet must be complete, as we do not
touch it at all. */
if (cgc->data_direction == CGC_DATA_WRITE)
flags |= REQ_WRITE;
if (cgc->sense)
memset(cgc->sense, 0, sizeof(struct request_sense));
if (cgc->quiet)
flags |= REQ_QUIET;
cgc->stat = ide_cd_queue_pc(drive, cgc->cmd,
cgc->data_direction == CGC_DATA_WRITE,
cgc->buffer, &len,
cgc->sense, cgc->timeout, flags);
if (!cgc->stat)
cgc->buflen -= len;
return cgc->stat;
}
| gpl-2.0 |
balika011/android_kernel_lenovo_spark | arch/parisc/math-emu/fcnvfxt.c | 14149 | 8658 | /*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/spmath/fcnvfxt.c $Revision: 1.1 $
*
* Purpose:
* Single Floating-point to Single Fixed-point /w truncated result
* Single Floating-point to Double Fixed-point /w truncated result
* Double Floating-point to Single Fixed-point /w truncated result
* Double Floating-point to Double Fixed-point /w truncated result
*
* External Interfaces:
* dbl_to_dbl_fcnvfxt(srcptr,nullptr,dstptr,status)
* dbl_to_sgl_fcnvfxt(srcptr,nullptr,dstptr,status)
* sgl_to_dbl_fcnvfxt(srcptr,nullptr,dstptr,status)
* sgl_to_sgl_fcnvfxt(srcptr,nullptr,dstptr,status)
*
* Internal Interfaces:
*
* Theory:
* <<please update with a overview of the operation of this file>>
*
* END_DESC
*/
#include "float.h"
#include "sgl_float.h"
#include "dbl_float.h"
#include "cnv_float.h"
/*
* Convert single floating-point to single fixed-point format
* with truncated result
*/
/*ARGSUSED*/
int
sgl_to_sgl_fcnvfxt(
sgl_floating_point *srcptr,
unsigned int *nullptr,
int *dstptr,
unsigned int *status)
{
register unsigned int src, temp;
register int src_exponent, result;
src = *srcptr;
src_exponent = Sgl_exponent(src) - SGL_BIAS;
/*
* Test for overflow
*/
if (src_exponent > SGL_FX_MAX_EXP) {
/* check for MININT */
if ((src_exponent > SGL_FX_MAX_EXP + 1) ||
Sgl_isnotzero_mantissa(src) || Sgl_iszero_sign(src)) {
if (Sgl_iszero_sign(src)) result = 0x7fffffff;
else result = 0x80000000;
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
*dstptr = result;
return(NOEXCEPTION);
}
}
/*
* Generate result
*/
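/*
* With the hidden bit restored, the mantissa is shifted so that only
* the integer part survives; bits shifted out are simply dropped,
* giving truncation toward zero, and any loss is reported via the
* inexact flag/trap below.
*/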
if (src_exponent >= 0) {
temp = src;
Sgl_clear_signexponent_set_hidden(temp);
Int_from_sgl_mantissa(temp,src_exponent);
if (Sgl_isone_sign(src)) result = -Sgl_all(temp);
else result = Sgl_all(temp);
*dstptr = result;
/* check for inexact */
if (Sgl_isinexact_to_fix(src,src_exponent)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
else {
*dstptr = 0;
/* check for inexact */
if (Sgl_isnotzero_exponentmantissa(src)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
return(NOEXCEPTION);
}
/*
* Single Floating-point to Double Fixed-point
*/
/*ARGSUSED*/
int
sgl_to_dbl_fcnvfxt(
sgl_floating_point *srcptr,
unsigned int *nullptr,
dbl_integer *dstptr,
unsigned int *status)
{
register int src_exponent, resultp1;
register unsigned int src, temp, resultp2;
src = *srcptr;
src_exponent = Sgl_exponent(src) - SGL_BIAS;
/*
* Test for overflow
*/
if (src_exponent > DBL_FX_MAX_EXP) {
/* check for MININT */
if ((src_exponent > DBL_FX_MAX_EXP + 1) ||
Sgl_isnotzero_mantissa(src) || Sgl_iszero_sign(src)) {
if (Sgl_iszero_sign(src)) {
resultp1 = 0x7fffffff;
resultp2 = 0xffffffff;
}
else {
resultp1 = 0x80000000;
resultp2 = 0;
}
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
Dint_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
Dint_set_minint(resultp1,resultp2);
Dint_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* Generate result
*/
if (src_exponent >= 0) {
temp = src;
Sgl_clear_signexponent_set_hidden(temp);
Dint_from_sgl_mantissa(temp,src_exponent,resultp1,resultp2);
if (Sgl_isone_sign(src)) {
Dint_setone_sign(resultp1,resultp2);
}
Dint_copytoptr(resultp1,resultp2,dstptr);
/* check for inexact */
if (Sgl_isinexact_to_fix(src,src_exponent)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
else {
Dint_setzero(resultp1,resultp2);
Dint_copytoptr(resultp1,resultp2,dstptr);
/* check for inexact */
if (Sgl_isnotzero_exponentmantissa(src)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
return(NOEXCEPTION);
}
/*
* Double Floating-point to Single Fixed-point
*/
/*ARGSUSED*/
int
dbl_to_sgl_fcnvfxt(
dbl_floating_point *srcptr,
unsigned int *nullptr,
int *dstptr,
unsigned int *status)
{
register unsigned int srcp1, srcp2, tempp1, tempp2;
register int src_exponent, result;
Dbl_copyfromptr(srcptr,srcp1,srcp2);
src_exponent = Dbl_exponent(srcp1) - DBL_BIAS;
/*
* Test for overflow
*/
if (src_exponent > SGL_FX_MAX_EXP) {
/* check for MININT */
if (Dbl_isoverflow_to_int(src_exponent,srcp1,srcp2)) {
if (Dbl_iszero_sign(srcp1)) result = 0x7fffffff;
else result = 0x80000000;
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
*dstptr = result;
return(NOEXCEPTION);
}
}
/*
* Generate result
*/
if (src_exponent >= 0) {
tempp1 = srcp1;
tempp2 = srcp2;
Dbl_clear_signexponent_set_hidden(tempp1);
Int_from_dbl_mantissa(tempp1,tempp2,src_exponent);
if (Dbl_isone_sign(srcp1) && (src_exponent <= SGL_FX_MAX_EXP))
result = -Dbl_allp1(tempp1);
else result = Dbl_allp1(tempp1);
*dstptr = result;
/* check for inexact */
if (Dbl_isinexact_to_fix(srcp1,srcp2,src_exponent)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
else {
*dstptr = 0;
/* check for inexact */
if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
return(NOEXCEPTION);
}
/*
* Double Floating-point to Double Fixed-point
*/
/*ARGSUSED*/
int
dbl_to_dbl_fcnvfxt(
dbl_floating_point *srcptr,
unsigned int *nullptr,
dbl_integer *dstptr,
unsigned int *status)
{
register int src_exponent, resultp1;
register unsigned int srcp1, srcp2, tempp1, tempp2, resultp2;
Dbl_copyfromptr(srcptr,srcp1,srcp2);
src_exponent = Dbl_exponent(srcp1) - DBL_BIAS;
/*
* Test for overflow
*/
if (src_exponent > DBL_FX_MAX_EXP) {
/* check for MININT */
if ((src_exponent > DBL_FX_MAX_EXP + 1) ||
Dbl_isnotzero_mantissa(srcp1,srcp2) || Dbl_iszero_sign(srcp1)) {
if (Dbl_iszero_sign(srcp1)) {
resultp1 = 0x7fffffff;
resultp2 = 0xffffffff;
}
else {
resultp1 = 0x80000000;
resultp2 = 0;
}
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
Dint_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
}
/*
* Generate result
*/
if (src_exponent >= 0) {
tempp1 = srcp1;
tempp2 = srcp2;
Dbl_clear_signexponent_set_hidden(tempp1);
Dint_from_dbl_mantissa(tempp1,tempp2,src_exponent,
resultp1,resultp2);
if (Dbl_isone_sign(srcp1)) {
Dint_setone_sign(resultp1,resultp2);
}
Dint_copytoptr(resultp1,resultp2,dstptr);
/* check for inexact */
if (Dbl_isinexact_to_fix(srcp1,srcp2,src_exponent)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
else {
Dint_setzero(resultp1,resultp2);
Dint_copytoptr(resultp1,resultp2,dstptr);
/* check for inexact */
if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
return(NOEXCEPTION);
}
| gpl-2.0 |
artefvck/X_Artefvck | arch/parisc/math-emu/fcnvfxt.c | 14149 | 8658 | /*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/spmath/fcnvfxt.c $Revision: 1.1 $
*
* Purpose:
 *	Single Floating-point to Single Fixed-point with truncated result
 *	Single Floating-point to Double Fixed-point with truncated result
 *	Double Floating-point to Single Fixed-point with truncated result
 *	Double Floating-point to Double Fixed-point with truncated result
*
* External Interfaces:
* dbl_to_dbl_fcnvfxt(srcptr,nullptr,dstptr,status)
* dbl_to_sgl_fcnvfxt(srcptr,nullptr,dstptr,status)
* sgl_to_dbl_fcnvfxt(srcptr,nullptr,dstptr,status)
* sgl_to_sgl_fcnvfxt(srcptr,nullptr,dstptr,status)
*
* Internal Interfaces:
*
* Theory:
 *	<<please update with an overview of the operation of this file>>
*
* END_DESC
*/
#include "float.h"
#include "sgl_float.h"
#include "dbl_float.h"
#include "cnv_float.h"
/*
* Convert single floating-point to single fixed-point format
* with truncated result
*/
/*ARGSUSED*/
int
sgl_to_sgl_fcnvfxt(
sgl_floating_point *srcptr,
unsigned int *nullptr,
int *dstptr,
unsigned int *status)
{
register unsigned int src, temp;
register int src_exponent, result;
src = *srcptr;
src_exponent = Sgl_exponent(src) - SGL_BIAS;
/*
* Test for overflow
*/
if (src_exponent > SGL_FX_MAX_EXP) {
/* check for MININT */
if ((src_exponent > SGL_FX_MAX_EXP + 1) ||
Sgl_isnotzero_mantissa(src) || Sgl_iszero_sign(src)) {
if (Sgl_iszero_sign(src)) result = 0x7fffffff;
else result = 0x80000000;
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
*dstptr = result;
return(NOEXCEPTION);
}
}
/*
* Generate result
*/
if (src_exponent >= 0) {
temp = src;
Sgl_clear_signexponent_set_hidden(temp);
Int_from_sgl_mantissa(temp,src_exponent);
if (Sgl_isone_sign(src)) result = -Sgl_all(temp);
else result = Sgl_all(temp);
*dstptr = result;
/* check for inexact */
if (Sgl_isinexact_to_fix(src,src_exponent)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
else {
*dstptr = 0;
/* check for inexact */
if (Sgl_isnotzero_exponentmantissa(src)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
return(NOEXCEPTION);
}
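/*
 * Example of the truncating conversion above: 2.75f (sign 0, unbiased
 * exponent 1, mantissa 0.011...) converts toward zero to the integer 2;
 * the discarded fraction makes the operation inexact, so the inexact
 * flag is set (or the inexact trap is taken, if enabled).
 */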
/*
* Single Floating-point to Double Fixed-point
*/
/*ARGSUSED*/
int
sgl_to_dbl_fcnvfxt(
sgl_floating_point *srcptr,
unsigned int *nullptr,
dbl_integer *dstptr,
unsigned int *status)
{
register int src_exponent, resultp1;
register unsigned int src, temp, resultp2;
src = *srcptr;
src_exponent = Sgl_exponent(src) - SGL_BIAS;
/*
* Test for overflow
*/
if (src_exponent > DBL_FX_MAX_EXP) {
/* check for MININT */
if ((src_exponent > DBL_FX_MAX_EXP + 1) ||
Sgl_isnotzero_mantissa(src) || Sgl_iszero_sign(src)) {
if (Sgl_iszero_sign(src)) {
resultp1 = 0x7fffffff;
resultp2 = 0xffffffff;
}
else {
resultp1 = 0x80000000;
resultp2 = 0;
}
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
Dint_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
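		/* source is exactly MININT: deliver it with no exception */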
Dint_set_minint(resultp1,resultp2);
Dint_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* Generate result
*/
if (src_exponent >= 0) {
temp = src;
Sgl_clear_signexponent_set_hidden(temp);
Dint_from_sgl_mantissa(temp,src_exponent,resultp1,resultp2);
if (Sgl_isone_sign(src)) {
Dint_setone_sign(resultp1,resultp2);
}
Dint_copytoptr(resultp1,resultp2,dstptr);
/* check for inexact */
if (Sgl_isinexact_to_fix(src,src_exponent)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
else {
Dint_setzero(resultp1,resultp2);
Dint_copytoptr(resultp1,resultp2,dstptr);
/* check for inexact */
if (Sgl_isnotzero_exponentmantissa(src)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
return(NOEXCEPTION);
}
/*
* Double Floating-point to Single Fixed-point
*/
/*ARGSUSED*/
int
dbl_to_sgl_fcnvfxt(
dbl_floating_point *srcptr,
unsigned int *nullptr,
int *dstptr,
unsigned int *status)
{
register unsigned int srcp1, srcp2, tempp1, tempp2;
register int src_exponent, result;
Dbl_copyfromptr(srcptr,srcp1,srcp2);
src_exponent = Dbl_exponent(srcp1) - DBL_BIAS;
/*
* Test for overflow
*/
if (src_exponent > SGL_FX_MAX_EXP) {
/* check for MININT */
if (Dbl_isoverflow_to_int(src_exponent,srcp1,srcp2)) {
if (Dbl_iszero_sign(srcp1)) result = 0x7fffffff;
else result = 0x80000000;
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
*dstptr = result;
return(NOEXCEPTION);
}
}
/*
* Generate result
*/
if (src_exponent >= 0) {
tempp1 = srcp1;
tempp2 = srcp2;
Dbl_clear_signexponent_set_hidden(tempp1);
Int_from_dbl_mantissa(tempp1,tempp2,src_exponent);
if (Dbl_isone_sign(srcp1) && (src_exponent <= SGL_FX_MAX_EXP))
result = -Dbl_allp1(tempp1);
else result = Dbl_allp1(tempp1);
*dstptr = result;
/* check for inexact */
if (Dbl_isinexact_to_fix(srcp1,srcp2,src_exponent)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
else {
*dstptr = 0;
/* check for inexact */
if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
return(NOEXCEPTION);
}
/*
* Double Floating-point to Double Fixed-point
*/
/*ARGSUSED*/
int
dbl_to_dbl_fcnvfxt(
dbl_floating_point *srcptr,
unsigned int *nullptr,
dbl_integer *dstptr,
unsigned int *status)
{
register int src_exponent, resultp1;
register unsigned int srcp1, srcp2, tempp1, tempp2, resultp2;
Dbl_copyfromptr(srcptr,srcp1,srcp2);
src_exponent = Dbl_exponent(srcp1) - DBL_BIAS;
/*
* Test for overflow
*/
if (src_exponent > DBL_FX_MAX_EXP) {
/* check for MININT */
if ((src_exponent > DBL_FX_MAX_EXP + 1) ||
Dbl_isnotzero_mantissa(srcp1,srcp2) || Dbl_iszero_sign(srcp1)) {
if (Dbl_iszero_sign(srcp1)) {
resultp1 = 0x7fffffff;
resultp2 = 0xffffffff;
}
else {
resultp1 = 0x80000000;
resultp2 = 0;
}
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
Dint_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
}
/*
* Generate result
*/
if (src_exponent >= 0) {
tempp1 = srcp1;
tempp2 = srcp2;
Dbl_clear_signexponent_set_hidden(tempp1);
Dint_from_dbl_mantissa(tempp1,tempp2,src_exponent,
resultp1,resultp2);
if (Dbl_isone_sign(srcp1)) {
Dint_setone_sign(resultp1,resultp2);
}
Dint_copytoptr(resultp1,resultp2,dstptr);
/* check for inexact */
if (Dbl_isinexact_to_fix(srcp1,srcp2,src_exponent)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
else {
Dint_setzero(resultp1,resultp2);
Dint_copytoptr(resultp1,resultp2,dstptr);
/* check for inexact */
if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
return(NOEXCEPTION);
}
| gpl-2.0 |
crpalmer/android_kernel_motorola_msm8974 | arch/parisc/math-emu/dfdiv.c | 14149 | 12636 | /*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/spmath/dfdiv.c $Revision: 1.1 $
*
* Purpose:
* Double Precision Floating-point Divide
*
* External Interfaces:
* dbl_fdiv(srcptr1,srcptr2,dstptr,status)
*
* Internal Interfaces:
*
* Theory:
 *	<<please update with an overview of the operation of this file>>
*
* END_DESC
*/
#include "float.h"
#include "dbl_float.h"
/*
* Double Precision Floating-point Divide
*/
int
dbl_fdiv (dbl_floating_point * srcptr1, dbl_floating_point * srcptr2,
dbl_floating_point * dstptr, unsigned int *status)
{
register unsigned int opnd1p1, opnd1p2, opnd2p1, opnd2p2;
register unsigned int opnd3p1, opnd3p2, resultp1, resultp2;
register int dest_exponent, count;
register boolean inexact = FALSE, guardbit = FALSE, stickybit = FALSE;
boolean is_tiny;
Dbl_copyfromptr(srcptr1,opnd1p1,opnd1p2);
Dbl_copyfromptr(srcptr2,opnd2p1,opnd2p2);
/*
* set sign bit of result
*/
if (Dbl_sign(opnd1p1) ^ Dbl_sign(opnd2p1))
Dbl_setnegativezerop1(resultp1);
else Dbl_setzerop1(resultp1);
/*
* check first operand for NaN's or infinity
*/
if (Dbl_isinfinity_exponent(opnd1p1)) {
if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
if (Dbl_isnotnan(opnd2p1,opnd2p2)) {
if (Dbl_isinfinity(opnd2p1,opnd2p2)) {
/*
* invalid since both operands
* are infinity
*/
if (Is_invalidtrap_enabled())
return(INVALIDEXCEPTION);
Set_invalidflag();
Dbl_makequietnan(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* return infinity
*/
Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
}
else {
/*
* is NaN; signaling or quiet?
*/
if (Dbl_isone_signaling(opnd1p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd1p1);
}
/*
* is second operand a signaling NaN?
*/
else if (Dbl_is_signalingnan(opnd2p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd2p1);
Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
return(NOEXCEPTION);
}
/*
* return quiet NaN
*/
Dbl_copytoptr(opnd1p1,opnd1p2,dstptr);
return(NOEXCEPTION);
}
}
/*
* check second operand for NaN's or infinity
*/
if (Dbl_isinfinity_exponent(opnd2p1)) {
if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
/*
* return zero
*/
Dbl_setzero_exponentmantissa(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* is NaN; signaling or quiet?
*/
if (Dbl_isone_signaling(opnd2p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd2p1);
}
/*
* return quiet NaN
*/
Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
return(NOEXCEPTION);
}
/*
* check for division by zero
*/
if (Dbl_iszero_exponentmantissa(opnd2p1,opnd2p2)) {
if (Dbl_iszero_exponentmantissa(opnd1p1,opnd1p2)) {
/* invalid since both operands are zero */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
Set_invalidflag();
Dbl_makequietnan(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
if (Is_divisionbyzerotrap_enabled())
return(DIVISIONBYZEROEXCEPTION);
Set_divisionbyzeroflag();
Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* Generate exponent
*/
dest_exponent = Dbl_exponent(opnd1p1) - Dbl_exponent(opnd2p1) + DBL_BIAS;
/*
* Generate mantissa
*/
if (Dbl_isnotzero_exponent(opnd1p1)) {
/* set hidden bit */
Dbl_clear_signexponent_set_hidden(opnd1p1);
}
else {
/* check for zero */
if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
Dbl_setzero_exponentmantissa(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/* is denormalized, want to normalize */
Dbl_clear_signexponent(opnd1p1);
Dbl_leftshiftby1(opnd1p1,opnd1p2);
Dbl_normalize(opnd1p1,opnd1p2,dest_exponent);
}
	/* opnd2 needs its hidden bit set, i.e. normalized so the msb sits
	 * in the hidden-bit position */
if (Dbl_isnotzero_exponent(opnd2p1)) {
Dbl_clear_signexponent_set_hidden(opnd2p1);
}
else {
/* is denormalized; want to normalize */
Dbl_clear_signexponent(opnd2p1);
Dbl_leftshiftby1(opnd2p1,opnd2p2);
while (Dbl_iszero_hiddenhigh7mantissa(opnd2p1)) {
dest_exponent+=8;
Dbl_leftshiftby8(opnd2p1,opnd2p2);
}
if (Dbl_iszero_hiddenhigh3mantissa(opnd2p1)) {
dest_exponent+=4;
Dbl_leftshiftby4(opnd2p1,opnd2p2);
}
while (Dbl_iszero_hidden(opnd2p1)) {
dest_exponent++;
Dbl_leftshiftby1(opnd2p1,opnd2p2);
}
}
/* Divide the source mantissas */
/*
* A non-restoring divide algorithm is used.
*/
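	/*
	 * Each pass shifts the partial remainder (opnd1) and the quotient
	 * (opnd3) left one bit.  A non-negative remainder yields a 1
	 * quotient bit and a subtract of the divisor; a negative remainder
	 * yields a 0 bit and an add, folding the "restore" step into the
	 * next iteration, hence "non-restoring".  The loop produces up to
	 * DBL_P quotient bits, stopping early if the remainder becomes
	 * exactly zero.
	 */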
Twoword_subtract(opnd1p1,opnd1p2,opnd2p1,opnd2p2);
Dbl_setzero(opnd3p1,opnd3p2);
for (count=1; count <= DBL_P && (opnd1p1 || opnd1p2); count++) {
Dbl_leftshiftby1(opnd1p1,opnd1p2);
Dbl_leftshiftby1(opnd3p1,opnd3p2);
if (Dbl_iszero_sign(opnd1p1)) {
Dbl_setone_lowmantissap2(opnd3p2);
Twoword_subtract(opnd1p1,opnd1p2,opnd2p1,opnd2p2);
}
else {
Twoword_add(opnd1p1, opnd1p2, opnd2p1, opnd2p2);
}
}
if (count <= DBL_P) {
Dbl_leftshiftby1(opnd3p1,opnd3p2);
Dbl_setone_lowmantissap2(opnd3p2);
Dbl_leftshift(opnd3p1,opnd3p2,(DBL_P-count));
if (Dbl_iszero_hidden(opnd3p1)) {
Dbl_leftshiftby1(opnd3p1,opnd3p2);
dest_exponent--;
}
}
else {
if (Dbl_iszero_hidden(opnd3p1)) {
/* need to get one more bit of result */
Dbl_leftshiftby1(opnd1p1,opnd1p2);
Dbl_leftshiftby1(opnd3p1,opnd3p2);
if (Dbl_iszero_sign(opnd1p1)) {
Dbl_setone_lowmantissap2(opnd3p2);
Twoword_subtract(opnd1p1,opnd1p2,opnd2p1,opnd2p2);
}
else {
Twoword_add(opnd1p1,opnd1p2,opnd2p1,opnd2p2);
}
dest_exponent--;
}
if (Dbl_iszero_sign(opnd1p1)) guardbit = TRUE;
stickybit = Dbl_allp1(opnd1p1) || Dbl_allp2(opnd1p2);
}
inexact = guardbit | stickybit;
/*
* round result
*/
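	/*
	 * ROUNDNEAREST here is round-to-nearest-even: round up only when
	 * the guard bit is set and either the sticky bit or the current
	 * low mantissa bit (the "even" test) is also set.
	 */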
if (inexact && (dest_exponent > 0 || Is_underflowtrap_enabled())) {
Dbl_clear_signexponent(opnd3p1);
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Dbl_iszero_sign(resultp1))
Dbl_increment(opnd3p1,opnd3p2);
break;
case ROUNDMINUS:
if (Dbl_isone_sign(resultp1))
Dbl_increment(opnd3p1,opnd3p2);
break;
case ROUNDNEAREST:
if (guardbit && (stickybit ||
Dbl_isone_lowmantissap2(opnd3p2))) {
Dbl_increment(opnd3p1,opnd3p2);
}
}
if (Dbl_isone_hidden(opnd3p1)) dest_exponent++;
}
Dbl_set_mantissa(resultp1,resultp2,opnd3p1,opnd3p2);
/*
* Test for overflow
*/
if (dest_exponent >= DBL_INFINITY_EXPONENT) {
/* trap if OVERFLOWTRAP enabled */
if (Is_overflowtrap_enabled()) {
/*
* Adjust bias of result
*/
Dbl_setwrapped_exponent(resultp1,dest_exponent,ovfl);
Dbl_copytoptr(resultp1,resultp2,dstptr);
if (inexact)
if (Is_inexacttrap_enabled())
return(OVERFLOWEXCEPTION | INEXACTEXCEPTION);
else Set_inexactflag();
return(OVERFLOWEXCEPTION);
}
Set_overflowflag();
/* set result to infinity or largest number */
Dbl_setoverflow(resultp1,resultp2);
inexact = TRUE;
}
/*
* Test for underflow
*/
else if (dest_exponent <= 0) {
/* trap if UNDERFLOWTRAP enabled */
if (Is_underflowtrap_enabled()) {
/*
* Adjust bias of result
*/
Dbl_setwrapped_exponent(resultp1,dest_exponent,unfl);
Dbl_copytoptr(resultp1,resultp2,dstptr);
if (inexact)
if (Is_inexacttrap_enabled())
return(UNDERFLOWEXCEPTION | INEXACTEXCEPTION);
else Set_inexactflag();
return(UNDERFLOWEXCEPTION);
}
/* Determine if should set underflow flag */
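		/*
		 * Tininess is detected after rounding: tentatively round
		 * the result and see whether the increment carries out of
		 * the hidden bit (i.e. the rounded value escapes the
		 * denormal range).  The increment/decrement pairs below
		 * probe this without disturbing the mantissa.
		 */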
is_tiny = TRUE;
if (dest_exponent == 0 && inexact) {
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Dbl_iszero_sign(resultp1)) {
Dbl_increment(opnd3p1,opnd3p2);
if (Dbl_isone_hiddenoverflow(opnd3p1))
is_tiny = FALSE;
Dbl_decrement(opnd3p1,opnd3p2);
}
break;
case ROUNDMINUS:
if (Dbl_isone_sign(resultp1)) {
Dbl_increment(opnd3p1,opnd3p2);
if (Dbl_isone_hiddenoverflow(opnd3p1))
is_tiny = FALSE;
Dbl_decrement(opnd3p1,opnd3p2);
}
break;
case ROUNDNEAREST:
if (guardbit && (stickybit ||
Dbl_isone_lowmantissap2(opnd3p2))) {
Dbl_increment(opnd3p1,opnd3p2);
if (Dbl_isone_hiddenoverflow(opnd3p1))
is_tiny = FALSE;
Dbl_decrement(opnd3p1,opnd3p2);
}
break;
}
}
/*
* denormalize result or set to signed zero
*/
stickybit = inexact;
Dbl_denormalize(opnd3p1,opnd3p2,dest_exponent,guardbit,
stickybit,inexact);
/* return rounded number */
if (inexact) {
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Dbl_iszero_sign(resultp1)) {
Dbl_increment(opnd3p1,opnd3p2);
}
break;
case ROUNDMINUS:
if (Dbl_isone_sign(resultp1)) {
Dbl_increment(opnd3p1,opnd3p2);
}
break;
case ROUNDNEAREST:
if (guardbit && (stickybit ||
Dbl_isone_lowmantissap2(opnd3p2))) {
Dbl_increment(opnd3p1,opnd3p2);
}
break;
}
if (is_tiny) Set_underflowflag();
}
Dbl_set_exponentmantissa(resultp1,resultp2,opnd3p1,opnd3p2);
}
else Dbl_set_exponent(resultp1,dest_exponent);
Dbl_copytoptr(resultp1,resultp2,dstptr);
/* check for inexact */
if (inexact) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
return(NOEXCEPTION);
}
| gpl-2.0 |
metasepi/linux-bohai-s2 | drivers/usb/host/whci/hw.c | 14661 | 2864 | /*
* Wireless Host Controller (WHC) hardware access helpers.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/uwb/umc.h>
#include "../../wusbcore/wusbhc.h"
#include "whcd.h"
void whc_write_wusbcmd(struct whc *whc, u32 mask, u32 val)
{
unsigned long flags;
u32 cmd;
spin_lock_irqsave(&whc->lock, flags);
cmd = le_readl(whc->base + WUSBCMD);
cmd = (cmd & ~mask) | val;
le_writel(cmd, whc->base + WUSBCMD);
spin_unlock_irqrestore(&whc->lock, flags);
}
/**
* whc_do_gencmd - start a generic command via the WUSBGENCMDSTS register
* @whc: the WHCI HC
* @cmd: command to start.
* @params: parameters for the command (the WUSBGENCMDPARAMS register value).
* @addr: pointer to any data for the command (may be NULL).
* @len: length of the data (if any).
*/
int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len)
{
unsigned long flags;
dma_addr_t dma_addr;
int t;
int ret = 0;
mutex_lock(&whc->mutex);
/* Wait for previous command to complete. */
t = wait_event_timeout(whc->cmd_wq,
(le_readl(whc->base + WUSBGENCMDSTS) & WUSBGENCMDSTS_ACTIVE) == 0,
WHC_GENCMD_TIMEOUT_MS);
if (t == 0) {
dev_err(&whc->umc->dev, "generic command timeout (%04x/%04x)\n",
le_readl(whc->base + WUSBGENCMDSTS),
le_readl(whc->base + WUSBGENCMDPARAMS));
ret = -ETIMEDOUT;
goto out;
}
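	/*
	 * The payload is copied into the pre-allocated DMA bounce buffer
	 * rather than mapped directly, so callers may pass pointers to
	 * stack memory; len must not exceed the size that was allocated
	 * for gen_cmd_buf.
	 */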
if (addr) {
memcpy(whc->gen_cmd_buf, addr, len);
dma_addr = whc->gen_cmd_buf_dma;
} else
dma_addr = 0;
/* Poke registers to start cmd. */
spin_lock_irqsave(&whc->lock, flags);
le_writel(params, whc->base + WUSBGENCMDPARAMS);
le_writeq(dma_addr, whc->base + WUSBGENADDR);
le_writel(WUSBGENCMDSTS_ACTIVE | WUSBGENCMDSTS_IOC | cmd,
whc->base + WUSBGENCMDSTS);
spin_unlock_irqrestore(&whc->lock, flags);
out:
mutex_unlock(&whc->mutex);
return ret;
}
/**
* whc_hw_error - recover from a hardware error
* @whc: the WHCI HC that broke.
* @reason: a description of the failure.
*
* Recover from broken hardware with a full reset.
*/
void whc_hw_error(struct whc *whc, const char *reason)
{
struct wusbhc *wusbhc = &whc->wusbhc;
dev_err(&whc->umc->dev, "hardware error: %s\n", reason);
wusbhc_reset_all(wusbhc);
}
| gpl-2.0 |
codeaurora-unoffical/linux-msm | drivers/net/ethernet/marvell/mv643xx_eth.c | 70 | 80219 | // SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports
* Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
*
* Based on the 64360 driver from:
* Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
* Rabeeh Khoury <rabeeh@marvell.com>
*
* Copyright (C) 2003 PMC-Sierra, Inc.,
* written by Manish Lachwani
*
* Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
*
* Copyright (C) 2004-2006 MontaVista Software, Inc.
* Dale Farnsworth <dale@farnsworth.org>
*
* Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
* <sjhill@realitydiluted.com>
*
* Copyright (C) 2007-2008 Marvell Semiconductor
* Lennert Buytenhek <buytenh@marvell.com>
*
* Copyright (C) 2013 Michael Stapelberg <michael@stapelberg.de>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/tso.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/phy.h>
#include <linux/mv643xx_eth.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.4";
/*
* Registers shared between all ports.
*/
#define PHY_ADDR 0x0000
#define WINDOW_BASE(w) (0x0200 + ((w) << 3))
#define WINDOW_SIZE(w) (0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w) (0x0280 + ((w) << 2))
#define WINDOW_BAR_ENABLE 0x0290
#define WINDOW_PROTECT(w) (0x0294 + ((w) << 4))
/*
* Main per-port registers. These live at offset 0x0400 for
* port #0, 0x0800 for port #1, and 0x0c00 for port #2.
*/
#define PORT_CONFIG 0x0000
#define UNICAST_PROMISCUOUS_MODE 0x00000001
#define PORT_CONFIG_EXT 0x0004
#define MAC_ADDR_LOW 0x0014
#define MAC_ADDR_HIGH 0x0018
#define SDMA_CONFIG 0x001c
#define TX_BURST_SIZE_16_64BIT 0x01000000
#define TX_BURST_SIZE_4_64BIT 0x00800000
#define BLM_TX_NO_SWAP 0x00000020
#define BLM_RX_NO_SWAP 0x00000010
#define RX_BURST_SIZE_16_64BIT 0x00000008
#define RX_BURST_SIZE_4_64BIT 0x00000004
#define PORT_SERIAL_CONTROL 0x003c
#define SET_MII_SPEED_TO_100 0x01000000
#define SET_GMII_SPEED_TO_1000 0x00800000
#define SET_FULL_DUPLEX_MODE 0x00200000
#define MAX_RX_PACKET_9700BYTE 0x000a0000
#define DISABLE_AUTO_NEG_SPEED_GMII 0x00002000
#define DO_NOT_FORCE_LINK_FAIL 0x00000400
#define SERIAL_PORT_CONTROL_RESERVED 0x00000200
#define DISABLE_AUTO_NEG_FOR_FLOW_CTRL 0x00000008
#define DISABLE_AUTO_NEG_FOR_DUPLEX 0x00000004
#define FORCE_LINK_PASS 0x00000002
#define SERIAL_PORT_ENABLE 0x00000001
#define PORT_STATUS 0x0044
#define TX_FIFO_EMPTY 0x00000400
#define TX_IN_PROGRESS 0x00000080
#define PORT_SPEED_MASK 0x00000030
#define PORT_SPEED_1000 0x00000010
#define PORT_SPEED_100 0x00000020
#define PORT_SPEED_10 0x00000000
#define FLOW_CONTROL_ENABLED 0x00000008
#define FULL_DUPLEX 0x00000004
#define LINK_UP 0x00000002
#define TXQ_COMMAND 0x0048
#define TXQ_FIX_PRIO_CONF 0x004c
#define PORT_SERIAL_CONTROL1 0x004c
#define CLK125_BYPASS_EN 0x00000010
#define TX_BW_RATE 0x0050
#define TX_BW_MTU 0x0058
#define TX_BW_BURST 0x005c
#define INT_CAUSE 0x0060
#define INT_TX_END 0x07f80000
#define INT_TX_END_0 0x00080000
#define INT_RX 0x000003fc
#define INT_RX_0 0x00000004
#define INT_EXT 0x00000002
#define INT_CAUSE_EXT 0x0064
#define INT_EXT_LINK_PHY 0x00110000
#define INT_EXT_TX 0x000000ff
#define INT_MASK 0x0068
#define INT_MASK_EXT 0x006c
#define TX_FIFO_URGENT_THRESHOLD 0x0074
#define RX_DISCARD_FRAME_CNT 0x0084
#define RX_OVERRUN_FRAME_CNT 0x0088
#define TXQ_FIX_PRIO_CONF_MOVED 0x00dc
#define TX_BW_RATE_MOVED 0x00e0
#define TX_BW_MTU_MOVED 0x00e8
#define TX_BW_BURST_MOVED 0x00ec
#define RXQ_CURRENT_DESC_PTR(q) (0x020c + ((q) << 4))
#define RXQ_COMMAND 0x0280
#define TXQ_CURRENT_DESC_PTR(q) (0x02c0 + ((q) << 2))
#define TXQ_BW_TOKENS(q) (0x0300 + ((q) << 4))
#define TXQ_BW_CONF(q) (0x0304 + ((q) << 4))
#define TXQ_BW_WRR_CONF(q) (0x0308 + ((q) << 4))
/*
* Misc per-port registers.
*/
#define MIB_COUNTERS(p) (0x1000 + ((p) << 7))
#define SPECIAL_MCAST_TABLE(p) (0x1400 + ((p) << 10))
#define OTHER_MCAST_TABLE(p) (0x1500 + ((p) << 10))
#define UNICAST_TABLE(p) (0x1600 + ((p) << 10))
/*
* SDMA configuration register default value.
*/
#if defined(__BIG_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE \
(RX_BURST_SIZE_4_64BIT | \
TX_BURST_SIZE_4_64BIT)
#elif defined(__LITTLE_ENDIAN)
#define PORT_SDMA_CONFIG_DEFAULT_VALUE \
(RX_BURST_SIZE_4_64BIT | \
BLM_RX_NO_SWAP | \
BLM_TX_NO_SWAP | \
TX_BURST_SIZE_4_64BIT)
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif
/*
* Misc definitions.
*/
#define DEFAULT_RX_QUEUE_SIZE 128
#define DEFAULT_TX_QUEUE_SIZE 512
#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
/* Max number of allowed TCP segments for software TSO */
#define MV643XX_MAX_TSO_SEGS 100
#define MV643XX_MAX_SKB_DESCS (MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
#define IS_TSO_HEADER(txq, addr) \
((addr >= txq->tso_hdrs_dma) && \
(addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
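/*
 * IS_TSO_HEADER tells the reclaim path whether a descriptor's buffer
 * address points into the queue's TSO header area (which must not be
 * DMA-unmapped) rather than at mapped skb data.
 */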
#define DESC_DMA_MAP_SINGLE 0
#define DESC_DMA_MAP_PAGE 1
/*
* RX/TX descriptors.
*/
#if defined(__BIG_ENDIAN)
struct rx_desc {
u16 byte_cnt; /* Descriptor buffer byte count */
u16 buf_size; /* Buffer size */
u32 cmd_sts; /* Descriptor command status */
u32 next_desc_ptr; /* Next descriptor pointer */
u32 buf_ptr; /* Descriptor buffer pointer */
};
struct tx_desc {
u16 byte_cnt; /* buffer byte count */
u16 l4i_chk; /* CPU provided TCP checksum */
u32 cmd_sts; /* Command/status field */
u32 next_desc_ptr; /* Pointer to next descriptor */
u32 buf_ptr; /* pointer to buffer for this descriptor*/
};
#elif defined(__LITTLE_ENDIAN)
struct rx_desc {
u32 cmd_sts; /* Descriptor command status */
u16 buf_size; /* Buffer size */
u16 byte_cnt; /* Descriptor buffer byte count */
u32 buf_ptr; /* Descriptor buffer pointer */
u32 next_desc_ptr; /* Next descriptor pointer */
};
struct tx_desc {
u32 cmd_sts; /* Command/status field */
u16 l4i_chk; /* CPU provided TCP checksum */
u16 byte_cnt; /* buffer byte count */
u32 buf_ptr; /* pointer to buffer for this descriptor*/
u32 next_desc_ptr; /* Pointer to next descriptor */
};
#else
#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
#endif
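/*
 * Both layouts appear to describe the same hardware descriptor: together
 * with the BLM_RX_NO_SWAP/BLM_TX_NO_SWAP bits in SDMA_CONFIG above, the
 * field ordering is arranged so the controller sees an identical
 * in-memory format on either host endianness.
 */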
/* RX & TX descriptor command */
#define BUFFER_OWNED_BY_DMA 0x80000000
/* RX & TX descriptor status */
#define ERROR_SUMMARY 0x00000001
/* RX descriptor status */
#define LAYER_4_CHECKSUM_OK 0x40000000
#define RX_ENABLE_INTERRUPT 0x20000000
#define RX_FIRST_DESC 0x08000000
#define RX_LAST_DESC 0x04000000
#define RX_IP_HDR_OK 0x02000000
#define RX_PKT_IS_IPV4 0x01000000
#define RX_PKT_IS_ETHERNETV2 0x00800000
#define RX_PKT_LAYER4_TYPE_MASK 0x00600000
#define RX_PKT_LAYER4_TYPE_TCP_IPV4 0x00000000
#define RX_PKT_IS_VLAN_TAGGED 0x00080000
/* TX descriptor command */
#define TX_ENABLE_INTERRUPT 0x00800000
#define GEN_CRC 0x00400000
#define TX_FIRST_DESC 0x00200000
#define TX_LAST_DESC 0x00100000
#define ZERO_PADDING 0x00080000
#define GEN_IP_V4_CHECKSUM 0x00040000
#define GEN_TCP_UDP_CHECKSUM 0x00020000
#define UDP_FRAME 0x00010000
#define MAC_HDR_EXTRA_4_BYTES 0x00008000
#define GEN_TCP_UDP_CHK_FULL 0x00000400
#define MAC_HDR_EXTRA_8_BYTES 0x00000200
#define TX_IHL_SHIFT 11
/* global *******************************************************************/
struct mv643xx_eth_shared_private {
/*
* Ethernet controller base address.
*/
void __iomem *base;
/*
* Per-port MBUS window access register value.
*/
u32 win_protect;
/*
* Hardware-specific parameters.
*/
int extended_rx_coal_limit;
int tx_bw_control;
int tx_csum_limit;
struct clk *clk;
};
#define TX_BW_CONTROL_ABSENT 0
#define TX_BW_CONTROL_OLD_LAYOUT 1
#define TX_BW_CONTROL_NEW_LAYOUT 2
static int mv643xx_eth_open(struct net_device *dev);
static int mv643xx_eth_stop(struct net_device *dev);
/* per-port *****************************************************************/
struct mib_counters {
u64 good_octets_received;
u32 bad_octets_received;
u32 internal_mac_transmit_err;
u32 good_frames_received;
u32 bad_frames_received;
u32 broadcast_frames_received;
u32 multicast_frames_received;
u32 frames_64_octets;
u32 frames_65_to_127_octets;
u32 frames_128_to_255_octets;
u32 frames_256_to_511_octets;
u32 frames_512_to_1023_octets;
u32 frames_1024_to_max_octets;
u64 good_octets_sent;
u32 good_frames_sent;
u32 excessive_collision;
u32 multicast_frames_sent;
u32 broadcast_frames_sent;
u32 unrec_mac_control_received;
u32 fc_sent;
u32 good_fc_received;
u32 bad_fc_received;
u32 undersize_received;
u32 fragments_received;
u32 oversize_received;
u32 jabber_received;
u32 mac_receive_error;
u32 bad_crc_event;
u32 collision;
u32 late_collision;
/* Non MIB hardware counters */
u32 rx_discard;
u32 rx_overrun;
};
struct rx_queue {
int index;
int rx_ring_size;
int rx_desc_count;
int rx_curr_desc;
int rx_used_desc;
struct rx_desc *rx_desc_area;
dma_addr_t rx_desc_dma;
int rx_desc_area_size;
struct sk_buff **rx_skb;
};
struct tx_queue {
int index;
int tx_ring_size;
int tx_desc_count;
int tx_curr_desc;
int tx_used_desc;
int tx_stop_threshold;
int tx_wake_threshold;
char *tso_hdrs;
dma_addr_t tso_hdrs_dma;
struct tx_desc *tx_desc_area;
char *tx_desc_mapping; /* array to track the type of the dma mapping */
dma_addr_t tx_desc_dma;
int tx_desc_area_size;
struct sk_buff_head tx_skb;
unsigned long tx_packets;
unsigned long tx_bytes;
unsigned long tx_dropped;
};
struct mv643xx_eth_private {
struct mv643xx_eth_shared_private *shared;
void __iomem *base;
int port_num;
struct net_device *dev;
struct timer_list mib_counters_timer;
spinlock_t mib_counters_lock;
struct mib_counters mib_counters;
struct work_struct tx_timeout_task;
struct napi_struct napi;
u32 int_mask;
u8 oom;
u8 work_link;
u8 work_tx;
u8 work_tx_end;
u8 work_rx;
u8 work_rx_refill;
int skb_size;
/*
* RX state.
*/
int rx_ring_size;
unsigned long rx_desc_sram_addr;
int rx_desc_sram_size;
int rxq_count;
struct timer_list rx_oom;
struct rx_queue rxq[8];
/*
* TX state.
*/
int tx_ring_size;
unsigned long tx_desc_sram_addr;
int tx_desc_sram_size;
int txq_count;
struct tx_queue txq[8];
/*
* Hardware-specific parameters.
*/
struct clk *clk;
unsigned int t_clk;
};
/* port register accessors **************************************************/
static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
{
return readl(mp->shared->base + offset);
}
static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
{
return readl(mp->base + offset);
}
static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
{
writel(data, mp->shared->base + offset);
}
static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)
{
writel(data, mp->base + offset);
}
/* rxq/txq helper functions *************************************************/
static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
{
return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
}
static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
{
return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
}
static void rxq_enable(struct rx_queue *rxq)
{
struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
}
static void rxq_disable(struct rx_queue *rxq)
{
struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
u8 mask = 1 << rxq->index;
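	/* bits 0..7 of RXQ_COMMAND enable queues; writing the same mask
	 * shifted into bits 8..15 requests disable, then we poll the
	 * enable bits until the hardware clears them (txq_disable() below
	 * uses the same scheme for TXQ_COMMAND) */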
wrlp(mp, RXQ_COMMAND, mask << 8);
while (rdlp(mp, RXQ_COMMAND) & mask)
udelay(10);
}
static void txq_reset_hw_ptr(struct tx_queue *txq)
{
struct mv643xx_eth_private *mp = txq_to_mp(txq);
u32 addr;
addr = (u32)txq->tx_desc_dma;
addr += txq->tx_curr_desc * sizeof(struct tx_desc);
wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
}
static void txq_enable(struct tx_queue *txq)
{
struct mv643xx_eth_private *mp = txq_to_mp(txq);
wrlp(mp, TXQ_COMMAND, 1 << txq->index);
}
static void txq_disable(struct tx_queue *txq)
{
struct mv643xx_eth_private *mp = txq_to_mp(txq);
u8 mask = 1 << txq->index;
wrlp(mp, TXQ_COMMAND, mask << 8);
while (rdlp(mp, TXQ_COMMAND) & mask)
udelay(10);
}
static void txq_maybe_wake(struct tx_queue *txq)
{
struct mv643xx_eth_private *mp = txq_to_mp(txq);
struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
if (netif_tx_queue_stopped(nq)) {
__netif_tx_lock(nq, smp_processor_id());
if (txq->tx_desc_count <= txq->tx_wake_threshold)
netif_tx_wake_queue(nq);
__netif_tx_unlock(nq);
}
}
static int rxq_process(struct rx_queue *rxq, int budget)
{
struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
struct net_device_stats *stats = &mp->dev->stats;
int rx;
rx = 0;
while (rx < budget && rxq->rx_desc_count) {
struct rx_desc *rx_desc;
unsigned int cmd_sts;
struct sk_buff *skb;
u16 byte_cnt;
rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];
cmd_sts = rx_desc->cmd_sts;
if (cmd_sts & BUFFER_OWNED_BY_DMA)
break;
rmb();
skb = rxq->rx_skb[rxq->rx_curr_desc];
rxq->rx_skb[rxq->rx_curr_desc] = NULL;
rxq->rx_curr_desc++;
if (rxq->rx_curr_desc == rxq->rx_ring_size)
rxq->rx_curr_desc = 0;
dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr,
rx_desc->buf_size, DMA_FROM_DEVICE);
rxq->rx_desc_count--;
rx++;
mp->work_rx_refill |= 1 << rxq->index;
byte_cnt = rx_desc->byte_cnt;
/*
* Update statistics.
*
* Note that the descriptor byte count includes 2 dummy
* bytes automatically inserted by the hardware at the
* start of the packet (which we don't count), and a 4
* byte CRC at the end of the packet (which we do count).
*/
stats->rx_packets++;
stats->rx_bytes += byte_cnt - 2;
/*
* In case we received a packet without first / last bits
* on, or the error summary bit is set, the packet needs
* to be dropped.
*/
if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY))
!= (RX_FIRST_DESC | RX_LAST_DESC))
goto err;
			/*
			 * The -2 drops the two dummy bytes the hardware
			 * prepends (see above); the -4 strips the CRC in
			 * the trailer of the received packet.
			 */
skb_put(skb, byte_cnt - 2 - 4);
if (cmd_sts & LAYER_4_CHECKSUM_OK)
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->protocol = eth_type_trans(skb, mp->dev);
napi_gro_receive(&mp->napi, skb);
continue;
err:
stats->rx_dropped++;
if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
(RX_FIRST_DESC | RX_LAST_DESC)) {
if (net_ratelimit())
netdev_err(mp->dev,
"received packet spanning multiple descriptors\n");
}
if (cmd_sts & ERROR_SUMMARY)
stats->rx_errors++;
dev_kfree_skb(skb);
}
if (rx < budget)
mp->work_rx &= ~(1 << rxq->index);
return rx;
}
static int rxq_refill(struct rx_queue *rxq, int budget)
{
struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
int refilled;
refilled = 0;
while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
struct sk_buff *skb;
int rx;
struct rx_desc *rx_desc;
int size;
skb = netdev_alloc_skb(mp->dev, mp->skb_size);
if (skb == NULL) {
mp->oom = 1;
goto oom;
}
if (SKB_DMA_REALIGN)
skb_reserve(skb, SKB_DMA_REALIGN);
refilled++;
rxq->rx_desc_count++;
rx = rxq->rx_used_desc++;
if (rxq->rx_used_desc == rxq->rx_ring_size)
rxq->rx_used_desc = 0;
rx_desc = rxq->rx_desc_area + rx;
size = skb_end_pointer(skb) - skb->data;
rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
skb->data, size,
DMA_FROM_DEVICE);
rx_desc->buf_size = size;
rxq->rx_skb[rx] = skb;
wmb();
rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
wmb();
/*
* The hardware automatically prepends 2 bytes of
* dummy data to each received packet, so that the
* IP header ends up 16-byte aligned.
*/
skb_reserve(skb, 2);
}
if (refilled < budget)
mp->work_rx_refill &= ~(1 << rxq->index);
oom:
return refilled;
}
/* tx ***********************************************************************/
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
{
int frag;
for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
if (skb_frag_size(fragp) <= 8 && skb_frag_off(fragp) & 7)
return 1;
}
return 0;
}
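/*
 * Fragments of 8 bytes or less that are not 8-byte aligned appear to
 * trip a DMA engine limitation: mv643xx_eth_xmit() linearizes any skb
 * for which this helper returns true, and txq_put_data_tso() copies
 * such fragments into the TSO header area instead of mapping them.
 */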
static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb,
u16 *l4i_chk, u32 *command, int length)
{
int ret;
u32 cmd = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
int hdr_len;
int tag_bytes;
BUG_ON(skb->protocol != htons(ETH_P_IP) &&
skb->protocol != htons(ETH_P_8021Q));
hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
tag_bytes = hdr_len - ETH_HLEN;
if (length - hdr_len > mp->shared->tx_csum_limit ||
unlikely(tag_bytes & ~12)) {
ret = skb_checksum_help(skb);
if (!ret)
goto no_csum;
return ret;
}
if (tag_bytes & 4)
cmd |= MAC_HDR_EXTRA_4_BYTES;
if (tag_bytes & 8)
cmd |= MAC_HDR_EXTRA_8_BYTES;
cmd |= GEN_TCP_UDP_CHECKSUM | GEN_TCP_UDP_CHK_FULL |
GEN_IP_V4_CHECKSUM |
ip_hdr(skb)->ihl << TX_IHL_SHIFT;
/* TODO: Revisit this. With the usage of GEN_TCP_UDP_CHK_FULL
* it seems we don't need to pass the initial checksum. */
switch (ip_hdr(skb)->protocol) {
case IPPROTO_UDP:
cmd |= UDP_FRAME;
*l4i_chk = 0;
break;
case IPPROTO_TCP:
*l4i_chk = 0;
break;
default:
WARN(1, "protocol not supported");
}
} else {
no_csum:
/* Errata BTS #50, IHL must be 5 if no HW checksum */
cmd |= 5 << TX_IHL_SHIFT;
}
*command = cmd;
return 0;
}
static inline int
txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
struct sk_buff *skb, char *data, int length,
bool last_tcp, bool is_last)
{
int tx_index;
u32 cmd_sts;
struct tx_desc *desc;
tx_index = txq->tx_curr_desc++;
if (txq->tx_curr_desc == txq->tx_ring_size)
txq->tx_curr_desc = 0;
desc = &txq->tx_desc_area[tx_index];
txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
desc->l4i_chk = 0;
desc->byte_cnt = length;
if (length <= 8 && (uintptr_t)data & 0x7) {
/* Copy unaligned small data fragment to TSO header data area */
memcpy(txq->tso_hdrs + tx_index * TSO_HEADER_SIZE,
data, length);
desc->buf_ptr = txq->tso_hdrs_dma
+ tx_index * TSO_HEADER_SIZE;
} else {
/* Alignment is okay, map buffer and hand off to hardware */
txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
desc->buf_ptr = dma_map_single(dev->dev.parent, data,
length, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev->dev.parent,
desc->buf_ptr))) {
WARN(1, "dma_map_single failed!\n");
return -ENOMEM;
}
}
cmd_sts = BUFFER_OWNED_BY_DMA;
if (last_tcp) {
/* last descriptor in the TCP packet */
cmd_sts |= ZERO_PADDING | TX_LAST_DESC;
/* last descriptor in SKB */
if (is_last)
cmd_sts |= TX_ENABLE_INTERRUPT;
}
desc->cmd_sts = cmd_sts;
return 0;
}
static inline void
txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length,
u32 *first_cmd_sts, bool first_desc)
{
struct mv643xx_eth_private *mp = txq_to_mp(txq);
int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
int tx_index;
struct tx_desc *desc;
int ret;
u32 cmd_csum = 0;
u16 l4i_chk = 0;
u32 cmd_sts;
tx_index = txq->tx_curr_desc;
desc = &txq->tx_desc_area[tx_index];
ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_csum, length);
if (ret)
WARN(1, "failed to prepare checksum!");
/* Should we set this? Can't use the value from skb_tx_csum()
* as it's not the correct initial L4 checksum to use. */
desc->l4i_chk = 0;
desc->byte_cnt = hdr_len;
desc->buf_ptr = txq->tso_hdrs_dma +
txq->tx_curr_desc * TSO_HEADER_SIZE;
cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA | TX_FIRST_DESC |
GEN_CRC;
/* Defer updating the first command descriptor until all
* following descriptors have been written.
*/
if (first_desc)
*first_cmd_sts = cmd_sts;
else
desc->cmd_sts = cmd_sts;
txq->tx_curr_desc++;
if (txq->tx_curr_desc == txq->tx_ring_size)
txq->tx_curr_desc = 0;
}
static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
struct net_device *dev)
{
struct mv643xx_eth_private *mp = txq_to_mp(txq);
int hdr_len, total_len, data_left, ret;
int desc_count = 0;
struct tso_t tso;
struct tx_desc *first_tx_desc;
u32 first_cmd_sts = 0;
/* Count needed descriptors */
if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) {
netdev_dbg(dev, "not enough descriptors for TSO!\n");
return -EBUSY;
}
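	/* tso_count_descs() returns a worst-case estimate (two descriptors
	 * per segment for header and payload, plus one per page fragment),
	 * so this check may be conservative. */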
first_tx_desc = &txq->tx_desc_area[txq->tx_curr_desc];
/* Initialize the TSO handler, and prepare the first payload */
hdr_len = tso_start(skb, &tso);
total_len = skb->len - hdr_len;
while (total_len > 0) {
bool first_desc = (desc_count == 0);
char *hdr;
data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
total_len -= data_left;
desc_count++;
/* prepare packet headers: MAC + IP + TCP */
hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE;
tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
txq_put_hdr_tso(skb, txq, data_left, &first_cmd_sts,
first_desc);
while (data_left > 0) {
int size;
desc_count++;
size = min_t(int, tso.size, data_left);
ret = txq_put_data_tso(dev, txq, skb, tso.data, size,
size == data_left,
total_len == 0);
if (ret)
goto err_release;
data_left -= size;
tso_build_data(skb, &tso, size);
}
}
__skb_queue_tail(&txq->tx_skb, skb);
skb_tx_timestamp(skb);
/* ensure all other descriptors are written before first cmd_sts */
wmb();
first_tx_desc->cmd_sts = first_cmd_sts;
/* clear TX_END status */
mp->work_tx_end &= ~(1 << txq->index);
/* ensure all descriptors are written before poking hardware */
wmb();
txq_enable(txq);
txq->tx_desc_count += desc_count;
return 0;
err_release:
/* TODO: Release all used data descriptors; header descriptors must not
* be DMA-unmapped.
*/
return ret;
}
static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
struct mv643xx_eth_private *mp = txq_to_mp(txq);
int nr_frags = skb_shinfo(skb)->nr_frags;
int frag;
for (frag = 0; frag < nr_frags; frag++) {
skb_frag_t *this_frag;
int tx_index;
struct tx_desc *desc;
this_frag = &skb_shinfo(skb)->frags[frag];
tx_index = txq->tx_curr_desc++;
if (txq->tx_curr_desc == txq->tx_ring_size)
txq->tx_curr_desc = 0;
desc = &txq->tx_desc_area[tx_index];
txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE;
/*
* The last fragment will generate an interrupt
* which will free the skb on TX completion.
*/
if (frag == nr_frags - 1) {
desc->cmd_sts = BUFFER_OWNED_BY_DMA |
ZERO_PADDING | TX_LAST_DESC |
TX_ENABLE_INTERRUPT;
} else {
desc->cmd_sts = BUFFER_OWNED_BY_DMA;
}
desc->l4i_chk = 0;
desc->byte_cnt = skb_frag_size(this_frag);
desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
this_frag, 0, desc->byte_cnt,
DMA_TO_DEVICE);
}
}
static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
struct net_device *dev)
{
struct mv643xx_eth_private *mp = txq_to_mp(txq);
int nr_frags = skb_shinfo(skb)->nr_frags;
int tx_index;
struct tx_desc *desc;
u32 cmd_sts;
u16 l4i_chk;
int length, ret;
cmd_sts = 0;
l4i_chk = 0;
if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
if (net_ratelimit())
netdev_err(dev, "tx queue full?!\n");
return -EBUSY;
}
ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len);
if (ret)
return ret;
cmd_sts |= TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
tx_index = txq->tx_curr_desc++;
if (txq->tx_curr_desc == txq->tx_ring_size)
txq->tx_curr_desc = 0;
desc = &txq->tx_desc_area[tx_index];
txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
if (nr_frags) {
txq_submit_frag_skb(txq, skb);
length = skb_headlen(skb);
} else {
cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
length = skb->len;
}
desc->l4i_chk = l4i_chk;
desc->byte_cnt = length;
desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
length, DMA_TO_DEVICE);
__skb_queue_tail(&txq->tx_skb, skb);
skb_tx_timestamp(skb);
/* ensure all other descriptors are written before first cmd_sts */
wmb();
desc->cmd_sts = cmd_sts;
/* clear TX_END status */
mp->work_tx_end &= ~(1 << txq->index);
/* ensure all descriptors are written before poking hardware */
wmb();
txq_enable(txq);
txq->tx_desc_count += nr_frags + 1;
return 0;
}
static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
int length, queue, ret;
struct tx_queue *txq;
struct netdev_queue *nq;
queue = skb_get_queue_mapping(skb);
txq = mp->txq + queue;
nq = netdev_get_tx_queue(dev, queue);
if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
netdev_printk(KERN_DEBUG, dev,
"failed to linearize skb with tiny unaligned fragment\n");
return NETDEV_TX_BUSY;
}
length = skb->len;
if (skb_is_gso(skb))
ret = txq_submit_tso(txq, skb, dev);
else
ret = txq_submit_skb(txq, skb, dev);
if (!ret) {
txq->tx_bytes += length;
txq->tx_packets++;
if (txq->tx_desc_count >= txq->tx_stop_threshold)
netif_tx_stop_queue(nq);
} else {
txq->tx_dropped++;
dev_kfree_skb_any(skb);
}
return NETDEV_TX_OK;
}
/* tx napi ******************************************************************/
static void txq_kick(struct tx_queue *txq)
{
struct mv643xx_eth_private *mp = txq_to_mp(txq);
struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
u32 hw_desc_ptr;
u32 expected_ptr;
__netif_tx_lock(nq, smp_processor_id());
if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
goto out;
hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
expected_ptr = (u32)txq->tx_desc_dma +
txq->tx_curr_desc * sizeof(struct tx_desc);
if (hw_desc_ptr != expected_ptr)
txq_enable(txq);
out:
__netif_tx_unlock(nq);
mp->work_tx_end &= ~(1 << txq->index);
}
static int txq_reclaim(struct tx_queue *txq, int budget, int force)
{
struct mv643xx_eth_private *mp = txq_to_mp(txq);
struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
int reclaimed;
__netif_tx_lock_bh(nq);
reclaimed = 0;
while (reclaimed < budget && txq->tx_desc_count > 0) {
int tx_index;
struct tx_desc *desc;
u32 cmd_sts;
char desc_dma_map;
tx_index = txq->tx_used_desc;
desc = &txq->tx_desc_area[tx_index];
desc_dma_map = txq->tx_desc_mapping[tx_index];
cmd_sts = desc->cmd_sts;
if (cmd_sts & BUFFER_OWNED_BY_DMA) {
if (!force)
break;
desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
}
txq->tx_used_desc = tx_index + 1;
if (txq->tx_used_desc == txq->tx_ring_size)
txq->tx_used_desc = 0;
reclaimed++;
txq->tx_desc_count--;
if (!IS_TSO_HEADER(txq, desc->buf_ptr)) {
if (desc_dma_map == DESC_DMA_MAP_PAGE)
dma_unmap_page(mp->dev->dev.parent,
desc->buf_ptr,
desc->byte_cnt,
DMA_TO_DEVICE);
else
dma_unmap_single(mp->dev->dev.parent,
desc->buf_ptr,
desc->byte_cnt,
DMA_TO_DEVICE);
}
if (cmd_sts & TX_ENABLE_INTERRUPT) {
struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
if (!WARN_ON(!skb))
dev_consume_skb_any(skb);
}
if (cmd_sts & ERROR_SUMMARY) {
netdev_info(mp->dev, "tx error\n");
mp->dev->stats.tx_errors++;
}
}
__netif_tx_unlock_bh(nq);
if (reclaimed < budget)
mp->work_tx &= ~(1 << txq->index);
return reclaimed;
}
/* tx rate control **********************************************************/
/*
* Set total maximum TX rate (shared by all TX queues for this port)
* to 'rate' bits per second, with a maximum burst of 'burst' bytes.
*/
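/*
 * Example: with t_clk = 166 MHz, a requested rate of 100 Mbit/s gives
 * token_rate = (100000 * 64) / 166666 ~= 38, i.e. the granularity of
 * the rate limiter is roughly t_clk / 64 ~= 2.6 Mbit/s per token.
 */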
static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
{
int token_rate;
int mtu;
int bucket_size;
token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
if (token_rate > 1023)
token_rate = 1023;
mtu = (mp->dev->mtu + 255) >> 8;
if (mtu > 63)
mtu = 63;
bucket_size = (burst + 255) >> 8;
if (bucket_size > 65535)
bucket_size = 65535;
switch (mp->shared->tx_bw_control) {
case TX_BW_CONTROL_OLD_LAYOUT:
wrlp(mp, TX_BW_RATE, token_rate);
wrlp(mp, TX_BW_MTU, mtu);
wrlp(mp, TX_BW_BURST, bucket_size);
break;
case TX_BW_CONTROL_NEW_LAYOUT:
wrlp(mp, TX_BW_RATE_MOVED, token_rate);
wrlp(mp, TX_BW_MTU_MOVED, mtu);
wrlp(mp, TX_BW_BURST_MOVED, bucket_size);
break;
}
}
static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
{
struct mv643xx_eth_private *mp = txq_to_mp(txq);
int token_rate;
int bucket_size;
token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
if (token_rate > 1023)
token_rate = 1023;
bucket_size = (burst + 255) >> 8;
if (bucket_size > 65535)
bucket_size = 65535;
wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
}
static void txq_set_fixed_prio_mode(struct tx_queue *txq)
{
struct mv643xx_eth_private *mp = txq_to_mp(txq);
int off;
u32 val;
/*
* Turn on fixed priority mode.
*/
off = 0;
switch (mp->shared->tx_bw_control) {
case TX_BW_CONTROL_OLD_LAYOUT:
off = TXQ_FIX_PRIO_CONF;
break;
case TX_BW_CONTROL_NEW_LAYOUT:
off = TXQ_FIX_PRIO_CONF_MOVED;
break;
}
if (off) {
val = rdlp(mp, off);
val |= 1 << txq->index;
wrlp(mp, off, val);
}
}
/* mii management interface *************************************************/
static void mv643xx_eth_adjust_link(struct net_device *dev)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
u32 autoneg_disable = FORCE_LINK_PASS |
DISABLE_AUTO_NEG_SPEED_GMII |
DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
DISABLE_AUTO_NEG_FOR_DUPLEX;
if (dev->phydev->autoneg == AUTONEG_ENABLE) {
/* enable auto negotiation */
pscr &= ~autoneg_disable;
goto out_write;
}
pscr |= autoneg_disable;
if (dev->phydev->speed == SPEED_1000) {
/* force gigabit, half duplex not supported */
pscr |= SET_GMII_SPEED_TO_1000;
pscr |= SET_FULL_DUPLEX_MODE;
goto out_write;
}
pscr &= ~SET_GMII_SPEED_TO_1000;
if (dev->phydev->speed == SPEED_100)
pscr |= SET_MII_SPEED_TO_100;
else
pscr &= ~SET_MII_SPEED_TO_100;
if (dev->phydev->duplex == DUPLEX_FULL)
pscr |= SET_FULL_DUPLEX_MODE;
else
pscr &= ~SET_FULL_DUPLEX_MODE;
out_write:
wrlp(mp, PORT_SERIAL_CONTROL, pscr);
}
/* statistics ***************************************************************/
static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
unsigned long tx_packets = 0;
unsigned long tx_bytes = 0;
unsigned long tx_dropped = 0;
int i;
for (i = 0; i < mp->txq_count; i++) {
struct tx_queue *txq = mp->txq + i;
tx_packets += txq->tx_packets;
tx_bytes += txq->tx_bytes;
tx_dropped += txq->tx_dropped;
}
stats->tx_packets = tx_packets;
stats->tx_bytes = tx_bytes;
stats->tx_dropped = tx_dropped;
return stats;
}
static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
{
return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
}
static void mib_counters_clear(struct mv643xx_eth_private *mp)
{
int i;
for (i = 0; i < 0x80; i += 4)
mib_read(mp, i);
/* Clear non MIB hw counters also */
rdlp(mp, RX_DISCARD_FRAME_CNT);
rdlp(mp, RX_OVERRUN_FRAME_CNT);
}
static void mib_counters_update(struct mv643xx_eth_private *mp)
{
struct mib_counters *p = &mp->mib_counters;
spin_lock_bh(&mp->mib_counters_lock);
p->good_octets_received += mib_read(mp, 0x00);
p->bad_octets_received += mib_read(mp, 0x08);
p->internal_mac_transmit_err += mib_read(mp, 0x0c);
p->good_frames_received += mib_read(mp, 0x10);
p->bad_frames_received += mib_read(mp, 0x14);
p->broadcast_frames_received += mib_read(mp, 0x18);
p->multicast_frames_received += mib_read(mp, 0x1c);
p->frames_64_octets += mib_read(mp, 0x20);
p->frames_65_to_127_octets += mib_read(mp, 0x24);
p->frames_128_to_255_octets += mib_read(mp, 0x28);
p->frames_256_to_511_octets += mib_read(mp, 0x2c);
p->frames_512_to_1023_octets += mib_read(mp, 0x30);
p->frames_1024_to_max_octets += mib_read(mp, 0x34);
p->good_octets_sent += mib_read(mp, 0x38);
p->good_frames_sent += mib_read(mp, 0x40);
p->excessive_collision += mib_read(mp, 0x44);
p->multicast_frames_sent += mib_read(mp, 0x48);
p->broadcast_frames_sent += mib_read(mp, 0x4c);
p->unrec_mac_control_received += mib_read(mp, 0x50);
p->fc_sent += mib_read(mp, 0x54);
p->good_fc_received += mib_read(mp, 0x58);
p->bad_fc_received += mib_read(mp, 0x5c);
p->undersize_received += mib_read(mp, 0x60);
p->fragments_received += mib_read(mp, 0x64);
p->oversize_received += mib_read(mp, 0x68);
p->jabber_received += mib_read(mp, 0x6c);
p->mac_receive_error += mib_read(mp, 0x70);
p->bad_crc_event += mib_read(mp, 0x74);
p->collision += mib_read(mp, 0x78);
p->late_collision += mib_read(mp, 0x7c);
/* Non MIB hardware counters */
p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
spin_unlock_bh(&mp->mib_counters_lock);
}
static void mib_counters_timer_wrapper(struct timer_list *t)
{
struct mv643xx_eth_private *mp = from_timer(mp, t, mib_counters_timer);
mib_counters_update(mp);
mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}
/* interrupt coalescing *****************************************************/
/*
* Hardware coalescing parameters are set in units of 64 t_clk
* cycles. I.e.:
*
* coal_delay_in_usec = 64000000 * register_value / t_clk_rate
*
* register_value = coal_delay_in_usec * t_clk_rate / 64000000
*
* In the ->set*() methods, we round the computed register value
* to the nearest integer.
*/
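/*
 * Example: for t_clk = 166 MHz, a requested delay of 100 usec yields
 * register_value = 100 * 166000000 / 64000000 ~= 259, which reads back
 * as 259 * 64000000 / 166000000 ~= 99.86 usec.
 */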
static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
{
u32 val = rdlp(mp, SDMA_CONFIG);
u64 temp;
if (mp->shared->extended_rx_coal_limit)
temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7);
else
temp = (val & 0x003fff00) >> 8;
temp *= 64000000;
temp += mp->t_clk / 2;
do_div(temp, mp->t_clk);
return (unsigned int)temp;
}
static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
{
u64 temp;
u32 val;
temp = (u64)usec * mp->t_clk;
temp += 31999999;
do_div(temp, 64000000);
val = rdlp(mp, SDMA_CONFIG);
if (mp->shared->extended_rx_coal_limit) {
if (temp > 0xffff)
temp = 0xffff;
val &= ~0x023fff80;
val |= (temp & 0x8000) << 10;
val |= (temp & 0x7fff) << 7;
} else {
if (temp > 0x3fff)
temp = 0x3fff;
val &= ~0x003fff00;
val |= (temp & 0x3fff) << 8;
}
wrlp(mp, SDMA_CONFIG, val);
}
static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
{
u64 temp;
temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
temp *= 64000000;
temp += mp->t_clk / 2;
do_div(temp, mp->t_clk);
return (unsigned int)temp;
}
static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
{
u64 temp;
temp = (u64)usec * mp->t_clk;
temp += 31999999;
do_div(temp, 64000000);
if (temp > 0x3fff)
temp = 0x3fff;
wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4);
}
/* ethtool ******************************************************************/
struct mv643xx_eth_stats {
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
int netdev_off;
int mp_off;
};
#define SSTAT(m) \
{ #m, sizeof_field(struct net_device_stats, m), \
offsetof(struct net_device, stats.m), -1 }
#define MIBSTAT(m) \
{ #m, sizeof_field(struct mib_counters, m), \
-1, offsetof(struct mv643xx_eth_private, mib_counters.m) }
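/*
 * e.g. MIBSTAT(collision) expands to
 * { "collision", sizeof_field(struct mib_counters, collision),
 *   -1, offsetof(struct mv643xx_eth_private, mib_counters.collision) }
 * so the ethtool code below can locate each counter by offset.
 */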
static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
SSTAT(rx_packets),
SSTAT(tx_packets),
SSTAT(rx_bytes),
SSTAT(tx_bytes),
SSTAT(rx_errors),
SSTAT(tx_errors),
SSTAT(rx_dropped),
SSTAT(tx_dropped),
MIBSTAT(good_octets_received),
MIBSTAT(bad_octets_received),
MIBSTAT(internal_mac_transmit_err),
MIBSTAT(good_frames_received),
MIBSTAT(bad_frames_received),
MIBSTAT(broadcast_frames_received),
MIBSTAT(multicast_frames_received),
MIBSTAT(frames_64_octets),
MIBSTAT(frames_65_to_127_octets),
MIBSTAT(frames_128_to_255_octets),
MIBSTAT(frames_256_to_511_octets),
MIBSTAT(frames_512_to_1023_octets),
MIBSTAT(frames_1024_to_max_octets),
MIBSTAT(good_octets_sent),
MIBSTAT(good_frames_sent),
MIBSTAT(excessive_collision),
MIBSTAT(multicast_frames_sent),
MIBSTAT(broadcast_frames_sent),
MIBSTAT(unrec_mac_control_received),
MIBSTAT(fc_sent),
MIBSTAT(good_fc_received),
MIBSTAT(bad_fc_received),
MIBSTAT(undersize_received),
MIBSTAT(fragments_received),
MIBSTAT(oversize_received),
MIBSTAT(jabber_received),
MIBSTAT(mac_receive_error),
MIBSTAT(bad_crc_event),
MIBSTAT(collision),
MIBSTAT(late_collision),
MIBSTAT(rx_discard),
MIBSTAT(rx_overrun),
};
static int
mv643xx_eth_get_link_ksettings_phy(struct mv643xx_eth_private *mp,
struct ethtool_link_ksettings *cmd)
{
struct net_device *dev = mp->dev;
phy_ethtool_ksettings_get(dev->phydev, cmd);
/*
* The MAC does not support 1000baseT_Half.
*/
linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
cmd->link_modes.supported);
linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
cmd->link_modes.advertising);
return 0;
}
static int
mv643xx_eth_get_link_ksettings_phyless(struct mv643xx_eth_private *mp,
struct ethtool_link_ksettings *cmd)
{
u32 port_status;
u32 supported, advertising;
port_status = rdlp(mp, PORT_STATUS);
supported = SUPPORTED_MII;
advertising = ADVERTISED_MII;
switch (port_status & PORT_SPEED_MASK) {
case PORT_SPEED_10:
cmd->base.speed = SPEED_10;
break;
case PORT_SPEED_100:
cmd->base.speed = SPEED_100;
break;
case PORT_SPEED_1000:
cmd->base.speed = SPEED_1000;
break;
default:
cmd->base.speed = -1;
break;
}
cmd->base.duplex = (port_status & FULL_DUPLEX) ?
DUPLEX_FULL : DUPLEX_HALF;
cmd->base.port = PORT_MII;
cmd->base.phy_address = 0;
cmd->base.autoneg = AUTONEG_DISABLE;
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
supported);
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
advertising);
return 0;
}
static void
mv643xx_eth_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
wol->supported = 0;
wol->wolopts = 0;
if (dev->phydev)
phy_ethtool_get_wol(dev->phydev, wol);
}
static int
mv643xx_eth_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
int err;
if (!dev->phydev)
return -EOPNOTSUPP;
err = phy_ethtool_set_wol(dev->phydev, wol);
/* Given that mv643xx_eth works without the marvell-specific PHY driver,
* this debugging hint is useful to have.
*/
if (err == -EOPNOTSUPP)
netdev_info(dev, "The PHY does not support set_wol, was CONFIG_MARVELL_PHY enabled?\n");
return err;
}
static int
mv643xx_eth_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
if (dev->phydev)
return mv643xx_eth_get_link_ksettings_phy(mp, cmd);
else
return mv643xx_eth_get_link_ksettings_phyless(mp, cmd);
}
static int
mv643xx_eth_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd)
{
struct ethtool_link_ksettings c = *cmd;
u32 advertising;
int ret;
if (!dev->phydev)
return -EINVAL;
/*
* The MAC does not support 1000baseT_Half.
*/
ethtool_convert_link_mode_to_legacy_u32(&advertising,
c.link_modes.advertising);
advertising &= ~ADVERTISED_1000baseT_Half;
ethtool_convert_legacy_u32_to_link_mode(c.link_modes.advertising,
advertising);
ret = phy_ethtool_ksettings_set(dev->phydev, &c);
if (!ret)
mv643xx_eth_adjust_link(dev);
return ret;
}
static void mv643xx_eth_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
strlcpy(drvinfo->driver, mv643xx_eth_driver_name,
sizeof(drvinfo->driver));
strlcpy(drvinfo->version, mv643xx_eth_driver_version,
sizeof(drvinfo->version));
strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}
static int
mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
ec->rx_coalesce_usecs = get_rx_coal(mp);
ec->tx_coalesce_usecs = get_tx_coal(mp);
return 0;
}
static int
mv643xx_eth_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
set_rx_coal(mp, ec->rx_coalesce_usecs);
set_tx_coal(mp, ec->tx_coalesce_usecs);
return 0;
}
static void
mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
er->rx_max_pending = 4096;
er->tx_max_pending = 4096;
er->rx_pending = mp->rx_ring_size;
er->tx_pending = mp->tx_ring_size;
}
static int
mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
if (er->rx_mini_pending || er->rx_jumbo_pending)
return -EINVAL;
mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending,
MV643XX_MAX_SKB_DESCS * 2, 4096);
if (mp->tx_ring_size != er->tx_pending)
netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
mp->tx_ring_size, er->tx_pending);
if (netif_running(dev)) {
mv643xx_eth_stop(dev);
if (mv643xx_eth_open(dev)) {
netdev_err(dev,
"fatal error on re-opening device after ring param change\n");
return -ENOMEM;
}
}
return 0;
}
static int
mv643xx_eth_set_features(struct net_device *dev, netdev_features_t features)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
bool rx_csum = features & NETIF_F_RXCSUM;
wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);
return 0;
}
static void mv643xx_eth_get_strings(struct net_device *dev,
uint32_t stringset, uint8_t *data)
{
int i;
if (stringset == ETH_SS_STATS) {
for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
memcpy(data + i * ETH_GSTRING_LEN,
mv643xx_eth_stats[i].stat_string,
ETH_GSTRING_LEN);
}
}
}
static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats,
uint64_t *data)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
int i;
mv643xx_eth_get_stats(dev);
mib_counters_update(mp);
for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
const struct mv643xx_eth_stats *stat;
void *p;
stat = mv643xx_eth_stats + i;
if (stat->netdev_off >= 0)
p = ((void *)mp->dev) + stat->netdev_off;
else
p = ((void *)mp) + stat->mp_off;
data[i] = (stat->sizeof_stat == 8) ?
*(uint64_t *)p : *(uint32_t *)p;
}
}
static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
{
if (sset == ETH_SS_STATS)
return ARRAY_SIZE(mv643xx_eth_stats);
return -EOPNOTSUPP;
}
static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = mv643xx_eth_get_drvinfo,
.nway_reset = phy_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.get_coalesce = mv643xx_eth_get_coalesce,
.set_coalesce = mv643xx_eth_set_coalesce,
.get_ringparam = mv643xx_eth_get_ringparam,
.set_ringparam = mv643xx_eth_set_ringparam,
.get_strings = mv643xx_eth_get_strings,
.get_ethtool_stats = mv643xx_eth_get_ethtool_stats,
.get_sset_count = mv643xx_eth_get_sset_count,
.get_ts_info = ethtool_op_get_ts_info,
.get_wol = mv643xx_eth_get_wol,
.set_wol = mv643xx_eth_set_wol,
.get_link_ksettings = mv643xx_eth_get_link_ksettings,
.set_link_ksettings = mv643xx_eth_set_link_ksettings,
};
/* address handling *********************************************************/
static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
{
unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH);
unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW);
addr[0] = (mac_h >> 24) & 0xff;
addr[1] = (mac_h >> 16) & 0xff;
addr[2] = (mac_h >> 8) & 0xff;
addr[3] = mac_h & 0xff;
addr[4] = (mac_l >> 8) & 0xff;
addr[5] = mac_l & 0xff;
}
static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
{
wrlp(mp, MAC_ADDR_HIGH,
(addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]);
}
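/*
 * The hardware unicast filter matches on the low nibble of the last
 * address byte.  Build a 16-bit mask with one bit per accepted nibble
 * value, or return 0 to request unicast promiscuous mode when a
 * secondary address differs from dev_addr in anything other than that
 * nibble and so cannot be expressed in the filter.
 */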
static u32 uc_addr_filter_mask(struct net_device *dev)
{
struct netdev_hw_addr *ha;
u32 nibbles;
if (dev->flags & IFF_PROMISC)
return 0;
nibbles = 1 << (dev->dev_addr[5] & 0x0f);
netdev_for_each_uc_addr(ha, dev) {
if (memcmp(dev->dev_addr, ha->addr, 5))
return 0;
if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
return 0;
nibbles |= 1 << (ha->addr[5] & 0x0f);
}
return nibbles;
}
static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
u32 port_config;
u32 nibbles;
int i;
uc_addr_set(mp, dev->dev_addr);
port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;
nibbles = uc_addr_filter_mask(dev);
if (!nibbles) {
port_config |= UNICAST_PROMISCUOUS_MODE;
nibbles = 0xffff;
}
for (i = 0; i < 16; i += 4) {
int off = UNICAST_TABLE(mp->port_num) + i;
u32 v;
v = 0;
if (nibbles & 1)
v |= 0x00000001;
if (nibbles & 2)
v |= 0x00000100;
if (nibbles & 4)
v |= 0x00010000;
if (nibbles & 8)
v |= 0x01000000;
nibbles >>= 4;
wrl(mp, off, v);
}
wrlp(mp, PORT_CONFIG, port_config);
}
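/*
 * 8-bit CRC (polynomial 0x107, i.e. x^8 + x^2 + x + 1) used to hash
 * multicast addresses outside the 01:00:5e:00:00:xx range into the
 * 256-entry "other" multicast filter table.
 */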
static int addr_crc(unsigned char *addr)
{
int crc = 0;
int i;
for (i = 0; i < 6; i++) {
int j;
crc = (crc ^ addr[i]) << 8;
for (j = 7; j >= 0; j--) {
if (crc & (0x100 << j))
crc ^= 0x107 << j;
}
}
return crc;
}
static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
u32 *mc_spec;
u32 *mc_other;
struct netdev_hw_addr *ha;
int i;
if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI))
goto promiscuous;
/* Allocate both mc_spec and mc_other tables */
mc_spec = kcalloc(128, sizeof(u32), GFP_ATOMIC);
if (!mc_spec)
goto promiscuous;
mc_other = &mc_spec[64];
netdev_for_each_mc_addr(ha, dev) {
u8 *a = ha->addr;
u32 *table;
u8 entry;
if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
table = mc_spec;
entry = a[5];
} else {
table = mc_other;
entry = addr_crc(a);
}
table[entry >> 2] |= 1 << (8 * (entry & 3));
}
for (i = 0; i < 64; i++) {
wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
mc_spec[i]);
wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
mc_other[i]);
}
kfree(mc_spec);
return;
promiscuous:
for (i = 0; i < 64; i++) {
wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
0x01010101u);
wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
0x01010101u);
}
}
static void mv643xx_eth_set_rx_mode(struct net_device *dev)
{
mv643xx_eth_program_unicast_filter(dev);
mv643xx_eth_program_multicast_filter(dev);
}
static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sa = addr;
if (!is_valid_ether_addr(sa->sa_data))
return -EADDRNOTAVAIL;
memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
netif_addr_lock_bh(dev);
mv643xx_eth_program_unicast_filter(dev);
netif_addr_unlock_bh(dev);
return 0;
}
/* rx/tx queue initialisation ***********************************************/
static int rxq_init(struct mv643xx_eth_private *mp, int index)
{
struct rx_queue *rxq = mp->rxq + index;
struct rx_desc *rx_desc;
int size;
int i;
rxq->index = index;
rxq->rx_ring_size = mp->rx_ring_size;
rxq->rx_desc_count = 0;
rxq->rx_curr_desc = 0;
rxq->rx_used_desc = 0;
size = rxq->rx_ring_size * sizeof(struct rx_desc);
if (index == 0 && size <= mp->rx_desc_sram_size) {
rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
mp->rx_desc_sram_size);
rxq->rx_desc_dma = mp->rx_desc_sram_addr;
} else {
rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
size, &rxq->rx_desc_dma,
GFP_KERNEL);
}
if (rxq->rx_desc_area == NULL) {
netdev_err(mp->dev,
"can't allocate rx ring (%d bytes)\n", size);
goto out;
}
memset(rxq->rx_desc_area, 0, size);
rxq->rx_desc_area_size = size;
rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
GFP_KERNEL);
if (rxq->rx_skb == NULL)
goto out_free;
rx_desc = rxq->rx_desc_area;
for (i = 0; i < rxq->rx_ring_size; i++) {
int nexti;
nexti = i + 1;
if (nexti == rxq->rx_ring_size)
nexti = 0;
rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
nexti * sizeof(struct rx_desc);
}
return 0;
out_free:
if (index == 0 && size <= mp->rx_desc_sram_size)
iounmap(rxq->rx_desc_area);
else
dma_free_coherent(mp->dev->dev.parent, size,
rxq->rx_desc_area,
rxq->rx_desc_dma);
out:
return -ENOMEM;
}
static void rxq_deinit(struct rx_queue *rxq)
{
struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
int i;
rxq_disable(rxq);
for (i = 0; i < rxq->rx_ring_size; i++) {
if (rxq->rx_skb[i]) {
dev_consume_skb_any(rxq->rx_skb[i]);
rxq->rx_desc_count--;
}
}
if (rxq->rx_desc_count) {
netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",
rxq->rx_desc_count);
}
if (rxq->index == 0 &&
rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
iounmap(rxq->rx_desc_area);
else
dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
rxq->rx_desc_area, rxq->rx_desc_dma);
kfree(rxq->rx_skb);
}
static int txq_init(struct mv643xx_eth_private *mp, int index)
{
struct tx_queue *txq = mp->txq + index;
struct tx_desc *tx_desc;
int size;
int ret;
int i;
txq->index = index;
txq->tx_ring_size = mp->tx_ring_size;
/* A queue must always have room for at least one skb.
	 * Therefore, stop the queue when the number of free entries
	 * drops to the maximum number of descriptors per skb.
*/
txq->tx_stop_threshold = txq->tx_ring_size - MV643XX_MAX_SKB_DESCS;
txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
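	/* Waking at half the stop threshold adds hysteresis so the queue
	 * doesn't flip between stopped and awake on every reclaim.
	 */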
txq->tx_desc_count = 0;
txq->tx_curr_desc = 0;
txq->tx_used_desc = 0;
size = txq->tx_ring_size * sizeof(struct tx_desc);
if (index == 0 && size <= mp->tx_desc_sram_size) {
txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
mp->tx_desc_sram_size);
txq->tx_desc_dma = mp->tx_desc_sram_addr;
} else {
txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
size, &txq->tx_desc_dma,
GFP_KERNEL);
}
if (txq->tx_desc_area == NULL) {
netdev_err(mp->dev,
"can't allocate tx ring (%d bytes)\n", size);
return -ENOMEM;
}
memset(txq->tx_desc_area, 0, size);
txq->tx_desc_area_size = size;
tx_desc = txq->tx_desc_area;
for (i = 0; i < txq->tx_ring_size; i++) {
struct tx_desc *txd = tx_desc + i;
int nexti;
nexti = i + 1;
if (nexti == txq->tx_ring_size)
nexti = 0;
txd->cmd_sts = 0;
txd->next_desc_ptr = txq->tx_desc_dma +
nexti * sizeof(struct tx_desc);
}
txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char),
GFP_KERNEL);
if (!txq->tx_desc_mapping) {
ret = -ENOMEM;
goto err_free_desc_area;
}
/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
txq->tx_ring_size * TSO_HEADER_SIZE,
&txq->tso_hdrs_dma, GFP_KERNEL);
if (txq->tso_hdrs == NULL) {
ret = -ENOMEM;
goto err_free_desc_mapping;
}
skb_queue_head_init(&txq->tx_skb);
return 0;
err_free_desc_mapping:
kfree(txq->tx_desc_mapping);
err_free_desc_area:
if (index == 0 && size <= mp->tx_desc_sram_size)
iounmap(txq->tx_desc_area);
else
dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
txq->tx_desc_area, txq->tx_desc_dma);
return ret;
}
static void txq_deinit(struct tx_queue *txq)
{
struct mv643xx_eth_private *mp = txq_to_mp(txq);
txq_disable(txq);
txq_reclaim(txq, txq->tx_ring_size, 1);
BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);
if (txq->index == 0 &&
txq->tx_desc_area_size <= mp->tx_desc_sram_size)
iounmap(txq->tx_desc_area);
else
dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
txq->tx_desc_area, txq->tx_desc_dma);
kfree(txq->tx_desc_mapping);
if (txq->tso_hdrs)
dma_free_coherent(mp->dev->dev.parent,
txq->tx_ring_size * TSO_HEADER_SIZE,
txq->tso_hdrs, txq->tso_hdrs_dma);
}
/* netdev ops and related ***************************************************/
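/*
 * Collect pending interrupt causes into the mp->work_* bitmaps that
 * drive the NAPI poll loop.  Returns nonzero if any event was found.
 */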
static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
{
u32 int_cause;
u32 int_cause_ext;
int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask;
if (int_cause == 0)
return 0;
int_cause_ext = 0;
if (int_cause & INT_EXT) {
int_cause &= ~INT_EXT;
int_cause_ext = rdlp(mp, INT_CAUSE_EXT);
}
if (int_cause) {
wrlp(mp, INT_CAUSE, ~int_cause);
mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
~(rdlp(mp, TXQ_COMMAND) & 0xff);
mp->work_rx |= (int_cause & INT_RX) >> 2;
}
int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX;
if (int_cause_ext) {
wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext);
if (int_cause_ext & INT_EXT_LINK_PHY)
mp->work_link = 1;
mp->work_tx |= int_cause_ext & INT_EXT_TX;
}
return 1;
}
static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
struct mv643xx_eth_private *mp = netdev_priv(dev);
if (unlikely(!mv643xx_eth_collect_events(mp)))
return IRQ_NONE;
wrlp(mp, INT_MASK, 0);
napi_schedule(&mp->napi);
return IRQ_HANDLED;
}
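/*
 * React to a link state interrupt: on link loss, reclaim and reset
 * every TX queue; on link up, log the negotiated speed, duplex and
 * flow control settings.
 */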
static void handle_link_event(struct mv643xx_eth_private *mp)
{
struct net_device *dev = mp->dev;
u32 port_status;
int speed;
int duplex;
int fc;
port_status = rdlp(mp, PORT_STATUS);
if (!(port_status & LINK_UP)) {
if (netif_carrier_ok(dev)) {
int i;
netdev_info(dev, "link down\n");
netif_carrier_off(dev);
for (i = 0; i < mp->txq_count; i++) {
struct tx_queue *txq = mp->txq + i;
txq_reclaim(txq, txq->tx_ring_size, 1);
txq_reset_hw_ptr(txq);
}
}
return;
}
switch (port_status & PORT_SPEED_MASK) {
case PORT_SPEED_10:
speed = 10;
break;
case PORT_SPEED_100:
speed = 100;
break;
case PORT_SPEED_1000:
speed = 1000;
break;
default:
speed = -1;
break;
}
duplex = (port_status & FULL_DUPLEX) ? 1 : 0;
fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0;
netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n",
speed, duplex ? "full" : "half", fc ? "en" : "dis");
if (!netif_carrier_ok(dev))
netif_carrier_on(dev);
}
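/*
 * NAPI poll: handle link events first, then repeatedly service the
 * highest-numbered queue with pending work in slices of at most 16
 * units, until the budget is spent or no work remains.
 */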
static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
{
struct mv643xx_eth_private *mp;
int work_done;
mp = container_of(napi, struct mv643xx_eth_private, napi);
if (unlikely(mp->oom)) {
mp->oom = 0;
del_timer(&mp->rx_oom);
}
work_done = 0;
while (work_done < budget) {
u8 queue_mask;
int queue;
int work_tbd;
if (mp->work_link) {
mp->work_link = 0;
handle_link_event(mp);
work_done++;
continue;
}
queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx;
if (likely(!mp->oom))
queue_mask |= mp->work_rx_refill;
if (!queue_mask) {
if (mv643xx_eth_collect_events(mp))
continue;
break;
}
queue = fls(queue_mask) - 1;
queue_mask = 1 << queue;
work_tbd = budget - work_done;
if (work_tbd > 16)
work_tbd = 16;
if (mp->work_tx_end & queue_mask) {
txq_kick(mp->txq + queue);
} else if (mp->work_tx & queue_mask) {
work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
txq_maybe_wake(mp->txq + queue);
} else if (mp->work_rx & queue_mask) {
work_done += rxq_process(mp->rxq + queue, work_tbd);
} else if (!mp->oom && (mp->work_rx_refill & queue_mask)) {
work_done += rxq_refill(mp->rxq + queue, work_tbd);
} else {
BUG();
}
}
if (work_done < budget) {
if (mp->oom)
mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
napi_complete_done(napi, work_done);
wrlp(mp, INT_MASK, mp->int_mask);
}
return work_done;
}
static inline void oom_timer_wrapper(struct timer_list *t)
{
struct mv643xx_eth_private *mp = from_timer(mp, t, rx_oom);
napi_schedule(&mp->napi);
}
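/*
 * Bring the port up: re-initialise the PHY if there is one, enable the
 * serial port, program TX shaping and queue priorities, restore the RX
 * configuration and address filters, then enable the RX queues.
 */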
static void port_start(struct mv643xx_eth_private *mp)
{
struct net_device *dev = mp->dev;
u32 pscr;
int i;
/*
* Perform PHY reset, if there is a PHY.
*/
if (dev->phydev) {
struct ethtool_link_ksettings cmd;
mv643xx_eth_get_link_ksettings(dev, &cmd);
phy_init_hw(dev->phydev);
mv643xx_eth_set_link_ksettings(
dev, (const struct ethtool_link_ksettings *)&cmd);
phy_start(dev->phydev);
}
/*
* Configure basic link parameters.
*/
pscr = rdlp(mp, PORT_SERIAL_CONTROL);
pscr |= SERIAL_PORT_ENABLE;
wrlp(mp, PORT_SERIAL_CONTROL, pscr);
pscr |= DO_NOT_FORCE_LINK_FAIL;
if (!dev->phydev)
pscr |= FORCE_LINK_PASS;
wrlp(mp, PORT_SERIAL_CONTROL, pscr);
/*
* Configure TX path and queues.
*/
tx_set_rate(mp, 1000000000, 16777216);
for (i = 0; i < mp->txq_count; i++) {
struct tx_queue *txq = mp->txq + i;
txq_reset_hw_ptr(txq);
txq_set_rate(txq, 1000000000, 16777216);
txq_set_fixed_prio_mode(txq);
}
/*
	 * Route all unmatched unicast, TCP, UDP, BPDU and broadcast
* frames to RX queue #0, and include the pseudo-header when
* calculating receive checksums.
*/
mv643xx_eth_set_features(mp->dev, mp->dev->features);
/*
* Treat BPDUs as normal multicasts, and disable partition mode.
*/
wrlp(mp, PORT_CONFIG_EXT, 0x00000000);
/*
* Add configured unicast addresses to address filter table.
*/
mv643xx_eth_program_unicast_filter(mp->dev);
/*
* Enable the receive queues.
*/
for (i = 0; i < mp->rxq_count; i++) {
struct rx_queue *rxq = mp->rxq + i;
u32 addr;
addr = (u32)rxq->rx_desc_dma;
addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr);
rxq_enable(rxq);
}
}
static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
{
int skb_size;
/*
* Reserve 2+14 bytes for an ethernet header (the hardware
* automatically prepends 2 bytes of dummy data to each
* received packet), 16 bytes for up to four VLAN tags, and
* 4 bytes for the trailing FCS -- 36 bytes total.
*/
skb_size = mp->dev->mtu + 36;
/*
* Make sure that the skb size is a multiple of 8 bytes, as
* the lower three bits of the receive descriptor's buffer
* size field are ignored by the hardware.
*/
mp->skb_size = (skb_size + 7) & ~7;
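	/*
	 * For example, the default 1500-byte MTU gives 1500 + 36 = 1536,
	 * which is already a multiple of 8, so the rounding is a no-op;
	 * a 1502-byte MTU would give 1538, rounded up to 1544.
	 */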
/*
* If NET_SKB_PAD is smaller than a cache line,
* netdev_alloc_skb() will cause skb->data to be misaligned
* to a cache line boundary. If this is the case, include
* some extra space to allow re-aligning the data area.
*/
mp->skb_size += SKB_DMA_REALIGN;
}
static int mv643xx_eth_open(struct net_device *dev)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
int err;
int i;
wrlp(mp, INT_CAUSE, 0);
wrlp(mp, INT_CAUSE_EXT, 0);
rdlp(mp, INT_CAUSE_EXT);
err = request_irq(dev->irq, mv643xx_eth_irq,
IRQF_SHARED, dev->name, dev);
if (err) {
netdev_err(dev, "can't assign irq\n");
return -EAGAIN;
}
mv643xx_eth_recalc_skb_size(mp);
napi_enable(&mp->napi);
mp->int_mask = INT_EXT;
for (i = 0; i < mp->rxq_count; i++) {
err = rxq_init(mp, i);
if (err) {
while (--i >= 0)
rxq_deinit(mp->rxq + i);
goto out;
}
rxq_refill(mp->rxq + i, INT_MAX);
mp->int_mask |= INT_RX_0 << i;
}
if (mp->oom) {
mp->rx_oom.expires = jiffies + (HZ / 10);
add_timer(&mp->rx_oom);
}
for (i = 0; i < mp->txq_count; i++) {
err = txq_init(mp, i);
if (err) {
while (--i >= 0)
txq_deinit(mp->txq + i);
goto out_free;
}
mp->int_mask |= INT_TX_END_0 << i;
}
add_timer(&mp->mib_counters_timer);
port_start(mp);
wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
wrlp(mp, INT_MASK, mp->int_mask);
return 0;
out_free:
for (i = 0; i < mp->rxq_count; i++)
rxq_deinit(mp->rxq + i);
out:
free_irq(dev->irq, dev);
return err;
}
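/*
 * Quiesce the port: disable all RX/TX queues, wait for the TX FIFO to
 * drain, then clear the serial-port enable and link-forcing bits.
 */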
static void port_reset(struct mv643xx_eth_private *mp)
{
unsigned int data;
int i;
for (i = 0; i < mp->rxq_count; i++)
rxq_disable(mp->rxq + i);
for (i = 0; i < mp->txq_count; i++)
txq_disable(mp->txq + i);
while (1) {
u32 ps = rdlp(mp, PORT_STATUS);
if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
break;
udelay(10);
}
	/* Clear the enable and link-forcing bits in the port serial control register */
data = rdlp(mp, PORT_SERIAL_CONTROL);
data &= ~(SERIAL_PORT_ENABLE |
DO_NOT_FORCE_LINK_FAIL |
FORCE_LINK_PASS);
wrlp(mp, PORT_SERIAL_CONTROL, data);
}
static int mv643xx_eth_stop(struct net_device *dev)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
int i;
wrlp(mp, INT_MASK_EXT, 0x00000000);
wrlp(mp, INT_MASK, 0x00000000);
rdlp(mp, INT_MASK);
napi_disable(&mp->napi);
del_timer_sync(&mp->rx_oom);
netif_carrier_off(dev);
if (dev->phydev)
phy_stop(dev->phydev);
free_irq(dev->irq, dev);
port_reset(mp);
mv643xx_eth_get_stats(dev);
mib_counters_update(mp);
del_timer_sync(&mp->mib_counters_timer);
for (i = 0; i < mp->rxq_count; i++)
rxq_deinit(mp->rxq + i);
for (i = 0; i < mp->txq_count; i++)
txq_deinit(mp->txq + i);
return 0;
}
static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
int ret;
if (!dev->phydev)
return -ENOTSUPP;
ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
if (!ret)
mv643xx_eth_adjust_link(dev);
return ret;
}
static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
dev->mtu = new_mtu;
mv643xx_eth_recalc_skb_size(mp);
tx_set_rate(mp, 1000000000, 16777216);
if (!netif_running(dev))
return 0;
/*
* Stop and then re-open the interface. This will allocate RX
* skbs of the new MTU.
	 * The re-open may fail if memory is exhausted, in which case
	 * the device is left down.
*/
mv643xx_eth_stop(dev);
if (mv643xx_eth_open(dev)) {
netdev_err(dev,
"fatal error on re-opening device after MTU change\n");
}
return 0;
}
static void tx_timeout_task(struct work_struct *ugly)
{
struct mv643xx_eth_private *mp;
mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
if (netif_running(mp->dev)) {
netif_tx_stop_all_queues(mp->dev);
port_reset(mp);
port_start(mp);
netif_tx_wake_all_queues(mp->dev);
}
}
static void mv643xx_eth_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
netdev_info(dev, "tx timeout\n");
schedule_work(&mp->tx_timeout_task);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void mv643xx_eth_netpoll(struct net_device *dev)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
wrlp(mp, INT_MASK, 0x00000000);
rdlp(mp, INT_MASK);
mv643xx_eth_irq(dev->irq, dev);
wrlp(mp, INT_MASK, mp->int_mask);
}
#endif
/* platform glue ************************************************************/
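/*
 * Program the controller's six address decoding windows to cover the
 * DRAM chip selects described by the MBUS target info; windows that
 * don't map a chip select are left disabled.
 */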
static void
mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
const struct mbus_dram_target_info *dram)
{
void __iomem *base = msp->base;
u32 win_enable;
u32 win_protect;
int i;
for (i = 0; i < 6; i++) {
writel(0, base + WINDOW_BASE(i));
writel(0, base + WINDOW_SIZE(i));
if (i < 4)
writel(0, base + WINDOW_REMAP_HIGH(i));
}
win_enable = 0x3f;
win_protect = 0;
for (i = 0; i < dram->num_cs; i++) {
const struct mbus_dram_window *cs = dram->cs + i;
writel((cs->base & 0xffff0000) |
(cs->mbus_attr << 8) |
dram->mbus_dram_target_id, base + WINDOW_BASE(i));
writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
win_enable &= ~(1 << i);
win_protect |= 3 << (2 * i);
}
writel(win_enable, base + WINDOW_BAR_ENABLE);
msp->win_protect = win_protect;
}
static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
{
/*
* Check whether we have a 14-bit coal limit field in bits
* [21:8], or a 16-bit coal limit in bits [25,21:7] of the
* SDMA config register.
*/
writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG);
if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000)
msp->extended_rx_coal_limit = 1;
else
msp->extended_rx_coal_limit = 0;
/*
* Check whether the MAC supports TX rate control, and if
* yes, whether its associated registers are in the old or
* the new place.
*/
writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED);
if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) {
msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT;
} else {
writel(7, msp->base + 0x0400 + TX_BW_RATE);
if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7)
msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT;
else
msp->tx_bw_control = TX_BW_CONTROL_ABSENT;
}
}
#if defined(CONFIG_OF)
static const struct of_device_id mv643xx_eth_shared_ids[] = {
{ .compatible = "marvell,orion-eth", },
{ .compatible = "marvell,kirkwood-eth", },
{ }
};
MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);
#endif
#if defined(CONFIG_OF_IRQ) && !defined(CONFIG_MV64X60)
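/*
 * Read an optional "marvell,<name>" property from the device tree,
 * leaving _v unchanged when the property is absent.
 */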
#define mv643xx_eth_property(_np, _name, _v) \
do { \
u32 tmp; \
if (!of_property_read_u32(_np, "marvell," _name, &tmp)) \
_v = tmp; \
} while (0)
static struct platform_device *port_platdev[3];
static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
struct device_node *pnp)
{
struct platform_device *ppdev;
struct mv643xx_eth_platform_data ppd;
struct resource res;
const char *mac_addr;
int ret;
int dev_num = 0;
memset(&ppd, 0, sizeof(ppd));
ppd.shared = pdev;
memset(&res, 0, sizeof(res));
if (of_irq_to_resource(pnp, 0, &res) <= 0) {
dev_err(&pdev->dev, "missing interrupt on %pOFn\n", pnp);
return -EINVAL;
}
if (of_property_read_u32(pnp, "reg", &ppd.port_number)) {
dev_err(&pdev->dev, "missing reg property on %pOFn\n", pnp);
return -EINVAL;
}
if (ppd.port_number >= 3) {
dev_err(&pdev->dev, "invalid reg property on %pOFn\n", pnp);
return -EINVAL;
}
while (dev_num < 3 && port_platdev[dev_num])
dev_num++;
if (dev_num == 3) {
dev_err(&pdev->dev, "too many ports registered\n");
return -EINVAL;
}
mac_addr = of_get_mac_address(pnp);
if (!IS_ERR(mac_addr))
ether_addr_copy(ppd.mac_addr, mac_addr);
mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
mv643xx_eth_property(pnp, "tx-sram-size", ppd.tx_sram_size);
mv643xx_eth_property(pnp, "rx-queue-size", ppd.rx_queue_size);
mv643xx_eth_property(pnp, "rx-sram-addr", ppd.rx_sram_addr);
mv643xx_eth_property(pnp, "rx-sram-size", ppd.rx_sram_size);
ppd.phy_node = of_parse_phandle(pnp, "phy-handle", 0);
if (!ppd.phy_node) {
ppd.phy_addr = MV643XX_ETH_PHY_NONE;
of_property_read_u32(pnp, "speed", &ppd.speed);
of_property_read_u32(pnp, "duplex", &ppd.duplex);
}
ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num);
if (!ppdev)
return -ENOMEM;
ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
ppdev->dev.of_node = pnp;
ret = platform_device_add_resources(ppdev, &res, 1);
if (ret)
goto port_err;
ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd));
if (ret)
goto port_err;
ret = platform_device_add(ppdev);
if (ret)
goto port_err;
port_platdev[dev_num] = ppdev;
return 0;
port_err:
platform_device_put(ppdev);
return ret;
}
static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
{
struct mv643xx_eth_shared_platform_data *pd;
struct device_node *pnp, *np = pdev->dev.of_node;
int ret;
/* bail out if not registered from DT */
if (!np)
return 0;
pd = devm_kzalloc(&pdev->dev, sizeof(*pd), GFP_KERNEL);
if (!pd)
return -ENOMEM;
pdev->dev.platform_data = pd;
mv643xx_eth_property(np, "tx-checksum-limit", pd->tx_csum_limit);
for_each_available_child_of_node(np, pnp) {
ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
if (ret) {
of_node_put(pnp);
return ret;
}
}
return 0;
}
static void mv643xx_eth_shared_of_remove(void)
{
int n;
for (n = 0; n < 3; n++) {
platform_device_del(port_platdev[n]);
port_platdev[n] = NULL;
}
}
#else
static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
{
return 0;
}
static inline void mv643xx_eth_shared_of_remove(void)
{
}
#endif
static int mv643xx_eth_shared_probe(struct platform_device *pdev)
{
static int mv643xx_eth_version_printed;
struct mv643xx_eth_shared_platform_data *pd;
struct mv643xx_eth_shared_private *msp;
const struct mbus_dram_target_info *dram;
struct resource *res;
int ret;
if (!mv643xx_eth_version_printed++)
pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n",
mv643xx_eth_driver_version);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL)
return -EINVAL;
msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
if (msp == NULL)
return -ENOMEM;
platform_set_drvdata(pdev, msp);
msp->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (msp->base == NULL)
return -ENOMEM;
msp->clk = devm_clk_get(&pdev->dev, NULL);
if (!IS_ERR(msp->clk))
clk_prepare_enable(msp->clk);
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
dram = mv_mbus_dram_info();
if (dram)
mv643xx_eth_conf_mbus_windows(msp, dram);
ret = mv643xx_eth_shared_of_probe(pdev);
if (ret)
goto err_put_clk;
pd = dev_get_platdata(&pdev->dev);
msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
pd->tx_csum_limit : 9 * 1024;
infer_hw_params(msp);
return 0;
err_put_clk:
if (!IS_ERR(msp->clk))
clk_disable_unprepare(msp->clk);
return ret;
}
static int mv643xx_eth_shared_remove(struct platform_device *pdev)
{
struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
mv643xx_eth_shared_of_remove();
if (!IS_ERR(msp->clk))
clk_disable_unprepare(msp->clk);
return 0;
}
static struct platform_driver mv643xx_eth_shared_driver = {
.probe = mv643xx_eth_shared_probe,
.remove = mv643xx_eth_shared_remove,
.driver = {
.name = MV643XX_ETH_SHARED_NAME,
.of_match_table = of_match_ptr(mv643xx_eth_shared_ids),
},
};
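/*
 * The shared PHY_ADDR register packs one 5-bit SMI address per port,
 * at bit offset 5 * port_num.
 */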
static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
{
int addr_shift = 5 * mp->port_num;
u32 data;
data = rdl(mp, PHY_ADDR);
data &= ~(0x1f << addr_shift);
data |= (phy_addr & 0x1f) << addr_shift;
wrl(mp, PHY_ADDR, data);
}
static int phy_addr_get(struct mv643xx_eth_private *mp)
{
unsigned int data;
data = rdl(mp, PHY_ADDR);
return (data >> (5 * mp->port_num)) & 0x1f;
}
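/*
 * Apply platform data: MAC address (falling back to the one already
 * programmed into the hardware), ring sizes clamped to sane bounds,
 * SRAM descriptor pools and queue counts.
 */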
static void set_params(struct mv643xx_eth_private *mp,
struct mv643xx_eth_platform_data *pd)
{
struct net_device *dev = mp->dev;
unsigned int tx_ring_size;
if (is_valid_ether_addr(pd->mac_addr))
memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
else
uc_addr_get(mp, dev->dev_addr);
mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
if (pd->rx_queue_size)
mp->rx_ring_size = pd->rx_queue_size;
mp->rx_desc_sram_addr = pd->rx_sram_addr;
mp->rx_desc_sram_size = pd->rx_sram_size;
mp->rxq_count = pd->rx_queue_count ? : 1;
tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
if (pd->tx_queue_size)
tx_ring_size = pd->tx_queue_size;
mp->tx_ring_size = clamp_t(unsigned int, tx_ring_size,
MV643XX_MAX_SKB_DESCS * 2, 4096);
if (mp->tx_ring_size != tx_ring_size)
netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
mp->tx_ring_size, tx_ring_size);
mp->tx_desc_sram_addr = pd->tx_sram_addr;
mp->tx_desc_sram_size = pd->tx_sram_size;
mp->txq_count = pd->tx_queue_count ? : 1;
}
static int get_phy_mode(struct mv643xx_eth_private *mp)
{
struct device *dev = mp->dev->dev.parent;
phy_interface_t iface;
int err;
if (dev->of_node)
err = of_get_phy_mode(dev->of_node, &iface);
	/* Historical default if unspecified.  We could also read/write
	 * the interface state in the PSC1 register.
	 */
if (!dev->of_node || err)
iface = PHY_INTERFACE_MODE_GMII;
return iface;
}
static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
int phy_addr)
{
struct phy_device *phydev;
int start;
int num;
int i;
char phy_id[MII_BUS_ID_SIZE + 3];
if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) {
start = phy_addr_get(mp) & 0x1f;
num = 32;
} else {
start = phy_addr & 0x1f;
num = 1;
}
/* Attempt to connect to the PHY using orion-mdio */
phydev = ERR_PTR(-ENODEV);
for (i = 0; i < num; i++) {
int addr = (start + i) & 0x1f;
snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
"orion-mdio-mii", addr);
phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link,
get_phy_mode(mp));
if (!IS_ERR(phydev)) {
phy_addr_set(mp, addr);
break;
}
}
return phydev;
}
static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
{
struct net_device *dev = mp->dev;
struct phy_device *phy = dev->phydev;
if (speed == 0) {
phy->autoneg = AUTONEG_ENABLE;
phy->speed = 0;
phy->duplex = 0;
linkmode_copy(phy->advertising, phy->supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
phy->advertising);
} else {
phy->autoneg = AUTONEG_DISABLE;
linkmode_zero(phy->advertising);
phy->speed = speed;
phy->duplex = duplex;
}
phy_start_aneg(phy);
}
static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
{
struct net_device *dev = mp->dev;
u32 pscr;
pscr = rdlp(mp, PORT_SERIAL_CONTROL);
if (pscr & SERIAL_PORT_ENABLE) {
pscr &= ~SERIAL_PORT_ENABLE;
wrlp(mp, PORT_SERIAL_CONTROL, pscr);
}
pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
if (!dev->phydev) {
pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
if (speed == SPEED_1000)
pscr |= SET_GMII_SPEED_TO_1000;
else if (speed == SPEED_100)
pscr |= SET_MII_SPEED_TO_100;
pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL;
pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX;
if (duplex == DUPLEX_FULL)
pscr |= SET_FULL_DUPLEX_MODE;
}
wrlp(mp, PORT_SERIAL_CONTROL, pscr);
}
static const struct net_device_ops mv643xx_eth_netdev_ops = {
.ndo_open = mv643xx_eth_open,
.ndo_stop = mv643xx_eth_stop,
.ndo_start_xmit = mv643xx_eth_xmit,
.ndo_set_rx_mode = mv643xx_eth_set_rx_mode,
.ndo_set_mac_address = mv643xx_eth_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = mv643xx_eth_ioctl,
.ndo_change_mtu = mv643xx_eth_change_mtu,
.ndo_set_features = mv643xx_eth_set_features,
.ndo_tx_timeout = mv643xx_eth_tx_timeout,
.ndo_get_stats = mv643xx_eth_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = mv643xx_eth_netpoll,
#endif
};
static int mv643xx_eth_probe(struct platform_device *pdev)
{
struct mv643xx_eth_platform_data *pd;
struct mv643xx_eth_private *mp;
struct net_device *dev;
struct phy_device *phydev = NULL;
struct resource *res;
int err;
pd = dev_get_platdata(&pdev->dev);
if (pd == NULL) {
dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
return -ENODEV;
}
if (pd->shared == NULL) {
dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n");
return -ENODEV;
}
dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
if (!dev)
return -ENOMEM;
SET_NETDEV_DEV(dev, &pdev->dev);
mp = netdev_priv(dev);
platform_set_drvdata(pdev, mp);
mp->shared = platform_get_drvdata(pd->shared);
mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10);
mp->port_num = pd->port_number;
mp->dev = dev;
	/* Kirkwood resets some registers when their clock is gated.  In
	 * particular, CLK125_BYPASS_EN must be cleared here, but that bit
	 * does not exist on the other SoCs/system controllers that use
	 * this driver.
	 */
if (of_device_is_compatible(pdev->dev.of_node,
"marvell,kirkwood-eth-port"))
wrlp(mp, PORT_SERIAL_CONTROL1,
rdlp(mp, PORT_SERIAL_CONTROL1) & ~CLK125_BYPASS_EN);
/*
* Start with a default rate, and if there is a clock, allow
* it to override the default.
*/
mp->t_clk = 133000000;
mp->clk = devm_clk_get(&pdev->dev, NULL);
if (!IS_ERR(mp->clk)) {
clk_prepare_enable(mp->clk);
mp->t_clk = clk_get_rate(mp->clk);
} else if (!IS_ERR(mp->shared->clk)) {
mp->t_clk = clk_get_rate(mp->shared->clk);
}
set_params(mp, pd);
netif_set_real_num_tx_queues(dev, mp->txq_count);
netif_set_real_num_rx_queues(dev, mp->rxq_count);
err = 0;
if (pd->phy_node) {
phydev = of_phy_connect(mp->dev, pd->phy_node,
mv643xx_eth_adjust_link, 0,
get_phy_mode(mp));
if (!phydev)
err = -ENODEV;
else
phy_addr_set(mp, phydev->mdio.addr);
} else if (pd->phy_addr != MV643XX_ETH_PHY_NONE) {
phydev = phy_scan(mp, pd->phy_addr);
if (IS_ERR(phydev))
err = PTR_ERR(phydev);
else
phy_init(mp, pd->speed, pd->duplex);
}
if (err == -ENODEV) {
err = -EPROBE_DEFER;
goto out;
}
if (err)
goto out;
dev->ethtool_ops = &mv643xx_eth_ethtool_ops;
init_pscr(mp, pd->speed, pd->duplex);
mib_counters_clear(mp);
timer_setup(&mp->mib_counters_timer, mib_counters_timer_wrapper, 0);
mp->mib_counters_timer.expires = jiffies + 30 * HZ;
spin_lock_init(&mp->mib_counters_lock);
INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);
timer_setup(&mp->rx_oom, oom_timer_wrapper, 0);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
BUG_ON(!res);
dev->irq = res->start;
dev->netdev_ops = &mv643xx_eth_netdev_ops;
dev->watchdog_timeo = 2 * HZ;
dev->base_addr = 0;
dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
dev->vlan_features = dev->features;
dev->features |= NETIF_F_RXCSUM;
dev->hw_features = dev->features;
dev->priv_flags |= IFF_UNICAST_FLT;
dev->gso_max_segs = MV643XX_MAX_TSO_SEGS;
/* MTU range: 64 - 9500 */
dev->min_mtu = 64;
dev->max_mtu = 9500;
if (mp->shared->win_protect)
wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);
netif_carrier_off(dev);
wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);
set_rx_coal(mp, 250);
set_tx_coal(mp, 0);
err = register_netdev(dev);
if (err)
goto out;
netdev_notice(dev, "port %d with MAC address %pM\n",
mp->port_num, dev->dev_addr);
if (mp->tx_desc_sram_size > 0)
netdev_notice(dev, "configured with sram\n");
return 0;
out:
if (!IS_ERR(mp->clk))
clk_disable_unprepare(mp->clk);
free_netdev(dev);
return err;
}
static int mv643xx_eth_remove(struct platform_device *pdev)
{
struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
struct net_device *dev = mp->dev;
unregister_netdev(mp->dev);
if (dev->phydev)
phy_disconnect(dev->phydev);
cancel_work_sync(&mp->tx_timeout_task);
if (!IS_ERR(mp->clk))
clk_disable_unprepare(mp->clk);
free_netdev(mp->dev);
return 0;
}
static void mv643xx_eth_shutdown(struct platform_device *pdev)
{
struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
/* Mask all interrupts on ethernet port */
wrlp(mp, INT_MASK, 0);
rdlp(mp, INT_MASK);
if (netif_running(mp->dev))
port_reset(mp);
}
static struct platform_driver mv643xx_eth_driver = {
.probe = mv643xx_eth_probe,
.remove = mv643xx_eth_remove,
.shutdown = mv643xx_eth_shutdown,
.driver = {
.name = MV643XX_ETH_NAME,
},
};
static struct platform_driver * const drivers[] = {
&mv643xx_eth_shared_driver,
&mv643xx_eth_driver,
};
static int __init mv643xx_eth_init_module(void)
{
return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}
module_init(mv643xx_eth_init_module);
static void __exit mv643xx_eth_cleanup_module(void)
{
platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
module_exit(mv643xx_eth_cleanup_module);
MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, "
"Manish Lachwani, Dale Farnsworth and Lennert Buytenhek");
MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
MODULE_ALIAS("platform:" MV643XX_ETH_NAME);
| gpl-2.0 |
freedesktop-unofficial-mirror/gstreamer__gst-plugins-bad | gst/jpegformat/gstjpegformat.c | 70 | 1463 | /* GStreamer
*
* jpegformat: a plugin for JPEG Interchange Format
*
* Copyright (C) <2010> Stefan Kost <ensonic@users.sf.net>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include "gstjpegparse.h"
#include "gstjifmux.h"
static gboolean
plugin_init (GstPlugin * plugin)
{
if (!gst_element_register (plugin, "jpegparse", GST_RANK_NONE,
GST_TYPE_JPEG_PARSE))
return FALSE;
if (!gst_element_register (plugin, "jifmux", GST_RANK_SECONDARY,
GST_TYPE_JIF_MUX))
return FALSE;
return TRUE;
}
GST_PLUGIN_DEFINE (GST_VERSION_MAJOR,
GST_VERSION_MINOR,
jpegformat,
"JPEG interchange format plugin",
plugin_init, VERSION, "LGPL", GST_PACKAGE_NAME, GST_PACKAGE_ORIGIN)
| gpl-2.0 |
bestgames1/android_kernel_samsung_kylepro | sound/caph_hawaii/audio_driver/audio_mqueue.c | 70 | 8839 | /****************************************************************************
*
* Copyright (c) 2009 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
* at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
*software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
****************************************************************************/
/*#define AUDIO_MQUEUE_RPC_WAKELOCK*/
#include <linux/sched.h>
#include <linux/kernel.h> /* printk() */
#include <linux/fs.h> /* everything... */
#include <linux/errno.h> /* error codes */
#include <linux/delay.h> /* udelay */
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/timer.h>
#include <linux/poll.h>
#include <linux/unistd.h>
#include <linux/init.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fcntl.h>
#include <asm/system.h>
#include <linux/kthread.h>
#if defined(CONFIG_HAS_WAKELOCK) && defined(AUDIO_MQUEUE_RPC_WAKELOCK)
#include <linux/wakelock.h>
#endif
#include "mobcom_types.h"
#include "resultcode.h"
#include "taskmsgs.h"
#include "consts.h"
/*#include "rpc_debug.h"*/
#include "audio_mqueue.h"
#include "bcmlog.h"
#include "audio_trace.h"
#define INVALID_HANDLE(a) (!(a) || !(a->valid))
static int Audio_MQueueKthreadFn(void *param);
int Audio_MsgQueueInit(Audio_MsgQueueHandle_t *mHandle,
Audio_MsgQueueThreadFn_t fn,
char *name, unsigned int optionType,
		void *optionData, char *wk_name)
{
int ret = 0;
if (!mHandle) {
aTrace(LOG_AUDIO_DRIVER,
"mq: Audio_MsgQueueInit has Invalid handle\r\n");
return -1;
}
memset(mHandle, 0, sizeof(Audio_MsgQueueHandle_t));
INIT_LIST_HEAD(&(mHandle->mList));
mHandle->mAvailData = 0;
spin_lock_init(&mHandle->mLock);
init_waitqueue_head(&mHandle->mWaitQ);
/*Spawn a kthread if required */
if (fn) {
mHandle->valid = 1;
mHandle->mFn = fn;
mHandle->mThread =
kthread_run(Audio_MQueueKthreadFn, (void *)mHandle, name);
aTrace(LOG_AUDIO_DRIVER,
"mq: Audio_MsgQueueInit mthread=%x\r\n",
(int)mHandle->mThread);
ret = (mHandle->mThread) ? 0 : -1;
} else {
mHandle->mThread = NULL;
mHandle->mFn = 0;
mHandle->valid = 1;
ret = 0;
}
#if defined(CONFIG_HAS_WAKELOCK) && defined(AUDIO_MQUEUE_RPC_WAKELOCK)
wake_lock_init(&(mHandle->mq_wake_lock), WAKE_LOCK_SUSPEND, wk_name);
#endif
strncpy(mHandle->name, name, MAX_NM_LEN);
aTrace(LOG_AUDIO_DRIVER,
"mq: Audio_MsgQueueInit mHandle=%x fn=%x nm=%s ret=%d\r\n",
(int)mHandle, (int)fn, (name) ? name : "", ret);
return ret;
}
int Audio_MsgQueueAdd(Audio_MsgQueueHandle_t *mHandle, void *data)
{
Audio_MsgQueueElement_t *elem;
if (INVALID_HANDLE(mHandle)) {
aTrace(LOG_AUDIO_DRIVER,
"mq: Audio_MsgQueueAdd has Invalid mHandle %x valid %d\r\n",
(int)mHandle, (mHandle) ? mHandle->valid : -1);
return -1;
}
elem = kmalloc(sizeof(Audio_MsgQueueElement_t),
in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
if (!elem) {
aTrace(LOG_AUDIO_DRIVER,
"mq: Audio_MsgQueueAdd kmalloc failed interrupt_context=%d\r\n",
(int)in_interrupt());
return -1;
}
elem->data = data;
/*add to queue */
spin_lock_bh(&mHandle->mLock);
list_add_tail(&elem->mList, &mHandle->mList);
mHandle->mAvailData = 1;
spin_unlock_bh(&mHandle->mLock);
aTrace(LOG_AUDIO_DRIVER,
"mq: Audio_MsgQueueAdd mHandle=%x, data=%d\r\n",
(int)mHandle, (int)data);
wake_up_interruptible(&mHandle->mWaitQ);
return 0;
}
int Audio_MsgQueueIsEmpty(Audio_MsgQueueHandle_t *mHandle)
{
int isEmpty = 1;
if (INVALID_HANDLE(mHandle)) {
aTrace(LOG_AUDIO_DRIVER,
"mq: Audio_MsgQueueIsEmpty has Invalid mHandle\r\n");
return -1;
}
spin_lock_bh(&mHandle->mLock);
isEmpty = (Boolean) list_empty(&mHandle->mList);
spin_unlock_bh(&mHandle->mLock);
aTrace(LOG_AUDIO_DRIVER,
"mq: Audio_MsgQueueIsEmpty mHandle=%x, isEmpty=%d\r\n",
(int)mHandle, (int)isEmpty);
return isEmpty;
}
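/*
 * Block until an element is queued, then pop the head into *outData.
 * A NULL payload is the conventional shutdown signal that
 * Audio_MsgQueueDeInit() sends to terminate the dispatcher kthread.
 */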
int Audio_MsgQueueRemove(Audio_MsgQueueHandle_t *mHandle, void **outData)
{
struct list_head *entry;
Audio_MsgQueueElement_t *Item = NULL;
void *data = NULL;
int isEmpty = 1;
if (INVALID_HANDLE(mHandle)) {
aTrace(LOG_AUDIO_DRIVER,
"mq: Audio_MsgQueueRemove has Invalid mHandle\r\n");
return -1;
}
if (!outData) {
aTrace(LOG_AUDIO_DRIVER,
"mq: Audio_MsgQueueRemove Invalid param mHandle=%x data=%x\r\n",
(int)mHandle, (int)outData);
return -1;
}
while (1) {
spin_lock_bh(&mHandle->mLock);
isEmpty = (Boolean)list_empty(&mHandle->mList);
if (isEmpty) {
mHandle->mAvailData = 0;
spin_unlock_bh(&mHandle->mLock);
wait_event_interruptible(mHandle->mWaitQ,
mHandle->mAvailData);
} else {
entry = mHandle->mList.next;
Item = list_entry(entry,
Audio_MsgQueueElement_t,
mList);
data = Item->data;
list_del(entry);
spin_unlock_bh(&mHandle->mLock);
kfree(Item);
*outData = data;
break;
}
}
aTrace(LOG_AUDIO_DRIVER,
"mq: Audio_MsgQueueRemove mHandle=%x, data=%d\r\n",
(int)mHandle, (int)data);
return 0;
}
#if 0
int Audio_MsgQueueDebugList(Audio_MsgQueueHandle_t *mHandle,
RpcOutputContext_t *c)
{
Audio_MsgQueueElement_t *Item = NULL;
struct list_head *listptr, *pos;
if (INVALID_HANDLE(mHandle)) {
aTrace(LOG_AUDIO_DRIVER,
"mq: Audio_MsgQueueDebugList has Invalid mHandle\r\n");
return 0;
}
spin_lock_bh(&mHandle->mLock);
RpcDbgDumpStr(c, "\tkThread: %s tid:%d Rx:%d\r\n",
mHandle->name,
mHandle->mThread->pid,
mHandle->mAvailData);
RpcDumpTaskCallStack(c, mHandle->mThread);
list_for_each_safe(listptr, pos, &mHandle->mList)
{
Item = list_entry(listptr, Audio_MsgQueueElement_t, mList);
RpcDbgDumpStr(c, "\tQUEUED pkt:%d\r\n",
(int)Item->data);
}
spin_unlock_bh(&mHandle->mLock);
return 0;
}
int Audio_MsgQueueCount(Audio_MsgQueueHandle_t *mHandle)
{
int count = 0;
struct list_head *pos;
if (INVALID_HANDLE(mHandle)) {
aTrace(LOG_AUDIO_DRIVER,
"mq: Audio_MsgQueueDebugList has Invalid mHandle\r\n");
return -1;
}
spin_lock_bh(&mHandle->mLock);
list_for_each(pos, &mHandle->mList) count++;
spin_unlock_bh(&mHandle->mLock);
return count;
}
#endif
void *Audio_MsgQueueGet(Audio_MsgQueueHandle_t *mHandle)
{
struct list_head *entry;
Audio_MsgQueueElement_t *Item = NULL;
void *data = NULL;
int isEmpty = 1;
if (INVALID_HANDLE(mHandle)) {
aTrace(LOG_AUDIO_DRIVER,
"mq: Audio_MsgQueueGet has Invalid mHandle\r\n");
return NULL;
}
spin_lock_bh(&mHandle->mLock);
isEmpty = (Boolean)list_empty(&mHandle->mList);
if (isEmpty) {
mHandle->mAvailData = 0;
spin_unlock_bh(&mHandle->mLock);
return NULL;
}
entry = mHandle->mList.next;
Item = list_entry(entry, Audio_MsgQueueElement_t, mList);
data = Item->data;
list_del(entry);
spin_unlock_bh(&mHandle->mLock);
kfree(Item);
aTrace(LOG_AUDIO_DRIVER,
"mq: Audio_MsgQueueGet mHandle=%x, data=%d\r\n",
(int)mHandle, (int)data);
return data;
}
static int Audio_MQueueKthreadFn(void *param)
{
void *data;
int ret = 0;
Audio_MsgQueueHandle_t *mHandle = (Audio_MsgQueueHandle_t *) param;
aTrace(LOG_AUDIO_DRIVER,
"mq: Audio_MQueueKthreadFn mHandle=%x\r\n",
(int)mHandle);
set_user_nice(current, -16);
while (ret == 0) {
data = NULL;
ret = Audio_MsgQueueRemove(mHandle, &data);
aTrace(LOG_AUDIO_DRIVER,
"mq: Audio_MQueueKthreadFn Dispatch mHandle=%x data=%d ret=%d\r\n",
(int)mHandle, (int)data, ret);
if (ret == 0 && data) {
#if defined(CONFIG_HAS_WAKELOCK) && defined(AUDIO_MQUEUE_RPC_WAKELOCK)
wake_lock(&(mHandle->mq_wake_lock));
#endif
/* Call actual handler */
mHandle->mFn(mHandle, data);
#if defined(CONFIG_HAS_WAKELOCK) && defined(AUDIO_MQUEUE_RPC_WAKELOCK)
wake_unlock(&(mHandle->mq_wake_lock));
#endif
} else
break;
}
aTrace(LOG_AUDIO_DRIVER,
"mq: Audio_MQueueKthreadFn QUIT mHandle=%x\r\n",
(int)mHandle);
return 0;
}
int Audio_MsgQueueDeInit(Audio_MsgQueueHandle_t *mHandle)
{
if (INVALID_HANDLE(mHandle)) {
aTrace(LOG_AUDIO_DRIVER,
"mq: Audio_MsgQueueDeInit has Invalid handle\r\n");
return -1;
}
aTrace(LOG_AUDIO_DRIVER,
"mq: Audio_MsgQueueDeInit mHandle=%x thread=%x\r\n",
(int)mHandle, (int)mHandle->mThread);
/*Send NULL data to exit kthread */
if (mHandle->mThread)
Audio_MsgQueueAdd(mHandle, NULL);
#if defined(CONFIG_HAS_WAKELOCK) && defined(AUDIO_MQUEUE_RPC_WAKELOCK)
wake_lock_destroy(&(mHandle->mq_wake_lock));
#endif
mHandle->valid = 0;
return 0;
}
| gpl-2.0 |
vvanpo/linux | drivers/gpu/drm/msm/msm_atomic.c | 326 | 7756 | /*
* Copyright (C) 2014 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "msm_drv.h"
#include "msm_kms.h"
#include "msm_gem.h"
struct msm_commit {
struct drm_device *dev;
struct drm_atomic_state *state;
uint32_t fence;
struct msm_fence_cb fence_cb;
uint32_t crtc_mask;
};
static void fence_cb(struct msm_fence_cb *cb);
/* block until specified crtcs are no longer pending update, and
* atomically mark them as pending update
*/
static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
int ret;
spin_lock(&priv->pending_crtcs_event.lock);
ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
!(priv->pending_crtcs & crtc_mask));
if (ret == 0) {
DBG("start: %08x", crtc_mask);
priv->pending_crtcs |= crtc_mask;
}
spin_unlock(&priv->pending_crtcs_event.lock);
return ret;
}
/* clear specified crtcs (no longer pending update)
*/
static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
spin_lock(&priv->pending_crtcs_event.lock);
DBG("end: %08x", crtc_mask);
priv->pending_crtcs &= ~crtc_mask;
wake_up_all_locked(&priv->pending_crtcs_event);
spin_unlock(&priv->pending_crtcs_event.lock);
}
static struct msm_commit *commit_init(struct drm_atomic_state *state)
{
struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return NULL;
c->dev = state->dev;
c->state = state;
	/* TODO we might need a way to run the cb on a different wq so
	 * that wait_for_vblanks() doesn't block retiring
	 * bo's..
*/
INIT_FENCE_CB(&c->fence_cb, fence_cb);
return c;
}
static void commit_destroy(struct msm_commit *c)
{
end_atomic(c->dev->dev_private, c->crtc_mask);
kfree(c);
}
static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
struct msm_drm_private *priv = old_state->dev->dev_private;
struct msm_kms *kms = priv->kms;
int ncrtcs = old_state->dev->mode_config.num_crtc;
int i;
for (i = 0; i < ncrtcs; i++) {
crtc = old_state->crtcs[i];
if (!crtc)
continue;
if (!crtc->state->enable)
continue;
/* Legacy cursor ioctls are completely unsynced, and userspace
* relies on that (by doing tons of cursor updates). */
if (old_state->legacy_cursor_update)
continue;
kms->funcs->wait_for_crtc_commit_done(kms, crtc);
}
}
/* The (potentially) asynchronous part of the commit. At this point
* nothing can fail short of armageddon.
*/
static void complete_commit(struct msm_commit *c)
{
struct drm_atomic_state *state = c->state;
struct drm_device *dev = state->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
kms->funcs->prepare_commit(kms, state);
drm_atomic_helper_commit_modeset_disables(dev, state);
drm_atomic_helper_commit_planes(dev, state);
drm_atomic_helper_commit_modeset_enables(dev, state);
/* NOTE: _wait_for_vblanks() only waits for vblank on
* enabled CRTCs. So we end up faulting when disabling
* due to (potentially) unref'ing the outgoing fb's
* before the vblank when the disable has latched.
*
	 * But if it did wait on disabled (or newly disabled)
	 * CRTCs, that would be racy (i.e. we could have missed
	 * the irq).  We need some way to poll for pipe shut
	 * down.  Or just live with occasionally hitting the
	 * timeout in the CRTC disable path (which really should
	 * not be a critical path).
*/
msm_atomic_wait_for_commit_done(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
kms->funcs->complete_commit(kms, state);
drm_atomic_state_free(state);
commit_destroy(c);
}
static void fence_cb(struct msm_fence_cb *cb)
{
struct msm_commit *c =
container_of(cb, struct msm_commit, fence_cb);
complete_commit(c);
}
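/* Track the newest GPU fence covering a framebuffer's backing bo so
 * the commit can wait for rendering to finish before scanout.
 */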
static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
{
struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0);
c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
}
int msm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
int ret;
/*
* msm ->atomic_check can update ->mode_changed for pixel format
* changes, hence must be run before we check the modeset changes.
*/
ret = drm_atomic_helper_check_planes(dev, state);
if (ret)
return ret;
ret = drm_atomic_helper_check_modeset(dev, state);
if (ret)
return ret;
return ret;
}
/**
* drm_atomic_helper_commit - commit validated state object
* @dev: DRM device
* @state: the driver state object
* @async: asynchronous commit
*
* This function commits a with drm_atomic_helper_check() pre-validated state
* object. This can still fail when e.g. the framebuffer reservation fails. For
* now this doesn't implement asynchronous commits.
*
* RETURNS
* Zero for success or -errno.
*/
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool async)
{
int nplanes = dev->mode_config.num_total_plane;
int ncrtcs = dev->mode_config.num_crtc;
ktime_t timeout;
struct msm_commit *c;
int i, ret;
ret = drm_atomic_helper_prepare_planes(dev, state);
if (ret)
return ret;
c = commit_init(state);
if (!c) {
ret = -ENOMEM;
goto error;
}
/*
* Figure out what crtcs we have:
*/
for (i = 0; i < ncrtcs; i++) {
struct drm_crtc *crtc = state->crtcs[i];
if (!crtc)
continue;
c->crtc_mask |= (1 << drm_crtc_index(crtc));
}
/*
* Figure out what fence to wait for:
*/
for (i = 0; i < nplanes; i++) {
struct drm_plane *plane = state->planes[i];
struct drm_plane_state *new_state = state->plane_states[i];
if (!plane)
continue;
if ((plane->state->fb != new_state->fb) && new_state->fb)
add_fb(c, new_state->fb);
}
/*
* Wait for pending updates on any of the same crtc's and then
* mark our set of crtc's as busy:
*/
ret = start_atomic(dev->dev_private, c->crtc_mask);
if (ret) {
kfree(c);
goto error;
}
/*
	 * This is the point of no return - nothing below can fail except
	 * when the hw goes bonghits, which means we can commit the new
	 * state on the software side now.
*/
drm_atomic_helper_swap_state(dev, state);
/*
* Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: it must be guaranteed
* that the asynchronous work has either been cancelled (if the driver
* supports it, which at least requires that the framebuffers get
* cleaned up with drm_atomic_helper_cleanup_planes()) or completed
* before the new state gets committed on the software side with
* drm_atomic_helper_swap_state().
*
* This scheme allows new atomic state updates to be prepared and
* checked in parallel to the asynchronous completion of the previous
* update. Which is important since compositors need to figure out the
* composition of the next frame right after having submitted the
* current layout.
*/
if (async) {
msm_queue_fence_cb(dev, &c->fence_cb, c->fence);
return 0;
}
timeout = ktime_add_ms(ktime_get(), 1000);
/* uninterruptible wait */
msm_wait_fence(dev, c->fence, &timeout, false);
complete_commit(c);
return 0;
error:
drm_atomic_helper_cleanup_planes(dev, state);
return ret;
}
| gpl-2.0 |
noplink/Openwet | target/linux/adm5120/files/arch/mips/adm5120/compex/wp54.c | 582 | 2395 | /*
* Compex WP54 board support
*
* Copyright (C) 2007-2008 Gabor Juhos <juhosg@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
*/
#include "compex.h"
#define WP54_KEYS_POLL_INTERVAL 20
#define WP54_KEYS_DEBOUNCE_INTERVAL (3 * WP54_KEYS_POLL_INTERVAL)
static struct mtd_partition wp54g_wrt_partitions[] = {
{
.name = "cfe",
.offset = 0,
.size = 0x050000,
.mask_flags = MTD_WRITEABLE,
} , {
.name = "trx",
.offset = MTDPART_OFS_APPEND,
.size = 0x3A0000,
} , {
.name = "nvram",
.offset = MTDPART_OFS_APPEND,
.size = 0x010000,
}
};
static struct adm5120_pci_irq wp54_pci_irqs[] __initdata = {
PCIIRQ(2, 0, 1, ADM5120_IRQ_PCI0),
};
static struct gpio_keys_button wp54_gpio_buttons[] __initdata = {
{
.desc = "reset_button",
.type = EV_KEY,
.code = KEY_RESTART,
.debounce_interval = WP54_KEYS_DEBOUNCE_INTERVAL,
.gpio = ADM5120_GPIO_PIN4,
}
};
static struct gpio_led wp54_gpio_leds[] __initdata = {
GPIO_LED_INV(ADM5120_GPIO_PIN2, "diag", NULL),
GPIO_LED_INV(ADM5120_GPIO_PIN6, "wlan", NULL),
GPIO_LED_INV(ADM5120_GPIO_PIN7, "wan", NULL),
GPIO_LED_INV(ADM5120_GPIO_P0L0, "lan1", NULL),
GPIO_LED_INV(ADM5120_GPIO_P1L0, "lan2", NULL),
};
static u8 wp54_vlans[6] __initdata = {
0x41, 0x42, 0x00, 0x00, 0x00, 0x00
};
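/* Board reset hook: drive GPIO3 low to trigger a reset. */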
static void wp54_reset(void)
{
gpio_set_value(ADM5120_GPIO_PIN3, 0);
}
static void __init wp54_setup(void)
{
compex_generic_setup();
/* setup reset line */
gpio_request(ADM5120_GPIO_PIN3, NULL);
gpio_direction_output(ADM5120_GPIO_PIN3, 1);
adm5120_board_reset = wp54_reset;
adm5120_add_device_switch(2, wp54_vlans);
adm5120_register_gpio_buttons(-1, WP54_KEYS_POLL_INTERVAL,
ARRAY_SIZE(wp54_gpio_buttons),
wp54_gpio_buttons);
adm5120_add_device_gpio_leds(ARRAY_SIZE(wp54_gpio_leds),
wp54_gpio_leds);
adm5120_pci_set_irq_map(ARRAY_SIZE(wp54_pci_irqs), wp54_pci_irqs);
}
MIPS_MACHINE(MACH_ADM5120_WP54, "WP54", "Compex WP54 family", wp54_setup);
static void __init wp54_wrt_setup(void)
{
adm5120_flash0_data.nr_parts = ARRAY_SIZE(wp54g_wrt_partitions);
adm5120_flash0_data.parts = wp54g_wrt_partitions;
wp54_setup();
}
MIPS_MACHINE(MACH_ADM5120_WP54G_WRT, "WP54G-WRT", "Compex WP54G-WRT",
wp54_wrt_setup);
| gpl-2.0 |
mathkid95/linux_samsung_gb | lib/decompress_bunzip2.c | 838 | 23922 | /* vi: set sw = 4 ts = 4: */
/* Small bzip2 deflate implementation, by Rob Landley (rob@landley.net).
Based on bzip2 decompression code by Julian R Seward (jseward@acm.org),
which also acknowledges contributions by Mike Burrows, David Wheeler,
Peter Fenwick, Alistair Moffat, Radford Neal, Ian H. Witten,
Robert Sedgewick, and Jon L. Bentley.
This code is licensed under the LGPLv2:
LGPL (http://www.gnu.org/copyleft/lgpl.html)
*/
/*
Size and speed optimizations by Manuel Novoa III (mjn3@codepoet.org).
More efficient reading of Huffman codes, a streamlined read_bunzip()
function, and various other tweaks. In (limited) tests, approximately
20% faster than bzcat on x86 and about 10% faster on arm.
Note that about 2/3 of the time is spent in read_bunzip() reversing
the Burrows-Wheeler transformation. Much of that time is delay
resulting from cache misses.
I would ask that anyone benefiting from this work, especially those
using it in commercial products, consider making a donation to my local
non-profit hospice organization in the name of the woman I loved, who
passed away Feb. 12, 2003.
In memory of Toni W. Hagan
Hospice of Acadiana, Inc.
2600 Johnston St., Suite 200
Lafayette, LA 70503-3240
Phone (337) 232-1234 or 1-800-738-2226
Fax (337) 232-1297
http://www.hospiceacadiana.com/
Manuel
*/
/*
Made it fit for running in Linux Kernel by Alain Knaff (alain@knaff.lu)
*/
#ifdef STATIC
#define PREBOOT
#else
#include <linux/decompress/bunzip2.h>
#include <linux/slab.h>
#endif /* STATIC */
#include <linux/decompress/mm.h>
#ifndef INT_MAX
#define INT_MAX 0x7fffffff
#endif
/* Constants for Huffman coding */
#define MAX_GROUPS 6
#define GROUP_SIZE 50 /* 64 would have been more efficient */
#define MAX_HUFCODE_BITS 20 /* Longest Huffman code allowed */
#define MAX_SYMBOLS 258 /* 256 literals + RUNA + RUNB */
#define SYMBOL_RUNA 0
#define SYMBOL_RUNB 1
/* Status return values */
#define RETVAL_OK 0
#define RETVAL_LAST_BLOCK (-1)
#define RETVAL_NOT_BZIP_DATA (-2)
#define RETVAL_UNEXPECTED_INPUT_EOF (-3)
#define RETVAL_UNEXPECTED_OUTPUT_EOF (-4)
#define RETVAL_DATA_ERROR (-5)
#define RETVAL_OUT_OF_MEMORY (-6)
#define RETVAL_OBSOLETE_INPUT (-7)
/* Other housekeeping constants */
#define BZIP2_IOBUF_SIZE 4096
/* This is what we know about each Huffman coding group */
struct group_data {
/* We have an extra slot at the end of limit[] for a sentinel value. */
int limit[MAX_HUFCODE_BITS+1];
int base[MAX_HUFCODE_BITS];
int permute[MAX_SYMBOLS];
int minLen, maxLen;
};
/* Structure holding all the housekeeping data, including IO buffers and
memory that persists between calls to bunzip */
struct bunzip_data {
/* State for interrupting output loop */
int writeCopies, writePos, writeRunCountdown, writeCount, writeCurrent;
/* I/O tracking data (file handles, buffers, positions, etc.) */
int (*fill)(void*, unsigned int);
int inbufCount, inbufPos /*, outbufPos*/;
unsigned char *inbuf /*,*outbuf*/;
unsigned int inbufBitCount, inbufBits;
/* The CRC values stored in the block header and calculated from the
data */
unsigned int crc32Table[256], headerCRC, totalCRC, writeCRC;
/* Intermediate buffer and its size (in bytes) */
unsigned int *dbuf, dbufSize;
/* These things are a bit too big to go on the stack */
unsigned char selectors[32768]; /* nSelectors = 15 bits */
struct group_data groups[MAX_GROUPS]; /* Huffman coding tables */
int io_error; /* non-zero if we have IO error */
};
/* Return the next nnn bits of input. All reads from the compressed input
are done through this function. All reads are big endian */
static unsigned int INIT get_bits(struct bunzip_data *bd, char bits_wanted)
{
unsigned int bits = 0;
/* If we need to get more data from the byte buffer, do so.
(Loop getting one byte at a time to enforce endianness and avoid
unaligned access.) */
while (bd->inbufBitCount < bits_wanted) {
/* If we need to read more data from file into byte buffer, do
so */
if (bd->inbufPos == bd->inbufCount) {
if (bd->io_error)
return 0;
bd->inbufCount = bd->fill(bd->inbuf, BZIP2_IOBUF_SIZE);
if (bd->inbufCount <= 0) {
bd->io_error = RETVAL_UNEXPECTED_INPUT_EOF;
return 0;
}
bd->inbufPos = 0;
}
/* Avoid 32-bit overflow (dump bit buffer to top of output) */
if (bd->inbufBitCount >= 24) {
bits = bd->inbufBits&((1 << bd->inbufBitCount)-1);
bits_wanted -= bd->inbufBitCount;
bits <<= bits_wanted;
bd->inbufBitCount = 0;
}
/* Grab next 8 bits of input from buffer. */
bd->inbufBits = (bd->inbufBits << 8)|bd->inbuf[bd->inbufPos++];
bd->inbufBitCount += 8;
}
/* Calculate result */
bd->inbufBitCount -= bits_wanted;
bits |= (bd->inbufBits >> bd->inbufBitCount)&((1 << bits_wanted)-1);
return bits;
}
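/*
 * Worked example (illustrative only): with 8 bits buffered and
 * inbufBits ending in 0b10110100, get_bits(bd, 3) drops inbufBitCount
 * from 8 to 5 and returns (0b10110100 >> 5) & 0b111 == 0b101, i.e. the
 * three oldest buffered bits, preserving big-endian stream order.
 */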
/* Unpacks the next block and sets up for the inverse Burrows-Wheeler step. */
static int INIT get_next_block(struct bunzip_data *bd)
{
struct group_data *hufGroup = NULL;
int *base = NULL;
int *limit = NULL;
int dbufCount, nextSym, dbufSize, groupCount, selector,
i, j, k, t, runPos, symCount, symTotal, nSelectors,
byteCount[256];
unsigned char uc, symToByte[256], mtfSymbol[256], *selectors;
unsigned int *dbuf, origPtr;
dbuf = bd->dbuf;
dbufSize = bd->dbufSize;
selectors = bd->selectors;
/* Read in header signature and CRC, then validate signature.
(last block signature means CRC is for whole file, return now) */
i = get_bits(bd, 24);
j = get_bits(bd, 24);
bd->headerCRC = get_bits(bd, 32);
if ((i == 0x177245) && (j == 0x385090))
return RETVAL_LAST_BLOCK;
if ((i != 0x314159) || (j != 0x265359))
return RETVAL_NOT_BZIP_DATA;
/* We can add support for blockRandomised if anybody complains.
There was some code for this in busybox 1.0.0-pre3, but nobody ever
noticed that it didn't actually work. */
if (get_bits(bd, 1))
return RETVAL_OBSOLETE_INPUT;
origPtr = get_bits(bd, 24);
if (origPtr > dbufSize)
return RETVAL_DATA_ERROR;
/* mapping table: if some byte values are never used (encoding things
like ascii text), the compression code removes the gaps to have fewer
symbols to deal with, and writes a sparse bitfield indicating which
values were present. We make a translation table to convert the
symbols back to the corresponding bytes. */
t = get_bits(bd, 16);
symTotal = 0;
for (i = 0; i < 16; i++) {
if (t&(1 << (15-i))) {
k = get_bits(bd, 16);
for (j = 0; j < 16; j++)
if (k&(1 << (15-j)))
symToByte[symTotal++] = (16*i)+j;
}
}
/* How many different Huffman coding groups does this block use? */
groupCount = get_bits(bd, 3);
if (groupCount < 2 || groupCount > MAX_GROUPS)
return RETVAL_DATA_ERROR;
/* nSelectors: Every GROUP_SIZE many symbols we select a new
Huffman coding group. Read in the group selector list,
which is stored as MTF encoded bit runs. (MTF = Move To
Front, as each value is used it's moved to the start of the
list.) */
nSelectors = get_bits(bd, 15);
if (!nSelectors)
return RETVAL_DATA_ERROR;
for (i = 0; i < groupCount; i++)
mtfSymbol[i] = i;
for (i = 0; i < nSelectors; i++) {
/* Get next value */
for (j = 0; get_bits(bd, 1); j++)
if (j >= groupCount)
return RETVAL_DATA_ERROR;
/* Decode MTF to get the next selector */
uc = mtfSymbol[j];
for (; j; j--)
mtfSymbol[j] = mtfSymbol[j-1];
mtfSymbol[0] = selectors[i] = uc;
}
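/*
 * MTF example (illustrative only): with groupCount == 3 the table
 * starts as {0, 1, 2}. Reading two 1-bits then a 0 gives j == 2, so
 * selector 2 is emitted and moved to the front, leaving {2, 0, 1}; a
 * lone 0-bit next (j == 0) re-selects group 2 at a cost of one bit.
 */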
/* Read the Huffman coding tables for each group, which code
for symTotal literal symbols, plus two run symbols (RUNA,
RUNB) */
symCount = symTotal+2;
for (j = 0; j < groupCount; j++) {
unsigned char length[MAX_SYMBOLS], temp[MAX_HUFCODE_BITS+1];
int minLen, maxLen, pp;
/* Read Huffman code lengths for each symbol. They're
stored in a way similar to mtf; record a starting
value for the first symbol, and an offset from the
previous value for every symbol after that.
(Subtracting 1 before the loop and then adding it
back at the end is an optimization that makes the
test inside the loop simpler: symbol length 0
becomes negative, so an unsigned inequality catches
it.) */
t = get_bits(bd, 5)-1;
for (i = 0; i < symCount; i++) {
for (;;) {
if (((unsigned)t) > (MAX_HUFCODE_BITS-1))
return RETVAL_DATA_ERROR;
/* If first bit is 0, stop. Else
second bit indicates whether to
increment or decrement the value.
Optimization: grab 2 bits and unget
the second if the first was 0. */
k = get_bits(bd, 2);
if (k < 2) {
bd->inbufBitCount++;
break;
}
/* Add one if second bit 1, else
* subtract 1. Avoids if/else */
t += (((k+1)&2)-1);
}
/* Correct for the initial -1, to get the
* final symbol length */
length[i] = t+1;
}
/* Find largest and smallest lengths in this group */
minLen = maxLen = length[0];
for (i = 1; i < symCount; i++) {
if (length[i] > maxLen)
maxLen = length[i];
else if (length[i] < minLen)
minLen = length[i];
}
/* Calculate permute[], base[], and limit[] tables from
* length[].
*
* permute[] is the lookup table for converting
* Huffman coded symbols into decoded symbols. base[]
* is the amount to subtract from the value of a
* Huffman symbol of a given length when using
* permute[].
*
* limit[] indicates the largest numerical value a
* symbol with a given number of bits can have. This
* is how the Huffman codes can vary in length: each
* code with a value > limit[length] needs another
* bit.
*/
hufGroup = bd->groups+j;
hufGroup->minLen = minLen;
hufGroup->maxLen = maxLen;
/* Note that minLen can't be smaller than 1, so we
adjust the base and limit array pointers so we're
not always wasting the first entry. We do this
again when using them (during symbol decoding).*/
base = hufGroup->base-1;
limit = hufGroup->limit-1;
/* Calculate permute[]. Concurrently, initialize
* temp[] and limit[]. */
pp = 0;
for (i = minLen; i <= maxLen; i++) {
temp[i] = limit[i] = 0;
for (t = 0; t < symCount; t++)
if (length[t] == i)
hufGroup->permute[pp++] = t;
}
/* Count symbols coded for at each bit length */
for (i = 0; i < symCount; i++)
temp[length[i]]++;
/* Calculate limit[] (the largest symbol-coding value
* at each bit length, which is (previous limit <<
* 1) + symbols at this level), and base[] (number of
* symbols to ignore at each bit length, which is limit
* minus the cumulative count of symbols coded for
* already). */
pp = t = 0;
for (i = minLen; i < maxLen; i++) {
pp += temp[i];
/* We read the largest possible symbol size
and then unget bits after determining how
many we need, and those extra bits could be
set to anything. (They're noise from
future symbols.) At each level we're
really only interested in the first few
bits, so here we set all the trailing
to-be-ignored bits to 1 so they don't
affect the value > limit[length]
comparison. */
limit[i] = (pp << (maxLen - i)) - 1;
pp <<= 1;
base[i+1] = pp-(t += temp[i]);
}
limit[maxLen+1] = INT_MAX; /* Sentinel value for
* reading next sym. */
limit[maxLen] = pp+temp[maxLen]-1;
base[minLen] = 0;
}
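/*
 * Worked example (illustrative only): four symbols with code lengths
 * {1, 2, 3, 3} give permute[] == {s0, s1, s2, s3}, limit[1..3] ==
 * {3, 5, 7} and base[2..3] == {1, 4}. Reading maxLen == 3 bits, the
 * pattern 110 yields j == 6 > limit[2], so i == 3 and the decoded
 * symbol is permute[6 - base[3]] == permute[2] == s2.
 */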
/* We've finished reading and digesting the block header. Now
read this block's Huffman coded symbols from the file and
undo the Huffman coding and run length encoding, saving the
result into dbuf[dbufCount++] = uc */
/* Initialize symbol occurrence counters and symbol Move To
* Front table */
for (i = 0; i < 256; i++) {
byteCount[i] = 0;
mtfSymbol[i] = (unsigned char)i;
}
/* Loop through compressed symbols. */
runPos = dbufCount = symCount = selector = 0;
for (;;) {
/* Determine which Huffman coding group to use. */
if (!(symCount--)) {
symCount = GROUP_SIZE-1;
if (selector >= nSelectors)
return RETVAL_DATA_ERROR;
hufGroup = bd->groups+selectors[selector++];
base = hufGroup->base-1;
limit = hufGroup->limit-1;
}
/* Read next Huffman-coded symbol. */
/* Note: It is far cheaper to read maxLen bits and
back up than it is to read minLen bits and then an
additional bit at a time, testing as we go.
Because there is a trailing last block (with file
CRC), there is no danger of the overread causing an
unexpected EOF for a valid compressed file. As a
further optimization, we do the read inline
(falling back to a call to get_bits if the buffer
runs dry). The following (up to got_huff_bits:) is
equivalent to j = get_bits(bd, hufGroup->maxLen);
*/
while (bd->inbufBitCount < hufGroup->maxLen) {
if (bd->inbufPos == bd->inbufCount) {
j = get_bits(bd, hufGroup->maxLen);
goto got_huff_bits;
}
bd->inbufBits =
(bd->inbufBits << 8)|bd->inbuf[bd->inbufPos++];
bd->inbufBitCount += 8;
}
bd->inbufBitCount -= hufGroup->maxLen;
j = (bd->inbufBits >> bd->inbufBitCount)&
((1 << hufGroup->maxLen)-1);
got_huff_bits:
/* Figure out how many bits are in next symbol and
* unget extras */
i = hufGroup->minLen;
while (j > limit[i])
++i;
bd->inbufBitCount += (hufGroup->maxLen - i);
/* Huffman decode value to get nextSym (with bounds checking) */
if ((i > hufGroup->maxLen)
|| (((unsigned)(j = (j>>(hufGroup->maxLen-i))-base[i]))
>= MAX_SYMBOLS))
return RETVAL_DATA_ERROR;
nextSym = hufGroup->permute[j];
/* We have now decoded the symbol, which indicates
either a new literal byte, or a repeated run of the
most recent literal byte. First, check if nextSym
indicates a repeated run, and if so loop collecting
how many times to repeat the last literal. */
if (((unsigned)nextSym) <= SYMBOL_RUNB) { /* RUNA or RUNB */
/* If this is the start of a new run, zero out
* counter */
if (!runPos) {
runPos = 1;
t = 0;
}
/* Neat trick that saves 1 symbol: instead of
or-ing 0 or 1 at each bit position, add 1
or 2 instead. For example, 1011 is 1 << 0
+ 1 << 1 + 2 << 2. 1010 is 2 << 0 + 2 << 1
+ 1 << 2. You can make any bit pattern
that way using 1 less symbol than the basic
or 0/1 method (except all bits 0, which
would use no symbols, but a run of length 0
doesn't mean anything in this context).
Thus space is saved. */
t += (runPos << nextSym);
/* +runPos if RUNA; +2*runPos if RUNB */
runPos <<= 1;
continue;
}
/* When we hit the first non-run symbol after a run,
we now know how many times to repeat the last
literal, so append that many copies to our buffer
of decoded symbols (dbuf) now. (The last literal
used is the one at the head of the mtfSymbol
array.) */
if (runPos) {
runPos = 0;
if (dbufCount+t >= dbufSize)
return RETVAL_DATA_ERROR;
uc = symToByte[mtfSymbol[0]];
byteCount[uc] += t;
while (t--)
dbuf[dbufCount++] = uc;
}
/* Is this the terminating symbol? */
if (nextSym > symTotal)
break;
/* At this point, nextSym indicates a new literal
character. Subtract one to get the position in the
MTF array at which this literal is currently to be
found. (Note that the result can't be -1 or 0,
because 0 and 1 are RUNA and RUNB. But another
instance of the first symbol in the mtf array,
position 0, would have been handled as part of a
run above. Therefore 1 unused mtf position minus 2
non-literal nextSym values equals -1.) */
if (dbufCount >= dbufSize)
return RETVAL_DATA_ERROR;
i = nextSym - 1;
uc = mtfSymbol[i];
/* Adjust the MTF array. Since we typically expect to
* move only a small number of symbols, and are bound
* by 256 in any case, using memmove here would
* typically be bigger and slower due to function call
* overhead and other assorted setup costs. */
do {
mtfSymbol[i] = mtfSymbol[i-1];
} while (--i);
mtfSymbol[0] = uc;
uc = symToByte[uc];
/* We have our literal byte. Save it into dbuf. */
byteCount[uc]++;
dbuf[dbufCount++] = (unsigned int)uc;
}
/* At this point, we've read all the Huffman-coded symbols
(and repeated runs) for this block from the input stream,
and decoded them into the intermediate buffer. There are
dbufCount many decoded bytes in dbuf[]. Now undo the
Burrows-Wheeler transform on dbuf. See
http://dogma.net/markn/articles/bwt/bwt.htm
*/
/* Turn byteCount into cumulative occurrence counts of 0 to n-1. */
j = 0;
for (i = 0; i < 256; i++) {
k = j+byteCount[i];
byteCount[i] = j;
j = k;
}
/* Figure out what order dbuf would be in if we sorted it. */
for (i = 0; i < dbufCount; i++) {
uc = (unsigned char)(dbuf[i] & 0xff);
dbuf[byteCount[uc]] |= (i << 8);
byteCount[uc]++;
}
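/* Each dbuf entry now packs a decoded byte in its low 8 bits and, in
the upper 24 bits, the index of the next entry to visit; starting from
origPtr, read_bunzip() chases these links to emit the data in its
original (pre-BWT) order. */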
/* Decode first byte by hand to initialize "previous" byte.
Note that it doesn't get output, and if the first three
characters are identical it doesn't qualify as a run (hence
writeRunCountdown = 5). */
if (dbufCount) {
if (origPtr >= dbufCount)
return RETVAL_DATA_ERROR;
bd->writePos = dbuf[origPtr];
bd->writeCurrent = (unsigned char)(bd->writePos&0xff);
bd->writePos >>= 8;
bd->writeRunCountdown = 5;
}
bd->writeCount = dbufCount;
return RETVAL_OK;
}
/* Undo Burrows-Wheeler transform on intermediate buffer to produce output.
If start_bunzip was initialized with out_fd == -1, then up to len bytes of
data are written to outbuf. Return value is number of bytes written or
error (all errors are negative numbers). If out_fd != -1, outbuf and len
are ignored, data is written to out_fd and return is RETVAL_OK or error.
*/
static int INIT read_bunzip(struct bunzip_data *bd, char *outbuf, int len)
{
const unsigned int *dbuf;
int pos, xcurrent, previous, gotcount;
/* If last read was short due to end of file, return last block now */
if (bd->writeCount < 0)
return bd->writeCount;
gotcount = 0;
dbuf = bd->dbuf;
pos = bd->writePos;
xcurrent = bd->writeCurrent;
/* We will always have pending decoded data to write into the output
buffer unless this is the very first call (in which case we haven't
Huffman-decoded a block into the intermediate buffer yet). */
if (bd->writeCopies) {
/* Inside the loop, writeCopies means extra copies (beyond 1) */
--bd->writeCopies;
/* Loop outputting bytes */
for (;;) {
/* If the output buffer is full, snapshot
* state and return */
if (gotcount >= len) {
bd->writePos = pos;
bd->writeCurrent = xcurrent;
bd->writeCopies++;
return len;
}
/* Write next byte into output buffer, updating CRC */
outbuf[gotcount++] = xcurrent;
bd->writeCRC = (((bd->writeCRC) << 8)
^bd->crc32Table[((bd->writeCRC) >> 24)
^xcurrent]);
/* Loop now if we're outputting multiple
* copies of this byte */
if (bd->writeCopies) {
--bd->writeCopies;
continue;
}
decode_next_byte:
if (!bd->writeCount--)
break;
/* Follow sequence vector to undo
* Burrows-Wheeler transform */
previous = xcurrent;
pos = dbuf[pos];
xcurrent = pos&0xff;
pos >>= 8;
/* After 3 consecutive copies of the same
byte, the 4th is a repeat count. We count
down from 4 instead of counting up because
testing for non-zero is faster */
if (--bd->writeRunCountdown) {
if (xcurrent != previous)
bd->writeRunCountdown = 4;
} else {
/* We have a repeated run, this byte
* indicates the count */
bd->writeCopies = xcurrent;
xcurrent = previous;
bd->writeRunCountdown = 5;
/* Sometimes there are just 3 bytes
* (run length 0) */
if (!bd->writeCopies)
goto decode_next_byte;
/* Subtract the 1 copy we'd output
* anyway to get extras */
--bd->writeCopies;
}
}
/* Decompression of this block completed successfully */
bd->writeCRC = ~bd->writeCRC;
bd->totalCRC = ((bd->totalCRC << 1) |
(bd->totalCRC >> 31)) ^ bd->writeCRC;
/* If this block had a CRC error, force file level CRC error. */
if (bd->writeCRC != bd->headerCRC) {
bd->totalCRC = bd->headerCRC+1;
return RETVAL_LAST_BLOCK;
}
}
/* Refill the intermediate buffer by Huffman-decoding next
* block of input */
/* (previous is just a convenient unused temp variable here) */
previous = get_next_block(bd);
if (previous) {
bd->writeCount = previous;
return (previous != RETVAL_LAST_BLOCK) ? previous : gotcount;
}
bd->writeCRC = 0xffffffffUL;
pos = bd->writePos;
xcurrent = bd->writeCurrent;
goto decode_next_byte;
}
static int INIT nofill(void *buf, unsigned int len)
{
return -1;
}
/* Allocate the structure, read file header. If in_fd == -1, inbuf must contain
a complete bunzip file (len bytes long). If in_fd != -1, inbuf and len are
ignored, and data is read from file handle into temporary buffer. */
static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len,
int (*fill)(void*, unsigned int))
{
struct bunzip_data *bd;
unsigned int i, j, c;
const unsigned int BZh0 =
(((unsigned int)'B') << 24)+(((unsigned int)'Z') << 16)
+(((unsigned int)'h') << 8)+(unsigned int)'0';
/* Figure out how much data to allocate */
i = sizeof(struct bunzip_data);
/* Allocate bunzip_data. Most fields initialize to zero. */
bd = *bdp = malloc(i);
if (!bd)
return RETVAL_OUT_OF_MEMORY;
memset(bd, 0, sizeof(struct bunzip_data));
/* Setup input buffer */
bd->inbuf = inbuf;
bd->inbufCount = len;
if (fill != NULL)
bd->fill = fill;
else
bd->fill = nofill;
/* Init the CRC32 table (big endian) */
for (i = 0; i < 256; i++) {
c = i << 24;
for (j = 8; j; j--)
c = c&0x80000000 ? (c << 1)^0x04c11db7 : (c << 1);
bd->crc32Table[i] = c;
}
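/*
 * The table is consumed one byte at a time in read_bunzip(); the update
 * step there is, in effect:
 *
 * crc = (crc << 8) ^ bd->crc32Table[(crc >> 24) ^ byte];
 */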
/* Ensure that file starts with "BZh['1'-'9']." */
i = get_bits(bd, 32);
if (((unsigned int)(i-BZh0-1)) >= 9)
return RETVAL_NOT_BZIP_DATA;
/* Fourth byte (ASCII '1'-'9') indicates block size in units of 100k of
uncompressed data. Allocate intermediate buffer for block. */
bd->dbufSize = 100000*(i-BZh0);
bd->dbuf = large_malloc(bd->dbufSize * sizeof(int));
if (!bd->dbuf)
return RETVAL_OUT_OF_MEMORY;
return RETVAL_OK;
}
/* Example usage: decompress src_fd to dst_fd. (Stops at end of bzip2 data,
not end of file.) */
STATIC int INIT bunzip2(unsigned char *buf, int len,
int(*fill)(void*, unsigned int),
int(*flush)(void*, unsigned int),
unsigned char *outbuf,
int *pos,
void(*error_fn)(char *x))
{
struct bunzip_data *bd;
int i = -1;
unsigned char *inbuf;
set_error_fn(error_fn);
if (flush)
outbuf = malloc(BZIP2_IOBUF_SIZE);
if (!outbuf) {
error("Could not allocate output bufer");
return RETVAL_OUT_OF_MEMORY;
}
if (buf)
inbuf = buf;
else
inbuf = malloc(BZIP2_IOBUF_SIZE);
if (!inbuf) {
error("Could not allocate input bufer");
i = RETVAL_OUT_OF_MEMORY;
goto exit_0;
}
i = start_bunzip(&bd, inbuf, len, fill);
if (!i) {
for (;;) {
i = read_bunzip(bd, outbuf, BZIP2_IOBUF_SIZE);
if (i <= 0)
break;
if (!flush)
outbuf += i;
else
if (i != flush(outbuf, i)) {
i = RETVAL_UNEXPECTED_OUTPUT_EOF;
break;
}
}
}
/* Check CRC and release memory */
if (i == RETVAL_LAST_BLOCK) {
if (bd->headerCRC != bd->totalCRC)
error("Data integrity error when decompressing.");
else
i = RETVAL_OK;
} else if (i == RETVAL_UNEXPECTED_OUTPUT_EOF) {
error("Compressed file ends unexpectedly");
}
if (!bd)
goto exit_1;
if (bd->dbuf)
large_free(bd->dbuf);
if (pos)
*pos = bd->inbufPos;
free(bd);
exit_1:
if (!buf)
free(inbuf);
exit_0:
if (flush)
free(outbuf);
return i;
}
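/*
 * Usage sketch (hypothetical caller, compiled out): decompress an
 * in-memory bzip2 image "src" of src_len bytes into "dst", which must
 * be large enough for the whole output; no fill/flush callbacks.
 */
#if 0
static int example_decompress(unsigned char *src, int src_len,
unsigned char *dst, void (*error_fn)(char *x))
{
int pos;
/* On success err == RETVAL_OK and pos is the input position reached. */
int err = bunzip2(src, src_len, NULL, NULL, dst, &pos, error_fn);
return err;
}
#endif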
#ifdef PREBOOT
STATIC int INIT decompress(unsigned char *buf, int len,
int(*fill)(void*, unsigned int),
int(*flush)(void*, unsigned int),
unsigned char *outbuf,
int *pos,
void(*error_fn)(char *x))
{
return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error_fn);
}
#endif
| gpl-2.0 |
dagnarf/sgh-i717-dagkernel | drivers/ata/pata_rb532_cf.c | 1094 | 5601 | /*
* A low-level PATA driver to handle a Compact Flash connected on the
* Mikrotik's RouterBoard 532 board.
*
* Copyright (C) 2007 Gabor Juhos <juhosg at openwrt.org>
* Copyright (C) 2008 Florian Fainelli <florian@openwrt.org>
*
* This file was based on: drivers/ata/pata_ixp4xx_cf.c
* Copyright (C) 2006-07 Tower Technologies
* Author: Alessandro Zummo <a.zummo@towertech.it>
*
* Also was based on the driver for Linux 2.4.xx published by Mikrotik for
* their RouterBoard 1xx and 5xx series devices. The original Mikrotik code
* seems not to have a license.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/libata.h>
#include <scsi/scsi_host.h>
#include <asm/gpio.h>
#define DRV_NAME "pata-rb532-cf"
#define DRV_VERSION "0.1.0"
#define DRV_DESC "PATA driver for RouterBOARD 532 Compact Flash"
#define RB500_CF_MAXPORTS 1
#define RB500_CF_IO_DELAY 400
#define RB500_CF_REG_BASE 0x0800
#define RB500_CF_REG_ERR 0x080D
#define RB500_CF_REG_CTRL 0x080E
/* 32bit buffered data register offset */
#define RB500_CF_REG_DBUF32 0x0C00
struct rb532_cf_info {
void __iomem *iobase;
unsigned int gpio_line;
unsigned int irq;
};
/* ------------------------------------------------------------------------ */
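/*
 * The CF interrupt line is a level-sensitive GPIO, so the handler below
 * alternates the trigger polarity on each invocation: service the ATA
 * interrupt while the line is high, then re-arm for the opposite level
 * so the next transition is caught without the line staying asserted.
 */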
static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance)
{
struct ata_host *ah = dev_instance;
struct rb532_cf_info *info = ah->private_data;
if (gpio_get_value(info->gpio_line)) {
set_irq_type(info->irq, IRQ_TYPE_LEVEL_LOW);
ata_sff_interrupt(info->irq, dev_instance);
} else {
set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH);
}
return IRQ_HANDLED;
}
static struct ata_port_operations rb532_pata_port_ops = {
.inherits = &ata_sff_port_ops,
.sff_data_xfer = ata_sff_data_xfer32,
};
/* ------------------------------------------------------------------------ */
static struct scsi_host_template rb532_pata_sht = {
ATA_PIO_SHT(DRV_NAME),
};
/* ------------------------------------------------------------------------ */
static void rb532_pata_setup_ports(struct ata_host *ah)
{
struct rb532_cf_info *info = ah->private_data;
struct ata_port *ap;
ap = ah->ports[0];
ap->ops = &rb532_pata_port_ops;
ap->pio_mask = ATA_PIO4;
ap->flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO;
ap->ioaddr.cmd_addr = info->iobase + RB500_CF_REG_BASE;
ap->ioaddr.ctl_addr = info->iobase + RB500_CF_REG_CTRL;
ap->ioaddr.altstatus_addr = info->iobase + RB500_CF_REG_CTRL;
ata_sff_std_ports(&ap->ioaddr);
ap->ioaddr.data_addr = info->iobase + RB500_CF_REG_DBUF32;
ap->ioaddr.error_addr = info->iobase + RB500_CF_REG_ERR;
}
static __devinit int rb532_pata_driver_probe(struct platform_device *pdev)
{
int irq;
int gpio;
struct resource *res;
struct ata_host *ah;
struct rb532_cf_info *info;
int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "no IOMEM resource found\n");
return -EINVAL;
}
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
dev_err(&pdev->dev, "no IRQ resource found\n");
return -ENOENT;
}
gpio = irq_to_gpio(irq);
if (gpio < 0) {
dev_err(&pdev->dev, "no GPIO found for irq%d\n", irq);
return -ENOENT;
}
ret = gpio_request(gpio, DRV_NAME);
if (ret) {
dev_err(&pdev->dev, "GPIO request failed\n");
return ret;
}
/* allocate host */
ah = ata_host_alloc(&pdev->dev, RB500_CF_MAXPORTS);
if (!ah)
return -ENOMEM;
platform_set_drvdata(pdev, ah);
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
ah->private_data = info;
info->gpio_line = gpio;
info->irq = irq;
info->iobase = devm_ioremap_nocache(&pdev->dev, res->start,
resource_size(res));
if (!info->iobase)
return -ENOMEM;
ret = gpio_direction_input(gpio);
if (ret) {
dev_err(&pdev->dev, "unable to set GPIO direction, err=%d\n",
ret);
goto err_free_gpio;
}
rb532_pata_setup_ports(ah);
ret = ata_host_activate(ah, irq, rb532_pata_irq_handler,
IRQF_TRIGGER_LOW, &rb532_pata_sht);
if (ret)
goto err_free_gpio;
return 0;
err_free_gpio:
gpio_free(gpio);
return ret;
}
static __devexit int rb532_pata_driver_remove(struct platform_device *pdev)
{
struct ata_host *ah = platform_get_drvdata(pdev);
struct rb532_cf_info *info = ah->private_data;
ata_host_detach(ah);
gpio_free(info->gpio_line);
return 0;
}
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:" DRV_NAME);
static struct platform_driver rb532_pata_platform_driver = {
.probe = rb532_pata_driver_probe,
.remove = __devexit_p(rb532_pata_driver_remove),
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
},
};
/* ------------------------------------------------------------------------ */
#define DRV_INFO DRV_DESC " version " DRV_VERSION
static int __init rb532_pata_module_init(void)
{
printk(KERN_INFO DRV_INFO "\n");
return platform_driver_register(&rb532_pata_platform_driver);
}
static void __exit rb532_pata_module_exit(void)
{
platform_driver_unregister(&rb532_pata_platform_driver);
}
MODULE_AUTHOR("Gabor Juhos <juhosg at openwrt.org>");
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
MODULE_DESCRIPTION(DRV_DESC);
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
module_init(rb532_pata_module_init);
module_exit(rb532_pata_module_exit);
| gpl-2.0 |
huhuikevin/kernel_imx | arch/arm/mach-msm/devices-qsd8x50.c | 1350 | 8517 | /*
* Copyright (C) 2008 Google, Inc.
* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/clkdev.h>
#include <linux/dma-mapping.h>
#include <mach/irqs.h>
#include <mach/msm_iomap.h>
#include <mach/dma.h>
#include <mach/board.h>
#include "devices.h"
#include <asm/mach/flash.h>
#include <mach/mmc.h>
#include "clock-pcom.h"
static struct resource resources_uart3[] = {
{
.start = INT_UART3,
.end = INT_UART3,
.flags = IORESOURCE_IRQ,
},
{
.start = MSM_UART3_PHYS,
.end = MSM_UART3_PHYS + MSM_UART3_SIZE - 1,
.flags = IORESOURCE_MEM,
.name = "uart_resource"
},
};
struct platform_device msm_device_uart3 = {
.name = "msm_serial",
.id = 2,
.num_resources = ARRAY_SIZE(resources_uart3),
.resource = resources_uart3,
};
struct platform_device msm_device_smd = {
.name = "msm_smd",
.id = -1,
};
static struct resource resources_otg[] = {
{
.start = MSM_HSUSB_PHYS,
.end = MSM_HSUSB_PHYS + MSM_HSUSB_SIZE,
.flags = IORESOURCE_MEM,
},
{
.start = INT_USB_HS,
.end = INT_USB_HS,
.flags = IORESOURCE_IRQ,
},
};
struct platform_device msm_device_otg = {
.name = "msm_otg",
.id = -1,
.num_resources = ARRAY_SIZE(resources_otg),
.resource = resources_otg,
.dev = {
.coherent_dma_mask = 0xffffffff,
},
};
static struct resource resources_hsusb[] = {
{
.start = MSM_HSUSB_PHYS,
.end = MSM_HSUSB_PHYS + MSM_HSUSB_SIZE,
.flags = IORESOURCE_MEM,
},
{
.start = INT_USB_HS,
.end = INT_USB_HS,
.flags = IORESOURCE_IRQ,
},
};
struct platform_device msm_device_hsusb = {
.name = "msm_hsusb",
.id = -1,
.num_resources = ARRAY_SIZE(resources_hsusb),
.resource = resources_hsusb,
.dev = {
.coherent_dma_mask = 0xffffffff,
},
};
static u64 dma_mask = 0xffffffffULL;
static struct resource resources_hsusb_host[] = {
{
.start = MSM_HSUSB_PHYS,
.end = MSM_HSUSB_PHYS + MSM_HSUSB_SIZE,
.flags = IORESOURCE_MEM,
},
{
.start = INT_USB_HS,
.end = INT_USB_HS,
.flags = IORESOURCE_IRQ,
},
};
struct platform_device msm_device_hsusb_host = {
.name = "msm_hsusb_host",
.id = -1,
.num_resources = ARRAY_SIZE(resources_hsusb_host),
.resource = resources_hsusb_host,
.dev = {
.dma_mask = &dma_mask,
.coherent_dma_mask = 0xffffffffULL,
},
};
static struct resource resources_sdc1[] = {
{
.start = MSM_SDC1_PHYS,
.end = MSM_SDC1_PHYS + MSM_SDC1_SIZE - 1,
.flags = IORESOURCE_MEM,
},
{
.start = INT_SDC1_0,
.end = INT_SDC1_0,
.flags = IORESOURCE_IRQ,
.name = "cmd_irq",
},
{
.flags = IORESOURCE_IRQ | IORESOURCE_DISABLED,
.name = "status_irq"
},
{
.start = 8,
.end = 8,
.flags = IORESOURCE_DMA,
},
};
static struct resource resources_sdc2[] = {
{
.start = MSM_SDC2_PHYS,
.end = MSM_SDC2_PHYS + MSM_SDC2_SIZE - 1,
.flags = IORESOURCE_MEM,
},
{
.start = INT_SDC2_0,
.end = INT_SDC2_0,
.flags = IORESOURCE_IRQ,
.name = "cmd_irq",
},
{
.flags = IORESOURCE_IRQ | IORESOURCE_DISABLED,
.name = "status_irq"
},
{
.start = 8,
.end = 8,
.flags = IORESOURCE_DMA,
},
};
static struct resource resources_sdc3[] = {
{
.start = MSM_SDC3_PHYS,
.end = MSM_SDC3_PHYS + MSM_SDC3_SIZE - 1,
.flags = IORESOURCE_MEM,
},
{
.start = INT_SDC3_0,
.end = INT_SDC3_0,
.flags = IORESOURCE_IRQ,
.name = "cmd_irq",
},
{
.flags = IORESOURCE_IRQ | IORESOURCE_DISABLED,
.name = "status_irq"
},
{
.start = 8,
.end = 8,
.flags = IORESOURCE_DMA,
},
};
static struct resource resources_sdc4[] = {
{
.start = MSM_SDC4_PHYS,
.end = MSM_SDC4_PHYS + MSM_SDC4_SIZE - 1,
.flags = IORESOURCE_MEM,
},
{
.start = INT_SDC4_0,
.end = INT_SDC4_0,
.flags = IORESOURCE_IRQ,
.name = "cmd_irq",
},
{
.flags = IORESOURCE_IRQ | IORESOURCE_DISABLED,
.name = "status_irq"
},
{
.start = 8,
.end = 8,
.flags = IORESOURCE_DMA,
},
};
struct platform_device msm_device_sdc1 = {
.name = "msm_sdcc",
.id = 1,
.num_resources = ARRAY_SIZE(resources_sdc1),
.resource = resources_sdc1,
.dev = {
.coherent_dma_mask = 0xffffffff,
},
};
struct platform_device msm_device_sdc2 = {
.name = "msm_sdcc",
.id = 2,
.num_resources = ARRAY_SIZE(resources_sdc2),
.resource = resources_sdc2,
.dev = {
.coherent_dma_mask = 0xffffffff,
},
};
struct platform_device msm_device_sdc3 = {
.name = "msm_sdcc",
.id = 3,
.num_resources = ARRAY_SIZE(resources_sdc3),
.resource = resources_sdc3,
.dev = {
.coherent_dma_mask = 0xffffffff,
},
};
struct platform_device msm_device_sdc4 = {
.name = "msm_sdcc",
.id = 4,
.num_resources = ARRAY_SIZE(resources_sdc4),
.resource = resources_sdc4,
.dev = {
.coherent_dma_mask = 0xffffffff,
},
};
static struct platform_device *msm_sdcc_devices[] __initdata = {
&msm_device_sdc1,
&msm_device_sdc2,
&msm_device_sdc3,
&msm_device_sdc4,
};
int __init msm_add_sdcc(unsigned int controller,
struct msm_mmc_platform_data *plat,
unsigned int stat_irq, unsigned long stat_irq_flags)
{
struct platform_device *pdev;
struct resource *res;
if (controller < 1 || controller > 4)
return -EINVAL;
pdev = msm_sdcc_devices[controller-1];
pdev->dev.platform_data = plat;
res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "status_irq");
if (!res)
return -EINVAL;
else if (stat_irq) {
res->start = res->end = stat_irq;
res->flags &= ~IORESOURCE_DISABLED;
res->flags |= stat_irq_flags;
}
return platform_device_register(pdev);
}
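/*
 * Typical board-file usage (illustrative, compiled out; the platform
 * data and IRQ number below are hypothetical):
 */
#if 0
static int __init example_board_init(void)
{
/* SDC1 with an active-low card status interrupt on IRQ 147 */
return msm_add_sdcc(1, &example_sdc1_data, 147, IRQF_TRIGGER_LOW);
}
#endif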
struct clk_lookup msm_clocks_8x50[] = {
CLK_PCOM("adm_clk", ADM_CLK, NULL, 0),
CLK_PCOM("ce_clk", CE_CLK, NULL, 0),
CLK_PCOM("ebi1_clk", EBI1_CLK, NULL, CLK_MIN),
CLK_PCOM("ebi2_clk", EBI2_CLK, NULL, 0),
CLK_PCOM("ecodec_clk", ECODEC_CLK, NULL, 0),
CLK_PCOM("emdh_clk", EMDH_CLK, NULL, OFF | CLK_MINMAX),
CLK_PCOM("gp_clk", GP_CLK, NULL, 0),
CLK_PCOM("grp_clk", GRP_3D_CLK, NULL, 0),
CLK_PCOM("i2c_clk", I2C_CLK, NULL, 0),
CLK_PCOM("icodec_rx_clk", ICODEC_RX_CLK, NULL, 0),
CLK_PCOM("icodec_tx_clk", ICODEC_TX_CLK, NULL, 0),
CLK_PCOM("imem_clk", IMEM_CLK, NULL, OFF),
CLK_PCOM("mdc_clk", MDC_CLK, NULL, 0),
CLK_PCOM("mddi_clk", PMDH_CLK, NULL, OFF | CLK_MINMAX),
CLK_PCOM("mdp_clk", MDP_CLK, NULL, OFF),
CLK_PCOM("mdp_lcdc_pclk_clk", MDP_LCDC_PCLK_CLK, NULL, 0),
CLK_PCOM("mdp_lcdc_pad_pclk_clk", MDP_LCDC_PAD_PCLK_CLK, NULL, 0),
CLK_PCOM("mdp_vsync_clk", MDP_VSYNC_CLK, NULL, 0),
CLK_PCOM("pbus_clk", PBUS_CLK, NULL, CLK_MIN),
CLK_PCOM("pcm_clk", PCM_CLK, NULL, 0),
CLK_PCOM("sdac_clk", SDAC_CLK, NULL, OFF),
CLK_PCOM("sdc_clk", SDC1_CLK, "msm_sdcc.1", OFF),
CLK_PCOM("sdc_pclk", SDC1_P_CLK, "msm_sdcc.1", OFF),
CLK_PCOM("sdc_clk", SDC2_CLK, "msm_sdcc.2", OFF),
CLK_PCOM("sdc_pclk", SDC2_P_CLK, "msm_sdcc.2", OFF),
CLK_PCOM("sdc_clk", SDC3_CLK, "msm_sdcc.3", OFF),
CLK_PCOM("sdc_pclk", SDC3_P_CLK, "msm_sdcc.3", OFF),
CLK_PCOM("sdc_clk", SDC4_CLK, "msm_sdcc.4", OFF),
CLK_PCOM("sdc_pclk", SDC4_P_CLK, "msm_sdcc.4", OFF),
CLK_PCOM("spi_clk", SPI_CLK, NULL, 0),
CLK_PCOM("tsif_clk", TSIF_CLK, NULL, 0),
CLK_PCOM("tsif_ref_clk", TSIF_REF_CLK, NULL, 0),
CLK_PCOM("tv_dac_clk", TV_DAC_CLK, NULL, 0),
CLK_PCOM("tv_enc_clk", TV_ENC_CLK, NULL, 0),
CLK_PCOM("uart_clk", UART1_CLK, NULL, OFF),
CLK_PCOM("uart_clk", UART2_CLK, NULL, 0),
CLK_PCOM("uart_clk", UART3_CLK, "msm_serial.2", OFF),
CLK_PCOM("uartdm_clk", UART1DM_CLK, NULL, OFF),
CLK_PCOM("uartdm_clk", UART2DM_CLK, NULL, 0),
CLK_PCOM("usb_hs_clk", USB_HS_CLK, NULL, OFF),
CLK_PCOM("usb_hs_pclk", USB_HS_P_CLK, NULL, OFF),
CLK_PCOM("usb_otg_clk", USB_OTG_CLK, NULL, 0),
CLK_PCOM("vdc_clk", VDC_CLK, NULL, OFF | CLK_MIN),
CLK_PCOM("vfe_clk", VFE_CLK, NULL, OFF),
CLK_PCOM("vfe_mdc_clk", VFE_MDC_CLK, NULL, OFF),
CLK_PCOM("vfe_axi_clk", VFE_AXI_CLK, NULL, OFF),
CLK_PCOM("usb_hs2_clk", USB_HS2_CLK, NULL, OFF),
CLK_PCOM("usb_hs2_pclk", USB_HS2_P_CLK, NULL, OFF),
CLK_PCOM("usb_hs3_clk", USB_HS3_CLK, NULL, OFF),
CLK_PCOM("usb_hs3_pclk", USB_HS3_P_CLK, NULL, OFF),
CLK_PCOM("usb_phy_clk", USB_PHY_CLK, NULL, 0),
};
unsigned msm_num_clocks_8x50 = ARRAY_SIZE(msm_clocks_8x50);
| gpl-2.0 |
p2pjack/AK-eva333 | lib/digsig.c | 1606 | 5868 | /*
* Copyright (C) 2011 Nokia Corporation
* Copyright (C) 2011 Intel Corporation
*
* Author:
* Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
* <dmitry.kasatkin@intel.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 2 of the License.
*
* File: sign.c
* implements signature (RSA) verification
* pkcs decoding is based on LibTomCrypt code
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/key.h>
#include <linux/crypto.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <keys/user-type.h>
#include <linux/mpi.h>
#include <linux/digsig.h>
static struct crypto_shash *shash;
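/*
 * EMSA-PKCS1-v1_5 (RFC 3447) lays the encoded message out as
 *
 * EM = 0x00 || 0x01 || PS || 0x00 || M
 *
 * where PS is a run of 0xFF padding octets. The decoder below checks the
 * two-byte prefix, skips the padding and the zero separator, and copies
 * the trailing message M to "out".
 */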
static int pkcs_1_v1_5_decode_emsa(const unsigned char *msg,
unsigned long msglen,
unsigned long modulus_bitlen,
unsigned char *out,
unsigned long *outlen)
{
unsigned long modulus_len, ps_len, i;
modulus_len = (modulus_bitlen >> 3) + (modulus_bitlen & 7 ? 1 : 0);
/* test message size */
if ((msglen > modulus_len) || (modulus_len < 11))
return -EINVAL;
/* separate encoded message */
if ((msg[0] != 0x00) || (msg[1] != (unsigned char)1))
return -EINVAL;
for (i = 2; i < modulus_len - 1; i++)
if (msg[i] != 0xFF)
break;
/* separator check */
if (msg[i] != 0)
/* There was no octet with hexadecimal value 0x00
to separate ps from m. */
return -EINVAL;
ps_len = i - 2;
if (*outlen < (msglen - (2 + ps_len + 1))) {
*outlen = msglen - (2 + ps_len + 1);
return -EOVERFLOW;
}
*outlen = (msglen - (2 + ps_len + 1));
memcpy(out, &msg[2 + ps_len + 1], *outlen);
return 0;
}
/*
* RSA Signature verification with public key
*/
static int digsig_verify_rsa(struct key *key,
const char *sig, int siglen,
const char *h, int hlen)
{
int err = -EINVAL;
unsigned long len;
unsigned long mlen, mblen;
unsigned nret, l;
int head, i;
unsigned char *out1 = NULL, *out2 = NULL;
MPI in = NULL, res = NULL, pkey[2];
uint8_t *p, *datap, *endp;
struct user_key_payload *ukp;
struct pubkey_hdr *pkh;
down_read(&key->sem);
ukp = key->payload.data;
if (ukp->datalen < sizeof(*pkh))
goto err1;
pkh = (struct pubkey_hdr *)ukp->data;
if (pkh->version != 1)
goto err1;
if (pkh->algo != PUBKEY_ALGO_RSA)
goto err1;
if (pkh->nmpi != 2)
goto err1;
datap = pkh->mpi;
endp = ukp->data + ukp->datalen;
err = -ENOMEM;
for (i = 0; i < pkh->nmpi; i++) {
unsigned int remaining = endp - datap;
pkey[i] = mpi_read_from_buffer(datap, &remaining);
if (!pkey[i])
goto err;
datap += remaining;
}
mblen = mpi_get_nbits(pkey[0]);
mlen = (mblen + 7)/8;
if (mlen == 0)
goto err;
out1 = kzalloc(mlen, GFP_KERNEL);
if (!out1)
goto err;
out2 = kzalloc(mlen, GFP_KERNEL);
if (!out2)
goto err;
nret = siglen;
in = mpi_read_from_buffer(sig, &nret);
if (!in)
goto err;
res = mpi_alloc(mpi_get_nlimbs(in) * 2);
if (!res)
goto err;
err = mpi_powm(res, in, pkey[1], pkey[0]);
if (err)
goto err;
if (mpi_get_nlimbs(res) * BYTES_PER_MPI_LIMB > mlen) {
err = -EINVAL;
goto err;
}
p = mpi_get_buffer(res, &l, NULL);
if (!p) {
err = -EINVAL;
goto err;
}
len = mlen;
head = len - l;
memset(out1, 0, head);
memcpy(out1 + head, p, l);
kfree(p);
err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len);
if (err)
goto err;
if (len != hlen || memcmp(out2, h, hlen))
err = -EINVAL;
err:
mpi_free(in);
mpi_free(res);
kfree(out1);
kfree(out2);
while (--i >= 0)
mpi_free(pkey[i]);
err1:
up_read(&key->sem);
return err;
}
/**
* digsig_verify() - digital signature verification with public key
* @keyring: keyring to search key in
* @sig: digital signature
* @siglen: length of the signature
* @data: data
* @datalen: length of the data
* @return: 0 on success, -EINVAL otherwise
*
* Verifies data integrity against digital signature.
* Currently only RSA is supported.
* Normally a hash of the content is used as the data for this function.
*
*/
int digsig_verify(struct key *keyring, const char *sig, int siglen,
const char *data, int datalen)
{
int err = -ENOMEM;
struct signature_hdr *sh = (struct signature_hdr *)sig;
struct shash_desc *desc = NULL;
unsigned char hash[SHA1_DIGEST_SIZE];
struct key *key;
char name[20];
if (siglen < sizeof(*sh) + 2)
return -EINVAL;
if (sh->algo != PUBKEY_ALGO_RSA)
return -ENOTSUPP;
sprintf(name, "%llX", __be64_to_cpup((uint64_t *)sh->keyid));
if (keyring) {
/* search in specific keyring */
key_ref_t kref;
kref = keyring_search(make_key_ref(keyring, 1UL),
&key_type_user, name);
if (IS_ERR(kref))
key = ERR_PTR(PTR_ERR(kref));
else
key = key_ref_to_ptr(kref);
} else {
key = request_key(&key_type_user, name, NULL);
}
if (IS_ERR(key)) {
pr_err("key not found, id: %s\n", name);
return PTR_ERR(key);
}
desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(shash),
GFP_KERNEL);
if (!desc)
goto err;
desc->tfm = shash;
desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
crypto_shash_init(desc);
crypto_shash_update(desc, data, datalen);
crypto_shash_update(desc, sig, sizeof(*sh));
crypto_shash_final(desc, hash);
kfree(desc);
/* pass signature mpis address */
err = digsig_verify_rsa(key, sig + sizeof(*sh), siglen - sizeof(*sh),
hash, sizeof(hash));
err:
key_put(key);
return err ? -EINVAL : 0;
}
EXPORT_SYMBOL_GPL(digsig_verify);
static int __init digsig_init(void)
{
shash = crypto_alloc_shash("sha1", 0, 0);
if (IS_ERR(shash)) {
pr_err("shash allocation failed\n");
return PTR_ERR(shash);
}
return 0;
}
static void __exit digsig_cleanup(void)
{
crypto_free_shash(shash);
}
module_init(digsig_init);
module_exit(digsig_cleanup);
MODULE_LICENSE("GPL");
| gpl-2.0 |
DutchDanny/pyramidLE-ICS | net/l2tp/l2tp_eth.c | 1862 | 7371 | /*
* L2TPv3 ethernet pseudowire driver
*
* Copyright (c) 2008,2009,2010 Katalix Systems Ltd
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/hash.h>
#include <linux/l2tp.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include <linux/spinlock.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/tcp_states.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include "l2tp_core.h"
/* Default device name. May be overridden by name specified by user */
#define L2TP_ETH_DEV_NAME "l2tpeth%d"
/* via netdev_priv() */
struct l2tp_eth {
struct net_device *dev;
struct sock *tunnel_sock;
struct l2tp_session *session;
struct list_head list;
};
/* via l2tp_session_priv() */
struct l2tp_eth_sess {
struct net_device *dev;
};
/* per-net private data for this module */
static unsigned int l2tp_eth_net_id;
struct l2tp_eth_net {
struct list_head l2tp_eth_dev_list;
spinlock_t l2tp_eth_lock;
};
static inline struct l2tp_eth_net *l2tp_eth_pernet(struct net *net)
{
return net_generic(net, l2tp_eth_net_id);
}
static int l2tp_eth_dev_init(struct net_device *dev)
{
struct l2tp_eth *priv = netdev_priv(dev);
priv->dev = dev;
random_ether_addr(dev->dev_addr);
memset(&dev->broadcast[0], 0xff, 6);
return 0;
}
static void l2tp_eth_dev_uninit(struct net_device *dev)
{
struct l2tp_eth *priv = netdev_priv(dev);
struct l2tp_eth_net *pn = l2tp_eth_pernet(dev_net(dev));
spin_lock(&pn->l2tp_eth_lock);
list_del_init(&priv->list);
spin_unlock(&pn->l2tp_eth_lock);
dev_put(dev);
}
static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct l2tp_eth *priv = netdev_priv(dev);
struct l2tp_session *session = priv->session;
l2tp_xmit_skb(session, skb, session->hdr_len);
dev->stats.tx_bytes += skb->len;
dev->stats.tx_packets++;
return NETDEV_TX_OK;
}
static struct net_device_ops l2tp_eth_netdev_ops = {
.ndo_init = l2tp_eth_dev_init,
.ndo_uninit = l2tp_eth_dev_uninit,
.ndo_start_xmit = l2tp_eth_dev_xmit,
};
static void l2tp_eth_dev_setup(struct net_device *dev)
{
ether_setup(dev);
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->netdev_ops = &l2tp_eth_netdev_ops;
dev->destructor = free_netdev;
}
static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
{
struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
struct net_device *dev = spriv->dev;
if (session->debug & L2TP_MSG_DATA) {
unsigned int length;
int offset;
u8 *ptr = skb->data;
length = min(32u, skb->len);
if (!pskb_may_pull(skb, length))
goto error;
printk(KERN_DEBUG "%s: eth recv: ", session->name);
offset = 0;
do {
printk(" %02X", ptr[offset]);
} while (++offset < length);
printk("\n");
}
if (!pskb_may_pull(skb, ETH_HLEN))
goto error;
secpath_reset(skb);
/* checksums verified by L2TP */
skb->ip_summed = CHECKSUM_NONE;
skb_dst_drop(skb);
nf_reset(skb);
if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
dev->stats.rx_packets++;
dev->stats.rx_bytes += data_len;
} else
dev->stats.rx_errors++;
return;
error:
dev->stats.rx_errors++;
kfree_skb(skb);
}
static void l2tp_eth_delete(struct l2tp_session *session)
{
struct l2tp_eth_sess *spriv;
struct net_device *dev;
if (session) {
spriv = l2tp_session_priv(session);
dev = spriv->dev;
if (dev) {
unregister_netdev(dev);
spriv->dev = NULL;
}
}
}
#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
static void l2tp_eth_show(struct seq_file *m, void *arg)
{
struct l2tp_session *session = arg;
struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
struct net_device *dev = spriv->dev;
seq_printf(m, " interface %s\n", dev->name);
}
#endif
static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
{
struct net_device *dev;
char name[IFNAMSIZ];
struct l2tp_tunnel *tunnel;
struct l2tp_session *session;
struct l2tp_eth *priv;
struct l2tp_eth_sess *spriv;
int rc;
struct l2tp_eth_net *pn;
tunnel = l2tp_tunnel_find(net, tunnel_id);
if (!tunnel) {
rc = -ENODEV;
goto out;
}
session = l2tp_session_find(net, tunnel, session_id);
if (session) {
rc = -EEXIST;
goto out;
}
if (cfg->ifname) {
dev = dev_get_by_name(net, cfg->ifname);
if (dev) {
dev_put(dev);
rc = -EEXIST;
goto out;
}
strlcpy(name, cfg->ifname, IFNAMSIZ);
} else
strcpy(name, L2TP_ETH_DEV_NAME);
session = l2tp_session_create(sizeof(*spriv), tunnel, session_id,
peer_session_id, cfg);
if (!session) {
rc = -ENOMEM;
goto out;
}
dev = alloc_netdev(sizeof(*priv), name, l2tp_eth_dev_setup);
if (!dev) {
rc = -ENOMEM;
goto out_del_session;
}
dev_net_set(dev, net);
if (session->mtu == 0)
session->mtu = dev->mtu - session->hdr_len;
dev->mtu = session->mtu;
dev->needed_headroom += session->hdr_len;
priv = netdev_priv(dev);
priv->dev = dev;
priv->session = session;
INIT_LIST_HEAD(&priv->list);
priv->tunnel_sock = tunnel->sock;
session->recv_skb = l2tp_eth_dev_recv;
session->session_close = l2tp_eth_delete;
#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
session->show = l2tp_eth_show;
#endif
spriv = l2tp_session_priv(session);
spriv->dev = dev;
rc = register_netdev(dev);
if (rc < 0)
goto out_del_dev;
/* Must be done after register_netdev() */
strlcpy(session->ifname, dev->name, IFNAMSIZ);
dev_hold(dev);
pn = l2tp_eth_pernet(dev_net(dev));
spin_lock(&pn->l2tp_eth_lock);
list_add(&priv->list, &pn->l2tp_eth_dev_list);
spin_unlock(&pn->l2tp_eth_lock);
return 0;
out_del_dev:
free_netdev(dev);
out_del_session:
l2tp_session_delete(session);
out:
return rc;
}
static __net_init int l2tp_eth_init_net(struct net *net)
{
struct l2tp_eth_net *pn = net_generic(net, l2tp_eth_net_id);
INIT_LIST_HEAD(&pn->l2tp_eth_dev_list);
spin_lock_init(&pn->l2tp_eth_lock);
return 0;
}
static struct pernet_operations l2tp_eth_net_ops = {
.init = l2tp_eth_init_net,
.id = &l2tp_eth_net_id,
.size = sizeof(struct l2tp_eth_net),
};
static const struct l2tp_nl_cmd_ops l2tp_eth_nl_cmd_ops = {
.session_create = l2tp_eth_create,
.session_delete = l2tp_session_delete,
};
static int __init l2tp_eth_init(void)
{
int err = 0;
err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &l2tp_eth_nl_cmd_ops);
if (err)
goto out;
err = register_pernet_device(&l2tp_eth_net_ops);
if (err)
goto out_unreg;
printk(KERN_INFO "L2TP ethernet pseudowire support (L2TPv3)\n");
return 0;
out_unreg:
l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
out:
return err;
}
static void __exit l2tp_eth_exit(void)
{
unregister_pernet_device(&l2tp_eth_net_ops);
l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
}
module_init(l2tp_eth_init);
module_exit(l2tp_eth_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("L2TP ethernet pseudowire driver");
MODULE_VERSION("1.0");
| gpl-2.0 |
civato/9005-LL-DEV | block/cfq-iosched.c | 2118 | 102099 | /*
* CFQ, or complete fairness queueing, disk scheduler.
*
* Based on ideas from a previously unfinished io
* scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
*
* Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
#include "blk.h"
#include "cfq.h"
/*
* tunables
*/
/* max queue in one round of service */
static const int cfq_quantum = 8;
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;
static int cfq_group_idle = HZ / 125;
static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
static const int cfq_hist_divisor = 4;
/*
* offset from end of service tree
*/
#define CFQ_IDLE_DELAY (HZ / 5)
/*
* below this threshold, we consider thinktime immediate
*/
#define CFQ_MIN_TT (2)
#define CFQ_SLICE_SCALE (5)
#define CFQ_HW_QUEUE_MIN (5)
#define CFQ_SERVICE_SHIFT 12
#define CFQQ_SEEK_THR (sector_t)(8 * 100)
#define CFQQ_CLOSE_THR (sector_t)(8 * 1024)
#define CFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
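/* seek_history is maintained elsewhere as a 32-bit shift register of
per-request "was this a seek" flags; a queue counts as seeky when more
than 32/8 == 4 of its last 32 requests exceeded the seek threshold. */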
#define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8)
#define RQ_CIC(rq) icq_to_cic((rq)->elv.icq)
#define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elv.priv[0])
#define RQ_CFQG(rq) (struct cfq_group *) ((rq)->elv.priv[1])
static struct kmem_cache *cfq_pool;
#define CFQ_PRIO_LISTS IOPRIO_BE_NR
#define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
#define sample_valid(samples) ((samples) > 80)
#define rb_entry_cfqg(node) rb_entry((node), struct cfq_group, rb_node)
struct cfq_ttime {
unsigned long last_end_request;
unsigned long ttime_total;
unsigned long ttime_samples;
unsigned long ttime_mean;
};
/*
* Most of our rbtree usage is for sorting with min extraction, so
* if we cache the leftmost node we don't have to walk down the tree
* to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
* move this into the elevator for the rq sorting as well.
*/
struct cfq_rb_root {
struct rb_root rb;
struct rb_node *left;
unsigned count;
unsigned total_weight;
u64 min_vdisktime;
struct cfq_ttime ttime;
};
#define CFQ_RB_ROOT (struct cfq_rb_root) { .rb = RB_ROOT, \
.ttime = {.last_end_request = jiffies,},}
/*
* Per process-grouping structure
*/
struct cfq_queue {
/* reference count */
int ref;
/* various state flags, see below */
unsigned int flags;
/* parent cfq_data */
struct cfq_data *cfqd;
/* service_tree member */
struct rb_node rb_node;
/* service_tree key */
unsigned long rb_key;
/* prio tree member */
struct rb_node p_node;
/* prio tree root we belong to, if any */
struct rb_root *p_root;
/* sorted list of pending requests */
struct rb_root sort_list;
/* if fifo isn't expired, next request to serve */
struct request *next_rq;
/* requests queued in sort_list */
int queued[2];
/* currently allocated requests */
int allocated[2];
/* fifo list of requests in sort_list */
struct list_head fifo;
/* time when queue got scheduled in to dispatch first request. */
unsigned long dispatch_start;
unsigned int allocated_slice;
unsigned int slice_dispatch;
/* time when first request from queue completed and slice started. */
unsigned long slice_start;
unsigned long slice_end;
long slice_resid;
/* pending priority requests */
int prio_pending;
/* number of requests that are on the dispatch list or inside driver */
int dispatched;
/* io prio of this group */
unsigned short ioprio, org_ioprio;
unsigned short ioprio_class;
pid_t pid;
u32 seek_history;
sector_t last_request_pos;
struct cfq_rb_root *service_tree;
struct cfq_queue *new_cfqq;
struct cfq_group *cfqg;
/* Number of sectors dispatched from queue in single dispatch round */
unsigned long nr_sectors;
};
/*
* First index in the service_trees.
* IDLE is handled separately, so it has negative index
*/
enum wl_prio_t {
BE_WORKLOAD = 0,
RT_WORKLOAD = 1,
IDLE_WORKLOAD = 2,
CFQ_PRIO_NR,
};
/*
* Second index in the service_trees.
*/
enum wl_type_t {
ASYNC_WORKLOAD = 0,
SYNC_NOIDLE_WORKLOAD = 1,
SYNC_WORKLOAD = 2
};
/* This is per cgroup per device grouping structure */
struct cfq_group {
/* group service_tree member */
struct rb_node rb_node;
/* group service_tree key */
u64 vdisktime;
unsigned int weight;
unsigned int new_weight;
bool needs_update;
/* number of cfqq currently on this group */
int nr_cfqq;
/*
* Per group busy queues average. Useful for workload slice calc. We
* create the array for each prio class but at run time it is used
* only for RT and BE class and slot for IDLE class remains unused.
* This is primarily done to avoid confusion and a gcc warning.
*/
unsigned int busy_queues_avg[CFQ_PRIO_NR];
/*
* rr lists of queues with requests. We maintain service trees for
* RT and BE classes. These trees are subdivided in subclasses
* of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
* class there is no subclassification and all the cfq queues go on
* a single tree service_tree_idle.
* Counts are embedded in the cfq_rb_root
*/
struct cfq_rb_root service_trees[2][3];
struct cfq_rb_root service_tree_idle;
unsigned long saved_workload_slice;
enum wl_type_t saved_workload;
enum wl_prio_t saved_serving_prio;
struct blkio_group blkg;
#ifdef CONFIG_CFQ_GROUP_IOSCHED
struct hlist_node cfqd_node;
int ref;
#endif
/* number of requests that are on the dispatch list or inside driver */
int dispatched;
struct cfq_ttime ttime;
};
struct cfq_io_cq {
struct io_cq icq; /* must be the first member */
struct cfq_queue *cfqq[2];
struct cfq_ttime ttime;
};
/*
* Per block device queue structure
*/
struct cfq_data {
struct request_queue *queue;
/* Root service tree for cfq_groups */
struct cfq_rb_root grp_service_tree;
struct cfq_group root_group;
/*
* The priority currently being served
*/
enum wl_prio_t serving_prio;
enum wl_type_t serving_type;
unsigned long workload_expires;
struct cfq_group *serving_group;
/*
* Each priority tree is sorted by next_request position. These
* trees are used when determining if two or more queues are
* interleaving requests (see cfq_close_cooperator).
*/
struct rb_root prio_trees[CFQ_PRIO_LISTS];
unsigned int busy_queues;
unsigned int busy_sync_queues;
int rq_in_driver;
int rq_in_flight[2];
/*
* queue-depth detection
*/
int rq_queued;
int hw_tag;
/*
* hw_tag can be
* -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection)
* 1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
* 0 => no NCQ
*/
int hw_tag_est_depth;
unsigned int hw_tag_samples;
/*
* idle window management
*/
struct timer_list idle_slice_timer;
struct work_struct unplug_work;
struct cfq_queue *active_queue;
struct cfq_io_cq *active_cic;
/*
* async queue for each priority case
*/
struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
struct cfq_queue *async_idle_cfqq;
sector_t last_position;
/*
* tunables, see top of file
*/
unsigned int cfq_quantum;
unsigned int cfq_fifo_expire[2];
unsigned int cfq_back_penalty;
unsigned int cfq_back_max;
unsigned int cfq_slice[2];
unsigned int cfq_slice_async_rq;
unsigned int cfq_slice_idle;
unsigned int cfq_group_idle;
unsigned int cfq_latency;
unsigned int cfq_target_latency;
/*
* Fallback dummy cfqq for extreme OOM conditions
*/
struct cfq_queue oom_cfqq;
unsigned long last_delayed_sync;
/* List of cfq groups being managed on this device */
struct hlist_head cfqg_list;
/* Number of groups which are on blkcg->blkg_list */
unsigned int nr_blkcg_linked_grps;
};
static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
enum wl_prio_t prio,
enum wl_type_t type)
{
if (!cfqg)
return NULL;
if (prio == IDLE_WORKLOAD)
return &cfqg->service_tree_idle;
return &cfqg->service_trees[prio][type];
}
enum cfqq_state_flags {
CFQ_CFQQ_FLAG_on_rr = 0, /* on round-robin busy list */
CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */
CFQ_CFQQ_FLAG_must_dispatch, /* must be allowed a dispatch */
CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
CFQ_CFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
CFQ_CFQQ_FLAG_idle_window, /* slice idling enabled */
CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */
CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */
CFQ_CFQQ_FLAG_sync, /* synchronous queue */
CFQ_CFQQ_FLAG_coop, /* cfqq is shared */
CFQ_CFQQ_FLAG_split_coop, /* shared cfqq will be split */
CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */
CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */
};
#define CFQ_CFQQ_FNS(name) \
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \
{ \
(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name); \
} \
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \
{ \
(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \
} \
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
{ \
return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \
}
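/*
 * For example, CFQ_CFQQ_FNS(on_rr) expands to cfq_mark_cfqq_on_rr(),
 * cfq_clear_cfqq_on_rr() and cfq_cfqq_on_rr(), which set, clear and test
 * the CFQ_CFQQ_FLAG_on_rr bit in cfqq->flags respectively.
 */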
CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
CFQ_CFQQ_FNS(coop);
CFQ_CFQQ_FNS(split_coop);
CFQ_CFQQ_FNS(deep);
CFQ_CFQQ_FNS(wait_busy);
#undef CFQ_CFQQ_FNS
#ifdef CONFIG_CFQ_GROUP_IOSCHED
#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
blkg_path(&(cfqq)->cfqg->blkg), ##args)
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) \
blk_add_trace_msg((cfqd)->queue, "%s " fmt, \
blkg_path(&(cfqg)->blkg), ##args)
#else
#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0)
#endif
#define cfq_log(cfqd, fmt, args...) \
blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
/* Traverses through cfq group service trees */
#define for_each_cfqg_st(cfqg, i, j, st) \
for (i = 0; i <= IDLE_WORKLOAD; i++) \
for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
: &cfqg->service_tree_idle; \
(i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
(i == IDLE_WORKLOAD && j == 0); \
j++, st = i < IDLE_WORKLOAD ? \
&cfqg->service_trees[i][j] : NULL)
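/*
 * Iteration order: i = BE_WORKLOAD and RT_WORKLOAD each visit the three
 * service_trees[i][j] subtrees (j = ASYNC, SYNC_NOIDLE, SYNC), then
 * i = IDLE_WORKLOAD visits service_tree_idle exactly once (j == 0).
 */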
static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
struct cfq_ttime *ttime, bool group_idle)
{
unsigned long slice;
if (!sample_valid(ttime->ttime_samples))
return false;
if (group_idle)
slice = cfqd->cfq_group_idle;
else
slice = cfqd->cfq_slice_idle;
return ttime->ttime_mean > slice;
}
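/*
 * For instance, with the historical default of cfq_slice_idle = HZ / 125
 * (8 jiffies at HZ=1000; the value is set elsewhere in this file), a mean
 * think time above 8 jiffies marks the context as too slow to be worth
 * idling for.
 */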
static inline bool iops_mode(struct cfq_data *cfqd)
{
/*
* If we are not idling on queues and the drive supports NCQ, requests
* execute in parallel and measuring time is not possible in most
* cases, unless we drive shallower queue depths, which itself becomes
* a performance bottleneck. In such cases, switch to providing
* fairness in terms of number of IOs instead.
*/
return !cfqd->cfq_slice_idle && cfqd->hw_tag;
}
static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
{
if (cfq_class_idle(cfqq))
return IDLE_WORKLOAD;
if (cfq_class_rt(cfqq))
return RT_WORKLOAD;
return BE_WORKLOAD;
}
static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
{
if (!cfq_cfqq_sync(cfqq))
return ASYNC_WORKLOAD;
if (!cfq_cfqq_idle_window(cfqq))
return SYNC_NOIDLE_WORKLOAD;
return SYNC_WORKLOAD;
}
static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
struct cfq_data *cfqd,
struct cfq_group *cfqg)
{
if (wl == IDLE_WORKLOAD)
return cfqg->service_tree_idle.count;
return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
+ cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
+ cfqg->service_trees[wl][SYNC_WORKLOAD].count;
}
static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
struct cfq_group *cfqg)
{
return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
+ cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
}
static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
struct io_context *, gfp_t);
static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
{
/* cic->icq is the first member, %NULL will convert to %NULL */
return container_of(icq, struct cfq_io_cq, icq);
}
static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
struct io_context *ioc)
{
if (ioc)
return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
return NULL;
}
static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
{
return cic->cfqq[is_sync];
}
static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
bool is_sync)
{
cic->cfqq[is_sync] = cfqq;
}
static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
{
return cic->icq.q->elevator->elevator_data;
}
/*
* We regard a request as SYNC if it's either a read or has the SYNC bit
* set (in which case it could also be a direct WRITE).
*/
static inline bool cfq_bio_sync(struct bio *bio)
{
return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
}
/*
* scheduler run of queue, if there are requests pending and no one in the
* driver that will restart queueing
*/
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
if (cfqd->busy_queues) {
cfq_log(cfqd, "schedule dispatch");
kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
}
}
/*
* Scale schedule slice based on io priority. Use the sync time slice only
* if a queue is marked sync and has sync io queued. A sync queue with only
* async io queued should not get the full sync slice length.
*/
static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
unsigned short prio)
{
const int base_slice = cfqd->cfq_slice[sync];
WARN_ON(prio >= IOPRIO_BE_NR);
return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}
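/*
 * Worked example, assuming CFQ_SLICE_SCALE == 5 and a sync base slice of
 * 100 jiffies (believed to be the historical defaults, defined elsewhere):
 * prio 0 gets 100 + 20 * 4 = 180, prio 4 gets exactly the base 100, and
 * prio 7 gets 100 + 20 * (4 - 7) = 40.
 */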
static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}
static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
{
u64 d = delta << CFQ_SERVICE_SHIFT;
d = d * BLKIO_WEIGHT_DEFAULT;
do_div(d, cfqg->weight);
return d;
}
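/*
 * The scaling is inversely proportional to group weight: assuming
 * BLKIO_WEIGHT_DEFAULT == 500, a group of weight 250 accrues vdisktime
 * twice as fast as a default-weight group for the same slice, and is
 * therefore scheduled correspondingly less often.
 */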
static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
s64 delta = (s64)(vdisktime - min_vdisktime);
if (delta > 0)
min_vdisktime = vdisktime;
return min_vdisktime;
}
static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
s64 delta = (s64)(vdisktime - min_vdisktime);
if (delta < 0)
min_vdisktime = vdisktime;
return min_vdisktime;
}
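/*
 * The (s64) cast of the u64 difference makes both helpers above safe
 * across vdisktime wraparound, the same trick time_after() uses for
 * jiffies comparisons.
 */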
static void update_min_vdisktime(struct cfq_rb_root *st)
{
struct cfq_group *cfqg;
if (st->left) {
cfqg = rb_entry_cfqg(st->left);
st->min_vdisktime = max_vdisktime(st->min_vdisktime,
cfqg->vdisktime);
}
}
/*
* Get the averaged number of queues of RT/BE priority. The average is
* updated with a formula that gives more weight to higher numbers, so
* that it quickly follows sudden increases and decays slowly.
*/
static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
struct cfq_group *cfqg, bool rt)
{
unsigned min_q, max_q;
unsigned mult = cfq_hist_divisor - 1;
unsigned round = cfq_hist_divisor / 2;
unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
min_q = min(cfqg->busy_queues_avg[rt], busy);
max_q = max(cfqg->busy_queues_avg[rt], busy);
cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
cfq_hist_divisor;
return cfqg->busy_queues_avg[rt];
}
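/*
 * Worked example, assuming cfq_hist_divisor == 4 (believed to be the
 * historical default): the update is avg = (3 * max + min + 2) / 4, so
 * the average covers 75% of the gap on a sudden increase in busy queues
 * but only 25% of the gap per update while decaying.
 */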
static inline unsigned
cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
struct cfq_rb_root *st = &cfqd->grp_service_tree;
return cfqd->cfq_target_latency * cfqg->weight / st->total_weight;
}
static inline unsigned
cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
if (cfqd->cfq_latency) {
/*
* interested queues (we consider only the ones with the same
* priority class in the cfq group)
*/
unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
cfq_class_rt(cfqq));
unsigned sync_slice = cfqd->cfq_slice[1];
unsigned expect_latency = sync_slice * iq;
unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
if (expect_latency > group_slice) {
unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
/* scale low_slice according to IO priority
* and sync vs async */
unsigned low_slice =
min(slice, base_low_slice * slice / sync_slice);
/* the adapted slice value is scaled to fit all iqs
* into the target latency */
slice = max(slice * group_slice / expect_latency,
low_slice);
}
}
return slice;
}
static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
cfqq->slice_start = jiffies;
cfqq->slice_end = jiffies + slice;
cfqq->allocated_slice = slice;
cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
}
/*
* We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
* isn't valid until the first request from the dispatch is activated
* and the slice time set.
*/
static inline bool cfq_slice_used(struct cfq_queue *cfqq)
{
if (cfq_cfqq_slice_new(cfqq))
return false;
if (time_before(jiffies, cfqq->slice_end))
return false;
return true;
}
/*
* Lifted from AS - choose whichever of rq1 and rq2 is best served now.
* We choose the request that is closest to the head right now. Distance
* behind the head is penalized and only allowed to a certain extent.
*/
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
{
sector_t s1, s2, d1 = 0, d2 = 0;
unsigned long back_max;
#define CFQ_RQ1_WRAP 0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP 0x02 /* request 2 wraps */
unsigned wrap = 0; /* bit mask: requests behind the disk head? */
if (rq1 == NULL || rq1 == rq2)
return rq2;
if (rq2 == NULL)
return rq1;
if (rq_is_sync(rq1) != rq_is_sync(rq2))
return rq_is_sync(rq1) ? rq1 : rq2;
if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
s1 = blk_rq_pos(rq1);
s2 = blk_rq_pos(rq2);
/*
* by definition, 1KiB is 2 sectors
*/
back_max = cfqd->cfq_back_max * 2;
/*
* Strict one way elevator _except_ in the case where we allow
* short backward seeks which are biased as twice the cost of a
* similar forward seek.
*/
if (s1 >= last)
d1 = s1 - last;
else if (s1 + back_max >= last)
d1 = (last - s1) * cfqd->cfq_back_penalty;
else
wrap |= CFQ_RQ1_WRAP;
if (s2 >= last)
d2 = s2 - last;
else if (s2 + back_max >= last)
d2 = (last - s2) * cfqd->cfq_back_penalty;
else
wrap |= CFQ_RQ2_WRAP;
/* Found required data */
/*
* By doing switch() on the bit mask "wrap" we avoid having to
* check two variables for all permutations: --> faster!
*/
switch (wrap) {
case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
if (d1 < d2)
return rq1;
else if (d2 < d1)
return rq2;
else {
if (s1 >= s2)
return rq1;
else
return rq2;
}
case CFQ_RQ2_WRAP:
return rq1;
case CFQ_RQ1_WRAP:
return rq2;
case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
default:
/*
* Since both rqs are wrapped,
* start with the one that's further behind head
* (--> only *one* back seek required),
* since back seek takes more time than forward.
*/
if (s1 <= s2)
return rq1;
else
return rq2;
}
}
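/*
 * Example, assuming the historical defaults cfq_back_max = 16 * 1024 KiB
 * (back_max = 32768 sectors) and cfq_back_penalty = 2: with the head at
 * sector 1000, a forward request at 1100 scores d = 100 while a backward
 * request at 900 scores d = 100 * 2 = 200, so the forward request wins
 * even though both are equally far from the head.
 */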
/*
* The below implements the leftmost-node cache on top of the rbtree
*/
static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
{
/* Service tree is empty */
if (!root->count)
return NULL;
if (!root->left)
root->left = rb_first(&root->rb);
if (root->left)
return rb_entry(root->left, struct cfq_queue, rb_node);
return NULL;
}
static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
{
if (!root->left)
root->left = rb_first(&root->rb);
if (root->left)
return rb_entry_cfqg(root->left);
return NULL;
}
static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
rb_erase(n, root);
RB_CLEAR_NODE(n);
}
static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
if (root->left == n)
root->left = NULL;
rb_erase_init(n, &root->rb);
--root->count;
}
/*
* would be nice to take fifo expire time into account as well
*/
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct request *last)
{
struct rb_node *rbnext = rb_next(&last->rb_node);
struct rb_node *rbprev = rb_prev(&last->rb_node);
struct request *next = NULL, *prev = NULL;
BUG_ON(RB_EMPTY_NODE(&last->rb_node));
if (rbprev)
prev = rb_entry_rq(rbprev);
if (rbnext)
next = rb_entry_rq(rbnext);
else {
rbnext = rb_first(&cfqq->sort_list);
if (rbnext && rbnext != &last->rb_node)
next = rb_entry_rq(rbnext);
}
return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
}
static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
struct cfq_queue *cfqq)
{
/*
* just an approximation, should be ok.
*/
return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}
static inline s64
cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
return cfqg->vdisktime - st->min_vdisktime;
}
static void
__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
struct rb_node **node = &st->rb.rb_node;
struct rb_node *parent = NULL;
struct cfq_group *__cfqg;
s64 key = cfqg_key(st, cfqg);
int left = 1;
while (*node != NULL) {
parent = *node;
__cfqg = rb_entry_cfqg(parent);
if (key < cfqg_key(st, __cfqg))
node = &parent->rb_left;
else {
node = &parent->rb_right;
left = 0;
}
}
if (left)
st->left = &cfqg->rb_node;
rb_link_node(&cfqg->rb_node, parent, node);
rb_insert_color(&cfqg->rb_node, &st->rb);
}
static void
cfq_update_group_weight(struct cfq_group *cfqg)
{
BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
if (cfqg->needs_update) {
cfqg->weight = cfqg->new_weight;
cfqg->needs_update = false;
}
}
static void
cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
cfq_update_group_weight(cfqg);
__cfq_group_service_tree_add(st, cfqg);
st->total_weight += cfqg->weight;
}
static void
cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
struct cfq_rb_root *st = &cfqd->grp_service_tree;
struct cfq_group *__cfqg;
struct rb_node *n;
cfqg->nr_cfqq++;
if (!RB_EMPTY_NODE(&cfqg->rb_node))
return;
/*
* Currently put the group at the end. Later implement something
* so that groups get lesser vtime based on their weights, so that
* a group does not lose everything just because it was not
* continuously backlogged.
*/
n = rb_last(&st->rb);
if (n) {
__cfqg = rb_entry_cfqg(n);
cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
} else
cfqg->vdisktime = st->min_vdisktime;
cfq_group_service_tree_add(st, cfqg);
}
static void
cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
st->total_weight -= cfqg->weight;
if (!RB_EMPTY_NODE(&cfqg->rb_node))
cfq_rb_erase(&cfqg->rb_node, st);
}
static void
cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
struct cfq_rb_root *st = &cfqd->grp_service_tree;
BUG_ON(cfqg->nr_cfqq < 1);
cfqg->nr_cfqq--;
/* If there are other cfq queues under this group, don't delete it */
if (cfqg->nr_cfqq)
return;
cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
cfq_group_service_tree_del(st, cfqg);
cfqg->saved_workload_slice = 0;
cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
}
static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
unsigned int *unaccounted_time)
{
unsigned int slice_used;
/*
* Queue got expired before even a single request completed or
* got expired immediately after first request completion.
*/
if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
/*
* Also charge the seek time incurred to the group, otherwise
* if there are multiple queues in the group, each can dispatch
* a single request on seeky media and cause lots of seek time
* that the group would never be charged for.
*/
slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
1);
} else {
slice_used = jiffies - cfqq->slice_start;
if (slice_used > cfqq->allocated_slice) {
*unaccounted_time = slice_used - cfqq->allocated_slice;
slice_used = cfqq->allocated_slice;
}
if (time_after(cfqq->slice_start, cfqq->dispatch_start))
*unaccounted_time += cfqq->slice_start -
cfqq->dispatch_start;
}
return slice_used;
}
static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
struct cfq_queue *cfqq)
{
struct cfq_rb_root *st = &cfqd->grp_service_tree;
unsigned int used_sl, charge, unaccounted_sl = 0;
int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
- cfqg->service_tree_idle.count;
BUG_ON(nr_sync < 0);
used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
if (iops_mode(cfqd))
charge = cfqq->slice_dispatch;
else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
charge = cfqq->allocated_slice;
/* Can't update vdisktime while group is on service tree */
cfq_group_service_tree_del(st, cfqg);
cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
/* If a new weight was requested, update now, off tree */
cfq_group_service_tree_add(st, cfqg);
/* This group is being expired. Save the context */
if (time_after(cfqd->workload_expires, jiffies)) {
cfqg->saved_workload_slice = cfqd->workload_expires
- jiffies;
cfqg->saved_workload = cfqd->serving_type;
cfqg->saved_serving_prio = cfqd->serving_prio;
} else
cfqg->saved_workload_slice = 0;
cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
st->min_vdisktime);
cfq_log_cfqq(cfqq->cfqd, cfqq,
"sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
used_sl, cfqq->slice_dispatch, charge,
iops_mode(cfqd), cfqq->nr_sectors);
cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
unaccounted_sl);
cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
}
#ifdef CONFIG_CFQ_GROUP_IOSCHED
static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
{
if (blkg)
return container_of(blkg, struct cfq_group, blkg);
return NULL;
}
static void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
unsigned int weight)
{
struct cfq_group *cfqg = cfqg_of_blkg(blkg);
cfqg->new_weight = weight;
cfqg->needs_update = true;
}
static void cfq_init_add_cfqg_lists(struct cfq_data *cfqd,
struct cfq_group *cfqg, struct blkio_cgroup *blkcg)
{
struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
unsigned int major, minor;
/*
* Add group onto cgroup list. It might happen that bdi->dev is
* not initialized yet. Initialize this new group without major
* and minor info; that info will be filled in once a new thread
* comes in for IO.
*/
if (bdi->dev) {
sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
(void *)cfqd, MKDEV(major, minor));
} else
cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
(void *)cfqd, 0);
cfqd->nr_blkcg_linked_grps++;
cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
/* Add group on cfqd list */
hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
}
/*
* Should be called from sleepable context, without the request queue
* lock held: per-cpu stats are allocated dynamically and alloc_percpu()
* needs to be called from sleepable context.
*/
static struct cfq_group *cfq_alloc_cfqg(struct cfq_data *cfqd)
{
struct cfq_group *cfqg = NULL;
int i, j, ret;
struct cfq_rb_root *st;
cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
if (!cfqg)
return NULL;
for_each_cfqg_st(cfqg, i, j, st)
*st = CFQ_RB_ROOT;
RB_CLEAR_NODE(&cfqg->rb_node);
cfqg->ttime.last_end_request = jiffies;
/*
* Take the initial reference that will be released on destroy
* This can be thought of as a joint reference by cgroup and
* elevator which will be dropped by either elevator exit
* or cgroup deletion path depending on who is exiting first.
*/
cfqg->ref = 1;
ret = blkio_alloc_blkg_stats(&cfqg->blkg);
if (ret) {
kfree(cfqg);
return NULL;
}
return cfqg;
}
static struct cfq_group *
cfq_find_cfqg(struct cfq_data *cfqd, struct blkio_cgroup *blkcg)
{
struct cfq_group *cfqg = NULL;
void *key = cfqd;
struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
unsigned int major, minor;
/*
* This is the common case when there are no blkio cgroups.
* Avoid lookup in this case
*/
if (blkcg == &blkio_root_cgroup)
cfqg = &cfqd->root_group;
else
cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
cfqg->blkg.dev = MKDEV(major, minor);
}
return cfqg;
}
/*
* Search for the cfq group current task belongs to. request_queue lock must
* be held.
*/
static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
{
struct blkio_cgroup *blkcg;
struct cfq_group *cfqg = NULL, *__cfqg = NULL;
struct request_queue *q = cfqd->queue;
rcu_read_lock();
blkcg = task_blkio_cgroup(current);
cfqg = cfq_find_cfqg(cfqd, blkcg);
if (cfqg) {
rcu_read_unlock();
return cfqg;
}
/*
* Need to allocate a group. Allocation of group also needs allocation
* of per cpu stats which in-turn takes a mutex() and can block. Hence
* we need to drop rcu lock and queue_lock before we call alloc.
*
* Not taking any queue reference here and assuming that queue is
* around by the time we return. CFQ queue allocation code does
* the same. It might be racy though.
*/
rcu_read_unlock();
spin_unlock_irq(q->queue_lock);
cfqg = cfq_alloc_cfqg(cfqd);
spin_lock_irq(q->queue_lock);
rcu_read_lock();
blkcg = task_blkio_cgroup(current);
/*
* If some other thread already allocated the group while we were
* not holding queue lock, free up the group
*/
__cfqg = cfq_find_cfqg(cfqd, blkcg);
if (__cfqg) {
kfree(cfqg);
rcu_read_unlock();
return __cfqg;
}
if (!cfqg)
cfqg = &cfqd->root_group;
cfq_init_add_cfqg_lists(cfqd, cfqg, blkcg);
rcu_read_unlock();
return cfqg;
}
static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
{
cfqg->ref++;
return cfqg;
}
static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
{
/* Currently, all async queues are mapped to root group */
if (!cfq_cfqq_sync(cfqq))
cfqg = &cfqq->cfqd->root_group;
cfqq->cfqg = cfqg;
/* cfqq reference on cfqg */
cfqq->cfqg->ref++;
}
static void cfq_put_cfqg(struct cfq_group *cfqg)
{
struct cfq_rb_root *st;
int i, j;
BUG_ON(cfqg->ref <= 0);
cfqg->ref--;
if (cfqg->ref)
return;
for_each_cfqg_st(cfqg, i, j, st)
BUG_ON(!RB_EMPTY_ROOT(&st->rb));
free_percpu(cfqg->blkg.stats_cpu);
kfree(cfqg);
}
static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
/* Something wrong if we are trying to remove same group twice */
BUG_ON(hlist_unhashed(&cfqg->cfqd_node));
hlist_del_init(&cfqg->cfqd_node);
BUG_ON(cfqd->nr_blkcg_linked_grps <= 0);
cfqd->nr_blkcg_linked_grps--;
/*
* Put the reference taken at the time of creation so that when all
* queues are gone, group can be destroyed.
*/
cfq_put_cfqg(cfqg);
}
static void cfq_release_cfq_groups(struct cfq_data *cfqd)
{
struct hlist_node *pos, *n;
struct cfq_group *cfqg;
hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
/*
* If cgroup removal path got to blk_group first and removed
* it from cgroup list, then it will take care of destroying
* cfqg also.
*/
if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
cfq_destroy_cfqg(cfqd, cfqg);
}
}
/*
* Blk cgroup controller notification saying that blkio_group object is being
* delinked as associated cgroup object is going away. That also means that
* no new IO will come in this group. So get rid of this group as soon as
* any pending IO in the group is finished.
*
* This function is called under rcu_read_lock(). key is the rcu protected
* pointer. That means "key" is a valid cfq_data pointer as long as we hold
* the rcu read lock.
*
* "key" was fetched from blkio_group under blkio_cgroup->lock. That means
* it should not be NULL: even if the elevator was exiting, the cgroup
* deletion path got to it first.
*/
static void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg)
{
unsigned long flags;
struct cfq_data *cfqd = key;
spin_lock_irqsave(cfqd->queue->queue_lock, flags);
cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}
#else /* GROUP_IOSCHED */
static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
{
return &cfqd->root_group;
}
static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
{
return cfqg;
}
static inline void
cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
cfqq->cfqg = cfqg;
}
static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
static inline void cfq_put_cfqg(struct cfq_group *cfqg) {}
#endif /* GROUP_IOSCHED */
/*
* The cfqd->service_trees holds all pending cfq_queue's that have
* requests waiting to be processed. It is sorted in the order that
* we will service the queues.
*/
static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
bool add_front)
{
struct rb_node **p, *parent;
struct cfq_queue *__cfqq;
unsigned long rb_key;
struct cfq_rb_root *service_tree;
int left;
int new_cfqq = 1;
service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
cfqq_type(cfqq));
if (cfq_class_idle(cfqq)) {
rb_key = CFQ_IDLE_DELAY;
parent = rb_last(&service_tree->rb);
if (parent && parent != &cfqq->rb_node) {
__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
rb_key += __cfqq->rb_key;
} else
rb_key += jiffies;
} else if (!add_front) {
/*
* Get our rb key offset. Subtract any residual slice
* value carried from last service. A negative resid
* count indicates slice overrun, and this should position
* the next service time further away in the tree.
*/
rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
rb_key -= cfqq->slice_resid;
cfqq->slice_resid = 0;
} else {
rb_key = -HZ;
__cfqq = cfq_rb_first(service_tree);
rb_key += __cfqq ? __cfqq->rb_key : jiffies;
}
if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
new_cfqq = 0;
/*
* same position, nothing more to do
*/
if (rb_key == cfqq->rb_key &&
cfqq->service_tree == service_tree)
return;
cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
cfqq->service_tree = NULL;
}
left = 1;
parent = NULL;
cfqq->service_tree = service_tree;
p = &service_tree->rb.rb_node;
while (*p) {
struct rb_node **n;
parent = *p;
__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
/*
* sort by key, that represents service time.
*/
if (time_before(rb_key, __cfqq->rb_key))
n = &(*p)->rb_left;
else {
n = &(*p)->rb_right;
left = 0;
}
p = n;
}
if (left)
service_tree->left = &cfqq->rb_node;
cfqq->rb_key = rb_key;
rb_link_node(&cfqq->rb_node, parent, p);
rb_insert_color(&cfqq->rb_node, &service_tree->rb);
service_tree->count++;
if (add_front || !new_cfqq)
return;
cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
}
static struct cfq_queue *
cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
sector_t sector, struct rb_node **ret_parent,
struct rb_node ***rb_link)
{
struct rb_node **p, *parent;
struct cfq_queue *cfqq = NULL;
parent = NULL;
p = &root->rb_node;
while (*p) {
struct rb_node **n;
parent = *p;
cfqq = rb_entry(parent, struct cfq_queue, p_node);
/*
* Sort strictly based on sector. Smallest to the left,
* largest to the right.
*/
if (sector > blk_rq_pos(cfqq->next_rq))
n = &(*p)->rb_right;
else if (sector < blk_rq_pos(cfqq->next_rq))
n = &(*p)->rb_left;
else
break;
p = n;
cfqq = NULL;
}
*ret_parent = parent;
if (rb_link)
*rb_link = p;
return cfqq;
}
static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
struct rb_node **p, *parent;
struct cfq_queue *__cfqq;
if (cfqq->p_root) {
rb_erase(&cfqq->p_node, cfqq->p_root);
cfqq->p_root = NULL;
}
if (cfq_class_idle(cfqq))
return;
if (!cfqq->next_rq)
return;
cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
blk_rq_pos(cfqq->next_rq), &parent, &p);
if (!__cfqq) {
rb_link_node(&cfqq->p_node, parent, p);
rb_insert_color(&cfqq->p_node, cfqq->p_root);
} else
cfqq->p_root = NULL;
}
/*
* Update cfqq's position in the service tree.
*/
static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
/*
* Resorting requires the cfqq to be on the RR list already.
*/
if (cfq_cfqq_on_rr(cfqq)) {
cfq_service_tree_add(cfqd, cfqq, 0);
cfq_prio_tree_add(cfqd, cfqq);
}
}
/*
* add to busy list of queues for service, trying to be fair in ordering
* the pending list according to last request service
*/
static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
BUG_ON(cfq_cfqq_on_rr(cfqq));
cfq_mark_cfqq_on_rr(cfqq);
cfqd->busy_queues++;
if (cfq_cfqq_sync(cfqq))
cfqd->busy_sync_queues++;
cfq_resort_rr_list(cfqd, cfqq);
}
/*
* Called when the cfqq no longer has requests pending, remove it from
* the service tree.
*/
static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
BUG_ON(!cfq_cfqq_on_rr(cfqq));
cfq_clear_cfqq_on_rr(cfqq);
if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
cfqq->service_tree = NULL;
}
if (cfqq->p_root) {
rb_erase(&cfqq->p_node, cfqq->p_root);
cfqq->p_root = NULL;
}
cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
BUG_ON(!cfqd->busy_queues);
cfqd->busy_queues--;
if (cfq_cfqq_sync(cfqq))
cfqd->busy_sync_queues--;
}
/*
* rb tree support functions
*/
static void cfq_del_rq_rb(struct request *rq)
{
struct cfq_queue *cfqq = RQ_CFQQ(rq);
const int sync = rq_is_sync(rq);
BUG_ON(!cfqq->queued[sync]);
cfqq->queued[sync]--;
elv_rb_del(&cfqq->sort_list, rq);
if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
/*
* Queue will be deleted from service tree when we actually
* expire it later. Right now just remove it from prio tree
* as it is empty.
*/
if (cfqq->p_root) {
rb_erase(&cfqq->p_node, cfqq->p_root);
cfqq->p_root = NULL;
}
}
}
static void cfq_add_rq_rb(struct request *rq)
{
struct cfq_queue *cfqq = RQ_CFQQ(rq);
struct cfq_data *cfqd = cfqq->cfqd;
struct request *prev;
cfqq->queued[rq_is_sync(rq)]++;
elv_rb_add(&cfqq->sort_list, rq);
if (!cfq_cfqq_on_rr(cfqq))
cfq_add_cfqq_rr(cfqd, cfqq);
/*
* check if this request is a better next-serve candidate
*/
prev = cfqq->next_rq;
cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
/*
* adjust priority tree position, if ->next_rq changes
*/
if (prev != cfqq->next_rq)
cfq_prio_tree_add(cfqd, cfqq);
BUG_ON(!cfqq->next_rq);
}
static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
elv_rb_del(&cfqq->sort_list, rq);
cfqq->queued[rq_is_sync(rq)]--;
cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
rq_data_dir(rq), rq_is_sync(rq));
cfq_add_rq_rb(rq);
cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
&cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
rq_is_sync(rq));
}
static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
struct task_struct *tsk = current;
struct cfq_io_cq *cic;
struct cfq_queue *cfqq;
cic = cfq_cic_lookup(cfqd, tsk->io_context);
if (!cic)
return NULL;
cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
if (cfqq) {
sector_t sector = bio->bi_sector + bio_sectors(bio);
return elv_rb_find(&cfqq->sort_list, sector);
}
return NULL;
}
static void cfq_activate_request(struct request_queue *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
cfqd->rq_in_driver++;
cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
cfqd->rq_in_driver);
cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
}
static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
WARN_ON(!cfqd->rq_in_driver);
cfqd->rq_in_driver--;
cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
cfqd->rq_in_driver);
}
static void cfq_remove_request(struct request *rq)
{
struct cfq_queue *cfqq = RQ_CFQQ(rq);
if (cfqq->next_rq == rq)
cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
list_del_init(&rq->queuelist);
cfq_del_rq_rb(rq);
cfqq->cfqd->rq_queued--;
cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
rq_data_dir(rq), rq_is_sync(rq));
if (rq->cmd_flags & REQ_PRIO) {
WARN_ON(!cfqq->prio_pending);
cfqq->prio_pending--;
}
}
static int cfq_merge(struct request_queue *q, struct request **req,
struct bio *bio)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct request *__rq;
__rq = cfq_find_rq_fmerge(cfqd, bio);
if (__rq && elv_rq_merge_ok(__rq, bio)) {
*req = __rq;
return ELEVATOR_FRONT_MERGE;
}
return ELEVATOR_NO_MERGE;
}
static void cfq_merged_request(struct request_queue *q, struct request *req,
int type)
{
if (type == ELEVATOR_FRONT_MERGE) {
struct cfq_queue *cfqq = RQ_CFQQ(req);
cfq_reposition_rq_rb(cfqq, req);
}
}
static void cfq_bio_merged(struct request_queue *q, struct request *req,
struct bio *bio)
{
cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
bio_data_dir(bio), cfq_bio_sync(bio));
}
static void
cfq_merged_requests(struct request_queue *q, struct request *rq,
struct request *next)
{
struct cfq_queue *cfqq = RQ_CFQQ(rq);
struct cfq_data *cfqd = q->elevator->elevator_data;
/*
* reposition in fifo if next is older than rq
*/
if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
list_move(&rq->queuelist, &next->queuelist);
rq_set_fifo_time(rq, rq_fifo_time(next));
}
if (cfqq->next_rq == next)
cfqq->next_rq = rq;
cfq_remove_request(next);
cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
rq_data_dir(next), rq_is_sync(next));
cfqq = RQ_CFQQ(next);
/*
* all requests of this queue are merged to other queues, delete it
* from the service tree. If it's the active_queue,
* cfq_dispatch_requests() will choose to expire it or do idle
*/
if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
cfqq != cfqd->active_queue)
cfq_del_cfqq_rr(cfqd, cfqq);
}
static int cfq_allow_merge(struct request_queue *q, struct request *rq,
struct bio *bio)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_io_cq *cic;
struct cfq_queue *cfqq;
/*
* Disallow merge of a sync bio into an async request.
*/
if (cfq_bio_sync(bio) && !rq_is_sync(rq))
return false;
/*
* Lookup the cfqq that this bio will be queued with and allow
* merge only if rq is queued there.
*/
cic = cfq_cic_lookup(cfqd, current->io_context);
if (!cic)
return false;
cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
return cfqq == RQ_CFQQ(rq);
}
static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
del_timer(&cfqd->idle_slice_timer);
cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
}
static void __cfq_set_active_queue(struct cfq_data *cfqd,
struct cfq_queue *cfqq)
{
if (cfqq) {
cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
cfqd->serving_prio, cfqd->serving_type);
cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
cfqq->slice_start = 0;
cfqq->dispatch_start = jiffies;
cfqq->allocated_slice = 0;
cfqq->slice_end = 0;
cfqq->slice_dispatch = 0;
cfqq->nr_sectors = 0;
cfq_clear_cfqq_wait_request(cfqq);
cfq_clear_cfqq_must_dispatch(cfqq);
cfq_clear_cfqq_must_alloc_slice(cfqq);
cfq_clear_cfqq_fifo_expire(cfqq);
cfq_mark_cfqq_slice_new(cfqq);
cfq_del_timer(cfqd, cfqq);
}
cfqd->active_queue = cfqq;
}
/*
* current cfqq expired its slice (or was too idle), select new one
*/
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
bool timed_out)
{
cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
if (cfq_cfqq_wait_request(cfqq))
cfq_del_timer(cfqd, cfqq);
cfq_clear_cfqq_wait_request(cfqq);
cfq_clear_cfqq_wait_busy(cfqq);
/*
* If this cfqq is shared between multiple processes, check to
* make sure that those processes are still issuing I/Os within
* the mean seek distance. If not, it may be time to break the
* queues apart again.
*/
if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
cfq_mark_cfqq_split_coop(cfqq);
/*
* store what was left of this slice, if the queue idled/timed out
*/
if (timed_out) {
if (cfq_cfqq_slice_new(cfqq))
cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
else
cfqq->slice_resid = cfqq->slice_end - jiffies;
cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
}
cfq_group_served(cfqd, cfqq->cfqg, cfqq);
if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
cfq_del_cfqq_rr(cfqd, cfqq);
cfq_resort_rr_list(cfqd, cfqq);
if (cfqq == cfqd->active_queue)
cfqd->active_queue = NULL;
if (cfqd->active_cic) {
put_io_context(cfqd->active_cic->icq.ioc);
cfqd->active_cic = NULL;
}
}
static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
{
struct cfq_queue *cfqq = cfqd->active_queue;
if (cfqq)
__cfq_slice_expired(cfqd, cfqq, timed_out);
}
/*
* Get next queue for service. Unless we have a queue preemption,
* we'll simply select the first cfqq in the service tree.
*/
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
{
struct cfq_rb_root *service_tree =
service_tree_for(cfqd->serving_group, cfqd->serving_prio,
cfqd->serving_type);
if (!cfqd->rq_queued)
return NULL;
/* There is nothing to dispatch */
if (!service_tree)
return NULL;
if (RB_EMPTY_ROOT(&service_tree->rb))
return NULL;
return cfq_rb_first(service_tree);
}
static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
{
struct cfq_group *cfqg;
struct cfq_queue *cfqq;
int i, j;
struct cfq_rb_root *st;
if (!cfqd->rq_queued)
return NULL;
cfqg = cfq_get_next_cfqg(cfqd);
if (!cfqg)
return NULL;
for_each_cfqg_st(cfqg, i, j, st)
if ((cfqq = cfq_rb_first(st)) != NULL)
return cfqq;
return NULL;
}
/*
* Get and set a new active queue for service.
*/
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
struct cfq_queue *cfqq)
{
if (!cfqq)
cfqq = cfq_get_next_queue(cfqd);
__cfq_set_active_queue(cfqd, cfqq);
return cfqq;
}
static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
struct request *rq)
{
if (blk_rq_pos(rq) >= cfqd->last_position)
return blk_rq_pos(rq) - cfqd->last_position;
else
return cfqd->last_position - blk_rq_pos(rq);
}
static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct request *rq)
{
return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
}
static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
struct cfq_queue *cur_cfqq)
{
struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
struct rb_node *parent, *node;
struct cfq_queue *__cfqq;
sector_t sector = cfqd->last_position;
if (RB_EMPTY_ROOT(root))
return NULL;
/*
* First, if we find a request starting at the end of the last
* request, choose it.
*/
__cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
if (__cfqq)
return __cfqq;
/*
* If the exact sector wasn't found, the parent of the NULL leaf
* will contain the closest sector.
*/
__cfqq = rb_entry(parent, struct cfq_queue, p_node);
if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
return __cfqq;
if (blk_rq_pos(__cfqq->next_rq) < sector)
node = rb_next(&__cfqq->p_node);
else
node = rb_prev(&__cfqq->p_node);
if (!node)
return NULL;
__cfqq = rb_entry(node, struct cfq_queue, p_node);
if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
return __cfqq;
return NULL;
}
/*
* cfqd - obvious
* cur_cfqq - passed in so that we don't decide that the current queue is
* closely cooperating with itself.
*
* So, basically we're assuming that cur_cfqq has dispatched at least
* one request, and that cfqd->last_position reflects a position on the disk
* associated with the I/O issued by cur_cfqq. I'm not sure this is a valid
* assumption.
*/
static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
struct cfq_queue *cur_cfqq)
{
struct cfq_queue *cfqq;
if (cfq_class_idle(cur_cfqq))
return NULL;
if (!cfq_cfqq_sync(cur_cfqq))
return NULL;
if (CFQQ_SEEKY(cur_cfqq))
return NULL;
/*
* Don't search priority tree if it's the only queue in the group.
*/
if (cur_cfqq->cfqg->nr_cfqq == 1)
return NULL;
/*
* We should notice if some of the queues are cooperating, e.g.
* working closely on the same area of the disk. In that case,
* we can group them together and don't waste time idling.
*/
cfqq = cfqq_close(cfqd, cur_cfqq);
if (!cfqq)
return NULL;
/* If new queue belongs to different cfq_group, don't choose it */
if (cur_cfqq->cfqg != cfqq->cfqg)
return NULL;
/*
* It only makes sense to merge sync queues.
*/
if (!cfq_cfqq_sync(cfqq))
return NULL;
if (CFQQ_SEEKY(cfqq))
return NULL;
/*
* Do not merge queues of different priority classes
*/
if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
return NULL;
return cfqq;
}
/*
* Determine whether we should enforce idle window for this queue.
*/
static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
enum wl_prio_t prio = cfqq_prio(cfqq);
struct cfq_rb_root *service_tree = cfqq->service_tree;
BUG_ON(!service_tree);
BUG_ON(!service_tree->count);
if (!cfqd->cfq_slice_idle)
return false;
/* We never do for idle class queues. */
if (prio == IDLE_WORKLOAD)
return false;
/* We do for queues that were marked with idle window flag. */
if (cfq_cfqq_idle_window(cfqq) &&
!(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
return true;
/*
* Otherwise, we do only if they are the last ones
* in their service tree.
*/
if (service_tree->count == 1 && cfq_cfqq_sync(cfqq) &&
!cfq_io_thinktime_big(cfqd, &service_tree->ttime, false))
return true;
cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
service_tree->count);
return false;
}
static void cfq_arm_slice_timer(struct cfq_data *cfqd)
{
struct cfq_queue *cfqq = cfqd->active_queue;
struct cfq_io_cq *cic;
unsigned long sl, group_idle = 0;
/*
* For an SSD device without seek penalty, disable idling. But only do so
* for devices that support queuing, otherwise we still have a problem
* with sync vs async workloads.
*/
if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
return;
WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
WARN_ON(cfq_cfqq_slice_new(cfqq));
/*
* idle is disabled, either manually or by past process history
*/
if (!cfq_should_idle(cfqd, cfqq)) {
/* no queue idling. Check for group idling */
if (cfqd->cfq_group_idle)
group_idle = cfqd->cfq_group_idle;
else
return;
}
/*
* still active requests from this queue, don't idle
*/
if (cfqq->dispatched)
return;
/*
* task has exited, don't wait
*/
cic = cfqd->active_cic;
if (!cic || !atomic_read(&cic->icq.ioc->nr_tasks))
return;
/*
* If our average think time is larger than the remaining time
* slice, then don't idle. This avoids overrunning the allotted
* time slice.
*/
if (sample_valid(cic->ttime.ttime_samples) &&
(cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
cic->ttime.ttime_mean);
return;
}
/* There are other queues in the group, don't do group idle */
if (group_idle && cfqq->cfqg->nr_cfqq > 1)
return;
cfq_mark_cfqq_wait_request(cfqq);
if (group_idle)
sl = cfqd->cfq_group_idle;
else
sl = cfqd->cfq_slice_idle;
mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
group_idle ? 1 : 0);
}
/*
* Move request from internal lists to the request queue dispatch list.
*/
static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_queue *cfqq = RQ_CFQQ(rq);
cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
cfq_remove_request(rq);
cfqq->dispatched++;
(RQ_CFQG(rq))->dispatched++;
elv_dispatch_sort(q, rq);
cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
cfqq->nr_sectors += blk_rq_sectors(rq);
cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
rq_data_dir(rq), rq_is_sync(rq));
}
/*
* return expired entry, or NULL to just start from scratch in rbtree
*/
static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
{
struct request *rq = NULL;
if (cfq_cfqq_fifo_expire(cfqq))
return NULL;
cfq_mark_cfqq_fifo_expire(cfqq);
if (list_empty(&cfqq->fifo))
return NULL;
rq = rq_entry_fifo(cfqq->fifo.next);
if (time_before(jiffies, rq_fifo_time(rq)))
rq = NULL;
cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
return rq;
}
static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
const int base_rq = cfqd->cfq_slice_async_rq;
WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
}
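/*
 * Worked example, assuming the historical default cfq_slice_async_rq == 2:
 * an async queue at prio 4 may dispatch up to 2 * 2 * (8 - 4) = 16
 * requests per slice, while prio 0 gets 32 and prio 7 gets 4.
 */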
/*
* Must be called with the queue_lock held.
*/
static int cfqq_process_refs(struct cfq_queue *cfqq)
{
int process_refs, io_refs;
io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
process_refs = cfqq->ref - io_refs;
BUG_ON(process_refs < 0);
return process_refs;
}
static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
{
int process_refs, new_process_refs;
struct cfq_queue *__cfqq;
/*
* If there are no process references on the new_cfqq, then it is
* unsafe to follow the ->new_cfqq chain as other cfqq's in the
* chain may have dropped their last reference (not just their
* last process reference).
*/
if (!cfqq_process_refs(new_cfqq))
return;
/* Avoid a circular list and skip interim queue merges */
while ((__cfqq = new_cfqq->new_cfqq)) {
if (__cfqq == cfqq)
return;
new_cfqq = __cfqq;
}
process_refs = cfqq_process_refs(cfqq);
new_process_refs = cfqq_process_refs(new_cfqq);
/*
* If the process for the cfqq has gone away, there is no
* sense in merging the queues.
*/
if (process_refs == 0 || new_process_refs == 0)
return;
/*
* Merge in the direction of the lesser amount of work.
*/
if (new_process_refs >= process_refs) {
cfqq->new_cfqq = new_cfqq;
new_cfqq->ref += process_refs;
} else {
new_cfqq->new_cfqq = cfqq;
cfqq->ref += new_process_refs;
}
}
static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
struct cfq_group *cfqg, enum wl_prio_t prio)
{
struct cfq_queue *queue;
int i;
bool key_valid = false;
unsigned long lowest_key = 0;
enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
for (i = 0; i <= SYNC_WORKLOAD; ++i) {
/* select the one with lowest rb_key */
queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
if (queue &&
(!key_valid || time_before(queue->rb_key, lowest_key))) {
lowest_key = queue->rb_key;
cur_best = i;
key_valid = true;
}
}
return cur_best;
}
static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
unsigned slice;
unsigned count;
struct cfq_rb_root *st;
unsigned group_slice;
enum wl_prio_t original_prio = cfqd->serving_prio;
/* Choose next priority. RT > BE > IDLE */
if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
cfqd->serving_prio = RT_WORKLOAD;
else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
cfqd->serving_prio = BE_WORKLOAD;
else {
cfqd->serving_prio = IDLE_WORKLOAD;
cfqd->workload_expires = jiffies + 1;
return;
}
if (original_prio != cfqd->serving_prio)
goto new_workload;
/*
* For RT and BE, we have to choose also the type
* (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
* expiration time
*/
st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
count = st->count;
/*
* check workload expiration, and that we still have other queues ready
*/
if (count && !time_after(jiffies, cfqd->workload_expires))
return;
new_workload:
/* otherwise select new workload type */
cfqd->serving_type =
cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
count = st->count;
/*
* the workload slice is computed as a fraction of target latency
* proportional to the number of queues in that workload, over
* all the queues in the same priority class
*/
group_slice = cfq_group_slice(cfqd, cfqg);
slice = group_slice * count /
max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
if (cfqd->serving_type == ASYNC_WORKLOAD) {
unsigned int tmp;
/*
* Async queues are currently system wide. Just taking the
* proportion of queues within the same group will lead to a higher
* async ratio system wide, as the root group generally has a
* higher weight. A more accurate approach would be to
* calculate the system wide async/sync ratio.
*/
tmp = cfqd->cfq_target_latency *
cfqg_busy_async_queues(cfqd, cfqg);
tmp = tmp/cfqd->busy_queues;
slice = min_t(unsigned, slice, tmp);
/* async workload slice is scaled down according to
* the sync/async slice ratio. */
slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
} else
/* sync workload slice is at least 2 * cfq_slice_idle */
slice = max(slice, 2 * cfqd->cfq_slice_idle);
slice = max_t(unsigned, slice, CFQ_MIN_TT);
cfq_log(cfqd, "workload slice:%d", slice);
cfqd->workload_expires = jiffies + slice;
}
static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
{
struct cfq_rb_root *st = &cfqd->grp_service_tree;
struct cfq_group *cfqg;
if (RB_EMPTY_ROOT(&st->rb))
return NULL;
cfqg = cfq_rb_first_group(st);
update_min_vdisktime(st);
return cfqg;
}
static void cfq_choose_cfqg(struct cfq_data *cfqd)
{
struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
if (!cfqg)
return;
cfqd->serving_group = cfqg;
/* Restore the workload type data */
if (cfqg->saved_workload_slice) {
cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
cfqd->serving_type = cfqg->saved_workload;
cfqd->serving_prio = cfqg->saved_serving_prio;
} else
cfqd->workload_expires = jiffies - 1;
choose_service_tree(cfqd, cfqg);
}
/*
* Select a queue for service. If we have a current active queue,
* check whether to continue servicing it, or retrieve and set a new one.
*/
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
{
struct cfq_queue *cfqq, *new_cfqq = NULL;
cfqq = cfqd->active_queue;
if (!cfqq)
goto new_queue;
if (!cfqd->rq_queued)
return NULL;
/*
* We were waiting for group to get backlogged. Expire the queue
*/
if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
goto expire;
/*
* The active queue has run out of time, expire it and select new.
*/
if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
/*
* If slice had not expired at the completion of last request
* we might not have turned on wait_busy flag. Don't expire
* the queue yet. Allow the group to get backlogged.
*
* The very fact that we have used the slice means we have been
* idling all along on this queue, so it should be ok to wait for
* this request to complete.
*/
if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
&& cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
cfqq = NULL;
goto keep_queue;
} else
goto check_group_idle;
}
/*
* The active queue has requests and isn't expired, allow it to
* dispatch.
*/
if (!RB_EMPTY_ROOT(&cfqq->sort_list))
goto keep_queue;
/*
* If another queue has a request waiting within our mean seek
* distance, let it run. The expire code will check for close
* cooperators and put the close queue at the front of the service
* tree. If possible, merge the expiring queue with the new cfqq.
*/
new_cfqq = cfq_close_cooperator(cfqd, cfqq);
if (new_cfqq) {
if (!cfqq->new_cfqq)
cfq_setup_merge(cfqq, new_cfqq);
goto expire;
}
/*
* No requests pending. If the active queue still has requests in
* flight or is idling for a new request, allow either of these
* conditions to happen (or time out) before selecting a new queue.
*/
if (timer_pending(&cfqd->idle_slice_timer)) {
cfqq = NULL;
goto keep_queue;
}
/*
* This is a deep seek queue, but the device is much faster than
* the queue can deliver; don't idle.
*/
if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
(cfq_cfqq_slice_new(cfqq) ||
(cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
cfq_clear_cfqq_deep(cfqq);
cfq_clear_cfqq_idle_window(cfqq);
}
if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
cfqq = NULL;
goto keep_queue;
}
/*
* If group idle is enabled and there are requests dispatched from
* this group, wait for requests to complete.
*/
check_group_idle:
if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
cfqq->cfqg->dispatched &&
!cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
cfqq = NULL;
goto keep_queue;
}
expire:
cfq_slice_expired(cfqd, 0);
new_queue:
/*
* Current queue expired. Check if we have to switch to a new
* service tree
*/
if (!new_cfqq)
cfq_choose_cfqg(cfqd);
cfqq = cfq_set_active_queue(cfqd, new_cfqq);
keep_queue:
return cfqq;
}
static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
{
int dispatched = 0;
while (cfqq->next_rq) {
cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
dispatched++;
}
BUG_ON(!list_empty(&cfqq->fifo));
/* By default cfqq is not expired if it is empty. Do it explicitly */
__cfq_slice_expired(cfqq->cfqd, cfqq, 0);
return dispatched;
}
/*
* Drain our current requests. Used for barriers and when switching
* io schedulers on-the-fly.
*/
static int cfq_forced_dispatch(struct cfq_data *cfqd)
{
struct cfq_queue *cfqq;
int dispatched = 0;
/* Expire the timeslice of the current active queue first */
cfq_slice_expired(cfqd, 0);
while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
__cfq_set_active_queue(cfqd, cfqq);
dispatched += __cfq_forced_dispatch_cfqq(cfqq);
}
BUG_ON(cfqd->busy_queues);
cfq_log(cfqd, "forced_dispatch=%d", dispatched);
return dispatched;
}
static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
struct cfq_queue *cfqq)
{
/* the queue hasn't finished any request, can't estimate */
if (cfq_cfqq_slice_new(cfqq))
return true;
if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
cfqq->slice_end))
return true;
return false;
}
static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
unsigned int max_dispatch;
/*
* Drain async requests before we start sync IO
*/
if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
return false;
/*
* If this is an async queue and we have sync IO in flight, let it wait
*/
if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
return false;
max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
if (cfq_class_idle(cfqq))
max_dispatch = 1;
/*
* Does this cfqq already have too much IO in flight?
*/
if (cfqq->dispatched >= max_dispatch) {
bool promote_sync = false;
/*
* idle queue must always only have a single IO in flight
*/
if (cfq_class_idle(cfqq))
return false;
/*
* If there is only one sync queue,
* we can ignore the async queues here and give the sync
* queue no dispatch limit, because a sync queue can
* preempt async queues, so limiting the sync queue doesn't
* make sense. This is useful for the aiostress test.
*/
if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
promote_sync = true;
/*
* We have other queues, don't allow more IO from this one
*/
if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
!promote_sync)
return false;
/*
* Sole queue user, no limit
*/
if (cfqd->busy_queues == 1 || promote_sync)
max_dispatch = -1;
else
/*
* Normally we start throttling cfqq when cfq_quantum/2
* requests have been dispatched. But we can drive
* deeper queue depths at the beginning of the slice,
* subject to the upper limit of cfq_quantum.
*/
max_dispatch = cfqd->cfq_quantum;
}
/*
* Async queues must wait a bit before being allowed dispatch.
* We also ramp up the dispatch depth gradually for async IO,
* based on the last sync IO we serviced
*/
if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
unsigned int depth;
depth = last_sync / cfqd->cfq_slice[1];
if (!depth && !cfqq->dispatched)
depth = 1;
if (depth < max_dispatch)
max_dispatch = depth;
}
/*
* If we're below the current max, allow a dispatch
*/
return cfqq->dispatched < max_dispatch;
}
/*
* Dispatch a request from cfqq, moving them to the request queue
* dispatch list.
*/
static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
struct request *rq;
BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
if (!cfq_may_dispatch(cfqd, cfqq))
return false;
/*
* follow expired path, else get first next available
*/
rq = cfq_check_fifo(cfqq);
if (!rq)
rq = cfqq->next_rq;
/*
* insert request into driver dispatch list
*/
cfq_dispatch_insert(cfqd->queue, rq);
if (!cfqd->active_cic) {
struct cfq_io_cq *cic = RQ_CIC(rq);
atomic_long_inc(&cic->icq.ioc->refcount);
cfqd->active_cic = cic;
}
return true;
}
/*
* Find the cfqq that we need to service and move a request from that to the
* dispatch list
*/
static int cfq_dispatch_requests(struct request_queue *q, int force)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_queue *cfqq;
if (!cfqd->busy_queues)
return 0;
if (unlikely(force))
return cfq_forced_dispatch(cfqd);
cfqq = cfq_select_queue(cfqd);
if (!cfqq)
return 0;
/*
* Dispatch a request from this cfqq, if it is allowed
*/
if (!cfq_dispatch_request(cfqd, cfqq))
return 0;
cfqq->slice_dispatch++;
cfq_clear_cfqq_must_dispatch(cfqq);
/*
 * expire an async queue immediately if it has used up its slice. An idle
 * queue always expires after 1 dispatch round.
 */
if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
cfq_class_idle(cfqq))) {
cfqq->slice_end = jiffies + 1;
cfq_slice_expired(cfqd, 0);
}
cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
return 1;
}
/*
* task holds one reference to the queue, dropped when task exits. each rq
* in-flight on this queue also holds a reference, dropped when rq is freed.
*
* Each cfq queue took a reference on the parent group. Drop it now.
* queue lock must be held here.
*/
static void cfq_put_queue(struct cfq_queue *cfqq)
{
struct cfq_data *cfqd = cfqq->cfqd;
struct cfq_group *cfqg;
BUG_ON(cfqq->ref <= 0);
cfqq->ref--;
if (cfqq->ref)
return;
cfq_log_cfqq(cfqd, cfqq, "put_queue");
BUG_ON(rb_first(&cfqq->sort_list));
BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
cfqg = cfqq->cfqg;
if (unlikely(cfqd->active_queue == cfqq)) {
__cfq_slice_expired(cfqd, cfqq, 0);
cfq_schedule_dispatch(cfqd);
}
BUG_ON(cfq_cfqq_on_rr(cfqq));
kmem_cache_free(cfq_pool, cfqq);
cfq_put_cfqg(cfqg);
}
static void cfq_put_cooperator(struct cfq_queue *cfqq)
{
struct cfq_queue *__cfqq, *next;
/*
* If this queue was scheduled to merge with another queue, be
* sure to drop the reference taken on that queue (and others in
* the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
*/
__cfqq = cfqq->new_cfqq;
while (__cfqq) {
if (__cfqq == cfqq) {
WARN(1, "cfqq->new_cfqq loop detected\n");
break;
}
next = __cfqq->new_cfqq;
cfq_put_queue(__cfqq);
__cfqq = next;
}
}
static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
if (unlikely(cfqq == cfqd->active_queue)) {
__cfq_slice_expired(cfqd, cfqq, 0);
cfq_schedule_dispatch(cfqd);
}
cfq_put_cooperator(cfqq);
cfq_put_queue(cfqq);
}
static void cfq_init_icq(struct io_cq *icq)
{
struct cfq_io_cq *cic = icq_to_cic(icq);
cic->ttime.last_end_request = jiffies;
}
static void cfq_exit_icq(struct io_cq *icq)
{
struct cfq_io_cq *cic = icq_to_cic(icq);
struct cfq_data *cfqd = cic_to_cfqd(cic);
if (cic->cfqq[BLK_RW_ASYNC]) {
cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
cic->cfqq[BLK_RW_ASYNC] = NULL;
}
if (cic->cfqq[BLK_RW_SYNC]) {
cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
cic->cfqq[BLK_RW_SYNC] = NULL;
}
}
static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
{
struct task_struct *tsk = current;
int ioprio_class;
if (!cfq_cfqq_prio_changed(cfqq))
return;
ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
switch (ioprio_class) {
default:
printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
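		/* fall through */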
case IOPRIO_CLASS_NONE:
/*
* no prio set, inherit CPU scheduling settings
*/
cfqq->ioprio = task_nice_ioprio(tsk);
cfqq->ioprio_class = task_nice_ioclass(tsk);
break;
case IOPRIO_CLASS_RT:
cfqq->ioprio = task_ioprio(ioc);
cfqq->ioprio_class = IOPRIO_CLASS_RT;
break;
case IOPRIO_CLASS_BE:
cfqq->ioprio = task_ioprio(ioc);
cfqq->ioprio_class = IOPRIO_CLASS_BE;
break;
case IOPRIO_CLASS_IDLE:
cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
cfqq->ioprio = 7;
cfq_clear_cfqq_idle_window(cfqq);
break;
}
/*
* keep track of original prio settings in case we have to temporarily
* elevate the priority of this queue
*/
cfqq->org_ioprio = cfqq->ioprio;
cfq_clear_cfqq_prio_changed(cfqq);
}
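/*
 * For the IOPRIO_CLASS_NONE fallback above, task_nice_ioprio() maps
 * nice -20..19 onto ioprio 0..7; a minimal sketch of that mapping
 * (matching (nice + 20) / 5 from <linux/ioprio.h>):
 */
#if 0
static int sketch_nice_to_ioprio(int nice)
{
	return (nice + 20) / 5;	/* e.g. nice 0 -> ioprio 4 */
}
#endif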
static void changed_ioprio(struct cfq_io_cq *cic)
{
struct cfq_data *cfqd = cic_to_cfqd(cic);
struct cfq_queue *cfqq;
if (unlikely(!cfqd))
return;
cfqq = cic->cfqq[BLK_RW_ASYNC];
if (cfqq) {
struct cfq_queue *new_cfqq;
new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->icq.ioc,
GFP_ATOMIC);
if (new_cfqq) {
cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
cfq_put_queue(cfqq);
}
}
cfqq = cic->cfqq[BLK_RW_SYNC];
if (cfqq)
cfq_mark_cfqq_prio_changed(cfqq);
}
static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
pid_t pid, bool is_sync)
{
RB_CLEAR_NODE(&cfqq->rb_node);
RB_CLEAR_NODE(&cfqq->p_node);
INIT_LIST_HEAD(&cfqq->fifo);
cfqq->ref = 0;
cfqq->cfqd = cfqd;
cfq_mark_cfqq_prio_changed(cfqq);
if (is_sync) {
if (!cfq_class_idle(cfqq))
cfq_mark_cfqq_idle_window(cfqq);
cfq_mark_cfqq_sync(cfqq);
}
cfqq->pid = pid;
}
#ifdef CONFIG_CFQ_GROUP_IOSCHED
static void changed_cgroup(struct cfq_io_cq *cic)
{
struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
struct cfq_data *cfqd = cic_to_cfqd(cic);
struct request_queue *q;
if (unlikely(!cfqd))
return;
q = cfqd->queue;
if (sync_cfqq) {
/*
 * Drop reference to sync queue. A new sync queue will be
 * assigned in the new group upon arrival of a fresh request.
 */
cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
cic_set_cfqq(cic, NULL, 1);
cfq_put_queue(sync_cfqq);
}
}
#endif /* CONFIG_CFQ_GROUP_IOSCHED */
static struct cfq_queue *
cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
struct io_context *ioc, gfp_t gfp_mask)
{
struct cfq_queue *cfqq, *new_cfqq = NULL;
struct cfq_io_cq *cic;
struct cfq_group *cfqg;
retry:
cfqg = cfq_get_cfqg(cfqd);
cic = cfq_cic_lookup(cfqd, ioc);
/* cic always exists here */
cfqq = cic_to_cfqq(cic, is_sync);
/*
* Always try a new alloc if we fell back to the OOM cfqq
* originally, since it should just be a temporary situation.
*/
if (!cfqq || cfqq == &cfqd->oom_cfqq) {
cfqq = NULL;
if (new_cfqq) {
cfqq = new_cfqq;
new_cfqq = NULL;
} else if (gfp_mask & __GFP_WAIT) {
spin_unlock_irq(cfqd->queue->queue_lock);
new_cfqq = kmem_cache_alloc_node(cfq_pool,
gfp_mask | __GFP_ZERO,
cfqd->queue->node);
spin_lock_irq(cfqd->queue->queue_lock);
if (new_cfqq)
goto retry;
} else {
cfqq = kmem_cache_alloc_node(cfq_pool,
gfp_mask | __GFP_ZERO,
cfqd->queue->node);
}
if (cfqq) {
cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
cfq_init_prio_data(cfqq, ioc);
cfq_link_cfqq_cfqg(cfqq, cfqg);
cfq_log_cfqq(cfqd, cfqq, "alloced");
} else
cfqq = &cfqd->oom_cfqq;
}
if (new_cfqq)
kmem_cache_free(cfq_pool, new_cfqq);
return cfqq;
}
static struct cfq_queue **
cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
{
switch (ioprio_class) {
case IOPRIO_CLASS_RT:
return &cfqd->async_cfqq[0][ioprio];
case IOPRIO_CLASS_BE:
return &cfqd->async_cfqq[1][ioprio];
case IOPRIO_CLASS_IDLE:
return &cfqd->async_idle_cfqq;
default:
BUG();
}
}
static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
gfp_t gfp_mask)
{
const int ioprio = task_ioprio(ioc);
const int ioprio_class = task_ioprio_class(ioc);
struct cfq_queue **async_cfqq = NULL;
struct cfq_queue *cfqq = NULL;
if (!is_sync) {
async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
cfqq = *async_cfqq;
}
if (!cfqq)
cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
/*
* pin the queue now that it's allocated, scheduler exit will prune it
*/
if (!is_sync && !(*async_cfqq)) {
cfqq->ref++;
*async_cfqq = cfqq;
}
cfqq->ref++;
return cfqq;
}
static void
__cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
{
unsigned long elapsed = jiffies - ttime->last_end_request;
elapsed = min(elapsed, 2UL * slice_idle);
ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
}
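/*
 * Standalone sketch of the fixed-point EWMA above: both counters decay
 * by 7/8 per sample, so ttime_samples converges to 256 and ttime_total
 * to 256 * mean; the +128 merely rounds the final divide. Illustrative
 * only.
 */
#if 0
struct sketch_ttime {
	unsigned long samples;	/* converges towards 256 */
	unsigned long total;	/* converges towards 256 * mean */
	unsigned long mean;
};

static void sketch_update_ttime(struct sketch_ttime *t, unsigned long elapsed)
{
	t->samples = (7 * t->samples + 256) / 8;
	t->total = (7 * t->total + 256 * elapsed) / 8;
	t->mean = (t->total + 128) / t->samples;
}
#endif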
static void
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct cfq_io_cq *cic)
{
if (cfq_cfqq_sync(cfqq)) {
__cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
__cfq_update_io_thinktime(&cfqq->service_tree->ttime,
cfqd->cfq_slice_idle);
}
#ifdef CONFIG_CFQ_GROUP_IOSCHED
__cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
#endif
}
static void
cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct request *rq)
{
sector_t sdist = 0;
sector_t n_sec = blk_rq_sectors(rq);
if (cfqq->last_request_pos) {
if (cfqq->last_request_pos < blk_rq_pos(rq))
sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
else
sdist = cfqq->last_request_pos - blk_rq_pos(rq);
}
cfqq->seek_history <<= 1;
if (blk_queue_nonrot(cfqd->queue))
cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
else
cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
}
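/*
 * Sketch of the 32-bit seek history shift register above: every request
 * shifts in one bit (1 = "seeky"), and the queue counts as seeky once
 * enough of the last 32 requests were. The > 32/8 threshold is an
 * assumption here; the real CFQQ_SEEKY() macro is defined elsewhere in
 * this file.
 */
#if 0
static int sketch_popcount32(unsigned int v)
{
	int n = 0;

	while (v) {
		v &= v - 1;	/* clear the lowest set bit */
		n++;
	}
	return n;
}

static int sketch_seeky(unsigned int seek_history)
{
	return sketch_popcount32(seek_history) > 32 / 8;
}
#endif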
/*
* Disable idle window if the process thinks too long or seeks so much that
* it doesn't matter
*/
static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct cfq_io_cq *cic)
{
int old_idle, enable_idle;
/*
* Don't idle for async or idle io prio class
*/
if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
return;
enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
if (cfqq->queued[0] + cfqq->queued[1] >= 4)
cfq_mark_cfqq_deep(cfqq);
if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
enable_idle = 0;
else if (!atomic_read(&cic->icq.ioc->nr_tasks) ||
!cfqd->cfq_slice_idle ||
(!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
enable_idle = 0;
else if (sample_valid(cic->ttime.ttime_samples)) {
if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
enable_idle = 0;
else
enable_idle = 1;
}
if (old_idle != enable_idle) {
cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
if (enable_idle)
cfq_mark_cfqq_idle_window(cfqq);
else
cfq_clear_cfqq_idle_window(cfqq);
}
}
/*
 * Check if new_cfqq should preempt the currently active queue. Returns false
 * for no, or if we aren't sure; true will cause a preempt.
 */
static bool
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
struct request *rq)
{
struct cfq_queue *cfqq;
cfqq = cfqd->active_queue;
if (!cfqq)
return false;
if (cfq_class_idle(new_cfqq))
return false;
if (cfq_class_idle(cfqq))
return true;
/*
* Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
*/
if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
return false;
/*
* if the new request is sync, but the currently running queue is
* not, let the sync request have priority.
*/
if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
return true;
if (new_cfqq->cfqg != cfqq->cfqg)
return false;
if (cfq_slice_used(cfqq))
return true;
/* Allow preemption only if we are idling on sync-noidle tree */
if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
new_cfqq->service_tree->count == 2 &&
RB_EMPTY_ROOT(&cfqq->sort_list))
return true;
/*
* So both queues are sync. Let the new request get disk time if
* it's a metadata request and the current queue is doing regular IO.
*/
if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
return true;
/*
 * Allow an RT request to preempt an ongoing non-RT cfqq timeslice.
 */
if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
return true;
/* An idle queue should not be idle now for some reason */
if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
return true;
if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
return false;
/*
* if this request is as-good as one we would expect from the
* current cfqq, let it preempt
*/
if (cfq_rq_close(cfqd, cfqq, rq))
return true;
return false;
}
/*
* cfqq preempts the active queue. if we allowed preempt with no slice left,
* let it have half of its nominal slice.
*/
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
cfq_log_cfqq(cfqd, cfqq, "preempt");
cfq_slice_expired(cfqd, 1);
/*
* workload type is changed, don't save slice, otherwise preempt
* doesn't happen
*/
if (old_type != cfqq_type(cfqq))
cfqq->cfqg->saved_workload_slice = 0;
/*
 * Put the new queue at the front of the current list,
 * so we know that it will be selected next.
 */
BUG_ON(!cfq_cfqq_on_rr(cfqq));
cfq_service_tree_add(cfqd, cfqq, 1);
cfqq->slice_end = 0;
cfq_mark_cfqq_slice_new(cfqq);
}
/*
* Called when a new fs request (rq) is added (to cfqq). Check if there's
* something we should do about it
*/
static void
cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct request *rq)
{
struct cfq_io_cq *cic = RQ_CIC(rq);
cfqd->rq_queued++;
if (rq->cmd_flags & REQ_PRIO)
cfqq->prio_pending++;
cfq_update_io_thinktime(cfqd, cfqq, cic);
cfq_update_io_seektime(cfqd, cfqq, rq);
cfq_update_idle_window(cfqd, cfqq, cic);
cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
if (cfqq == cfqd->active_queue) {
/*
* Remember that we saw a request from this process, but
* don't start queuing just yet. Otherwise we risk seeing lots
* of tiny requests, because we disrupt the normal plugging
* and merging. If the request is already larger than a single
* page, let it rip immediately. For that case we assume that
* merging is already done. Ditto for a busy system that
* has other work pending, don't risk delaying until the
* idle timer unplug to continue working.
*/
if (cfq_cfqq_wait_request(cfqq)) {
if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
cfqd->busy_queues > 1) {
cfq_del_timer(cfqd, cfqq);
cfq_clear_cfqq_wait_request(cfqq);
__blk_run_queue(cfqd->queue);
} else {
cfq_blkiocg_update_idle_time_stats(
&cfqq->cfqg->blkg);
cfq_mark_cfqq_must_dispatch(cfqq);
}
}
} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
/*
 * not the active queue - expire the current slice if it is
 * idle and has expired its mean thinktime, or this new queue
 * has some old slice time left and is of higher priority, or
 * this new queue is RT and the current one is BE
 */
cfq_preempt_queue(cfqd, cfqq);
__blk_run_queue(cfqd->queue);
}
}
static void cfq_insert_request(struct request_queue *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_queue *cfqq = RQ_CFQQ(rq);
cfq_log_cfqq(cfqd, cfqq, "insert_request");
cfq_init_prio_data(cfqq, RQ_CIC(rq)->icq.ioc);
rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
list_add_tail(&rq->queuelist, &cfqq->fifo);
cfq_add_rq_rb(rq);
cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
&cfqd->serving_group->blkg, rq_data_dir(rq),
rq_is_sync(rq));
cfq_rq_enqueued(cfqd, cfqq, rq);
}
/*
* Update hw_tag based on peak queue depth over 50 samples under
* sufficient load.
*/
static void cfq_update_hw_tag(struct cfq_data *cfqd)
{
struct cfq_queue *cfqq = cfqd->active_queue;
if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
if (cfqd->hw_tag == 1)
return;
if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
return;
/*
 * If the active queue doesn't have enough requests and can idle, cfq
 * might not dispatch sufficient requests to the hardware. Don't zero
 * hw_tag in this case.
 */
if (cfqq && cfq_cfqq_idle_window(cfqq) &&
cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
return;
if (cfqd->hw_tag_samples++ < 50)
return;
if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
cfqd->hw_tag = 1;
else
cfqd->hw_tag = 0;
}
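/*
 * Sketch of the hw_tag heuristic above: remember the peak driver depth
 * over 50 loaded samples and assume queueing (TCQ/NCQ) hardware if the
 * device ever sustained at least CFQ_HW_QUEUE_MIN requests (5 in
 * mainline; treated as a parameter here).
 */
#if 0
static int sketch_detect_hw_tag(const unsigned int *depth_samples, int nr,
				unsigned int queue_min)
{
	unsigned int peak = 0;
	int i;

	for (i = 0; i < nr && i < 50; i++)
		if (depth_samples[i] > peak)
			peak = depth_samples[i];
	return peak >= queue_min;
}
#endif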
static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
struct cfq_io_cq *cic = cfqd->active_cic;
/* If the queue already has requests, don't wait */
if (!RB_EMPTY_ROOT(&cfqq->sort_list))
return false;
/* If there are other queues in the group, don't wait */
if (cfqq->cfqg->nr_cfqq > 1)
return false;
/* the only queue in the group, but think time is big */
if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
return false;
if (cfq_slice_used(cfqq))
return true;
/* if slice left is less than think time, wait busy */
if (cic && sample_valid(cic->ttime.ttime_samples)
&& (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
return true;
/*
 * If think time is less than a jiffy then ttime_mean=0 and the check
 * above will not be true. It might happen that the slice has not
 * expired yet but will expire soon (4-5 ns) during select_queue(). To
 * cover the case where think time is less than a jiffy, mark the queue
 * wait busy if only 1 jiffy is left in the slice.
 */
if (cfqq->slice_end - jiffies == 1)
return true;
return false;
}
static void cfq_completed_request(struct request_queue *q, struct request *rq)
{
struct cfq_queue *cfqq = RQ_CFQQ(rq);
struct cfq_data *cfqd = cfqq->cfqd;
const int sync = rq_is_sync(rq);
unsigned long now;
now = jiffies;
cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
!!(rq->cmd_flags & REQ_NOIDLE));
cfq_update_hw_tag(cfqd);
WARN_ON(!cfqd->rq_in_driver);
WARN_ON(!cfqq->dispatched);
cfqd->rq_in_driver--;
cfqq->dispatched--;
(RQ_CFQG(rq))->dispatched--;
cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
rq_start_time_ns(rq), rq_io_start_time_ns(rq),
rq_data_dir(rq), rq_is_sync(rq));
cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
if (sync) {
struct cfq_rb_root *service_tree;
RQ_CIC(rq)->ttime.last_end_request = now;
if (cfq_cfqq_on_rr(cfqq))
service_tree = cfqq->service_tree;
else
service_tree = service_tree_for(cfqq->cfqg,
cfqq_prio(cfqq), cfqq_type(cfqq));
service_tree->ttime.last_end_request = now;
if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
cfqd->last_delayed_sync = now;
}
#ifdef CONFIG_CFQ_GROUP_IOSCHED
cfqq->cfqg->ttime.last_end_request = now;
#endif
/*
* If this is the active queue, check if it needs to be expired,
* or if we want to idle in case it has no pending requests.
*/
if (cfqd->active_queue == cfqq) {
const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
if (cfq_cfqq_slice_new(cfqq)) {
cfq_set_prio_slice(cfqd, cfqq);
cfq_clear_cfqq_slice_new(cfqq);
}
/*
 * Should we wait for the next request to come in before we expire
 * the queue?
 */
if (cfq_should_wait_busy(cfqd, cfqq)) {
unsigned long extend_sl = cfqd->cfq_slice_idle;
if (!cfqd->cfq_slice_idle)
extend_sl = cfqd->cfq_group_idle;
cfqq->slice_end = jiffies + extend_sl;
cfq_mark_cfqq_wait_busy(cfqq);
cfq_log_cfqq(cfqd, cfqq, "will busy wait");
}
/*
* Idling is not enabled on:
* - expired queues
* - idle-priority queues
* - async queues
* - queues with still some requests queued
* - when there is a close cooperator
*/
if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
cfq_slice_expired(cfqd, 1);
else if (sync && cfqq_empty &&
!cfq_close_cooperator(cfqd, cfqq)) {
cfq_arm_slice_timer(cfqd);
}
}
if (!cfqd->rq_in_driver)
cfq_schedule_dispatch(cfqd);
}
static inline int __cfq_may_queue(struct cfq_queue *cfqq)
{
if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
cfq_mark_cfqq_must_alloc_slice(cfqq);
return ELV_MQUEUE_MUST;
}
return ELV_MQUEUE_MAY;
}
static int cfq_may_queue(struct request_queue *q, int rw)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct task_struct *tsk = current;
struct cfq_io_cq *cic;
struct cfq_queue *cfqq;
/*
 * don't force setup of a queue from here, as a call to may_queue
 * does not necessarily imply that a request actually will be queued.
 * so just look up a possibly existing queue, or return 'may queue'
 * if that fails
 */
cic = cfq_cic_lookup(cfqd, tsk->io_context);
if (!cic)
return ELV_MQUEUE_MAY;
cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
if (cfqq) {
cfq_init_prio_data(cfqq, cic->icq.ioc);
return __cfq_may_queue(cfqq);
}
return ELV_MQUEUE_MAY;
}
/*
* queue lock held here
*/
static void cfq_put_request(struct request *rq)
{
struct cfq_queue *cfqq = RQ_CFQQ(rq);
if (cfqq) {
const int rw = rq_data_dir(rq);
BUG_ON(!cfqq->allocated[rw]);
cfqq->allocated[rw]--;
/* Put down rq reference on cfqg */
cfq_put_cfqg(RQ_CFQG(rq));
rq->elv.priv[0] = NULL;
rq->elv.priv[1] = NULL;
cfq_put_queue(cfqq);
}
}
static struct cfq_queue *
cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
struct cfq_queue *cfqq)
{
cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
cic_set_cfqq(cic, cfqq->new_cfqq, 1);
cfq_mark_cfqq_coop(cfqq->new_cfqq);
cfq_put_queue(cfqq);
return cic_to_cfqq(cic, 1);
}
/*
* Returns NULL if a new cfqq should be allocated, or the old cfqq if this
* was the last process referring to said cfqq.
*/
static struct cfq_queue *
split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
{
if (cfqq_process_refs(cfqq) == 1) {
cfqq->pid = current->pid;
cfq_clear_cfqq_coop(cfqq);
cfq_clear_cfqq_split_coop(cfqq);
return cfqq;
}
cic_set_cfqq(cic, NULL, 1);
cfq_put_cooperator(cfqq);
cfq_put_queue(cfqq);
return NULL;
}
/*
* Allocate cfq data structures associated with this request.
*/
static int
cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
const int rw = rq_data_dir(rq);
const bool is_sync = rq_is_sync(rq);
struct cfq_queue *cfqq;
unsigned int changed;
might_sleep_if(gfp_mask & __GFP_WAIT);
spin_lock_irq(q->queue_lock);
/* handle changed notifications */
changed = icq_get_changed(&cic->icq);
if (unlikely(changed & ICQ_IOPRIO_CHANGED))
changed_ioprio(cic);
#ifdef CONFIG_CFQ_GROUP_IOSCHED
if (unlikely(changed & ICQ_CGROUP_CHANGED))
changed_cgroup(cic);
#endif
new_queue:
cfqq = cic_to_cfqq(cic, is_sync);
if (!cfqq || cfqq == &cfqd->oom_cfqq) {
cfqq = cfq_get_queue(cfqd, is_sync, cic->icq.ioc, gfp_mask);
cic_set_cfqq(cic, cfqq, is_sync);
} else {
/*
* If the queue was seeky for too long, break it apart.
*/
if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
cfqq = split_cfqq(cic, cfqq);
if (!cfqq)
goto new_queue;
}
/*
* Check to see if this queue is scheduled to merge with
* another, closely cooperating queue. The merging of
* queues happens here as it must be done in process context.
* The reference on new_cfqq was taken in merge_cfqqs.
*/
if (cfqq->new_cfqq)
cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
}
cfqq->allocated[rw]++;
cfqq->ref++;
rq->elv.priv[0] = cfqq;
rq->elv.priv[1] = cfq_ref_get_cfqg(cfqq->cfqg);
spin_unlock_irq(q->queue_lock);
return 0;
}
static void cfq_kick_queue(struct work_struct *work)
{
struct cfq_data *cfqd =
container_of(work, struct cfq_data, unplug_work);
struct request_queue *q = cfqd->queue;
spin_lock_irq(q->queue_lock);
__blk_run_queue(cfqd->queue);
spin_unlock_irq(q->queue_lock);
}
/*
* Timer running if the active_queue is currently idling inside its time slice
*/
static void cfq_idle_slice_timer(unsigned long data)
{
struct cfq_data *cfqd = (struct cfq_data *) data;
struct cfq_queue *cfqq;
unsigned long flags;
int timed_out = 1;
cfq_log(cfqd, "idle timer fired");
spin_lock_irqsave(cfqd->queue->queue_lock, flags);
cfqq = cfqd->active_queue;
if (cfqq) {
timed_out = 0;
/*
* We saw a request before the queue expired, let it through
*/
if (cfq_cfqq_must_dispatch(cfqq))
goto out_kick;
/*
* expired
*/
if (cfq_slice_used(cfqq))
goto expire;
/*
 * only expire and reinvoke the request handler if there are
 * other queues with pending requests
 */
if (!cfqd->busy_queues)
goto out_cont;
/*
* not expired and it has a request pending, let it dispatch
*/
if (!RB_EMPTY_ROOT(&cfqq->sort_list))
goto out_kick;
/*
* Queue depth flag is reset only when the idle didn't succeed
*/
cfq_clear_cfqq_deep(cfqq);
}
expire:
cfq_slice_expired(cfqd, timed_out);
out_kick:
cfq_schedule_dispatch(cfqd);
out_cont:
spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}
static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
del_timer_sync(&cfqd->idle_slice_timer);
cancel_work_sync(&cfqd->unplug_work);
}
static void cfq_put_async_queues(struct cfq_data *cfqd)
{
int i;
for (i = 0; i < IOPRIO_BE_NR; i++) {
if (cfqd->async_cfqq[0][i])
cfq_put_queue(cfqd->async_cfqq[0][i]);
if (cfqd->async_cfqq[1][i])
cfq_put_queue(cfqd->async_cfqq[1][i]);
}
if (cfqd->async_idle_cfqq)
cfq_put_queue(cfqd->async_idle_cfqq);
}
static void cfq_exit_queue(struct elevator_queue *e)
{
struct cfq_data *cfqd = e->elevator_data;
struct request_queue *q = cfqd->queue;
bool wait = false;
cfq_shutdown_timer_wq(cfqd);
spin_lock_irq(q->queue_lock);
if (cfqd->active_queue)
__cfq_slice_expired(cfqd, cfqd->active_queue, 0);
cfq_put_async_queues(cfqd);
cfq_release_cfq_groups(cfqd);
/*
* If there are groups which we could not unlink from blkcg list,
* wait for a rcu period for them to be freed.
*/
if (cfqd->nr_blkcg_linked_grps)
wait = true;
spin_unlock_irq(q->queue_lock);
cfq_shutdown_timer_wq(cfqd);
/*
 * Wait for cfqg->blkg->key accessors to exit their grace periods.
 * Do this wait only if there are other unlinked groups out
 * there. This can happen if the cgroup deletion path claimed the
 * responsibility of cleaning up a group before the queue cleanup code
 * got to the group.
 *
 * Do not call synchronize_rcu() unconditionally as there are drivers
 * which create/delete request queues hundreds of times during scan/boot
 * and synchronize_rcu() can take significant time and slow down boot.
 */
if (wait)
synchronize_rcu();
#ifdef CONFIG_CFQ_GROUP_IOSCHED
/* Free up per cpu stats for root group */
free_percpu(cfqd->root_group.blkg.stats_cpu);
#endif
kfree(cfqd);
}
static void *cfq_init_queue(struct request_queue *q)
{
struct cfq_data *cfqd;
int i, j;
struct cfq_group *cfqg;
struct cfq_rb_root *st;
cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
if (!cfqd)
return NULL;
/* Init root service tree */
cfqd->grp_service_tree = CFQ_RB_ROOT;
/* Init root group */
cfqg = &cfqd->root_group;
for_each_cfqg_st(cfqg, i, j, st)
*st = CFQ_RB_ROOT;
RB_CLEAR_NODE(&cfqg->rb_node);
/* Give preference to root group over other groups */
cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT;
#ifdef CONFIG_CFQ_GROUP_IOSCHED
/*
 * Set the root group reference to 2. One reference will be dropped when
 * all groups on cfqd->cfqg_list are deleted during queue exit. The
 * other reference remains because we don't want to delete this group:
 * it is statically allocated and gets destroyed when throtl_data goes
 * away.
 */
cfqg->ref = 2;
if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
kfree(cfqg);
kfree(cfqd);
return NULL;
}
rcu_read_lock();
cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
(void *)cfqd, 0);
rcu_read_unlock();
cfqd->nr_blkcg_linked_grps++;
/* Add group on cfqd->cfqg_list */
hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
#endif
/*
* Not strictly needed (since RB_ROOT just clears the node and we
* zeroed cfqd on alloc), but better be safe in case someone decides
* to add magic to the rb code
*/
for (i = 0; i < CFQ_PRIO_LISTS; i++)
cfqd->prio_trees[i] = RB_ROOT;
/*
* Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
* Grab a permanent reference to it, so that the normal code flow
* will not attempt to free it.
*/
cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
cfqd->oom_cfqq.ref++;
cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
cfqd->queue = q;
init_timer(&cfqd->idle_slice_timer);
cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
cfqd->idle_slice_timer.data = (unsigned long) cfqd;
INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
cfqd->cfq_quantum = cfq_quantum;
cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
cfqd->cfq_back_max = cfq_back_max;
cfqd->cfq_back_penalty = cfq_back_penalty;
cfqd->cfq_slice[0] = cfq_slice_async;
cfqd->cfq_slice[1] = cfq_slice_sync;
cfqd->cfq_target_latency = cfq_target_latency;
cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
cfqd->cfq_slice_idle = cfq_slice_idle;
cfqd->cfq_group_idle = cfq_group_idle;
cfqd->cfq_latency = 1;
cfqd->hw_tag = -1;
/*
 * we optimistically start assuming sync ops weren't delayed in the last
 * second, in order to allow a larger depth for async operations.
 */
cfqd->last_delayed_sync = jiffies - HZ;
return cfqd;
}
/*
* sysfs parts below -->
*/
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
return sprintf(page, "%d\n", var);
}
static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
char *p = (char *) page;
*var = simple_strtoul(p, &p, 10);
return count;
}
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
static ssize_t __FUNC(struct elevator_queue *e, char *page) \
{ \
struct cfq_data *cfqd = e->elevator_data; \
unsigned int __data = __VAR; \
if (__CONV) \
__data = jiffies_to_msecs(__data); \
return cfq_var_show(__data, (page)); \
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
#undef SHOW_FUNCTION
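/*
 * For reference, one expansion of the SHOW_FUNCTION template above;
 * SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1) produces
 * roughly:
 */
#if 0
static ssize_t cfq_slice_sync_show(struct elevator_queue *e, char *page)
{
	struct cfq_data *cfqd = e->elevator_data;
	unsigned int __data = cfqd->cfq_slice[1];
	if (1)	/* __CONV */
		__data = jiffies_to_msecs(__data);
	return cfq_var_show(__data, (page));
}
#endif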
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{ \
struct cfq_data *cfqd = e->elevator_data; \
unsigned int __data; \
int ret = cfq_var_store(&__data, (page), count); \
if (__data < (MIN)) \
__data = (MIN); \
else if (__data > (MAX)) \
__data = (MAX); \
if (__CONV) \
*(__PTR) = msecs_to_jiffies(__data); \
else \
*(__PTR) = __data; \
return ret; \
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
UINT_MAX, 0);
STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
#undef STORE_FUNCTION
#define CFQ_ATTR(name) \
__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
static struct elv_fs_entry cfq_attrs[] = {
CFQ_ATTR(quantum),
CFQ_ATTR(fifo_expire_sync),
CFQ_ATTR(fifo_expire_async),
CFQ_ATTR(back_seek_max),
CFQ_ATTR(back_seek_penalty),
CFQ_ATTR(slice_sync),
CFQ_ATTR(slice_async),
CFQ_ATTR(slice_async_rq),
CFQ_ATTR(slice_idle),
CFQ_ATTR(group_idle),
CFQ_ATTR(low_latency),
CFQ_ATTR(target_latency),
__ATTR_NULL
};
static struct elevator_type iosched_cfq = {
.ops = {
.elevator_merge_fn = cfq_merge,
.elevator_merged_fn = cfq_merged_request,
.elevator_merge_req_fn = cfq_merged_requests,
.elevator_allow_merge_fn = cfq_allow_merge,
.elevator_bio_merged_fn = cfq_bio_merged,
.elevator_dispatch_fn = cfq_dispatch_requests,
.elevator_add_req_fn = cfq_insert_request,
.elevator_activate_req_fn = cfq_activate_request,
.elevator_deactivate_req_fn = cfq_deactivate_request,
.elevator_completed_req_fn = cfq_completed_request,
.elevator_former_req_fn = elv_rb_former_request,
.elevator_latter_req_fn = elv_rb_latter_request,
.elevator_init_icq_fn = cfq_init_icq,
.elevator_exit_icq_fn = cfq_exit_icq,
.elevator_set_req_fn = cfq_set_request,
.elevator_put_req_fn = cfq_put_request,
.elevator_may_queue_fn = cfq_may_queue,
.elevator_init_fn = cfq_init_queue,
.elevator_exit_fn = cfq_exit_queue,
},
.icq_size = sizeof(struct cfq_io_cq),
.icq_align = __alignof__(struct cfq_io_cq),
.elevator_attrs = cfq_attrs,
.elevator_name = "cfq",
.elevator_owner = THIS_MODULE,
};
#ifdef CONFIG_CFQ_GROUP_IOSCHED
static struct blkio_policy_type blkio_policy_cfq = {
.ops = {
.blkio_unlink_group_fn = cfq_unlink_blkio_group,
.blkio_update_group_weight_fn = cfq_update_blkio_group_weight,
},
.plid = BLKIO_POLICY_PROP,
};
#else
static struct blkio_policy_type blkio_policy_cfq;
#endif
static int __init cfq_init(void)
{
int ret;
/*
* could be 0 on HZ < 1000 setups
*/
if (!cfq_slice_async)
cfq_slice_async = 1;
if (!cfq_slice_idle)
cfq_slice_idle = 1;
#ifdef CONFIG_CFQ_GROUP_IOSCHED
if (!cfq_group_idle)
cfq_group_idle = 1;
#else
cfq_group_idle = 0;
#endif
cfq_pool = KMEM_CACHE(cfq_queue, 0);
if (!cfq_pool)
return -ENOMEM;
ret = elv_register(&iosched_cfq);
if (ret) {
kmem_cache_destroy(cfq_pool);
return ret;
}
blkio_policy_register(&blkio_policy_cfq);
return 0;
}
static void __exit cfq_exit(void)
{
blkio_policy_unregister(&blkio_policy_cfq);
elv_unregister(&iosched_cfq);
kmem_cache_destroy(cfq_pool);
}
module_init(cfq_init);
module_exit(cfq_exit);
MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");
| gpl-2.0 |
scanno/android_kernel_motorola_msm8992 | arch/arm/mach-s3c24xx/clock-s3c2416.c | 2118 | 4143 | /* linux/arch/arm/mach-s3c2416/clock.c
*
* Copyright (c) 2010 Simtec Electronics
* Copyright (c) 2010 Ben Dooks <ben-linux@fluff.org>
*
* S3C2416 Clock control support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/init.h>
#include <linux/clk.h>
#include <plat/clock.h>
#include <plat/clock-clksrc.h>
#include <plat/cpu.h>
#include <plat/cpu-freq.h>
#include <plat/pll.h>
#include <asm/mach/map.h>
#include <mach/regs-clock.h>
#include <mach/regs-s3c2443-clock.h>
/* armdiv
 *
 * this clock is sourced from msysclk and can have a number of
 * divider values applied to it before being fed into armclk.
 * The real clock definition is done in s3c2443-clock.c;
 * only the armdiv divisor table must be defined here.
 */
static unsigned int armdiv[8] = {
[0] = 1,
[1] = 2,
[2] = 3,
[3] = 4,
[5] = 6,
[7] = 8,
};
static struct clksrc_clk hsspi_eplldiv = {
.clk = {
.name = "hsspi-eplldiv",
.parent = &clk_esysclk.clk,
.ctrlbit = (1 << 14),
.enable = s3c2443_clkcon_enable_s,
},
.reg_div = { .reg = S3C2443_CLKDIV1, .size = 2, .shift = 24 },
};
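/*
 * A sketch of how a .reg_div field like the one above is decoded by the
 * plat-samsung clksrc helpers: a "size"-bit divider value sitting at
 * "shift", with the hardware dividing by field + 1 (the +1 matches how
 * the generic code in plat/clock-clksrc.c treats it; noted here as an
 * assumption).
 */
#if 0
static unsigned int sketch_clksrc_get_div(unsigned int regval,
					  unsigned int shift,
					  unsigned int size)
{
	unsigned int mask = (1U << size) - 1;

	return ((regval >> shift) & mask) + 1;
}
#endif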
static struct clk *hsspi_sources[] = {
[0] = &hsspi_eplldiv.clk,
[1] = NULL, /* to fix */
};
static struct clksrc_clk hsspi_mux = {
.clk = {
.name = "hsspi-if",
},
.sources = &(struct clksrc_sources) {
.sources = hsspi_sources,
.nr_sources = ARRAY_SIZE(hsspi_sources),
},
.reg_src = { .reg = S3C2443_CLKSRC, .size = 1, .shift = 18 },
};
static struct clksrc_clk hsmmc_div[] = {
[0] = {
.clk = {
.name = "hsmmc-div",
.devname = "s3c-sdhci.0",
.parent = &clk_esysclk.clk,
},
.reg_div = { .reg = S3C2416_CLKDIV2, .size = 2, .shift = 6 },
},
[1] = {
.clk = {
.name = "hsmmc-div",
.devname = "s3c-sdhci.1",
.parent = &clk_esysclk.clk,
},
.reg_div = { .reg = S3C2443_CLKDIV1, .size = 2, .shift = 6 },
},
};
static struct clksrc_clk hsmmc_mux0 = {
.clk = {
.name = "hsmmc-if",
.devname = "s3c-sdhci.0",
.ctrlbit = (1 << 6),
.enable = s3c2443_clkcon_enable_s,
},
.sources = &(struct clksrc_sources) {
.nr_sources = 2,
.sources = (struct clk * []) {
[0] = &hsmmc_div[0].clk,
[1] = NULL, /* to fix */
},
},
.reg_src = { .reg = S3C2443_CLKSRC, .size = 1, .shift = 16 },
};
static struct clksrc_clk hsmmc_mux1 = {
.clk = {
.name = "hsmmc-if",
.devname = "s3c-sdhci.1",
.ctrlbit = (1 << 12),
.enable = s3c2443_clkcon_enable_s,
},
.sources = &(struct clksrc_sources) {
.nr_sources = 2,
.sources = (struct clk * []) {
[0] = &hsmmc_div[1].clk,
[1] = NULL, /* to fix */
},
},
.reg_src = { .reg = S3C2443_CLKSRC, .size = 1, .shift = 17 },
};
static struct clk hsmmc0_clk = {
.name = "hsmmc",
.devname = "s3c-sdhci.0",
.parent = &clk_h,
.enable = s3c2443_clkcon_enable_h,
.ctrlbit = S3C2416_HCLKCON_HSMMC0,
};
static struct clksrc_clk *clksrcs[] __initdata = {
&hsspi_eplldiv,
&hsspi_mux,
&hsmmc_div[0],
&hsmmc_div[1],
&hsmmc_mux0,
&hsmmc_mux1,
};
static struct clk_lookup s3c2416_clk_lookup[] = {
CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.0", &hsmmc0_clk),
CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &hsmmc_mux0.clk),
CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &hsmmc_mux1.clk),
/* s3c2443-spi.0 is used on s3c2416 and s3c2450 as well */
CLKDEV_INIT("s3c2443-spi.0", "spi_busclk2", &hsspi_mux.clk),
};
void __init s3c2416_init_clocks(int xtal)
{
u32 epllcon = __raw_readl(S3C2443_EPLLCON);
u32 epllcon1 = __raw_readl(S3C2443_EPLLCON+4);
int ptr;
/* s3c2416 EPLL compatible with s3c64xx */
clk_epll.rate = s3c_get_pll6553x(xtal, epllcon, epllcon1);
clk_epll.parent = &clk_epllref.clk;
s3c2443_common_init_clocks(xtal, s3c2416_get_pll,
armdiv, ARRAY_SIZE(armdiv),
S3C2416_CLKDIV0_ARMDIV_MASK);
for (ptr = 0; ptr < ARRAY_SIZE(clksrcs); ptr++)
s3c_register_clksrc(clksrcs[ptr], 1);
s3c24xx_register_clock(&hsmmc0_clk);
clkdev_add_table(s3c2416_clk_lookup, ARRAY_SIZE(s3c2416_clk_lookup));
s3c_pwmclk_init();
}
| gpl-2.0 |
CM-zenfone2/android_kernel_asus_moorefield | arch/blackfin/mach-bf527/boards/ezkit.c | 2118 | 32426 | /*
* Copyright 2004-2009 Analog Devices Inc.
* 2005 National ICT Australia (NICTA)
* Aidan Williams <aidan@nicta.com.au>
*
* Licensed under the GPL-2 or later.
*/
#include <linux/device.h>
#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/i2c.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/usb/musb.h>
#include <linux/leds.h>
#include <linux/input.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/reboot.h>
#include <asm/nand.h>
#include <asm/portmux.h>
#include <asm/dpmc.h>
#include <linux/spi/ad7877.h>
#include <asm/bfin_sport.h>
/*
 * Name the board for /proc/cpuinfo
 */
#ifdef CONFIG_BFIN527_EZKIT_V2
const char bfin_board_name[] = "ADI BF527-EZKIT V2";
#else
const char bfin_board_name[] = "ADI BF527-EZKIT";
#endif
/*
* Driver needs to know address, irq and flag pin.
*/
#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
#include <linux/usb/isp1760.h>
static struct resource bfin_isp1760_resources[] = {
[0] = {
.start = 0x203C0000,
.end = 0x203C0000 + 0x000fffff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_PF7,
.end = IRQ_PF7,
.flags = IORESOURCE_IRQ,
},
};
static struct isp1760_platform_data isp1760_priv = {
.is_isp1761 = 0,
.bus_width_16 = 1,
.port1_otg = 0,
.analog_oc = 0,
.dack_polarity_high = 0,
.dreq_polarity_high = 0,
};
static struct platform_device bfin_isp1760_device = {
.name = "isp1760",
.id = 0,
.dev = {
.platform_data = &isp1760_priv,
},
.num_resources = ARRAY_SIZE(bfin_isp1760_resources),
.resource = bfin_isp1760_resources,
};
#endif
#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE)
static struct resource musb_resources[] = {
[0] = {
.start = 0xffc03800,
.end = 0xffc03cff,
.flags = IORESOURCE_MEM,
},
[1] = { /* general IRQ */
.start = IRQ_USB_INT0,
.end = IRQ_USB_INT0,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
.name = "mc"
},
[2] = { /* DMA IRQ */
.start = IRQ_USB_DMA,
.end = IRQ_USB_DMA,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
.name = "dma"
},
};
static struct musb_hdrc_config musb_config = {
.multipoint = 0,
.dyn_fifo = 0,
.soft_con = 1,
.dma = 1,
.num_eps = 8,
.dma_channels = 8,
.gpio_vrsel = GPIO_PG13,
/* Some custom boards need this to be active low; just set it to "0"
 * if that is the case.
 */
.gpio_vrsel_active = 1,
.clkin = 24, /* musb CLKIN in MHZ */
};
static struct musb_hdrc_platform_data musb_plat = {
#if defined(CONFIG_USB_MUSB_HDRC) && defined(CONFIG_USB_GADGET_MUSB_HDRC)
.mode = MUSB_OTG,
#elif defined(CONFIG_USB_MUSB_HDRC)
.mode = MUSB_HOST,
#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
.mode = MUSB_PERIPHERAL,
#endif
.config = &musb_config,
};
static u64 musb_dmamask = ~(u32)0;
static struct platform_device musb_device = {
.name = "musb-blackfin",
.id = 0,
.dev = {
.dma_mask = &musb_dmamask,
.coherent_dma_mask = 0xffffffff,
.platform_data = &musb_plat,
},
.num_resources = ARRAY_SIZE(musb_resources),
.resource = musb_resources,
};
#endif
#if defined(CONFIG_FB_BFIN_T350MCQB) || defined(CONFIG_FB_BFIN_T350MCQB_MODULE)
static struct resource bf52x_t350mcqb_resources[] = {
{
.start = IRQ_PPI_ERROR,
.end = IRQ_PPI_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bf52x_t350mcqb_device = {
.name = "bfin-t350mcqb",
.id = -1,
.num_resources = ARRAY_SIZE(bf52x_t350mcqb_resources),
.resource = bf52x_t350mcqb_resources,
};
#endif
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
#include <asm/bfin-lq035q1.h>
static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = {
.mode = LQ035_NORM | LQ035_RGB | LQ035_RL | LQ035_TB,
.ppi_mode = USE_RGB565_8_BIT_PPI,
};
static struct resource bfin_lq035q1_resources[] = {
{
.start = IRQ_PPI_ERROR,
.end = IRQ_PPI_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bfin_lq035q1_device = {
.name = "bfin-lq035q1",
.id = -1,
.num_resources = ARRAY_SIZE(bfin_lq035q1_resources),
.resource = bfin_lq035q1_resources,
.dev = {
.platform_data = &bfin_lq035q1_data,
},
};
#endif
#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
static struct mtd_partition ezkit_partitions[] = {
{
.name = "bootloader(nor)",
.size = 0x40000,
.offset = 0,
}, {
.name = "linux kernel(nor)",
.size = 0x1C0000,
.offset = MTDPART_OFS_APPEND,
}, {
.name = "file system(nor)",
.size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
}
};
static struct physmap_flash_data ezkit_flash_data = {
.width = 2,
.parts = ezkit_partitions,
.nr_parts = ARRAY_SIZE(ezkit_partitions),
};
static struct resource ezkit_flash_resource = {
.start = 0x20000000,
.end = 0x203fffff,
.flags = IORESOURCE_MEM,
};
static struct platform_device ezkit_flash_device = {
.name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &ezkit_flash_data,
},
.num_resources = 1,
.resource = &ezkit_flash_resource,
};
#endif
#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE)
static struct mtd_partition partition_info[] = {
{
.name = "bootloader(nand)",
.offset = 0,
.size = 0x40000,
}, {
.name = "linux kernel(nand)",
.offset = MTDPART_OFS_APPEND,
.size = 4 * 1024 * 1024,
},
{
.name = "file system(nand)",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
},
};
static struct bf5xx_nand_platform bf5xx_nand_platform = {
.data_width = NFC_NWIDTH_8,
.partitions = partition_info,
.nr_partitions = ARRAY_SIZE(partition_info),
.rd_dly = 3,
.wr_dly = 3,
};
static struct resource bf5xx_nand_resources[] = {
{
.start = NFC_CTL,
.end = NFC_DATA_RD + 2,
.flags = IORESOURCE_MEM,
},
{
.start = CH_NFC,
.end = CH_NFC,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bf5xx_nand_device = {
.name = "bf5xx-nand",
.id = 0,
.num_resources = ARRAY_SIZE(bf5xx_nand_resources),
.resource = bf5xx_nand_resources,
.dev = {
.platform_data = &bf5xx_nand_platform,
},
};
#endif
#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE)
static struct resource bfin_pcmcia_cf_resources[] = {
{
.start = 0x20310000, /* IO PORT */
.end = 0x20312000,
.flags = IORESOURCE_MEM,
}, {
.start = 0x20311000, /* Attribute Memory */
.end = 0x20311FFF,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_PF4,
.end = IRQ_PF4,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
}, {
.start = 6, /* Card Detect PF6 */
.end = 6,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bfin_pcmcia_cf_device = {
.name = "bfin_cf_pcmcia",
.id = -1,
.num_resources = ARRAY_SIZE(bfin_pcmcia_cf_resources),
.resource = bfin_pcmcia_cf_resources,
};
#endif
#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
static struct platform_device rtc_device = {
.name = "rtc-bfin",
.id = -1,
};
#endif
#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
#include <linux/smc91x.h>
static struct smc91x_platdata smc91x_info = {
.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
.leda = RPC_LED_100_10,
.ledb = RPC_LED_TX_RX,
};
static struct resource smc91x_resources[] = {
{
.name = "smc91x-regs",
.start = 0x20300300,
.end = 0x20300300 + 16,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_PF7,
.end = IRQ_PF7,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
},
};
static struct platform_device smc91x_device = {
.name = "smc91x",
.id = 0,
.num_resources = ARRAY_SIZE(smc91x_resources),
.resource = smc91x_resources,
.dev = {
.platform_data = &smc91x_info,
},
};
#endif
#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE)
static struct resource dm9000_resources[] = {
[0] = {
.start = 0x203FB800,
.end = 0x203FB800 + 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 0x203FB800 + 4,
.end = 0x203FB800 + 5,
.flags = IORESOURCE_MEM,
},
[2] = {
.start = IRQ_PF9,
.end = IRQ_PF9,
.flags = (IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE),
},
};
static struct platform_device dm9000_device = {
.name = "dm9000",
.id = -1,
.num_resources = ARRAY_SIZE(dm9000_resources),
.resource = dm9000_resources,
};
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
#include <linux/bfin_mac.h>
static const unsigned short bfin_mac_peripherals[] = P_RMII0;
static struct bfin_phydev_platform_data bfin_phydev_data[] = {
{
.addr = 1,
.irq = IRQ_MAC_PHYINT,
},
};
static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
.phydev_number = 1,
.phydev_data = bfin_phydev_data,
.phy_mode = PHY_INTERFACE_MODE_RMII,
.mac_peripherals = bfin_mac_peripherals,
};
static struct platform_device bfin_mii_bus = {
.name = "bfin_mii_bus",
.dev = {
.platform_data = &bfin_mii_bus_data,
}
};
static struct platform_device bfin_mac_device = {
.name = "bfin_mac",
.dev = {
.platform_data = &bfin_mii_bus,
}
};
#endif
#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
static struct resource net2272_bfin_resources[] = {
{
.start = 0x20300000,
.end = 0x20300000 + 0x100,
.flags = IORESOURCE_MEM,
}, {
.start = 1,
.flags = IORESOURCE_BUS,
}, {
.start = IRQ_PF7,
.end = IRQ_PF7,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
},
};
static struct platform_device net2272_bfin_device = {
.name = "net2272",
.id = -1,
.num_resources = ARRAY_SIZE(net2272_bfin_resources),
.resource = net2272_bfin_resources,
};
#endif
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
static struct mtd_partition bfin_spi_flash_partitions[] = {
{
.name = "bootloader(spi)",
.size = 0x00040000,
.offset = 0,
.mask_flags = MTD_CAP_ROM
}, {
.name = "linux kernel(spi)",
.size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
}
};
static struct flash_platform_data bfin_spi_flash_data = {
.name = "m25p80",
.parts = bfin_spi_flash_partitions,
.nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions),
.type = "m25p16",
};
/* SPI flash chip (m25p16) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
	.enable_dma = 0,	/* don't use dma transfer with this chip */
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
};
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
static const struct ad7877_platform_data bfin_ad7877_ts_info = {
.model = 7877,
.vref_delay_usecs = 50, /* internal, no capacitor */
.x_plate_ohms = 419,
.y_plate_ohms = 486,
.pressure_max = 1000,
.pressure_min = 0,
.stopacq_polarity = 1,
.first_conversion_delay = 3,
.acquisition_time = 1,
.averaging = 1,
.pen_down_acc_interval = 1,
};
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7879) || defined(CONFIG_TOUCHSCREEN_AD7879_MODULE)
#include <linux/spi/ad7879.h>
static const struct ad7879_platform_data bfin_ad7879_ts_info = {
.model = 7879, /* Model = AD7879 */
.x_plate_ohms = 620, /* 620 Ohm from the touch datasheet */
.pressure_max = 10000,
.pressure_min = 0,
.first_conversion_delay = 3, /* wait 512us before do a first conversion */
.acquisition_time = 1, /* 4us acquisition time per sample */
.median = 2, /* do 8 measurements */
.averaging = 1, /* take the average of 4 middle samples */
.pen_down_acc_interval = 255, /* 9.4 ms */
.gpio_export = 0, /* Export GPIO to gpiolib */
};
#endif
#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \
defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
static const u16 bfin_snd_pin[][7] = {
{P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0, 0},
{P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_TFS, 0},
};
static struct bfin_snd_platform_data bfin_snd_data[] = {
{
.pin_req = &bfin_snd_pin[0][0],
},
{
.pin_req = &bfin_snd_pin[1][0],
},
};
#define BFIN_SND_RES(x) \
[x] = { \
{ \
.start = SPORT##x##_TCR1, \
.end = SPORT##x##_TCR1, \
.flags = IORESOURCE_MEM \
}, \
{ \
.start = CH_SPORT##x##_RX, \
.end = CH_SPORT##x##_RX, \
.flags = IORESOURCE_DMA, \
}, \
{ \
.start = CH_SPORT##x##_TX, \
.end = CH_SPORT##x##_TX, \
.flags = IORESOURCE_DMA, \
}, \
{ \
.start = IRQ_SPORT##x##_ERROR, \
.end = IRQ_SPORT##x##_ERROR, \
.flags = IORESOURCE_IRQ, \
} \
}
static struct resource bfin_snd_resources[][4] = {
BFIN_SND_RES(0),
BFIN_SND_RES(1),
};
#endif
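/*
 * For reference, one expansion of the BFIN_SND_RES() template above:
 * BFIN_SND_RES(0) yields the SPORT0 MMIO block, RX/TX DMA channels and
 * error IRQ, roughly:
 */
#if 0
static struct resource sketch_sport0_res[] = {
	{ .start = SPORT0_TCR1, .end = SPORT0_TCR1, .flags = IORESOURCE_MEM },
	{ .start = CH_SPORT0_RX, .end = CH_SPORT0_RX, .flags = IORESOURCE_DMA },
	{ .start = CH_SPORT0_TX, .end = CH_SPORT0_TX, .flags = IORESOURCE_DMA },
	{ .start = IRQ_SPORT0_ERROR, .end = IRQ_SPORT0_ERROR,
	  .flags = IORESOURCE_IRQ },
};
#endif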
#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
static struct platform_device bfin_i2s_pcm = {
.name = "bfin-i2s-pcm-audio",
.id = -1,
};
#endif
#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
static struct platform_device bfin_tdm_pcm = {
.name = "bfin-tdm-pcm-audio",
.id = -1,
};
#endif
#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
static struct platform_device bfin_ac97_pcm = {
.name = "bfin-ac97-pcm-audio",
.id = -1,
};
#endif
#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
static struct platform_device bfin_i2s = {
.name = "bfin-i2s",
.id = CONFIG_SND_BF5XX_SPORT_NUM,
.num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]),
.resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM],
.dev = {
.platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM],
},
};
#endif
#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
static struct platform_device bfin_tdm = {
.name = "bfin-tdm",
.id = CONFIG_SND_BF5XX_SPORT_NUM,
.num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]),
.resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM],
.dev = {
.platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM],
},
};
#endif
#if defined(CONFIG_SND_BF5XX_SOC_AD1836) \
|| defined(CONFIG_SND_BF5XX_SOC_AD1836_MODULE)
static const char * const ad1836_link[] = {
"bfin-tdm.0",
"spi0.4",
};
static struct platform_device bfin_ad1836_machine = {
.name = "bfin-snd-ad1836",
.id = -1,
.dev = {
.platform_data = (void *)ad1836_link,
},
};
#endif
static struct spi_board_info bfin_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
{
/* the modalias must be the same as spi device driver name */
.modalias = "m25p80", /* Name of spi_driver for this device */
.max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0, /* Framework bus number */
.chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/
.platform_data = &bfin_spi_flash_data,
.controller_data = &spi_flash_chip_info,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
|| defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
{
.modalias = "ad183x",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 4,
.platform_data = "ad1836",
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
{
.modalias = "mmc_spi",
.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 3,
.controller_data = &mmc_spi_chip_info,
.mode = SPI_MODE_0,
},
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
{
.modalias = "ad7877",
.platform_data = &bfin_ad7877_ts_info,
.irq = IRQ_PF8,
.max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 2,
},
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
{
.modalias = "ad7879",
.platform_data = &bfin_ad7879_ts_info,
.irq = IRQ_PF8,
.max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 3,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
{
.modalias = "spidev",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
},
#endif
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
{
.modalias = "bfin-lq035q1-spi",
.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 7,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
};
#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* SPI controller data */
static struct bfin5xx_spi_master bfin_spi0_info = {
.num_chipselect = 8,
.enable_dma = 1, /* master has the ability to do dma transfer */
.pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
};
/* SPI (0) */
static struct resource bfin_spi0_resource[] = {
[0] = {
.start = SPI0_REGBASE,
.end = SPI0_REGBASE + 0xFF,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = CH_SPI,
.end = CH_SPI,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI,
.end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bfin_spi0_device = {
.name = "bfin-spi",
.id = 0, /* Bus number */
.num_resources = ARRAY_SIZE(bfin_spi0_resource),
.resource = bfin_spi0_resource,
.dev = {
.platform_data = &bfin_spi0_info, /* Passed to driver */
},
};
#endif /* spi master and devices */
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
static struct resource bfin_uart0_resources[] = {
{
.start = UART0_THR,
.end = UART0_GCTL+2,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART0_TX,
.end = IRQ_UART0_TX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART0_RX,
.end = IRQ_UART0_RX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART0_ERROR,
.end = IRQ_UART0_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART0_TX,
.end = CH_UART0_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_UART0_RX,
.end = CH_UART0_RX,
.flags = IORESOURCE_DMA,
},
};
static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
static struct platform_device bfin_uart0_device = {
.name = "bfin-uart",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_uart0_resources),
.resource = bfin_uart0_resources,
.dev = {
.platform_data = &bfin_uart0_peripherals, /* Passed to driver */
},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
static struct resource bfin_uart1_resources[] = {
{
.start = UART1_THR,
.end = UART1_GCTL+2,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART1_TX,
.end = IRQ_UART1_TX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART1_RX,
.end = IRQ_UART1_RX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART1_ERROR,
.end = IRQ_UART1_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART1_TX,
.end = CH_UART1_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_UART1_RX,
.end = CH_UART1_RX,
.flags = IORESOURCE_DMA,
},
#ifdef CONFIG_BFIN_UART1_CTSRTS
{ /* CTS pin */
.start = GPIO_PF9,
.end = GPIO_PF9,
.flags = IORESOURCE_IO,
},
{ /* RTS pin */
.start = GPIO_PF10,
.end = GPIO_PF10,
.flags = IORESOURCE_IO,
},
#endif
};
static unsigned short bfin_uart1_peripherals[] = {
P_UART1_TX, P_UART1_RX, 0
};
static struct platform_device bfin_uart1_device = {
.name = "bfin-uart",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_uart1_resources),
.resource = bfin_uart1_resources,
.dev = {
.platform_data = &bfin_uart1_peripherals, /* Passed to driver */
},
};
#endif
#endif
#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
static struct resource bfin_sir0_resources[] = {
{
.start = 0xFFC00400,
.end = 0xFFC004FF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART0_RX,
.end = IRQ_UART0_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART0_RX,
.end = CH_UART0_RX+1,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device bfin_sir0_device = {
.name = "bfin_sir",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_sir0_resources),
.resource = bfin_sir0_resources,
};
#endif
#ifdef CONFIG_BFIN_SIR1
static struct resource bfin_sir1_resources[] = {
{
.start = 0xFFC02000,
.end = 0xFFC020FF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART1_RX,
.end = IRQ_UART1_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART1_RX,
.end = CH_UART1_RX+1,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device bfin_sir1_device = {
.name = "bfin_sir",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_sir1_resources),
.resource = bfin_sir1_resources,
};
#endif
#endif
#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};
static struct resource bfin_twi0_resource[] = {
[0] = {
.start = TWI0_REGBASE,
.end = TWI0_REGBASE,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_TWI,
.end = IRQ_TWI,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device i2c_bfin_twi_device = {
.name = "i2c-bfin-twi",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_twi0_resource),
.resource = bfin_twi0_resource,
.dev = {
.platform_data = &bfin_twi0_pins,
},
};
#endif
#if defined(CONFIG_PMIC_ADP5520) || defined(CONFIG_PMIC_ADP5520_MODULE)
#include <linux/mfd/adp5520.h>
/*
* ADP5520/5501 LEDs Data
*/
static struct led_info adp5520_leds[] = {
{
.name = "adp5520-led1",
.default_trigger = "none",
.flags = FLAG_ID_ADP5520_LED1_ADP5501_LED0 | ADP5520_LED_OFFT_600ms,
},
};
static struct adp5520_leds_platform_data adp5520_leds_data = {
.num_leds = ARRAY_SIZE(adp5520_leds),
.leds = adp5520_leds,
.fade_in = ADP5520_FADE_T_600ms,
.fade_out = ADP5520_FADE_T_600ms,
.led_on_time = ADP5520_LED_ONT_600ms,
};
/*
* ADP5520 Keypad Data
*/
static const unsigned short adp5520_keymap[ADP5520_KEYMAPSIZE] = {
[ADP5520_KEY(3, 3)] = KEY_1,
[ADP5520_KEY(2, 3)] = KEY_2,
[ADP5520_KEY(1, 3)] = KEY_3,
[ADP5520_KEY(0, 3)] = KEY_UP,
[ADP5520_KEY(3, 2)] = KEY_4,
[ADP5520_KEY(2, 2)] = KEY_5,
[ADP5520_KEY(1, 2)] = KEY_6,
[ADP5520_KEY(0, 2)] = KEY_DOWN,
[ADP5520_KEY(3, 1)] = KEY_7,
[ADP5520_KEY(2, 1)] = KEY_8,
[ADP5520_KEY(1, 1)] = KEY_9,
[ADP5520_KEY(0, 1)] = KEY_DOT,
[ADP5520_KEY(3, 0)] = KEY_BACKSPACE,
[ADP5520_KEY(2, 0)] = KEY_0,
[ADP5520_KEY(1, 0)] = KEY_HELP,
[ADP5520_KEY(0, 0)] = KEY_ENTER,
};
static struct adp5520_keys_platform_data adp5520_keys_data = {
.rows_en_mask = ADP5520_ROW_R3 | ADP5520_ROW_R2 | ADP5520_ROW_R1 | ADP5520_ROW_R0,
.cols_en_mask = ADP5520_COL_C3 | ADP5520_COL_C2 | ADP5520_COL_C1 | ADP5520_COL_C0,
.keymap = adp5520_keymap,
.keymapsize = ARRAY_SIZE(adp5520_keymap),
.repeat = 0,
};
/*
* ADP5520/5501 Multifunction Device Init Data
*/
static struct adp5520_platform_data adp5520_pdev_data = {
.leds = &adp5520_leds_data,
.keys = &adp5520_keys_data,
};
#endif
static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE)
{
I2C_BOARD_INFO("pcf8574_lcd", 0x22),
},
#endif
#if defined(CONFIG_INPUT_PCF8574) || defined(CONFIG_INPUT_PCF8574_MODULE)
{
I2C_BOARD_INFO("pcf8574_keypad", 0x27),
.irq = IRQ_PF8,
},
#endif
#if defined(CONFIG_FB_BFIN_7393) || defined(CONFIG_FB_BFIN_7393_MODULE)
{
I2C_BOARD_INFO("bfin-adv7393", 0x2B),
},
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7879_I2C) || defined(CONFIG_TOUCHSCREEN_AD7879_I2C_MODULE)
{
I2C_BOARD_INFO("ad7879", 0x2C),
.irq = IRQ_PF8,
.platform_data = (void *)&bfin_ad7879_ts_info,
},
#endif
#if defined(CONFIG_PMIC_ADP5520) || defined(CONFIG_PMIC_ADP5520_MODULE)
{
I2C_BOARD_INFO("pmic-adp5520", 0x32),
.irq = IRQ_PF9,
.platform_data = (void *)&adp5520_pdev_data,
},
#endif
#if defined(CONFIG_SND_SOC_SSM2602) || defined(CONFIG_SND_SOC_SSM2602_MODULE)
{
I2C_BOARD_INFO("ssm2602", 0x1b),
},
#endif
#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE)
{
I2C_BOARD_INFO("ad5252", 0x2f),
},
#endif
#if defined(CONFIG_SND_SOC_ADAU1373) || defined(CONFIG_SND_SOC_ADAU1373_MODULE)
{
I2C_BOARD_INFO("adau1373", 0x1A),
},
#endif
};
#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
static struct resource bfin_sport0_uart_resources[] = {
{
.start = SPORT0_TCR1,
.end = SPORT0_MRCS3+4,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_SPORT0_RX,
.end = IRQ_SPORT0_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_SPORT0_ERROR,
.end = IRQ_SPORT0_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static unsigned short bfin_sport0_peripherals[] = {
P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};
static struct platform_device bfin_sport0_uart_device = {
.name = "bfin-sport-uart",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
.resource = bfin_sport0_uart_resources,
.dev = {
.platform_data = &bfin_sport0_peripherals, /* Passed to driver */
},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
static struct resource bfin_sport1_uart_resources[] = {
{
.start = SPORT1_TCR1,
.end = SPORT1_MRCS3+4,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_SPORT1_RX,
.end = IRQ_SPORT1_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_SPORT1_ERROR,
.end = IRQ_SPORT1_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static unsigned short bfin_sport1_peripherals[] = {
P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};
static struct platform_device bfin_sport1_uart_device = {
.name = "bfin-sport-uart",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
.resource = bfin_sport1_uart_resources,
.dev = {
.platform_data = &bfin_sport1_peripherals, /* Passed to driver */
},
};
#endif
#endif
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
#include <linux/gpio_keys.h>
static struct gpio_keys_button bfin_gpio_keys_table[] = {
{BTN_0, GPIO_PG0, 1, "gpio-keys: BTN0"},
{BTN_1, GPIO_PG13, 1, "gpio-keys: BTN1"},
};
static struct gpio_keys_platform_data bfin_gpio_keys_data = {
.buttons = bfin_gpio_keys_table,
.nbuttons = ARRAY_SIZE(bfin_gpio_keys_table),
};
static struct platform_device bfin_device_gpiokeys = {
.name = "gpio-keys",
.dev = {
.platform_data = &bfin_gpio_keys_data,
},
};
#endif
#if defined(CONFIG_INPUT_BFIN_ROTARY) || defined(CONFIG_INPUT_BFIN_ROTARY_MODULE)
#include <asm/bfin_rotary.h>
static struct bfin_rotary_platform_data bfin_rotary_data = {
/*.rotary_up_key = KEY_UP,*/
/*.rotary_down_key = KEY_DOWN,*/
.rotary_rel_code = REL_WHEEL,
.rotary_button_key = KEY_ENTER,
.debounce = 10, /* 0..17 */
.mode = ROT_QUAD_ENC | ROT_DEBE,
.pm_wakeup = 1,
};
static struct resource bfin_rotary_resources[] = {
{
.start = IRQ_CNT,
.end = IRQ_CNT,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bfin_rotary_device = {
.name = "bfin-rotary",
.id = -1,
.num_resources = ARRAY_SIZE(bfin_rotary_resources),
.resource = bfin_rotary_resources,
.dev = {
.platform_data = &bfin_rotary_data,
},
};
#endif
static const unsigned int cclk_vlev_datasheet[] =
{
VRPAIR(VLEV_100, 400000000),
VRPAIR(VLEV_105, 426000000),
VRPAIR(VLEV_110, 500000000),
VRPAIR(VLEV_115, 533000000),
VRPAIR(VLEV_120, 600000000),
};
static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = {
.tuple_tab = cclk_vlev_datasheet,
.tabsize = ARRAY_SIZE(cclk_vlev_datasheet),
.vr_settling_time = 25 /* us */,
};
static struct platform_device bfin_dpmc = {
.name = "bfin dpmc",
.dev = {
.platform_data = &bfin_dmpc_vreg_data,
},
};
static struct platform_device *stamp_devices[] __initdata = {
&bfin_dpmc,
#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE)
&bf5xx_nand_device,
#endif
#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE)
&bfin_pcmcia_cf_device,
#endif
#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
&rtc_device,
#endif
#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
&bfin_isp1760_device,
#endif
#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE)
&musb_device,
#endif
#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
&smc91x_device,
#endif
#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE)
&dm9000_device,
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
&bfin_mii_bus,
&bfin_mac_device,
#endif
#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
&net2272_bfin_device,
#endif
#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
&bfin_spi0_device,
#endif
#if defined(CONFIG_FB_BFIN_T350MCQB) || defined(CONFIG_FB_BFIN_T350MCQB_MODULE)
&bf52x_t350mcqb_device,
#endif
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
&bfin_lq035q1_device,
#endif
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
&bfin_uart1_device,
#endif
#endif
#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
&bfin_sir0_device,
#endif
#ifdef CONFIG_BFIN_SIR1
&bfin_sir1_device,
#endif
#endif
#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
&i2c_bfin_twi_device,
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
&bfin_sport1_uart_device,
#endif
#endif
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
&bfin_device_gpiokeys,
#endif
#if defined(CONFIG_INPUT_BFIN_ROTARY) || defined(CONFIG_INPUT_BFIN_ROTARY_MODULE)
&bfin_rotary_device,
#endif
#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
&ezkit_flash_device,
#endif
#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
&bfin_i2s_pcm,
#endif
#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
&bfin_tdm_pcm,
#endif
#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
&bfin_ac97_pcm,
#endif
#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
&bfin_i2s,
#endif
#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE)
&bfin_tdm,
#endif
#if defined(CONFIG_SND_BF5XX_SOC_AD1836) || \
defined(CONFIG_SND_BF5XX_SOC_AD1836_MODULE)
&bfin_ad1836_machine,
#endif
};
static int __init ezkit_init(void)
{
printk(KERN_INFO "%s(): registering device resources\n", __func__);
i2c_register_board_info(0, bfin_i2c_board_info,
ARRAY_SIZE(bfin_i2c_board_info));
platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices));
spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
return 0;
}
arch_initcall(ezkit_init);
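/*
 * Editorial note: ezkit_init() is registered as an arch_initcall, which
 * runs before the device- and module-level initcalls, so the board's
 * platform devices and the I2C/SPI board info above are in place before
 * any of the corresponding drivers probe.
 */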
static struct platform_device *ezkit_early_devices[] __initdata = {
#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
#ifdef CONFIG_SERIAL_BFIN_UART0
&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
&bfin_uart1_device,
#endif
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
&bfin_sport1_uart_device,
#endif
#endif
};
void __init native_machine_early_platform_add_devices(void)
{
printk(KERN_INFO "register early platform devices\n");
early_platform_add_devices(ezkit_early_devices,
ARRAY_SIZE(ezkit_early_devices));
}
void native_machine_restart(char *cmd)
{
/* work around a reboot hang when booting from SPI */
if ((bfin_read_SYSCR() & 0x7) == 0x3)
bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS);
}
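/*
 * Editorial note (assumption): SYSCR[2:0] encodes the boot mode, with
 * 0x3 meaning "boot from external SPI flash"; the workaround above
 * deasserts the SPI flash chip select so the part does not hang when it
 * re-reads the boot stream after reset.
 */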
int bfin_get_ether_addr(char *addr)
{
/* the MAC is stored in OTP memory page 0xDF */
u32 ret;
u64 otp_mac;
u32 (*otp_read)(u32 page, u32 flags, u64 *page_content) = (void *)0xEF00001A;
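	/*
	 * 0xEF00001A is the on-chip boot-ROM entry for the OTP read
	 * routine (assumption: the low bit of its return value flags a
	 * read error, hence the check below). The six MAC bytes are
	 * copied out of the 64-bit OTP word in reverse order.
	 */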
ret = otp_read(0xDF, 0x00, &otp_mac);
if (!(ret & 0x1)) {
char *otp_mac_p = (char *)&otp_mac;
for (ret = 0; ret < 6; ++ret)
addr[ret] = otp_mac_p[5 - ret];
}
return 0;
}
EXPORT_SYMBOL(bfin_get_ether_addr);
| gpl-2.0 |
simar7/singh-kernel-shamu | arch/arm/mach-s5p64x0/mach-smdk6450.c | 2118 | 7369 | /* linux/arch/arm/mach-s5p64x0/mach-smdk6450.c
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/pwm_backlight.h>
#include <linux/fb.h>
#include <linux/mmc/host.h>
#include <video/platform_lcd.h>
#include <video/samsung_fimd.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
#include <mach/hardware.h>
#include <mach/map.h>
#include <mach/regs-clock.h>
#include <mach/regs-gpio.h>
#include <plat/regs-serial.h>
#include <plat/gpio-cfg.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>
#include <linux/platform_data/i2c-s3c2410.h>
#include <plat/pll.h>
#include <plat/adc.h>
#include <linux/platform_data/touchscreen-s3c2410.h>
#include <plat/samsung-time.h>
#include <plat/backlight.h>
#include <plat/fb.h>
#include <plat/sdhci.h>
#include "common.h"
#include "i2c.h"
#define SMDK6450_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
S3C2410_UCON_RXILEVEL | \
S3C2410_UCON_TXIRQMODE | \
S3C2410_UCON_RXIRQMODE | \
S3C2410_UCON_RXFIFO_TOI | \
S3C2443_UCON_RXERR_IRQEN)
#define SMDK6450_ULCON_DEFAULT S3C2410_LCON_CS8
#define SMDK6450_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \
S3C2440_UFCON_TXTRIG16 | \
S3C2410_UFCON_RXTRIG8)
static struct s3c2410_uartcfg smdk6450_uartcfgs[] __initdata = {
[0] = {
.hwport = 0,
.flags = 0,
.ucon = SMDK6450_UCON_DEFAULT,
.ulcon = SMDK6450_ULCON_DEFAULT,
.ufcon = SMDK6450_UFCON_DEFAULT,
},
[1] = {
.hwport = 1,
.flags = 0,
.ucon = SMDK6450_UCON_DEFAULT,
.ulcon = SMDK6450_ULCON_DEFAULT,
.ufcon = SMDK6450_UFCON_DEFAULT,
},
[2] = {
.hwport = 2,
.flags = 0,
.ucon = SMDK6450_UCON_DEFAULT,
.ulcon = SMDK6450_ULCON_DEFAULT,
.ufcon = SMDK6450_UFCON_DEFAULT,
},
[3] = {
.hwport = 3,
.flags = 0,
.ucon = SMDK6450_UCON_DEFAULT,
.ulcon = SMDK6450_ULCON_DEFAULT,
.ufcon = SMDK6450_UFCON_DEFAULT,
},
#if CONFIG_SERIAL_SAMSUNG_UARTS > 4
[4] = {
.hwport = 4,
.flags = 0,
.ucon = SMDK6450_UCON_DEFAULT,
.ulcon = SMDK6450_ULCON_DEFAULT,
.ufcon = SMDK6450_UFCON_DEFAULT,
},
#endif
#if CONFIG_SERIAL_SAMSUNG_UARTS > 5
[5] = {
.hwport = 5,
.flags = 0,
.ucon = SMDK6450_UCON_DEFAULT,
.ulcon = SMDK6450_ULCON_DEFAULT,
.ufcon = SMDK6450_UFCON_DEFAULT,
},
#endif
};
/* Frame Buffer */
static struct s3c_fb_pd_win smdk6450_fb_win0 = {
.max_bpp = 32,
.default_bpp = 24,
.xres = 800,
.yres = 480,
};
static struct fb_videomode smdk6450_lcd_timing = {
.left_margin = 8,
.right_margin = 13,
.upper_margin = 7,
.lower_margin = 5,
.hsync_len = 3,
.vsync_len = 1,
.xres = 800,
.yres = 480,
};
static struct s3c_fb_platdata smdk6450_lcd_pdata __initdata = {
.win[0] = &smdk6450_fb_win0,
.vtiming = &smdk6450_lcd_timing,
.vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
.vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
.setup_gpio = s5p64x0_fb_gpio_setup_24bpp,
};
/* LCD power controller */
static void smdk6450_lte480_reset_power(struct plat_lcd_data *pd,
unsigned int power)
{
int err;
if (power) {
err = gpio_request(S5P6450_GPN(5), "GPN");
if (err) {
printk(KERN_ERR "failed to request GPN for lcd reset\n");
return;
}
gpio_direction_output(S5P6450_GPN(5), 1);
gpio_set_value(S5P6450_GPN(5), 0);
gpio_set_value(S5P6450_GPN(5), 1);
gpio_free(S5P6450_GPN(5));
}
}
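/*
 * Note: the high->low->high toggle above generates the panel reset
 * pulse; the GPIO is requested and freed around each power-on because
 * it is not needed while the panel is running.
 */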
static struct plat_lcd_data smdk6450_lcd_power_data = {
.set_power = smdk6450_lte480_reset_power,
};
static struct platform_device smdk6450_lcd_lte480wv = {
.name = "platform-lcd",
.dev.parent = &s3c_device_fb.dev,
.dev.platform_data = &smdk6450_lcd_power_data,
};
static struct platform_device *smdk6450_devices[] __initdata = {
&s3c_device_adc,
&s3c_device_rtc,
&s3c_device_i2c0,
&s3c_device_i2c1,
&s3c_device_ts,
&s3c_device_wdt,
&s5p6450_device_iis0,
&s3c_device_fb,
&smdk6450_lcd_lte480wv,
&s3c_device_hsmmc0,
&s3c_device_hsmmc1,
&s3c_device_hsmmc2,
/* s5p6450_device_spi0 will be added */
};
static struct s3c_sdhci_platdata smdk6450_hsmmc0_pdata __initdata = {
.cd_type = S3C_SDHCI_CD_NONE,
};
static struct s3c_sdhci_platdata smdk6450_hsmmc1_pdata __initdata = {
.cd_type = S3C_SDHCI_CD_NONE,
#if defined(CONFIG_S5P64X0_SD_CH1_8BIT)
.max_width = 8,
.host_caps = MMC_CAP_8_BIT_DATA,
#endif
};
static struct s3c_sdhci_platdata smdk6450_hsmmc2_pdata __initdata = {
.cd_type = S3C_SDHCI_CD_NONE,
};
static struct s3c2410_platform_i2c s5p6450_i2c0_data __initdata = {
.flags = 0,
.slave_addr = 0x10,
.frequency = 100*1000,
.sda_delay = 100,
.cfg_gpio = s5p6450_i2c0_cfg_gpio,
};
static struct s3c2410_platform_i2c s5p6450_i2c1_data __initdata = {
.flags = 0,
.bus_num = 1,
.slave_addr = 0x10,
.frequency = 100*1000,
.sda_delay = 100,
.cfg_gpio = s5p6450_i2c1_cfg_gpio,
};
static struct i2c_board_info smdk6450_i2c_devs0[] __initdata = {
{ I2C_BOARD_INFO("wm8580", 0x1b), },
{ I2C_BOARD_INFO("24c08", 0x50), }, /* Samsung KS24C080C EEPROM */
};
static struct i2c_board_info smdk6450_i2c_devs1[] __initdata = {
{ I2C_BOARD_INFO("24c128", 0x57), },/* Samsung S524AD0XD1 EEPROM */
};
/* LCD Backlight data */
static struct samsung_bl_gpio_info smdk6450_bl_gpio_info = {
.no = S5P6450_GPF(15),
.func = S3C_GPIO_SFN(2),
};
static struct platform_pwm_backlight_data smdk6450_bl_data = {
.pwm_id = 1,
};
static void __init smdk6450_map_io(void)
{
s5p64x0_init_io(NULL, 0);
s3c24xx_init_clocks(19200000);
s3c24xx_init_uarts(smdk6450_uartcfgs, ARRAY_SIZE(smdk6450_uartcfgs));
samsung_set_timer_source(SAMSUNG_PWM3, SAMSUNG_PWM4);
}
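/*
 * Editorial note: via the MACHINE_START record at the end of this file,
 * the kernel calls smdk6450_map_io() first (static mappings, clocks,
 * UARTs), then s5p6450_init_irq(), and finally smdk6450_machine_init()
 * to register the board devices.
 */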
static void s5p6450_set_lcd_interface(void)
{
unsigned int cfg;
/* select TFT LCD type (RGB I/F) */
cfg = __raw_readl(S5P64X0_SPCON0);
cfg &= ~S5P64X0_SPCON0_LCD_SEL_MASK;
cfg |= S5P64X0_SPCON0_LCD_SEL_RGB;
__raw_writel(cfg, S5P64X0_SPCON0);
}
static void __init smdk6450_machine_init(void)
{
s3c24xx_ts_set_platdata(NULL);
s3c_i2c0_set_platdata(&s5p6450_i2c0_data);
s3c_i2c1_set_platdata(&s5p6450_i2c1_data);
i2c_register_board_info(0, smdk6450_i2c_devs0,
ARRAY_SIZE(smdk6450_i2c_devs0));
i2c_register_board_info(1, smdk6450_i2c_devs1,
ARRAY_SIZE(smdk6450_i2c_devs1));
samsung_bl_set(&smdk6450_bl_gpio_info, &smdk6450_bl_data);
s5p6450_set_lcd_interface();
s3c_fb_set_platdata(&smdk6450_lcd_pdata);
s3c_sdhci0_set_platdata(&smdk6450_hsmmc0_pdata);
s3c_sdhci1_set_platdata(&smdk6450_hsmmc1_pdata);
s3c_sdhci2_set_platdata(&smdk6450_hsmmc2_pdata);
platform_add_devices(smdk6450_devices, ARRAY_SIZE(smdk6450_devices));
}
MACHINE_START(SMDK6450, "SMDK6450")
/* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */
.atag_offset = 0x100,
.init_irq = s5p6450_init_irq,
.map_io = smdk6450_map_io,
.init_machine = smdk6450_machine_init,
.init_time = samsung_timer_init,
.restart = s5p64x0_restart,
MACHINE_END
| gpl-2.0 |
CoolDevelopment/VSMC-i9105p | drivers/video/aty/atyfb_base.c | 2374 | 111571 | /*
* ATI Frame Buffer Device Driver Core
*
* Copyright (C) 2004 Alex Kern <alex.kern@gmx.de>
* Copyright (C) 1997-2001 Geert Uytterhoeven
* Copyright (C) 1998 Bernd Harries
* Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
*
* This driver supports the following ATI graphics chips:
* - ATI Mach64
*
* To do: add support for
* - ATI Rage128 (from aty128fb.c)
* - ATI Radeon (from radeonfb.c)
*
* This driver is partly based on the PowerMac console driver:
*
* Copyright (C) 1996 Paul Mackerras
*
* and on the PowerMac ATI/mach64 display driver:
*
* Copyright (C) 1997 Michael AK Tesch
*
* with work by Jon Howell
* Harry AC Eaton
* Anthony Tong <atong@uiuc.edu>
*
* Generic LCD support written by Daniel Mantione, ported from 2.4.20 by Alex Kern
* Many thanks to Ville Syrjälä for patches and for fixing a nasty 16-bit color bug.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*
* Many thanks to Nitya from ATI devrel for support and patience !
*/
/******************************************************************************
TODO:
- cursor support on all cards and all ramdacs.
- cursor parameters controllable via ioctl()s.
- guess PLL and MCLK based on the original PLL register values initialized
by Open Firmware (if they are initialized); the BIOS case is already done.
(Anyone with a Mac willing to help with this?)
******************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/console.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/backlight.h>
#include <linux/reboot.h>
#include <linux/dmi.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <video/mach64.h>
#include "atyfb.h"
#include "ati_ids.h"
#ifdef __powerpc__
#include <asm/machdep.h>
#include <asm/prom.h>
#include "../macmodes.h"
#endif
#ifdef __sparc__
#include <asm/fbio.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#endif
#ifdef CONFIG_ADB_PMU
#include <linux/adb.h>
#include <linux/pmu.h>
#endif
#ifdef CONFIG_BOOTX_TEXT
#include <asm/btext.h>
#endif
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
/*
* Debug flags.
*/
#undef DEBUG
/*#define DEBUG*/
/* Make sure n * PAGE_SIZE is protected at end of Aperture for GUI-regs */
/* - must be large enough to catch all GUI-Regs */
/* - must be aligned to a PAGE boundary */
#define GUI_RESERVE (1 * PAGE_SIZE)
/* FIXME: remove the FAIL definition */
#define FAIL(msg) do { \
if (!(var->activate & FB_ACTIVATE_TEST)) \
printk(KERN_CRIT "atyfb: " msg "\n"); \
return -EINVAL; \
} while (0)
#define FAIL_MAX(msg, x, _max_) do { \
if (x > _max_) { \
if (!(var->activate & FB_ACTIVATE_TEST)) \
printk(KERN_CRIT "atyfb: " msg " %x(%x)\n", x, _max_); \
return -EINVAL; \
} \
} while (0)
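/*
 * Note: the do { ... } while (0) wrapper makes each FAIL* macro expand
 * to a single C statement, so it composes safely with if/else, e.g.
 * "if (bad) FAIL_MAX(...); else ...".
 */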
#ifdef DEBUG
#define DPRINTK(fmt, args...) printk(KERN_DEBUG "atyfb: " fmt, ## args)
#else
#define DPRINTK(fmt, args...)
#endif
#define PRINTKI(fmt, args...) printk(KERN_INFO "atyfb: " fmt, ## args)
#define PRINTKE(fmt, args...) printk(KERN_ERR "atyfb: " fmt, ## args)
#if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || \
defined (CONFIG_FB_ATY_GENERIC_LCD) || defined(CONFIG_FB_ATY_BACKLIGHT)
static const u32 lt_lcd_regs[] = {
CNFG_PANEL_LG,
LCD_GEN_CNTL_LG,
DSTN_CONTROL_LG,
HFB_PITCH_ADDR_LG,
HORZ_STRETCHING_LG,
VERT_STRETCHING_LG,
0, /* EXT_VERT_STRETCH */
LT_GIO_LG,
POWER_MANAGEMENT_LG
};
void aty_st_lcd(int index, u32 val, const struct atyfb_par *par)
{
if (M64_HAS(LT_LCD_REGS)) {
aty_st_le32(lt_lcd_regs[index], val, par);
} else {
unsigned long temp;
/* write addr byte */
temp = aty_ld_le32(LCD_INDEX, par);
aty_st_le32(LCD_INDEX, (temp & ~LCD_INDEX_MASK) | index, par);
/* write the register value */
aty_st_le32(LCD_DATA, val, par);
}
}
u32 aty_ld_lcd(int index, const struct atyfb_par *par)
{
if (M64_HAS(LT_LCD_REGS)) {
return aty_ld_le32(lt_lcd_regs[index], par);
} else {
unsigned long temp;
/* write addr byte */
temp = aty_ld_le32(LCD_INDEX, par);
aty_st_le32(LCD_INDEX, (temp & ~LCD_INDEX_MASK) | index, par);
/* read the register value */
return aty_ld_le32(LCD_DATA, par);
}
}
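/*
 * Editorial sketch: on chips without LT_LCD_REGS the LCD block sits
 * behind a classic index/data register pair, i.e. conceptually:
 *
 *	aty_st_le32(LCD_INDEX, reg, par);	// select the LCD register
 *	aty_st_le32(LCD_DATA, val, par);	// then read or write it
 *
 * LT-class parts instead map the same registers linearly through
 * lt_lcd_regs[], so no indexing round trip is needed.
 */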
#endif /* defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || defined(CONFIG_FB_ATY_GENERIC_LCD) || defined(CONFIG_FB_ATY_BACKLIGHT) */
#ifdef CONFIG_FB_ATY_GENERIC_LCD
/*
* ATIReduceRatio --
*
* Reduce a fraction by factoring out the greatest common divisor of the
* fraction's numerator and denominator.
*/
static void ATIReduceRatio(int *Numerator, int *Denominator)
{
int Multiplier, Divider, Remainder;
Multiplier = *Numerator;
Divider = *Denominator;
while ((Remainder = Multiplier % Divider)) {
Multiplier = Divider;
Divider = Remainder;
}
*Numerator /= Divider;
*Denominator /= Divider;
}
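/*
 * Illustrative example (not driver code): ATIReduceRatio() is Euclid's
 * algorithm applied to both members of the fraction:
 *
 *	int n = 640, d = 800;
 *	ATIReduceRatio(&n, &d);		// n == 4, d == 5
 *
 * Working with the reduced ratio keeps the stretch-loop arithmetic in
 * aty_var_to_crtc() within integer range.
 */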
#endif
/*
* The Hardware parameters for each card
*/
struct pci_mmap_map {
unsigned long voff;
unsigned long poff;
unsigned long size;
unsigned long prot_flag;
unsigned long prot_mask;
};
static struct fb_fix_screeninfo atyfb_fix __devinitdata = {
.id = "ATY Mach64",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
.xpanstep = 8,
.ypanstep = 1,
};
/*
* Frame buffer device API
*/
static int atyfb_open(struct fb_info *info, int user);
static int atyfb_release(struct fb_info *info, int user);
static int atyfb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info);
static int atyfb_set_par(struct fb_info *info);
static int atyfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
u_int transp, struct fb_info *info);
static int atyfb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info);
static int atyfb_blank(int blank, struct fb_info *info);
static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg);
#ifdef __sparc__
static int atyfb_mmap(struct fb_info *info, struct vm_area_struct *vma);
#endif
static int atyfb_sync(struct fb_info *info);
/*
* Internal routines
*/
static int aty_init(struct fb_info *info);
static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc);
static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc);
static int aty_var_to_crtc(const struct fb_info *info,
const struct fb_var_screeninfo *var,
struct crtc *crtc);
static int aty_crtc_to_var(const struct crtc *crtc,
struct fb_var_screeninfo *var);
static void set_off_pitch(struct atyfb_par *par, const struct fb_info *info);
#ifdef CONFIG_PPC
static int read_aty_sense(const struct atyfb_par *par);
#endif
static DEFINE_MUTEX(reboot_lock);
static struct fb_info *reboot_info;
/*
* Interface used by the world
*/
static struct fb_var_screeninfo default_var = {
/* 640x480, 60 Hz, Non-Interlaced (25.175 MHz dotclock) */
640, 480, 640, 480, 0, 0, 8, 0,
{0, 8, 0}, {0, 8, 0}, {0, 8, 0}, {0, 0, 0},
0, 0, -1, -1, 0, 39722, 48, 16, 33, 10, 96, 2,
0, FB_VMODE_NONINTERLACED
};
static struct fb_videomode defmode = {
/* 640x480 @ 60 Hz, 31.5 kHz hsync */
NULL, 60, 640, 480, 39721, 40, 24, 32, 11, 96, 2,
0, FB_VMODE_NONINTERLACED
};
static struct fb_ops atyfb_ops = {
.owner = THIS_MODULE,
.fb_open = atyfb_open,
.fb_release = atyfb_release,
.fb_check_var = atyfb_check_var,
.fb_set_par = atyfb_set_par,
.fb_setcolreg = atyfb_setcolreg,
.fb_pan_display = atyfb_pan_display,
.fb_blank = atyfb_blank,
.fb_ioctl = atyfb_ioctl,
.fb_fillrect = atyfb_fillrect,
.fb_copyarea = atyfb_copyarea,
.fb_imageblit = atyfb_imageblit,
#ifdef __sparc__
.fb_mmap = atyfb_mmap,
#endif
.fb_sync = atyfb_sync,
};
static int noaccel;
#ifdef CONFIG_MTRR
static int nomtrr;
#endif
static int vram;
static int pll;
static int mclk;
static int xclk;
static int comp_sync __devinitdata = -1;
static char *mode;
#ifdef CONFIG_PMAC_BACKLIGHT
static int backlight __devinitdata = 1;
#else
static int backlight __devinitdata = 0;
#endif
#ifdef CONFIG_PPC
static int default_vmode __devinitdata = VMODE_CHOOSE;
static int default_cmode __devinitdata = CMODE_CHOOSE;
module_param_named(vmode, default_vmode, int, 0);
MODULE_PARM_DESC(vmode, "int: video mode for mac");
module_param_named(cmode, default_cmode, int, 0);
MODULE_PARM_DESC(cmode, "int: color mode for mac");
#endif
#ifdef CONFIG_ATARI
static unsigned int mach64_count __devinitdata = 0;
static unsigned long phys_vmembase[FB_MAX] __devinitdata = { 0, };
static unsigned long phys_size[FB_MAX] __devinitdata = { 0, };
static unsigned long phys_guiregbase[FB_MAX] __devinitdata = { 0, };
#endif
/* top -> down reflects the evolution of the mach64 chipset; any corrections? */
#define ATI_CHIP_88800GX (M64F_GX)
#define ATI_CHIP_88800CX (M64F_GX)
#define ATI_CHIP_264CT (M64F_CT | M64F_INTEGRATED | M64F_CT_BUS | M64F_MAGIC_FIFO)
#define ATI_CHIP_264ET (M64F_CT | M64F_INTEGRATED | M64F_CT_BUS | M64F_MAGIC_FIFO)
#define ATI_CHIP_264VT (M64F_VT | M64F_INTEGRATED | M64F_VT_BUS | M64F_MAGIC_FIFO)
#define ATI_CHIP_264GT (M64F_GT | M64F_INTEGRATED | M64F_MAGIC_FIFO | M64F_EXTRA_BRIGHT)
#define ATI_CHIP_264VTB (M64F_VT | M64F_INTEGRATED | M64F_VT_BUS | M64F_GTB_DSP)
#define ATI_CHIP_264VT3 (M64F_VT | M64F_INTEGRATED | M64F_VT_BUS | M64F_GTB_DSP | M64F_SDRAM_MAGIC_PLL)
#define ATI_CHIP_264VT4 (M64F_VT | M64F_INTEGRATED | M64F_GTB_DSP)
/* FIXME what is this chip? */
#define ATI_CHIP_264LT (M64F_GT | M64F_INTEGRATED | M64F_GTB_DSP)
/* make sets shorter */
#define ATI_MODERN_SET (M64F_GT | M64F_INTEGRATED | M64F_GTB_DSP | M64F_EXTRA_BRIGHT)
#define ATI_CHIP_264GTB (ATI_MODERN_SET | M64F_SDRAM_MAGIC_PLL)
/*#define ATI_CHIP_264GTDVD ?*/
#define ATI_CHIP_264LTG (ATI_MODERN_SET | M64F_SDRAM_MAGIC_PLL)
#define ATI_CHIP_264GT2C (ATI_MODERN_SET | M64F_SDRAM_MAGIC_PLL | M64F_HW_TRIPLE)
#define ATI_CHIP_264GTPRO (ATI_MODERN_SET | M64F_SDRAM_MAGIC_PLL | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D)
#define ATI_CHIP_264LTPRO (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D)
#define ATI_CHIP_264XL (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_XL_MEM)
#define ATI_CHIP_MOBILITY (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_XL_MEM | M64F_MOBIL_BUS)
static struct {
u16 pci_id;
const char *name;
int pll, mclk, xclk, ecp_max;
u32 features;
} aty_chips[] __devinitdata = {
#ifdef CONFIG_FB_ATY_GX
/* Mach64 GX */
{ PCI_CHIP_MACH64GX, "ATI888GX00 (Mach64 GX)", 135, 50, 50, 0, ATI_CHIP_88800GX },
{ PCI_CHIP_MACH64CX, "ATI888CX00 (Mach64 CX)", 135, 50, 50, 0, ATI_CHIP_88800CX },
#endif /* CONFIG_FB_ATY_GX */
#ifdef CONFIG_FB_ATY_CT
{ PCI_CHIP_MACH64CT, "ATI264CT (Mach64 CT)", 135, 60, 60, 0, ATI_CHIP_264CT },
{ PCI_CHIP_MACH64ET, "ATI264ET (Mach64 ET)", 135, 60, 60, 0, ATI_CHIP_264ET },
/* FIXME what is this chip? */
{ PCI_CHIP_MACH64LT, "ATI264LT (Mach64 LT)", 135, 63, 63, 0, ATI_CHIP_264LT },
{ PCI_CHIP_MACH64VT, "ATI264VT (Mach64 VT)", 170, 67, 67, 80, ATI_CHIP_264VT },
{ PCI_CHIP_MACH64GT, "3D RAGE (Mach64 GT)", 135, 63, 63, 80, ATI_CHIP_264GT },
{ PCI_CHIP_MACH64VU, "ATI264VT3 (Mach64 VU)", 200, 67, 67, 80, ATI_CHIP_264VT3 },
{ PCI_CHIP_MACH64GU, "3D RAGE II+ (Mach64 GU)", 200, 67, 67, 100, ATI_CHIP_264GTB },
{ PCI_CHIP_MACH64LG, "3D RAGE LT (Mach64 LG)", 230, 63, 63, 100, ATI_CHIP_264LTG | M64F_LT_LCD_REGS | M64F_G3_PB_1024x768 },
{ PCI_CHIP_MACH64VV, "ATI264VT4 (Mach64 VV)", 230, 83, 83, 100, ATI_CHIP_264VT4 },
{ PCI_CHIP_MACH64GV, "3D RAGE IIC (Mach64 GV, PCI)", 230, 83, 83, 100, ATI_CHIP_264GT2C },
{ PCI_CHIP_MACH64GW, "3D RAGE IIC (Mach64 GW, AGP)", 230, 83, 83, 100, ATI_CHIP_264GT2C },
{ PCI_CHIP_MACH64GY, "3D RAGE IIC (Mach64 GY, PCI)", 230, 83, 83, 100, ATI_CHIP_264GT2C },
{ PCI_CHIP_MACH64GZ, "3D RAGE IIC (Mach64 GZ, AGP)", 230, 83, 83, 100, ATI_CHIP_264GT2C },
{ PCI_CHIP_MACH64GB, "3D RAGE PRO (Mach64 GB, BGA, AGP)", 230, 100, 100, 125, ATI_CHIP_264GTPRO },
{ PCI_CHIP_MACH64GD, "3D RAGE PRO (Mach64 GD, BGA, AGP 1x)", 230, 100, 100, 125, ATI_CHIP_264GTPRO },
{ PCI_CHIP_MACH64GI, "3D RAGE PRO (Mach64 GI, BGA, PCI)", 230, 100, 100, 125, ATI_CHIP_264GTPRO | M64F_MAGIC_VRAM_SIZE },
{ PCI_CHIP_MACH64GP, "3D RAGE PRO (Mach64 GP, PQFP, PCI)", 230, 100, 100, 125, ATI_CHIP_264GTPRO },
{ PCI_CHIP_MACH64GQ, "3D RAGE PRO (Mach64 GQ, PQFP, PCI, limited 3D)", 230, 100, 100, 125, ATI_CHIP_264GTPRO },
{ PCI_CHIP_MACH64LB, "3D RAGE LT PRO (Mach64 LB, AGP)", 236, 75, 100, 135, ATI_CHIP_264LTPRO },
{ PCI_CHIP_MACH64LD, "3D RAGE LT PRO (Mach64 LD, AGP)", 230, 100, 100, 135, ATI_CHIP_264LTPRO },
{ PCI_CHIP_MACH64LI, "3D RAGE LT PRO (Mach64 LI, PCI)", 230, 100, 100, 135, ATI_CHIP_264LTPRO | M64F_G3_PB_1_1 | M64F_G3_PB_1024x768 },
{ PCI_CHIP_MACH64LP, "3D RAGE LT PRO (Mach64 LP, PCI)", 230, 100, 100, 135, ATI_CHIP_264LTPRO | M64F_G3_PB_1024x768 },
{ PCI_CHIP_MACH64LQ, "3D RAGE LT PRO (Mach64 LQ, PCI)", 230, 100, 100, 135, ATI_CHIP_264LTPRO },
{ PCI_CHIP_MACH64GM, "3D RAGE XL (Mach64 GM, AGP 2x)", 230, 83, 63, 135, ATI_CHIP_264XL },
{ PCI_CHIP_MACH64GN, "3D RAGE XC (Mach64 GN, AGP 2x)", 230, 83, 63, 135, ATI_CHIP_264XL },
{ PCI_CHIP_MACH64GO, "3D RAGE XL (Mach64 GO, PCI-66)", 230, 83, 63, 135, ATI_CHIP_264XL },
{ PCI_CHIP_MACH64GL, "3D RAGE XC (Mach64 GL, PCI-66)", 230, 83, 63, 135, ATI_CHIP_264XL },
{ PCI_CHIP_MACH64GR, "3D RAGE XL (Mach64 GR, PCI-33)", 230, 83, 63, 135, ATI_CHIP_264XL | M64F_SDRAM_MAGIC_PLL },
{ PCI_CHIP_MACH64GS, "3D RAGE XC (Mach64 GS, PCI-33)", 230, 83, 63, 135, ATI_CHIP_264XL },
{ PCI_CHIP_MACH64LM, "3D RAGE Mobility P/M (Mach64 LM, AGP 2x)", 230, 83, 125, 135, ATI_CHIP_MOBILITY },
{ PCI_CHIP_MACH64LN, "3D RAGE Mobility L (Mach64 LN, AGP 2x)", 230, 83, 125, 135, ATI_CHIP_MOBILITY },
{ PCI_CHIP_MACH64LR, "3D RAGE Mobility P/M (Mach64 LR, PCI)", 230, 83, 125, 135, ATI_CHIP_MOBILITY },
{ PCI_CHIP_MACH64LS, "3D RAGE Mobility L (Mach64 LS, PCI)", 230, 83, 125, 135, ATI_CHIP_MOBILITY },
#endif /* CONFIG_FB_ATY_CT */
};
static int __devinit correct_chipset(struct atyfb_par *par)
{
u8 rev;
u16 type;
u32 chip_id;
const char *name;
int i;
for (i = ARRAY_SIZE(aty_chips) - 1; i >= 0; i--)
if (par->pci_id == aty_chips[i].pci_id)
break;
if (i < 0)
return -ENODEV;
name = aty_chips[i].name;
par->pll_limits.pll_max = aty_chips[i].pll;
par->pll_limits.mclk = aty_chips[i].mclk;
par->pll_limits.xclk = aty_chips[i].xclk;
par->pll_limits.ecp_max = aty_chips[i].ecp_max;
par->features = aty_chips[i].features;
chip_id = aty_ld_le32(CNFG_CHIP_ID, par);
type = chip_id & CFG_CHIP_TYPE;
rev = (chip_id & CFG_CHIP_REV) >> 24;
switch (par->pci_id) {
#ifdef CONFIG_FB_ATY_GX
case PCI_CHIP_MACH64GX:
if (type != 0x00d7)
return -ENODEV;
break;
case PCI_CHIP_MACH64CX:
if (type != 0x0057)
return -ENODEV;
break;
#endif
#ifdef CONFIG_FB_ATY_CT
case PCI_CHIP_MACH64VT:
switch (rev & 0x07) {
case 0x00:
switch (rev & 0xc0) {
case 0x00:
name = "ATI264VT (A3) (Mach64 VT)";
par->pll_limits.pll_max = 170;
par->pll_limits.mclk = 67;
par->pll_limits.xclk = 67;
par->pll_limits.ecp_max = 80;
par->features = ATI_CHIP_264VT;
break;
case 0x40:
name = "ATI264VT2 (A4) (Mach64 VT)";
par->pll_limits.pll_max = 200;
par->pll_limits.mclk = 67;
par->pll_limits.xclk = 67;
par->pll_limits.ecp_max = 80;
par->features = ATI_CHIP_264VT | M64F_MAGIC_POSTDIV;
break;
}
break;
case 0x01:
name = "ATI264VT3 (B1) (Mach64 VT)";
par->pll_limits.pll_max = 200;
par->pll_limits.mclk = 67;
par->pll_limits.xclk = 67;
par->pll_limits.ecp_max = 80;
par->features = ATI_CHIP_264VTB;
break;
case 0x02:
name = "ATI264VT3 (B2) (Mach64 VT)";
par->pll_limits.pll_max = 200;
par->pll_limits.mclk = 67;
par->pll_limits.xclk = 67;
par->pll_limits.ecp_max = 80;
par->features = ATI_CHIP_264VT3;
break;
}
break;
case PCI_CHIP_MACH64GT:
switch (rev & 0x07) {
case 0x01:
name = "3D RAGE II (Mach64 GT)";
par->pll_limits.pll_max = 170;
par->pll_limits.mclk = 67;
par->pll_limits.xclk = 67;
par->pll_limits.ecp_max = 80;
par->features = ATI_CHIP_264GTB;
break;
case 0x02:
name = "3D RAGE II+ (Mach64 GT)";
par->pll_limits.pll_max = 200;
par->pll_limits.mclk = 67;
par->pll_limits.xclk = 67;
par->pll_limits.ecp_max = 100;
par->features = ATI_CHIP_264GTB;
break;
}
break;
#endif
}
PRINTKI("%s [0x%04x rev 0x%02x]\n", name, type, rev);
return 0;
}
static char ram_dram[] __devinitdata = "DRAM";
static char ram_resv[] __devinitdata = "RESV";
#ifdef CONFIG_FB_ATY_GX
static char ram_vram[] __devinitdata = "VRAM";
#endif /* CONFIG_FB_ATY_GX */
#ifdef CONFIG_FB_ATY_CT
static char ram_edo[] __devinitdata = "EDO";
static char ram_sdram[] __devinitdata = "SDRAM (1:1)";
static char ram_sgram[] __devinitdata = "SGRAM (1:1)";
static char ram_sdram32[] __devinitdata = "SDRAM (2:1) (32-bit)";
static char ram_wram[] __devinitdata = "WRAM";
static char ram_off[] __devinitdata = "OFF";
#endif /* CONFIG_FB_ATY_CT */
#ifdef CONFIG_FB_ATY_GX
static char *aty_gx_ram[8] __devinitdata = {
ram_dram, ram_vram, ram_vram, ram_dram,
ram_dram, ram_vram, ram_vram, ram_resv
};
#endif /* CONFIG_FB_ATY_GX */
#ifdef CONFIG_FB_ATY_CT
static char *aty_ct_ram[8] __devinitdata = {
ram_off, ram_dram, ram_edo, ram_edo,
ram_sdram, ram_sgram, ram_wram, ram_resv
};
static char *aty_xl_ram[8] __devinitdata = {
ram_off, ram_dram, ram_edo, ram_edo,
ram_sdram, ram_sgram, ram_sdram32, ram_resv
};
#endif /* CONFIG_FB_ATY_CT */
static u32 atyfb_get_pixclock(struct fb_var_screeninfo *var,
struct atyfb_par *par)
{
u32 pixclock = var->pixclock;
#ifdef CONFIG_FB_ATY_GENERIC_LCD
u32 lcd_on_off;
par->pll.ct.xres = 0;
if (par->lcd_table != 0) {
lcd_on_off = aty_ld_lcd(LCD_GEN_CNTL, par);
if (lcd_on_off & LCD_ON) {
par->pll.ct.xres = var->xres;
pixclock = par->lcd_pixclock;
}
}
#endif
return pixclock;
}
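/*
 * Note: when a panel from the LCD table is active, the fixed panel
 * clock (par->lcd_pixclock) deliberately overrides whatever pixclock
 * the requested mode asked for; the visible mode is then stretched to
 * the panel timing in aty_var_to_crtc().
 */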
#if defined(CONFIG_PPC)
/*
* Apple monitor sense
*/
static int __devinit read_aty_sense(const struct atyfb_par *par)
{
int sense, i;
aty_st_le32(GP_IO, 0x31003100, par); /* drive outputs high */
__delay(200);
aty_st_le32(GP_IO, 0, par); /* turn off outputs */
__delay(2000);
i = aty_ld_le32(GP_IO, par); /* get primary sense value */
sense = ((i & 0x3000) >> 3) | (i & 0x100);
/* drive each sense line low in turn and collect the other 2 */
aty_st_le32(GP_IO, 0x20000000, par); /* drive A low */
__delay(2000);
i = aty_ld_le32(GP_IO, par);
sense |= ((i & 0x1000) >> 7) | ((i & 0x100) >> 4);
aty_st_le32(GP_IO, 0x20002000, par); /* drive A high again */
__delay(200);
aty_st_le32(GP_IO, 0x10000000, par); /* drive B low */
__delay(2000);
i = aty_ld_le32(GP_IO, par);
sense |= ((i & 0x2000) >> 10) | ((i & 0x100) >> 6);
aty_st_le32(GP_IO, 0x10001000, par); /* drive B high again */
__delay(200);
aty_st_le32(GP_IO, 0x01000000, par); /* drive C low */
__delay(2000);
sense |= (aty_ld_le32(GP_IO, par) & 0x3000) >> 12;
aty_st_le32(GP_IO, 0, par); /* turn off outputs */
return sense;
}
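/*
 * Editorial note: this implements the classic Apple monitor-sense
 * handshake. Each of the three sense lines is driven low in turn while
 * the levels of the other two are sampled; together with the initial
 * undriven reading this yields the extended sense code that identifies
 * the attached monitor type.
 */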
#endif /* defined(CONFIG_PPC) */
/* ------------------------------------------------------------------------- */
/*
* CRTC programming
*/
static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc)
{
#ifdef CONFIG_FB_ATY_GENERIC_LCD
if (par->lcd_table != 0) {
if (!M64_HAS(LT_LCD_REGS)) {
crtc->lcd_index = aty_ld_le32(LCD_INDEX, par);
aty_st_le32(LCD_INDEX, crtc->lcd_index, par);
}
crtc->lcd_config_panel = aty_ld_lcd(CNFG_PANEL, par);
crtc->lcd_gen_cntl = aty_ld_lcd(LCD_GEN_CNTL, par);
/* switch to non shadow registers */
aty_st_lcd(LCD_GEN_CNTL, crtc->lcd_gen_cntl &
~(CRTC_RW_SELECT | SHADOW_EN | SHADOW_RW_EN), par);
/* save stretching */
crtc->horz_stretching = aty_ld_lcd(HORZ_STRETCHING, par);
crtc->vert_stretching = aty_ld_lcd(VERT_STRETCHING, par);
if (!M64_HAS(LT_LCD_REGS))
crtc->ext_vert_stretch = aty_ld_lcd(EXT_VERT_STRETCH, par);
}
#endif
crtc->h_tot_disp = aty_ld_le32(CRTC_H_TOTAL_DISP, par);
crtc->h_sync_strt_wid = aty_ld_le32(CRTC_H_SYNC_STRT_WID, par);
crtc->v_tot_disp = aty_ld_le32(CRTC_V_TOTAL_DISP, par);
crtc->v_sync_strt_wid = aty_ld_le32(CRTC_V_SYNC_STRT_WID, par);
crtc->vline_crnt_vline = aty_ld_le32(CRTC_VLINE_CRNT_VLINE, par);
crtc->off_pitch = aty_ld_le32(CRTC_OFF_PITCH, par);
crtc->gen_cntl = aty_ld_le32(CRTC_GEN_CNTL, par);
#ifdef CONFIG_FB_ATY_GENERIC_LCD
if (par->lcd_table != 0) {
/* switch to shadow registers */
aty_st_lcd(LCD_GEN_CNTL, (crtc->lcd_gen_cntl & ~CRTC_RW_SELECT) |
SHADOW_EN | SHADOW_RW_EN, par);
crtc->shadow_h_tot_disp = aty_ld_le32(CRTC_H_TOTAL_DISP, par);
crtc->shadow_h_sync_strt_wid = aty_ld_le32(CRTC_H_SYNC_STRT_WID, par);
crtc->shadow_v_tot_disp = aty_ld_le32(CRTC_V_TOTAL_DISP, par);
crtc->shadow_v_sync_strt_wid = aty_ld_le32(CRTC_V_SYNC_STRT_WID, par);
aty_st_le32(LCD_GEN_CNTL, crtc->lcd_gen_cntl, par);
}
#endif /* CONFIG_FB_ATY_GENERIC_LCD */
}
static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc)
{
#ifdef CONFIG_FB_ATY_GENERIC_LCD
if (par->lcd_table != 0) {
/* stop CRTC */
aty_st_le32(CRTC_GEN_CNTL, crtc->gen_cntl &
~(CRTC_EXT_DISP_EN | CRTC_EN), par);
/* update non-shadow registers first */
aty_st_lcd(CNFG_PANEL, crtc->lcd_config_panel, par);
aty_st_lcd(LCD_GEN_CNTL, crtc->lcd_gen_cntl &
~(CRTC_RW_SELECT | SHADOW_EN | SHADOW_RW_EN), par);
/* temporarily disable stretching */
aty_st_lcd(HORZ_STRETCHING, crtc->horz_stretching &
~(HORZ_STRETCH_MODE | HORZ_STRETCH_EN), par);
aty_st_lcd(VERT_STRETCHING, crtc->vert_stretching &
~(VERT_STRETCH_RATIO1 | VERT_STRETCH_RATIO2 |
VERT_STRETCH_USE0 | VERT_STRETCH_EN), par);
}
#endif
/* turn off CRT */
aty_st_le32(CRTC_GEN_CNTL, crtc->gen_cntl & ~CRTC_EN, par);
DPRINTK("setting up CRTC\n");
DPRINTK("set primary CRT to %ix%i %c%c composite %c\n",
((((crtc->h_tot_disp >> 16) & 0xff) + 1) << 3),
(((crtc->v_tot_disp >> 16) & 0x7ff) + 1),
(crtc->h_sync_strt_wid & 0x200000) ? 'N' : 'P',
(crtc->v_sync_strt_wid & 0x200000) ? 'N' : 'P',
(crtc->gen_cntl & CRTC_CSYNC_EN) ? 'P' : 'N');
DPRINTK("CRTC_H_TOTAL_DISP: %x\n", crtc->h_tot_disp);
DPRINTK("CRTC_H_SYNC_STRT_WID: %x\n", crtc->h_sync_strt_wid);
DPRINTK("CRTC_V_TOTAL_DISP: %x\n", crtc->v_tot_disp);
DPRINTK("CRTC_V_SYNC_STRT_WID: %x\n", crtc->v_sync_strt_wid);
DPRINTK("CRTC_OFF_PITCH: %x\n", crtc->off_pitch);
DPRINTK("CRTC_VLINE_CRNT_VLINE: %x\n", crtc->vline_crnt_vline);
DPRINTK("CRTC_GEN_CNTL: %x\n", crtc->gen_cntl);
aty_st_le32(CRTC_H_TOTAL_DISP, crtc->h_tot_disp, par);
aty_st_le32(CRTC_H_SYNC_STRT_WID, crtc->h_sync_strt_wid, par);
aty_st_le32(CRTC_V_TOTAL_DISP, crtc->v_tot_disp, par);
aty_st_le32(CRTC_V_SYNC_STRT_WID, crtc->v_sync_strt_wid, par);
aty_st_le32(CRTC_OFF_PITCH, crtc->off_pitch, par);
aty_st_le32(CRTC_VLINE_CRNT_VLINE, crtc->vline_crnt_vline, par);
aty_st_le32(CRTC_GEN_CNTL, crtc->gen_cntl, par);
#if 0
FIXME
if (par->accel_flags & FB_ACCELF_TEXT)
aty_init_engine(par, info);
#endif
#ifdef CONFIG_FB_ATY_GENERIC_LCD
/* after setting the CRTC registers we should set the LCD registers. */
if (par->lcd_table != 0) {
/* switch to shadow registers */
aty_st_lcd(LCD_GEN_CNTL, (crtc->lcd_gen_cntl & ~CRTC_RW_SELECT) |
SHADOW_EN | SHADOW_RW_EN, par);
DPRINTK("set shadow CRT to %ix%i %c%c\n",
((((crtc->shadow_h_tot_disp >> 16) & 0xff) + 1) << 3),
(((crtc->shadow_v_tot_disp >> 16) & 0x7ff) + 1),
(crtc->shadow_h_sync_strt_wid & 0x200000) ? 'N' : 'P',
(crtc->shadow_v_sync_strt_wid & 0x200000) ? 'N' : 'P');
DPRINTK("SHADOW CRTC_H_TOTAL_DISP: %x\n",
crtc->shadow_h_tot_disp);
DPRINTK("SHADOW CRTC_H_SYNC_STRT_WID: %x\n",
crtc->shadow_h_sync_strt_wid);
DPRINTK("SHADOW CRTC_V_TOTAL_DISP: %x\n",
crtc->shadow_v_tot_disp);
DPRINTK("SHADOW CRTC_V_SYNC_STRT_WID: %x\n",
crtc->shadow_v_sync_strt_wid);
aty_st_le32(CRTC_H_TOTAL_DISP, crtc->shadow_h_tot_disp, par);
aty_st_le32(CRTC_H_SYNC_STRT_WID, crtc->shadow_h_sync_strt_wid, par);
aty_st_le32(CRTC_V_TOTAL_DISP, crtc->shadow_v_tot_disp, par);
aty_st_le32(CRTC_V_SYNC_STRT_WID, crtc->shadow_v_sync_strt_wid, par);
/* restore CRTC selection & shadow state and enable stretching */
DPRINTK("LCD_GEN_CNTL: %x\n", crtc->lcd_gen_cntl);
DPRINTK("HORZ_STRETCHING: %x\n", crtc->horz_stretching);
DPRINTK("VERT_STRETCHING: %x\n", crtc->vert_stretching);
if (!M64_HAS(LT_LCD_REGS))
DPRINTK("EXT_VERT_STRETCH: %x\n", crtc->ext_vert_stretch);
aty_st_lcd(LCD_GEN_CNTL, crtc->lcd_gen_cntl, par);
aty_st_lcd(HORZ_STRETCHING, crtc->horz_stretching, par);
aty_st_lcd(VERT_STRETCHING, crtc->vert_stretching, par);
if (!M64_HAS(LT_LCD_REGS)) {
aty_st_lcd(EXT_VERT_STRETCH, crtc->ext_vert_stretch, par);
aty_ld_le32(LCD_INDEX, par);
aty_st_le32(LCD_INDEX, crtc->lcd_index, par);
}
}
#endif /* CONFIG_FB_ATY_GENERIC_LCD */
}
static u32 calc_line_length(struct atyfb_par *par, u32 vxres, u32 bpp)
{
u32 line_length = vxres * bpp / 8;
if (par->ram_type == SGRAM ||
(!M64_HAS(XL_MEM) && par->ram_type == WRAM))
line_length = (line_length + 63) & ~63;
return line_length;
}
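/*
 * Worked example (illustrative): with vxres = 1024 and bpp = 16 the raw
 * length is 1024 * 16 / 8 = 2048 bytes, already 64-byte aligned. A
 * hypothetical 1366-pixel line at 16 bpp would give 2732 bytes, which
 * the "(x + 63) & ~63" step on SGRAM (or non-XL WRAM) rounds up to 2752.
 */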
static int aty_var_to_crtc(const struct fb_info *info,
const struct fb_var_screeninfo *var,
struct crtc *crtc)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
u32 xres, yres, vxres, vyres, xoffset, yoffset, bpp;
u32 sync, vmode, vdisplay;
u32 h_total, h_disp, h_sync_strt, h_sync_end, h_sync_dly, h_sync_wid, h_sync_pol;
u32 v_total, v_disp, v_sync_strt, v_sync_end, v_sync_wid, v_sync_pol, c_sync;
u32 pix_width, dp_pix_width, dp_chain_mask;
u32 line_length;
/* input */
xres = (var->xres + 7) & ~7;
yres = var->yres;
vxres = (var->xres_virtual + 7) & ~7;
vyres = var->yres_virtual;
xoffset = (var->xoffset + 7) & ~7;
yoffset = var->yoffset;
bpp = var->bits_per_pixel;
if (bpp == 16)
bpp = (var->green.length == 5) ? 15 : 16;
sync = var->sync;
vmode = var->vmode;
/* convert (and round up) and validate */
if (vxres < xres + xoffset)
vxres = xres + xoffset;
h_disp = xres;
if (vyres < yres + yoffset)
vyres = yres + yoffset;
v_disp = yres;
if (bpp <= 8) {
bpp = 8;
pix_width = CRTC_PIX_WIDTH_8BPP;
dp_pix_width = HOST_8BPP | SRC_8BPP | DST_8BPP |
BYTE_ORDER_LSB_TO_MSB;
dp_chain_mask = DP_CHAIN_8BPP;
} else if (bpp <= 15) {
bpp = 16;
pix_width = CRTC_PIX_WIDTH_15BPP;
dp_pix_width = HOST_15BPP | SRC_15BPP | DST_15BPP |
BYTE_ORDER_LSB_TO_MSB;
dp_chain_mask = DP_CHAIN_15BPP;
} else if (bpp <= 16) {
bpp = 16;
pix_width = CRTC_PIX_WIDTH_16BPP;
dp_pix_width = HOST_16BPP | SRC_16BPP | DST_16BPP |
BYTE_ORDER_LSB_TO_MSB;
dp_chain_mask = DP_CHAIN_16BPP;
} else if (bpp <= 24 && M64_HAS(INTEGRATED)) {
bpp = 24;
pix_width = CRTC_PIX_WIDTH_24BPP;
dp_pix_width = HOST_8BPP | SRC_8BPP | DST_8BPP |
BYTE_ORDER_LSB_TO_MSB;
dp_chain_mask = DP_CHAIN_24BPP;
} else if (bpp <= 32) {
bpp = 32;
pix_width = CRTC_PIX_WIDTH_32BPP;
dp_pix_width = HOST_32BPP | SRC_32BPP | DST_32BPP |
BYTE_ORDER_LSB_TO_MSB;
dp_chain_mask = DP_CHAIN_32BPP;
} else
FAIL("invalid bpp");
line_length = calc_line_length(par, vxres, bpp);
if (vyres * line_length > info->fix.smem_len)
FAIL("not enough video RAM");
h_sync_pol = sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1;
v_sync_pol = sync & FB_SYNC_VERT_HIGH_ACT ? 0 : 1;
if ((xres > 1600) || (yres > 1200)) {
FAIL("MACH64 chips are designed for max 1600x1200\n"
"select anoter resolution.");
}
h_sync_strt = h_disp + var->right_margin;
h_sync_end = h_sync_strt + var->hsync_len;
h_sync_dly = var->right_margin & 7;
h_total = h_sync_end + h_sync_dly + var->left_margin;
v_sync_strt = v_disp + var->lower_margin;
v_sync_end = v_sync_strt + var->vsync_len;
v_total = v_sync_end + var->upper_margin;
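	/*
	 * Worked example (illustrative): for the 640x480-60 timing in
	 * default_var (right 16, hsync 96, left 48, lower 10, vsync 2,
	 * upper 33) this gives h_sync_strt = 656, h_sync_end = 752,
	 * h_sync_dly = 0, h_total = 800, v_sync_strt = 490,
	 * v_sync_end = 492 and v_total = 525, the standard VGA timing.
	 */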
#ifdef CONFIG_FB_ATY_GENERIC_LCD
if (par->lcd_table != 0) {
if (!M64_HAS(LT_LCD_REGS)) {
u32 lcd_index = aty_ld_le32(LCD_INDEX, par);
crtc->lcd_index = lcd_index &
~(LCD_INDEX_MASK | LCD_DISPLAY_DIS |
LCD_SRC_SEL | CRTC2_DISPLAY_DIS);
aty_st_le32(LCD_INDEX, lcd_index, par);
}
if (!M64_HAS(MOBIL_BUS))
crtc->lcd_index |= CRTC2_DISPLAY_DIS;
crtc->lcd_config_panel = aty_ld_lcd(CNFG_PANEL, par) | 0x4000;
crtc->lcd_gen_cntl = aty_ld_lcd(LCD_GEN_CNTL, par) & ~CRTC_RW_SELECT;
crtc->lcd_gen_cntl &=
~(HORZ_DIVBY2_EN | DIS_HOR_CRT_DIVBY2 | TVCLK_PM_EN |
/*VCLK_DAC_PM_EN | USE_SHADOWED_VEND |*/
USE_SHADOWED_ROWCUR | SHADOW_EN | SHADOW_RW_EN);
crtc->lcd_gen_cntl |= DONT_SHADOW_VPAR | LOCK_8DOT;
if ((crtc->lcd_gen_cntl & LCD_ON) &&
((xres > par->lcd_width) || (yres > par->lcd_height))) {
/*
* We cannot display the mode on the LCD. If the CRT is
* enabled we can turn off the LCD.
* If the CRT is off, it isn't a good idea to switch it
* on; we don't know if one is connected. So it is better
* to fail in that case.
*/
if (crtc->lcd_gen_cntl & CRT_ON) {
if (!(var->activate & FB_ACTIVATE_TEST))
PRINTKI("Disable LCD panel, because video mode does not fit.\n");
crtc->lcd_gen_cntl &= ~LCD_ON;
/*aty_st_lcd(LCD_GEN_CNTL, crtc->lcd_gen_cntl, par);*/
} else {
if (!(var->activate & FB_ACTIVATE_TEST))
PRINTKE("Video mode exceeds size of LCD panel.\nConnect this computer to a conventional monitor if you really need this mode.\n");
return -EINVAL;
}
}
}
if ((par->lcd_table != 0) && (crtc->lcd_gen_cntl & LCD_ON)) {
int VScan = 1;
/* bpp -> bytespp, 1,4 -> 0; 8 -> 2; 15,16 -> 1; 24 -> 6; 32 -> 5
const u8 DFP_h_sync_dly_LT[] = { 0, 2, 1, 6, 5 };
const u8 ADD_to_strt_wid_and_dly_LT_DAC[] = { 0, 5, 6, 9, 9, 12, 12 }; */
vmode &= ~(FB_VMODE_DOUBLE | FB_VMODE_INTERLACED);
/*
* This is horrible! When we simulate, say, 640x480 on an 800x600
* LCD panel, the CRTC must be programmed with 800x600 values for
* the non-visible part, but 640x480 for the visible part.
* This code has been tested on a laptop with its 1400x1050 LCD
* panel and a conventional monitor both switched on.
* Tested modes: 1280x1024, 1152x864, 1024x768, 800x600;
* it also works, with small glitches, in DOUBLESCAN modes.
*/
if (yres < par->lcd_height) {
VScan = par->lcd_height / yres;
if (VScan > 1) {
VScan = 2;
vmode |= FB_VMODE_DOUBLE;
}
}
h_sync_strt = h_disp + par->lcd_right_margin;
h_sync_end = h_sync_strt + par->lcd_hsync_len;
h_sync_dly = /*DFP_h_sync_dly[ ( bpp + 1 ) / 3 ]; */par->lcd_hsync_dly;
h_total = h_disp + par->lcd_hblank_len;
v_sync_strt = v_disp + par->lcd_lower_margin / VScan;
v_sync_end = v_sync_strt + par->lcd_vsync_len / VScan;
v_total = v_disp + par->lcd_vblank_len / VScan;
}
#endif /* CONFIG_FB_ATY_GENERIC_LCD */
h_disp = (h_disp >> 3) - 1;
h_sync_strt = (h_sync_strt >> 3) - 1;
h_sync_end = (h_sync_end >> 3) - 1;
h_total = (h_total >> 3) - 1;
h_sync_wid = h_sync_end - h_sync_strt;
FAIL_MAX("h_disp too large", h_disp, 0xff);
FAIL_MAX("h_sync_strt too large", h_sync_strt, 0x1ff);
/*FAIL_MAX("h_sync_wid too large", h_sync_wid, 0x1f);*/
if (h_sync_wid > 0x1f)
h_sync_wid = 0x1f;
FAIL_MAX("h_total too large", h_total, 0x1ff);
if (vmode & FB_VMODE_DOUBLE) {
v_disp <<= 1;
v_sync_strt <<= 1;
v_sync_end <<= 1;
v_total <<= 1;
}
vdisplay = yres;
#ifdef CONFIG_FB_ATY_GENERIC_LCD
if ((par->lcd_table != 0) && (crtc->lcd_gen_cntl & LCD_ON))
vdisplay = par->lcd_height;
#endif
v_disp--;
v_sync_strt--;
v_sync_end--;
v_total--;
v_sync_wid = v_sync_end - v_sync_strt;
FAIL_MAX("v_disp too large", v_disp, 0x7ff);
FAIL_MAX("v_sync_stsrt too large", v_sync_strt, 0x7ff);
/*FAIL_MAX("v_sync_wid too large", v_sync_wid, 0x1f);*/
if (v_sync_wid > 0x1f)
v_sync_wid = 0x1f;
FAIL_MAX("v_total too large", v_total, 0x7ff);
c_sync = sync & FB_SYNC_COMP_HIGH_ACT ? CRTC_CSYNC_EN : 0;
/* output */
crtc->vxres = vxres;
crtc->vyres = vyres;
crtc->xoffset = xoffset;
crtc->yoffset = yoffset;
crtc->bpp = bpp;
crtc->off_pitch =
((yoffset * line_length + xoffset * bpp / 8) / 8) |
((line_length / bpp) << 22);
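	/*
	 * CRTC_OFF_PITCH packs the framebuffer start offset in units of
	 * 8 bytes in the low bits and the pitch, in units of 8 pixels,
	 * in bits 22 and up; note that line_length / bpp equals
	 * (pixels per line) / 8, since line_length is in bytes.
	 */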
crtc->vline_crnt_vline = 0;
crtc->h_tot_disp = h_total | (h_disp << 16);
crtc->h_sync_strt_wid = (h_sync_strt & 0xff) | (h_sync_dly << 8) |
((h_sync_strt & 0x100) << 4) | (h_sync_wid << 16) |
(h_sync_pol << 21);
crtc->v_tot_disp = v_total | (v_disp << 16);
crtc->v_sync_strt_wid = v_sync_strt | (v_sync_wid << 16) |
(v_sync_pol << 21);
/* crtc->gen_cntl = aty_ld_le32(CRTC_GEN_CNTL, par) & CRTC_PRESERVED_MASK; */
crtc->gen_cntl = CRTC_EXT_DISP_EN | CRTC_EN | pix_width | c_sync;
crtc->gen_cntl |= CRTC_VGA_LINEAR;
/* Enable doublescan mode if requested */
if (vmode & FB_VMODE_DOUBLE)
crtc->gen_cntl |= CRTC_DBL_SCAN_EN;
/* Enable interlaced mode if requested */
if (vmode & FB_VMODE_INTERLACED)
crtc->gen_cntl |= CRTC_INTERLACE_EN;
#ifdef CONFIG_FB_ATY_GENERIC_LCD
if (par->lcd_table != 0) {
vdisplay = yres;
if (vmode & FB_VMODE_DOUBLE)
vdisplay <<= 1;
crtc->gen_cntl &= ~(CRTC2_EN | CRTC2_PIX_WIDTH);
crtc->lcd_gen_cntl &= ~(HORZ_DIVBY2_EN | DIS_HOR_CRT_DIVBY2 |
/*TVCLK_PM_EN | VCLK_DAC_PM_EN |*/
USE_SHADOWED_VEND |
USE_SHADOWED_ROWCUR |
SHADOW_EN | SHADOW_RW_EN);
crtc->lcd_gen_cntl |= DONT_SHADOW_VPAR/* | LOCK_8DOT*/;
/* MOBILITY M1 tested, FIXME: LT */
crtc->horz_stretching = aty_ld_lcd(HORZ_STRETCHING, par);
if (!M64_HAS(LT_LCD_REGS))
crtc->ext_vert_stretch = aty_ld_lcd(EXT_VERT_STRETCH, par) &
~(AUTO_VERT_RATIO | VERT_STRETCH_MODE | VERT_STRETCH_RATIO3);
crtc->horz_stretching &= ~(HORZ_STRETCH_RATIO |
HORZ_STRETCH_LOOP | AUTO_HORZ_RATIO |
HORZ_STRETCH_MODE | HORZ_STRETCH_EN);
if (xres < par->lcd_width && crtc->lcd_gen_cntl & LCD_ON) {
do {
/*
* The horizontal blender misbehaves when
* HDisplay is less than a certain threshold
* (440 for a 1024-wide panel). It doesn't
* stretch such modes enough. Use pixel
* replication instead of blending to stretch
* modes that can be made to exactly fit the
* panel width. The undocumented "NoLCDBlend"
* option allows the pixel-replicated mode to
* be slightly wider or narrower than the
* panel width. It also causes a mode that is
* exactly half as wide as the panel to be
* pixel-replicated, rather than blended.
*/
int HDisplay = xres & ~7;
int nStretch = par->lcd_width / HDisplay;
int Remainder = par->lcd_width % HDisplay;
if ((!Remainder && ((nStretch > 2))) ||
(((HDisplay * 16) / par->lcd_width) < 7)) {
static const char StretchLoops[] = { 10, 12, 13, 15, 16 };
int horz_stretch_loop = -1, BestRemainder;
int Numerator = HDisplay, Denominator = par->lcd_width;
int Index = 5;
ATIReduceRatio(&Numerator, &Denominator);
BestRemainder = (Numerator * 16) / Denominator;
while (--Index >= 0) {
Remainder = ((Denominator - Numerator) * StretchLoops[Index]) %
Denominator;
if (Remainder < BestRemainder) {
horz_stretch_loop = Index;
if (!(BestRemainder = Remainder))
break;
}
}
if ((horz_stretch_loop >= 0) && !BestRemainder) {
int horz_stretch_ratio = 0, Accumulator = 0;
int reuse_previous = 1;
Index = StretchLoops[horz_stretch_loop];
while (--Index >= 0) {
if (Accumulator > 0)
horz_stretch_ratio |= reuse_previous;
else
Accumulator += Denominator;
Accumulator -= Numerator;
reuse_previous <<= 1;
}
crtc->horz_stretching |= (HORZ_STRETCH_EN |
((horz_stretch_loop & HORZ_STRETCH_LOOP) << 16) |
(horz_stretch_ratio & HORZ_STRETCH_RATIO));
break; /* Out of the do { ... } while (0) */
}
}
crtc->horz_stretching |= (HORZ_STRETCH_MODE | HORZ_STRETCH_EN |
(((HDisplay * (HORZ_STRETCH_BLEND + 1)) / par->lcd_width) & HORZ_STRETCH_BLEND));
} while (0);
}
if (vdisplay < par->lcd_height && crtc->lcd_gen_cntl & LCD_ON) {
crtc->vert_stretching = (VERT_STRETCH_USE0 | VERT_STRETCH_EN |
(((vdisplay * (VERT_STRETCH_RATIO0 + 1)) / par->lcd_height) & VERT_STRETCH_RATIO0));
if (!M64_HAS(LT_LCD_REGS) &&
xres <= (M64_HAS(MOBIL_BUS) ? 1024 : 800))
crtc->ext_vert_stretch |= VERT_STRETCH_MODE;
} else {
/*
* Don't use vertical blending if the mode is too wide
* or not vertically stretched.
*/
crtc->vert_stretching = 0;
}
/* copy to shadow crtc */
crtc->shadow_h_tot_disp = crtc->h_tot_disp;
crtc->shadow_h_sync_strt_wid = crtc->h_sync_strt_wid;
crtc->shadow_v_tot_disp = crtc->v_tot_disp;
crtc->shadow_v_sync_strt_wid = crtc->v_sync_strt_wid;
}
#endif /* CONFIG_FB_ATY_GENERIC_LCD */
if (M64_HAS(MAGIC_FIFO)) {
/* FIXME: display FIFO low watermark values */
crtc->gen_cntl |= (aty_ld_le32(CRTC_GEN_CNTL, par) & CRTC_FIFO_LWM);
}
crtc->dp_pix_width = dp_pix_width;
crtc->dp_chain_mask = dp_chain_mask;
return 0;
}
static int aty_crtc_to_var(const struct crtc *crtc,
struct fb_var_screeninfo *var)
{
u32 xres, yres, bpp, left, right, upper, lower, hslen, vslen, sync;
u32 h_total, h_disp, h_sync_strt, h_sync_dly, h_sync_wid, h_sync_pol;
u32 v_total, v_disp, v_sync_strt, v_sync_wid, v_sync_pol, c_sync;
u32 pix_width;
u32 double_scan, interlace;
/* input */
h_total = crtc->h_tot_disp & 0x1ff;
h_disp = (crtc->h_tot_disp >> 16) & 0xff;
h_sync_strt = (crtc->h_sync_strt_wid & 0xff) | ((crtc->h_sync_strt_wid >> 4) & 0x100);
h_sync_dly = (crtc->h_sync_strt_wid >> 8) & 0x7;
h_sync_wid = (crtc->h_sync_strt_wid >> 16) & 0x1f;
h_sync_pol = (crtc->h_sync_strt_wid >> 21) & 0x1;
v_total = crtc->v_tot_disp & 0x7ff;
v_disp = (crtc->v_tot_disp >> 16) & 0x7ff;
v_sync_strt = crtc->v_sync_strt_wid & 0x7ff;
v_sync_wid = (crtc->v_sync_strt_wid >> 16) & 0x1f;
v_sync_pol = (crtc->v_sync_strt_wid >> 21) & 0x1;
c_sync = crtc->gen_cntl & CRTC_CSYNC_EN ? 1 : 0;
pix_width = crtc->gen_cntl & CRTC_PIX_WIDTH_MASK;
double_scan = crtc->gen_cntl & CRTC_DBL_SCAN_EN;
interlace = crtc->gen_cntl & CRTC_INTERLACE_EN;
/* convert */
xres = (h_disp + 1) * 8;
yres = v_disp + 1;
left = (h_total - h_sync_strt - h_sync_wid) * 8 - h_sync_dly;
right = (h_sync_strt - h_disp) * 8 + h_sync_dly;
hslen = h_sync_wid * 8;
upper = v_total - v_sync_strt - v_sync_wid;
lower = v_sync_strt - v_disp;
vslen = v_sync_wid;
sync = (h_sync_pol ? 0 : FB_SYNC_HOR_HIGH_ACT) |
(v_sync_pol ? 0 : FB_SYNC_VERT_HIGH_ACT) |
(c_sync ? FB_SYNC_COMP_HIGH_ACT : 0);
switch (pix_width) {
#if 0
case CRTC_PIX_WIDTH_4BPP:
bpp = 4;
var->red.offset = 0;
var->red.length = 8;
var->green.offset = 0;
var->green.length = 8;
var->blue.offset = 0;
var->blue.length = 8;
var->transp.offset = 0;
var->transp.length = 0;
break;
#endif
case CRTC_PIX_WIDTH_8BPP:
bpp = 8;
var->red.offset = 0;
var->red.length = 8;
var->green.offset = 0;
var->green.length = 8;
var->blue.offset = 0;
var->blue.length = 8;
var->transp.offset = 0;
var->transp.length = 0;
break;
case CRTC_PIX_WIDTH_15BPP: /* RGB 555 */
bpp = 16;
var->red.offset = 10;
var->red.length = 5;
var->green.offset = 5;
var->green.length = 5;
var->blue.offset = 0;
var->blue.length = 5;
var->transp.offset = 0;
var->transp.length = 0;
break;
case CRTC_PIX_WIDTH_16BPP: /* RGB 565 */
bpp = 16;
var->red.offset = 11;
var->red.length = 5;
var->green.offset = 5;
var->green.length = 6;
var->blue.offset = 0;
var->blue.length = 5;
var->transp.offset = 0;
var->transp.length = 0;
break;
case CRTC_PIX_WIDTH_24BPP: /* RGB 888 */
bpp = 24;
var->red.offset = 16;
var->red.length = 8;
var->green.offset = 8;
var->green.length = 8;
var->blue.offset = 0;
var->blue.length = 8;
var->transp.offset = 0;
var->transp.length = 0;
break;
case CRTC_PIX_WIDTH_32BPP: /* ARGB 8888 */
bpp = 32;
var->red.offset = 16;
var->red.length = 8;
var->green.offset = 8;
var->green.length = 8;
var->blue.offset = 0;
var->blue.length = 8;
var->transp.offset = 24;
var->transp.length = 8;
break;
default:
PRINTKE("Invalid pixel width\n");
return -EINVAL;
}
/* output */
var->xres = xres;
var->yres = yres;
var->xres_virtual = crtc->vxres;
var->yres_virtual = crtc->vyres;
var->bits_per_pixel = bpp;
var->left_margin = left;
var->right_margin = right;
var->upper_margin = upper;
var->lower_margin = lower;
var->hsync_len = hslen;
var->vsync_len = vslen;
var->sync = sync;
var->vmode = FB_VMODE_NONINTERLACED;
/*
* In double scan mode, the vertical parameters are doubled,
* so we need to halve them to get the right values.
* In interlaced mode the values are already correct,
* so no correction is necessary.
*/
if (interlace)
var->vmode = FB_VMODE_INTERLACED;
if (double_scan) {
var->vmode = FB_VMODE_DOUBLE;
var->yres >>= 1;
var->upper_margin >>= 1;
var->lower_margin >>= 1;
var->vsync_len >>= 1;
}
return 0;
}
/* ------------------------------------------------------------------------- */
static int atyfb_set_par(struct fb_info *info)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
struct fb_var_screeninfo *var = &info->var;
u32 tmp, pixclock;
int err;
#ifdef DEBUG
struct fb_var_screeninfo debug;
u32 pixclock_in_ps;
#endif
if (par->asleep)
return 0;
err = aty_var_to_crtc(info, var, &par->crtc);
if (err)
return err;
pixclock = atyfb_get_pixclock(var, par);
if (pixclock == 0) {
PRINTKE("Invalid pixclock\n");
return -EINVAL;
} else {
err = par->pll_ops->var_to_pll(info, pixclock,
var->bits_per_pixel, &par->pll);
if (err)
return err;
}
par->accel_flags = var->accel_flags; /* hack */
if (var->accel_flags) {
info->fbops->fb_sync = atyfb_sync;
info->flags &= ~FBINFO_HWACCEL_DISABLED;
} else {
info->fbops->fb_sync = NULL;
info->flags |= FBINFO_HWACCEL_DISABLED;
}
if (par->blitter_may_be_busy)
wait_for_idle(par);
aty_set_crtc(par, &par->crtc);
par->dac_ops->set_dac(info, &par->pll,
var->bits_per_pixel, par->accel_flags);
par->pll_ops->set_pll(info, &par->pll);
#ifdef DEBUG
if (par->pll_ops && par->pll_ops->pll_to_var)
pixclock_in_ps = par->pll_ops->pll_to_var(info, &par->pll);
else
pixclock_in_ps = 0;
if (0 == pixclock_in_ps) {
PRINTKE("ALERT ops->pll_to_var get 0\n");
pixclock_in_ps = pixclock;
}
memset(&debug, 0, sizeof(debug));
if (!aty_crtc_to_var(&par->crtc, &debug)) {
u32 hSync, vRefresh;
u32 h_disp, h_sync_strt, h_sync_end, h_total;
u32 v_disp, v_sync_strt, v_sync_end, v_total;
h_disp = debug.xres;
h_sync_strt = h_disp + debug.right_margin;
h_sync_end = h_sync_strt + debug.hsync_len;
h_total = h_sync_end + debug.left_margin;
v_disp = debug.yres;
v_sync_strt = v_disp + debug.lower_margin;
v_sync_end = v_sync_strt + debug.vsync_len;
v_total = v_sync_end + debug.upper_margin;
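/*
* pixclock_in_ps * h_total is the line period in ps, so
* 10^9 / period gives the horizontal rate in kHz; scaling
* by 1000 and dividing by v_total then yields Hz.
*/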
hSync = 1000000000 / (pixclock_in_ps * h_total);
vRefresh = (hSync * 1000) / v_total;
if (par->crtc.gen_cntl & CRTC_INTERLACE_EN)
vRefresh *= 2;
if (par->crtc.gen_cntl & CRTC_DBL_SCAN_EN)
vRefresh /= 2;
DPRINTK("atyfb_set_par\n");
DPRINTK(" Set Visible Mode to %ix%i-%i\n",
var->xres, var->yres, var->bits_per_pixel);
DPRINTK(" Virtual resolution %ix%i, "
"pixclock_in_ps %i (calculated %i)\n",
var->xres_virtual, var->yres_virtual,
pixclock, pixclock_in_ps);
DPRINTK(" Dot clock: %i MHz\n",
1000000 / pixclock_in_ps);
DPRINTK(" Horizontal sync: %i kHz\n", hSync);
DPRINTK(" Vertical refresh: %i Hz\n", vRefresh);
DPRINTK(" x style: %i.%03i %i %i %i %i %i %i %i %i\n",
1000000 / pixclock_in_ps, 1000000 % pixclock_in_ps,
h_disp, h_sync_strt, h_sync_end, h_total,
v_disp, v_sync_strt, v_sync_end, v_total);
DPRINTK(" fb style: %i %i %i %i %i %i %i %i %i\n",
pixclock_in_ps,
debug.left_margin, h_disp, debug.right_margin, debug.hsync_len,
debug.upper_margin, v_disp, debug.lower_margin, debug.vsync_len);
}
#endif /* DEBUG */
if (!M64_HAS(INTEGRATED)) {
/* Don't forget MEM_CNTL */
tmp = aty_ld_le32(MEM_CNTL, par) & 0xf0ffffff;
switch (var->bits_per_pixel) {
case 8:
tmp |= 0x02000000;
break;
case 16:
tmp |= 0x03000000;
break;
case 32:
tmp |= 0x06000000;
break;
}
aty_st_le32(MEM_CNTL, tmp, par);
} else {
tmp = aty_ld_le32(MEM_CNTL, par) & 0xf00fffff;
if (!M64_HAS(MAGIC_POSTDIV))
tmp |= par->mem_refresh_rate << 20;
switch (var->bits_per_pixel) {
case 8:
case 24:
tmp |= 0x00000000;
break;
case 16:
tmp |= 0x04000000;
break;
case 32:
tmp |= 0x08000000;
break;
}
if (M64_HAS(CT_BUS)) {
aty_st_le32(DAC_CNTL, 0x87010184, par);
aty_st_le32(BUS_CNTL, 0x680000f9, par);
} else if (M64_HAS(VT_BUS)) {
aty_st_le32(DAC_CNTL, 0x87010184, par);
aty_st_le32(BUS_CNTL, 0x680000f9, par);
} else if (M64_HAS(MOBIL_BUS)) {
aty_st_le32(DAC_CNTL, 0x80010102, par);
aty_st_le32(BUS_CNTL, 0x7b33a040 | (par->aux_start ? BUS_APER_REG_DIS : 0), par);
} else {
/* GT */
aty_st_le32(DAC_CNTL, 0x86010102, par);
aty_st_le32(BUS_CNTL, 0x7b23a040 | (par->aux_start ? BUS_APER_REG_DIS : 0), par);
aty_st_le32(EXT_MEM_CNTL, aty_ld_le32(EXT_MEM_CNTL, par) | 0x5000001, par);
}
aty_st_le32(MEM_CNTL, tmp, par);
}
aty_st_8(DAC_MASK, 0xff, par);
info->fix.line_length = calc_line_length(par, var->xres_virtual,
var->bits_per_pixel);
info->fix.visual = var->bits_per_pixel <= 8 ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
/* Initialize the graphics engine */
if (par->accel_flags & FB_ACCELF_TEXT)
aty_init_engine(par, info);
#ifdef CONFIG_BOOTX_TEXT
btext_update_display(info->fix.smem_start,
(((par->crtc.h_tot_disp >> 16) & 0xff) + 1) * 8,
((par->crtc.v_tot_disp >> 16) & 0x7ff) + 1,
var->bits_per_pixel,
par->crtc.vxres * var->bits_per_pixel / 8);
#endif /* CONFIG_BOOTX_TEXT */
#if 0
/* switch to accelerator mode */
if (!(par->crtc.gen_cntl & CRTC_EXT_DISP_EN))
aty_st_le32(CRTC_GEN_CNTL, par->crtc.gen_cntl | CRTC_EXT_DISP_EN, par);
#endif
#ifdef DEBUG
{
/* dump non shadow CRTC, pll, LCD registers */
int i; u32 base;
/* CRTC registers */
base = 0x2000;
printk("debug atyfb: Mach64 non-shadow register values:");
for (i = 0; i < 256; i = i+4) {
if (i % 16 == 0)
printk("\ndebug atyfb: 0x%04X: ", base + i);
printk(" %08X", aty_ld_le32(i, par));
}
printk("\n\n");
#ifdef CONFIG_FB_ATY_CT
/* PLL registers */
base = 0x00;
printk("debug atyfb: Mach64 PLL register values:");
for (i = 0; i < 64; i++) {
if (i % 16 == 0)
printk("\ndebug atyfb: 0x%02X: ", base + i);
if (i % 4 == 0)
printk(" ");
printk("%02X", aty_ld_pll_ct(i, par));
}
printk("\n\n");
#endif /* CONFIG_FB_ATY_CT */
#ifdef CONFIG_FB_ATY_GENERIC_LCD
if (par->lcd_table != 0) {
/* LCD registers */
base = 0x00;
printk("debug atyfb: LCD register values:");
if (M64_HAS(LT_LCD_REGS)) {
for (i = 0; i <= POWER_MANAGEMENT; i++) {
if (i == EXT_VERT_STRETCH)
continue;
printk("\ndebug atyfb: 0x%04X: ",
lt_lcd_regs[i]);
printk(" %08X", aty_ld_lcd(i, par));
}
} else {
for (i = 0; i < 64; i++) {
if (i % 4 == 0)
printk("\ndebug atyfb: 0x%02X: ",
base + i);
printk(" %08X", aty_ld_lcd(i, par));
}
}
printk("\n\n");
}
#endif /* CONFIG_FB_ATY_GENERIC_LCD */
}
#endif /* DEBUG */
return 0;
}
static int atyfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
int err;
struct crtc crtc;
union aty_pll pll;
u32 pixclock;
memcpy(&pll, &par->pll, sizeof(pll));
err = aty_var_to_crtc(info, var, &crtc);
if (err)
return err;
pixclock = atyfb_get_pixclock(var, par);
if (pixclock == 0) {
if (!(var->activate & FB_ACTIVATE_TEST))
PRINTKE("Invalid pixclock\n");
return -EINVAL;
} else {
err = par->pll_ops->var_to_pll(info, pixclock,
var->bits_per_pixel, &pll);
if (err)
return err;
}
if (var->accel_flags & FB_ACCELF_TEXT)
info->var.accel_flags = FB_ACCELF_TEXT;
else
info->var.accel_flags = 0;
aty_crtc_to_var(&crtc, var);
var->pixclock = par->pll_ops->pll_to_var(info, &pll);
return 0;
}
static void set_off_pitch(struct atyfb_par *par, const struct fb_info *info)
{
u32 xoffset = info->var.xoffset;
u32 yoffset = info->var.yoffset;
u32 line_length = info->fix.line_length;
u32 bpp = info->var.bits_per_pixel;
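/*
* CRTC_OFF_PITCH packs the byte offset of the visible start
* (in 8-byte units) into the low bits and the line pitch
* (in 8-pixel units, i.e. line_length / bpp) at bit 22.
*/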
par->crtc.off_pitch =
((yoffset * line_length + xoffset * bpp / 8) / 8) |
((line_length / bpp) << 22);
}
/*
* Open/Release the frame buffer device
*/
static int atyfb_open(struct fb_info *info, int user)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
if (user) {
par->open++;
#ifdef __sparc__
par->mmaped = 0;
#endif
}
return 0;
}
static irqreturn_t aty_irq(int irq, void *dev_id)
{
struct atyfb_par *par = dev_id;
int handled = 0;
u32 int_cntl;
spin_lock(&par->int_lock);
int_cntl = aty_ld_le32(CRTC_INT_CNTL, par);
if (int_cntl & CRTC_VBLANK_INT) {
/* clear interrupt */
aty_st_le32(CRTC_INT_CNTL, (int_cntl & CRTC_INT_EN_MASK) |
CRTC_VBLANK_INT_AK, par);
par->vblank.count++;
if (par->vblank.pan_display) {
par->vblank.pan_display = 0;
aty_st_le32(CRTC_OFF_PITCH, par->crtc.off_pitch, par);
}
wake_up_interruptible(&par->vblank.wait);
handled = 1;
}
spin_unlock(&par->int_lock);
return IRQ_RETVAL(handled);
}
static int aty_enable_irq(struct atyfb_par *par, int reenable)
{
u32 int_cntl;
if (!test_and_set_bit(0, &par->irq_flags)) {
if (request_irq(par->irq, aty_irq, IRQF_SHARED, "atyfb", par)) {
clear_bit(0, &par->irq_flags);
return -EINVAL;
}
spin_lock_irq(&par->int_lock);
int_cntl = aty_ld_le32(CRTC_INT_CNTL, par) & CRTC_INT_EN_MASK;
/* clear interrupt */
aty_st_le32(CRTC_INT_CNTL, int_cntl | CRTC_VBLANK_INT_AK, par);
/* enable interrupt */
aty_st_le32(CRTC_INT_CNTL, int_cntl | CRTC_VBLANK_INT_EN, par);
spin_unlock_irq(&par->int_lock);
} else if (reenable) {
spin_lock_irq(&par->int_lock);
int_cntl = aty_ld_le32(CRTC_INT_CNTL, par) & CRTC_INT_EN_MASK;
if (!(int_cntl & CRTC_VBLANK_INT_EN)) {
printk("atyfb: someone disabled IRQ [%08x]\n",
int_cntl);
/* re-enable interrupt */
aty_st_le32(CRTC_INT_CNTL, int_cntl |
CRTC_VBLANK_INT_EN, par);
}
spin_unlock_irq(&par->int_lock);
}
return 0;
}
static int aty_disable_irq(struct atyfb_par *par)
{
u32 int_cntl;
if (test_and_clear_bit(0, &par->irq_flags)) {
if (par->vblank.pan_display) {
par->vblank.pan_display = 0;
aty_st_le32(CRTC_OFF_PITCH, par->crtc.off_pitch, par);
}
spin_lock_irq(&par->int_lock);
int_cntl = aty_ld_le32(CRTC_INT_CNTL, par) & CRTC_INT_EN_MASK;
/* disable interrupt */
aty_st_le32(CRTC_INT_CNTL, int_cntl & ~CRTC_VBLANK_INT_EN, par);
spin_unlock_irq(&par->int_lock);
free_irq(par->irq, par);
}
return 0;
}
static int atyfb_release(struct fb_info *info, int user)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
#ifdef __sparc__
int was_mmaped;
#endif
if (!user)
return 0;
par->open--;
mdelay(1);
wait_for_idle(par);
if (par->open)
return 0;
#ifdef __sparc__
was_mmaped = par->mmaped;
par->mmaped = 0;
if (was_mmaped) {
struct fb_var_screeninfo var;
/*
* Now reset the default display config; we have
* no idea what the program(s) that mmap'd the
* chip did to the configuration, nor whether they
* restored it correctly.
*/
var = default_var;
if (noaccel)
var.accel_flags &= ~FB_ACCELF_TEXT;
else
var.accel_flags |= FB_ACCELF_TEXT;
if (var.yres == var.yres_virtual) {
u32 videoram = (info->fix.smem_len - (PAGE_SIZE << 2));
var.yres_virtual =
((videoram * 8) / var.bits_per_pixel) /
var.xres_virtual;
if (var.yres_virtual < var.yres)
var.yres_virtual = var.yres;
}
}
#endif
aty_disable_irq(par);
return 0;
}
/*
* Pan or Wrap the Display
*
* This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
*/
static int atyfb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
u32 xres, yres, xoffset, yoffset;
xres = (((par->crtc.h_tot_disp >> 16) & 0xff) + 1) * 8;
yres = ((par->crtc.v_tot_disp >> 16) & 0x7ff) + 1;
if (par->crtc.gen_cntl & CRTC_DBL_SCAN_EN)
yres >>= 1;
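/* xoffset is forced up to a multiple of 8 pixels */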
xoffset = (var->xoffset + 7) & ~7;
yoffset = var->yoffset;
if (xoffset + xres > par->crtc.vxres ||
yoffset + yres > par->crtc.vyres)
return -EINVAL;
info->var.xoffset = xoffset;
info->var.yoffset = yoffset;
if (par->asleep)
return 0;
set_off_pitch(par, info);
if ((var->activate & FB_ACTIVATE_VBL) && !aty_enable_irq(par, 0)) {
par->vblank.pan_display = 1;
} else {
par->vblank.pan_display = 0;
aty_st_le32(CRTC_OFF_PITCH, par->crtc.off_pitch, par);
}
return 0;
}
static int aty_waitforvblank(struct atyfb_par *par, u32 crtc)
{
struct aty_interrupt *vbl;
unsigned int count;
int ret;
switch (crtc) {
case 0:
vbl = &par->vblank;
break;
default:
return -ENODEV;
}
ret = aty_enable_irq(par, 0);
if (ret)
return ret;
count = vbl->count;
ret = wait_event_interruptible_timeout(vbl->wait,
count != vbl->count, HZ/10);
if (ret < 0)
return ret;
if (ret == 0) {
aty_enable_irq(par, 1);
return -ETIMEDOUT;
}
return 0;
}
#ifdef DEBUG
#define ATYIO_CLKR 0x41545900 /* ATY\00 */
#define ATYIO_CLKW 0x41545901 /* ATY\01 */
struct atyclk {
u32 ref_clk_per;
u8 pll_ref_div;
u8 mclk_fb_div;
u8 mclk_post_div; /* 1,2,3,4,8 */
u8 mclk_fb_mult; /* 2 or 4 */
u8 xclk_post_div; /* 1,2,3,4,8 */
u8 vclk_fb_div;
u8 vclk_post_div; /* 1,2,3,4,6,8,12 */
u32 dsp_xclks_per_row; /* 0-16383 */
u32 dsp_loop_latency; /* 0-15 */
u32 dsp_precision; /* 0-7 */
u32 dsp_on; /* 0-2047 */
u32 dsp_off; /* 0-2047 */
};
#define ATYIO_FEATR 0x41545902 /* ATY\02 */
#define ATYIO_FEATW 0x41545903 /* ATY\03 */
#endif
static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
#ifdef __sparc__
struct fbtype fbtyp;
#endif
switch (cmd) {
#ifdef __sparc__
case FBIOGTYPE:
fbtyp.fb_type = FBTYPE_PCI_GENERIC;
fbtyp.fb_width = par->crtc.vxres;
fbtyp.fb_height = par->crtc.vyres;
fbtyp.fb_depth = info->var.bits_per_pixel;
fbtyp.fb_cmsize = info->cmap.len;
fbtyp.fb_size = info->fix.smem_len;
if (copy_to_user((struct fbtype __user *) arg, &fbtyp,
sizeof(fbtyp)))
return -EFAULT;
break;
#endif /* __sparc__ */
case FBIO_WAITFORVSYNC:
{
u32 crtc;
if (get_user(crtc, (__u32 __user *) arg))
return -EFAULT;
return aty_waitforvblank(par, crtc);
}
break;
#if defined(DEBUG) && defined(CONFIG_FB_ATY_CT)
case ATYIO_CLKR:
if (M64_HAS(INTEGRATED)) {
struct atyclk clk;
union aty_pll *pll = &par->pll;
u32 dsp_config = pll->ct.dsp_config;
u32 dsp_on_off = pll->ct.dsp_on_off;
clk.ref_clk_per = par->ref_clk_per;
clk.pll_ref_div = pll->ct.pll_ref_div;
clk.mclk_fb_div = pll->ct.mclk_fb_div;
clk.mclk_post_div = pll->ct.mclk_post_div_real;
clk.mclk_fb_mult = pll->ct.mclk_fb_mult;
clk.xclk_post_div = pll->ct.xclk_post_div_real;
clk.vclk_fb_div = pll->ct.vclk_fb_div;
clk.vclk_post_div = pll->ct.vclk_post_div_real;
clk.dsp_xclks_per_row = dsp_config & 0x3fff;
clk.dsp_loop_latency = (dsp_config >> 16) & 0xf;
clk.dsp_precision = (dsp_config >> 20) & 7;
clk.dsp_off = dsp_on_off & 0x7ff;
clk.dsp_on = (dsp_on_off >> 16) & 0x7ff;
if (copy_to_user((struct atyclk __user *) arg, &clk,
sizeof(clk)))
return -EFAULT;
} else
return -EINVAL;
break;
case ATYIO_CLKW:
if (M64_HAS(INTEGRATED)) {
struct atyclk clk;
union aty_pll *pll = &par->pll;
if (copy_from_user(&clk, (struct atyclk __user *) arg,
sizeof(clk)))
return -EFAULT;
par->ref_clk_per = clk.ref_clk_per;
pll->ct.pll_ref_div = clk.pll_ref_div;
pll->ct.mclk_fb_div = clk.mclk_fb_div;
pll->ct.mclk_post_div_real = clk.mclk_post_div;
pll->ct.mclk_fb_mult = clk.mclk_fb_mult;
pll->ct.xclk_post_div_real = clk.xclk_post_div;
pll->ct.vclk_fb_div = clk.vclk_fb_div;
pll->ct.vclk_post_div_real = clk.vclk_post_div;
pll->ct.dsp_config = (clk.dsp_xclks_per_row & 0x3fff) |
((clk.dsp_loop_latency & 0xf) << 16) |
((clk.dsp_precision & 7) << 20);
pll->ct.dsp_on_off = (clk.dsp_off & 0x7ff) |
((clk.dsp_on & 0x7ff) << 16);
/*aty_calc_pll_ct(info, &pll->ct);*/
aty_set_pll_ct(info, pll);
} else
return -EINVAL;
break;
case ATYIO_FEATR:
if (get_user(par->features, (u32 __user *) arg))
return -EFAULT;
break;
case ATYIO_FEATW:
if (put_user(par->features, (u32 __user *) arg))
return -EFAULT;
break;
#endif /* DEBUG && CONFIG_FB_ATY_CT */
default:
return -EINVAL;
}
return 0;
}
static int atyfb_sync(struct fb_info *info)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
if (par->blitter_may_be_busy)
wait_for_idle(par);
return 0;
}
#ifdef __sparc__
static int atyfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
unsigned int size, page, map_size = 0;
unsigned long map_offset = 0;
unsigned long off;
int i;
if (!par->mmap_map)
return -ENXIO;
if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
return -EINVAL;
off = vma->vm_pgoff << PAGE_SHIFT;
size = vma->vm_end - vma->vm_start;
/* To stop the swapper from even considering these pages. */
vma->vm_flags |= (VM_IO | VM_RESERVED);
if (((vma->vm_pgoff == 0) && (size == info->fix.smem_len)) ||
((off == info->fix.smem_len) && (size == PAGE_SIZE)))
off += 0x8000000000000000UL;
vma->vm_pgoff = off >> PAGE_SHIFT; /* propagate off changes */
/* Each page, see which map applies */
for (page = 0; page < size;) {
map_size = 0;
for (i = 0; par->mmap_map[i].size; i++) {
unsigned long start = par->mmap_map[i].voff;
unsigned long end = start + par->mmap_map[i].size;
unsigned long offset = off + page;
if (start > offset)
continue;
if (offset >= end)
continue;
map_size = par->mmap_map[i].size - (offset - start);
map_offset = par->mmap_map[i].poff + (offset - start);
break;
}
if (!map_size) {
page += PAGE_SIZE;
continue;
}
if (page + map_size > size)
map_size = size - page;
pgprot_val(vma->vm_page_prot) &= ~(par->mmap_map[i].prot_mask);
pgprot_val(vma->vm_page_prot) |= par->mmap_map[i].prot_flag;
if (remap_pfn_range(vma, vma->vm_start + page,
map_offset >> PAGE_SHIFT, map_size, vma->vm_page_prot))
return -EAGAIN;
page += map_size;
}
if (!map_size)
return -EINVAL;
if (!par->mmaped)
par->mmaped = 1;
return 0;
}
#endif /* __sparc__ */
#if defined(CONFIG_PM) && defined(CONFIG_PCI)
#ifdef CONFIG_PPC_PMAC
/*
* Power management routines. These are used for PowerBook sleep.
*/
static int aty_power_mgmt(int sleep, struct atyfb_par *par)
{
u32 pm;
int timeout;
pm = aty_ld_lcd(POWER_MANAGEMENT, par);
pm = (pm & ~PWR_MGT_MODE_MASK) | PWR_MGT_MODE_REG;
aty_st_lcd(POWER_MANAGEMENT, pm, par);
pm = aty_ld_lcd(POWER_MANAGEMENT, par);
timeout = 2000;
if (sleep) {
/* Sleep */
pm &= ~PWR_MGT_ON;
aty_st_lcd(POWER_MANAGEMENT, pm, par);
pm = aty_ld_lcd(POWER_MANAGEMENT, par);
udelay(10);
pm &= ~(PWR_BLON | AUTO_PWR_UP);
pm |= SUSPEND_NOW;
aty_st_lcd(POWER_MANAGEMENT, pm, par);
pm = aty_ld_lcd(POWER_MANAGEMENT, par);
udelay(10);
pm |= PWR_MGT_ON;
aty_st_lcd(POWER_MANAGEMENT, pm, par);
do {
pm = aty_ld_lcd(POWER_MANAGEMENT, par);
mdelay(1);
if ((--timeout) == 0)
break;
} while ((pm & PWR_MGT_STATUS_MASK) != PWR_MGT_STATUS_SUSPEND);
} else {
/* Wakeup */
pm &= ~PWR_MGT_ON;
aty_st_lcd(POWER_MANAGEMENT, pm, par);
pm = aty_ld_lcd(POWER_MANAGEMENT, par);
udelay(10);
pm &= ~SUSPEND_NOW;
pm |= (PWR_BLON | AUTO_PWR_UP);
aty_st_lcd(POWER_MANAGEMENT, pm, par);
pm = aty_ld_lcd(POWER_MANAGEMENT, par);
udelay(10);
pm |= PWR_MGT_ON;
aty_st_lcd(POWER_MANAGEMENT, pm, par);
do {
pm = aty_ld_lcd(POWER_MANAGEMENT, par);
mdelay(1);
if ((--timeout) == 0)
break;
} while ((pm & PWR_MGT_STATUS_MASK) != 0);
}
mdelay(500);
return timeout ? 0 : -EIO;
}
#endif /* CONFIG_PPC_PMAC */
static int atyfb_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct fb_info *info = pci_get_drvdata(pdev);
struct atyfb_par *par = (struct atyfb_par *) info->par;
if (state.event == pdev->dev.power.power_state.event)
return 0;
console_lock();
fb_set_suspend(info, 1);
/* Idle & reset engine */
wait_for_idle(par);
aty_reset_engine(par);
/* Blank display and LCD */
atyfb_blank(FB_BLANK_POWERDOWN, info);
par->asleep = 1;
par->lock_blank = 1;
/*
* Because we may change PCI D state ourselves, we need to
* first save the config space content so the core can
* restore it properly on resume.
*/
pci_save_state(pdev);
#ifdef CONFIG_PPC_PMAC
/* Set chip to "suspend" mode */
if (machine_is(powermac) && aty_power_mgmt(1, par)) {
par->asleep = 0;
par->lock_blank = 0;
atyfb_blank(FB_BLANK_UNBLANK, info);
fb_set_suspend(info, 0);
console_unlock();
return -EIO;
}
#else
pci_set_power_state(pdev, pci_choose_state(pdev, state));
#endif
console_unlock();
pdev->dev.power.power_state = state;
return 0;
}
static void aty_resume_chip(struct fb_info *info)
{
struct atyfb_par *par = info->par;
aty_st_le32(MEM_CNTL, par->mem_cntl, par);
if (par->pll_ops->resume_pll)
par->pll_ops->resume_pll(info, &par->pll);
if (par->aux_start)
aty_st_le32(BUS_CNTL,
aty_ld_le32(BUS_CNTL, par) | BUS_APER_REG_DIS, par);
}
static int atyfb_pci_resume(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
struct atyfb_par *par = (struct atyfb_par *) info->par;
if (pdev->dev.power.power_state.event == PM_EVENT_ON)
return 0;
console_lock();
/*
* PCI state will have been restored by the core, so
* we should be in D0 now with our config space fully
* restored
*/
#ifdef CONFIG_PPC_PMAC
if (machine_is(powermac) &&
pdev->dev.power.power_state.event == PM_EVENT_SUSPEND)
aty_power_mgmt(0, par);
#endif
aty_resume_chip(info);
par->asleep = 0;
/* Restore display */
atyfb_set_par(info);
/* Refresh */
fb_set_suspend(info, 0);
/* Unblank */
par->lock_blank = 0;
atyfb_blank(FB_BLANK_UNBLANK, info);
console_unlock();
pdev->dev.power.power_state = PMSG_ON;
return 0;
}
#endif /* defined(CONFIG_PM) && defined(CONFIG_PCI) */
/* Backlight */
#ifdef CONFIG_FB_ATY_BACKLIGHT
#define MAX_LEVEL 0xFF
static int aty_bl_get_level_brightness(struct atyfb_par *par, int level)
{
struct fb_info *info = pci_get_drvdata(par->pdev);
int atylevel;
/* Get and convert the value */
/* No locking of bl_curve since we read a single value */
atylevel = info->bl_curve[level] * FB_BACKLIGHT_MAX / MAX_LEVEL;
if (atylevel < 0)
atylevel = 0;
else if (atylevel > MAX_LEVEL)
atylevel = MAX_LEVEL;
return atylevel;
}
static int aty_bl_update_status(struct backlight_device *bd)
{
struct atyfb_par *par = bl_get_data(bd);
unsigned int reg = aty_ld_lcd(LCD_MISC_CNTL, par);
int level;
if (bd->props.power != FB_BLANK_UNBLANK ||
bd->props.fb_blank != FB_BLANK_UNBLANK)
level = 0;
else
level = bd->props.brightness;
reg |= (BLMOD_EN | BIASMOD_EN);
if (level > 0) {
reg &= ~BIAS_MOD_LEVEL_MASK;
reg |= (aty_bl_get_level_brightness(par, level) << BIAS_MOD_LEVEL_SHIFT);
} else {
reg &= ~BIAS_MOD_LEVEL_MASK;
reg |= (aty_bl_get_level_brightness(par, 0) << BIAS_MOD_LEVEL_SHIFT);
}
aty_st_lcd(LCD_MISC_CNTL, reg, par);
return 0;
}
static int aty_bl_get_brightness(struct backlight_device *bd)
{
return bd->props.brightness;
}
static const struct backlight_ops aty_bl_data = {
.get_brightness = aty_bl_get_brightness,
.update_status = aty_bl_update_status,
};
static void aty_bl_init(struct atyfb_par *par)
{
struct backlight_properties props;
struct fb_info *info = pci_get_drvdata(par->pdev);
struct backlight_device *bd;
char name[12];
#ifdef CONFIG_PMAC_BACKLIGHT
if (!pmac_has_backlight_type("ati"))
return;
#endif
snprintf(name, sizeof(name), "atybl%d", info->node);
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
bd = backlight_device_register(name, info->dev, par, &aty_bl_data,
&props);
if (IS_ERR(bd)) {
info->bl_dev = NULL;
printk(KERN_WARNING "aty: Backlight registration failed\n");
goto error;
}
info->bl_dev = bd;
fb_bl_default_curve(info, 0,
0x3F * FB_BACKLIGHT_MAX / MAX_LEVEL,
0xFF * FB_BACKLIGHT_MAX / MAX_LEVEL);
bd->props.brightness = bd->props.max_brightness;
bd->props.power = FB_BLANK_UNBLANK;
backlight_update_status(bd);
printk("aty: Backlight initialized (%s)\n", name);
return;
error:
return;
}
#ifdef CONFIG_PCI
static void aty_bl_exit(struct backlight_device *bd)
{
backlight_device_unregister(bd);
printk("aty: Backlight unloaded\n");
}
#endif /* CONFIG_PCI */
#endif /* CONFIG_FB_ATY_BACKLIGHT */
static void __devinit aty_calc_mem_refresh(struct atyfb_par *par, int xclk)
{
const int ragepro_tbl[] = {
44, 50, 55, 66, 75, 80, 100
};
const int ragexl_tbl[] = {
50, 66, 75, 83, 90, 95, 100, 105,
110, 115, 120, 125, 133, 143, 166
};
const int *refresh_tbl;
int i, size;
if (M64_HAS(XL_MEM)) {
refresh_tbl = ragexl_tbl;
size = ARRAY_SIZE(ragexl_tbl);
} else {
refresh_tbl = ragepro_tbl;
size = ARRAY_SIZE(ragepro_tbl);
}
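/*
* Pick the index of the first table entry above xclk; this
* index is later shifted into MEM_CNTL by atyfb_set_par().
*/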
for (i = 0; i < size; i++) {
if (xclk < refresh_tbl[i])
break;
}
par->mem_refresh_rate = i;
}
/*
* Initialisation
*/
static struct fb_info *fb_list = NULL;
#if defined(__i386__) && defined(CONFIG_FB_ATY_GENERIC_LCD)
static int __devinit atyfb_get_timings_from_lcd(struct atyfb_par *par,
struct fb_var_screeninfo *var)
{
int ret = -EINVAL;
if (par->lcd_table != 0 && (aty_ld_lcd(LCD_GEN_CNTL, par) & LCD_ON)) {
*var = default_var;
var->xres = var->xres_virtual = par->lcd_hdisp;
var->right_margin = par->lcd_right_margin;
var->left_margin = par->lcd_hblank_len -
(par->lcd_right_margin + par->lcd_hsync_dly +
par->lcd_hsync_len);
var->hsync_len = par->lcd_hsync_len + par->lcd_hsync_dly;
var->yres = var->yres_virtual = par->lcd_vdisp;
var->lower_margin = par->lcd_lower_margin;
var->upper_margin = par->lcd_vblank_len -
(par->lcd_lower_margin + par->lcd_vsync_len);
var->vsync_len = par->lcd_vsync_len;
var->pixclock = par->lcd_pixclock;
ret = 0;
}
return ret;
}
#endif /* defined(__i386__) && defined(CONFIG_FB_ATY_GENERIC_LCD) */
static int __devinit aty_init(struct fb_info *info)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
const char *ramname = NULL, *xtal;
int gtb_memsize, has_var = 0;
struct fb_var_screeninfo var;
int ret;
init_waitqueue_head(&par->vblank.wait);
spin_lock_init(&par->int_lock);
#ifdef CONFIG_FB_ATY_GX
if (!M64_HAS(INTEGRATED)) {
u32 stat0;
u8 dac_type, dac_subtype, clk_type;
stat0 = aty_ld_le32(CNFG_STAT0, par);
par->bus_type = (stat0 >> 0) & 0x07;
par->ram_type = (stat0 >> 3) & 0x07;
ramname = aty_gx_ram[par->ram_type];
/* FIXME: clockchip/RAMDAC probing? */
dac_type = (aty_ld_le32(DAC_CNTL, par) >> 16) & 0x07;
#ifdef CONFIG_ATARI
clk_type = CLK_ATI18818_1;
dac_type = (stat0 >> 9) & 0x07;
if (dac_type == 0x07)
dac_subtype = DAC_ATT20C408;
else
dac_subtype = (aty_ld_8(SCRATCH_REG1 + 1, par) & 0xF0) | dac_type;
#else
dac_type = DAC_IBMRGB514;
dac_subtype = DAC_IBMRGB514;
clk_type = CLK_IBMRGB514;
#endif
switch (dac_subtype) {
case DAC_IBMRGB514:
par->dac_ops = &aty_dac_ibm514;
break;
#ifdef CONFIG_ATARI
case DAC_ATI68860_B:
case DAC_ATI68860_C:
par->dac_ops = &aty_dac_ati68860b;
break;
case DAC_ATT20C408:
case DAC_ATT21C498:
par->dac_ops = &aty_dac_att21c498;
break;
#endif
default:
PRINTKI("aty_init: DAC type not implemented yet!\n");
par->dac_ops = &aty_dac_unsupported;
break;
}
switch (clk_type) {
#ifdef CONFIG_ATARI
case CLK_ATI18818_1:
par->pll_ops = &aty_pll_ati18818_1;
break;
#else
case CLK_IBMRGB514:
par->pll_ops = &aty_pll_ibm514;
break;
#endif
#if 0 /* dead code */
case CLK_STG1703:
par->pll_ops = &aty_pll_stg1703;
break;
case CLK_CH8398:
par->pll_ops = &aty_pll_ch8398;
break;
case CLK_ATT20C408:
par->pll_ops = &aty_pll_att20c408;
break;
#endif
default:
PRINTKI("aty_init: CLK type not implemented yet!");
par->pll_ops = &aty_pll_unsupported;
break;
}
}
#endif /* CONFIG_FB_ATY_GX */
#ifdef CONFIG_FB_ATY_CT
if (M64_HAS(INTEGRATED)) {
par->dac_ops = &aty_dac_ct;
par->pll_ops = &aty_pll_ct;
par->bus_type = PCI;
par->ram_type = (aty_ld_le32(CNFG_STAT0, par) & 0x07);
if (M64_HAS(XL_MEM))
ramname = aty_xl_ram[par->ram_type];
else
ramname = aty_ct_ram[par->ram_type];
/* for many chips, the mclk is 67 MHz for SDRAM, 63 MHz otherwise */
if (par->pll_limits.mclk == 67 && par->ram_type < SDRAM)
par->pll_limits.mclk = 63;
/* Mobility + 32bit memory interface need halved XCLK. */
if (M64_HAS(MOBIL_BUS) && par->ram_type == SDRAM32)
par->pll_limits.xclk = (par->pll_limits.xclk + 1) >> 1;
}
#endif
#ifdef CONFIG_PPC_PMAC
/*
* The Apple iBook1 uses non-standard memory frequencies.
* We detect it and set the frequency manually.
*/
if (of_machine_is_compatible("PowerBook2,1")) {
par->pll_limits.mclk = 70;
par->pll_limits.xclk = 53;
}
#endif
/* Allow command line to override clocks. */
if (pll)
par->pll_limits.pll_max = pll;
if (mclk)
par->pll_limits.mclk = mclk;
if (xclk)
par->pll_limits.xclk = xclk;
aty_calc_mem_refresh(par, par->pll_limits.xclk);
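/*
* Convert the clock limits (in MHz) to periods in picoseconds:
* 10^6 / f[MHz] ps. The reference clock period is derived from
* the crystal frequency in Hz (10^12 / f[Hz] ps).
*/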
par->pll_per = 1000000/par->pll_limits.pll_max;
par->mclk_per = 1000000/par->pll_limits.mclk;
par->xclk_per = 1000000/par->pll_limits.xclk;
par->ref_clk_per = 1000000000000ULL / 14318180;
xtal = "14.31818";
#ifdef CONFIG_FB_ATY_CT
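/*
* Heuristic: infer the crystal from the reference divider the
* BIOS programmed. Whichever of the two known crystals (14.31818
* or 29.498928 MHz) predicts a maximum PLL rate closer to this
* chip's pll_max is assumed to be the one fitted.
*/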
if (M64_HAS(GTB_DSP)) {
u8 pll_ref_div = aty_ld_pll_ct(PLL_REF_DIV, par);
if (pll_ref_div) {
int diff1, diff2;
diff1 = 510 * 14 / pll_ref_div - par->pll_limits.pll_max;
diff2 = 510 * 29 / pll_ref_div - par->pll_limits.pll_max;
if (diff1 < 0)
diff1 = -diff1;
if (diff2 < 0)
diff2 = -diff2;
if (diff2 < diff1) {
par->ref_clk_per = 1000000000000ULL / 29498928;
xtal = "29.498928";
}
}
}
#endif /* CONFIG_FB_ATY_CT */
/* save previous video mode */
aty_get_crtc(par, &par->saved_crtc);
if (par->pll_ops->get_pll)
par->pll_ops->get_pll(info, &par->saved_pll);
par->mem_cntl = aty_ld_le32(MEM_CNTL, par);
gtb_memsize = M64_HAS(GTB_DSP);
if (gtb_memsize)
/* 0xF used instead of MEM_SIZE_ALIAS */
switch (par->mem_cntl & 0xF) {
case MEM_SIZE_512K:
info->fix.smem_len = 0x80000;
break;
case MEM_SIZE_1M:
info->fix.smem_len = 0x100000;
break;
case MEM_SIZE_2M_GTB:
info->fix.smem_len = 0x200000;
break;
case MEM_SIZE_4M_GTB:
info->fix.smem_len = 0x400000;
break;
case MEM_SIZE_6M_GTB:
info->fix.smem_len = 0x600000;
break;
case MEM_SIZE_8M_GTB:
info->fix.smem_len = 0x800000;
break;
default:
info->fix.smem_len = 0x80000;
} else
switch (par->mem_cntl & MEM_SIZE_ALIAS) {
case MEM_SIZE_512K:
info->fix.smem_len = 0x80000;
break;
case MEM_SIZE_1M:
info->fix.smem_len = 0x100000;
break;
case MEM_SIZE_2M:
info->fix.smem_len = 0x200000;
break;
case MEM_SIZE_4M:
info->fix.smem_len = 0x400000;
break;
case MEM_SIZE_6M:
info->fix.smem_len = 0x600000;
break;
case MEM_SIZE_8M:
info->fix.smem_len = 0x800000;
break;
default:
info->fix.smem_len = 0x80000;
}
if (M64_HAS(MAGIC_VRAM_SIZE)) {
if (aty_ld_le32(CNFG_STAT1, par) & 0x40000000)
info->fix.smem_len += 0x400000;
}
if (vram) {
info->fix.smem_len = vram * 1024;
par->mem_cntl &= ~(gtb_memsize ? 0xF : MEM_SIZE_ALIAS);
if (info->fix.smem_len <= 0x80000)
par->mem_cntl |= MEM_SIZE_512K;
else if (info->fix.smem_len <= 0x100000)
par->mem_cntl |= MEM_SIZE_1M;
else if (info->fix.smem_len <= 0x200000)
par->mem_cntl |= gtb_memsize ? MEM_SIZE_2M_GTB : MEM_SIZE_2M;
else if (info->fix.smem_len <= 0x400000)
par->mem_cntl |= gtb_memsize ? MEM_SIZE_4M_GTB : MEM_SIZE_4M;
else if (info->fix.smem_len <= 0x600000)
par->mem_cntl |= gtb_memsize ? MEM_SIZE_6M_GTB : MEM_SIZE_6M;
else
par->mem_cntl |= gtb_memsize ? MEM_SIZE_8M_GTB : MEM_SIZE_8M;
aty_st_le32(MEM_CNTL, par->mem_cntl, par);
}
/*
* Reg Block 0 (CT-compatible block) is at mmio_start
* Reg Block 1 (multimedia extensions) is at mmio_start - 0x400
*/
if (M64_HAS(GX)) {
info->fix.mmio_len = 0x400;
info->fix.accel = FB_ACCEL_ATI_MACH64GX;
} else if (M64_HAS(CT)) {
info->fix.mmio_len = 0x400;
info->fix.accel = FB_ACCEL_ATI_MACH64CT;
} else if (M64_HAS(VT)) {
info->fix.mmio_start -= 0x400;
info->fix.mmio_len = 0x800;
info->fix.accel = FB_ACCEL_ATI_MACH64VT;
} else {/* GT */
info->fix.mmio_start -= 0x400;
info->fix.mmio_len = 0x800;
info->fix.accel = FB_ACCEL_ATI_MACH64GT;
}
PRINTKI("%d%c %s, %s MHz XTAL, %d MHz PLL, %d Mhz MCLK, %d MHz XCLK\n",
info->fix.smem_len == 0x80000 ? 512 : (info->fix.smem_len>>20),
info->fix.smem_len == 0x80000 ? 'K' : 'M', ramname, xtal,
par->pll_limits.pll_max, par->pll_limits.mclk,
par->pll_limits.xclk);
#if defined(DEBUG) && defined(CONFIG_FB_ATY_CT)
if (M64_HAS(INTEGRATED)) {
int i;
printk("debug atyfb: BUS_CNTL DAC_CNTL MEM_CNTL "
"EXT_MEM_CNTL CRTC_GEN_CNTL DSP_CONFIG "
"DSP_ON_OFF CLOCK_CNTL\n"
"debug atyfb: %08x %08x %08x "
"%08x %08x %08x "
"%08x %08x\n"
"debug atyfb: PLL",
aty_ld_le32(BUS_CNTL, par),
aty_ld_le32(DAC_CNTL, par),
aty_ld_le32(MEM_CNTL, par),
aty_ld_le32(EXT_MEM_CNTL, par),
aty_ld_le32(CRTC_GEN_CNTL, par),
aty_ld_le32(DSP_CONFIG, par),
aty_ld_le32(DSP_ON_OFF, par),
aty_ld_le32(CLOCK_CNTL, par));
for (i = 0; i < 40; i++)
printk(" %02x", aty_ld_pll_ct(i, par));
printk("\n");
}
#endif
if (par->pll_ops->init_pll)
par->pll_ops->init_pll(info, &par->pll);
if (par->pll_ops->resume_pll)
par->pll_ops->resume_pll(info, &par->pll);
/*
* Last page of 8 MB (4 MB on ISA) aperture is MMIO,
* unless the auxiliary register aperture is used.
*/
if (!par->aux_start &&
(info->fix.smem_len == 0x800000 ||
(par->bus_type == ISA && info->fix.smem_len == 0x400000)))
info->fix.smem_len -= GUI_RESERVE;
/*
* Disable register access through the linear aperture
* if the auxiliary aperture is used so we can access
* the full 8 MB of video RAM on 8 MB boards.
*/
if (par->aux_start)
aty_st_le32(BUS_CNTL, aty_ld_le32(BUS_CNTL, par) |
BUS_APER_REG_DIS, par);
#ifdef CONFIG_MTRR
par->mtrr_aper = -1;
par->mtrr_reg = -1;
if (!nomtrr) {
/* Cover the whole resource. */
par->mtrr_aper = mtrr_add(par->res_start, par->res_size,
MTRR_TYPE_WRCOMB, 1);
if (par->mtrr_aper >= 0 && !par->aux_start) {
/* Make a hole for mmio. */
par->mtrr_reg = mtrr_add(par->res_start + 0x800000 -
GUI_RESERVE, GUI_RESERVE,
MTRR_TYPE_UNCACHABLE, 1);
if (par->mtrr_reg < 0) {
mtrr_del(par->mtrr_aper, 0, 0);
par->mtrr_aper = -1;
}
}
}
#endif
info->fbops = &atyfb_ops;
info->pseudo_palette = par->pseudo_palette;
info->flags = FBINFO_DEFAULT |
FBINFO_HWACCEL_IMAGEBLIT |
FBINFO_HWACCEL_FILLRECT |
FBINFO_HWACCEL_COPYAREA |
FBINFO_HWACCEL_YPAN;
#ifdef CONFIG_PMAC_BACKLIGHT
if (M64_HAS(G3_PB_1_1) && of_machine_is_compatible("PowerBook1,1")) {
/*
* these bits let the 101 powerbook
* wake up from sleep -- paulus
*/
aty_st_lcd(POWER_MANAGEMENT, aty_ld_lcd(POWER_MANAGEMENT, par) |
USE_F32KHZ | TRISTATE_MEM_EN, par);
} else
#endif
if (M64_HAS(MOBIL_BUS) && backlight) {
#ifdef CONFIG_FB_ATY_BACKLIGHT
aty_bl_init(par);
#endif
}
memset(&var, 0, sizeof(var));
#ifdef CONFIG_PPC
if (machine_is(powermac)) {
/*
* FIXME: The NVRAM stuff should be put in a Mac-specific file,
* as it applies to all Mac video cards
*/
if (mode) {
if (mac_find_mode(&var, info, mode, 8))
has_var = 1;
} else {
if (default_vmode == VMODE_CHOOSE) {
int sense;
if (M64_HAS(G3_PB_1024x768))
/* G3 PowerBook with 1024x768 LCD */
default_vmode = VMODE_1024_768_60;
else if (of_machine_is_compatible("iMac"))
default_vmode = VMODE_1024_768_75;
else if (of_machine_is_compatible("PowerBook2,1"))
/* iBook with 800x600 LCD */
default_vmode = VMODE_800_600_60;
else
default_vmode = VMODE_640_480_67;
sense = read_aty_sense(par);
PRINTKI("monitor sense=%x, mode %d\n",
sense, mac_map_monitor_sense(sense));
}
if (default_vmode <= 0 || default_vmode > VMODE_MAX)
default_vmode = VMODE_640_480_60;
if (default_cmode < CMODE_8 || default_cmode > CMODE_32)
default_cmode = CMODE_8;
if (!mac_vmode_to_var(default_vmode, default_cmode,
&var))
has_var = 1;
}
}
#endif /* CONFIG_PPC */
#if defined(__i386__) && defined(CONFIG_FB_ATY_GENERIC_LCD)
if (!atyfb_get_timings_from_lcd(par, &var))
has_var = 1;
#endif
if (mode && fb_find_mode(&var, info, mode, NULL, 0, &defmode, 8))
has_var = 1;
if (!has_var)
var = default_var;
if (noaccel)
var.accel_flags &= ~FB_ACCELF_TEXT;
else
var.accel_flags |= FB_ACCELF_TEXT;
if (comp_sync != -1) {
if (!comp_sync)
var.sync &= ~FB_SYNC_COMP_HIGH_ACT;
else
var.sync |= FB_SYNC_COMP_HIGH_ACT;
}
if (var.yres == var.yres_virtual) {
u32 videoram = (info->fix.smem_len - (PAGE_SIZE << 2));
var.yres_virtual = ((videoram * 8) / var.bits_per_pixel) / var.xres_virtual;
if (var.yres_virtual < var.yres)
var.yres_virtual = var.yres;
}
ret = atyfb_check_var(&var, info);
if (ret) {
PRINTKE("can't set default video mode\n");
goto aty_init_exit;
}
#ifdef CONFIG_FB_ATY_CT
if (!noaccel && M64_HAS(INTEGRATED))
aty_init_cursor(info);
#endif /* CONFIG_FB_ATY_CT */
info->var = var;
ret = fb_alloc_cmap(&info->cmap, 256, 0);
if (ret < 0)
goto aty_init_exit;
ret = register_framebuffer(info);
if (ret < 0) {
fb_dealloc_cmap(&info->cmap);
goto aty_init_exit;
}
fb_list = info;
PRINTKI("fb%d: %s frame buffer device on %s\n",
info->node, info->fix.id, par->bus_type == ISA ? "ISA" : "PCI");
return 0;
aty_init_exit:
/* restore video mode */
aty_set_crtc(par, &par->saved_crtc);
par->pll_ops->set_pll(info, &par->saved_pll);
#ifdef CONFIG_MTRR
if (par->mtrr_reg >= 0) {
mtrr_del(par->mtrr_reg, 0, 0);
par->mtrr_reg = -1;
}
if (par->mtrr_aper >= 0) {
mtrr_del(par->mtrr_aper, 0, 0);
par->mtrr_aper = -1;
}
#endif
return ret;
}
#if defined(CONFIG_ATARI) && !defined(MODULE)
static int __devinit store_video_par(char *video_str, unsigned char m64_num)
{
char *p;
unsigned long vmembase, size, guiregbase;
PRINTKI("store_video_par() '%s' \n", video_str);
if (!(p = strsep(&video_str, ";")) || !*p)
goto mach64_invalid;
vmembase = simple_strtoul(p, NULL, 0);
if (!(p = strsep(&video_str, ";")) || !*p)
goto mach64_invalid;
size = simple_strtoul(p, NULL, 0);
if (!(p = strsep(&video_str, ";")) || !*p)
goto mach64_invalid;
guiregbase = simple_strtoul(p, NULL, 0);
phys_vmembase[m64_num] = vmembase;
phys_size[m64_num] = size;
phys_guiregbase[m64_num] = guiregbase;
PRINTKI("stored them all: $%08lX $%08lX $%08lX \n", vmembase, size,
guiregbase);
return 0;
mach64_invalid:
phys_vmembase[m64_num] = 0;
return -1;
}
#endif /* CONFIG_ATARI && !MODULE */
/*
* Blank the display.
*/
static int atyfb_blank(int blank, struct fb_info *info)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
u32 gen_cntl;
if (par->lock_blank || par->asleep)
return 0;
#ifdef CONFIG_FB_ATY_GENERIC_LCD
if (par->lcd_table && blank > FB_BLANK_NORMAL &&
(aty_ld_lcd(LCD_GEN_CNTL, par) & LCD_ON)) {
u32 pm = aty_ld_lcd(POWER_MANAGEMENT, par);
pm &= ~PWR_BLON;
aty_st_lcd(POWER_MANAGEMENT, pm, par);
}
#endif
gen_cntl = aty_ld_le32(CRTC_GEN_CNTL, par);
gen_cntl &= ~0x400004c;
switch (blank) {
case FB_BLANK_UNBLANK:
break;
case FB_BLANK_NORMAL:
gen_cntl |= 0x4000040;
break;
case FB_BLANK_VSYNC_SUSPEND:
gen_cntl |= 0x4000048;
break;
case FB_BLANK_HSYNC_SUSPEND:
gen_cntl |= 0x4000044;
break;
case FB_BLANK_POWERDOWN:
gen_cntl |= 0x400004c;
break;
}
aty_st_le32(CRTC_GEN_CNTL, gen_cntl, par);
#ifdef CONFIG_FB_ATY_GENERIC_LCD
if (par->lcd_table && blank <= FB_BLANK_NORMAL &&
(aty_ld_lcd(LCD_GEN_CNTL, par) & LCD_ON)) {
u32 pm = aty_ld_lcd(POWER_MANAGEMENT, par);
pm |= PWR_BLON;
aty_st_lcd(POWER_MANAGEMENT, pm, par);
}
#endif
return 0;
}
static void aty_st_pal(u_int regno, u_int red, u_int green, u_int blue,
const struct atyfb_par *par)
{
aty_st_8(DAC_W_INDEX, regno, par);
aty_st_8(DAC_DATA, red, par);
aty_st_8(DAC_DATA, green, par);
aty_st_8(DAC_DATA, blue, par);
}
/*
* Set a single color register. The values supplied are already
* rounded down to the hardware's capabilities (according to the
* entries in the var structure). Return != 0 for invalid regno.
* !! 4 & 8 = PSEUDO, > 8 = DIRECTCOLOR
*/
static int atyfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
u_int transp, struct fb_info *info)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
int i, depth;
u32 *pal = info->pseudo_palette;
depth = info->var.bits_per_pixel;
if (depth == 16)
depth = (info->var.green.length == 5) ? 15 : 16;
if (par->asleep)
return 0;
if (regno > 255 ||
(depth == 16 && regno > 63) ||
(depth == 15 && regno > 31))
return 1;
red >>= 8;
green >>= 8;
blue >>= 8;
par->palette[regno].red = red;
par->palette[regno].green = green;
par->palette[regno].blue = blue;
if (regno < 16) {
switch (depth) {
case 15:
pal[regno] = (regno << 10) | (regno << 5) | regno;
break;
case 16:
pal[regno] = (regno << 11) | (regno << 5) | regno;
break;
case 24:
pal[regno] = (regno << 16) | (regno << 8) | regno;
break;
case 32:
i = (regno << 8) | regno;
pal[regno] = (i << 16) | i;
break;
}
}
i = aty_ld_8(DAC_CNTL, par) & 0xfc;
if (M64_HAS(EXTRA_BRIGHT))
i |= 0x2; /* DAC_CNTL | 0x2 turns off the extra brightness for gt */
aty_st_8(DAC_CNTL, i, par);
aty_st_8(DAC_MASK, 0xff, par);
if (M64_HAS(INTEGRATED)) {
if (depth == 16) {
if (regno < 32)
aty_st_pal(regno << 3, red,
par->palette[regno << 1].green,
blue, par);
red = par->palette[regno >> 1].red;
blue = par->palette[regno >> 1].blue;
regno <<= 2;
} else if (depth == 15) {
regno <<= 3;
for (i = 0; i < 8; i++)
aty_st_pal(regno + i, red, green, blue, par);
}
}
aty_st_pal(regno, red, green, blue, par);
return 0;
}
#ifdef CONFIG_PCI
#ifdef __sparc__
static int __devinit atyfb_setup_sparc(struct pci_dev *pdev,
struct fb_info *info,
unsigned long addr)
{
struct atyfb_par *par = info->par;
struct device_node *dp;
u32 mem, chip_id;
int i, j, ret;
/*
* Map memory-mapped registers.
*/
par->ati_regbase = (void *)addr + 0x7ffc00UL;
info->fix.mmio_start = addr + 0x7ffc00UL;
/*
* Map in big-endian aperture.
*/
info->screen_base = (char *) (addr + 0x800000UL);
info->fix.smem_start = addr + 0x800000UL;
/*
* Figure mmap addresses from PCI config space.
* Split the framebuffer into big- and little-endian halves.
*/
for (i = 0; i < 6 && pdev->resource[i].start; i++)
/* nothing */ ;
j = i + 4;
par->mmap_map = kcalloc(j, sizeof(*par->mmap_map), GFP_ATOMIC);
if (!par->mmap_map) {
PRINTKE("atyfb_setup_sparc() can't alloc mmap_map\n");
return -ENOMEM;
}
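/*
* Entries 0 and 1 are reserved for the /dev/fb mmap values that
* atyfb_pci_probe() fills in later, so start at index 2.
*/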
for (i = 0, j = 2; i < 6 && pdev->resource[i].start; i++) {
struct resource *rp = &pdev->resource[i];
int io, breg = PCI_BASE_ADDRESS_0 + (i << 2);
unsigned long base;
u32 size, pbase;
base = rp->start;
io = (rp->flags & IORESOURCE_IO);
size = rp->end - base + 1;
pci_read_config_dword(pdev, breg, &pbase);
if (io)
size &= ~1;
/*
* Map the framebuffer a second time, this time without
* the braindead _PAGE_IE setting. This is used by the
* fixed Xserver, but we need to maintain the old mapping
* to stay compatible with older ones...
*/
if (base == addr) {
par->mmap_map[j].voff = (pbase + 0x10000000) & PAGE_MASK;
par->mmap_map[j].poff = base & PAGE_MASK;
par->mmap_map[j].size = (size + ~PAGE_MASK) & PAGE_MASK;
par->mmap_map[j].prot_mask = _PAGE_CACHE;
par->mmap_map[j].prot_flag = _PAGE_E;
j++;
}
/*
* Here comes the old framebuffer mapping with _PAGE_IE
* set for the big endian half of the framebuffer...
*/
if (base == addr) {
par->mmap_map[j].voff = (pbase + 0x800000) & PAGE_MASK;
par->mmap_map[j].poff = (base + 0x800000) & PAGE_MASK;
par->mmap_map[j].size = 0x800000;
par->mmap_map[j].prot_mask = _PAGE_CACHE;
par->mmap_map[j].prot_flag = _PAGE_E | _PAGE_IE;
size -= 0x800000;
j++;
}
par->mmap_map[j].voff = pbase & PAGE_MASK;
par->mmap_map[j].poff = base & PAGE_MASK;
par->mmap_map[j].size = (size + ~PAGE_MASK) & PAGE_MASK;
par->mmap_map[j].prot_mask = _PAGE_CACHE;
par->mmap_map[j].prot_flag = _PAGE_E;
j++;
}
ret = correct_chipset(par);
if (ret)
return ret;
if (IS_XL(pdev->device)) {
/*
* Fix PROMs idea of MEM_CNTL settings...
*/
mem = aty_ld_le32(MEM_CNTL, par);
chip_id = aty_ld_le32(CNFG_CHIP_ID, par);
if (((chip_id & CFG_CHIP_TYPE) == VT_CHIP_ID) && !((chip_id >> 24) & 1)) {
switch (mem & 0x0f) {
case 3:
mem = (mem & ~(0x0f)) | 2;
break;
case 7:
mem = (mem & ~(0x0f)) | 3;
break;
case 9:
mem = (mem & ~(0x0f)) | 4;
break;
case 11:
mem = (mem & ~(0x0f)) | 5;
break;
default:
break;
}
if ((aty_ld_le32(CNFG_STAT0, par) & 7) >= SDRAM)
mem &= ~(0x00700000);
}
mem &= ~(0xcf80e000); /* Turn off all undocumented bits. */
aty_st_le32(MEM_CNTL, mem, par);
}
dp = pci_device_to_OF_node(pdev);
if (dp == of_console_device) {
struct fb_var_screeninfo *var = &default_var;
unsigned int N, P, Q, M, T, R;
u32 v_total, h_total;
struct crtc crtc;
u8 pll_regs[16];
u8 clock_cntl;
crtc.vxres = of_getintprop_default(dp, "width", 1024);
crtc.vyres = of_getintprop_default(dp, "height", 768);
var->bits_per_pixel = of_getintprop_default(dp, "depth", 8);
var->xoffset = var->yoffset = 0;
crtc.h_tot_disp = aty_ld_le32(CRTC_H_TOTAL_DISP, par);
crtc.h_sync_strt_wid = aty_ld_le32(CRTC_H_SYNC_STRT_WID, par);
crtc.v_tot_disp = aty_ld_le32(CRTC_V_TOTAL_DISP, par);
crtc.v_sync_strt_wid = aty_ld_le32(CRTC_V_SYNC_STRT_WID, par);
crtc.gen_cntl = aty_ld_le32(CRTC_GEN_CNTL, par);
aty_crtc_to_var(&crtc, var);
h_total = var->xres + var->right_margin + var->hsync_len + var->left_margin;
v_total = var->yres + var->lower_margin + var->vsync_len + var->upper_margin;
/*
* Read the PLL to figure actual Refresh Rate.
*/
clock_cntl = aty_ld_8(CLOCK_CNTL, par);
/* DPRINTK("CLOCK_CNTL %02x\n", clock_cntl); */
for (i = 0; i < 16; i++)
pll_regs[i] = aty_ld_pll_ct(i, par);
/*
* PLL Reference Divider M:
*/
M = pll_regs[2];
/*
* PLL Feedback Divider N (Dependent on CLOCK_CNTL):
*/
N = pll_regs[7 + (clock_cntl & 3)];
/*
* PLL Post Divider P (Dependent on CLOCK_CNTL):
*/
P = 1 << (pll_regs[6] >> ((clock_cntl & 3) << 1));
/*
* PLL Divider Q:
*/
Q = N / P;
/*
* Target Frequency:
*
* T * M
* Q = -------
* 2 * R
*
* where R is XTALIN (= 14318 or 29498 kHz).
*/
if (IS_XL(pdev->device))
R = 29498;
else
R = 14318;
T = 2 * Q * R / M;
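/*
* T is in kHz, so 10^9 / T is the pixel period in ps, which is
* what fb_var_screeninfo.pixclock expects.
*/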
default_var.pixclock = 1000000000 / T;
}
return 0;
}
#else /* __sparc__ */
#ifdef __i386__
#ifdef CONFIG_FB_ATY_GENERIC_LCD
static void __devinit aty_init_lcd(struct atyfb_par *par, u32 bios_base)
{
u32 driv_inf_tab, sig;
u16 lcd_ofs;
/*
* To support an LCD panel, we should know its dimensions and
* its desired pixel clock.
* There are two ways to do it:
* - Check the startup video mode and calculate the panel
* size from it. This is unreliable.
* - Read it from the driver information table in the video BIOS.
*/
/* Address of driver information table is at offset 0x78. */
driv_inf_tab = bios_base + *((u16 *)(bios_base+0x78));
/* Check for the driver information table signature. */
sig = *(u32 *)driv_inf_tab;
if ((sig == 0x54504c24) || /* Rage LT pro */
(sig == 0x544d5224) || /* Rage mobility */
(sig == 0x54435824) || /* Rage XC */
(sig == 0x544c5824)) { /* Rage XL */
PRINTKI("BIOS contains driver information table.\n");
lcd_ofs = *(u16 *)(driv_inf_tab + 10);
par->lcd_table = 0;
if (lcd_ofs != 0)
par->lcd_table = bios_base + lcd_ofs;
}
if (par->lcd_table != 0) {
char model[24];
char strbuf[16];
char refresh_rates_buf[100];
int id, tech, f, i, m, default_refresh_rate;
char *txtcolour;
char *txtmonitor;
char *txtdual;
char *txtformat;
u16 width, height, panel_type, refresh_rates;
u16 *lcdmodeptr;
u32 format;
u8 lcd_refresh_rates[16] = { 50, 56, 60, 67, 70, 72, 75, 76, 85,
90, 100, 120, 140, 150, 160, 200 };
/*
* The most important information is the panel size at
* offset 25 and 27, but there's some other nice information
* which we print to the screen.
*/
id = *(u8 *)par->lcd_table;
strncpy(model, (char *)par->lcd_table+1, 24);
model[23] = 0;
width = par->lcd_width = *(u16 *)(par->lcd_table+25);
height = par->lcd_height = *(u16 *)(par->lcd_table+27);
panel_type = *(u16 *)(par->lcd_table+29);
if (panel_type & 1)
txtcolour = "colour";
else
txtcolour = "monochrome";
if (panel_type & 2)
txtdual = "dual (split) ";
else
txtdual = "";
tech = (panel_type >> 2) & 63;
switch (tech) {
case 0:
txtmonitor = "passive matrix";
break;
case 1:
txtmonitor = "active matrix";
break;
case 2:
txtmonitor = "active addressed STN";
break;
case 3:
txtmonitor = "EL";
break;
case 4:
txtmonitor = "plasma";
break;
default:
txtmonitor = "unknown";
}
format = *(u32 *)(par->lcd_table+57);
if (tech == 0 || tech == 2) {
switch (format & 7) {
case 0:
txtformat = "12 bit interface";
break;
case 1:
txtformat = "16 bit interface";
break;
case 2:
txtformat = "24 bit interface";
break;
default:
txtformat = "unknown format";
}
} else {
switch (format & 7) {
case 0:
txtformat = "8 colours";
break;
case 1:
txtformat = "512 colours";
break;
case 2:
txtformat = "4096 colours";
break;
case 4:
txtformat = "262144 colours (LT mode)";
break;
case 5:
txtformat = "16777216 colours";
break;
case 6:
txtformat = "262144 colours (FDPI-2 mode)";
break;
default:
txtformat = "unknown format";
}
}
PRINTKI("%s%s %s monitor detected: %s\n",
txtdual, txtcolour, txtmonitor, model);
PRINTKI(" id=%d, %dx%d pixels, %s\n",
id, width, height, txtformat);
refresh_rates_buf[0] = 0;
refresh_rates = *(u16 *)(par->lcd_table+62);
m = 1;
f = 0;
for (i = 0; i < 16; i++) {
if (refresh_rates & m) {
if (f == 0) {
sprintf(strbuf, "%d",
lcd_refresh_rates[i]);
f++;
} else {
sprintf(strbuf, ",%d",
lcd_refresh_rates[i]);
}
strcat(refresh_rates_buf, strbuf);
}
m = m << 1;
}
default_refresh_rate = (*(u8 *)(par->lcd_table+61) & 0xf0) >> 4;
PRINTKI(" supports refresh rates [%s], default %d Hz\n",
refresh_rates_buf, lcd_refresh_rates[default_refresh_rate]);
par->lcd_refreshrate = lcd_refresh_rates[default_refresh_rate];
/*
* We now need to determine the crtc parameters for the
* LCD monitor. This is tricky, because they are not stored
* individually in the BIOS. Instead, the BIOS contains a
* table of display modes that work for this monitor.
*
* The idea is that we search for a mode with the same dimensions
* as the LCD monitor. Say our LCD monitor is 800x600 pixels:
* then we search for an 800x600 mode.
* The CRTC parameters we find here are the ones that we need
* to use to simulate other resolutions on the LCD screen.
*/
lcdmodeptr = (u16 *)(par->lcd_table + 64);
while (*lcdmodeptr != 0) {
u32 modeptr;
u16 mwidth, mheight, lcd_hsync_start, lcd_vsync_start;
modeptr = bios_base + *lcdmodeptr;
mwidth = *((u16 *)(modeptr+0));
mheight = *((u16 *)(modeptr+2));
if (mwidth == width && mheight == height) {
par->lcd_pixclock = 100000000 / *((u16 *)(modeptr+9));
par->lcd_htotal = *((u16 *)(modeptr+17)) & 511;
par->lcd_hdisp = *((u16 *)(modeptr+19)) & 511;
lcd_hsync_start = *((u16 *)(modeptr+21)) & 511;
par->lcd_hsync_dly = (*((u16 *)(modeptr+21)) >> 9) & 7;
par->lcd_hsync_len = *((u8 *)(modeptr+23)) & 63;
par->lcd_vtotal = *((u16 *)(modeptr+24)) & 2047;
par->lcd_vdisp = *((u16 *)(modeptr+26)) & 2047;
lcd_vsync_start = *((u16 *)(modeptr+28)) & 2047;
par->lcd_vsync_len = (*((u16 *)(modeptr+28)) >> 11) & 31;
par->lcd_htotal = (par->lcd_htotal + 1) * 8;
par->lcd_hdisp = (par->lcd_hdisp + 1) * 8;
lcd_hsync_start = (lcd_hsync_start + 1) * 8;
par->lcd_hsync_len = par->lcd_hsync_len * 8;
par->lcd_vtotal++;
par->lcd_vdisp++;
lcd_vsync_start++;
par->lcd_right_margin = lcd_hsync_start - par->lcd_hdisp;
par->lcd_lower_margin = lcd_vsync_start - par->lcd_vdisp;
par->lcd_hblank_len = par->lcd_htotal - par->lcd_hdisp;
par->lcd_vblank_len = par->lcd_vtotal - par->lcd_vdisp;
break;
}
lcdmodeptr++;
}
if (*lcdmodeptr == 0) {
PRINTKE("LCD monitor CRTC parameters not found!!!\n");
/* To do: Switch to CRT if possible. */
} else {
PRINTKI(" LCD CRTC parameters: %d.%d %d %d %d %d %d %d %d %d\n",
1000000 / par->lcd_pixclock, 1000000 % par->lcd_pixclock,
par->lcd_hdisp,
par->lcd_hdisp + par->lcd_right_margin,
par->lcd_hdisp + par->lcd_right_margin
+ par->lcd_hsync_dly + par->lcd_hsync_len,
par->lcd_htotal,
par->lcd_vdisp,
par->lcd_vdisp + par->lcd_lower_margin,
par->lcd_vdisp + par->lcd_lower_margin + par->lcd_vsync_len,
par->lcd_vtotal);
PRINTKI(" : %d %d %d %d %d %d %d %d %d\n",
par->lcd_pixclock,
par->lcd_hblank_len - (par->lcd_right_margin +
par->lcd_hsync_dly + par->lcd_hsync_len),
par->lcd_hdisp,
par->lcd_right_margin,
par->lcd_hsync_len,
par->lcd_vblank_len - (par->lcd_lower_margin + par->lcd_vsync_len),
par->lcd_vdisp,
par->lcd_lower_margin,
par->lcd_vsync_len);
}
}
}
#endif /* CONFIG_FB_ATY_GENERIC_LCD */
static int __devinit init_from_bios(struct atyfb_par *par)
{
u32 bios_base, rom_addr;
int ret;
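/*
* The BIOS image lives in the legacy 0xc0000 region; SCRATCH_REG1
* holds its offset from there in 2 KB units.
*/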
rom_addr = 0xc0000 + ((aty_ld_le32(SCRATCH_REG1, par) & 0x7f) << 11);
bios_base = (unsigned long)ioremap(rom_addr, 0x10000);
/* The BIOS starts with 0xaa55. */
if (*((u16 *)bios_base) == 0xaa55) {
u8 *bios_ptr;
u16 rom_table_offset, freq_table_offset;
PLL_BLOCK_MACH64 pll_block;
PRINTKI("Mach64 BIOS is located at %x, mapped at %x.\n", rom_addr, bios_base);
/* check for frequency table */
bios_ptr = (u8*)bios_base;
rom_table_offset = (u16)(bios_ptr[0x48] | (bios_ptr[0x49] << 8));
freq_table_offset = bios_ptr[rom_table_offset + 16] | (bios_ptr[rom_table_offset + 17] << 8);
memcpy(&pll_block, bios_ptr + freq_table_offset, sizeof(PLL_BLOCK_MACH64));
PRINTKI("BIOS frequency table:\n");
PRINTKI("PCLK_min_freq %d, PCLK_max_freq %d, ref_freq %d, ref_divider %d\n",
pll_block.PCLK_min_freq, pll_block.PCLK_max_freq,
pll_block.ref_freq, pll_block.ref_divider);
PRINTKI("MCLK_pwd %d, MCLK_max_freq %d, XCLK_max_freq %d, SCLK_freq %d\n",
pll_block.MCLK_pwd, pll_block.MCLK_max_freq,
pll_block.XCLK_max_freq, pll_block.SCLK_freq);
par->pll_limits.pll_min = pll_block.PCLK_min_freq/100;
par->pll_limits.pll_max = pll_block.PCLK_max_freq/100;
par->pll_limits.ref_clk = pll_block.ref_freq/100;
par->pll_limits.ref_div = pll_block.ref_divider;
par->pll_limits.sclk = pll_block.SCLK_freq/100;
par->pll_limits.mclk = pll_block.MCLK_max_freq/100;
par->pll_limits.mclk_pm = pll_block.MCLK_pwd/100;
par->pll_limits.xclk = pll_block.XCLK_max_freq/100;
#ifdef CONFIG_FB_ATY_GENERIC_LCD
aty_init_lcd(par, bios_base);
#endif
ret = 0;
} else {
PRINTKE("no BIOS frequency table found, use parameters\n");
ret = -ENXIO;
}
iounmap((void __iomem *)bios_base);
return ret;
}
#endif /* __i386__ */
static int __devinit atyfb_setup_generic(struct pci_dev *pdev,
struct fb_info *info,
unsigned long addr)
{
struct atyfb_par *par = info->par;
u16 tmp;
unsigned long raddr;
struct resource *rrp;
int ret = 0;
raddr = addr + 0x7ff000UL;
rrp = &pdev->resource[2];
if ((rrp->flags & IORESOURCE_MEM) && request_mem_region(rrp->start, rrp->end - rrp->start + 1, "atyfb")) {
par->aux_start = rrp->start;
par->aux_size = rrp->end - rrp->start + 1;
raddr = rrp->start;
PRINTKI("using auxiliary register aperture\n");
}
info->fix.mmio_start = raddr;
par->ati_regbase = ioremap(info->fix.mmio_start, 0x1000);
if (par->ati_regbase == NULL)
return -ENOMEM;
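/*
* Within the mapped 4 KB page, the CT-compatible register block
* (Reg Block 0) sits at +0x400 when the auxiliary aperture is
* used, and at +0xc00 in the last page of the linear aperture.
*/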
info->fix.mmio_start += par->aux_start ? 0x400 : 0xc00;
par->ati_regbase += par->aux_start ? 0x400 : 0xc00;
/*
* Enable memory-space accesses using config-space
* command register.
*/
pci_read_config_word(pdev, PCI_COMMAND, &tmp);
if (!(tmp & PCI_COMMAND_MEMORY)) {
tmp |= PCI_COMMAND_MEMORY;
pci_write_config_word(pdev, PCI_COMMAND, tmp);
}
#ifdef __BIG_ENDIAN
/* Use the big-endian aperture */
addr += 0x800000;
#endif
/* Map in frame buffer */
info->fix.smem_start = addr;
info->screen_base = ioremap(addr, 0x800000);
if (info->screen_base == NULL) {
ret = -ENOMEM;
goto atyfb_setup_generic_fail;
}
ret = correct_chipset(par);
if (ret)
goto atyfb_setup_generic_fail;
#ifdef __i386__
ret = init_from_bios(par);
if (ret)
goto atyfb_setup_generic_fail;
#endif
if (!(aty_ld_le32(CRTC_GEN_CNTL, par) & CRTC_EXT_DISP_EN))
par->clk_wr_offset = (inb(R_GENMO) & 0x0CU) >> 2;
else
par->clk_wr_offset = aty_ld_8(CLOCK_CNTL, par) & 0x03U;
/* According to ATI, we should use clock 3 for accelerated mode;
* this deliberately overrides the value probed above. */
par->clk_wr_offset = 3;
return 0;
atyfb_setup_generic_fail:
iounmap(par->ati_regbase);
par->ati_regbase = NULL;
if (info->screen_base) {
iounmap(info->screen_base);
info->screen_base = NULL;
}
return ret;
}
#endif /* !__sparc__ */
static int __devinit atyfb_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
unsigned long addr, res_start, res_size;
struct fb_info *info;
struct resource *rp;
struct atyfb_par *par;
int rc = -ENOMEM;
/* Enable device in PCI config */
if (pci_enable_device(pdev)) {
PRINTKE("Cannot enable PCI device\n");
return -ENXIO;
}
/* Find which resource to use */
rp = &pdev->resource[0];
if (rp->flags & IORESOURCE_IO)
rp = &pdev->resource[1];
addr = rp->start;
if (!addr)
return -ENXIO;
/* Reserve space */
res_start = rp->start;
res_size = rp->end - rp->start + 1;
if (!request_mem_region(res_start, res_size, "atyfb"))
return -EBUSY;
/* Allocate framebuffer */
info = framebuffer_alloc(sizeof(struct atyfb_par), &pdev->dev);
if (!info) {
PRINTKE("atyfb_pci_probe() can't alloc fb_info\n");
return -ENOMEM;
}
par = info->par;
info->fix = atyfb_fix;
info->device = &pdev->dev;
par->pci_id = pdev->device;
par->res_start = res_start;
par->res_size = res_size;
par->irq = pdev->irq;
par->pdev = pdev;
/* Setup "info" structure */
#ifdef __sparc__
rc = atyfb_setup_sparc(pdev, info, addr);
#else
rc = atyfb_setup_generic(pdev, info, addr);
#endif
if (rc)
goto err_release_mem;
pci_set_drvdata(pdev, info);
/* Init chip & register framebuffer */
rc = aty_init(info);
if (rc)
goto err_release_io;
#ifdef __sparc__
/*
* Add /dev/fb mmap values.
*/
par->mmap_map[0].voff = 0x8000000000000000UL;
par->mmap_map[0].poff = (unsigned long) info->screen_base & PAGE_MASK;
par->mmap_map[0].size = info->fix.smem_len;
par->mmap_map[0].prot_mask = _PAGE_CACHE;
par->mmap_map[0].prot_flag = _PAGE_E;
par->mmap_map[1].voff = par->mmap_map[0].voff + info->fix.smem_len;
par->mmap_map[1].poff = (long)par->ati_regbase & PAGE_MASK;
par->mmap_map[1].size = PAGE_SIZE;
par->mmap_map[1].prot_mask = _PAGE_CACHE;
par->mmap_map[1].prot_flag = _PAGE_E;
#endif /* __sparc__ */
mutex_lock(&reboot_lock);
if (!reboot_info)
reboot_info = info;
mutex_unlock(&reboot_lock);
return 0;
err_release_io:
#ifdef __sparc__
kfree(par->mmap_map);
#else
if (par->ati_regbase)
iounmap(par->ati_regbase);
if (info->screen_base)
iounmap(info->screen_base);
#endif
err_release_mem:
if (par->aux_start)
release_mem_region(par->aux_start, par->aux_size);
release_mem_region(par->res_start, par->res_size);
framebuffer_release(info);
return rc;
}
#endif /* CONFIG_PCI */
#ifdef CONFIG_ATARI
static int __init atyfb_atari_probe(void)
{
struct atyfb_par *par;
struct fb_info *info;
int m64_num;
u32 clock_r;
int num_found = 0;
for (m64_num = 0; m64_num < mach64_count; m64_num++) {
if (!phys_vmembase[m64_num] || !phys_size[m64_num] ||
!phys_guiregbase[m64_num]) {
PRINTKI("phys_*[%d] parameters not set => "
"returning early. \n", m64_num);
continue;
}
info = framebuffer_alloc(sizeof(struct atyfb_par), NULL);
if (!info) {
PRINTKE("atyfb_atari_probe() can't alloc fb_info\n");
return -ENOMEM;
}
par = info->par;
info->fix = atyfb_fix;
par->irq = (unsigned int) -1; /* something invalid */
/*
* Map the video memory (physical address given)
* to somewhere in the kernel address space.
*/
info->screen_base = ioremap(phys_vmembase[m64_num], phys_size[m64_num]);
info->fix.smem_start = (unsigned long)info->screen_base; /* Fake! */
par->ati_regbase = ioremap(phys_guiregbase[m64_num], 0x10000) +
0xFC00ul;
info->fix.mmio_start = (unsigned long)par->ati_regbase; /* Fake! */
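/*
* Probe the clock-write offset: write a 32-bit test pattern to
* CLOCK_CNTL and check which byte of it reads back in the low
* byte; the surviving byte lane identifies the board wiring.
*/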
aty_st_le32(CLOCK_CNTL, 0x12345678, par);
clock_r = aty_ld_le32(CLOCK_CNTL, par);
switch (clock_r & 0x003F) {
case 0x12:
par->clk_wr_offset = 3; /* */
break;
case 0x34:
par->clk_wr_offset = 2; /* Medusa ST-IO ISA Adapter etc. */
break;
case 0x16:
par->clk_wr_offset = 1; /* */
break;
case 0x38:
par->clk_wr_offset = 0; /* Panther 1 ISA Adapter (Gerald) */
break;
}
/* Fake pci_id for correct_chipset() */
switch (aty_ld_le32(CNFG_CHIP_ID, par) & CFG_CHIP_TYPE) {
case 0x00d7:
par->pci_id = PCI_CHIP_MACH64GX;
break;
case 0x0057:
par->pci_id = PCI_CHIP_MACH64CX;
break;
default:
break;
}
if (correct_chipset(par) || aty_init(info)) {
iounmap(info->screen_base);
iounmap(par->ati_regbase);
framebuffer_release(info);
} else {
num_found++;
}
}
return num_found ? 0 : -ENXIO;
}
#endif /* CONFIG_ATARI */
#ifdef CONFIG_PCI
static void __devexit atyfb_remove(struct fb_info *info)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
/* restore video mode */
aty_set_crtc(par, &par->saved_crtc);
par->pll_ops->set_pll(info, &par->saved_pll);
unregister_framebuffer(info);
#ifdef CONFIG_FB_ATY_BACKLIGHT
if (M64_HAS(MOBIL_BUS))
aty_bl_exit(info->bl_dev);
#endif
#ifdef CONFIG_MTRR
if (par->mtrr_reg >= 0) {
mtrr_del(par->mtrr_reg, 0, 0);
par->mtrr_reg = -1;
}
if (par->mtrr_aper >= 0) {
mtrr_del(par->mtrr_aper, 0, 0);
par->mtrr_aper = -1;
}
#endif
#ifndef __sparc__
if (par->ati_regbase)
iounmap(par->ati_regbase);
if (info->screen_base)
iounmap(info->screen_base);
#ifdef __BIG_ENDIAN
if (info->sprite.addr)
iounmap(info->sprite.addr);
#endif
#endif
#ifdef __sparc__
kfree(par->mmap_map);
#endif
if (par->aux_start)
release_mem_region(par->aux_start, par->aux_size);
if (par->res_start)
release_mem_region(par->res_start, par->res_size);
framebuffer_release(info);
}
static void __devexit atyfb_pci_remove(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
mutex_lock(&reboot_lock);
if (reboot_info == info)
reboot_info = NULL;
mutex_unlock(&reboot_lock);
atyfb_remove(info);
}
static struct pci_device_id atyfb_pci_tbl[] = {
#ifdef CONFIG_FB_ATY_GX
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GX) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64CX) },
#endif /* CONFIG_FB_ATY_GX */
#ifdef CONFIG_FB_ATY_CT
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64CT) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64ET) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LT) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64VT) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GT) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64VU) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GU) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LG) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64VV) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GV) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GW) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GY) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GZ) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GB) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GD) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GI) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GP) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GQ) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LB) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LD) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LI) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LP) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LQ) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GM) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GN) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GO) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GL) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GR) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GS) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LM) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LN) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LR) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LS) },
#endif /* CONFIG_FB_ATY_CT */
{ }
};
MODULE_DEVICE_TABLE(pci, atyfb_pci_tbl);
static struct pci_driver atyfb_driver = {
.name = "atyfb",
.id_table = atyfb_pci_tbl,
.probe = atyfb_pci_probe,
.remove = __devexit_p(atyfb_pci_remove),
#ifdef CONFIG_PM
.suspend = atyfb_pci_suspend,
.resume = atyfb_pci_resume,
#endif /* CONFIG_PM */
};
#endif /* CONFIG_PCI */
#ifndef MODULE
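/*
* Parse "atyfb" kernel command line options, e.g.
* video=atyfb:noaccel,vram:4096,mclk:50 (comma-separated entries;
* anything unrecognized is taken as the video mode string).
*/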
static int __init atyfb_setup(char *options)
{
char *this_opt;
if (!options || !*options)
return 0;
while ((this_opt = strsep(&options, ",")) != NULL) {
if (!strncmp(this_opt, "noaccel", 7)) {
noaccel = 1;
#ifdef CONFIG_MTRR
} else if (!strncmp(this_opt, "nomtrr", 6)) {
nomtrr = 1;
#endif
} else if (!strncmp(this_opt, "vram:", 5))
vram = simple_strtoul(this_opt + 5, NULL, 0);
else if (!strncmp(this_opt, "pll:", 4))
pll = simple_strtoul(this_opt + 4, NULL, 0);
else if (!strncmp(this_opt, "mclk:", 5))
mclk = simple_strtoul(this_opt + 5, NULL, 0);
else if (!strncmp(this_opt, "xclk:", 5))
xclk = simple_strtoul(this_opt+5, NULL, 0);
else if (!strncmp(this_opt, "comp_sync:", 10))
comp_sync = simple_strtoul(this_opt+10, NULL, 0);
else if (!strncmp(this_opt, "backlight:", 10))
backlight = simple_strtoul(this_opt+10, NULL, 0);
#ifdef CONFIG_PPC
else if (!strncmp(this_opt, "vmode:", 6)) {
unsigned int vmode =
simple_strtoul(this_opt + 6, NULL, 0);
if (vmode > 0 && vmode <= VMODE_MAX)
default_vmode = vmode;
} else if (!strncmp(this_opt, "cmode:", 6)) {
unsigned int cmode =
simple_strtoul(this_opt + 6, NULL, 0);
switch (cmode) {
case 0:
case 8:
default_cmode = CMODE_8;
break;
case 15:
case 16:
default_cmode = CMODE_16;
break;
case 24:
case 32:
default_cmode = CMODE_32;
break;
}
}
#endif
#ifdef CONFIG_ATARI
/*
* Why do we need this silly Mach64 argument?
* We are already here because of mach64= so it's redundant.
*/
else if (MACH_IS_ATARI
&& (!strncmp(this_opt, "Mach64:", 7))) {
static unsigned char m64_num;
static char mach64_str[80];
strlcpy(mach64_str, this_opt + 7, sizeof(mach64_str));
if (!store_video_par(mach64_str, m64_num)) {
m64_num++;
mach64_count = m64_num;
}
}
#endif
else
mode = this_opt;
}
return 0;
}
#endif /* MODULE */
static int atyfb_reboot_notify(struct notifier_block *nb,
unsigned long code, void *unused)
{
struct atyfb_par *par;
if (code != SYS_RESTART)
return NOTIFY_DONE;
mutex_lock(&reboot_lock);
if (!reboot_info)
goto out;
if (!lock_fb_info(reboot_info))
goto out;
par = reboot_info->par;
/*
* HP OmniBook 500's BIOS doesn't like the state of the
* hardware after atyfb has been used. Restore the hardware
* to the original state to allow successful reboots.
*/
aty_set_crtc(par, &par->saved_crtc);
par->pll_ops->set_pll(reboot_info, &par->saved_pll);
unlock_fb_info(reboot_info);
out:
mutex_unlock(&reboot_lock);
return NOTIFY_DONE;
}
static struct notifier_block atyfb_reboot_notifier = {
.notifier_call = atyfb_reboot_notify,
};
static const struct dmi_system_id atyfb_reboot_ids[] = {
{
.ident = "HP OmniBook 500",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP OmniBook PC"),
DMI_MATCH(DMI_PRODUCT_VERSION, "HP OmniBook 500 FA"),
},
},
{ }
};
static int __init atyfb_init(void)
{
int err1 = 1, err2 = 1;
#ifndef MODULE
char *option = NULL;
if (fb_get_options("atyfb", &option))
return -ENODEV;
atyfb_setup(option);
#endif
#ifdef CONFIG_PCI
err1 = pci_register_driver(&atyfb_driver);
#endif
#ifdef CONFIG_ATARI
err2 = atyfb_atari_probe();
#endif
if (err1 && err2)
return -ENODEV;
if (dmi_check_system(atyfb_reboot_ids))
register_reboot_notifier(&atyfb_reboot_notifier);
return 0;
}
static void __exit atyfb_exit(void)
{
if (dmi_check_system(atyfb_reboot_ids))
unregister_reboot_notifier(&atyfb_reboot_notifier);
#ifdef CONFIG_PCI
pci_unregister_driver(&atyfb_driver);
#endif
}
module_init(atyfb_init);
module_exit(atyfb_exit);
MODULE_DESCRIPTION("FBDev driver for ATI Mach64 cards");
MODULE_LICENSE("GPL");
module_param(noaccel, bool, 0);
MODULE_PARM_DESC(noaccel, "bool: disable acceleration");
module_param(vram, int, 0);
MODULE_PARM_DESC(vram, "int: override size of video ram");
module_param(pll, int, 0);
MODULE_PARM_DESC(pll, "int: override video clock");
module_param(mclk, int, 0);
MODULE_PARM_DESC(mclk, "int: override memory clock");
module_param(xclk, int, 0);
MODULE_PARM_DESC(xclk, "int: override accelerated engine clock");
module_param(comp_sync, int, 0);
MODULE_PARM_DESC(comp_sync, "Set composite sync signal to low (0) or high (1)");
module_param(mode, charp, 0);
MODULE_PARM_DESC(mode, "Specify resolution as \"<xres>x<yres>[-<bpp>][@<refresh>]\"");
#ifdef CONFIG_MTRR
module_param(nomtrr, bool, 0);
MODULE_PARM_DESC(nomtrr, "bool: disable use of MTRR registers");
#endif
| gpl-2.0 |
Eistuete/android_kernel_huawei_msm8916 | arch/arm/plat-samsung/s5p-irq-eint.c | 2374 | 5225 | /*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* S5P - IRQ EINT support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/gpio.h>
#include <linux/irqchip/arm-vic.h>
#include <plat/regs-irqtype.h>
#include <mach/map.h>
#include <plat/cpu.h>
#include <plat/pm.h>
#include <plat/gpio-cfg.h>
#include <mach/regs-gpio.h>
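/*
* The external interrupts are handled through per-group EINT
* registers: EINT_REG_NR() selects the register for an irq and
* eint_irq_to_bit() the bit within it.
*/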
static inline void s5p_irq_eint_mask(struct irq_data *data)
{
u32 mask;
mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
mask |= eint_irq_to_bit(data->irq);
__raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
}
static void s5p_irq_eint_unmask(struct irq_data *data)
{
u32 mask;
mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
mask &= ~(eint_irq_to_bit(data->irq));
__raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
}
static inline void s5p_irq_eint_ack(struct irq_data *data)
{
__raw_writel(eint_irq_to_bit(data->irq),
S5P_EINT_PEND(EINT_REG_NR(data->irq)));
}
static void s5p_irq_eint_maskack(struct irq_data *data)
{
/* the compiler should inline these */
s5p_irq_eint_mask(data);
s5p_irq_eint_ack(data);
}
static int s5p_irq_eint_set_type(struct irq_data *data, unsigned int type)
{
int offs = EINT_OFFSET(data->irq);
int shift;
u32 ctrl, mask;
u32 newvalue = 0;
switch (type) {
case IRQ_TYPE_EDGE_RISING:
newvalue = S5P_IRQ_TYPE_EDGE_RISING;
break;
case IRQ_TYPE_EDGE_FALLING:
newvalue = S5P_IRQ_TYPE_EDGE_FALLING;
break;
case IRQ_TYPE_EDGE_BOTH:
newvalue = S5P_IRQ_TYPE_EDGE_BOTH;
break;
case IRQ_TYPE_LEVEL_LOW:
newvalue = S5P_IRQ_TYPE_LEVEL_LOW;
break;
case IRQ_TYPE_LEVEL_HIGH:
newvalue = S5P_IRQ_TYPE_LEVEL_HIGH;
break;
default:
printk(KERN_ERR "No such irq type %d", type);
return -EINVAL;
}
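/* Each EINT_CON register packs eight 4-bit trigger-type fields. */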
shift = (offs & 0x7) * 4;
mask = 0x7 << shift;
ctrl = __raw_readl(S5P_EINT_CON(EINT_REG_NR(data->irq)));
ctrl &= ~mask;
ctrl |= newvalue << shift;
__raw_writel(ctrl, S5P_EINT_CON(EINT_REG_NR(data->irq)));
if ((0 <= offs) && (offs < 8))
s3c_gpio_cfgpin(EINT_GPIO_0(offs & 0x7), EINT_MODE);
else if ((8 <= offs) && (offs < 16))
s3c_gpio_cfgpin(EINT_GPIO_1(offs & 0x7), EINT_MODE);
else if ((16 <= offs) && (offs < 24))
s3c_gpio_cfgpin(EINT_GPIO_2(offs & 0x7), EINT_MODE);
else if ((24 <= offs) && (offs < 32))
s3c_gpio_cfgpin(EINT_GPIO_3(offs & 0x7), EINT_MODE);
else
printk(KERN_ERR "No such irq number %d", offs);
return 0;
}
static struct irq_chip s5p_irq_eint = {
.name = "s5p-eint",
.irq_mask = s5p_irq_eint_mask,
.irq_unmask = s5p_irq_eint_unmask,
.irq_mask_ack = s5p_irq_eint_maskack,
.irq_ack = s5p_irq_eint_ack,
.irq_set_type = s5p_irq_eint_set_type,
#ifdef CONFIG_PM
.irq_set_wake = s3c_irqext_wake,
#endif
};
/* s5p_irq_demux_eint
*
* This function demuxes the IRQ from the group0 external interrupts,
* from EINTs 16 to 31. It is designed to be inlined into the specific
* handler s5p_irq_demux_eintX_Y.
*
* Each EINT pend/mask register handles eight of them.
*/
static inline void s5p_irq_demux_eint(unsigned int start)
{
u32 status = __raw_readl(S5P_EINT_PEND(EINT_REG_NR(start)));
u32 mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(start)));
unsigned int irq;
status &= ~mask;
status &= 0xff;
while (status) {
irq = fls(status) - 1;
generic_handle_irq(irq + start);
status &= ~(1 << irq);
}
}
static void s5p_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
{
s5p_irq_demux_eint(IRQ_EINT(16));
s5p_irq_demux_eint(IRQ_EINT(24));
}
static inline void s5p_irq_vic_eint_mask(struct irq_data *data)
{
void __iomem *base = irq_data_get_irq_chip_data(data);
s5p_irq_eint_mask(data);
writel(1 << EINT_OFFSET(data->irq), base + VIC_INT_ENABLE_CLEAR);
}
static void s5p_irq_vic_eint_unmask(struct irq_data *data)
{
void __iomem *base = irq_data_get_irq_chip_data(data);
s5p_irq_eint_unmask(data);
writel(1 << EINT_OFFSET(data->irq), base + VIC_INT_ENABLE);
}
static inline void s5p_irq_vic_eint_ack(struct irq_data *data)
{
__raw_writel(eint_irq_to_bit(data->irq),
S5P_EINT_PEND(EINT_REG_NR(data->irq)));
}
static void s5p_irq_vic_eint_maskack(struct irq_data *data)
{
s5p_irq_vic_eint_mask(data);
s5p_irq_vic_eint_ack(data);
}
static struct irq_chip s5p_irq_vic_eint = {
.name = "s5p_vic_eint",
.irq_mask = s5p_irq_vic_eint_mask,
.irq_unmask = s5p_irq_vic_eint_unmask,
.irq_mask_ack = s5p_irq_vic_eint_maskack,
.irq_ack = s5p_irq_vic_eint_ack,
.irq_set_type = s5p_irq_eint_set_type,
#ifdef CONFIG_PM
.irq_set_wake = s3c_irqext_wake,
#endif
};
static int __init s5p_init_irq_eint(void)
{
int irq;
for (irq = IRQ_EINT(0); irq <= IRQ_EINT(15); irq++)
irq_set_chip(irq, &s5p_irq_vic_eint);
for (irq = IRQ_EINT(16); irq <= IRQ_EINT(31); irq++) {
irq_set_chip_and_handler(irq, &s5p_irq_eint, handle_level_irq);
set_irq_flags(irq, IRQF_VALID);
}
irq_set_chained_handler(IRQ_EINT16_31, s5p_irq_demux_eint16_31);
return 0;
}
arch_initcall(s5p_init_irq_eint);
| gpl-2.0 |
CAFans/android_kernel_lge_msm8974 | kernel/task_work.c | 2374 | 2223 | #include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/tracehook.h>
static struct callback_head work_exited; /* all we need is ->next == NULL */
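/*
* task_work_add() is lockless: it pushes @work onto task->task_works
* with cmpxchg() and fails with -ESRCH once task_work_run() has
* installed work_exited, i.e. the task will run no more callbacks.
*/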
int
task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
{
struct callback_head *head;
do {
head = ACCESS_ONCE(task->task_works);
if (unlikely(head == &work_exited))
return -ESRCH;
work->next = head;
} while (cmpxchg(&task->task_works, head, work) != head);
if (notify)
set_notify_resume(task);
return 0;
}
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
struct callback_head **pprev = &task->task_works;
struct callback_head *work = NULL;
unsigned long flags;
/*
* If cmpxchg() fails we continue without updating pprev.
* Either we raced with task_work_add(), which added a new
* entry before this work, in which case we will find it again;
* or we raced with task_work_run() and *pprev is NULL/exited.
*/
raw_spin_lock_irqsave(&task->pi_lock, flags);
while ((work = ACCESS_ONCE(*pprev))) {
read_barrier_depends();
if (work->func != func)
pprev = &work->next;
else if (cmpxchg(pprev, work, work->next) == work)
break;
}
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
return work;
}
void task_work_run(void)
{
struct task_struct *task = current;
struct callback_head *work, *head, *next;
for (;;) {
/*
* work->func() can do task_work_add(), do not set
* work_exited unless the list is empty.
*/
do {
work = ACCESS_ONCE(task->task_works);
head = !work && (task->flags & PF_EXITING) ?
&work_exited : NULL;
} while (cmpxchg(&task->task_works, work, head) != work);
if (!work)
break;
/*
* Synchronize with task_work_cancel(). It can't remove
* the first entry == work, cmpxchg(task_works) should
* fail, but it can play with *work and other entries.
*/
raw_spin_unlock_wait(&task->pi_lock);
smp_mb();
/* Reverse the list to run the works in fifo order */
head = NULL;
do {
next = work->next;
work->next = head;
head = work;
work = next;
} while (work);
work = head;
do {
next = work->next;
work->func(work);
work = next;
cond_resched();
} while (work);
}
}
| gpl-2.0 |
TamCore/android_kernel_htc_msm8660 | crypto/sha1_generic.c | 4166 | 3458 | /*
* Cryptographic API.
*
* SHA1 Secure Hash Algorithm.
*
* Derived from cryptoapi implementation, adapted for in-place
* scatterlist interface.
*
* Copyright (c) Alan Smithee.
* Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
* Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <asm/byteorder.h>
static int sha1_init(struct shash_desc *desc)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
*sctx = (struct sha1_state){
.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
};
return 0;
}
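/*
* Buffer input until a full 64-byte block is available, run
* sha_transform() over every complete block, then stash the tail
* in sctx->buffer for the next update.
*/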
static int sha1_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
unsigned int partial, done;
const u8 *src;
partial = sctx->count & 0x3f;
sctx->count += len;
done = 0;
src = data;
if ((partial + len) > 63) {
u32 temp[SHA_WORKSPACE_WORDS];
if (partial) {
done = -partial;
memcpy(sctx->buffer + partial, data, done + 64);
src = sctx->buffer;
}
do {
sha_transform(sctx->state, src, temp);
done += 64;
src = data + done;
} while (done + 63 < len);
memset(temp, 0, sizeof(temp));
partial = 0;
}
memcpy(sctx->buffer + partial, src, len - done);
return 0;
}
/* Add padding and return the message digest. */
static int sha1_final(struct shash_desc *desc, u8 *out)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
__be32 *dst = (__be32 *)out;
u32 i, index, padlen;
__be64 bits;
static const u8 padding[64] = { 0x80, };
bits = cpu_to_be64(sctx->count << 3);
/* Pad out to 56 mod 64 */
index = sctx->count & 0x3f;
padlen = (index < 56) ? (56 - index) : ((64+56) - index);
sha1_update(desc, padding, padlen);
/* Append length */
sha1_update(desc, (const u8 *)&bits, sizeof(bits));
/* Store state in digest */
for (i = 0; i < 5; i++)
dst[i] = cpu_to_be32(sctx->state[i]);
/* Wipe context */
memset(sctx, 0, sizeof(*sctx));
return 0;
}
static int sha1_export(struct shash_desc *desc, void *out)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
memcpy(out, sctx, sizeof(*sctx));
return 0;
}
static int sha1_import(struct shash_desc *desc, const void *in)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
memcpy(sctx, in, sizeof(*sctx));
return 0;
}
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_init,
.update = sha1_update,
.final = sha1_final,
.export = sha1_export,
.import = sha1_import,
.descsize = sizeof(struct sha1_state),
.statesize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-generic",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static int __init sha1_generic_mod_init(void)
{
return crypto_register_shash(&alg);
}
static void __exit sha1_generic_mod_fini(void)
{
crypto_unregister_shash(&alg);
}
module_init(sha1_generic_mod_init);
module_exit(sha1_generic_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
MODULE_ALIAS("sha1");
| gpl-2.0 |
slukk/mako_msm | drivers/mtd/onenand/omap2.c | 4934 | 21977 | /*
* linux/drivers/mtd/onenand/omap2.c
*
* OneNAND driver for OMAP2 / OMAP3
*
* Copyright © 2005-2006 Nokia Corporation
*
* Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and Juha Yrjölä
* IRQ and DMA support written by Timo Teras
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; see the file COPYING. If not, write to the Free Software
* Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <asm/mach/flash.h>
#include <plat/gpmc.h>
#include <plat/onenand.h>
#include <asm/gpio.h>
#include <plat/dma.h>
#include <plat/board.h>
#define DRIVER_NAME "omap2-onenand"
#define ONENAND_IO_SIZE SZ_128K
#define ONENAND_BUFRAM_SIZE (1024 * 5)
struct omap2_onenand {
struct platform_device *pdev;
int gpmc_cs;
unsigned long phys_base;
int gpio_irq;
struct mtd_info mtd;
struct onenand_chip onenand;
struct completion irq_done;
struct completion dma_done;
int dma_channel;
int freq;
int (*setup)(void __iomem *base, int *freq_ptr);
struct regulator *regulator;
};
static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
{
struct omap2_onenand *c = data;
complete(&c->dma_done);
}
static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
{
struct omap2_onenand *c = dev_id;
complete(&c->irq_done);
return IRQ_HANDLED;
}
static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
{
return readw(c->onenand.base + reg);
}
static inline void write_reg(struct omap2_onenand *c, unsigned short value,
int reg)
{
writew(value, c->onenand.base + reg);
}
static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
msg, state, ctrl, intr);
}
static void wait_warn(char *msg, int state, unsigned int ctrl,
unsigned int intr)
{
printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
"intr 0x%04x\n", msg, state, ctrl, intr);
}
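/*
* Wait for the current OneNAND operation to finish. Reset/erase
* phases are polled with udelay(); other states wait for the
* interrupt with a 20 ms timeout, retrying while the controller
* still reports an operation in progress (ONENAND_CTRL_ONGO).
*/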
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
struct onenand_chip *this = mtd->priv;
unsigned int intr = 0;
unsigned int ctrl, ctrl_mask;
unsigned long timeout;
u32 syscfg;
if (state == FL_RESETING || state == FL_PREPARING_ERASE ||
state == FL_VERIFYING_ERASE) {
int i = 21;
unsigned int intr_flags = ONENAND_INT_MASTER;
switch (state) {
case FL_RESETING:
intr_flags |= ONENAND_INT_RESET;
break;
case FL_PREPARING_ERASE:
intr_flags |= ONENAND_INT_ERASE;
break;
case FL_VERIFYING_ERASE:
i = 101;
break;
}
while (--i) {
udelay(1);
intr = read_reg(c, ONENAND_REG_INTERRUPT);
if (intr & ONENAND_INT_MASTER)
break;
}
ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
if (ctrl & ONENAND_CTRL_ERROR) {
wait_err("controller error", state, ctrl, intr);
return -EIO;
}
if ((intr & intr_flags) == intr_flags)
return 0;
/* Continue in the wait-for-interrupt branch below */
}
if (state != FL_READING) {
int result;
/* Turn interrupts on */
syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
syscfg |= ONENAND_SYS_CFG1_IOBE;
write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
if (cpu_is_omap34xx())
/* Add a delay to let GPIO settle */
syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
}
INIT_COMPLETION(c->irq_done);
if (c->gpio_irq) {
result = gpio_get_value(c->gpio_irq);
if (result == -1) {
ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
intr = read_reg(c, ONENAND_REG_INTERRUPT);
wait_err("gpio error", state, ctrl, intr);
return -EIO;
}
} else
result = 0;
if (result == 0) {
int retry_cnt = 0;
retry:
result = wait_for_completion_timeout(&c->irq_done,
msecs_to_jiffies(20));
if (result == 0) {
/* Timeout after 20ms */
ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
if (ctrl & ONENAND_CTRL_ONGO &&
!this->ongoing) {
/*
* The operation seems to be still going
* so give it some more time.
*/
retry_cnt += 1;
if (retry_cnt < 3)
goto retry;
intr = read_reg(c,
ONENAND_REG_INTERRUPT);
wait_err("timeout", state, ctrl, intr);
return -EIO;
}
intr = read_reg(c, ONENAND_REG_INTERRUPT);
if ((intr & ONENAND_INT_MASTER) == 0)
wait_warn("timeout", state, ctrl, intr);
}
}
} else {
int retry_cnt = 0;
/* Turn interrupts off */
syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
syscfg &= ~ONENAND_SYS_CFG1_IOBE;
write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
timeout = jiffies + msecs_to_jiffies(20);
while (1) {
if (time_before(jiffies, timeout)) {
intr = read_reg(c, ONENAND_REG_INTERRUPT);
if (intr & ONENAND_INT_MASTER)
break;
} else {
/* Timeout after 20ms */
ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
if (ctrl & ONENAND_CTRL_ONGO) {
/*
* The operation seems to be still going
* so give it some more time.
*/
retry_cnt += 1;
if (retry_cnt < 3) {
timeout = jiffies +
msecs_to_jiffies(20);
continue;
}
}
break;
}
}
}
intr = read_reg(c, ONENAND_REG_INTERRUPT);
ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
if (intr & ONENAND_INT_READ) {
int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);
if (ecc) {
unsigned int addr1, addr8;
addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
if (ecc & ONENAND_ECC_2BIT_ALL) {
printk(KERN_ERR "onenand_wait: ECC error = "
"0x%04x, addr1 %#x, addr8 %#x\n",
ecc, addr1, addr8);
mtd->ecc_stats.failed++;
return -EBADMSG;
} else if (ecc & ONENAND_ECC_1BIT_ALL) {
printk(KERN_NOTICE "onenand_wait: correctable "
"ECC error = 0x%04x, addr1 %#x, "
"addr8 %#x\n", ecc, addr1, addr8);
mtd->ecc_stats.corrected++;
}
}
} else if (state == FL_READING) {
wait_err("timeout", state, ctrl, intr);
return -EIO;
}
if (ctrl & ONENAND_CTRL_ERROR) {
wait_err("controller error", state, ctrl, intr);
if (ctrl & ONENAND_CTRL_LOCK)
printk(KERN_ERR "onenand_wait: "
"Device is write protected!!!\n");
return -EIO;
}
ctrl_mask = 0xFE9F;
if (this->ongoing)
ctrl_mask &= ~0x8000;
if (ctrl & ctrl_mask)
wait_warn("unexpected controller status", state, ctrl, intr);
return 0;
}
static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
{
struct onenand_chip *this = mtd->priv;
if (ONENAND_CURRENT_BUFFERRAM(this)) {
if (area == ONENAND_DATARAM)
return this->writesize;
if (area == ONENAND_SPARERAM)
return mtd->oobsize;
}
return 0;
}
#if defined(CONFIG_ARCH_OMAP3) || defined(MULTI_OMAP2)
static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
unsigned char *buffer, int offset,
size_t count)
{
struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
struct onenand_chip *this = mtd->priv;
dma_addr_t dma_src, dma_dst;
int bram_offset;
unsigned long timeout;
void *buf = (void *)buffer;
size_t xtra;
volatile unsigned *done;
bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
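/* Unaligned or small (< 384 byte) transfers are cheaper via memcpy(). */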
if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
goto out_copy;
/* panic_write() may be in an interrupt context */
if (in_interrupt() || oops_in_progress)
goto out_copy;
if (buf >= high_memory) {
struct page *p1;
if (((size_t)buf & PAGE_MASK) !=
((size_t)(buf + count - 1) & PAGE_MASK))
goto out_copy;
p1 = vmalloc_to_page(buf);
if (!p1)
goto out_copy;
buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
}
xtra = count & 3;
if (xtra) {
count -= xtra;
memcpy(buf + count, this->base + bram_offset + count, xtra);
}
dma_src = c->phys_base + bram_offset;
dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE);
if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
dev_err(&c->pdev->dev,
"Couldn't DMA map a %d byte buffer\n",
count);
goto out_copy;
}
omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
count >> 2, 1, 0, 0, 0);
omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
dma_src, 0, 0);
omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
dma_dst, 0, 0);
INIT_COMPLETION(c->dma_done);
omap_start_dma(c->dma_channel);
timeout = jiffies + msecs_to_jiffies(20);
done = &c->dma_done.done;
while (time_before(jiffies, timeout))
if (*done)
break;
dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);
if (!*done) {
dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
goto out_copy;
}
return 0;
out_copy:
memcpy(buf, this->base + bram_offset, count);
return 0;
}
static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
const unsigned char *buffer,
int offset, size_t count)
{
struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
struct onenand_chip *this = mtd->priv;
dma_addr_t dma_src, dma_dst;
int bram_offset;
unsigned long timeout;
void *buf = (void *)buffer;
volatile unsigned *done;
bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
goto out_copy;
/* panic_write() may be in an interrupt context */
if (in_interrupt() || oops_in_progress)
goto out_copy;
if (buf >= high_memory) {
struct page *p1;
if (((size_t)buf & PAGE_MASK) !=
((size_t)(buf + count - 1) & PAGE_MASK))
goto out_copy;
p1 = vmalloc_to_page(buf);
if (!p1)
goto out_copy;
buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
}
dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
dma_dst = c->phys_base + bram_offset;
if (dma_mapping_error(&c->pdev->dev, dma_src)) {
dev_err(&c->pdev->dev,
"Couldn't DMA map a %d byte buffer\n",
count);
return -1;
}
omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
count >> 2, 1, 0, 0, 0);
omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
dma_src, 0, 0);
omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
dma_dst, 0, 0);
INIT_COMPLETION(c->dma_done);
omap_start_dma(c->dma_channel);
timeout = jiffies + msecs_to_jiffies(20);
done = &c->dma_done.done;
while (time_before(jiffies, timeout))
if (*done)
break;
dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
if (!*done) {
dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
goto out_copy;
}
return 0;
out_copy:
memcpy(this->base + bram_offset, buf, count);
return 0;
}
#else
int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
unsigned char *buffer, int offset,
size_t count);
int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
const unsigned char *buffer,
int offset, size_t count);
#endif
#if defined(CONFIG_ARCH_OMAP2) || defined(MULTI_OMAP2)
static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
unsigned char *buffer, int offset,
size_t count)
{
struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
struct onenand_chip *this = mtd->priv;
dma_addr_t dma_src, dma_dst;
int bram_offset;
bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
/* DMA is not used. Revisit PM requirements before enabling it. */
if (1 || (c->dma_channel < 0) ||
((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
(((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
memcpy(buffer, (__force void *)(this->base + bram_offset),
count);
return 0;
}
dma_src = c->phys_base + bram_offset;
dma_dst = dma_map_single(&c->pdev->dev, buffer, count,
DMA_FROM_DEVICE);
if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
dev_err(&c->pdev->dev,
"Couldn't DMA map a %d byte buffer\n",
count);
return -1;
}
omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
count / 4, 1, 0, 0, 0);
omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
dma_src, 0, 0);
omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
dma_dst, 0, 0);
INIT_COMPLETION(c->dma_done);
omap_start_dma(c->dma_channel);
wait_for_completion(&c->dma_done);
dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);
return 0;
}
static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
const unsigned char *buffer,
int offset, size_t count)
{
struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
struct onenand_chip *this = mtd->priv;
dma_addr_t dma_src, dma_dst;
int bram_offset;
bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
/* DMA is not used. Revisit PM requirements before enabling it. */
if (1 || (c->dma_channel < 0) ||
((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
(((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
memcpy((__force void *)(this->base + bram_offset), buffer,
count);
return 0;
}
dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
DMA_TO_DEVICE);
dma_dst = c->phys_base + bram_offset;
if (dma_mapping_error(&c->pdev->dev, dma_src)) {
dev_err(&c->pdev->dev,
"Couldn't DMA map a %d byte buffer\n",
count);
return -1;
}
omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16,
count / 2, 1, 0, 0, 0);
omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
dma_src, 0, 0);
omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
dma_dst, 0, 0);
INIT_COMPLETION(c->dma_done);
omap_start_dma(c->dma_channel);
wait_for_completion(&c->dma_done);
dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
return 0;
}
#else
int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
unsigned char *buffer, int offset,
size_t count);
int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
const unsigned char *buffer,
int offset, size_t count);
#endif
static struct platform_driver omap2_onenand_driver;
static int __adjust_timing(struct device *dev, void *data)
{
int ret = 0;
struct omap2_onenand *c;
c = dev_get_drvdata(dev);
BUG_ON(c->setup == NULL);
/* DMA is not in use so this is all that is needed */
/* Revisit for OMAP3! */
ret = c->setup(c->onenand.base, &c->freq);
return ret;
}
int omap2_onenand_rephase(void)
{
return driver_for_each_device(&omap2_onenand_driver.driver, NULL,
NULL, __adjust_timing);
}
static void omap2_onenand_shutdown(struct platform_device *pdev)
{
struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
/* With certain content in the buffer RAM, the OMAP boot ROM code
* can recognize the flash chip incorrectly. Zero it out before
* soft reset.
*/
memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}
static int omap2_onenand_enable(struct mtd_info *mtd)
{
int ret;
struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
ret = regulator_enable(c->regulator);
if (ret != 0)
dev_err(&c->pdev->dev, "can't enable regulator\n");
return ret;
}
static int omap2_onenand_disable(struct mtd_info *mtd)
{
int ret;
struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
ret = regulator_disable(c->regulator);
if (ret != 0)
dev_err(&c->pdev->dev, "can't disable regulator\n");
return ret;
}
static int __devinit omap2_onenand_probe(struct platform_device *pdev)
{
struct omap_onenand_platform_data *pdata;
struct omap2_onenand *c;
struct onenand_chip *this;
int r;
pdata = pdev->dev.platform_data;
if (pdata == NULL) {
dev_err(&pdev->dev, "platform data missing\n");
return -ENODEV;
}
c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
if (!c)
return -ENOMEM;
init_completion(&c->irq_done);
init_completion(&c->dma_done);
c->gpmc_cs = pdata->cs;
c->gpio_irq = pdata->gpio_irq;
c->dma_channel = pdata->dma_channel;
if (c->dma_channel < 0) {
/* if -1, don't use DMA */
c->gpio_irq = 0;
}
r = gpmc_cs_request(c->gpmc_cs, ONENAND_IO_SIZE, &c->phys_base);
if (r < 0) {
dev_err(&pdev->dev, "Cannot request GPMC CS\n");
goto err_kfree;
}
if (request_mem_region(c->phys_base, ONENAND_IO_SIZE,
pdev->dev.driver->name) == NULL) {
dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, "
"size: 0x%x\n", c->phys_base, ONENAND_IO_SIZE);
r = -EBUSY;
goto err_free_cs;
}
c->onenand.base = ioremap(c->phys_base, ONENAND_IO_SIZE);
if (c->onenand.base == NULL) {
r = -ENOMEM;
goto err_release_mem_region;
}
if (pdata->onenand_setup != NULL) {
r = pdata->onenand_setup(c->onenand.base, &c->freq);
if (r < 0) {
dev_err(&pdev->dev, "Onenand platform setup failed: "
"%d\n", r);
goto err_iounmap;
}
c->setup = pdata->onenand_setup;
}
if (c->gpio_irq) {
if ((r = gpio_request(c->gpio_irq, "OneNAND irq")) < 0) {
dev_err(&pdev->dev, "Failed to request GPIO%d for "
"OneNAND\n", c->gpio_irq);
goto err_iounmap;
}
gpio_direction_input(c->gpio_irq);
if ((r = request_irq(gpio_to_irq(c->gpio_irq),
omap2_onenand_interrupt, IRQF_TRIGGER_RISING,
pdev->dev.driver->name, c)) < 0)
goto err_release_gpio;
}
if (c->dma_channel >= 0) {
r = omap_request_dma(0, pdev->dev.driver->name,
omap2_onenand_dma_cb, (void *) c,
&c->dma_channel);
if (r == 0) {
omap_set_dma_write_mode(c->dma_channel,
OMAP_DMA_WRITE_NON_POSTED);
omap_set_dma_src_data_pack(c->dma_channel, 1);
omap_set_dma_src_burst_mode(c->dma_channel,
OMAP_DMA_DATA_BURST_8);
omap_set_dma_dest_data_pack(c->dma_channel, 1);
omap_set_dma_dest_burst_mode(c->dma_channel,
OMAP_DMA_DATA_BURST_8);
} else {
dev_info(&pdev->dev,
"failed to allocate DMA for OneNAND, "
"using PIO instead\n");
c->dma_channel = -1;
}
}
dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
"base %p, freq %d MHz\n", c->gpmc_cs, c->phys_base,
c->onenand.base, c->freq);
c->pdev = pdev;
c->mtd.name = dev_name(&pdev->dev);
c->mtd.priv = &c->onenand;
c->mtd.owner = THIS_MODULE;
c->mtd.dev.parent = &pdev->dev;
this = &c->onenand;
if (c->dma_channel >= 0) {
this->wait = omap2_onenand_wait;
if (cpu_is_omap34xx()) {
this->read_bufferram = omap3_onenand_read_bufferram;
this->write_bufferram = omap3_onenand_write_bufferram;
} else {
this->read_bufferram = omap2_onenand_read_bufferram;
this->write_bufferram = omap2_onenand_write_bufferram;
}
}
if (pdata->regulator_can_sleep) {
c->regulator = regulator_get(&pdev->dev, "vonenand");
if (IS_ERR(c->regulator)) {
dev_err(&pdev->dev, "Failed to get regulator\n");
r = PTR_ERR(c->regulator);
goto err_release_dma;
}
c->onenand.enable = omap2_onenand_enable;
c->onenand.disable = omap2_onenand_disable;
}
if (pdata->skip_initial_unlocking)
this->options |= ONENAND_SKIP_INITIAL_UNLOCKING;
if ((r = onenand_scan(&c->mtd, 1)) < 0)
goto err_release_regulator;
r = mtd_device_parse_register(&c->mtd, NULL, NULL,
pdata ? pdata->parts : NULL,
pdata ? pdata->nr_parts : 0);
if (r)
goto err_release_onenand;
platform_set_drvdata(pdev, c);
return 0;
err_release_onenand:
onenand_release(&c->mtd);
err_release_regulator:
regulator_put(c->regulator);
err_release_dma:
if (c->dma_channel != -1)
omap_free_dma(c->dma_channel);
if (c->gpio_irq)
free_irq(gpio_to_irq(c->gpio_irq), c);
err_release_gpio:
if (c->gpio_irq)
gpio_free(c->gpio_irq);
err_iounmap:
iounmap(c->onenand.base);
err_release_mem_region:
release_mem_region(c->phys_base, ONENAND_IO_SIZE);
err_free_cs:
gpmc_cs_free(c->gpmc_cs);
err_kfree:
kfree(c);
return r;
}
static int __devexit omap2_onenand_remove(struct platform_device *pdev)
{
struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
onenand_release(&c->mtd);
regulator_put(c->regulator);
if (c->dma_channel != -1)
omap_free_dma(c->dma_channel);
omap2_onenand_shutdown(pdev);
platform_set_drvdata(pdev, NULL);
if (c->gpio_irq) {
free_irq(gpio_to_irq(c->gpio_irq), c);
gpio_free(c->gpio_irq);
}
iounmap(c->onenand.base);
release_mem_region(c->phys_base, ONENAND_IO_SIZE);
gpmc_cs_free(c->gpmc_cs);
kfree(c);
return 0;
}
static struct platform_driver omap2_onenand_driver = {
.probe = omap2_onenand_probe,
.remove = __devexit_p(omap2_onenand_remove),
.shutdown = omap2_onenand_shutdown,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
},
};
static int __init omap2_onenand_init(void)
{
printk(KERN_INFO "OneNAND driver initializing\n");
return platform_driver_register(&omap2_onenand_driver);
}
static void __exit omap2_onenand_exit(void)
{
platform_driver_unregister(&omap2_onenand_driver);
}
module_init(omap2_onenand_init);
module_exit(omap2_onenand_exit);
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");
| gpl-2.0 |
emceethemouth/kernel_bacon | drivers/video/fb-puv3.c | 5190 | 22307 | /*
* Frame Buffer Driver for PKUnity-v3 Unigfx
* Code specific to PKUnity SoC and UniCore ISA
*
* Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
* Copyright (C) 2001-2010 Guan Xuetao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/console.h>
#include <asm/sizes.h>
#include <mach/hardware.h>
/* Platform_data reserved for unifb registers. */
#define UNIFB_REGS_NUM 10
/* RAM reserved for the frame buffer. */
#define UNIFB_MEMSIZE (SZ_4M) /* 4 MB for 1024*768*32b */
/*
* Because UNIGFX has no EDID support,
* all the supported modes are organized as a fixed table below.
*/
static const struct fb_videomode unifb_modes[] = {
/* 0 640x480-60 VESA */
{ "640x480@60", 60, 640, 480, 25175000, 48, 16, 34, 10, 96, 1,
0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
/* 1 640x480-75 VESA */
{ "640x480@75", 75, 640, 480, 31500000, 120, 16, 18, 1, 64, 1,
0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
/* 2 800x600-60 VESA */
{ "800x600@60", 60, 800, 600, 40000000, 88, 40, 26, 1, 128, 1,
0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
/* 3 800x600-75 VESA */
{ "800x600@75", 75, 800, 600, 49500000, 160, 16, 23, 1, 80, 1,
0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
/* 4 1024x768-60 VESA */
{ "1024x768@60", 60, 1024, 768, 65000000, 160, 24, 34, 3, 136, 1,
0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
/* 5 1024x768-75 VESA */
{ "1024x768@75", 75, 1024, 768, 78750000, 176, 16, 30, 1, 96, 1,
0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
/* 6 1280x960-60 VESA */
{ "1280x960@60", 60, 1280, 960, 108000000, 312, 96, 38, 1, 112, 1,
0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
/* 7 1440x900-60 VESA */
{ "1440x900@60", 60, 1440, 900, 106500000, 232, 80, 30, 3, 152, 1,
0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
/* 8 1024x600-60 VESA (FIXME: untested) */
{ "1024x600@60", 60, 1024, 600, 50650000, 160, 24, 26, 1, 136, 1,
0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
/* 9 1024x600-75 VESA (FIXME: untested) */
{ "1024x600@75", 75, 1024, 600, 61500000, 176, 16, 23, 1, 96, 1,
0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
/* 10 1366x768-60 VESA (FIXME: untested) */
{ "1366x768@60", 60, 1366, 768, 85500000, 256, 58, 18, 1, 112, 3,
0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
};
static struct fb_var_screeninfo unifb_default = {
.xres = 640,
.yres = 480,
.xres_virtual = 640,
.yres_virtual = 480,
.bits_per_pixel = 16,
.red = { 11, 5, 0 },
.green = { 5, 6, 0 },
.blue = { 0, 5, 0 },
.activate = FB_ACTIVATE_NOW,
.height = -1,
.width = -1,
.pixclock = 25175000,
.left_margin = 48,
.right_margin = 16,
.upper_margin = 33,
.lower_margin = 10,
.hsync_len = 96,
.vsync_len = 2,
.vmode = FB_VMODE_NONINTERLACED,
};
static struct fb_fix_screeninfo unifb_fix = {
.id = "UNIGFX FB",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_TRUECOLOR,
.xpanstep = 1,
.ypanstep = 1,
.ywrapstep = 1,
.accel = FB_ACCEL_NONE,
};
static void unifb_sync(struct fb_info *info)
{
/* TODO: maybe this can be replaced by an interrupt */
int cnt;
for (cnt = 0; cnt < 0x10000000; cnt++) {
if (readl(UGE_COMMAND) & 0x1000000)
return;
}
if (cnt > 0x8000000)
dev_warn(info->device, "UniGFX GE timed out ...\n");
}
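/*
* Program the UniGFX engine for a solid fill: pack ROP, source
* select, direction and clipping into one command word, load the
* coordinate and pitch registers, then kick the engine via
* UGE_COMMAND.
*/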
static void unifb_prim_fillrect(struct fb_info *info,
const struct fb_fillrect *region)
{
int awidth = region->width;
int aheight = region->height;
int m_iBpp = info->var.bits_per_pixel;
int screen_width = info->var.xres;
int src_sel = 1; /* from fg_color */
int pat_sel = 1;
int src_x0 = 0;
int dst_x0 = region->dx;
int src_y0 = 0;
int dst_y0 = region->dy;
int rop_alpha_sel = 0;
int rop_alpha_code = 0xCC;
int x_dir = 1;
int y_dir = 1;
int alpha_r = 0;
int alpha_sel = 0;
int dst_pitch = screen_width * (m_iBpp / 8);
int dst_offset = dst_y0 * dst_pitch + dst_x0 * (m_iBpp / 8);
int src_pitch = screen_width * (m_iBpp / 8);
int src_offset = src_y0 * src_pitch + src_x0 * (m_iBpp / 8);
unsigned int command = 0;
int clip_region = 0;
int clip_en = 0;
int tp_en = 0;
int fg_color = 0;
int bottom = info->var.yres - 1;
int right = info->var.xres - 1;
int top = 0;
bottom = (bottom << 16) | right;
command = (rop_alpha_sel << 26) | (pat_sel << 18) | (src_sel << 16)
| (x_dir << 20) | (y_dir << 21) | (command << 24)
| (clip_region << 23) | (clip_en << 22) | (tp_en << 27);
src_pitch = (dst_pitch << 16) | src_pitch;
awidth = awidth | (aheight << 16);
alpha_r = ((rop_alpha_code & 0xff) << 8) | (alpha_r & 0xff)
| (alpha_sel << 16);
src_x0 = (src_x0 & 0x1fff) | ((src_y0 & 0x1fff) << 16);
dst_x0 = (dst_x0 & 0x1fff) | ((dst_y0 & 0x1fff) << 16);
fg_color = region->color;
unifb_sync(info);
writel(((u32 *)(info->pseudo_palette))[fg_color], UGE_FCOLOR);
writel(0, UGE_BCOLOR);
writel(src_pitch, UGE_PITCH);
writel(src_offset, UGE_SRCSTART);
writel(dst_offset, UGE_DSTSTART);
writel(awidth, UGE_WIDHEIGHT);
writel(top, UGE_CLIP0);
writel(bottom, UGE_CLIP1);
writel(alpha_r, UGE_ROPALPHA);
writel(src_x0, UGE_SRCXY);
writel(dst_x0, UGE_DSTXY);
writel(command, UGE_COMMAND);
}
static void unifb_fillrect(struct fb_info *info,
const struct fb_fillrect *region)
{
struct fb_fillrect modded;
int vxres, vyres;
if (info->flags & FBINFO_HWACCEL_DISABLED) {
sys_fillrect(info, region);
return;
}
vxres = info->var.xres_virtual;
vyres = info->var.yres_virtual;
memcpy(&modded, region, sizeof(struct fb_fillrect));
if (!modded.width || !modded.height ||
modded.dx >= vxres || modded.dy >= vyres)
return;
if (modded.dx + modded.width > vxres)
modded.width = vxres - modded.dx;
if (modded.dy + modded.height > vyres)
modded.height = vyres - modded.dy;
unifb_prim_fillrect(info, &modded);
}
static void unifb_prim_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
int awidth = area->width;
int aheight = area->height;
int m_iBpp = info->var.bits_per_pixel;
int screen_width = info->var.xres;
int src_sel = 2; /* from mem */
int pat_sel = 0;
int src_x0 = area->sx;
int dst_x0 = area->dx;
int src_y0 = area->sy;
int dst_y0 = area->dy;
int rop_alpha_sel = 0;
int rop_alpha_code = 0xCC;
int x_dir = 1;
int y_dir = 1;
int alpha_r = 0;
int alpha_sel = 0;
int dst_pitch = screen_width * (m_iBpp / 8);
int dst_offset = dst_y0 * dst_pitch + dst_x0 * (m_iBpp / 8);
int src_pitch = screen_width * (m_iBpp / 8);
int src_offset = src_y0 * src_pitch + src_x0 * (m_iBpp / 8);
unsigned int command = 0;
int clip_region = 0;
int clip_en = 1;
int tp_en = 0;
int top = 0;
int bottom = info->var.yres;
int right = info->var.xres;
int fg_color = 0;
int bg_color = 0;
if (src_x0 < 0)
src_x0 = 0;
if (src_y0 < 0)
src_y0 = 0;
if (src_y0 - dst_y0 > 0) {
y_dir = 1;
} else {
y_dir = 0;
src_offset = (src_y0 + aheight) * src_pitch +
src_x0 * (m_iBpp / 8);
dst_offset = (dst_y0 + aheight) * dst_pitch +
dst_x0 * (m_iBpp / 8);
src_y0 += aheight;
dst_y0 += aheight;
}
command = (rop_alpha_sel << 26) | (pat_sel << 18) | (src_sel << 16) |
(x_dir << 20) | (y_dir << 21) | (command << 24) |
(clip_region << 23) | (clip_en << 22) | (tp_en << 27);
src_pitch = (dst_pitch << 16) | src_pitch;
awidth = awidth | (aheight << 16);
alpha_r = ((rop_alpha_code & 0xff) << 8) | (alpha_r & 0xff) |
(alpha_sel << 16);
src_x0 = (src_x0 & 0x1fff) | ((src_y0 & 0x1fff) << 16);
dst_x0 = (dst_x0 & 0x1fff) | ((dst_y0 & 0x1fff) << 16);
bottom = (bottom << 16) | right;
unifb_sync(info);
writel(src_pitch, UGE_PITCH);
writel(src_offset, UGE_SRCSTART);
writel(dst_offset, UGE_DSTSTART);
writel(awidth, UGE_WIDHEIGHT);
writel(top, UGE_CLIP0);
writel(bottom, UGE_CLIP1);
writel(bg_color, UGE_BCOLOR);
writel(fg_color, UGE_FCOLOR);
writel(alpha_r, UGE_ROPALPHA);
writel(src_x0, UGE_SRCXY);
writel(dst_x0, UGE_DSTXY);
writel(command, UGE_COMMAND);
}
static void unifb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
struct fb_copyarea modded;
u32 vxres, vyres;
modded.sx = area->sx;
modded.sy = area->sy;
modded.dx = area->dx;
modded.dy = area->dy;
modded.width = area->width;
modded.height = area->height;
if (info->flags & FBINFO_HWACCEL_DISABLED) {
sys_copyarea(info, area);
return;
}
vxres = info->var.xres_virtual;
vyres = info->var.yres_virtual;
if (!modded.width || !modded.height ||
modded.sx >= vxres || modded.sy >= vyres ||
modded.dx >= vxres || modded.dy >= vyres)
return;
if (modded.sx + modded.width > vxres)
modded.width = vxres - modded.sx;
if (modded.dx + modded.width > vxres)
modded.width = vxres - modded.dx;
if (modded.sy + modded.height > vyres)
modded.height = vyres - modded.sy;
if (modded.dy + modded.height > vyres)
modded.height = vyres - modded.dy;
unifb_prim_copyarea(info, &modded);
}
static void unifb_imageblit(struct fb_info *info, const struct fb_image *image)
{
sys_imageblit(info, image);
}
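/* Round the line length up to a 32-bit boundary and return it in bytes. */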
static u_long get_line_length(int xres_virtual, int bpp)
{
u_long length;
length = xres_virtual * bpp;
length = (length + 31) & ~31;
length >>= 3;
return length;
}
/*
* Setting the video mode has been split into two parts.
* First part, xxxfb_check_var, must not write anything
* to hardware, it should only verify and adjust var.
* This means it doesn't alter par but it does use hardware
* data from it to check this var.
*/
static int unifb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
u_long line_length;
/*
* FB_VMODE_CONUPDATE and FB_VMODE_SMOOTH_XPAN are equal!
* as FB_VMODE_SMOOTH_XPAN is only used internally
*/
if (var->vmode & FB_VMODE_CONUPDATE) {
var->vmode |= FB_VMODE_YWRAP;
var->xoffset = info->var.xoffset;
var->yoffset = info->var.yoffset;
}
/*
* Some very basic checks
*/
if (!var->xres)
var->xres = 1;
if (!var->yres)
var->yres = 1;
if (var->xres > var->xres_virtual)
var->xres_virtual = var->xres;
if (var->yres > var->yres_virtual)
var->yres_virtual = var->yres;
if (var->bits_per_pixel <= 1)
var->bits_per_pixel = 1;
else if (var->bits_per_pixel <= 8)
var->bits_per_pixel = 8;
else if (var->bits_per_pixel <= 16)
var->bits_per_pixel = 16;
else if (var->bits_per_pixel <= 24)
var->bits_per_pixel = 24;
else if (var->bits_per_pixel <= 32)
var->bits_per_pixel = 32;
else
return -EINVAL;
if (var->xres_virtual < var->xoffset + var->xres)
var->xres_virtual = var->xoffset + var->xres;
if (var->yres_virtual < var->yoffset + var->yres)
var->yres_virtual = var->yoffset + var->yres;
/*
* Memory limit
*/
line_length =
get_line_length(var->xres_virtual, var->bits_per_pixel);
if (line_length * var->yres_virtual > UNIFB_MEMSIZE)
return -ENOMEM;
/*
* Now that we checked it we alter var. The reason is that the
* video mode passed in might not work but slight changes to it might
* make it work. This way we let the user know what is acceptable.
*/
switch (var->bits_per_pixel) {
case 1:
case 8:
var->red.offset = 0;
var->red.length = 8;
var->green.offset = 0;
var->green.length = 8;
var->blue.offset = 0;
var->blue.length = 8;
var->transp.offset = 0;
var->transp.length = 0;
break;
case 16: /* RGBA 5551 */
if (var->transp.length) {
var->red.offset = 0;
var->red.length = 5;
var->green.offset = 5;
var->green.length = 5;
var->blue.offset = 10;
var->blue.length = 5;
var->transp.offset = 15;
var->transp.length = 1;
} else { /* RGB 565 */
var->red.offset = 11;
var->red.length = 5;
var->green.offset = 5;
var->green.length = 6;
var->blue.offset = 0;
var->blue.length = 5;
var->transp.offset = 0;
var->transp.length = 0;
}
break;
case 24: /* RGB 888 */
var->red.offset = 0;
var->red.length = 8;
var->green.offset = 8;
var->green.length = 8;
var->blue.offset = 16;
var->blue.length = 8;
var->transp.offset = 0;
var->transp.length = 0;
break;
case 32: /* RGBA 8888 */
var->red.offset = 16;
var->red.length = 8;
var->green.offset = 8;
var->green.length = 8;
var->blue.offset = 0;
var->blue.length = 8;
var->transp.offset = 24;
var->transp.length = 8;
break;
}
var->red.msb_right = 0;
var->green.msb_right = 0;
var->blue.msb_right = 0;
var->transp.msb_right = 0;
return 0;
}
/*
* This routine actually sets the video mode. It's in here where we
* update the hardware state, info->par and fix, which can be affected
* by the change in var. For this driver it doesn't do much.
*/
static int unifb_set_par(struct fb_info *info)
{
int hTotal, vTotal, hSyncStart, hSyncEnd, vSyncStart, vSyncEnd;
int format;
#ifdef CONFIG_PUV3_PM
struct clk *clk_vga;
u32 pixclk = 0;
int i;
for (i = 0; i <= 10; i++) {
if (info->var.xres == unifb_modes[i].xres
&& info->var.yres == unifb_modes[i].yres
&& info->var.upper_margin == unifb_modes[i].upper_margin
&& info->var.lower_margin == unifb_modes[i].lower_margin
&& info->var.left_margin == unifb_modes[i].left_margin
&& info->var.right_margin == unifb_modes[i].right_margin
&& info->var.hsync_len == unifb_modes[i].hsync_len
&& info->var.vsync_len == unifb_modes[i].vsync_len) {
pixclk = unifb_modes[i].pixclock;
break;
}
}
/* set clock rate */
clk_vga = clk_get(info->device, "VGA_CLK");
if (clk_vga == ERR_PTR(-ENOENT))
return -ENOENT;
if (pixclk != 0) {
if (clk_set_rate(clk_vga, pixclk)) { /* set clock failed */
info->fix = unifb_fix;
info->var = unifb_default;
if (clk_set_rate(clk_vga, unifb_default.pixclock))
return -EINVAL;
}
}
#endif
info->fix.line_length = get_line_length(info->var.xres_virtual,
info->var.bits_per_pixel);
hSyncStart = info->var.xres + info->var.right_margin;
hSyncEnd = hSyncStart + info->var.hsync_len;
hTotal = hSyncEnd + info->var.left_margin;
vSyncStart = info->var.yres + info->var.lower_margin;
vSyncEnd = vSyncStart + info->var.vsync_len;
vTotal = vSyncEnd + info->var.upper_margin;
switch (info->var.bits_per_pixel) {
case 8:
format = UDE_CFG_DST8;
break;
case 16:
format = UDE_CFG_DST16;
break;
case 24:
format = UDE_CFG_DST24;
break;
case 32:
format = UDE_CFG_DST32;
break;
default:
return -EINVAL;
}
writel(info->fix.smem_start, UDE_FSA);
writel(info->var.yres, UDE_LS);
writel(get_line_length(info->var.xres,
info->var.bits_per_pixel) >> 3, UDE_PS);
/* The hardware expects this value shifted right by 3. */
writel((hTotal << 16) | (info->var.xres), UDE_HAT);
writel(((hTotal - 1) << 16) | (info->var.xres - 1), UDE_HBT);
writel(((hSyncEnd - 1) << 16) | (hSyncStart - 1), UDE_HST);
writel((vTotal << 16) | (info->var.yres), UDE_VAT);
writel(((vTotal - 1) << 16) | (info->var.yres - 1), UDE_VBT);
writel(((vSyncEnd - 1) << 16) | (vSyncStart - 1), UDE_VST);
writel(UDE_CFG_GDEN_ENABLE | UDE_CFG_TIMEUP_ENABLE
| format | 0xC0000001, UDE_CFG);
return 0;
}
/*
* Set a single color register. The values supplied are already
* rounded down to the hardware's capabilities (according to the
* entries in the var structure). Return != 0 for invalid regno.
*/
static int unifb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
u_int transp, struct fb_info *info)
{
if (regno >= 256) /* no. of hw registers */
return 1;
/* grayscale works only partially under directcolor */
if (info->var.grayscale) {
/* grayscale = 0.30*R + 0.59*G + 0.11*B */
red = green = blue =
(red * 77 + green * 151 + blue * 28) >> 8;
}
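/* Scale a 16-bit color component to a 'width'-bit field, with rounding. */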
#define CNVT_TOHW(val, width) ((((val)<<(width))+0x7FFF-(val))>>16)
switch (info->fix.visual) {
case FB_VISUAL_TRUECOLOR:
case FB_VISUAL_PSEUDOCOLOR:
red = CNVT_TOHW(red, info->var.red.length);
green = CNVT_TOHW(green, info->var.green.length);
blue = CNVT_TOHW(blue, info->var.blue.length);
transp = CNVT_TOHW(transp, info->var.transp.length);
break;
case FB_VISUAL_DIRECTCOLOR:
red = CNVT_TOHW(red, 8); /* expect 8 bit DAC */
green = CNVT_TOHW(green, 8);
blue = CNVT_TOHW(blue, 8);
/* hey, there is a bug in transp handling... */
transp = CNVT_TOHW(transp, 8);
break;
}
#undef CNVT_TOHW
/* Truecolor has hardware independent palette */
if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
u32 v;
if (regno >= 16)
return 1;
v = (red << info->var.red.offset) |
(green << info->var.green.offset) |
(blue << info->var.blue.offset) |
(transp << info->var.transp.offset);
switch (info->var.bits_per_pixel) {
case 8:
break;
case 16:
case 24:
case 32:
((u32 *) (info->pseudo_palette))[regno] = v;
break;
default:
return 1;
}
return 0;
}
return 0;
}
/*
* Pan or Wrap the Display
*
* This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
*/
static int unifb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
if (var->vmode & FB_VMODE_YWRAP) {
if (var->yoffset < 0
|| var->yoffset >= info->var.yres_virtual
|| var->xoffset)
return -EINVAL;
} else {
if (var->xoffset + info->var.xres > info->var.xres_virtual ||
var->yoffset + info->var.yres > info->var.yres_virtual)
return -EINVAL;
}
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
if (var->vmode & FB_VMODE_YWRAP)
info->var.vmode |= FB_VMODE_YWRAP;
else
info->var.vmode &= ~FB_VMODE_YWRAP;
return 0;
}
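/* Map the frame buffer memory uncached into user space. */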
int unifb_mmap(struct fb_info *info,
struct vm_area_struct *vma)
{
unsigned long size = vma->vm_end - vma->vm_start;
unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
unsigned long pos = info->fix.smem_start + offset;
if (offset + size > info->fix.smem_len)
return -EINVAL;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (io_remap_pfn_range(vma, vma->vm_start, pos >> PAGE_SHIFT, size,
vma->vm_page_prot))
return -EAGAIN;
vma->vm_flags |= VM_RESERVED; /* avoid swapping out this VMA */
return 0;
}
static struct fb_ops unifb_ops = {
.fb_read = fb_sys_read,
.fb_write = fb_sys_write,
.fb_check_var = unifb_check_var,
.fb_set_par = unifb_set_par,
.fb_setcolreg = unifb_setcolreg,
.fb_pan_display = unifb_pan_display,
.fb_fillrect = unifb_fillrect,
.fb_copyarea = unifb_copyarea,
.fb_imageblit = unifb_imageblit,
.fb_mmap = unifb_mmap,
};
/*
* Initialisation
*/
static int unifb_probe(struct platform_device *dev)
{
struct fb_info *info;
u32 unifb_regs[UNIFB_REGS_NUM];
int retval = -ENOMEM;
struct resource *iomem;
void *videomemory;
videomemory = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP,
get_order(UNIFB_MEMSIZE));
if (!videomemory)
goto err;
memset(videomemory, 0, UNIFB_MEMSIZE);
unifb_fix.smem_start = virt_to_phys(videomemory);
unifb_fix.smem_len = UNIFB_MEMSIZE;
iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
unifb_fix.mmio_start = iomem->start;
info = framebuffer_alloc(sizeof(u32)*256, &dev->dev);
if (!info)
goto err;
info->screen_base = (char __iomem *)videomemory;
info->fbops = &unifb_ops;
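/*
 * fb_find_mode() returns 0 when no usable mode was found and 4 when it
 * had to fall back to the first valid entry of the mode database; in
 * both cases the driver substitutes its own default var below.
 */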
retval = fb_find_mode(&info->var, info, NULL,
unifb_modes, 10, &unifb_modes[0], 16);
if (!retval || (retval == 4))
info->var = unifb_default;
info->fix = unifb_fix;
info->pseudo_palette = info->par;
info->par = NULL;
info->flags = FBINFO_FLAG_DEFAULT;
#ifdef FB_ACCEL_PUV3_UNIGFX
info->fix.accel = FB_ACCEL_PUV3_UNIGFX;
#endif
retval = fb_alloc_cmap(&info->cmap, 256, 0);
if (retval < 0)
goto err1;
retval = register_framebuffer(info);
if (retval < 0)
goto err2;
platform_set_drvdata(dev, info);
platform_device_add_data(dev, unifb_regs, sizeof(u32) * UNIFB_REGS_NUM);
printk(KERN_INFO
"fb%d: Virtual frame buffer device, using %dM of video memory\n",
info->node, UNIFB_MEMSIZE >> 20);
return 0;
err2:
fb_dealloc_cmap(&info->cmap);
err1:
framebuffer_release(info);
err:
/* also release the video memory allocated above (no-op if NULL) */
free_pages((unsigned long)videomemory, get_order(UNIFB_MEMSIZE));
return retval;
}
static int unifb_remove(struct platform_device *dev)
{
struct fb_info *info = platform_get_drvdata(dev);
if (info) {
unregister_framebuffer(info);
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
return 0;
}
#ifdef CONFIG_PM
static int unifb_resume(struct platform_device *dev)
{
int rc = 0;
u32 *unifb_regs = dev->dev.platform_data;
if (dev->dev.power.power_state.event == PM_EVENT_ON)
return 0;
console_lock();
if (dev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
writel(unifb_regs[0], UDE_FSA);
writel(unifb_regs[1], UDE_LS);
writel(unifb_regs[2], UDE_PS);
writel(unifb_regs[3], UDE_HAT);
writel(unifb_regs[4], UDE_HBT);
writel(unifb_regs[5], UDE_HST);
writel(unifb_regs[6], UDE_VAT);
writel(unifb_regs[7], UDE_VBT);
writel(unifb_regs[8], UDE_VST);
writel(unifb_regs[9], UDE_CFG);
}
dev->dev.power.power_state = PMSG_ON;
console_unlock();
return rc;
}
static int unifb_suspend(struct platform_device *dev, pm_message_t mesg)
{
u32 *unifb_regs = dev->dev.platform_data;
unifb_regs[0] = readl(UDE_FSA);
unifb_regs[1] = readl(UDE_LS);
unifb_regs[2] = readl(UDE_PS);
unifb_regs[3] = readl(UDE_HAT);
unifb_regs[4] = readl(UDE_HBT);
unifb_regs[5] = readl(UDE_HST);
unifb_regs[6] = readl(UDE_VAT);
unifb_regs[7] = readl(UDE_VBT);
unifb_regs[8] = readl(UDE_VST);
unifb_regs[9] = readl(UDE_CFG);
if (mesg.event == dev->dev.power.power_state.event)
return 0;
switch (mesg.event) {
case PM_EVENT_FREEZE: /* about to take snapshot */
case PM_EVENT_PRETHAW: /* before restoring snapshot */
goto done;
}
console_lock();
/* do nothing... */
console_unlock();
done:
dev->dev.power.power_state = mesg;
return 0;
}
#else
#define unifb_resume NULL
#define unifb_suspend NULL
#endif
static struct platform_driver unifb_driver = {
.probe = unifb_probe,
.remove = unifb_remove,
.resume = unifb_resume,
.suspend = unifb_suspend,
.driver = {
.name = "PKUnity-v3-UNIGFX",
},
};
static int __init unifb_init(void)
{
#ifndef MODULE
if (fb_get_options("unifb", NULL))
return -ENODEV;
#endif
return platform_driver_register(&unifb_driver);
}
module_init(unifb_init);
static void __exit unifb_exit(void)
{
platform_driver_unregister(&unifb_driver);
}
module_exit(unifb_exit);
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
Leoyzen/Charm-Eye | drivers/media/rc/keymaps/rc-twinhan1027.c | 9542 | 1854 | #include <media/rc-map.h>
#include <linux/module.h>
static struct rc_map_table twinhan_vp1027[] = {
{ 0x16, KEY_POWER2 },
{ 0x17, KEY_FAVORITES },
{ 0x0f, KEY_TEXT },
{ 0x48, KEY_INFO},
{ 0x1c, KEY_EPG },
{ 0x04, KEY_LIST },
{ 0x03, KEY_1 },
{ 0x01, KEY_2 },
{ 0x06, KEY_3 },
{ 0x09, KEY_4 },
{ 0x1d, KEY_5 },
{ 0x1f, KEY_6 },
{ 0x0d, KEY_7 },
{ 0x19, KEY_8 },
{ 0x1b, KEY_9 },
{ 0x15, KEY_0 },
{ 0x0c, KEY_CANCEL },
{ 0x4a, KEY_CLEAR },
{ 0x13, KEY_BACKSPACE },
{ 0x00, KEY_TAB },
{ 0x4b, KEY_UP },
{ 0x51, KEY_DOWN },
{ 0x4e, KEY_LEFT },
{ 0x52, KEY_RIGHT },
{ 0x4f, KEY_ENTER },
{ 0x1e, KEY_VOLUMEUP },
{ 0x0a, KEY_VOLUMEDOWN },
{ 0x02, KEY_CHANNELDOWN },
{ 0x05, KEY_CHANNELUP },
{ 0x11, KEY_RECORD },
{ 0x14, KEY_PLAY },
{ 0x4c, KEY_PAUSE },
{ 0x1a, KEY_STOP },
{ 0x40, KEY_REWIND },
{ 0x12, KEY_FASTFORWARD },
{ 0x41, KEY_PREVIOUSSONG },
{ 0x42, KEY_NEXTSONG },
{ 0x54, KEY_SAVE },
{ 0x50, KEY_LANGUAGE },
{ 0x47, KEY_MEDIA },
{ 0x4d, KEY_SCREEN },
{ 0x43, KEY_SUBTITLE },
{ 0x10, KEY_MUTE },
{ 0x49, KEY_AUDIO },
{ 0x07, KEY_SLEEP },
{ 0x08, KEY_VIDEO },
{ 0x0e, KEY_AGAIN },
{ 0x45, KEY_EQUAL },
{ 0x46, KEY_MINUS },
{ 0x18, KEY_RED },
{ 0x53, KEY_GREEN },
{ 0x5e, KEY_YELLOW },
{ 0x5f, KEY_BLUE },
};
static struct rc_map_list twinhan_vp1027_map = {
.map = {
.scan = twinhan_vp1027,
.size = ARRAY_SIZE(twinhan_vp1027),
.rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */
.name = RC_MAP_TWINHAN_VP1027_DVBS,
}
};
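/*
 * Illustrative only: a bridge/tuner driver would typically reference this
 * map by name when registering its remote, roughly like the hypothetical
 * snippet below:
 *
 *	dev->map_name = RC_MAP_TWINHAN_VP1027_DVBS;
 *	rc_register_device(dev);
 */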
static int __init init_rc_map_twinhan_vp1027(void)
{
return rc_map_register(&twinhan_vp1027_map);
}
static void __exit exit_rc_map_twinhan_vp1027(void)
{
rc_map_unregister(&twinhan_vp1027_map);
}
module_init(init_rc_map_twinhan_vp1027)
module_exit(exit_rc_map_twinhan_vp1027)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sergey Ivanov <123kash@gmail.com>");
| gpl-2.0 |
GalaxyTab4/android_kernel_samsung_matissewifi | drivers/isdn/hisax/st5481_d.c | 9798 | 19522 | /*
* Driver for ST5481 USB ISDN modem
*
* Author Frode Isaksen
* Copyright 2001 by Frode Isaksen <fisaksen@bewan.com>
* 2001 by Kai Germaschewski <kai.germaschewski@gmx.de>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/usb.h>
#include <linux/netdevice.h>
#include "st5481.h"
static void ph_connect(struct st5481_adapter *adapter);
static void ph_disconnect(struct st5481_adapter *adapter);
static struct Fsm l1fsm;
static char *strL1State[] =
{
"ST_L1_F3",
"ST_L1_F4",
"ST_L1_F6",
"ST_L1_F7",
"ST_L1_F8",
};
static char *strL1Event[] =
{
"EV_IND_DP",
"EV_IND_1",
"EV_IND_2",
"EV_IND_3",
"EV_IND_RSY",
"EV_IND_5",
"EV_IND_6",
"EV_IND_7",
"EV_IND_AP",
"EV_IND_9",
"EV_IND_10",
"EV_IND_11",
"EV_IND_AI8",
"EV_IND_AI10",
"EV_IND_AIL",
"EV_IND_DI",
"EV_PH_ACTIVATE_REQ",
"EV_PH_DEACTIVATE_REQ",
"EV_TIMER3",
};
static inline void D_L1L2(struct st5481_adapter *adapter, int pr, void *arg)
{
struct hisax_if *ifc = (struct hisax_if *) &adapter->hisax_d_if;
ifc->l1l2(ifc, pr, arg);
}
static void
l1_go_f3(struct FsmInst *fi, int event, void *arg)
{
struct st5481_adapter *adapter = fi->userdata;
if (fi->state == ST_L1_F7)
ph_disconnect(adapter);
FsmChangeState(fi, ST_L1_F3);
D_L1L2(adapter, PH_DEACTIVATE | INDICATION, NULL);
}
static void
l1_go_f6(struct FsmInst *fi, int event, void *arg)
{
struct st5481_adapter *adapter = fi->userdata;
if (fi->state == ST_L1_F7)
ph_disconnect(adapter);
FsmChangeState(fi, ST_L1_F6);
}
static void
l1_go_f7(struct FsmInst *fi, int event, void *arg)
{
struct st5481_adapter *adapter = fi->userdata;
FsmDelTimer(&adapter->timer, 0);
ph_connect(adapter);
FsmChangeState(fi, ST_L1_F7);
D_L1L2(adapter, PH_ACTIVATE | INDICATION, NULL);
}
static void
l1_go_f8(struct FsmInst *fi, int event, void *arg)
{
struct st5481_adapter *adapter = fi->userdata;
if (fi->state == ST_L1_F7)
ph_disconnect(adapter);
FsmChangeState(fi, ST_L1_F8);
}
static void
l1_timer3(struct FsmInst *fi, int event, void *arg)
{
struct st5481_adapter *adapter = fi->userdata;
st5481_ph_command(adapter, ST5481_CMD_DR);
FsmChangeState(fi, ST_L1_F3);
D_L1L2(adapter, PH_DEACTIVATE | INDICATION, NULL);
}
static void
l1_ignore(struct FsmInst *fi, int event, void *arg)
{
}
static void
l1_activate(struct FsmInst *fi, int event, void *arg)
{
struct st5481_adapter *adapter = fi->userdata;
st5481_ph_command(adapter, ST5481_CMD_DR);
st5481_ph_command(adapter, ST5481_CMD_PUP);
FsmRestartTimer(&adapter->timer, TIMER3_VALUE, EV_TIMER3, NULL, 2);
st5481_ph_command(adapter, ST5481_CMD_AR8);
FsmChangeState(fi, ST_L1_F4);
}
static struct FsmNode L1FnList[] __initdata =
{
{ST_L1_F3, EV_IND_DP, l1_ignore},
{ST_L1_F3, EV_IND_AP, l1_go_f6},
{ST_L1_F3, EV_IND_AI8, l1_go_f7},
{ST_L1_F3, EV_IND_AI10, l1_go_f7},
{ST_L1_F3, EV_PH_ACTIVATE_REQ, l1_activate},
{ST_L1_F4, EV_TIMER3, l1_timer3},
{ST_L1_F4, EV_IND_DP, l1_go_f3},
{ST_L1_F4, EV_IND_AP, l1_go_f6},
{ST_L1_F4, EV_IND_AI8, l1_go_f7},
{ST_L1_F4, EV_IND_AI10, l1_go_f7},
{ST_L1_F6, EV_TIMER3, l1_timer3},
{ST_L1_F6, EV_IND_DP, l1_go_f3},
{ST_L1_F6, EV_IND_AP, l1_ignore},
{ST_L1_F6, EV_IND_AI8, l1_go_f7},
{ST_L1_F6, EV_IND_AI10, l1_go_f7},
{ST_L1_F7, EV_IND_RSY, l1_go_f8},
{ST_L1_F7, EV_IND_DP, l1_go_f3},
{ST_L1_F7, EV_IND_AP, l1_go_f6},
{ST_L1_F7, EV_IND_AI8, l1_ignore},
{ST_L1_F7, EV_IND_AI10, l1_ignore},
{ST_L1_F7, EV_IND_RSY, l1_go_f8},
{ST_L1_F8, EV_TIMER3, l1_timer3},
{ST_L1_F8, EV_IND_DP, l1_go_f3},
{ST_L1_F8, EV_IND_AP, l1_go_f6},
{ST_L1_F8, EV_IND_AI8, l1_go_f8},
{ST_L1_F8, EV_IND_AI10, l1_go_f8},
{ST_L1_F8, EV_IND_RSY, l1_ignore},
};
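/*
 * Example walk through the table above: with the FSM idle in ST_L1_F3,
 * FsmEvent(&adapter->l1m, EV_PH_ACTIVATE_REQ, NULL) runs l1_activate(),
 * which moves the FSM to ST_L1_F4; a subsequent EV_IND_AI8 indication
 * from the chip then runs l1_go_f7() and the line reaches the activated
 * state ST_L1_F7.
 */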
static __printf(2, 3)
void l1m_debug(struct FsmInst *fi, char *fmt, ...)
{
va_list args;
char buf[256];
va_start(args, fmt);
vsnprintf(buf, sizeof(buf), fmt, args);
DBG(8, "%s", buf);
va_end(args);
}
/* ======================================================================
* D-Channel out
*/
/*
D OUT state machine:
====================
Transmit short frame (< 16 bytes of encoded data):
L1 FRAME D_OUT_STATE USB D CHANNEL
-------- ----------- --- ---------
FIXME
-> [xx..xx] SHORT_INIT -> [7Exx..xxC1C27EFF]
SHORT_WAIT_DEN <> OUT_D_COUNTER=16
END_OF_SHORT <- DEN_EVENT -> 7Exx
xxxx
xxxx
xxxx
xxxx
xxxx
C1C1
7EFF
WAIT_FOR_RESET_IDLE <- D_UNDERRUN <- (8ms)
IDLE <> Reset pipe
Transmit long frame (>= 16 bytes of encoded data):
L1 FRAME D_OUT_STATE USB D CHANNEL
-------- ----------- --- ---------
-> [xx...xx] IDLE
WAIT_FOR_STOP <> OUT_D_COUNTER=0
WAIT_FOR_RESET <> Reset pipe
STOP
INIT_LONG_FRAME -> [7Exx..xx]
WAIT_DEN <> OUT_D_COUNTER=16
OUT_NORMAL <- DEN_EVENT -> 7Exx
END_OF_FRAME_BUSY -> [xxxx] xxxx
END_OF_FRAME_NOT_BUSY -> [xxxx] xxxx
-> [xxxx] xxxx
-> [C1C2] xxxx
-> [7EFF] xxxx
xxxx
xxxx
....
xxxx
C1C2
7EFF
<- D_UNDERRUN <- (> 8ms)
WAIT_FOR_STOP <> OUT_D_COUNTER=0
WAIT_FOR_RESET <> Reset pipe
STOP
*/
static struct Fsm dout_fsm;
static char *strDoutState[] =
{
"ST_DOUT_NONE",
"ST_DOUT_SHORT_INIT",
"ST_DOUT_SHORT_WAIT_DEN",
"ST_DOUT_LONG_INIT",
"ST_DOUT_LONG_WAIT_DEN",
"ST_DOUT_NORMAL",
"ST_DOUT_WAIT_FOR_UNDERRUN",
"ST_DOUT_WAIT_FOR_NOT_BUSY",
"ST_DOUT_WAIT_FOR_STOP",
"ST_DOUT_WAIT_FOR_RESET",
};
static char *strDoutEvent[] =
{
"EV_DOUT_START_XMIT",
"EV_DOUT_COMPLETE",
"EV_DOUT_DEN",
"EV_DOUT_RESETED",
"EV_DOUT_STOPPED",
"EV_DOUT_COLL",
"EV_DOUT_UNDERRUN",
};
static __printf(2, 3)
void dout_debug(struct FsmInst *fi, char *fmt, ...)
{
va_list args;
char buf[256];
va_start(args, fmt);
vsnprintf(buf, sizeof(buf), fmt, args);
DBG(0x2, "%s", buf);
va_end(args);
}
static void dout_stop_event(void *context)
{
struct st5481_adapter *adapter = context;
FsmEvent(&adapter->d_out.fsm, EV_DOUT_STOPPED, NULL);
}
/*
* Start the transfer of a D channel frame.
*/
static void usb_d_out(struct st5481_adapter *adapter, int buf_nr)
{
struct st5481_d_out *d_out = &adapter->d_out;
struct urb *urb;
unsigned int num_packets, packet_offset;
int len, buf_size, bytes_sent;
struct sk_buff *skb;
struct usb_iso_packet_descriptor *desc;
if (d_out->fsm.state != ST_DOUT_NORMAL)
return;
if (test_and_set_bit(buf_nr, &d_out->busy)) {
DBG(2, "ep %d urb %d busy %#lx", EP_D_OUT, buf_nr, d_out->busy);
return;
}
urb = d_out->urb[buf_nr];
skb = d_out->tx_skb;
buf_size = NUM_ISO_PACKETS_D * SIZE_ISO_PACKETS_D_OUT;
if (skb) {
len = isdnhdlc_encode(&d_out->hdlc_state,
skb->data, skb->len, &bytes_sent,
urb->transfer_buffer, buf_size);
skb_pull(skb, bytes_sent);
} else {
// Send flags or idle
len = isdnhdlc_encode(&d_out->hdlc_state,
NULL, 0, &bytes_sent,
urb->transfer_buffer, buf_size);
}
if (len < buf_size) {
FsmChangeState(&d_out->fsm, ST_DOUT_WAIT_FOR_UNDERRUN);
}
if (skb && !skb->len) {
d_out->tx_skb = NULL;
D_L1L2(adapter, PH_DATA | CONFIRM, NULL);
dev_kfree_skb_any(skb);
}
// Prepare the URB
urb->transfer_buffer_length = len;
num_packets = 0;
packet_offset = 0;
while (packet_offset < len) {
desc = &urb->iso_frame_desc[num_packets];
desc->offset = packet_offset;
desc->length = SIZE_ISO_PACKETS_D_OUT;
if (len - packet_offset < desc->length)
desc->length = len - packet_offset;
num_packets++;
packet_offset += desc->length;
}
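/*
 * Worked example for the loop above: if len were 40 bytes and each iso
 * packet held 16 bytes, the loop would produce three descriptors of 16,
 * 16 and 8 bytes (hypothetical numbers, for illustration only).
 */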
urb->number_of_packets = num_packets;
// Prepare the URB
urb->dev = adapter->usb_dev;
// Need to transmit the next buffer 2ms after the DEN_EVENT
urb->transfer_flags = 0;
urb->start_frame = usb_get_current_frame_number(adapter->usb_dev) + 2;
DBG_ISO_PACKET(0x20, urb);
if (usb_submit_urb(urb, GFP_KERNEL) < 0) {
// There is another URB queued up
urb->transfer_flags = URB_ISO_ASAP;
SUBMIT_URB(urb, GFP_KERNEL);
}
}
static void fifo_reseted(void *context)
{
struct st5481_adapter *adapter = context;
FsmEvent(&adapter->d_out.fsm, EV_DOUT_RESETED, NULL);
}
static void usb_d_out_complete(struct urb *urb)
{
struct st5481_adapter *adapter = urb->context;
struct st5481_d_out *d_out = &adapter->d_out;
long buf_nr;
DBG(2, "");
buf_nr = get_buf_nr(d_out->urb, urb);
test_and_clear_bit(buf_nr, &d_out->busy);
if (unlikely(urb->status < 0)) {
switch (urb->status) {
case -ENOENT:
case -ESHUTDOWN:
case -ECONNRESET:
DBG(1, "urb killed status %d", urb->status);
break;
default:
WARNING("urb status %d", urb->status);
if (d_out->busy == 0) {
st5481_usb_pipe_reset(adapter, EP_D_OUT | USB_DIR_OUT, fifo_reseted, adapter);
}
break;
}
return; // Give up
}
FsmEvent(&adapter->d_out.fsm, EV_DOUT_COMPLETE, (void *) buf_nr);
}
/* ====================================================================== */
static void dout_start_xmit(struct FsmInst *fsm, int event, void *arg)
{
// FIXME unify?
struct st5481_adapter *adapter = fsm->userdata;
struct st5481_d_out *d_out = &adapter->d_out;
struct urb *urb;
int len, bytes_sent;
struct sk_buff *skb;
int buf_nr = 0;
skb = d_out->tx_skb;
DBG(2, "len=%d", skb->len);
isdnhdlc_out_init(&d_out->hdlc_state, HDLC_DCHANNEL | HDLC_BITREVERSE);
if (test_and_set_bit(buf_nr, &d_out->busy)) {
WARNING("ep %d urb %d busy %#lx", EP_D_OUT, buf_nr, d_out->busy);
return;
}
urb = d_out->urb[buf_nr];
DBG_SKB(0x10, skb);
len = isdnhdlc_encode(&d_out->hdlc_state,
skb->data, skb->len, &bytes_sent,
urb->transfer_buffer, 16);
skb_pull(skb, bytes_sent);
if (len < 16)
FsmChangeState(&d_out->fsm, ST_DOUT_SHORT_INIT);
else
FsmChangeState(&d_out->fsm, ST_DOUT_LONG_INIT);
if (skb->len == 0) {
d_out->tx_skb = NULL;
D_L1L2(adapter, PH_DATA | CONFIRM, NULL);
dev_kfree_skb_any(skb);
}
// Prepare the URB
urb->transfer_buffer_length = len;
urb->iso_frame_desc[0].offset = 0;
urb->iso_frame_desc[0].length = len;
urb->number_of_packets = 1;
// Prepare the URB
urb->dev = adapter->usb_dev;
urb->transfer_flags = URB_ISO_ASAP;
DBG_ISO_PACKET(0x20, urb);
SUBMIT_URB(urb, GFP_KERNEL);
}
static void dout_short_fifo(struct FsmInst *fsm, int event, void *arg)
{
struct st5481_adapter *adapter = fsm->userdata;
struct st5481_d_out *d_out = &adapter->d_out;
FsmChangeState(&d_out->fsm, ST_DOUT_SHORT_WAIT_DEN);
st5481_usb_device_ctrl_msg(adapter, OUT_D_COUNTER, 16, NULL, NULL);
}
static void dout_end_short_frame(struct FsmInst *fsm, int event, void *arg)
{
struct st5481_adapter *adapter = fsm->userdata;
struct st5481_d_out *d_out = &adapter->d_out;
FsmChangeState(&d_out->fsm, ST_DOUT_WAIT_FOR_UNDERRUN);
}
static void dout_long_enable_fifo(struct FsmInst *fsm, int event, void *arg)
{
struct st5481_adapter *adapter = fsm->userdata;
struct st5481_d_out *d_out = &adapter->d_out;
st5481_usb_device_ctrl_msg(adapter, OUT_D_COUNTER, 16, NULL, NULL);
FsmChangeState(&d_out->fsm, ST_DOUT_LONG_WAIT_DEN);
}
static void dout_long_den(struct FsmInst *fsm, int event, void *arg)
{
struct st5481_adapter *adapter = fsm->userdata;
struct st5481_d_out *d_out = &adapter->d_out;
FsmChangeState(&d_out->fsm, ST_DOUT_NORMAL);
usb_d_out(adapter, 0);
usb_d_out(adapter, 1);
}
static void dout_reset(struct FsmInst *fsm, int event, void *arg)
{
struct st5481_adapter *adapter = fsm->userdata;
struct st5481_d_out *d_out = &adapter->d_out;
FsmChangeState(&d_out->fsm, ST_DOUT_WAIT_FOR_RESET);
st5481_usb_pipe_reset(adapter, EP_D_OUT | USB_DIR_OUT, fifo_reseted, adapter);
}
static void dout_stop(struct FsmInst *fsm, int event, void *arg)
{
struct st5481_adapter *adapter = fsm->userdata;
struct st5481_d_out *d_out = &adapter->d_out;
FsmChangeState(&d_out->fsm, ST_DOUT_WAIT_FOR_STOP);
st5481_usb_device_ctrl_msg(adapter, OUT_D_COUNTER, 0, dout_stop_event, adapter);
}
static void dout_underrun(struct FsmInst *fsm, int event, void *arg)
{
struct st5481_adapter *adapter = fsm->userdata;
struct st5481_d_out *d_out = &adapter->d_out;
if (test_bit(0, &d_out->busy) || test_bit(1, &d_out->busy)) {
FsmChangeState(&d_out->fsm, ST_DOUT_WAIT_FOR_NOT_BUSY);
} else {
dout_stop(fsm, event, arg);
}
}
static void dout_check_busy(struct FsmInst *fsm, int event, void *arg)
{
struct st5481_adapter *adapter = fsm->userdata;
struct st5481_d_out *d_out = &adapter->d_out;
if (!test_bit(0, &d_out->busy) && !test_bit(1, &d_out->busy))
dout_stop(fsm, event, arg);
}
static void dout_reseted(struct FsmInst *fsm, int event, void *arg)
{
struct st5481_adapter *adapter = fsm->userdata;
struct st5481_d_out *d_out = &adapter->d_out;
FsmChangeState(&d_out->fsm, ST_DOUT_NONE);
// FIXME locking
if (d_out->tx_skb)
FsmEvent(&d_out->fsm, EV_DOUT_START_XMIT, NULL);
}
static void dout_complete(struct FsmInst *fsm, int event, void *arg)
{
struct st5481_adapter *adapter = fsm->userdata;
long buf_nr = (long) arg;
usb_d_out(adapter, buf_nr);
}
static void dout_ignore(struct FsmInst *fsm, int event, void *arg)
{
}
static struct FsmNode DoutFnList[] __initdata =
{
{ST_DOUT_NONE, EV_DOUT_START_XMIT, dout_start_xmit},
{ST_DOUT_SHORT_INIT, EV_DOUT_COMPLETE, dout_short_fifo},
{ST_DOUT_SHORT_WAIT_DEN, EV_DOUT_DEN, dout_end_short_frame},
{ST_DOUT_SHORT_WAIT_DEN, EV_DOUT_UNDERRUN, dout_underrun},
{ST_DOUT_LONG_INIT, EV_DOUT_COMPLETE, dout_long_enable_fifo},
{ST_DOUT_LONG_WAIT_DEN, EV_DOUT_DEN, dout_long_den},
{ST_DOUT_LONG_WAIT_DEN, EV_DOUT_UNDERRUN, dout_underrun},
{ST_DOUT_NORMAL, EV_DOUT_UNDERRUN, dout_underrun},
{ST_DOUT_NORMAL, EV_DOUT_COMPLETE, dout_complete},
{ST_DOUT_WAIT_FOR_UNDERRUN, EV_DOUT_UNDERRUN, dout_underrun},
{ST_DOUT_WAIT_FOR_UNDERRUN, EV_DOUT_COMPLETE, dout_ignore},
{ST_DOUT_WAIT_FOR_NOT_BUSY, EV_DOUT_COMPLETE, dout_check_busy},
{ST_DOUT_WAIT_FOR_STOP, EV_DOUT_STOPPED, dout_reset},
{ST_DOUT_WAIT_FOR_RESET, EV_DOUT_RESETED, dout_reseted},
};
void st5481_d_l2l1(struct hisax_if *hisax_d_if, int pr, void *arg)
{
struct st5481_adapter *adapter = hisax_d_if->priv;
struct sk_buff *skb = arg;
switch (pr) {
case PH_ACTIVATE | REQUEST:
FsmEvent(&adapter->l1m, EV_PH_ACTIVATE_REQ, NULL);
break;
case PH_DEACTIVATE | REQUEST:
FsmEvent(&adapter->l1m, EV_PH_DEACTIVATE_REQ, NULL);
break;
case PH_DATA | REQUEST:
DBG(2, "PH_DATA REQUEST len %d", skb->len);
BUG_ON(adapter->d_out.tx_skb);
adapter->d_out.tx_skb = skb;
FsmEvent(&adapter->d_out.fsm, EV_DOUT_START_XMIT, NULL);
break;
default:
WARNING("pr %#x\n", pr);
break;
}
}
/* ======================================================================
*/
/*
* Start receiving on the D channel since entered state F7.
*/
static void ph_connect(struct st5481_adapter *adapter)
{
struct st5481_d_out *d_out = &adapter->d_out;
struct st5481_in *d_in = &adapter->d_in;
DBG(8, "");
FsmChangeState(&d_out->fsm, ST_DOUT_NONE);
// st5481_usb_device_ctrl_msg(adapter, FFMSK_D, OUT_UNDERRUN, NULL, NULL);
st5481_usb_device_ctrl_msg(adapter, FFMSK_D, 0xfc, NULL, NULL);
st5481_in_mode(d_in, L1_MODE_HDLC);
#ifdef LOOPBACK
// Turn loopback on (data sent on B and D looped back)
st5481_usb_device_ctrl_msg(adapter, LBB, 0x04, NULL, NULL);
#endif
st5481_usb_pipe_reset(adapter, EP_D_OUT | USB_DIR_OUT, NULL, NULL);
// Turn on the green LED to tell that we are in state F7
adapter->leds |= GREEN_LED;
st5481_usb_device_ctrl_msg(adapter, GPIO_OUT, adapter->leds, NULL, NULL);
}
/*
* Stop receiving on the D channel since not in state F7.
*/
static void ph_disconnect(struct st5481_adapter *adapter)
{
DBG(8, "");
st5481_in_mode(&adapter->d_in, L1_MODE_NULL);
// Turn off the green LED to tell that we left state F7
adapter->leds &= ~GREEN_LED;
st5481_usb_device_ctrl_msg(adapter, GPIO_OUT, adapter->leds, NULL, NULL);
}
static int st5481_setup_d_out(struct st5481_adapter *adapter)
{
struct usb_device *dev = adapter->usb_dev;
struct usb_interface *intf;
struct usb_host_interface *altsetting = NULL;
struct usb_host_endpoint *endpoint;
struct st5481_d_out *d_out = &adapter->d_out;
DBG(2, "");
intf = usb_ifnum_to_if(dev, 0);
if (intf)
altsetting = usb_altnum_to_altsetting(intf, 3);
if (!altsetting)
return -ENXIO;
// Allocate URBs and buffers for the D channel out
endpoint = &altsetting->endpoint[EP_D_OUT-1];
DBG(2, "endpoint address=%02x,packet size=%d",
endpoint->desc.bEndpointAddress, le16_to_cpu(endpoint->desc.wMaxPacketSize));
return st5481_setup_isocpipes(d_out->urb, dev,
usb_sndisocpipe(dev, endpoint->desc.bEndpointAddress),
NUM_ISO_PACKETS_D, SIZE_ISO_PACKETS_D_OUT,
NUM_ISO_PACKETS_D * SIZE_ISO_PACKETS_D_OUT,
usb_d_out_complete, adapter);
}
static void st5481_release_d_out(struct st5481_adapter *adapter)
{
struct st5481_d_out *d_out = &adapter->d_out;
DBG(2, "");
st5481_release_isocpipes(d_out->urb);
}
int st5481_setup_d(struct st5481_adapter *adapter)
{
int retval;
DBG(2, "");
retval = st5481_setup_d_out(adapter);
if (retval)
goto err;
adapter->d_in.bufsize = MAX_DFRAME_LEN_L1;
adapter->d_in.num_packets = NUM_ISO_PACKETS_D;
adapter->d_in.packet_size = SIZE_ISO_PACKETS_D_IN;
adapter->d_in.ep = EP_D_IN | USB_DIR_IN;
adapter->d_in.counter = IN_D_COUNTER;
adapter->d_in.adapter = adapter;
adapter->d_in.hisax_if = &adapter->hisax_d_if.ifc;
retval = st5481_setup_in(&adapter->d_in);
if (retval)
goto err_d_out;
adapter->l1m.fsm = &l1fsm;
adapter->l1m.state = ST_L1_F3;
adapter->l1m.debug = st5481_debug & 0x100;
adapter->l1m.userdata = adapter;
adapter->l1m.printdebug = l1m_debug;
FsmInitTimer(&adapter->l1m, &adapter->timer);
adapter->d_out.fsm.fsm = &dout_fsm;
adapter->d_out.fsm.state = ST_DOUT_NONE;
adapter->d_out.fsm.debug = st5481_debug & 0x100;
adapter->d_out.fsm.userdata = adapter;
adapter->d_out.fsm.printdebug = dout_debug;
return 0;
err_d_out:
st5481_release_d_out(adapter);
err:
return retval;
}
void st5481_release_d(struct st5481_adapter *adapter)
{
DBG(2, "");
st5481_release_in(&adapter->d_in);
st5481_release_d_out(adapter);
}
/* ======================================================================
* init / exit
*/
int __init st5481_d_init(void)
{
int retval;
l1fsm.state_count = L1_STATE_COUNT;
l1fsm.event_count = L1_EVENT_COUNT;
l1fsm.strEvent = strL1Event;
l1fsm.strState = strL1State;
retval = FsmNew(&l1fsm, L1FnList, ARRAY_SIZE(L1FnList));
if (retval)
goto err;
dout_fsm.state_count = DOUT_STATE_COUNT;
dout_fsm.event_count = DOUT_EVENT_COUNT;
dout_fsm.strEvent = strDoutEvent;
dout_fsm.strState = strDoutState;
retval = FsmNew(&dout_fsm, DoutFnList, ARRAY_SIZE(DoutFnList));
if (retval)
goto err_l1;
return 0;
err_l1:
FsmFree(&l1fsm);
err:
return retval;
}
// can't be __exit
void st5481_d_exit(void)
{
FsmFree(&l1fsm);
FsmFree(&dout_fsm);
}
| gpl-2.0 |
bas-t/linux_media | drivers/usb/storage/usual-tables.c | 327 | 3569 | /*
* Driver for USB Mass Storage devices
* Usual Tables File for usb-storage and libusual
*
* Copyright (C) 2009 Alan Stern (stern@rowland.harvard.edu)
*
* Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
* information about this driver.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb_usual.h>
/*
* The table of devices
*/
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
vendorName, productName, useProtocol, useTransport, \
initFunction, flags) \
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
#define COMPLIANT_DEV UNUSUAL_DEV
#define USUAL_DEV(useProto, useTrans) \
{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans) }
/* Define a device that is matched by its Vendor ID and interface descriptors */
#define UNUSUAL_VENDOR_INTF(id_vendor, cl, sc, pr, \
vendorName, productName, useProtocol, useTransport, \
initFunction, flags) \
{ \
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
| USB_DEVICE_ID_MATCH_VENDOR, \
.idVendor = (id_vendor), \
.bInterfaceClass = (cl), \
.bInterfaceSubClass = (sc), \
.bInterfaceProtocol = (pr), \
.driver_info = (flags) \
}
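/*
 * Illustrative only: with the UNUSUAL_DEV() definition above, an entry in
 * unusual_devs.h such as the hypothetical
 *
 *	UNUSUAL_DEV(0x1234, 0x5678, 0x0100, 0x0200,
 *		"Example Vendor", "Example Product",
 *		USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_IGNORE_RESIDUE)
 *
 * expands to a usb_device_id initializer of the form
 *
 *	{ USB_DEVICE_VER(0x1234, 0x5678, 0x0100, 0x0200),
 *	  .driver_info = (US_FL_IGNORE_RESIDUE) }
 *
 * i.e. only the IDs and the flags survive into usb_storage_usb_ids[].
 */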
struct usb_device_id usb_storage_usb_ids[] = {
# include "unusual_devs.h"
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, usb_storage_usb_ids);
#undef UNUSUAL_DEV
#undef COMPLIANT_DEV
#undef USUAL_DEV
#undef UNUSUAL_VENDOR_INTF
/*
* The table of devices to ignore
*/
struct ignore_entry {
u16 vid, pid, bcdmin, bcdmax;
};
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
vendorName, productName, useProtocol, useTransport, \
initFunction, flags) \
{ \
.vid = id_vendor, \
.pid = id_product, \
.bcdmin = bcdDeviceMin, \
.bcdmax = bcdDeviceMax, \
}
static struct ignore_entry ignore_ids[] = {
# include "unusual_alauda.h"
# include "unusual_cypress.h"
# include "unusual_datafab.h"
# include "unusual_ene_ub6250.h"
# include "unusual_freecom.h"
# include "unusual_isd200.h"
# include "unusual_jumpshot.h"
# include "unusual_karma.h"
# include "unusual_onetouch.h"
# include "unusual_realtek.h"
# include "unusual_sddr09.h"
# include "unusual_sddr55.h"
# include "unusual_usbat.h"
{ } /* Terminating entry */
};
#undef UNUSUAL_DEV
/* Return an error if a device is in the ignore_ids list */
int usb_usual_ignore_device(struct usb_interface *intf)
{
struct usb_device *udev;
unsigned vid, pid, bcd;
struct ignore_entry *p;
udev = interface_to_usbdev(intf);
vid = le16_to_cpu(udev->descriptor.idVendor);
pid = le16_to_cpu(udev->descriptor.idProduct);
bcd = le16_to_cpu(udev->descriptor.bcdDevice);
for (p = ignore_ids; p->vid; ++p) {
if (p->vid == vid && p->pid == pid &&
p->bcdmin <= bcd && p->bcdmax >= bcd)
return -ENXIO;
}
return 0;
}
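/*
 * Typical use (sketch): usb-storage's probe path calls this early and
 * declines devices claimed by one of the sub-drivers whose tables feed
 * ignore_ids[] above, roughly:
 *
 *	if (usb_usual_ignore_device(intf))
 *		return -ENXIO;
 */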
| gpl-2.0 |
franzjesus/GTab-10.1-AOKP | drivers/usb/gadget/printer.c | 583 | 41443 | /*
* printer.c -- Printer gadget driver
*
* Copyright (C) 2003-2005 David Brownell
* Copyright (C) 2006 Craig W. Nadler
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/device.h>
#include <linux/moduleparam.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/cdev.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/g_printer.h>
#include "gadget_chips.h"
/*
* Kbuild is not very cooperative with respect to linking separately
* compiled library objects into one module. So for now we won't use
* separate compilation ... ensuring init/exit sections work to shrink
* the runtime footprint, and giving us at least some parts of what
* a "gcc --combine ... part1.c part2.c part3.c ... " build would.
*/
#include "usbstring.c"
#include "config.c"
#include "epautoconf.c"
/*-------------------------------------------------------------------------*/
#define DRIVER_DESC "Printer Gadget"
#define DRIVER_VERSION "2007 OCT 06"
static DEFINE_MUTEX(printer_mutex);
static const char shortname [] = "printer";
static const char driver_desc [] = DRIVER_DESC;
static dev_t g_printer_devno;
static struct class *usb_gadget_class;
/*-------------------------------------------------------------------------*/
struct printer_dev {
spinlock_t lock; /* lock this structure */
/* lock buffer lists during read/write calls */
struct mutex lock_printer_io;
struct usb_gadget *gadget;
struct usb_request *req; /* for control responses */
u8 config;
s8 interface;
struct usb_ep *in_ep, *out_ep;
const struct usb_endpoint_descriptor
*in, *out;
struct list_head rx_reqs; /* List of free RX structs */
struct list_head rx_reqs_active; /* List of Active RX xfers */
struct list_head rx_buffers; /* List of completed xfers */
/* wait until there is data to be read. */
wait_queue_head_t rx_wait;
struct list_head tx_reqs; /* List of free TX structs */
struct list_head tx_reqs_active; /* List of Active TX xfers */
/* Wait until there are write buffers available to use. */
wait_queue_head_t tx_wait;
/* Wait until all write buffers have been sent. */
wait_queue_head_t tx_flush_wait;
struct usb_request *current_rx_req;
size_t current_rx_bytes;
u8 *current_rx_buf;
u8 printer_status;
u8 reset_printer;
struct cdev printer_cdev;
struct device *pdev;
u8 printer_cdev_open;
wait_queue_head_t wait;
};
static struct printer_dev usb_printer_gadget;
/*-------------------------------------------------------------------------*/
/* DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!!
* Instead: allocate your own, using normal USB-IF procedures.
*/
/* Thanks to NetChip Technologies for donating this product ID.
*/
#define PRINTER_VENDOR_NUM 0x0525 /* NetChip */
#define PRINTER_PRODUCT_NUM 0xa4a8 /* Linux-USB Printer Gadget */
/* Some systems will want different product identifiers published in the
* device descriptor, either numbers or strings or both. These string
* parameters are in UTF-8 (superset of ASCII's 7 bit characters).
*/
static ushort idVendor;
module_param(idVendor, ushort, S_IRUGO);
MODULE_PARM_DESC(idVendor, "USB Vendor ID");
static ushort idProduct;
module_param(idProduct, ushort, S_IRUGO);
MODULE_PARM_DESC(idProduct, "USB Product ID");
static ushort bcdDevice;
module_param(bcdDevice, ushort, S_IRUGO);
MODULE_PARM_DESC(bcdDevice, "USB Device version (BCD)");
static char *iManufacturer;
module_param(iManufacturer, charp, S_IRUGO);
MODULE_PARM_DESC(iManufacturer, "USB Manufacturer string");
static char *iProduct;
module_param(iProduct, charp, S_IRUGO);
MODULE_PARM_DESC(iProduct, "USB Product string");
static char *iSerialNum;
module_param(iSerialNum, charp, S_IRUGO);
MODULE_PARM_DESC(iSerialNum, "USB Serial Number");
static char *iPNPstring;
module_param(iPNPstring, charp, S_IRUGO);
MODULE_PARM_DESC(iPNPstring, "MFG:linux;MDL:g_printer;CLS:PRINTER;SN:1;");
/* Number of requests to allocate per endpoint, not used for ep0. */
static unsigned qlen = 10;
module_param(qlen, uint, S_IRUGO|S_IWUSR);
#define QLEN qlen
#ifdef CONFIG_USB_GADGET_DUALSPEED
#define DEVSPEED USB_SPEED_HIGH
#else /* full speed (low speed doesn't do bulk) */
#define DEVSPEED USB_SPEED_FULL
#endif
/*-------------------------------------------------------------------------*/
#define xprintk(d, level, fmt, args...) \
printk(level "%s: " fmt, DRIVER_DESC, ## args)
#ifdef DEBUG
#define DBG(dev, fmt, args...) \
xprintk(dev, KERN_DEBUG, fmt, ## args)
#else
#define DBG(dev, fmt, args...) \
do { } while (0)
#endif /* DEBUG */
#ifdef VERBOSE
#define VDBG(dev, fmt, args...) \
xprintk(dev, KERN_DEBUG, fmt, ## args)
#else
#define VDBG(dev, fmt, args...) \
do { } while (0)
#endif /* VERBOSE */
#define ERROR(dev, fmt, args...) \
xprintk(dev, KERN_ERR, fmt, ## args)
#define WARNING(dev, fmt, args...) \
xprintk(dev, KERN_WARNING, fmt, ## args)
#define INFO(dev, fmt, args...) \
xprintk(dev, KERN_INFO, fmt, ## args)
/*-------------------------------------------------------------------------*/
/* USB DRIVER HOOKUP (to the hardware driver, below us), mostly
* ep0 implementation: descriptors, config management, setup().
* also optional class-specific notification interrupt transfer.
*/
/*
* DESCRIPTORS ... most are static, but strings and (full) configuration
* descriptors are built on demand.
*/
#define STRING_MANUFACTURER 1
#define STRING_PRODUCT 2
#define STRING_SERIALNUM 3
/* holds our biggest descriptor */
#define USB_DESC_BUFSIZE 256
#define USB_BUFSIZE 8192
/* This device advertises one configuration. */
#define DEV_CONFIG_VALUE 1
#define PRINTER_INTERFACE 0
static struct usb_device_descriptor device_desc = {
.bLength = sizeof device_desc,
.bDescriptorType = USB_DT_DEVICE,
.bcdUSB = cpu_to_le16(0x0200),
.bDeviceClass = USB_CLASS_PER_INTERFACE,
.bDeviceSubClass = 0,
.bDeviceProtocol = 0,
.idVendor = cpu_to_le16(PRINTER_VENDOR_NUM),
.idProduct = cpu_to_le16(PRINTER_PRODUCT_NUM),
.iManufacturer = STRING_MANUFACTURER,
.iProduct = STRING_PRODUCT,
.iSerialNumber = STRING_SERIALNUM,
.bNumConfigurations = 1
};
static struct usb_otg_descriptor otg_desc = {
.bLength = sizeof otg_desc,
.bDescriptorType = USB_DT_OTG,
.bmAttributes = USB_OTG_SRP
};
static struct usb_config_descriptor config_desc = {
.bLength = sizeof config_desc,
.bDescriptorType = USB_DT_CONFIG,
/* compute wTotalLength on the fly */
.bNumInterfaces = 1,
.bConfigurationValue = DEV_CONFIG_VALUE,
.iConfiguration = 0,
.bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
.bMaxPower = CONFIG_USB_GADGET_VBUS_DRAW / 2,
};
static struct usb_interface_descriptor intf_desc = {
.bLength = sizeof intf_desc,
.bDescriptorType = USB_DT_INTERFACE,
.bInterfaceNumber = PRINTER_INTERFACE,
.bNumEndpoints = 2,
.bInterfaceClass = USB_CLASS_PRINTER,
.bInterfaceSubClass = 1, /* Printer Sub-Class */
.bInterfaceProtocol = 2, /* Bi-Directional */
.iInterface = 0
};
static struct usb_endpoint_descriptor fs_ep_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK
};
static struct usb_endpoint_descriptor fs_ep_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK
};
static const struct usb_descriptor_header *fs_printer_function [11] = {
(struct usb_descriptor_header *) &otg_desc,
(struct usb_descriptor_header *) &intf_desc,
(struct usb_descriptor_header *) &fs_ep_in_desc,
(struct usb_descriptor_header *) &fs_ep_out_desc,
NULL
};
#ifdef CONFIG_USB_GADGET_DUALSPEED
/*
* usb 2.0 devices need to expose both high speed and full speed
* descriptors, unless they only run at full speed.
*/
static struct usb_endpoint_descriptor hs_ep_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(512)
};
static struct usb_endpoint_descriptor hs_ep_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(512)
};
static struct usb_qualifier_descriptor dev_qualifier = {
.bLength = sizeof dev_qualifier,
.bDescriptorType = USB_DT_DEVICE_QUALIFIER,
.bcdUSB = cpu_to_le16(0x0200),
.bDeviceClass = USB_CLASS_PRINTER,
.bNumConfigurations = 1
};
static const struct usb_descriptor_header *hs_printer_function [11] = {
(struct usb_descriptor_header *) &otg_desc,
(struct usb_descriptor_header *) &intf_desc,
(struct usb_descriptor_header *) &hs_ep_in_desc,
(struct usb_descriptor_header *) &hs_ep_out_desc,
NULL
};
/* maxpacket and other transfer characteristics vary by speed. */
#define ep_desc(g, hs, fs) (((g)->speed == USB_SPEED_HIGH)?(hs):(fs))
#else
/* if there's no high speed support, maxpacket doesn't change. */
#define ep_desc(g, hs, fs) (((void)(g)), (fs))
#endif /* !CONFIG_USB_GADGET_DUALSPEED */
/*-------------------------------------------------------------------------*/
/* descriptors that are built on-demand */
static char manufacturer [50];
static char product_desc [40] = DRIVER_DESC;
static char serial_num [40] = "1";
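/* In the IEEE 1284 device-ID string below, the leading "XX" bytes are a
 * placeholder for the big-endian two-byte length prefix; they are filled
 * in from the final string length before the string is sent to the host.
 */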
static char pnp_string [1024] =
"XXMFG:linux;MDL:g_printer;CLS:PRINTER;SN:1;";
/* static strings, in UTF-8 */
static struct usb_string strings [] = {
{ STRING_MANUFACTURER, manufacturer, },
{ STRING_PRODUCT, product_desc, },
{ STRING_SERIALNUM, serial_num, },
{ } /* end of list */
};
static struct usb_gadget_strings stringtab = {
.language = 0x0409, /* en-us */
.strings = strings,
};
/*-------------------------------------------------------------------------*/
static struct usb_request *
printer_req_alloc(struct usb_ep *ep, unsigned len, gfp_t gfp_flags)
{
struct usb_request *req;
req = usb_ep_alloc_request(ep, gfp_flags);
if (req != NULL) {
req->length = len;
req->buf = kmalloc(len, gfp_flags);
if (req->buf == NULL) {
usb_ep_free_request(ep, req);
return NULL;
}
}
return req;
}
static void
printer_req_free(struct usb_ep *ep, struct usb_request *req)
{
if (ep != NULL && req != NULL) {
kfree(req->buf);
usb_ep_free_request(ep, req);
}
}
/*-------------------------------------------------------------------------*/
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
struct printer_dev *dev = ep->driver_data;
int status = req->status;
unsigned long flags;
spin_lock_irqsave(&dev->lock, flags);
list_del_init(&req->list); /* Remove from the active list */
switch (status) {
/* normal completion */
case 0:
if (req->actual > 0) {
list_add_tail(&req->list, &dev->rx_buffers);
DBG(dev, "G_Printer : rx length %d\n", req->actual);
} else {
list_add(&req->list, &dev->rx_reqs);
}
break;
/* software-driven interface shutdown */
case -ECONNRESET: /* unlink */
case -ESHUTDOWN: /* disconnect etc */
VDBG(dev, "rx shutdown, code %d\n", status);
list_add(&req->list, &dev->rx_reqs);
break;
/* for hardware automagic (such as pxa) */
case -ECONNABORTED: /* endpoint reset */
DBG(dev, "rx %s reset\n", ep->name);
list_add(&req->list, &dev->rx_reqs);
break;
/* data overrun */
case -EOVERFLOW:
/* FALLTHROUGH */
default:
DBG(dev, "rx status %d\n", status);
list_add(&req->list, &dev->rx_reqs);
break;
}
wake_up_interruptible(&dev->rx_wait);
spin_unlock_irqrestore(&dev->lock, flags);
}
static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
struct printer_dev *dev = ep->driver_data;
switch (req->status) {
default:
VDBG(dev, "tx err %d\n", req->status);
/* FALLTHROUGH */
case -ECONNRESET: /* unlink */
case -ESHUTDOWN: /* disconnect etc */
break;
case 0:
break;
}
spin_lock(&dev->lock);
/* Take the request struct off the active list and put it on the
* free list.
*/
list_del_init(&req->list);
list_add(&req->list, &dev->tx_reqs);
wake_up_interruptible(&dev->tx_wait);
if (likely(list_empty(&dev->tx_reqs_active)))
wake_up_interruptible(&dev->tx_flush_wait);
spin_unlock(&dev->lock);
}
/*-------------------------------------------------------------------------*/
static int
printer_open(struct inode *inode, struct file *fd)
{
struct printer_dev *dev;
unsigned long flags;
int ret = -EBUSY;
mutex_lock(&printer_mutex);
dev = container_of(inode->i_cdev, struct printer_dev, printer_cdev);
spin_lock_irqsave(&dev->lock, flags);
if (!dev->printer_cdev_open) {
dev->printer_cdev_open = 1;
fd->private_data = dev;
ret = 0;
/* Change the printer status to show that it's on-line. */
dev->printer_status |= PRINTER_SELECTED;
}
spin_unlock_irqrestore(&dev->lock, flags);
DBG(dev, "printer_open returned %x\n", ret);
mutex_unlock(&printer_mutex);
return ret;
}
static int
printer_close(struct inode *inode, struct file *fd)
{
struct printer_dev *dev = fd->private_data;
unsigned long flags;
spin_lock_irqsave(&dev->lock, flags);
dev->printer_cdev_open = 0;
fd->private_data = NULL;
/* Change printer status to show that the printer is off-line. */
dev->printer_status &= ~PRINTER_SELECTED;
spin_unlock_irqrestore(&dev->lock, flags);
DBG(dev, "printer_close\n");
return 0;
}
/* This function must be called with interrupts turned off. */
static void
setup_rx_reqs(struct printer_dev *dev)
{
struct usb_request *req;
while (likely(!list_empty(&dev->rx_reqs))) {
int error;
req = container_of(dev->rx_reqs.next,
struct usb_request, list);
list_del_init(&req->list);
/* The USB Host sends us whatever amount of data it wants to
* so we always set the length field to the full USB_BUFSIZE.
* If the amount of data is more than the read() caller asked
* for it will be stored in the request buffer until it is
* asked for by read().
*/
req->length = USB_BUFSIZE;
req->complete = rx_complete;
error = usb_ep_queue(dev->out_ep, req, GFP_ATOMIC);
if (error) {
DBG(dev, "rx submit --> %d\n", error);
list_add(&req->list, &dev->rx_reqs);
break;
} else {
list_add(&req->list, &dev->rx_reqs_active);
}
}
}
static ssize_t
printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr)
{
struct printer_dev *dev = fd->private_data;
unsigned long flags;
size_t size;
size_t bytes_copied;
struct usb_request *req;
/* This is a pointer to the current USB rx request. */
struct usb_request *current_rx_req;
/* This is the number of bytes in the current rx buffer. */
size_t current_rx_bytes;
/* This is a pointer to the current rx buffer. */
u8 *current_rx_buf;
if (len == 0)
return -EINVAL;
DBG(dev, "printer_read trying to read %d bytes\n", (int)len);
mutex_lock(&dev->lock_printer_io);
spin_lock_irqsave(&dev->lock, flags);
/* We will use this flag later to check if a printer reset happened
* after we turn interrupts back on.
*/
dev->reset_printer = 0;
setup_rx_reqs(dev);
bytes_copied = 0;
current_rx_req = dev->current_rx_req;
current_rx_bytes = dev->current_rx_bytes;
current_rx_buf = dev->current_rx_buf;
dev->current_rx_req = NULL;
dev->current_rx_bytes = 0;
dev->current_rx_buf = NULL;
/* Check if there is any data in the read buffers. Please note that
* current_rx_bytes is the number of bytes in the current rx buffer.
* If it is zero then check if there are any other rx_buffers that
* are on the completed list. We are only out of data if all rx
* buffers are empty.
*/
if ((current_rx_bytes == 0) &&
(likely(list_empty(&dev->rx_buffers)))) {
/* Turn interrupts back on before sleeping. */
spin_unlock_irqrestore(&dev->lock, flags);
/*
* If no data is available check if this is a NON-Blocking
* call or not.
*/
if (fd->f_flags & (O_NONBLOCK|O_NDELAY)) {
mutex_unlock(&dev->lock_printer_io);
return -EAGAIN;
}
/* Sleep until data is available */
wait_event_interruptible(dev->rx_wait,
(likely(!list_empty(&dev->rx_buffers))));
spin_lock_irqsave(&dev->lock, flags);
}
/* If we have data to return, copy it to the caller's buffer. */
while ((current_rx_bytes || likely(!list_empty(&dev->rx_buffers)))
&& len) {
if (current_rx_bytes == 0) {
req = container_of(dev->rx_buffers.next,
struct usb_request, list);
list_del_init(&req->list);
if (req->actual && req->buf) {
current_rx_req = req;
current_rx_bytes = req->actual;
current_rx_buf = req->buf;
} else {
list_add(&req->list, &dev->rx_reqs);
continue;
}
}
/* Don't leave irqs off while doing memory copies */
spin_unlock_irqrestore(&dev->lock, flags);
if (len > current_rx_bytes)
size = current_rx_bytes;
else
size = len;
size -= copy_to_user(buf, current_rx_buf, size);
bytes_copied += size;
len -= size;
buf += size;
spin_lock_irqsave(&dev->lock, flags);
/* We've disconnected or reset so return. */
if (dev->reset_printer) {
list_add(¤t_rx_req->list, &dev->rx_reqs);
spin_unlock_irqrestore(&dev->lock, flags);
mutex_unlock(&dev->lock_printer_io);
return -EAGAIN;
}
/* If we are not returning all the data left in this RX request
* buffer then adjust the amount of data left in the buffer.
* Otherwise, if we are done with this RX request buffer,
* requeue it to get any incoming data from the USB host.
*/
if (size < current_rx_bytes) {
current_rx_bytes -= size;
current_rx_buf += size;
} else {
list_add(¤t_rx_req->list, &dev->rx_reqs);
current_rx_bytes = 0;
current_rx_buf = NULL;
current_rx_req = NULL;
}
}
dev->current_rx_req = current_rx_req;
dev->current_rx_bytes = current_rx_bytes;
dev->current_rx_buf = current_rx_buf;
spin_unlock_irqrestore(&dev->lock, flags);
mutex_unlock(&dev->lock_printer_io);
DBG(dev, "printer_read returned %d bytes\n", (int)bytes_copied);
if (bytes_copied)
return bytes_copied;
else
return -EAGAIN;
}
static ssize_t
printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
{
struct printer_dev *dev = fd->private_data;
unsigned long flags;
size_t size; /* Amount of data in a TX request. */
size_t bytes_copied = 0;
struct usb_request *req;
DBG(dev, "printer_write trying to send %d bytes\n", (int)len);
if (len == 0)
return -EINVAL;
mutex_lock(&dev->lock_printer_io);
spin_lock_irqsave(&dev->lock, flags);
/* Used later to check if a printer reset happened while interrupts were back on */
dev->reset_printer = 0;
/* Check if there are any available write buffers */
if (likely(list_empty(&dev->tx_reqs))) {
/* Turn interrupts back on before sleeping. */
spin_unlock_irqrestore(&dev->lock, flags);
/*
* If no write buffers are available, check whether this
* is a non-blocking call or not.
*/
if (fd->f_flags & (O_NONBLOCK|O_NDELAY)) {
mutex_unlock(&dev->lock_printer_io);
return -EAGAIN;
}
/* Sleep until a write buffer is available */
wait_event_interruptible(dev->tx_wait,
(likely(!list_empty(&dev->tx_reqs))));
spin_lock_irqsave(&dev->lock, flags);
}
while (likely(!list_empty(&dev->tx_reqs)) && len) {
if (len > USB_BUFSIZE)
size = USB_BUFSIZE;
else
size = len;
req = container_of(dev->tx_reqs.next, struct usb_request,
list);
list_del_init(&req->list);
req->complete = tx_complete;
req->length = size;
/* Check if we need to send a zero length packet. */
if (len > size)
/* There will be more TX requests, so not yet. */
req->zero = 0;
else
/* If the data amount is an exact multiple of the
* maxpacket size then send a zero length packet to
* mark the end of the transfer.
*/
req->zero = ((len % dev->in_ep->maxpacket) == 0);
/* Don't leave irqs off while doing memory copies */
spin_unlock_irqrestore(&dev->lock, flags);
if (copy_from_user(req->buf, buf, size)) {
list_add(&req->list, &dev->tx_reqs);
mutex_unlock(&dev->lock_printer_io);
return bytes_copied;
}
bytes_copied += size;
len -= size;
buf += size;
spin_lock_irqsave(&dev->lock, flags);
/* We've disconnected or reset so free the req and buffer */
if (dev->reset_printer) {
list_add(&req->list, &dev->tx_reqs);
spin_unlock_irqrestore(&dev->lock, flags);
mutex_unlock(&dev->lock_printer_io);
return -EAGAIN;
}
if (usb_ep_queue(dev->in_ep, req, GFP_ATOMIC)) {
list_add(&req->list, &dev->tx_reqs);
spin_unlock_irqrestore(&dev->lock, flags);
mutex_unlock(&dev->lock_printer_io);
return -EAGAIN;
}
list_add(&req->list, &dev->tx_reqs_active);
}
spin_unlock_irqrestore(&dev->lock, flags);
mutex_unlock(&dev->lock_printer_io);
DBG(dev, "printer_write sent %d bytes\n", (int)bytes_copied);
if (bytes_copied) {
return bytes_copied;
} else {
return -EAGAIN;
}
}
static int
printer_fsync(struct file *fd, int datasync)
{
struct printer_dev *dev = fd->private_data;
unsigned long flags;
int tx_list_empty;
spin_lock_irqsave(&dev->lock, flags);
tx_list_empty = (likely(list_empty(&dev->tx_reqs)));
spin_unlock_irqrestore(&dev->lock, flags);
if (!tx_list_empty) {
/* Sleep until all data has been sent */
wait_event_interruptible(dev->tx_flush_wait,
(likely(list_empty(&dev->tx_reqs_active))));
}
return 0;
}
static unsigned int
printer_poll(struct file *fd, poll_table *wait)
{
struct printer_dev *dev = fd->private_data;
unsigned long flags;
int status = 0;
mutex_lock(&dev->lock_printer_io);
spin_lock_irqsave(&dev->lock, flags);
setup_rx_reqs(dev);
spin_unlock_irqrestore(&dev->lock, flags);
mutex_unlock(&dev->lock_printer_io);
poll_wait(fd, &dev->rx_wait, wait);
poll_wait(fd, &dev->tx_wait, wait);
spin_lock_irqsave(&dev->lock, flags);
if (likely(!list_empty(&dev->tx_reqs)))
status |= POLLOUT | POLLWRNORM;
if (likely(dev->current_rx_bytes) ||
likely(!list_empty(&dev->rx_buffers)))
status |= POLLIN | POLLRDNORM;
spin_unlock_irqrestore(&dev->lock, flags);
return status;
}
static long
printer_ioctl(struct file *fd, unsigned int code, unsigned long arg)
{
struct printer_dev *dev = fd->private_data;
unsigned long flags;
int status = 0;
DBG(dev, "printer_ioctl: cmd=0x%4.4x, arg=%lu\n", code, arg);
/* handle ioctls */
spin_lock_irqsave(&dev->lock, flags);
switch (code) {
case GADGET_GET_PRINTER_STATUS:
status = (int)dev->printer_status;
break;
case GADGET_SET_PRINTER_STATUS:
dev->printer_status = (u8)arg;
break;
default:
/* could not handle ioctl */
DBG(dev, "printer_ioctl: ERROR cmd=0x%4.4x is not supported\n",
code);
status = -ENOTTY;
}
spin_unlock_irqrestore(&dev->lock, flags);
return status;
}
/* used after endpoint configuration */
static const struct file_operations printer_io_operations = {
.owner = THIS_MODULE,
.open = printer_open,
.read = printer_read,
.write = printer_write,
.fsync = printer_fsync,
.poll = printer_poll,
.unlocked_ioctl = printer_ioctl,
.release = printer_close,
.llseek = noop_llseek,
};
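/*
 * Illustrative only: a minimal gadget-side userspace sketch (not part of
 * this driver) exercising the character device behind the fops above.
 * The device path is an assumption; GADGET_GET_PRINTER_STATUS returns the
 * status byte as the ioctl() return value, matching printer_ioctl() above.
 */
#if 0
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/usb/g_printer.h>

int printer_demo(void)
{
	char buf[8192];
	struct pollfd pfd;
	int status, fd = open("/dev/g_printer", O_RDWR);	/* assumed node */

	if (fd < 0)
		return -1;
	status = ioctl(fd, GADGET_GET_PRINTER_STATUS);
	fprintf(stderr, "printer status %#x\n", status);
	pfd.fd = fd;
	pfd.events = POLLIN | POLLRDNORM;
	if (poll(&pfd, 1, -1) > 0) {		/* wait for data from the host */
		ssize_t n = read(fd, buf, sizeof(buf));
		if (n > 0)
			fwrite(buf, 1, n, stdout);
	}
	close(fd);
	return 0;
}
#endif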
/*-------------------------------------------------------------------------*/
static int
set_printer_interface(struct printer_dev *dev)
{
int result = 0;
dev->in = ep_desc(dev->gadget, &hs_ep_in_desc, &fs_ep_in_desc);
dev->in_ep->driver_data = dev;
dev->out = ep_desc(dev->gadget, &hs_ep_out_desc, &fs_ep_out_desc);
dev->out_ep->driver_data = dev;
result = usb_ep_enable(dev->in_ep, dev->in);
if (result != 0) {
DBG(dev, "enable %s --> %d\n", dev->in_ep->name, result);
goto done;
}
result = usb_ep_enable(dev->out_ep, dev->out);
if (result != 0) {
DBG(dev, "enable %s --> %d\n", dev->in_ep->name, result);
goto done;
}
done:
/* on error, disable any endpoints */
if (result != 0) {
(void) usb_ep_disable(dev->in_ep);
(void) usb_ep_disable(dev->out_ep);
dev->in = NULL;
dev->out = NULL;
}
/* caller is responsible for cleanup on error */
return result;
}
static void printer_reset_interface(struct printer_dev *dev)
{
if (dev->interface < 0)
return;
DBG(dev, "%s\n", __func__);
if (dev->in)
usb_ep_disable(dev->in_ep);
if (dev->out)
usb_ep_disable(dev->out_ep);
dev->interface = -1;
}
/* change our operational config. must agree with the code
* that returns config descriptors, and altsetting code.
*/
static int
printer_set_config(struct printer_dev *dev, unsigned number)
{
int result = 0;
struct usb_gadget *gadget = dev->gadget;
switch (number) {
case DEV_CONFIG_VALUE:
result = 0;
break;
default:
result = -EINVAL;
/* FALL THROUGH */
case 0:
break;
}
if (result) {
usb_gadget_vbus_draw(dev->gadget,
dev->gadget->is_otg ? 8 : 100);
} else {
char *speed;
unsigned power;
power = 2 * config_desc.bMaxPower;
usb_gadget_vbus_draw(dev->gadget, power);
switch (gadget->speed) {
case USB_SPEED_FULL: speed = "full"; break;
#ifdef CONFIG_USB_GADGET_DUALSPEED
case USB_SPEED_HIGH: speed = "high"; break;
#endif
default: speed = "?"; break;
}
dev->config = number;
INFO(dev, "%s speed config #%d: %d mA, %s\n",
speed, number, power, driver_desc);
}
return result;
}
static int
config_buf(enum usb_device_speed speed, u8 *buf, u8 type, unsigned index,
int is_otg)
{
int len;
const struct usb_descriptor_header **function;
#ifdef CONFIG_USB_GADGET_DUALSPEED
int hs = (speed == USB_SPEED_HIGH);
if (type == USB_DT_OTHER_SPEED_CONFIG)
hs = !hs;
if (hs) {
function = hs_printer_function;
} else {
function = fs_printer_function;
}
#else
function = fs_printer_function;
#endif
if (index >= device_desc.bNumConfigurations)
return -EINVAL;
/* for now, don't advertise srp-only devices */
if (!is_otg)
function++;
len = usb_gadget_config_buf(&config_desc, buf, USB_DESC_BUFSIZE,
function);
if (len < 0)
return len;
((struct usb_config_descriptor *) buf)->bDescriptorType = type;
return len;
}
/* Change our operational Interface. */
static int
set_interface(struct printer_dev *dev, unsigned number)
{
int result = 0;
/* Free the current interface */
switch (dev->interface) {
case PRINTER_INTERFACE:
printer_reset_interface(dev);
break;
}
switch (number) {
case PRINTER_INTERFACE:
result = set_printer_interface(dev);
if (result) {
printer_reset_interface(dev);
} else {
dev->interface = PRINTER_INTERFACE;
}
break;
default:
result = -EINVAL;
/* FALL THROUGH */
}
if (!result)
INFO(dev, "Using interface %x\n", number);
return result;
}
static void printer_setup_complete(struct usb_ep *ep, struct usb_request *req)
{
if (req->status || req->actual != req->length)
DBG((struct printer_dev *) ep->driver_data,
"setup complete --> %d, %d/%d\n",
req->status, req->actual, req->length);
}
static void printer_soft_reset(struct printer_dev *dev)
{
struct usb_request *req;
INFO(dev, "Received Printer Reset Request\n");
if (usb_ep_disable(dev->in_ep))
DBG(dev, "Failed to disable USB in_ep\n");
if (usb_ep_disable(dev->out_ep))
DBG(dev, "Failed to disable USB out_ep\n");
if (dev->current_rx_req != NULL) {
list_add(&dev->current_rx_req->list, &dev->rx_reqs);
dev->current_rx_req = NULL;
}
dev->current_rx_bytes = 0;
dev->current_rx_buf = NULL;
dev->reset_printer = 1;
while (likely(!(list_empty(&dev->rx_buffers)))) {
req = container_of(dev->rx_buffers.next, struct usb_request,
list);
list_del_init(&req->list);
list_add(&req->list, &dev->rx_reqs);
}
while (likely(!(list_empty(&dev->rx_reqs_active)))) {
req = container_of(dev->rx_reqs_active.next,
struct usb_request, list);
list_del_init(&req->list);
list_add(&req->list, &dev->rx_reqs);
}
while (likely(!(list_empty(&dev->tx_reqs_active)))) {
req = container_of(dev->tx_reqs_active.next,
struct usb_request, list);
list_del_init(&req->list);
list_add(&req->list, &dev->tx_reqs);
}
if (usb_ep_enable(dev->in_ep, dev->in))
DBG(dev, "Failed to enable USB in_ep\n");
if (usb_ep_enable(dev->out_ep, dev->out))
DBG(dev, "Failed to enable USB out_ep\n");
wake_up_interruptible(&dev->rx_wait);
wake_up_interruptible(&dev->tx_wait);
wake_up_interruptible(&dev->tx_flush_wait);
}
/*-------------------------------------------------------------------------*/
/*
* The setup() callback implements all the ep0 functionality that's not
* handled lower down.
*/
static int
printer_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
{
struct printer_dev *dev = get_gadget_data(gadget);
struct usb_request *req = dev->req;
int value = -EOPNOTSUPP;
u16 wIndex = le16_to_cpu(ctrl->wIndex);
u16 wValue = le16_to_cpu(ctrl->wValue);
u16 wLength = le16_to_cpu(ctrl->wLength);
DBG(dev, "ctrl req%02x.%02x v%04x i%04x l%d\n",
ctrl->bRequestType, ctrl->bRequest, wValue, wIndex, wLength);
req->complete = printer_setup_complete;
switch (ctrl->bRequestType&USB_TYPE_MASK) {
case USB_TYPE_STANDARD:
switch (ctrl->bRequest) {
case USB_REQ_GET_DESCRIPTOR:
if (ctrl->bRequestType != USB_DIR_IN)
break;
switch (wValue >> 8) {
case USB_DT_DEVICE:
value = min(wLength, (u16) sizeof device_desc);
memcpy(req->buf, &device_desc, value);
break;
#ifdef CONFIG_USB_GADGET_DUALSPEED
case USB_DT_DEVICE_QUALIFIER:
if (!gadget->is_dualspeed)
break;
value = min(wLength,
(u16) sizeof dev_qualifier);
memcpy(req->buf, &dev_qualifier, value);
break;
case USB_DT_OTHER_SPEED_CONFIG:
if (!gadget->is_dualspeed)
break;
/* FALLTHROUGH */
#endif /* CONFIG_USB_GADGET_DUALSPEED */
case USB_DT_CONFIG:
value = config_buf(gadget->speed, req->buf,
wValue >> 8,
wValue & 0xff,
gadget->is_otg);
if (value >= 0)
value = min(wLength, (u16) value);
break;
case USB_DT_STRING:
value = usb_gadget_get_string(&stringtab,
wValue & 0xff, req->buf);
if (value >= 0)
value = min(wLength, (u16) value);
break;
}
break;
case USB_REQ_SET_CONFIGURATION:
if (ctrl->bRequestType != 0)
break;
if (gadget->a_hnp_support)
DBG(dev, "HNP available\n");
else if (gadget->a_alt_hnp_support)
DBG(dev, "HNP needs a different root port\n");
value = printer_set_config(dev, wValue);
if (!value)
value = set_interface(dev, PRINTER_INTERFACE);
break;
case USB_REQ_GET_CONFIGURATION:
if (ctrl->bRequestType != USB_DIR_IN)
break;
*(u8 *)req->buf = dev->config;
value = min(wLength, (u16) 1);
break;
case USB_REQ_SET_INTERFACE:
if (ctrl->bRequestType != USB_RECIP_INTERFACE ||
!dev->config)
break;
value = set_interface(dev, PRINTER_INTERFACE);
break;
case USB_REQ_GET_INTERFACE:
if (ctrl->bRequestType !=
(USB_DIR_IN|USB_RECIP_INTERFACE)
|| !dev->config)
break;
*(u8 *)req->buf = dev->interface;
value = min(wLength, (u16) 1);
break;
default:
goto unknown;
}
break;
case USB_TYPE_CLASS:
switch (ctrl->bRequest) {
case 0: /* Get the IEEE-1284 PNP String */
/* Only one printer interface is supported. */
if ((wIndex >> 8) != PRINTER_INTERFACE)
break;
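/*
 * The IEEE 1284 device ID is length-prefixed: its first two
 * bytes hold the total length, big-endian, including the two
 * length bytes themselves.
 */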
value = (pnp_string[0] << 8) | pnp_string[1];
memcpy(req->buf, pnp_string, value);
DBG(dev, "1284 PNP String: %x %s\n", value,
&pnp_string[2]);
break;
case 1: /* Get Port Status */
/* Only one printer interface is supported. */
if (wIndex != PRINTER_INTERFACE)
break;
*(u8 *)req->buf = dev->printer_status;
value = min(wLength, (u16) 1);
break;
case 2: /* Soft Reset */
/* Only one printer interface is supported. */
if (wIndex != PRINTER_INTERFACE)
break;
printer_soft_reset(dev);
value = 0;
break;
default:
goto unknown;
}
break;
default:
unknown:
VDBG(dev,
"unknown ctrl req%02x.%02x v%04x i%04x l%d\n",
ctrl->bRequestType, ctrl->bRequest,
wValue, wIndex, wLength);
break;
}
/* respond with data transfer before status phase? */
if (value >= 0) {
req->length = value;
req->zero = value < wLength;
value = usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);
if (value < 0) {
DBG(dev, "ep_queue --> %d\n", value);
req->status = 0;
printer_setup_complete(gadget->ep0, req);
}
}
/* host either stalls (value < 0) or reports success */
return value;
}
static void
printer_disconnect(struct usb_gadget *gadget)
{
struct printer_dev *dev = get_gadget_data(gadget);
unsigned long flags;
DBG(dev, "%s\n", __func__);
spin_lock_irqsave(&dev->lock, flags);
printer_reset_interface(dev);
spin_unlock_irqrestore(&dev->lock, flags);
}
static void
printer_unbind(struct usb_gadget *gadget)
{
struct printer_dev *dev = get_gadget_data(gadget);
struct usb_request *req;
DBG(dev, "%s\n", __func__);
/* Remove sysfs files */
device_destroy(usb_gadget_class, g_printer_devno);
/* Remove Character Device */
cdev_del(&dev->printer_cdev);
/* we must already have been disconnected ... no i/o may be active */
WARN_ON(!list_empty(&dev->tx_reqs_active));
WARN_ON(!list_empty(&dev->rx_reqs_active));
/* Free all memory for this driver. */
while (!list_empty(&dev->tx_reqs)) {
req = container_of(dev->tx_reqs.next, struct usb_request,
list);
list_del(&req->list);
printer_req_free(dev->in_ep, req);
}
if (dev->current_rx_req != NULL)
printer_req_free(dev->out_ep, dev->current_rx_req);
while (!list_empty(&dev->rx_reqs)) {
req = container_of(dev->rx_reqs.next,
struct usb_request, list);
list_del(&req->list);
printer_req_free(dev->out_ep, req);
}
while (!list_empty(&dev->rx_buffers)) {
req = container_of(dev->rx_buffers.next,
struct usb_request, list);
list_del(&req->list);
printer_req_free(dev->out_ep, req);
}
if (dev->req) {
printer_req_free(gadget->ep0, dev->req);
dev->req = NULL;
}
set_gadget_data(gadget, NULL);
}
static int __init
printer_bind(struct usb_gadget *gadget)
{
struct printer_dev *dev;
struct usb_ep *in_ep, *out_ep;
int status = -ENOMEM;
int gcnum;
size_t len;
u32 i;
struct usb_request *req;
dev = &usb_printer_gadget;
/* Setup the sysfs files for the printer gadget. */
dev->pdev = device_create(usb_gadget_class, NULL, g_printer_devno,
NULL, "g_printer");
if (IS_ERR(dev->pdev)) {
ERROR(dev, "Failed to create device: g_printer\n");
goto fail;
}
/*
* Register a character device as an interface to a user mode
* program that handles the printer specific functionality.
*/
cdev_init(&dev->printer_cdev, &printer_io_operations);
dev->printer_cdev.owner = THIS_MODULE;
status = cdev_add(&dev->printer_cdev, g_printer_devno, 1);
if (status) {
ERROR(dev, "Failed to open char device\n");
goto fail;
}
gcnum = usb_gadget_controller_number(gadget);
if (gcnum >= 0) {
device_desc.bcdDevice = cpu_to_le16(0x0200 + gcnum);
} else {
dev_warn(&gadget->dev, "controller '%s' not recognized\n",
gadget->name);
/* unrecognized, but safe unless bulk is REALLY quirky */
device_desc.bcdDevice =
cpu_to_le16(0xFFFF);
}
snprintf(manufacturer, sizeof(manufacturer), "%s %s with %s",
init_utsname()->sysname, init_utsname()->release,
gadget->name);
device_desc.idVendor =
cpu_to_le16(PRINTER_VENDOR_NUM);
device_desc.idProduct =
cpu_to_le16(PRINTER_PRODUCT_NUM);
/* support optional vendor/distro customization */
if (idVendor) {
if (!idProduct) {
dev_err(&gadget->dev, "idVendor needs idProduct!\n");
return -ENODEV;
}
device_desc.idVendor = cpu_to_le16(idVendor);
device_desc.idProduct = cpu_to_le16(idProduct);
if (bcdDevice)
device_desc.bcdDevice = cpu_to_le16(bcdDevice);
}
if (iManufacturer)
strlcpy(manufacturer, iManufacturer, sizeof manufacturer);
if (iProduct)
strlcpy(product_desc, iProduct, sizeof product_desc);
if (iSerialNum)
strlcpy(serial_num, iSerialNum, sizeof serial_num);
if (iPNPstring)
strlcpy(&pnp_string[2], iPNPstring, (sizeof pnp_string)-2);
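/*
 * Record the total device-ID length (which, per IEEE 1284, includes
 * the two length bytes themselves) big-endian at the front.
 */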
len = strlen(pnp_string);
pnp_string[0] = (len >> 8) & 0xFF;
pnp_string[1] = len & 0xFF;
/* all we really need is bulk IN/OUT */
usb_ep_autoconfig_reset(gadget);
in_ep = usb_ep_autoconfig(gadget, &fs_ep_in_desc);
if (!in_ep) {
autoconf_fail:
dev_err(&gadget->dev, "can't autoconfigure on %s\n",
gadget->name);
return -ENODEV;
}
in_ep->driver_data = in_ep; /* claim */
out_ep = usb_ep_autoconfig(gadget, &fs_ep_out_desc);
if (!out_ep)
goto autoconf_fail;
out_ep->driver_data = out_ep; /* claim */
#ifdef CONFIG_USB_GADGET_DUALSPEED
/* assumes ep0 uses the same value for both speeds ... */
dev_qualifier.bMaxPacketSize0 = device_desc.bMaxPacketSize0;
/* and that all endpoints are dual-speed */
hs_ep_in_desc.bEndpointAddress = fs_ep_in_desc.bEndpointAddress;
hs_ep_out_desc.bEndpointAddress = fs_ep_out_desc.bEndpointAddress;
#endif /* DUALSPEED */
device_desc.bMaxPacketSize0 = gadget->ep0->maxpacket;
usb_gadget_set_selfpowered(gadget);
if (gadget->is_otg) {
otg_desc.bmAttributes |= USB_OTG_HNP;
config_desc.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
}
spin_lock_init(&dev->lock);
mutex_init(&dev->lock_printer_io);
INIT_LIST_HEAD(&dev->tx_reqs);
INIT_LIST_HEAD(&dev->tx_reqs_active);
INIT_LIST_HEAD(&dev->rx_reqs);
INIT_LIST_HEAD(&dev->rx_reqs_active);
INIT_LIST_HEAD(&dev->rx_buffers);
init_waitqueue_head(&dev->rx_wait);
init_waitqueue_head(&dev->tx_wait);
init_waitqueue_head(&dev->tx_flush_wait);
dev->config = 0;
dev->interface = -1;
dev->printer_cdev_open = 0;
dev->printer_status = PRINTER_NOT_ERROR;
dev->current_rx_req = NULL;
dev->current_rx_bytes = 0;
dev->current_rx_buf = NULL;
dev->in_ep = in_ep;
dev->out_ep = out_ep;
/* preallocate control message data and buffer */
dev->req = printer_req_alloc(gadget->ep0, USB_DESC_BUFSIZE,
GFP_KERNEL);
if (!dev->req) {
status = -ENOMEM;
goto fail;
}
for (i = 0; i < QLEN; i++) {
req = printer_req_alloc(dev->in_ep, USB_BUFSIZE, GFP_KERNEL);
if (!req) {
while (!list_empty(&dev->tx_reqs)) {
req = container_of(dev->tx_reqs.next,
struct usb_request, list);
list_del(&req->list);
printer_req_free(dev->in_ep, req);
}
return -ENOMEM;
}
list_add(&req->list, &dev->tx_reqs);
}
for (i = 0; i < QLEN; i++) {
req = printer_req_alloc(dev->out_ep, USB_BUFSIZE, GFP_KERNEL);
if (!req) {
while (!list_empty(&dev->rx_reqs)) {
req = container_of(dev->rx_reqs.next,
struct usb_request, list);
list_del(&req->list);
printer_req_free(dev->out_ep, req);
}
return -ENOMEM;
}
list_add(&req->list, &dev->rx_reqs);
}
dev->req->complete = printer_setup_complete;
/* finish hookup to lower layer ... */
dev->gadget = gadget;
set_gadget_data(gadget, dev);
gadget->ep0->driver_data = dev;
INFO(dev, "%s, version: " DRIVER_VERSION "\n", driver_desc);
INFO(dev, "using %s, OUT %s IN %s\n", gadget->name, out_ep->name,
in_ep->name);
return 0;
fail:
printer_unbind(gadget);
return status;
}
/*-------------------------------------------------------------------------*/
static struct usb_gadget_driver printer_driver = {
.speed = DEVSPEED,
.function = (char *) driver_desc,
.unbind = printer_unbind,
.setup = printer_setup,
.disconnect = printer_disconnect,
.driver = {
.name = (char *) shortname,
.owner = THIS_MODULE,
},
};
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Craig Nadler");
MODULE_LICENSE("GPL");
static int __init
init(void)
{
int status;
usb_gadget_class = class_create(THIS_MODULE, "usb_printer_gadget");
if (IS_ERR(usb_gadget_class)) {
status = PTR_ERR(usb_gadget_class);
ERROR(dev, "unable to create usb_gadget class %d\n", status);
return status;
}
status = alloc_chrdev_region(&g_printer_devno, 0, 1,
"USB printer gadget");
if (status) {
ERROR(dev, "alloc_chrdev_region %d\n", status);
class_destroy(usb_gadget_class);
return status;
}
status = usb_gadget_probe_driver(&printer_driver, printer_bind);
if (status) {
class_destroy(usb_gadget_class);
unregister_chrdev_region(g_printer_devno, 1);
DBG(dev, "usb_gadget_probe_driver %x\n", status);
}
return status;
}
module_init(init);
static void __exit
cleanup(void)
{
int status;
mutex_lock(&usb_printer_gadget.lock_printer_io);
status = usb_gadget_unregister_driver(&printer_driver);
if (status)
ERROR(dev, "usb_gadget_unregister_driver %x\n", status);
unregister_chrdev_region(g_printer_devno, 1);
class_destroy(usb_gadget_class);
mutex_unlock(&usb_printer_gadget.lock_printer_io);
}
module_exit(cleanup);
| gpl-2.0 |
bugralevent/linux | drivers/media/rc/sunxi-cir.c | 839 | 9110 | /*
* Driver for Allwinner sunXi IR controller
*
* Copyright (C) 2014 Alexsey Shestacov <wingrime@linux-sunxi.org>
* Copyright (C) 2014 Alexander Bersenev <bay@hackerdom.ru>
*
* Based on sun5i-ir.c:
* Copyright (C) 2007-2012 Daniel Wang
* Allwinner Technology Co., Ltd. <www.allwinnertech.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/reset.h>
#include <media/rc-core.h>
#define SUNXI_IR_DEV "sunxi-ir"
/* Registers */
/* IR Control */
#define SUNXI_IR_CTL_REG 0x00
/* Global Enable */
#define REG_CTL_GEN BIT(0)
/* RX block enable */
#define REG_CTL_RXEN BIT(1)
/* CIR mode */
#define REG_CTL_MD (BIT(4) | BIT(5))
/* Rx Config */
#define SUNXI_IR_RXCTL_REG 0x10
/* Pulse Polarity Invert flag */
#define REG_RXCTL_RPPI BIT(2)
/* Rx Data */
#define SUNXI_IR_RXFIFO_REG 0x20
/* Rx Interrupt Enable */
#define SUNXI_IR_RXINT_REG 0x2C
/* Rx FIFO Overflow */
#define REG_RXINT_ROI_EN BIT(0)
/* Rx Packet End */
#define REG_RXINT_RPEI_EN BIT(1)
/* Rx FIFO Data Available */
#define REG_RXINT_RAI_EN BIT(4)
/* Rx FIFO available byte level */
#define REG_RXINT_RAL(val) ((val) << 8)
/* Rx Interrupt Status */
#define SUNXI_IR_RXSTA_REG 0x30
/* RX FIFO Get Available Counter */
#define REG_RXSTA_GET_AC(val) (((val) >> 8) & (ir->fifo_size * 2 - 1))
/* Clear all interrupt status value */
#define REG_RXSTA_CLEARALL 0xff
/* IR Sample Config */
#define SUNXI_IR_CIR_REG 0x34
/* CIR_REG register noise threshold */
#define REG_CIR_NTHR(val) (((val) << 2) & (GENMASK(7, 2)))
/* CIR_REG register idle threshold */
#define REG_CIR_ITHR(val) (((val) << 8) & (GENMASK(15, 8)))
/* Required frequency for IR0 or IR1 clock in CIR mode */
#define SUNXI_IR_BASE_CLK 8000000
/* Frequency after IR internal divider */
#define SUNXI_IR_CLK (SUNXI_IR_BASE_CLK / 64)
/* Sample period in ns */
#define SUNXI_IR_SAMPLE (1000000000ul / SUNXI_IR_CLK)
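/* 8 MHz / 64 = 125 kHz, i.e. one sample every 8000 ns */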
/* Noise threshold in samples */
#define SUNXI_IR_RXNOISE 1
/* Idle Threshold in samples */
#define SUNXI_IR_RXIDLE 20
/* Time after which device stops sending data in ms */
#define SUNXI_IR_TIMEOUT 120
struct sunxi_ir {
spinlock_t ir_lock;
struct rc_dev *rc;
void __iomem *base;
int irq;
int fifo_size;
struct clk *clk;
struct clk *apb_clk;
struct reset_control *rst;
const char *map_name;
};
static irqreturn_t sunxi_ir_irq(int irqno, void *dev_id)
{
unsigned long status;
unsigned char dt;
unsigned int cnt, rc;
struct sunxi_ir *ir = dev_id;
DEFINE_IR_RAW_EVENT(rawir);
spin_lock(&ir->ir_lock);
status = readl(ir->base + SUNXI_IR_RXSTA_REG);
/* clear all pending status bits */
writel(status | REG_RXSTA_CLEARALL, ir->base + SUNXI_IR_RXSTA_REG);
if (status & (REG_RXINT_RAI_EN | REG_RXINT_RPEI_EN)) {
/* How many bytes are waiting in the FIFO */
rc = REG_RXSTA_GET_AC(status);
/* Sanity check */
rc = rc > ir->fifo_size ? ir->fifo_size : rc;
/* If we have data */
for (cnt = 0; cnt < rc; cnt++) {
/* Each FIFO byte encodes one sample run: bit 7 is the
 * pulse/space flag, bits 6..0 the duration in sample periods. */
dt = readb(ir->base + SUNXI_IR_RXFIFO_REG);
rawir.pulse = (dt & 0x80) != 0;
rawir.duration = ((dt & 0x7f) + 1) * SUNXI_IR_SAMPLE;
ir_raw_event_store_with_filter(ir->rc, &rawir);
}
}
if (status & REG_RXINT_ROI_EN) {
ir_raw_event_reset(ir->rc);
} else if (status & REG_RXINT_RPEI_EN) {
ir_raw_event_set_idle(ir->rc, true);
ir_raw_event_handle(ir->rc);
}
spin_unlock(&ir->ir_lock);
return IRQ_HANDLED;
}
static int sunxi_ir_probe(struct platform_device *pdev)
{
int ret = 0;
unsigned long tmp = 0;
struct device *dev = &pdev->dev;
struct device_node *dn = dev->of_node;
struct resource *res;
struct sunxi_ir *ir;
ir = devm_kzalloc(dev, sizeof(struct sunxi_ir), GFP_KERNEL);
if (!ir)
return -ENOMEM;
if (of_device_is_compatible(dn, "allwinner,sun5i-a13-ir"))
ir->fifo_size = 64;
else
ir->fifo_size = 16;
/* Clock */
ir->apb_clk = devm_clk_get(dev, "apb");
if (IS_ERR(ir->apb_clk)) {
dev_err(dev, "failed to get a apb clock.\n");
return PTR_ERR(ir->apb_clk);
}
ir->clk = devm_clk_get(dev, "ir");
if (IS_ERR(ir->clk)) {
dev_err(dev, "failed to get a ir clock.\n");
return PTR_ERR(ir->clk);
}
/* Reset (optional) */
ir->rst = devm_reset_control_get_optional(dev, NULL);
if (IS_ERR(ir->rst)) {
ret = PTR_ERR(ir->rst);
if (ret == -EPROBE_DEFER)
return ret;
ir->rst = NULL;
} else {
ret = reset_control_deassert(ir->rst);
if (ret)
return ret;
}
ret = clk_set_rate(ir->clk, SUNXI_IR_BASE_CLK);
if (ret) {
dev_err(dev, "set ir base clock failed!\n");
goto exit_reset_assert;
}
if (clk_prepare_enable(ir->apb_clk)) {
dev_err(dev, "try to enable apb_ir_clk failed\n");
ret = -EINVAL;
goto exit_reset_assert;
}
if (clk_prepare_enable(ir->clk)) {
dev_err(dev, "try to enable ir_clk failed\n");
ret = -EINVAL;
goto exit_clkdisable_apb_clk;
}
/* IO */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ir->base = devm_ioremap_resource(dev, res);
if (IS_ERR(ir->base)) {
dev_err(dev, "failed to map registers\n");
ret = PTR_ERR(ir->base);
goto exit_clkdisable_clk;
}
ir->rc = rc_allocate_device();
if (!ir->rc) {
dev_err(dev, "failed to allocate device\n");
ret = -ENOMEM;
goto exit_clkdisable_clk;
}
ir->rc->priv = ir;
ir->rc->input_name = SUNXI_IR_DEV;
ir->rc->input_phys = "sunxi-ir/input0";
ir->rc->input_id.bustype = BUS_HOST;
ir->rc->input_id.vendor = 0x0001;
ir->rc->input_id.product = 0x0001;
ir->rc->input_id.version = 0x0100;
ir->map_name = of_get_property(dn, "linux,rc-map-name", NULL);
ir->rc->map_name = ir->map_name ?: RC_MAP_EMPTY;
ir->rc->dev.parent = dev;
ir->rc->driver_type = RC_DRIVER_IR_RAW;
ir->rc->allowed_protocols = RC_BIT_ALL;
ir->rc->rx_resolution = SUNXI_IR_SAMPLE;
ir->rc->timeout = MS_TO_NS(SUNXI_IR_TIMEOUT);
ir->rc->driver_name = SUNXI_IR_DEV;
ret = rc_register_device(ir->rc);
if (ret) {
dev_err(dev, "failed to register rc device\n");
goto exit_free_dev;
}
platform_set_drvdata(pdev, ir);
/* IRQ */
ir->irq = platform_get_irq(pdev, 0);
if (ir->irq < 0) {
dev_err(dev, "no irq resource\n");
ret = ir->irq;
goto exit_free_dev;
}
ret = devm_request_irq(dev, ir->irq, sunxi_ir_irq, 0, SUNXI_IR_DEV, ir);
if (ret) {
dev_err(dev, "failed request irq\n");
goto exit_free_dev;
}
/* Enable CIR Mode */
writel(REG_CTL_MD, ir->base + SUNXI_IR_CTL_REG);
/* Set noise threshold and idle threshold */
writel(REG_CIR_NTHR(SUNXI_IR_RXNOISE) | REG_CIR_ITHR(SUNXI_IR_RXIDLE),
ir->base + SUNXI_IR_CIR_REG);
/* Invert Input Signal */
writel(REG_RXCTL_RPPI, ir->base + SUNXI_IR_RXCTL_REG);
/* Clear All Rx Interrupt Status */
writel(REG_RXSTA_CLEARALL, ir->base + SUNXI_IR_RXSTA_REG);
/*
* Enable IRQ on overflow, packet end, FIFO available with trigger
* level
*/
writel(REG_RXINT_ROI_EN | REG_RXINT_RPEI_EN |
REG_RXINT_RAI_EN | REG_RXINT_RAL(ir->fifo_size / 2 - 1),
ir->base + SUNXI_IR_RXINT_REG);
/* Enable IR Module */
tmp = readl(ir->base + SUNXI_IR_CTL_REG);
writel(tmp | REG_CTL_GEN | REG_CTL_RXEN, ir->base + SUNXI_IR_CTL_REG);
dev_info(dev, "initialized sunXi IR driver\n");
return 0;
exit_free_dev:
rc_free_device(ir->rc);
exit_clkdisable_clk:
clk_disable_unprepare(ir->clk);
exit_clkdisable_apb_clk:
clk_disable_unprepare(ir->apb_clk);
exit_reset_assert:
if (ir->rst)
reset_control_assert(ir->rst);
return ret;
}
static int sunxi_ir_remove(struct platform_device *pdev)
{
unsigned long flags;
struct sunxi_ir *ir = platform_get_drvdata(pdev);
clk_disable_unprepare(ir->clk);
clk_disable_unprepare(ir->apb_clk);
if (ir->rst)
reset_control_assert(ir->rst);
spin_lock_irqsave(&ir->ir_lock, flags);
/* disable IR IRQ */
writel(0, ir->base + SUNXI_IR_RXINT_REG);
/* clear All Rx Interrupt Status */
writel(REG_RXSTA_CLEARALL, ir->base + SUNXI_IR_RXSTA_REG);
/* disable IR */
writel(0, ir->base + SUNXI_IR_CTL_REG);
spin_unlock_irqrestore(&ir->ir_lock, flags);
rc_unregister_device(ir->rc);
return 0;
}
static const struct of_device_id sunxi_ir_match[] = {
{ .compatible = "allwinner,sun4i-a10-ir", },
{ .compatible = "allwinner,sun5i-a13-ir", },
{},
};
static struct platform_driver sunxi_ir_driver = {
.probe = sunxi_ir_probe,
.remove = sunxi_ir_remove,
.driver = {
.name = SUNXI_IR_DEV,
.of_match_table = sunxi_ir_match,
},
};
module_platform_driver(sunxi_ir_driver);
MODULE_DESCRIPTION("Allwinner sunXi IR controller driver");
MODULE_AUTHOR("Alexsey Shestacov <wingrime@linux-sunxi.org>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
rminnich/linux | arch/mips/pmcs-msp71xx/msp_smp.c | 839 | 2130 | /*
* Copyright (C) 2000, 2001, 2004 MIPS Technologies, Inc.
* Copyright (C) 2001 Ralf Baechle
* Copyright (C) 2010 PMC-Sierra, Inc.
*
* VSMP support for MSP platforms. Derived from Malta VSMP support.
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
*/
#include <linux/smp.h>
#include <linux/interrupt.h>
#ifdef CONFIG_MIPS_MT_SMP
#define MIPS_CPU_IPI_RESCHED_IRQ 0 /* SW int 0 for resched */
#define MIPS_CPU_IPI_CALL_IRQ 1 /* SW int 1 for call */
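/*
 * MIPS cores provide two software interrupt lines (SW0/SW1); they are
 * used here as the reschedule and call-function IPIs respectively.
 */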
static void ipi_resched_dispatch(void)
{
do_IRQ(MIPS_CPU_IPI_RESCHED_IRQ);
}
static void ipi_call_dispatch(void)
{
do_IRQ(MIPS_CPU_IPI_CALL_IRQ);
}
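/*
 * The reschedule IPI handler is intentionally empty: merely taking the
 * interrupt forces a pass through the interrupt return path, where the
 * need_resched test does the real work.
 */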
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
return IRQ_HANDLED;
}
static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
static struct irqaction irq_resched = {
.handler = ipi_resched_interrupt,
.flags = IRQF_PERCPU,
.name = "IPI_resched"
};
static struct irqaction irq_call = {
.handler = ipi_call_interrupt,
.flags = IRQF_PERCPU,
.name = "IPI_call"
};
void __init arch_init_ipiirq(int irq, struct irqaction *action)
{
setup_irq(irq, action);
irq_set_handler(irq, handle_percpu_irq);
}
void __init msp_vsmp_int_init(void)
{
set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
arch_init_ipiirq(MIPS_CPU_IPI_RESCHED_IRQ, &irq_resched);
arch_init_ipiirq(MIPS_CPU_IPI_CALL_IRQ, &irq_call);
}
#endif /* CONFIG_MIPS_MT_SMP */
| gpl-2.0 |
aopp/android_kernel_google_msm | net/bluetooth/hci_sysfs.c | 1095 | 13763 | /* Bluetooth HCI driver model support. */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
static struct class *bt_class;
struct dentry *bt_debugfs;
EXPORT_SYMBOL_GPL(bt_debugfs);
static inline char *link_typetostr(int type)
{
switch (type) {
case ACL_LINK:
return "ACL";
case SCO_LINK:
return "SCO";
case ESCO_LINK:
return "eSCO";
default:
return "UNKNOWN";
}
}
static ssize_t show_link_type(struct device *dev, struct device_attribute *attr, char *buf)
{
struct hci_conn *conn = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", link_typetostr(conn->type));
}
static ssize_t show_link_address(struct device *dev, struct device_attribute *attr, char *buf)
{
struct hci_conn *conn = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", batostr(&conn->dst));
}
static ssize_t show_link_features(struct device *dev, struct device_attribute *attr, char *buf)
{
struct hci_conn *conn = dev_get_drvdata(dev);
return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
conn->features[0], conn->features[1],
conn->features[2], conn->features[3],
conn->features[4], conn->features[5],
conn->features[6], conn->features[7]);
}
#define LINK_ATTR(_name, _mode, _show, _store) \
struct device_attribute link_attr_##_name = __ATTR(_name, _mode, _show, _store)
static LINK_ATTR(type, S_IRUGO, show_link_type, NULL);
static LINK_ATTR(address, S_IRUGO, show_link_address, NULL);
static LINK_ATTR(features, S_IRUGO, show_link_features, NULL);
static struct attribute *bt_link_attrs[] = {
&link_attr_type.attr,
&link_attr_address.attr,
&link_attr_features.attr,
NULL
};
static struct attribute_group bt_link_group = {
.attrs = bt_link_attrs,
};
static const struct attribute_group *bt_link_groups[] = {
&bt_link_group,
NULL
};
static void bt_link_release(struct device *dev)
{
void *data = dev_get_drvdata(dev);
kfree(data);
}
static struct device_type bt_link = {
.name = "link",
.groups = bt_link_groups,
.release = bt_link_release,
};
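/*
 * Connection sysfs registration and removal are deferred to the hdev
 * workqueue so that device_add()/device_del() run in process context;
 * the hci_conn_*_sysfs() helpers may be called from atomic context.
 */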
static void add_conn(struct work_struct *work)
{
struct hci_conn *conn = container_of(work, struct hci_conn, work_add);
struct hci_dev *hdev = conn->hdev;
dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
dev_set_drvdata(&conn->dev, conn);
if (device_add(&conn->dev) < 0) {
BT_ERR("Failed to register connection device");
return;
}
hci_dev_hold(hdev);
}
/*
* The rfcomm tty device may remain even after the connection is down,
* and sysfs does not support moving a zombie device, so move the tty
* device out before the connection device is destroyed.
*/
static int __match_tty(struct device *dev, void *data)
{
return !strncmp(dev_name(dev), "rfcomm", 6);
}
static void del_conn(struct work_struct *work)
{
struct hci_conn *conn = container_of(work, struct hci_conn, work_del);
struct hci_dev *hdev = conn->hdev;
if (!device_is_registered(&conn->dev))
return;
while (1) {
struct device *dev;
dev = device_find_child(&conn->dev, NULL, __match_tty);
if (!dev)
break;
device_move(dev, NULL, DPM_ORDER_DEV_LAST);
put_device(dev);
}
device_del(&conn->dev);
put_device(&conn->dev);
hci_dev_put(hdev);
}
void hci_conn_init_sysfs(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
BT_DBG("conn %p", conn);
conn->dev.type = &bt_link;
conn->dev.class = bt_class;
conn->dev.parent = &hdev->dev;
device_initialize(&conn->dev);
INIT_WORK(&conn->work_add, add_conn);
INIT_WORK(&conn->work_del, del_conn);
}
void hci_conn_add_sysfs(struct hci_conn *conn)
{
BT_DBG("conn %p", conn);
queue_work(conn->hdev->workqueue, &conn->work_add);
}
void hci_conn_del_sysfs(struct hci_conn *conn)
{
BT_DBG("conn %p", conn);
if (conn->hdev)
queue_work(conn->hdev->workqueue, &conn->work_del);
}
static inline char *host_bustostr(int bus)
{
switch (bus) {
case HCI_VIRTUAL:
return "VIRTUAL";
case HCI_USB:
return "USB";
case HCI_PCCARD:
return "PCCARD";
case HCI_UART:
return "UART";
case HCI_RS232:
return "RS232";
case HCI_PCI:
return "PCI";
case HCI_SDIO:
return "SDIO";
default:
return "UNKNOWN";
}
}
static inline char *host_typetostr(int type)
{
switch (type) {
case HCI_BREDR:
return "BR/EDR";
case HCI_AMP:
return "AMP";
default:
return "UNKNOWN";
}
}
static ssize_t show_bus(struct device *dev, struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", host_bustostr(hdev->bus));
}
static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", host_typetostr(hdev->dev_type));
}
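/* hdev->dev_name is not guaranteed to be NUL-terminated, so copy it
 * into a local buffer and terminate it before printing. */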
static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = dev_get_drvdata(dev);
char name[HCI_MAX_NAME_LENGTH + 1];
int i;
for (i = 0; i < HCI_MAX_NAME_LENGTH; i++)
name[i] = hdev->dev_name[i];
name[HCI_MAX_NAME_LENGTH] = '\0';
return sprintf(buf, "%s\n", name);
}
static ssize_t show_class(struct device *dev, struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = dev_get_drvdata(dev);
return sprintf(buf, "0x%.2x%.2x%.2x\n",
hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}
static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", batostr(&hdev->bdaddr));
}
static ssize_t show_features(struct device *dev, struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = dev_get_drvdata(dev);
return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
hdev->features[0], hdev->features[1],
hdev->features[2], hdev->features[3],
hdev->features[4], hdev->features[5],
hdev->features[6], hdev->features[7]);
}
static ssize_t show_manufacturer(struct device *dev, struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", hdev->manufacturer);
}
static ssize_t show_hci_version(struct device *dev, struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", hdev->hci_ver);
}
static ssize_t show_hci_revision(struct device *dev, struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", hdev->hci_rev);
}
static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", hdev->idle_timeout);
}
static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct hci_dev *hdev = dev_get_drvdata(dev);
unsigned int val;
int rv;
rv = kstrtouint(buf, 0, &val);
if (rv < 0)
return rv;
if (val != 0 && (val < 500 || val > 3600000))
return -EINVAL;
hdev->idle_timeout = val;
return count;
}
static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", hdev->sniff_max_interval);
}
static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct hci_dev *hdev = dev_get_drvdata(dev);
u16 val;
int rv;
rv = kstrtou16(buf, 0, &val);
if (rv < 0)
return rv;
if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
return -EINVAL;
hdev->sniff_max_interval = val;
return count;
}
static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", hdev->sniff_min_interval);
}
static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct hci_dev *hdev = dev_get_drvdata(dev);
u16 val;
int rv;
rv = kstrtou16(buf, 0, &val);
if (rv < 0)
return rv;
if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
return -EINVAL;
hdev->sniff_min_interval = val;
return count;
}
static DEVICE_ATTR(bus, S_IRUGO, show_bus, NULL);
static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
static DEVICE_ATTR(class, S_IRUGO, show_class, NULL);
static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
static DEVICE_ATTR(features, S_IRUGO, show_features, NULL);
static DEVICE_ATTR(manufacturer, S_IRUGO, show_manufacturer, NULL);
static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL);
static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL);
static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR,
show_idle_timeout, store_idle_timeout);
static DEVICE_ATTR(sniff_max_interval, S_IRUGO | S_IWUSR,
show_sniff_max_interval, store_sniff_max_interval);
static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR,
show_sniff_min_interval, store_sniff_min_interval);
static struct attribute *bt_host_attrs[] = {
&dev_attr_bus.attr,
&dev_attr_type.attr,
&dev_attr_name.attr,
&dev_attr_class.attr,
&dev_attr_address.attr,
&dev_attr_features.attr,
&dev_attr_manufacturer.attr,
&dev_attr_hci_version.attr,
&dev_attr_hci_revision.attr,
&dev_attr_idle_timeout.attr,
&dev_attr_sniff_max_interval.attr,
&dev_attr_sniff_min_interval.attr,
NULL
};
static struct attribute_group bt_host_group = {
.attrs = bt_host_attrs,
};
static const struct attribute_group *bt_host_groups[] = {
&bt_host_group,
NULL
};
static void bt_host_release(struct device *dev)
{
void *data = dev_get_drvdata(dev);
kfree(data);
}
static struct device_type bt_host = {
.name = "host",
.groups = bt_host_groups,
.release = bt_host_release,
};
static int inquiry_cache_show(struct seq_file *f, void *p)
{
struct hci_dev *hdev = f->private;
struct inquiry_cache *cache = &hdev->inq_cache;
struct inquiry_entry *e;
hci_dev_lock_bh(hdev);
for (e = cache->list; e; e = e->next) {
struct inquiry_data *data = &e->data;
seq_printf(f, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
batostr(&data->bdaddr),
data->pscan_rep_mode, data->pscan_period_mode,
data->pscan_mode, data->dev_class[2],
data->dev_class[1], data->dev_class[0],
__le16_to_cpu(data->clock_offset),
data->rssi, data->ssp_mode, e->timestamp);
}
hci_dev_unlock_bh(hdev);
return 0;
}
static int inquiry_cache_open(struct inode *inode, struct file *file)
{
return single_open(file, inquiry_cache_show, inode->i_private);
}
static const struct file_operations inquiry_cache_fops = {
.open = inquiry_cache_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int blacklist_show(struct seq_file *f, void *p)
{
struct hci_dev *hdev = f->private;
struct list_head *l;
hci_dev_lock_bh(hdev);
list_for_each(l, &hdev->blacklist) {
struct bdaddr_list *b;
b = list_entry(l, struct bdaddr_list, list);
seq_printf(f, "%s\n", batostr(&b->bdaddr));
}
hci_dev_unlock_bh(hdev);
return 0;
}
static int blacklist_open(struct inode *inode, struct file *file)
{
return single_open(file, blacklist_show, inode->i_private);
}
static const struct file_operations blacklist_fops = {
.open = blacklist_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
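/*
 * A 128-bit Bluetooth UUID is stored as 16 bytes in big-endian
 * (network) order; print it in the canonical 8-4-4-4-12 hex form.
 */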
static void print_bt_uuid(struct seq_file *f, u8 *uuid)
{
u32 data0, data4;
u16 data1, data2, data3, data5;
memcpy(&data0, &uuid[0], 4);
memcpy(&data1, &uuid[4], 2);
memcpy(&data2, &uuid[6], 2);
memcpy(&data3, &uuid[8], 2);
memcpy(&data4, &uuid[10], 4);
memcpy(&data5, &uuid[14], 2);
seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n",
ntohl(data0), ntohs(data1), ntohs(data2),
ntohs(data3), ntohl(data4), ntohs(data5));
}
static int uuids_show(struct seq_file *f, void *p)
{
struct hci_dev *hdev = f->private;
struct list_head *l;
hci_dev_lock_bh(hdev);
list_for_each(l, &hdev->uuids) {
struct bt_uuid *uuid;
uuid = list_entry(l, struct bt_uuid, list);
print_bt_uuid(f, uuid->uuid);
}
hci_dev_unlock_bh(hdev);
return 0;
}
static int uuids_open(struct inode *inode, struct file *file)
{
return single_open(file, uuids_show, inode->i_private);
}
static const struct file_operations uuids_fops = {
.open = uuids_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
int hci_register_sysfs(struct hci_dev *hdev)
{
struct device *dev = &hdev->dev;
int err;
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
dev->type = &bt_host;
dev->class = bt_class;
dev->parent = hdev->parent;
dev_set_name(dev, "%s", hdev->name);
dev_set_drvdata(dev, hdev);
err = device_register(dev);
if (err < 0)
return err;
if (!bt_debugfs)
return 0;
hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
if (!hdev->debugfs)
return 0;
debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
hdev, &inquiry_cache_fops);
debugfs_create_file("blacklist", 0444, hdev->debugfs,
hdev, &blacklist_fops);
debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
return 0;
}
void hci_unregister_sysfs(struct hci_dev *hdev)
{
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
debugfs_remove_recursive(hdev->debugfs);
device_del(&hdev->dev);
}
int __init bt_sysfs_init(void)
{
bt_debugfs = debugfs_create_dir("bluetooth", NULL);
bt_class = class_create(THIS_MODULE, "bluetooth");
if (IS_ERR(bt_class))
return PTR_ERR(bt_class);
return 0;
}
void bt_sysfs_cleanup(void)
{
class_destroy(bt_class);
debugfs_remove_recursive(bt_debugfs);
}
| gpl-2.0 |
dovydasvenckus/linux | Documentation/vDSO/parse_vdso.c | 2119 | 7071 | /*
* parse_vdso.c: Linux reference vDSO parser
* Written by Andrew Lutomirski, 2011-2014.
*
* This code is meant to be linked in to various programs that run on Linux.
* As such, it is available with as few restrictions as possible. This file
* is licensed under the Creative Commons Zero License, version 1.0,
* available at http://creativecommons.org/publicdomain/zero/1.0/legalcode
*
* The vDSO is a regular ELF DSO that the kernel maps into user space when
* it starts a program. It works equally well in statically and dynamically
* linked binaries.
*
* This code is tested on x86. In principle it should work on any
* architecture that has a vDSO.
*/
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <limits.h>
#include <elf.h>
/*
* To use this vDSO parser, first call one of the vdso_init_* functions.
* If you've already parsed auxv, then pass the value of AT_SYSINFO_EHDR
* to vdso_init_from_sysinfo_ehdr. Otherwise pass auxv to vdso_init_from_auxv.
* Then call vdso_sym for each symbol you want. For example, to look up
* gettimeofday on x86_64, use:
*
* <some pointer> = vdso_sym("LINUX_2.6", "gettimeofday");
* or
* <some pointer> = vdso_sym("LINUX_2.6", "__vdso_gettimeofday");
*
* vdso_sym will return 0 if the symbol doesn't exist or if the init function
* failed or was not called. vdso_sym is a little slow, so its return value
* should be cached.
*
* vdso_sym is threadsafe; the init functions are not.
*
* These are the prototypes:
*/
extern void vdso_init_from_auxv(void *auxv);
extern void vdso_init_from_sysinfo_ehdr(uintptr_t base);
extern void *vdso_sym(const char *version, const char *name);
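/*
 * A minimal usage sketch (illustrative only; the getauxval() call and
 * the surrounding variables are assumptions, not part of this file):
 *
 *	#include <sys/auxv.h>
 *	#include <sys/time.h>
 *
 *	struct timeval tv;
 *	typedef long (*gtod_t)(struct timeval *, struct timezone *);
 *	vdso_init_from_sysinfo_ehdr(getauxval(AT_SYSINFO_EHDR));
 *	gtod_t gtod = (gtod_t) vdso_sym("LINUX_2.6", "__vdso_gettimeofday");
 *	if (gtod)
 *		gtod(&tv, 0);
 */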
/* And here's the code. */
#ifndef ELF_BITS
# if ULONG_MAX > 0xffffffffUL
# define ELF_BITS 64
# else
# define ELF_BITS 32
# endif
#endif
#define ELF_BITS_XFORM2(bits, x) Elf##bits##_##x
#define ELF_BITS_XFORM(bits, x) ELF_BITS_XFORM2(bits, x)
#define ELF(x) ELF_BITS_XFORM(ELF_BITS, x)
static struct vdso_info
{
bool valid;
/* Load information */
uintptr_t load_addr;
uintptr_t load_offset; /* load_addr - recorded vaddr */
/* Symbol table */
ELF(Sym) *symtab;
const char *symstrings;
ELF(Word) *bucket, *chain;
ELF(Word) nbucket, nchain;
/* Version table */
ELF(Versym) *versym;
ELF(Verdef) *verdef;
} vdso_info;
/* Straight from the ELF specification. */
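/*
 * Rolling hash over the symbol name: fold four bits per character into
 * the accumulator and mix any bits that overflow the top nibble back
 * into the low bits, so every character affects the final value.
 */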
static unsigned long elf_hash(const unsigned char *name)
{
unsigned long h = 0, g;
while (*name)
{
h = (h << 4) + *name++;
if ((g = h & 0xf0000000))
h ^= g >> 24;
h &= ~g;
}
return h;
}
void vdso_init_from_sysinfo_ehdr(uintptr_t base)
{
size_t i;
bool found_vaddr = false;
vdso_info.valid = false;
vdso_info.load_addr = base;
ELF(Ehdr) *hdr = (ELF(Ehdr)*)base;
if (hdr->e_ident[EI_CLASS] !=
(ELF_BITS == 32 ? ELFCLASS32 : ELFCLASS64)) {
return; /* Wrong ELF class -- check ELF_BITS */
}
ELF(Phdr) *pt = (ELF(Phdr)*)(vdso_info.load_addr + hdr->e_phoff);
ELF(Dyn) *dyn = 0;
/*
* We need two things from the segment table: the load offset
* and the dynamic table.
*/
for (i = 0; i < hdr->e_phnum; i++)
{
if (pt[i].p_type == PT_LOAD && !found_vaddr) {
found_vaddr = true;
vdso_info.load_offset = base
+ (uintptr_t)pt[i].p_offset
- (uintptr_t)pt[i].p_vaddr;
} else if (pt[i].p_type == PT_DYNAMIC) {
dyn = (ELF(Dyn)*)(base + pt[i].p_offset);
}
}
if (!found_vaddr || !dyn)
return; /* Failed */
/*
* Fish out the useful bits of the dynamic table.
*/
ELF(Word) *hash = 0;
vdso_info.symstrings = 0;
vdso_info.symtab = 0;
vdso_info.versym = 0;
vdso_info.verdef = 0;
for (i = 0; dyn[i].d_tag != DT_NULL; i++) {
switch (dyn[i].d_tag) {
case DT_STRTAB:
vdso_info.symstrings = (const char *)
((uintptr_t)dyn[i].d_un.d_ptr
+ vdso_info.load_offset);
break;
case DT_SYMTAB:
vdso_info.symtab = (ELF(Sym) *)
((uintptr_t)dyn[i].d_un.d_ptr
+ vdso_info.load_offset);
break;
case DT_HASH:
hash = (ELF(Word) *)
((uintptr_t)dyn[i].d_un.d_ptr
+ vdso_info.load_offset);
break;
case DT_VERSYM:
vdso_info.versym = (ELF(Versym) *)
((uintptr_t)dyn[i].d_un.d_ptr
+ vdso_info.load_offset);
break;
case DT_VERDEF:
vdso_info.verdef = (ELF(Verdef) *)
((uintptr_t)dyn[i].d_un.d_ptr
+ vdso_info.load_offset);
break;
}
}
if (!vdso_info.symstrings || !vdso_info.symtab || !hash)
return; /* Failed */
if (!vdso_info.verdef)
vdso_info.versym = 0;
/* Parse the hash table header. */
vdso_info.nbucket = hash[0];
vdso_info.nchain = hash[1];
vdso_info.bucket = &hash[2];
vdso_info.chain = &hash[vdso_info.nbucket + 2];
/* That's all we need. */
vdso_info.valid = true;
}
static bool vdso_match_version(ELF(Versym) ver,
const char *name, ELF(Word) hash)
{
/*
* This is a helper function to check if the version indexed by
* ver matches name (which hashes to hash).
*
* The version definition table is a mess, and I don't know how
* to do this in better than linear time without allocating memory
* to build an index. I also don't know why the table has
* variable size entries in the first place.
*
* For added fun, I can't find a comprehensible specification of how
* to parse all the weird flags in the table.
*
* So I just parse the whole table every time.
*/
/* First step: find the version definition */
ver &= 0x7fff; /* Apparently bit 15 means "hidden" */
ELF(Verdef) *def = vdso_info.verdef;
while(true) {
if ((def->vd_flags & VER_FLG_BASE) == 0
&& (def->vd_ndx & 0x7fff) == ver)
break;
if (def->vd_next == 0)
return false; /* No definition. */
def = (ELF(Verdef) *)((char *)def + def->vd_next);
}
/* Now figure out whether it matches. */
ELF(Verdaux) *aux = (ELF(Verdaux)*)((char *)def + def->vd_aux);
return def->vd_hash == hash
&& !strcmp(name, vdso_info.symstrings + aux->vda_name);
}
void *vdso_sym(const char *version, const char *name)
{
unsigned long ver_hash;
if (!vdso_info.valid)
return 0;
ver_hash = elf_hash(version);
ELF(Word) chain = vdso_info.bucket[elf_hash(name) % vdso_info.nbucket];
for (; chain != STN_UNDEF; chain = vdso_info.chain[chain]) {
ELF(Sym) *sym = &vdso_info.symtab[chain];
/* Check for a defined global or weak function w/ right name. */
if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC)
continue;
if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL &&
ELF64_ST_BIND(sym->st_info) != STB_WEAK)
continue;
if (sym->st_shndx == SHN_UNDEF)
continue;
if (strcmp(name, vdso_info.symstrings + sym->st_name))
continue;
/* Check symbol version. */
if (vdso_info.versym
&& !vdso_match_version(vdso_info.versym[chain],
version, ver_hash))
continue;
return (void *)(vdso_info.load_offset + sym->st_value);
}
return 0;
}
void vdso_init_from_auxv(void *auxv)
{
ELF(auxv_t) *elf_auxv = auxv;
for (int i = 0; elf_auxv[i].a_type != AT_NULL; i++)
{
if (elf_auxv[i].a_type == AT_SYSINFO_EHDR) {
vdso_init_from_sysinfo_ehdr(elf_auxv[i].a_un.a_val);
return;
}
}
vdso_info.valid = false;
}
| gpl-2.0 |
jepler/odroid-linux | drivers/net/wireless/hostap/hostap_ioctl.c | 2119 | 106234 | /* ioctl() (mostly Linux Wireless Extensions) routines for Host AP driver */
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <net/lib80211.h>
#include "hostap_wlan.h"
#include "hostap.h"
#include "hostap_ap.h"
static struct iw_statistics *hostap_get_wireless_stats(struct net_device *dev)
{
struct hostap_interface *iface;
local_info_t *local;
struct iw_statistics *wstats;
iface = netdev_priv(dev);
local = iface->local;
/* Why are we doing that? Jean II */
if (iface->type != HOSTAP_INTERFACE_MAIN)
return NULL;
wstats = &local->wstats;
wstats->status = 0;
wstats->discard.code =
local->comm_tallies.rx_discards_wep_undecryptable;
wstats->discard.misc =
local->comm_tallies.rx_fcs_errors +
local->comm_tallies.rx_discards_no_buffer +
local->comm_tallies.tx_discards_wrong_sa;
wstats->discard.retries =
local->comm_tallies.tx_retry_limit_exceeded;
wstats->discard.fragment =
local->comm_tallies.rx_message_in_bad_msg_fragments;
if (local->iw_mode != IW_MODE_MASTER &&
local->iw_mode != IW_MODE_REPEAT) {
int update = 1;
#ifdef in_atomic
/* RID reading might sleep and it must not be called in
* interrupt context or while atomic. However, this
* function seems to be called while atomic (at least in Linux
* 2.5.59). Update signal quality values only if in suitable
* context. Otherwise, previous values read from tick timer
* will be used. */
if (in_atomic())
update = 0;
#endif /* in_atomic */
if (update && prism2_update_comms_qual(dev) == 0)
wstats->qual.updated = IW_QUAL_ALL_UPDATED |
IW_QUAL_DBM;
wstats->qual.qual = local->comms_qual;
wstats->qual.level = local->avg_signal;
wstats->qual.noise = local->avg_noise;
} else {
wstats->qual.qual = 0;
wstats->qual.level = 0;
wstats->qual.noise = 0;
wstats->qual.updated = IW_QUAL_ALL_INVALID;
}
return wstats;
}
static int prism2_get_datarates(struct net_device *dev, u8 *rates)
{
struct hostap_interface *iface;
local_info_t *local;
u8 buf[12];
int len;
u16 val;
iface = netdev_priv(dev);
local = iface->local;
len = local->func->get_rid(dev, HFA384X_RID_SUPPORTEDDATARATES, buf,
sizeof(buf), 0);
if (len < 2)
return 0;
val = le16_to_cpu(*(__le16 *) buf); /* string length */
if (len - 2 < val || val > 10)
return 0;
memcpy(rates, buf + 2, val);
return val;
}
static int prism2_get_name(struct net_device *dev,
struct iw_request_info *info,
char *name, char *extra)
{
u8 rates[10];
int len, i, over2 = 0;
len = prism2_get_datarates(dev, rates);
for (i = 0; i < len; i++) {
if (rates[i] == 0x0b || rates[i] == 0x16) {
over2 = 1;
break;
}
}
strcpy(name, over2 ? "IEEE 802.11b" : "IEEE 802.11-DS");
return 0;
}
static int prism2_ioctl_siwencode(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *erq, char *keybuf)
{
struct hostap_interface *iface;
local_info_t *local;
int i;
struct lib80211_crypt_data **crypt;
iface = netdev_priv(dev);
local = iface->local;
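/*
 * Wireless extensions number WEP keys 1..4; an index of 0 means "use
 * the current TX key". Convert to the driver's zero-based indexing.
 */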
i = erq->flags & IW_ENCODE_INDEX;
if (i < 1 || i > 4)
i = local->crypt_info.tx_keyidx;
else
i--;
if (i < 0 || i >= WEP_KEYS)
return -EINVAL;
crypt = &local->crypt_info.crypt[i];
if (erq->flags & IW_ENCODE_DISABLED) {
if (*crypt)
lib80211_crypt_delayed_deinit(&local->crypt_info, crypt);
goto done;
}
if (*crypt != NULL && (*crypt)->ops != NULL &&
strcmp((*crypt)->ops->name, "WEP") != 0) {
/* changing to use WEP; deinit previously used algorithm */
lib80211_crypt_delayed_deinit(&local->crypt_info, crypt);
}
if (*crypt == NULL) {
struct lib80211_crypt_data *new_crypt;
/* take WEP into use */
new_crypt = kzalloc(sizeof(struct lib80211_crypt_data),
GFP_KERNEL);
if (new_crypt == NULL)
return -ENOMEM;
new_crypt->ops = lib80211_get_crypto_ops("WEP");
if (!new_crypt->ops) {
request_module("lib80211_crypt_wep");
new_crypt->ops = lib80211_get_crypto_ops("WEP");
}
if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
new_crypt->priv = new_crypt->ops->init(i);
if (!new_crypt->ops || !new_crypt->priv) {
kfree(new_crypt);
new_crypt = NULL;
printk(KERN_WARNING "%s: could not initialize WEP: "
"load module hostap_crypt_wep.o\n",
dev->name);
return -EOPNOTSUPP;
}
*crypt = new_crypt;
}
if (erq->length > 0) {
int len = erq->length <= 5 ? 5 : 13;
int first = 1, j;
if (len > erq->length)
memset(keybuf + erq->length, 0, len - erq->length);
(*crypt)->ops->set_key(keybuf, len, NULL, (*crypt)->priv);
for (j = 0; j < WEP_KEYS; j++) {
if (j != i && local->crypt_info.crypt[j]) {
first = 0;
break;
}
}
if (first)
local->crypt_info.tx_keyidx = i;
} else {
/* No key data - just set the default TX key index */
local->crypt_info.tx_keyidx = i;
}
done:
local->open_wep = erq->flags & IW_ENCODE_OPEN;
if (hostap_set_encryption(local)) {
printk(KERN_DEBUG "%s: set_encryption failed\n", dev->name);
return -EINVAL;
}
/* Do not reset port0 if card is in Managed mode since resetting will
* generate new IEEE 802.11 authentication which may end up in looping
* with IEEE 802.1X. Prism2 documentation seems to require port reset
* after WEP configuration. However, keys are apparently changed at
* least in Managed mode. */
if (local->iw_mode != IW_MODE_INFRA && local->func->reset_port(dev)) {
printk(KERN_DEBUG "%s: reset_port failed\n", dev->name);
return -EINVAL;
}
return 0;
}
static int prism2_ioctl_giwencode(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *erq, char *key)
{
struct hostap_interface *iface;
local_info_t *local;
int i, len;
u16 val;
struct lib80211_crypt_data *crypt;
iface = netdev_priv(dev);
local = iface->local;
i = erq->flags & IW_ENCODE_INDEX;
if (i < 1 || i > 4)
i = local->crypt_info.tx_keyidx;
else
i--;
if (i < 0 || i >= WEP_KEYS)
return -EINVAL;
crypt = local->crypt_info.crypt[i];
erq->flags = i + 1;
if (crypt == NULL || crypt->ops == NULL) {
erq->length = 0;
erq->flags |= IW_ENCODE_DISABLED;
return 0;
}
if (strcmp(crypt->ops->name, "WEP") != 0) {
/* only WEP is supported with wireless extensions, so just
* report that encryption is used */
erq->length = 0;
erq->flags |= IW_ENCODE_ENABLED;
return 0;
}
/* Reads from HFA384X_RID_CNFDEFAULTKEY* return bogus values, so show
* the keys from driver buffer */
len = crypt->ops->get_key(key, WEP_KEY_LEN, NULL, crypt->priv);
erq->length = (len >= 0 ? len : 0);
if (local->func->get_rid(dev, HFA384X_RID_CNFWEPFLAGS, &val, 2, 1) < 0)
{
printk("CNFWEPFLAGS reading failed\n");
return -EOPNOTSUPP;
}
le16_to_cpus(&val);
if (val & HFA384X_WEPFLAGS_PRIVACYINVOKED)
erq->flags |= IW_ENCODE_ENABLED;
else
erq->flags |= IW_ENCODE_DISABLED;
if (val & HFA384X_WEPFLAGS_EXCLUDEUNENCRYPTED)
erq->flags |= IW_ENCODE_RESTRICTED;
else
erq->flags |= IW_ENCODE_OPEN;
return 0;
}
static int hostap_set_rate(struct net_device *dev)
{
struct hostap_interface *iface;
local_info_t *local;
int ret, basic_rates;
iface = netdev_priv(dev);
local = iface->local;
basic_rates = local->basic_rates & local->tx_rate_control;
if (!basic_rates || basic_rates != local->basic_rates) {
printk(KERN_INFO "%s: updating basic rate set automatically "
"to match with the new supported rate set\n",
dev->name);
if (!basic_rates)
basic_rates = local->tx_rate_control;
local->basic_rates = basic_rates;
if (hostap_set_word(dev, HFA384X_RID_CNFBASICRATES,
basic_rates))
printk(KERN_WARNING "%s: failed to set "
"cnfBasicRates\n", dev->name);
}
ret = (hostap_set_word(dev, HFA384X_RID_TXRATECONTROL,
local->tx_rate_control) ||
hostap_set_word(dev, HFA384X_RID_CNFSUPPORTEDRATES,
local->tx_rate_control) ||
local->func->reset_port(dev));
if (ret) {
printk(KERN_WARNING "%s: TXRateControl/cnfSupportedRates "
"setting to 0x%x failed\n",
dev->name, local->tx_rate_control);
}
/* Update TX rate configuration for all STAs based on new operational
* rate set. */
hostap_update_rates(local);
return ret;
}
static int prism2_ioctl_siwrate(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *rrq, char *extra)
{
struct hostap_interface *iface;
local_info_t *local;
iface = netdev_priv(dev);
local = iface->local;
if (rrq->fixed) {
switch (rrq->value) {
case 11000000:
local->tx_rate_control = HFA384X_RATES_11MBPS;
break;
case 5500000:
local->tx_rate_control = HFA384X_RATES_5MBPS;
break;
case 2000000:
local->tx_rate_control = HFA384X_RATES_2MBPS;
break;
case 1000000:
local->tx_rate_control = HFA384X_RATES_1MBPS;
break;
default:
local->tx_rate_control = HFA384X_RATES_1MBPS |
HFA384X_RATES_2MBPS | HFA384X_RATES_5MBPS |
HFA384X_RATES_11MBPS;
break;
}
} else {
switch (rrq->value) {
case 11000000:
local->tx_rate_control = HFA384X_RATES_1MBPS |
HFA384X_RATES_2MBPS | HFA384X_RATES_5MBPS |
HFA384X_RATES_11MBPS;
break;
case 5500000:
local->tx_rate_control = HFA384X_RATES_1MBPS |
HFA384X_RATES_2MBPS | HFA384X_RATES_5MBPS;
break;
case 2000000:
local->tx_rate_control = HFA384X_RATES_1MBPS |
HFA384X_RATES_2MBPS;
break;
case 1000000:
local->tx_rate_control = HFA384X_RATES_1MBPS;
break;
default:
local->tx_rate_control = HFA384X_RATES_1MBPS |
HFA384X_RATES_2MBPS | HFA384X_RATES_5MBPS |
HFA384X_RATES_11MBPS;
break;
}
}
return hostap_set_rate(dev);
}
static int prism2_ioctl_giwrate(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *rrq, char *extra)
{
u16 val;
struct hostap_interface *iface;
local_info_t *local;
int ret = 0;
iface = netdev_priv(dev);
local = iface->local;
if (local->func->get_rid(dev, HFA384X_RID_TXRATECONTROL, &val, 2, 1) <
0)
return -EINVAL;
if ((val & 0x1) && (val > 1))
rrq->fixed = 0;
else
rrq->fixed = 1;
if (local->iw_mode == IW_MODE_MASTER && local->ap != NULL &&
!local->fw_tx_rate_control) {
/* HFA384X_RID_CURRENTTXRATE seems to always be 2 Mbps in
* Host AP mode, so use the recorded TX rate of the last sent
* frame */
rrq->value = local->ap->last_tx_rate > 0 ?
local->ap->last_tx_rate * 100000 : 11000000;
return 0;
}
if (local->func->get_rid(dev, HFA384X_RID_CURRENTTXRATE, &val, 2, 1) <
0)
return -EINVAL;
switch (val) {
case HFA384X_RATES_1MBPS:
rrq->value = 1000000;
break;
case HFA384X_RATES_2MBPS:
rrq->value = 2000000;
break;
case HFA384X_RATES_5MBPS:
rrq->value = 5500000;
break;
case HFA384X_RATES_11MBPS:
rrq->value = 11000000;
break;
default:
/* should not happen */
rrq->value = 11000000;
ret = -EINVAL;
break;
}
return ret;
}
static int prism2_ioctl_siwsens(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *sens, char *extra)
{
struct hostap_interface *iface;
local_info_t *local;
iface = netdev_priv(dev);
local = iface->local;
/* Set the desired AP density */
if (sens->value < 1 || sens->value > 3)
return -EINVAL;
if (hostap_set_word(dev, HFA384X_RID_CNFSYSTEMSCALE, sens->value) ||
local->func->reset_port(dev))
return -EINVAL;
return 0;
}
static int prism2_ioctl_giwsens(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *sens, char *extra)
{
struct hostap_interface *iface;
local_info_t *local;
__le16 val;
iface = netdev_priv(dev);
local = iface->local;
/* Get the current AP density */
if (local->func->get_rid(dev, HFA384X_RID_CNFSYSTEMSCALE, &val, 2, 1) <
0)
return -EINVAL;
sens->value = le16_to_cpu(val);
sens->fixed = 1;
return 0;
}
/* Deprecated in new wireless extension API */
static int prism2_ioctl_giwaplist(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *data, char *extra)
{
struct hostap_interface *iface;
local_info_t *local;
struct sockaddr *addr;
struct iw_quality *qual;
iface = netdev_priv(dev);
local = iface->local;
if (local->iw_mode != IW_MODE_MASTER) {
printk(KERN_DEBUG "SIOCGIWAPLIST is currently only supported "
"in Host AP mode\n");
data->length = 0;
return -EOPNOTSUPP;
}
addr = kmalloc(sizeof(struct sockaddr) * IW_MAX_AP, GFP_KERNEL);
qual = kmalloc(sizeof(struct iw_quality) * IW_MAX_AP, GFP_KERNEL);
if (addr == NULL || qual == NULL) {
kfree(addr);
kfree(qual);
data->length = 0;
return -ENOMEM;
}
data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1);
memcpy(extra, addr, sizeof(struct sockaddr) * data->length);
data->flags = 1; /* has quality information */
memcpy(extra + sizeof(struct sockaddr) * data->length, qual,
sizeof(struct iw_quality) * data->length);
kfree(addr);
kfree(qual);
return 0;
}
static int prism2_ioctl_siwrts(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *rts, char *extra)
{
struct hostap_interface *iface;
local_info_t *local;
__le16 val;
iface = netdev_priv(dev);
local = iface->local;
if (rts->disabled)
val = cpu_to_le16(2347);
else if (rts->value < 0 || rts->value > 2347)
return -EINVAL;
else
val = cpu_to_le16(rts->value);
if (local->func->set_rid(dev, HFA384X_RID_RTSTHRESHOLD, &val, 2) ||
local->func->reset_port(dev))
return -EINVAL;
local->rts_threshold = rts->value;
return 0;
}
static int prism2_ioctl_giwrts(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *rts, char *extra)
{
struct hostap_interface *iface;
local_info_t *local;
__le16 val;
iface = netdev_priv(dev);
local = iface->local;
if (local->func->get_rid(dev, HFA384X_RID_RTSTHRESHOLD, &val, 2, 1) <
0)
return -EINVAL;
rts->value = le16_to_cpu(val);
rts->disabled = (rts->value == 2347);
rts->fixed = 1;
return 0;
}
static int prism2_ioctl_siwfrag(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *rts, char *extra)
{
struct hostap_interface *iface;
local_info_t *local;
__le16 val;
iface = netdev_priv(dev);
local = iface->local;
if (rts->disabled)
val = cpu_to_le16(2346);
else if (rts->value < 256 || rts->value > 2346)
return -EINVAL;
else
val = cpu_to_le16(rts->value & ~0x1); /* even numbers only */
local->fragm_threshold = rts->value & ~0x1;
if (local->func->set_rid(dev, HFA384X_RID_FRAGMENTATIONTHRESHOLD, &val,
2)
|| local->func->reset_port(dev))
return -EINVAL;
return 0;
}
static int prism2_ioctl_giwfrag(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *rts, char *extra)
{
struct hostap_interface *iface;
local_info_t *local;
__le16 val;
iface = netdev_priv(dev);
local = iface->local;
if (local->func->get_rid(dev, HFA384X_RID_FRAGMENTATIONTHRESHOLD,
&val, 2, 1) < 0)
return -EINVAL;
rts->value = le16_to_cpu(val);
rts->disabled = (rts->value == 2346);
rts->fixed = 1;
return 0;
}
#ifndef PRISM2_NO_STATION_MODES
static int hostap_join_ap(struct net_device *dev)
{
struct hostap_interface *iface;
local_info_t *local;
struct hfa384x_join_request req;
unsigned long flags;
int i;
struct hfa384x_hostscan_result *entry;
iface = netdev_priv(dev);
local = iface->local;
memcpy(req.bssid, local->preferred_ap, ETH_ALEN);
req.channel = 0;
spin_lock_irqsave(&local->lock, flags);
for (i = 0; i < local->last_scan_results_count; i++) {
if (!local->last_scan_results)
break;
entry = &local->last_scan_results[i];
if (memcmp(local->preferred_ap, entry->bssid, ETH_ALEN) == 0) {
req.channel = entry->chid;
break;
}
}
spin_unlock_irqrestore(&local->lock, flags);
if (local->func->set_rid(dev, HFA384X_RID_JOINREQUEST, &req,
sizeof(req))) {
printk(KERN_DEBUG "%s: JoinRequest %pM failed\n",
dev->name, local->preferred_ap);
return -1;
}
printk(KERN_DEBUG "%s: Trying to join BSSID %pM\n",
dev->name, local->preferred_ap);
return 0;
}
#endif /* PRISM2_NO_STATION_MODES */
static int prism2_ioctl_siwap(struct net_device *dev,
struct iw_request_info *info,
struct sockaddr *ap_addr, char *extra)
{
#ifdef PRISM2_NO_STATION_MODES
return -EOPNOTSUPP;
#else /* PRISM2_NO_STATION_MODES */
struct hostap_interface *iface;
local_info_t *local;
iface = netdev_priv(dev);
local = iface->local;
memcpy(local->preferred_ap, &ap_addr->sa_data, ETH_ALEN);
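/*
 * host_roaming == 1: trigger a scan and let the roaming logic pick the
 * preferred BSSID from the results; host_roaming == 2: issue an
 * explicit JoinRequest to the firmware right away.
 */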
if (local->host_roaming == 1 && local->iw_mode == IW_MODE_INFRA) {
struct hfa384x_scan_request scan_req;
memset(&scan_req, 0, sizeof(scan_req));
scan_req.channel_list = cpu_to_le16(0x3fff);
scan_req.txrate = cpu_to_le16(HFA384X_RATES_1MBPS);
if (local->func->set_rid(dev, HFA384X_RID_SCANREQUEST,
&scan_req, sizeof(scan_req))) {
printk(KERN_DEBUG "%s: ScanResults request failed - "
"preferred AP delayed to next unsolicited "
"scan\n", dev->name);
}
} else if (local->host_roaming == 2 &&
local->iw_mode == IW_MODE_INFRA) {
if (hostap_join_ap(dev))
return -EINVAL;
} else {
printk(KERN_DEBUG "%s: Preferred AP (SIOCSIWAP) is used only "
"in Managed mode when host_roaming is enabled\n",
dev->name);
}
return 0;
#endif /* PRISM2_NO_STATION_MODES */
}
static int prism2_ioctl_giwap(struct net_device *dev,
struct iw_request_info *info,
struct sockaddr *ap_addr, char *extra)
{
struct hostap_interface *iface;
local_info_t *local;
iface = netdev_priv(dev);
local = iface->local;
ap_addr->sa_family = ARPHRD_ETHER;
switch (iface->type) {
case HOSTAP_INTERFACE_AP:
memcpy(&ap_addr->sa_data, dev->dev_addr, ETH_ALEN);
break;
case HOSTAP_INTERFACE_STA:
memcpy(&ap_addr->sa_data, local->assoc_ap_addr, ETH_ALEN);
break;
case HOSTAP_INTERFACE_WDS:
memcpy(&ap_addr->sa_data, iface->u.wds.remote_addr, ETH_ALEN);
break;
default:
if (local->func->get_rid(dev, HFA384X_RID_CURRENTBSSID,
&ap_addr->sa_data, ETH_ALEN, 1) < 0)
return -EOPNOTSUPP;
/* local->bssid is also updated in LinkStatus handler when in
* station mode */
memcpy(local->bssid, &ap_addr->sa_data, ETH_ALEN);
break;
}
return 0;
}
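/* SIOCSIWNICKN/SIOCGIWNICKN: station nickname, stored in cnfOwnName. */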
static int prism2_ioctl_siwnickn(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *data, char *nickname)
{
struct hostap_interface *iface;
local_info_t *local;
iface = netdev_priv(dev);
local = iface->local;
memset(local->name, 0, sizeof(local->name));
memcpy(local->name, nickname, data->length);
local->name_set = 1;
if (hostap_set_string(dev, HFA384X_RID_CNFOWNNAME, local->name) ||
local->func->reset_port(dev))
return -EINVAL;
return 0;
}
static int prism2_ioctl_giwnickn(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *data, char *nickname)
{
struct hostap_interface *iface;
local_info_t *local;
int len;
char name[MAX_NAME_LEN + 3];
u16 val;
iface = netdev_priv(dev);
local = iface->local;
len = local->func->get_rid(dev, HFA384X_RID_CNFOWNNAME,
&name, MAX_NAME_LEN + 2, 0);
val = le16_to_cpu(*(__le16 *) name);
if (len > MAX_NAME_LEN + 2 || len < 0 || val > MAX_NAME_LEN)
return -EOPNOTSUPP;
name[val + 2] = '\0';
data->length = val + 1;
memcpy(nickname, name + 2, val + 1);
return 0;
}
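/*
 * SIOCSIWFREQ: accept either a channel number or a frequency; a
 * frequency (e == 1) is first converted to the matching channel, which
 * is then validated against the allowed channel mask.
 */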
static int prism2_ioctl_siwfreq(struct net_device *dev,
struct iw_request_info *info,
struct iw_freq *freq, char *extra)
{
struct hostap_interface *iface;
local_info_t *local;
iface = netdev_priv(dev);
local = iface->local;
/* freq => chan. */
if (freq->e == 1 &&
freq->m / 100000 >= freq_list[0] &&
freq->m / 100000 <= freq_list[FREQ_COUNT - 1]) {
int ch;
int fr = freq->m / 100000;
for (ch = 0; ch < FREQ_COUNT; ch++) {
if (fr == freq_list[ch]) {
freq->e = 0;
freq->m = ch + 1;
break;
}
}
}
if (freq->e != 0 || freq->m < 1 || freq->m > FREQ_COUNT ||
!(local->channel_mask & (1 << (freq->m - 1))))
return -EINVAL;
local->channel = freq->m; /* channel is used in prism2_setup_rids() */
if (hostap_set_word(dev, HFA384X_RID_CNFOWNCHANNEL, local->channel) ||
local->func->reset_port(dev))
return -EINVAL;
return 0;
}
static int prism2_ioctl_giwfreq(struct net_device *dev,
struct iw_request_info *info,
struct iw_freq *freq, char *extra)
{
struct hostap_interface *iface;
local_info_t *local;
u16 val;
iface = netdev_priv(dev);
local = iface->local;
if (local->func->get_rid(dev, HFA384X_RID_CURRENTCHANNEL, &val, 2, 1) <
0)
return -EINVAL;
le16_to_cpus(&val);
if (val < 1 || val > FREQ_COUNT)
return -EINVAL;
freq->m = freq_list[val - 1] * 100000;
freq->e = 1;
return 0;
}
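/* Select the ARPHRD_* device type used for frames in monitor mode. */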
static void hostap_monitor_set_type(local_info_t *local)
{
struct net_device *dev = local->ddev;
if (dev == NULL)
return;
if (local->monitor_type == PRISM2_MONITOR_PRISM ||
local->monitor_type == PRISM2_MONITOR_CAPHDR) {
dev->type = ARPHRD_IEEE80211_PRISM;
} else if (local->monitor_type == PRISM2_MONITOR_RADIOTAP) {
dev->type = ARPHRD_IEEE80211_RADIOTAP;
} else {
dev->type = ARPHRD_IEEE80211;
}
}
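/*
 * SIOCSIWESSID: set the desired/own SSID. An empty SSID means "any" and
 * is allowed only in client modes, not in Host AP (Master) mode.
 */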
static int prism2_ioctl_siwessid(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *data, char *ssid)
{
struct hostap_interface *iface;
local_info_t *local;
iface = netdev_priv(dev);
local = iface->local;
if (iface->type == HOSTAP_INTERFACE_WDS)
return -EOPNOTSUPP;
if (data->flags == 0)
ssid[0] = '\0'; /* ANY */
if (local->iw_mode == IW_MODE_MASTER && ssid[0] == '\0') {
/* Setting SSID to empty string seems to kill the card in
* Host AP mode */
printk(KERN_DEBUG "%s: Host AP mode does not support "
"'Any' essid\n", dev->name);
return -EINVAL;
}
memcpy(local->essid, ssid, data->length);
local->essid[data->length] = '\0';
if ((!local->fw_ap &&
hostap_set_string(dev, HFA384X_RID_CNFDESIREDSSID, local->essid))
|| hostap_set_string(dev, HFA384X_RID_CNFOWNSSID, local->essid) ||
local->func->reset_port(dev))
return -EINVAL;
return 0;
}
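/* SIOCGIWESSID: report the configured (Master) or current (STA) SSID. */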
static int prism2_ioctl_giwessid(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *data, char *essid)
{
struct hostap_interface *iface;
local_info_t *local;
u16 val;
iface = netdev_priv(dev);
local = iface->local;
if (iface->type == HOSTAP_INTERFACE_WDS)
return -EOPNOTSUPP;
data->flags = 1; /* active */
if (local->iw_mode == IW_MODE_MASTER) {
data->length = strlen(local->essid);
memcpy(essid, local->essid, IW_ESSID_MAX_SIZE);
} else {
int len;
char ssid[MAX_SSID_LEN + 2];
memset(ssid, 0, sizeof(ssid));
len = local->func->get_rid(dev, HFA384X_RID_CURRENTSSID,
&ssid, MAX_SSID_LEN + 2, 0);
val = le16_to_cpu(*(__le16 *) ssid);
if (len > MAX_SSID_LEN + 2 || len < 0 || val > MAX_SSID_LEN) {
return -EOPNOTSUPP;
}
data->length = val;
memcpy(essid, ssid + 2, IW_ESSID_MAX_SIZE);
}
return 0;
}
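/*
 * SIOCGIWRANGE: fill in driver/hardware capabilities for Wireless
 * Extensions: frequencies, bit rates, encryption sizes, quality ranges,
 * and event capabilities.
 */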
static int prism2_ioctl_giwrange(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *data, char *extra)
{
struct hostap_interface *iface;
local_info_t *local;
struct iw_range *range = (struct iw_range *) extra;
u8 rates[10];
u16 val;
int i, len, over2;
iface = netdev_priv(dev);
local = iface->local;
data->length = sizeof(struct iw_range);
memset(range, 0, sizeof(struct iw_range));
/* TODO: could fill num_txpower and txpower array with
* something; however, there are 128 different values.. */
range->txpower_capa = IW_TXPOW_DBM;
if (local->iw_mode == IW_MODE_INFRA || local->iw_mode == IW_MODE_ADHOC)
{
range->min_pmp = 1 * 1024;
range->max_pmp = 65535 * 1024;
range->min_pmt = 1 * 1024;
range->max_pmt = 1000 * 1024;
range->pmp_flags = IW_POWER_PERIOD;
range->pmt_flags = IW_POWER_TIMEOUT;
range->pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT |
IW_POWER_UNICAST_R | IW_POWER_ALL_R;
}
range->we_version_compiled = WIRELESS_EXT;
range->we_version_source = 18;
range->retry_capa = IW_RETRY_LIMIT;
range->retry_flags = IW_RETRY_LIMIT;
range->min_retry = 0;
range->max_retry = 255;
range->num_channels = FREQ_COUNT;
val = 0;
for (i = 0; i < FREQ_COUNT; i++) {
if (local->channel_mask & (1 << i)) {
range->freq[val].i = i + 1;
range->freq[val].m = freq_list[i] * 100000;
range->freq[val].e = 1;
val++;
}
if (val == IW_MAX_FREQUENCIES)
break;
}
range->num_frequency = val;
if (local->sta_fw_ver >= PRISM2_FW_VER(1,3,1)) {
		range->max_qual.qual = 70; /* What is the correct max? This
					    * was not documented exactly. At
					    * least 69 has been observed. */
range->max_qual.level = 0; /* dB */
range->max_qual.noise = 0; /* dB */
/* What would be suitable values for "average/typical" qual? */
range->avg_qual.qual = 20;
range->avg_qual.level = -60;
range->avg_qual.noise = -95;
} else {
range->max_qual.qual = 92; /* 0 .. 92 */
range->max_qual.level = 154; /* 27 .. 154 */
range->max_qual.noise = 154; /* 27 .. 154 */
}
range->sensitivity = 3;
range->max_encoding_tokens = WEP_KEYS;
range->num_encoding_sizes = 2;
range->encoding_size[0] = 5;
range->encoding_size[1] = 13;
over2 = 0;
len = prism2_get_datarates(dev, rates);
range->num_bitrates = 0;
for (i = 0; i < len; i++) {
if (range->num_bitrates < IW_MAX_BITRATES) {
range->bitrate[range->num_bitrates] =
rates[i] * 500000;
range->num_bitrates++;
}
if (rates[i] == 0x0b || rates[i] == 0x16)
over2 = 1;
}
/* estimated maximum TCP throughput values (bps) */
range->throughput = over2 ? 5500000 : 1500000;
range->min_rts = 0;
range->max_rts = 2347;
range->min_frag = 256;
range->max_frag = 2346;
/* Event capability (kernel + driver) */
range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
IW_EVENT_CAPA_MASK(SIOCGIWAP) |
IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
range->event_capa[1] = IW_EVENT_CAPA_K_1;
range->event_capa[4] = (IW_EVENT_CAPA_MASK(IWEVTXDROP) |
IW_EVENT_CAPA_MASK(IWEVCUSTOM) |
IW_EVENT_CAPA_MASK(IWEVREGISTERED) |
IW_EVENT_CAPA_MASK(IWEVEXPIRED));
range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
if (local->sta_fw_ver >= PRISM2_FW_VER(1,3,1))
range->scan_capa = IW_SCAN_CAPA_ESSID;
return 0;
}
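/*
 * Enter monitor mode: pseudo-IBSS port type and host-based WEP handling
 * (so that IV/ICV fields are visible), followed by the firmware test
 * command that starts monitoring.
 */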
static int hostap_monitor_mode_enable(local_info_t *local)
{
struct net_device *dev = local->dev;
printk(KERN_DEBUG "Enabling monitor mode\n");
hostap_monitor_set_type(local);
if (hostap_set_word(dev, HFA384X_RID_CNFPORTTYPE,
HFA384X_PORTTYPE_PSEUDO_IBSS)) {
printk(KERN_DEBUG "Port type setting for monitor mode "
"failed\n");
return -EOPNOTSUPP;
}
	/* Host decrypt is needed to get the IV and ICV fields;
	 * however, monitor mode seems to remove the WEP flag from the
	 * frame control field. */
if (hostap_set_word(dev, HFA384X_RID_CNFWEPFLAGS,
HFA384X_WEPFLAGS_HOSTENCRYPT |
HFA384X_WEPFLAGS_HOSTDECRYPT)) {
printk(KERN_DEBUG "WEP flags setting failed\n");
return -EOPNOTSUPP;
}
if (local->func->reset_port(dev) ||
local->func->cmd(dev, HFA384X_CMDCODE_TEST |
(HFA384X_TEST_MONITOR << 8),
0, NULL, NULL)) {
printk(KERN_DEBUG "Setting monitor mode failed\n");
return -EOPNOTSUPP;
}
return 0;
}
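/* Leave monitor mode and restore the normal encryption configuration. */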
static int hostap_monitor_mode_disable(local_info_t *local)
{
struct net_device *dev = local->ddev;
if (dev == NULL)
return -1;
printk(KERN_DEBUG "%s: Disabling monitor mode\n", dev->name);
dev->type = ARPHRD_ETHER;
if (local->func->cmd(dev, HFA384X_CMDCODE_TEST |
(HFA384X_TEST_STOP << 8),
0, NULL, NULL))
return -1;
return hostap_set_encryption(local);
}
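/*
 * SIOCSIWMODE: switch between Ad-hoc, Managed, Master, Repeater, and
 * Monitor modes, applying the firmware workarounds noted below.
 */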
static int prism2_ioctl_siwmode(struct net_device *dev,
struct iw_request_info *info,
__u32 *mode, char *extra)
{
struct hostap_interface *iface;
local_info_t *local;
int double_reset = 0;
iface = netdev_priv(dev);
local = iface->local;
if (*mode != IW_MODE_ADHOC && *mode != IW_MODE_INFRA &&
*mode != IW_MODE_MASTER && *mode != IW_MODE_REPEAT &&
*mode != IW_MODE_MONITOR)
return -EOPNOTSUPP;
#ifdef PRISM2_NO_STATION_MODES
if (*mode == IW_MODE_ADHOC || *mode == IW_MODE_INFRA)
return -EOPNOTSUPP;
#endif /* PRISM2_NO_STATION_MODES */
if (*mode == local->iw_mode)
return 0;
if (*mode == IW_MODE_MASTER && local->essid[0] == '\0') {
printk(KERN_WARNING "%s: empty SSID not allowed in Master "
"mode\n", dev->name);
return -EINVAL;
}
if (local->iw_mode == IW_MODE_MONITOR)
hostap_monitor_mode_disable(local);
if ((local->iw_mode == IW_MODE_ADHOC ||
local->iw_mode == IW_MODE_MONITOR) && *mode == IW_MODE_MASTER) {
		/* There seems to be a firmware bug in at least STA f/w
		 * v1.5.6 that leaves beacon frames using IBSS type when
		 * moving from IBSS to Host AP mode. Doing a double Port0
		 * reset seems to be enough to work around this. */
double_reset = 1;
}
printk(KERN_DEBUG "prism2: %s: operating mode changed "
"%d -> %d\n", dev->name, local->iw_mode, *mode);
local->iw_mode = *mode;
if (local->iw_mode == IW_MODE_MONITOR)
hostap_monitor_mode_enable(local);
else if (local->iw_mode == IW_MODE_MASTER && !local->host_encrypt &&
!local->fw_encrypt_ok) {
printk(KERN_DEBUG "%s: defaulting to host-based encryption as "
"a workaround for firmware bug in Host AP mode WEP\n",
dev->name);
local->host_encrypt = 1;
}
if (hostap_set_word(dev, HFA384X_RID_CNFPORTTYPE,
hostap_get_porttype(local)))
return -EOPNOTSUPP;
if (local->func->reset_port(dev))
return -EINVAL;
if (double_reset && local->func->reset_port(dev))
return -EINVAL;
if (local->iw_mode != IW_MODE_INFRA && local->iw_mode != IW_MODE_ADHOC)
{
/* netif_carrier is used only in client modes for now, so make
* sure carrier is on when moving to non-client modes. */
netif_carrier_on(local->dev);
netif_carrier_on(local->ddev);
}
return 0;
}
static int prism2_ioctl_giwmode(struct net_device *dev,
struct iw_request_info *info,
__u32 *mode, char *extra)
{
struct hostap_interface *iface;
local_info_t *local;
iface = netdev_priv(dev);
local = iface->local;
switch (iface->type) {
case HOSTAP_INTERFACE_STA:
*mode = IW_MODE_INFRA;
break;
case HOSTAP_INTERFACE_WDS:
*mode = IW_MODE_REPEAT;
break;
default:
*mode = local->iw_mode;
break;
}
return 0;
}
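/*
 * SIOCSIWPOWER: configure firmware power saving. The values given by
 * Wireless Extensions are scaled down by 1024 before being written to
 * the duration RIDs.
 */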
static int prism2_ioctl_siwpower(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *wrq, char *extra)
{
#ifdef PRISM2_NO_STATION_MODES
return -EOPNOTSUPP;
#else /* PRISM2_NO_STATION_MODES */
int ret = 0;
if (wrq->disabled)
return hostap_set_word(dev, HFA384X_RID_CNFPMENABLED, 0);
switch (wrq->flags & IW_POWER_MODE) {
case IW_POWER_UNICAST_R:
ret = hostap_set_word(dev, HFA384X_RID_CNFMULTICASTRECEIVE, 0);
if (ret)
return ret;
ret = hostap_set_word(dev, HFA384X_RID_CNFPMENABLED, 1);
if (ret)
return ret;
break;
case IW_POWER_ALL_R:
ret = hostap_set_word(dev, HFA384X_RID_CNFMULTICASTRECEIVE, 1);
if (ret)
return ret;
ret = hostap_set_word(dev, HFA384X_RID_CNFPMENABLED, 1);
if (ret)
return ret;
break;
case IW_POWER_ON:
break;
default:
return -EINVAL;
}
if (wrq->flags & IW_POWER_TIMEOUT) {
ret = hostap_set_word(dev, HFA384X_RID_CNFPMENABLED, 1);
if (ret)
return ret;
ret = hostap_set_word(dev, HFA384X_RID_CNFPMHOLDOVERDURATION,
wrq->value / 1024);
if (ret)
return ret;
}
if (wrq->flags & IW_POWER_PERIOD) {
ret = hostap_set_word(dev, HFA384X_RID_CNFPMENABLED, 1);
if (ret)
return ret;
ret = hostap_set_word(dev, HFA384X_RID_CNFMAXSLEEPDURATION,
wrq->value / 1024);
if (ret)
return ret;
}
return ret;
#endif /* PRISM2_NO_STATION_MODES */
}
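/*
 * SIOCGIWPOWER: read back the power-save enable flag, period or timeout,
 * and multicast receive setting from the firmware RIDs.
 */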
static int prism2_ioctl_giwpower(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *rrq, char *extra)
{
#ifdef PRISM2_NO_STATION_MODES
return -EOPNOTSUPP;
#else /* PRISM2_NO_STATION_MODES */
struct hostap_interface *iface;
local_info_t *local;
__le16 enable, mcast;
iface = netdev_priv(dev);
local = iface->local;
if (local->func->get_rid(dev, HFA384X_RID_CNFPMENABLED, &enable, 2, 1)
< 0)
return -EINVAL;
if (!le16_to_cpu(enable)) {
rrq->disabled = 1;
return 0;
}
rrq->disabled = 0;
if ((rrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
__le16 timeout;
if (local->func->get_rid(dev,
HFA384X_RID_CNFPMHOLDOVERDURATION,
&timeout, 2, 1) < 0)
return -EINVAL;
rrq->flags = IW_POWER_TIMEOUT;
rrq->value = le16_to_cpu(timeout) * 1024;
} else {
__le16 period;
if (local->func->get_rid(dev, HFA384X_RID_CNFMAXSLEEPDURATION,
&period, 2, 1) < 0)
return -EINVAL;
rrq->flags = IW_POWER_PERIOD;
rrq->value = le16_to_cpu(period) * 1024;
}
if (local->func->get_rid(dev, HFA384X_RID_CNFMULTICASTRECEIVE, &mcast,
2, 1) < 0)
return -EINVAL;
if (le16_to_cpu(mcast))
rrq->flags |= IW_POWER_ALL_R;
else
rrq->flags |= IW_POWER_UNICAST_R;
return 0;
#endif /* PRISM2_NO_STATION_MODES */
}
static int prism2_ioctl_siwretry(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *rrq, char *extra)
{
struct hostap_interface *iface;
local_info_t *local;
iface = netdev_priv(dev);
local = iface->local;
if (rrq->disabled)
return -EINVAL;
	/* Setting retry limits is not supported with the current station
	 * firmware code; simulate this with the alternate retry count for
	 * now */
if (rrq->flags == IW_RETRY_LIMIT) {
if (rrq->value < 0) {
/* disable manual retry count setting and use firmware
* defaults */
local->manual_retry_count = -1;
local->tx_control &= ~HFA384X_TX_CTRL_ALT_RTRY;
} else {
if (hostap_set_word(dev, HFA384X_RID_CNFALTRETRYCOUNT,
rrq->value)) {
printk(KERN_DEBUG "%s: Alternate retry count "
"setting to %d failed\n",
dev->name, rrq->value);
return -EOPNOTSUPP;
}
local->manual_retry_count = rrq->value;
local->tx_control |= HFA384X_TX_CTRL_ALT_RTRY;
}
return 0;
}
return -EOPNOTSUPP;
#if 0
	/* Pseudo-code sketch of what could be done if the firmware
	 * supported it: */
if (rrq->flags & IW_RETRY_LIMIT) {
if (rrq->flags & IW_RETRY_LONG)
HFA384X_RID_LONGRETRYLIMIT = rrq->value;
else if (rrq->flags & IW_RETRY_SHORT)
HFA384X_RID_SHORTRETRYLIMIT = rrq->value;
else {
HFA384X_RID_LONGRETRYLIMIT = rrq->value;
HFA384X_RID_SHORTRETRYLIMIT = rrq->value;
}
}
if (rrq->flags & IW_RETRY_LIFETIME) {
HFA384X_RID_MAXTRANSMITLIFETIME = rrq->value / 1024;
}
return 0;
#endif /* 0 */
}
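/*
 * SIOCGIWRETRY: report retry limits and maximum TX lifetime from the
 * firmware RIDs, preferring the manually set alternate retry count.
 */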
static int prism2_ioctl_giwretry(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *rrq, char *extra)
{
struct hostap_interface *iface;
local_info_t *local;
__le16 shortretry, longretry, lifetime, altretry;
iface = netdev_priv(dev);
local = iface->local;
if (local->func->get_rid(dev, HFA384X_RID_SHORTRETRYLIMIT, &shortretry,
2, 1) < 0 ||
local->func->get_rid(dev, HFA384X_RID_LONGRETRYLIMIT, &longretry,
2, 1) < 0 ||
local->func->get_rid(dev, HFA384X_RID_MAXTRANSMITLIFETIME,
&lifetime, 2, 1) < 0)
return -EINVAL;
rrq->disabled = 0;
if ((rrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
rrq->flags = IW_RETRY_LIFETIME;
rrq->value = le16_to_cpu(lifetime) * 1024;
} else {
if (local->manual_retry_count >= 0) {
rrq->flags = IW_RETRY_LIMIT;
if (local->func->get_rid(dev,
HFA384X_RID_CNFALTRETRYCOUNT,
&altretry, 2, 1) >= 0)
rrq->value = le16_to_cpu(altretry);
else
rrq->value = local->manual_retry_count;
} else if ((rrq->flags & IW_RETRY_LONG)) {
rrq->flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
rrq->value = le16_to_cpu(longretry);
} else {
rrq->flags = IW_RETRY_LIMIT;
rrq->value = le16_to_cpu(shortretry);
if (shortretry != longretry)
rrq->flags |= IW_RETRY_SHORT;
}
}
return 0;
}
/* Note! This TX power control is experimental and should not be used in
 * production. It just sets the raw power register and does not use any kind
 * of feedback information from the measured TX power (CR58). This is now
 * commented out to make sure that it is not used by accident. TX power
 * configuration will be enabled again after a proper algorithm using
 * feedback has been implemented. */
#ifdef RAW_TXPOWER_SETTING
/* Map HFA386x's CR31 to and from dBm with an ad hoc mapping.
 * This version assumes the following mapping:
 * CR31 is a 7-bit value with a -64 to +63 range.
 * -64 is mapped to +20 dBm and +63 to -43 dBm.
 * This is certainly not an exact mapping for every card, but at least
 * an increasing dBm value should correspond to increasing TX power.
 */
static int prism2_txpower_hfa386x_to_dBm(u16 val)
{
signed char tmp;
if (val > 255)
val = 255;
tmp = val;
tmp >>= 2;
return -12 - tmp;
}
static u16 prism2_txpower_dBm_to_hfa386x(int val)
{
signed char tmp;
if (val > 20)
return 128;
else if (val < -43)
return 127;
tmp = val;
tmp = -12 - tmp;
tmp <<= 2;
return (unsigned char) tmp;
}
#endif /* RAW_TXPOWER_SETTING */
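/*
 * SIOCSIWTXPOW: turning the radio on/off is always supported; setting an
 * actual TX power level is available only in experimental
 * RAW_TXPOWER_SETTING builds.
 */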
static int prism2_ioctl_siwtxpow(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *rrq, char *extra)
{
struct hostap_interface *iface;
local_info_t *local;
#ifdef RAW_TXPOWER_SETTING
char *tmp;
#endif
u16 val;
int ret = 0;
iface = netdev_priv(dev);
local = iface->local;
if (rrq->disabled) {
if (local->txpower_type != PRISM2_TXPOWER_OFF) {
val = 0xff; /* use all standby and sleep modes */
ret = local->func->cmd(dev, HFA384X_CMDCODE_WRITEMIF,
HFA386X_CR_A_D_TEST_MODES2,
&val, NULL);
printk(KERN_DEBUG "%s: Turning radio off: %s\n",
dev->name, ret ? "failed" : "OK");
local->txpower_type = PRISM2_TXPOWER_OFF;
}
return (ret ? -EOPNOTSUPP : 0);
}
if (local->txpower_type == PRISM2_TXPOWER_OFF) {
val = 0; /* disable all standby and sleep modes */
ret = local->func->cmd(dev, HFA384X_CMDCODE_WRITEMIF,
HFA386X_CR_A_D_TEST_MODES2, &val, NULL);
printk(KERN_DEBUG "%s: Turning radio on: %s\n",
dev->name, ret ? "failed" : "OK");
local->txpower_type = PRISM2_TXPOWER_UNKNOWN;
}
#ifdef RAW_TXPOWER_SETTING
if (!rrq->fixed && local->txpower_type != PRISM2_TXPOWER_AUTO) {
printk(KERN_DEBUG "Setting ALC on\n");
val = HFA384X_TEST_CFG_BIT_ALC;
local->func->cmd(dev, HFA384X_CMDCODE_TEST |
(HFA384X_TEST_CFG_BITS << 8), 1, &val, NULL);
local->txpower_type = PRISM2_TXPOWER_AUTO;
return 0;
}
if (local->txpower_type != PRISM2_TXPOWER_FIXED) {
printk(KERN_DEBUG "Setting ALC off\n");
val = HFA384X_TEST_CFG_BIT_ALC;
local->func->cmd(dev, HFA384X_CMDCODE_TEST |
(HFA384X_TEST_CFG_BITS << 8), 0, &val, NULL);
local->txpower_type = PRISM2_TXPOWER_FIXED;
}
if (rrq->flags == IW_TXPOW_DBM)
tmp = "dBm";
else if (rrq->flags == IW_TXPOW_MWATT)
tmp = "mW";
else
tmp = "UNKNOWN";
printk(KERN_DEBUG "Setting TX power to %d %s\n", rrq->value, tmp);
if (rrq->flags != IW_TXPOW_DBM) {
printk("SIOCSIWTXPOW with mW is not supported; use dBm\n");
return -EOPNOTSUPP;
}
local->txpower = rrq->value;
val = prism2_txpower_dBm_to_hfa386x(local->txpower);
if (local->func->cmd(dev, HFA384X_CMDCODE_WRITEMIF,
HFA386X_CR_MANUAL_TX_POWER, &val, NULL))
ret = -EOPNOTSUPP;
#else /* RAW_TXPOWER_SETTING */
if (rrq->fixed)
ret = -EOPNOTSUPP;
#endif /* RAW_TXPOWER_SETTING */
return ret;
}
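/*
 * SIOCGIWTXPOW: report the radio/TX power state; available only in
 * RAW_TXPOWER_SETTING builds, otherwise -EOPNOTSUPP.
 */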
static int prism2_ioctl_giwtxpow(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *rrq, char *extra)
{
#ifdef RAW_TXPOWER_SETTING
struct hostap_interface *iface;
local_info_t *local;
u16 resp0;
iface = netdev_priv(dev);
local = iface->local;
rrq->flags = IW_TXPOW_DBM;
rrq->disabled = 0;
rrq->fixed = 0;
if (local->txpower_type == PRISM2_TXPOWER_AUTO) {
if (local->func->cmd(dev, HFA384X_CMDCODE_READMIF,
HFA386X_CR_MANUAL_TX_POWER,
NULL, &resp0) == 0) {
rrq->value = prism2_txpower_hfa386x_to_dBm(resp0);
} else {
/* Could not get real txpower; guess 15 dBm */
rrq->value = 15;
}
} else if (local->txpower_type == PRISM2_TXPOWER_OFF) {
rrq->value = 0;
rrq->disabled = 1;
} else if (local->txpower_type == PRISM2_TXPOWER_FIXED) {
rrq->value = local->txpower;
rrq->fixed = 1;
} else {
printk("SIOCGIWTXPOW - unknown txpower_type=%d\n",
local->txpower_type);
}
return 0;
#else /* RAW_TXPOWER_SETTING */
return -EOPNOTSUPP;
#endif /* RAW_TXPOWER_SETTING */
}
#ifndef PRISM2_NO_STATION_MODES
/* A HostScan request works with and without host_roaming mode. In addition,
 * it does not break the current association. However, it requires a newer
 * station firmware version (>= 1.3.1) than the plain scan request. */
static int prism2_request_hostscan(struct net_device *dev,
u8 *ssid, u8 ssid_len)
{
struct hostap_interface *iface;
local_info_t *local;
struct hfa384x_hostscan_request scan_req;
iface = netdev_priv(dev);
local = iface->local;
memset(&scan_req, 0, sizeof(scan_req));
scan_req.channel_list = cpu_to_le16(local->channel_mask &
local->scan_channel_mask);
scan_req.txrate = cpu_to_le16(HFA384X_RATES_1MBPS);
if (ssid) {
if (ssid_len > 32)
return -EINVAL;
scan_req.target_ssid_len = cpu_to_le16(ssid_len);
memcpy(scan_req.target_ssid, ssid, ssid_len);
}
if (local->func->set_rid(dev, HFA384X_RID_HOSTSCAN, &scan_req,
sizeof(scan_req))) {
printk(KERN_DEBUG "%s: HOSTSCAN failed\n", dev->name);
return -EINVAL;
}
return 0;
}
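/*
 * Firmware-based ScanRequest for older station firmware. Roaming mode is
 * temporarily forced to host-based so that the request is accepted (see
 * the FIX note below).
 */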
static int prism2_request_scan(struct net_device *dev)
{
struct hostap_interface *iface;
local_info_t *local;
struct hfa384x_scan_request scan_req;
int ret = 0;
iface = netdev_priv(dev);
local = iface->local;
memset(&scan_req, 0, sizeof(scan_req));
scan_req.channel_list = cpu_to_le16(local->channel_mask &
local->scan_channel_mask);
scan_req.txrate = cpu_to_le16(HFA384X_RATES_1MBPS);
	/* FIX:
	 * It seems to be enough to set roaming mode for a short moment to
	 * host-based, then set up the scanrequest data, and finally return
	 * the mode to firmware-based.
	 *
	 * Master mode would need to drop to Managed mode for a short while
	 * to make scanning work, or sweep through the different channels and
	 * use passive scan based on beacons. */
if (!local->host_roaming)
hostap_set_word(dev, HFA384X_RID_CNFROAMINGMODE,
HFA384X_ROAMING_HOST);
if (local->func->set_rid(dev, HFA384X_RID_SCANREQUEST, &scan_req,
sizeof(scan_req))) {
printk(KERN_DEBUG "SCANREQUEST failed\n");
ret = -EINVAL;
}
if (!local->host_roaming)
hostap_set_word(dev, HFA384X_RID_CNFROAMINGMODE,
HFA384X_ROAMING_FIRMWARE);
return ret;
}
#else /* !PRISM2_NO_STATION_MODES */
static inline int prism2_request_hostscan(struct net_device *dev,
u8 *ssid, u8 ssid_len)
{
return -EOPNOTSUPP;
}
static inline int prism2_request_scan(struct net_device *dev)
{
return -EOPNOTSUPP;
}
#endif /* !PRISM2_NO_STATION_MODES */
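/*
 * SIOCSIWSCAN: trigger a scan. HostScan is used with station firmware
 * >= 1.3.1 and plain ScanRequest otherwise; in Master mode the locally
 * maintained tables are used instead of starting a scan.
 */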
static int prism2_ioctl_siwscan(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *data, char *extra)
{
struct hostap_interface *iface;
local_info_t *local;
int ret;
u8 *ssid = NULL, ssid_len = 0;
struct iw_scan_req *req = (struct iw_scan_req *) extra;
iface = netdev_priv(dev);
local = iface->local;
if (data->length < sizeof(struct iw_scan_req))
req = NULL;
if (local->iw_mode == IW_MODE_MASTER) {
/* In master mode, we just return the results of our local
* tables, so we don't need to start anything...
* Jean II */
data->length = 0;
return 0;
}
if (!local->dev_enabled)
return -ENETDOWN;
if (req && data->flags & IW_SCAN_THIS_ESSID) {
ssid = req->essid;
ssid_len = req->essid_len;
if (ssid_len &&
((local->iw_mode != IW_MODE_INFRA &&
local->iw_mode != IW_MODE_ADHOC) ||
(local->sta_fw_ver < PRISM2_FW_VER(1,3,1))))
return -EOPNOTSUPP;
}
if (local->sta_fw_ver >= PRISM2_FW_VER(1,3,1))
ret = prism2_request_hostscan(dev, ssid, ssid_len);
else
ret = prism2_request_scan(dev);
if (ret == 0)
local->scan_timestamp = jiffies;
/* Could inquire F101, F103 or wait for SIOCGIWSCAN and read RID */
return ret;
}
#ifndef PRISM2_NO_STATION_MODES
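/*
 * Build the iwe_stream event list for one scan result. Data is taken
 * from the BSS table entry when available and from the raw firmware
 * scan result otherwise.
 */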
static char * __prism2_translate_scan(local_info_t *local,
struct iw_request_info *info,
struct hfa384x_hostscan_result *scan,
struct hostap_bss_info *bss,
char *current_ev, char *end_buf)
{
int i, chan;
struct iw_event iwe;
char *current_val;
u16 capabilities;
u8 *pos;
u8 *ssid, *bssid;
size_t ssid_len;
char *buf;
if (bss) {
ssid = bss->ssid;
ssid_len = bss->ssid_len;
bssid = bss->bssid;
} else {
ssid = scan->ssid;
ssid_len = le16_to_cpu(scan->ssid_len);
bssid = scan->bssid;
}
if (ssid_len > 32)
ssid_len = 32;
/* First entry *MUST* be the AP MAC address */
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = SIOCGIWAP;
iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
memcpy(iwe.u.ap_addr.sa_data, bssid, ETH_ALEN);
current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
IW_EV_ADDR_LEN);
/* Other entries will be displayed in the order we give them */
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = SIOCGIWESSID;
iwe.u.data.length = ssid_len;
iwe.u.data.flags = 1;
current_ev = iwe_stream_add_point(info, current_ev, end_buf,
&iwe, ssid);
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = SIOCGIWMODE;
if (bss) {
capabilities = bss->capab_info;
} else {
capabilities = le16_to_cpu(scan->capability);
}
if (capabilities & (WLAN_CAPABILITY_ESS |
WLAN_CAPABILITY_IBSS)) {
if (capabilities & WLAN_CAPABILITY_ESS)
iwe.u.mode = IW_MODE_MASTER;
else
iwe.u.mode = IW_MODE_ADHOC;
current_ev = iwe_stream_add_event(info, current_ev, end_buf,
&iwe, IW_EV_UINT_LEN);
}
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = SIOCGIWFREQ;
if (scan) {
chan = le16_to_cpu(scan->chid);
} else if (bss) {
chan = bss->chan;
} else {
chan = 0;
}
if (chan > 0) {
iwe.u.freq.m = freq_list[chan - 1] * 100000;
iwe.u.freq.e = 1;
current_ev = iwe_stream_add_event(info, current_ev, end_buf,
&iwe, IW_EV_FREQ_LEN);
}
if (scan) {
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = IWEVQUAL;
if (local->last_scan_type == PRISM2_HOSTSCAN) {
iwe.u.qual.level = le16_to_cpu(scan->sl);
iwe.u.qual.noise = le16_to_cpu(scan->anl);
} else {
iwe.u.qual.level =
HFA384X_LEVEL_TO_dBm(le16_to_cpu(scan->sl));
iwe.u.qual.noise =
HFA384X_LEVEL_TO_dBm(le16_to_cpu(scan->anl));
}
iwe.u.qual.updated = IW_QUAL_LEVEL_UPDATED
| IW_QUAL_NOISE_UPDATED
| IW_QUAL_QUAL_INVALID
| IW_QUAL_DBM;
current_ev = iwe_stream_add_event(info, current_ev, end_buf,
&iwe, IW_EV_QUAL_LEN);
}
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = SIOCGIWENCODE;
if (capabilities & WLAN_CAPABILITY_PRIVACY)
iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
else
iwe.u.data.flags = IW_ENCODE_DISABLED;
iwe.u.data.length = 0;
current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, "");
/* TODO: add SuppRates into BSS table */
if (scan) {
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = SIOCGIWRATE;
current_val = current_ev + iwe_stream_lcp_len(info);
pos = scan->sup_rates;
for (i = 0; i < sizeof(scan->sup_rates); i++) {
if (pos[i] == 0)
break;
/* Bit rate given in 500 kb/s units (+ 0x80) */
iwe.u.bitrate.value = ((pos[i] & 0x7f) * 500000);
current_val = iwe_stream_add_value(
info, current_ev, current_val, end_buf, &iwe,
IW_EV_PARAM_LEN);
}
/* Check if we added any event */
if ((current_val - current_ev) > iwe_stream_lcp_len(info))
current_ev = current_val;
}
/* TODO: add BeaconInt,resp_rate,atim into BSS table */
buf = kmalloc(MAX_WPA_IE_LEN * 2 + 30, GFP_ATOMIC);
if (buf && scan) {
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = IWEVCUSTOM;
sprintf(buf, "bcn_int=%d", le16_to_cpu(scan->beacon_interval));
iwe.u.data.length = strlen(buf);
current_ev = iwe_stream_add_point(info, current_ev, end_buf,
&iwe, buf);
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = IWEVCUSTOM;
sprintf(buf, "resp_rate=%d", le16_to_cpu(scan->rate));
iwe.u.data.length = strlen(buf);
current_ev = iwe_stream_add_point(info, current_ev, end_buf,
&iwe, buf);
if (local->last_scan_type == PRISM2_HOSTSCAN &&
(capabilities & WLAN_CAPABILITY_IBSS)) {
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = IWEVCUSTOM;
sprintf(buf, "atim=%d", le16_to_cpu(scan->atim));
iwe.u.data.length = strlen(buf);
current_ev = iwe_stream_add_point(info, current_ev,
end_buf, &iwe, buf);
}
}
kfree(buf);
if (bss && bss->wpa_ie_len > 0 && bss->wpa_ie_len <= MAX_WPA_IE_LEN) {
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = IWEVGENIE;
iwe.u.data.length = bss->wpa_ie_len;
current_ev = iwe_stream_add_point(info, current_ev, end_buf,
&iwe, bss->wpa_ie);
}
if (bss && bss->rsn_ie_len > 0 && bss->rsn_ie_len <= MAX_WPA_IE_LEN) {
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = IWEVGENIE;
iwe.u.data.length = bss->rsn_ie_len;
current_ev = iwe_stream_add_point(info, current_ev, end_buf,
&iwe, bss->rsn_ie);
}
return current_ev;
}
/* Translate scan data returned from the card to a card-independent
 * format that the Wireless Tools will understand - Jean II */
static inline int prism2_translate_scan(local_info_t *local,
struct iw_request_info *info,
char *buffer, int buflen)
{
struct hfa384x_hostscan_result *scan;
int entry, hostscan;
char *current_ev = buffer;
char *end_buf = buffer + buflen;
struct list_head *ptr;
spin_lock_bh(&local->lock);
list_for_each(ptr, &local->bss_list) {
struct hostap_bss_info *bss;
bss = list_entry(ptr, struct hostap_bss_info, list);
bss->included = 0;
}
hostscan = local->last_scan_type == PRISM2_HOSTSCAN;
for (entry = 0; entry < local->last_scan_results_count; entry++) {
int found = 0;
scan = &local->last_scan_results[entry];
/* Report every SSID if the AP is using multiple SSIDs. If no
* BSS record is found (e.g., when WPA mode is disabled),
* report the AP once. */
list_for_each(ptr, &local->bss_list) {
struct hostap_bss_info *bss;
bss = list_entry(ptr, struct hostap_bss_info, list);
if (memcmp(bss->bssid, scan->bssid, ETH_ALEN) == 0) {
bss->included = 1;
current_ev = __prism2_translate_scan(
local, info, scan, bss, current_ev,
end_buf);
found++;
}
}
if (!found) {
current_ev = __prism2_translate_scan(
local, info, scan, NULL, current_ev, end_buf);
}
/* Check if there is space for one more entry */
if ((end_buf - current_ev) <= IW_EV_ADDR_LEN) {
/* Ask user space to try again with a bigger buffer */
spin_unlock_bh(&local->lock);
return -E2BIG;
}
}
	/* Prism2 firmware limits the number of BSSes in scan results (to 32
	 * in at least some versions). Extend this limit by also reporting
	 * entries from the local BSS list.
	 */
list_for_each(ptr, &local->bss_list) {
struct hostap_bss_info *bss;
bss = list_entry(ptr, struct hostap_bss_info, list);
if (bss->included)
continue;
current_ev = __prism2_translate_scan(local, info, NULL, bss,
current_ev, end_buf);
/* Check if there is space for one more entry */
if ((end_buf - current_ev) <= IW_EV_ADDR_LEN) {
/* Ask user space to try again with a bigger buffer */
spin_unlock_bh(&local->lock);
return -E2BIG;
}
}
spin_unlock_bh(&local->lock);
return current_ev - buffer;
}
#endif /* PRISM2_NO_STATION_MODES */
static inline int prism2_ioctl_giwscan_sta(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *data, char *extra)
{
#ifdef PRISM2_NO_STATION_MODES
return -EOPNOTSUPP;
#else /* PRISM2_NO_STATION_MODES */
struct hostap_interface *iface;
local_info_t *local;
int res;
iface = netdev_priv(dev);
local = iface->local;
/* Wait until the scan is finished. We can probably do better
* than that - Jean II */
if (local->scan_timestamp &&
time_before(jiffies, local->scan_timestamp + 3 * HZ)) {
/* Important note : we don't want to block the caller
* until results are ready for various reasons.
* First, managing wait queues is complex and racy
* (there may be multiple simultaneous callers).
* Second, we grab some rtnetlink lock before coming
* here (in dev_ioctl()).
* Third, the caller can wait on the Wireless Event
* - Jean II */
return -EAGAIN;
}
local->scan_timestamp = 0;
res = prism2_translate_scan(local, info, extra, data->length);
if (res >= 0) {
data->length = res;
return 0;
} else {
data->length = 0;
return res;
}
#endif /* PRISM2_NO_STATION_MODES */
}
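/*
 * SIOCGIWSCAN: return scan results in Wireless Extensions format. Master
 * mode reports the AP's local tables; other modes report the results of
 * the last firmware scan.
 */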
static int prism2_ioctl_giwscan(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *data, char *extra)
{
struct hostap_interface *iface;
local_info_t *local;
int res;
iface = netdev_priv(dev);
local = iface->local;
if (local->iw_mode == IW_MODE_MASTER) {
		/* In MASTER mode, it doesn't make sense to go around
		 * scanning the frequencies and make the stations we serve
		 * wait when what the user is really interested in is the
		 * list of stations and access points we are talking to.
		 * So, just extract results from our cache...
		 * Jean II */
/* Translate to WE format */
res = prism2_ap_translate_scan(dev, info, extra);
if (res >= 0) {
printk(KERN_DEBUG "Scan result translation succeeded "
"(length=%d)\n", res);
data->length = res;
return 0;
} else {
printk(KERN_DEBUG
"Scan result translation failed (res=%d)\n",
res);
data->length = 0;
return res;
}
} else {
/* Station mode */
return prism2_ioctl_giwscan_sta(dev, info, data, extra);
}
}
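/* Driver-private ioctls and their sub-ioctl parameters for iwpriv. */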
static const struct iw_priv_args prism2_priv[] = {
{ PRISM2_IOCTL_MONITOR,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "monitor" },
{ PRISM2_IOCTL_READMIF,
IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1,
IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, "readmif" },
{ PRISM2_IOCTL_WRITEMIF,
IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 2, 0, "writemif" },
{ PRISM2_IOCTL_RESET,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "reset" },
{ PRISM2_IOCTL_INQUIRE,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "inquire" },
{ PRISM2_IOCTL_SET_RID_WORD,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "set_rid_word" },
{ PRISM2_IOCTL_MACCMD,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "maccmd" },
{ PRISM2_IOCTL_WDS_ADD,
IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "wds_add" },
{ PRISM2_IOCTL_WDS_DEL,
IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "wds_del" },
{ PRISM2_IOCTL_ADDMAC,
IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "addmac" },
{ PRISM2_IOCTL_DELMAC,
IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "delmac" },
{ PRISM2_IOCTL_KICKMAC,
IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "kickmac" },
/* --- raw access to sub-ioctls --- */
{ PRISM2_IOCTL_PRISM2_PARAM,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "prism2_param" },
{ PRISM2_IOCTL_GET_PRISM2_PARAM,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getprism2_param" },
/* --- sub-ioctls handlers --- */
{ PRISM2_IOCTL_PRISM2_PARAM,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "" },
{ PRISM2_IOCTL_GET_PRISM2_PARAM,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "" },
/* --- sub-ioctls definitions --- */
{ PRISM2_PARAM_TXRATECTRL,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "txratectrl" },
{ PRISM2_PARAM_TXRATECTRL,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gettxratectrl" },
{ PRISM2_PARAM_BEACON_INT,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "beacon_int" },
{ PRISM2_PARAM_BEACON_INT,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getbeacon_int" },
#ifndef PRISM2_NO_STATION_MODES
{ PRISM2_PARAM_PSEUDO_IBSS,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "pseudo_ibss" },
{ PRISM2_PARAM_PSEUDO_IBSS,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getpseudo_ibss" },
#endif /* PRISM2_NO_STATION_MODES */
{ PRISM2_PARAM_ALC,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "alc" },
{ PRISM2_PARAM_ALC,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getalc" },
{ PRISM2_PARAM_DUMP,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dump" },
{ PRISM2_PARAM_DUMP,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getdump" },
{ PRISM2_PARAM_OTHER_AP_POLICY,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "other_ap_policy" },
{ PRISM2_PARAM_OTHER_AP_POLICY,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getother_ap_pol" },
{ PRISM2_PARAM_AP_MAX_INACTIVITY,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "max_inactivity" },
{ PRISM2_PARAM_AP_MAX_INACTIVITY,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getmax_inactivi" },
{ PRISM2_PARAM_AP_BRIDGE_PACKETS,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bridge_packets" },
{ PRISM2_PARAM_AP_BRIDGE_PACKETS,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getbridge_packe" },
{ PRISM2_PARAM_DTIM_PERIOD,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dtim_period" },
{ PRISM2_PARAM_DTIM_PERIOD,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getdtim_period" },
{ PRISM2_PARAM_AP_NULLFUNC_ACK,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "nullfunc_ack" },
{ PRISM2_PARAM_AP_NULLFUNC_ACK,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getnullfunc_ack" },
{ PRISM2_PARAM_MAX_WDS,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "max_wds" },
{ PRISM2_PARAM_MAX_WDS,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getmax_wds" },
{ PRISM2_PARAM_AP_AUTOM_AP_WDS,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "autom_ap_wds" },
{ PRISM2_PARAM_AP_AUTOM_AP_WDS,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getautom_ap_wds" },
{ PRISM2_PARAM_AP_AUTH_ALGS,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ap_auth_algs" },
{ PRISM2_PARAM_AP_AUTH_ALGS,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getap_auth_algs" },
{ PRISM2_PARAM_MONITOR_ALLOW_FCSERR,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "allow_fcserr" },
{ PRISM2_PARAM_MONITOR_ALLOW_FCSERR,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getallow_fcserr" },
{ PRISM2_PARAM_HOST_ENCRYPT,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "host_encrypt" },
{ PRISM2_PARAM_HOST_ENCRYPT,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethost_encrypt" },
{ PRISM2_PARAM_HOST_DECRYPT,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "host_decrypt" },
{ PRISM2_PARAM_HOST_DECRYPT,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethost_decrypt" },
#ifndef PRISM2_NO_STATION_MODES
{ PRISM2_PARAM_HOST_ROAMING,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "host_roaming" },
{ PRISM2_PARAM_HOST_ROAMING,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethost_roaming" },
#endif /* PRISM2_NO_STATION_MODES */
{ PRISM2_PARAM_BCRX_STA_KEY,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bcrx_sta_key" },
{ PRISM2_PARAM_BCRX_STA_KEY,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getbcrx_sta_key" },
{ PRISM2_PARAM_IEEE_802_1X,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ieee_802_1x" },
{ PRISM2_PARAM_IEEE_802_1X,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getieee_802_1x" },
{ PRISM2_PARAM_ANTSEL_TX,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "antsel_tx" },
{ PRISM2_PARAM_ANTSEL_TX,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getantsel_tx" },
{ PRISM2_PARAM_ANTSEL_RX,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "antsel_rx" },
{ PRISM2_PARAM_ANTSEL_RX,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getantsel_rx" },
{ PRISM2_PARAM_MONITOR_TYPE,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "monitor_type" },
{ PRISM2_PARAM_MONITOR_TYPE,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getmonitor_type" },
{ PRISM2_PARAM_WDS_TYPE,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wds_type" },
{ PRISM2_PARAM_WDS_TYPE,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getwds_type" },
{ PRISM2_PARAM_HOSTSCAN,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "hostscan" },
{ PRISM2_PARAM_HOSTSCAN,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethostscan" },
{ PRISM2_PARAM_AP_SCAN,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ap_scan" },
{ PRISM2_PARAM_AP_SCAN,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getap_scan" },
{ PRISM2_PARAM_ENH_SEC,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "enh_sec" },
{ PRISM2_PARAM_ENH_SEC,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getenh_sec" },
#ifdef PRISM2_IO_DEBUG
{ PRISM2_PARAM_IO_DEBUG,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "io_debug" },
{ PRISM2_PARAM_IO_DEBUG,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getio_debug" },
#endif /* PRISM2_IO_DEBUG */
{ PRISM2_PARAM_BASIC_RATES,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "basic_rates" },
{ PRISM2_PARAM_BASIC_RATES,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getbasic_rates" },
{ PRISM2_PARAM_OPER_RATES,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "oper_rates" },
{ PRISM2_PARAM_OPER_RATES,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getoper_rates" },
{ PRISM2_PARAM_HOSTAPD,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "hostapd" },
{ PRISM2_PARAM_HOSTAPD,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethostapd" },
{ PRISM2_PARAM_HOSTAPD_STA,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "hostapd_sta" },
{ PRISM2_PARAM_HOSTAPD_STA,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethostapd_sta" },
{ PRISM2_PARAM_WPA,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wpa" },
{ PRISM2_PARAM_WPA,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getwpa" },
{ PRISM2_PARAM_PRIVACY_INVOKED,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "privacy_invoked" },
{ PRISM2_PARAM_PRIVACY_INVOKED,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getprivacy_invo" },
{ PRISM2_PARAM_TKIP_COUNTERMEASURES,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "tkip_countermea" },
{ PRISM2_PARAM_TKIP_COUNTERMEASURES,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gettkip_counter" },
{ PRISM2_PARAM_DROP_UNENCRYPTED,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "drop_unencrypte" },
{ PRISM2_PARAM_DROP_UNENCRYPTED,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getdrop_unencry" },
{ PRISM2_PARAM_SCAN_CHANNEL_MASK,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "scan_channels" },
{ PRISM2_PARAM_SCAN_CHANNEL_MASK,
0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getscan_channel" },
};
static int prism2_ioctl_priv_inquire(struct net_device *dev, int *i)
{
struct hostap_interface *iface;
local_info_t *local;
iface = netdev_priv(dev);
local = iface->local;
if (local->func->cmd(dev, HFA384X_CMDCODE_INQUIRE, *i, NULL, NULL))
return -EOPNOTSUPP;
return 0;
}
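/* Handler for the prism2_param sub-ioctls: set one driver parameter. */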
static int prism2_ioctl_priv_prism2_param(struct net_device *dev,
struct iw_request_info *info,
void *wrqu, char *extra)
{
struct hostap_interface *iface;
local_info_t *local;
int *i = (int *) extra;
int param = *i;
int value = *(i + 1);
int ret = 0;
u16 val;
iface = netdev_priv(dev);
local = iface->local;
switch (param) {
case PRISM2_PARAM_TXRATECTRL:
local->fw_tx_rate_control = value;
break;
case PRISM2_PARAM_BEACON_INT:
if (hostap_set_word(dev, HFA384X_RID_CNFBEACONINT, value) ||
local->func->reset_port(dev))
ret = -EINVAL;
else
local->beacon_int = value;
break;
#ifndef PRISM2_NO_STATION_MODES
case PRISM2_PARAM_PSEUDO_IBSS:
if (value == local->pseudo_adhoc)
break;
if (value != 0 && value != 1) {
ret = -EINVAL;
break;
}
printk(KERN_DEBUG "prism2: %s: pseudo IBSS change %d -> %d\n",
dev->name, local->pseudo_adhoc, value);
local->pseudo_adhoc = value;
if (local->iw_mode != IW_MODE_ADHOC)
break;
if (hostap_set_word(dev, HFA384X_RID_CNFPORTTYPE,
hostap_get_porttype(local))) {
ret = -EOPNOTSUPP;
break;
}
if (local->func->reset_port(dev))
ret = -EINVAL;
break;
#endif /* PRISM2_NO_STATION_MODES */
case PRISM2_PARAM_ALC:
printk(KERN_DEBUG "%s: %s ALC\n", dev->name,
value == 0 ? "Disabling" : "Enabling");
val = HFA384X_TEST_CFG_BIT_ALC;
local->func->cmd(dev, HFA384X_CMDCODE_TEST |
(HFA384X_TEST_CFG_BITS << 8),
value == 0 ? 0 : 1, &val, NULL);
break;
case PRISM2_PARAM_DUMP:
local->frame_dump = value;
break;
case PRISM2_PARAM_OTHER_AP_POLICY:
if (value < 0 || value > 3) {
ret = -EINVAL;
break;
}
if (local->ap != NULL)
local->ap->ap_policy = value;
break;
case PRISM2_PARAM_AP_MAX_INACTIVITY:
if (value < 0 || value > 7 * 24 * 60 * 60) {
ret = -EINVAL;
break;
}
if (local->ap != NULL)
local->ap->max_inactivity = value * HZ;
break;
case PRISM2_PARAM_AP_BRIDGE_PACKETS:
if (local->ap != NULL)
local->ap->bridge_packets = value;
break;
case PRISM2_PARAM_DTIM_PERIOD:
if (value < 0 || value > 65535) {
ret = -EINVAL;
break;
}
if (hostap_set_word(dev, HFA384X_RID_CNFOWNDTIMPERIOD, value)
|| local->func->reset_port(dev))
ret = -EINVAL;
else
local->dtim_period = value;
break;
case PRISM2_PARAM_AP_NULLFUNC_ACK:
if (local->ap != NULL)
local->ap->nullfunc_ack = value;
break;
case PRISM2_PARAM_MAX_WDS:
local->wds_max_connections = value;
break;
case PRISM2_PARAM_AP_AUTOM_AP_WDS:
if (local->ap != NULL) {
if (!local->ap->autom_ap_wds && value) {
/* add WDS link to all APs in STA table */
hostap_add_wds_links(local);
}
local->ap->autom_ap_wds = value;
}
break;
case PRISM2_PARAM_AP_AUTH_ALGS:
local->auth_algs = value;
if (hostap_set_auth_algs(local))
ret = -EINVAL;
break;
case PRISM2_PARAM_MONITOR_ALLOW_FCSERR:
local->monitor_allow_fcserr = value;
break;
case PRISM2_PARAM_HOST_ENCRYPT:
local->host_encrypt = value;
if (hostap_set_encryption(local) ||
local->func->reset_port(dev))
ret = -EINVAL;
break;
case PRISM2_PARAM_HOST_DECRYPT:
local->host_decrypt = value;
if (hostap_set_encryption(local) ||
local->func->reset_port(dev))
ret = -EINVAL;
break;
#ifndef PRISM2_NO_STATION_MODES
case PRISM2_PARAM_HOST_ROAMING:
if (value < 0 || value > 2) {
ret = -EINVAL;
break;
}
local->host_roaming = value;
if (hostap_set_roaming(local) || local->func->reset_port(dev))
ret = -EINVAL;
break;
#endif /* PRISM2_NO_STATION_MODES */
case PRISM2_PARAM_BCRX_STA_KEY:
local->bcrx_sta_key = value;
break;
case PRISM2_PARAM_IEEE_802_1X:
local->ieee_802_1x = value;
break;
case PRISM2_PARAM_ANTSEL_TX:
if (value < 0 || value > HOSTAP_ANTSEL_HIGH) {
ret = -EINVAL;
break;
}
local->antsel_tx = value;
hostap_set_antsel(local);
break;
case PRISM2_PARAM_ANTSEL_RX:
if (value < 0 || value > HOSTAP_ANTSEL_HIGH) {
ret = -EINVAL;
break;
}
local->antsel_rx = value;
hostap_set_antsel(local);
break;
case PRISM2_PARAM_MONITOR_TYPE:
if (value != PRISM2_MONITOR_80211 &&
value != PRISM2_MONITOR_CAPHDR &&
value != PRISM2_MONITOR_PRISM &&
value != PRISM2_MONITOR_RADIOTAP) {
ret = -EINVAL;
break;
}
local->monitor_type = value;
if (local->iw_mode == IW_MODE_MONITOR)
hostap_monitor_set_type(local);
break;
case PRISM2_PARAM_WDS_TYPE:
local->wds_type = value;
break;
case PRISM2_PARAM_HOSTSCAN:
{
struct hfa384x_hostscan_request scan_req;
u16 rate;
memset(&scan_req, 0, sizeof(scan_req));
scan_req.channel_list = cpu_to_le16(0x3fff);
switch (value) {
case 1: rate = HFA384X_RATES_1MBPS; break;
case 2: rate = HFA384X_RATES_2MBPS; break;
case 3: rate = HFA384X_RATES_5MBPS; break;
case 4: rate = HFA384X_RATES_11MBPS; break;
default: rate = HFA384X_RATES_1MBPS; break;
}
scan_req.txrate = cpu_to_le16(rate);
/* leave SSID empty to accept all SSIDs */
if (local->iw_mode == IW_MODE_MASTER) {
if (hostap_set_word(dev, HFA384X_RID_CNFPORTTYPE,
HFA384X_PORTTYPE_BSS) ||
local->func->reset_port(dev))
printk(KERN_DEBUG "Leaving Host AP mode "
"for HostScan failed\n");
}
if (local->func->set_rid(dev, HFA384X_RID_HOSTSCAN, &scan_req,
sizeof(scan_req))) {
printk(KERN_DEBUG "HOSTSCAN failed\n");
ret = -EINVAL;
}
if (local->iw_mode == IW_MODE_MASTER) {
wait_queue_t __wait;
init_waitqueue_entry(&__wait, current);
add_wait_queue(&local->hostscan_wq, &__wait);
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(HZ);
if (signal_pending(current))
ret = -EINTR;
set_current_state(TASK_RUNNING);
remove_wait_queue(&local->hostscan_wq, &__wait);
if (hostap_set_word(dev, HFA384X_RID_CNFPORTTYPE,
HFA384X_PORTTYPE_HOSTAP) ||
local->func->reset_port(dev))
printk(KERN_DEBUG "Returning to Host AP mode "
"after HostScan failed\n");
}
break;
}
case PRISM2_PARAM_AP_SCAN:
local->passive_scan_interval = value;
if (timer_pending(&local->passive_scan_timer))
del_timer(&local->passive_scan_timer);
if (value > 0) {
local->passive_scan_timer.expires = jiffies +
local->passive_scan_interval * HZ;
add_timer(&local->passive_scan_timer);
}
break;
case PRISM2_PARAM_ENH_SEC:
if (value < 0 || value > 3) {
ret = -EINVAL;
break;
}
local->enh_sec = value;
if (hostap_set_word(dev, HFA384X_RID_CNFENHSECURITY,
local->enh_sec) ||
local->func->reset_port(dev)) {
printk(KERN_INFO "%s: cnfEnhSecurity requires STA f/w "
"1.6.3 or newer\n", dev->name);
ret = -EOPNOTSUPP;
}
break;
#ifdef PRISM2_IO_DEBUG
case PRISM2_PARAM_IO_DEBUG:
local->io_debug_enabled = value;
break;
#endif /* PRISM2_IO_DEBUG */
case PRISM2_PARAM_BASIC_RATES:
if ((value & local->tx_rate_control) != value || value == 0) {
printk(KERN_INFO "%s: invalid basic rate set - basic "
"rates must be in supported rate set\n",
dev->name);
ret = -EINVAL;
break;
}
local->basic_rates = value;
if (hostap_set_word(dev, HFA384X_RID_CNFBASICRATES,
local->basic_rates) ||
local->func->reset_port(dev))
ret = -EINVAL;
break;
case PRISM2_PARAM_OPER_RATES:
local->tx_rate_control = value;
if (hostap_set_rate(dev))
ret = -EINVAL;
break;
case PRISM2_PARAM_HOSTAPD:
ret = hostap_set_hostapd(local, value, 1);
break;
case PRISM2_PARAM_HOSTAPD_STA:
ret = hostap_set_hostapd_sta(local, value, 1);
break;
case PRISM2_PARAM_WPA:
local->wpa = value;
if (local->sta_fw_ver < PRISM2_FW_VER(1,7,0))
ret = -EOPNOTSUPP;
else if (hostap_set_word(dev, HFA384X_RID_SSNHANDLINGMODE,
value ? 1 : 0))
ret = -EINVAL;
break;
case PRISM2_PARAM_PRIVACY_INVOKED:
local->privacy_invoked = value;
if (hostap_set_encryption(local) ||
local->func->reset_port(dev))
ret = -EINVAL;
break;
case PRISM2_PARAM_TKIP_COUNTERMEASURES:
local->tkip_countermeasures = value;
break;
case PRISM2_PARAM_DROP_UNENCRYPTED:
local->drop_unencrypted = value;
break;
case PRISM2_PARAM_SCAN_CHANNEL_MASK:
local->scan_channel_mask = value;
break;
default:
printk(KERN_DEBUG "%s: prism2_param: unknown param %d\n",
dev->name, param);
ret = -EOPNOTSUPP;
break;
}
return ret;
}
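/* Handler for the getprism2_param sub-ioctls: read one driver parameter. */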
static int prism2_ioctl_priv_get_prism2_param(struct net_device *dev,
struct iw_request_info *info,
void *wrqu, char *extra)
{
struct hostap_interface *iface;
local_info_t *local;
int *param = (int *) extra;
int ret = 0;
iface = netdev_priv(dev);
local = iface->local;
switch (*param) {
case PRISM2_PARAM_TXRATECTRL:
*param = local->fw_tx_rate_control;
break;
case PRISM2_PARAM_BEACON_INT:
*param = local->beacon_int;
break;
case PRISM2_PARAM_PSEUDO_IBSS:
*param = local->pseudo_adhoc;
break;
case PRISM2_PARAM_ALC:
ret = -EOPNOTSUPP; /* FIX */
break;
case PRISM2_PARAM_DUMP:
*param = local->frame_dump;
break;
case PRISM2_PARAM_OTHER_AP_POLICY:
if (local->ap != NULL)
*param = local->ap->ap_policy;
else
ret = -EOPNOTSUPP;
break;
case PRISM2_PARAM_AP_MAX_INACTIVITY:
if (local->ap != NULL)
*param = local->ap->max_inactivity / HZ;
else
ret = -EOPNOTSUPP;
break;
case PRISM2_PARAM_AP_BRIDGE_PACKETS:
if (local->ap != NULL)
*param = local->ap->bridge_packets;
else
ret = -EOPNOTSUPP;
break;
case PRISM2_PARAM_DTIM_PERIOD:
*param = local->dtim_period;
break;
case PRISM2_PARAM_AP_NULLFUNC_ACK:
if (local->ap != NULL)
*param = local->ap->nullfunc_ack;
else
ret = -EOPNOTSUPP;
break;
case PRISM2_PARAM_MAX_WDS:
*param = local->wds_max_connections;
break;
case PRISM2_PARAM_AP_AUTOM_AP_WDS:
if (local->ap != NULL)
*param = local->ap->autom_ap_wds;
else
ret = -EOPNOTSUPP;
break;
case PRISM2_PARAM_AP_AUTH_ALGS:
*param = local->auth_algs;
break;
case PRISM2_PARAM_MONITOR_ALLOW_FCSERR:
*param = local->monitor_allow_fcserr;
break;
case PRISM2_PARAM_HOST_ENCRYPT:
*param = local->host_encrypt;
break;
case PRISM2_PARAM_HOST_DECRYPT:
*param = local->host_decrypt;
break;
case PRISM2_PARAM_HOST_ROAMING:
*param = local->host_roaming;
break;
case PRISM2_PARAM_BCRX_STA_KEY:
*param = local->bcrx_sta_key;
break;
case PRISM2_PARAM_IEEE_802_1X:
*param = local->ieee_802_1x;
break;
case PRISM2_PARAM_ANTSEL_TX:
*param = local->antsel_tx;
break;
case PRISM2_PARAM_ANTSEL_RX:
*param = local->antsel_rx;
break;
case PRISM2_PARAM_MONITOR_TYPE:
*param = local->monitor_type;
break;
case PRISM2_PARAM_WDS_TYPE:
*param = local->wds_type;
break;
case PRISM2_PARAM_HOSTSCAN:
ret = -EOPNOTSUPP;
break;
case PRISM2_PARAM_AP_SCAN:
*param = local->passive_scan_interval;
break;
case PRISM2_PARAM_ENH_SEC:
*param = local->enh_sec;
break;
#ifdef PRISM2_IO_DEBUG
case PRISM2_PARAM_IO_DEBUG:
*param = local->io_debug_enabled;
break;
#endif /* PRISM2_IO_DEBUG */
case PRISM2_PARAM_BASIC_RATES:
*param = local->basic_rates;
break;
case PRISM2_PARAM_OPER_RATES:
*param = local->tx_rate_control;
break;
case PRISM2_PARAM_HOSTAPD:
*param = local->hostapd;
break;
case PRISM2_PARAM_HOSTAPD_STA:
*param = local->hostapd_sta;
break;
case PRISM2_PARAM_WPA:
if (local->sta_fw_ver < PRISM2_FW_VER(1,7,0))
ret = -EOPNOTSUPP;
*param = local->wpa;
break;
case PRISM2_PARAM_PRIVACY_INVOKED:
*param = local->privacy_invoked;
break;
case PRISM2_PARAM_TKIP_COUNTERMEASURES:
*param = local->tkip_countermeasures;
break;
case PRISM2_PARAM_DROP_UNENCRYPTED:
*param = local->drop_unencrypted;
break;
case PRISM2_PARAM_SCAN_CHANNEL_MASK:
*param = local->scan_channel_mask;
break;
default:
printk(KERN_DEBUG "%s: get_prism2_param: unknown param %d\n",
dev->name, *param);
ret = -EOPNOTSUPP;
break;
}
return ret;
}
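/* iwpriv readmif: read one hardware register (CR) with the ReadMIF command. */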
static int prism2_ioctl_priv_readmif(struct net_device *dev,
struct iw_request_info *info,
void *wrqu, char *extra)
{
struct hostap_interface *iface;
local_info_t *local;
u16 resp0;
iface = netdev_priv(dev);
local = iface->local;
if (local->func->cmd(dev, HFA384X_CMDCODE_READMIF, *extra, NULL,
&resp0))
return -EOPNOTSUPP;
else
*extra = resp0;
return 0;
}
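/* iwpriv writemif: write one hardware register (CR) with the WriteMIF
 * command. */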
static int prism2_ioctl_priv_writemif(struct net_device *dev,
struct iw_request_info *info,
void *wrqu, char *extra)
{
struct hostap_interface *iface;
local_info_t *local;
u16 cr, val;
iface = netdev_priv(dev);
local = iface->local;
cr = *extra;
val = *(extra + 1);
if (local->func->cmd(dev, HFA384X_CMDCODE_WRITEMIF, cr, &val, NULL))
return -EOPNOTSUPP;
return 0;
}
static int prism2_ioctl_priv_monitor(struct net_device *dev, int *i)
{
struct hostap_interface *iface;
local_info_t *local;
int ret = 0;
u32 mode;
iface = netdev_priv(dev);
local = iface->local;
printk(KERN_DEBUG "%s: process %d (%s) used deprecated iwpriv monitor "
"- update software to use iwconfig mode monitor\n",
dev->name, task_pid_nr(current), current->comm);
/* Backward compatibility code - this can be removed at some point */
if (*i == 0) {
/* Disable monitor mode - old mode was not saved, so go to
* Master mode */
mode = IW_MODE_MASTER;
ret = prism2_ioctl_siwmode(dev, NULL, &mode, NULL);
} else if (*i == 1) {
		/* The netlink socket mode is not supported anymore since it
		 * did not separate different devices from each other and was
		 * not the best method for delivering large amounts of
		 * packets to user space */
ret = -EOPNOTSUPP;
} else if (*i == 2 || *i == 3) {
switch (*i) {
case 2:
local->monitor_type = PRISM2_MONITOR_80211;
break;
case 3:
local->monitor_type = PRISM2_MONITOR_PRISM;
break;
}
mode = IW_MODE_MONITOR;
ret = prism2_ioctl_siwmode(dev, NULL, &mode, NULL);
hostap_monitor_mode_enable(local);
} else
ret = -EINVAL;
return ret;
}
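/*
 * iwpriv reset levels: 0 = full shutdown and re-configuration, 1 = COR
 * reset, 2 = port reset, 3 = deauthenticate and disable MAC, 4 = enable
 * MAC.
 */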
static int prism2_ioctl_priv_reset(struct net_device *dev, int *i)
{
struct hostap_interface *iface;
local_info_t *local;
iface = netdev_priv(dev);
local = iface->local;
printk(KERN_DEBUG "%s: manual reset request(%d)\n", dev->name, *i);
switch (*i) {
case 0:
/* Disable and enable card */
local->func->hw_shutdown(dev, 1);
local->func->hw_config(dev, 0);
break;
case 1:
		/* COR reset */
local->func->hw_reset(dev);
break;
case 2:
/* Disable and enable port 0 */
local->func->reset_port(dev);
break;
case 3:
prism2_sta_deauth(local, WLAN_REASON_DEAUTH_LEAVING);
if (local->func->cmd(dev, HFA384X_CMDCODE_DISABLE, 0, NULL,
NULL))
return -EINVAL;
break;
case 4:
if (local->func->cmd(dev, HFA384X_CMDCODE_ENABLE, 0, NULL,
NULL))
return -EINVAL;
break;
default:
printk(KERN_DEBUG "Unknown reset request %d\n", *i);
return -EOPNOTSUPP;
}
return 0;
}
static int prism2_ioctl_priv_set_rid_word(struct net_device *dev, int *i)
{
int rid = *i;
int value = *(i + 1);
printk(KERN_DEBUG "%s: Set RID[0x%X] = %d\n", dev->name, rid, value);
if (hostap_set_word(dev, rid, value))
return -EINVAL;
return 0;
}
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
static int ap_mac_cmd_ioctl(local_info_t *local, int *cmd)
{
int ret = 0;
switch (*cmd) {
case AP_MAC_CMD_POLICY_OPEN:
local->ap->mac_restrictions.policy = MAC_POLICY_OPEN;
break;
case AP_MAC_CMD_POLICY_ALLOW:
local->ap->mac_restrictions.policy = MAC_POLICY_ALLOW;
break;
case AP_MAC_CMD_POLICY_DENY:
local->ap->mac_restrictions.policy = MAC_POLICY_DENY;
break;
case AP_MAC_CMD_FLUSH:
ap_control_flush_macs(&local->ap->mac_restrictions);
break;
case AP_MAC_CMD_KICKALL:
ap_control_kickall(local->ap);
hostap_deauth_all_stas(local->dev, local->ap, 0);
break;
default:
ret = -EOPNOTSUPP;
break;
}
return ret;
}
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
#ifdef PRISM2_DOWNLOAD_SUPPORT
static int prism2_ioctl_priv_download(local_info_t *local, struct iw_point *p)
{
struct prism2_download_param *param;
int ret = 0;
if (p->length < sizeof(struct prism2_download_param) ||
p->length > 1024 || !p->pointer)
return -EINVAL;
param = kmalloc(p->length, GFP_KERNEL);
if (param == NULL)
return -ENOMEM;
if (copy_from_user(param, p->pointer, p->length)) {
ret = -EFAULT;
goto out;
}
if (p->length < sizeof(struct prism2_download_param) +
param->num_areas * sizeof(struct prism2_download_area)) {
ret = -EINVAL;
goto out;
}
ret = local->func->download(local, param);
out:
kfree(param);
return ret;
}
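/*
* Note on the validation above: p->length is range-checked before the
* copy, and num_areas (which only becomes trustworthy after
* copy_from_user()) is re-checked against p->length afterwards, so a
* caller cannot declare more download areas than the buffer it
* actually supplied.
*/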
#endif /* PRISM2_DOWNLOAD_SUPPORT */
static int prism2_set_genericelement(struct net_device *dev, u8 *elem,
size_t len)
{
struct hostap_interface *iface = netdev_priv(dev);
local_info_t *local = iface->local;
u8 *buf;
/*
* Add a 16-bit length at the beginning of the buffer because the
* Prism2 RID includes it.
*/
buf = kmalloc(len + 2, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
*((__le16 *) buf) = cpu_to_le16(len);
memcpy(buf + 2, elem, len);
kfree(local->generic_elem);
local->generic_elem = buf;
local->generic_elem_len = len + 2;
return local->func->set_rid(local->dev, HFA384X_RID_GENERICELEMENT,
buf, len + 2);
}
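/*
* Sketch (compiled out) of the prefixing step above as a standalone
* helper: the Prism2 RID payload is a little-endian 16-bit length
* followed by the element body, len + 2 bytes in total. The helper
* name is illustrative; the driver itself keeps the buffer around in
* local->generic_elem.
*/
#if 0
static u8 *prefix_with_le16_len(const u8 *elem, size_t len)
{
	u8 *buf = kmalloc(len + 2, GFP_KERNEL);
	if (!buf)
		return NULL;
	buf[0] = len & 0xff;		/* cpu_to_le16() spelled out: */
	buf[1] = (len >> 8) & 0xff;	/* low byte stored first */
	memcpy(buf + 2, elem, len);
	return buf;
}
#endif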
static int prism2_ioctl_siwauth(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *data, char *extra)
{
struct hostap_interface *iface = netdev_priv(dev);
local_info_t *local = iface->local;
switch (data->flags & IW_AUTH_INDEX) {
case IW_AUTH_WPA_VERSION:
case IW_AUTH_CIPHER_PAIRWISE:
case IW_AUTH_CIPHER_GROUP:
case IW_AUTH_KEY_MGMT:
/*
* Host AP driver does not use these parameters and allows
* wpa_supplicant to control them internally.
*/
break;
case IW_AUTH_TKIP_COUNTERMEASURES:
local->tkip_countermeasures = data->value;
break;
case IW_AUTH_DROP_UNENCRYPTED:
local->drop_unencrypted = data->value;
break;
case IW_AUTH_80211_AUTH_ALG:
local->auth_algs = data->value;
break;
case IW_AUTH_WPA_ENABLED:
if (data->value == 0) {
local->wpa = 0;
if (local->sta_fw_ver < PRISM2_FW_VER(1,7,0))
break;
prism2_set_genericelement(dev, "", 0);
local->host_roaming = 0;
local->privacy_invoked = 0;
if (hostap_set_word(dev, HFA384X_RID_SSNHANDLINGMODE,
0) ||
hostap_set_roaming(local) ||
hostap_set_encryption(local) ||
local->func->reset_port(dev))
return -EINVAL;
break;
}
if (local->sta_fw_ver < PRISM2_FW_VER(1,7,0))
return -EOPNOTSUPP;
local->host_roaming = 2;
local->privacy_invoked = 1;
local->wpa = 1;
if (hostap_set_word(dev, HFA384X_RID_SSNHANDLINGMODE, 1) ||
hostap_set_roaming(local) ||
hostap_set_encryption(local) ||
local->func->reset_port(dev))
return -EINVAL;
break;
case IW_AUTH_RX_UNENCRYPTED_EAPOL:
local->ieee_802_1x = data->value;
break;
case IW_AUTH_PRIVACY_INVOKED:
local->privacy_invoked = data->value;
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int prism2_ioctl_giwauth(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *data, char *extra)
{
struct hostap_interface *iface = netdev_priv(dev);
local_info_t *local = iface->local;
switch (data->flags & IW_AUTH_INDEX) {
case IW_AUTH_WPA_VERSION:
case IW_AUTH_CIPHER_PAIRWISE:
case IW_AUTH_CIPHER_GROUP:
case IW_AUTH_KEY_MGMT:
/*
* Host AP driver does not use these parameters and allows
* wpa_supplicant to control them internally.
*/
return -EOPNOTSUPP;
case IW_AUTH_TKIP_COUNTERMEASURES:
data->value = local->tkip_countermeasures;
break;
case IW_AUTH_DROP_UNENCRYPTED:
data->value = local->drop_unencrypted;
break;
case IW_AUTH_80211_AUTH_ALG:
data->value = local->auth_algs;
break;
case IW_AUTH_WPA_ENABLED:
data->value = local->wpa;
break;
case IW_AUTH_RX_UNENCRYPTED_EAPOL:
data->value = local->ieee_802_1x;
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int prism2_ioctl_siwencodeext(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *erq, char *extra)
{
struct hostap_interface *iface = netdev_priv(dev);
local_info_t *local = iface->local;
struct iw_encode_ext *ext = (struct iw_encode_ext *) extra;
int i, ret = 0;
struct lib80211_crypto_ops *ops;
struct lib80211_crypt_data **crypt;
void *sta_ptr;
u8 *addr;
const char *alg, *module;
i = erq->flags & IW_ENCODE_INDEX;
if (i > WEP_KEYS)
return -EINVAL;
if (i < 1 || i > WEP_KEYS)
i = local->crypt_info.tx_keyidx;
else
i--;
if (i < 0 || i >= WEP_KEYS)
return -EINVAL;
addr = ext->addr.sa_data;
if (is_broadcast_ether_addr(addr)) {
sta_ptr = NULL;
crypt = &local->crypt_info.crypt[i];
} else {
if (i != 0)
return -EINVAL;
sta_ptr = ap_crypt_get_ptrs(local->ap, addr, 0, &crypt);
if (sta_ptr == NULL) {
if (local->iw_mode == IW_MODE_INFRA) {
/*
* TODO: add STA entry for the current AP so
* that unicast key can be used. For now, this
* is emulated by using default key idx 0.
*/
i = 0;
crypt = &local->crypt_info.crypt[i];
} else
return -EINVAL;
}
}
if ((erq->flags & IW_ENCODE_DISABLED) ||
ext->alg == IW_ENCODE_ALG_NONE) {
if (*crypt)
lib80211_crypt_delayed_deinit(&local->crypt_info, crypt);
goto done;
}
switch (ext->alg) {
case IW_ENCODE_ALG_WEP:
alg = "WEP";
module = "lib80211_crypt_wep";
break;
case IW_ENCODE_ALG_TKIP:
alg = "TKIP";
module = "lib80211_crypt_tkip";
break;
case IW_ENCODE_ALG_CCMP:
alg = "CCMP";
module = "lib80211_crypt_ccmp";
break;
default:
printk(KERN_DEBUG "%s: unsupported algorithm %d\n",
local->dev->name, ext->alg);
ret = -EOPNOTSUPP;
goto done;
}
ops = lib80211_get_crypto_ops(alg);
if (ops == NULL) {
request_module(module);
ops = lib80211_get_crypto_ops(alg);
}
if (ops == NULL) {
printk(KERN_DEBUG "%s: unknown crypto alg '%s'\n",
local->dev->name, alg);
ret = -EOPNOTSUPP;
goto done;
}
if (sta_ptr || ext->alg != IW_ENCODE_ALG_WEP) {
/*
* Per-station encryption and algorithms other than WEP
* require host-based encryption, so force it on
* automatically.
*/
local->host_decrypt = local->host_encrypt = 1;
}
if (*crypt == NULL || (*crypt)->ops != ops) {
struct lib80211_crypt_data *new_crypt;
lib80211_crypt_delayed_deinit(&local->crypt_info, crypt);
new_crypt = kzalloc(sizeof(struct lib80211_crypt_data),
GFP_KERNEL);
if (new_crypt == NULL) {
ret = -ENOMEM;
goto done;
}
new_crypt->ops = ops;
if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
new_crypt->priv = new_crypt->ops->init(i);
if (new_crypt->priv == NULL) {
kfree(new_crypt);
ret = -EINVAL;
goto done;
}
*crypt = new_crypt;
}
/*
* TODO: if ext_flags does not have IW_ENCODE_EXT_RX_SEQ_VALID, the
* existing seq# should not be changed.
* TODO: if ext_flags has IW_ENCODE_EXT_TX_SEQ_VALID, next TX seq#
* should be changed to something else than zero.
*/
if ((!(ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) || ext->key_len > 0)
&& (*crypt)->ops->set_key &&
(*crypt)->ops->set_key(ext->key, ext->key_len, ext->rx_seq,
(*crypt)->priv) < 0) {
printk(KERN_DEBUG "%s: key setting failed\n",
local->dev->name);
ret = -EINVAL;
goto done;
}
if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
if (!sta_ptr)
local->crypt_info.tx_keyidx = i;
}
if (sta_ptr == NULL && ext->key_len > 0) {
int first = 1, j;
for (j = 0; j < WEP_KEYS; j++) {
if (j != i && local->crypt_info.crypt[j]) {
first = 0;
break;
}
}
if (first)
local->crypt_info.tx_keyidx = i;
}
done:
if (sta_ptr)
hostap_handle_sta_release(sta_ptr);
local->open_wep = erq->flags & IW_ENCODE_OPEN;
/*
* Do not reset port0 if the card is in Managed mode, since resetting
* would trigger a new IEEE 802.11 authentication which may end up
* looping with IEEE 802.1X. Prism2 documentation seems to require a
* port reset after WEP configuration; however, keys are apparently
* changed even without one, at least in Managed mode.
*/
if (ret == 0 &&
(hostap_set_encryption(local) ||
(local->iw_mode != IW_MODE_INFRA &&
local->func->reset_port(local->dev))))
ret = -EINVAL;
return ret;
}
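/*
* Sketch (compiled out) of the WEXT key-index convention handled at
* the top of prism2_ioctl_siwencodeext(): user space numbers keys
* 1..WEP_KEYS in erq->flags, with 0 meaning "current TX key", while
* the driver stores them 0-based. Helper name is illustrative.
*/
#if 0
static int decode_wext_key_index(u16 flags, int tx_keyidx)
{
	int i = flags & IW_ENCODE_INDEX;
	if (i > WEP_KEYS)	/* 1-based index beyond last key */
		return -EINVAL;
	if (i < 1)		/* 0: fall back to current TX key */
		i = tx_keyidx;
	else
		i--;		/* convert 1-based to 0-based */
	if (i < 0 || i >= WEP_KEYS)
		return -EINVAL;
	return i;
}
#endif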
static int prism2_ioctl_giwencodeext(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *erq, char *extra)
{
struct hostap_interface *iface = netdev_priv(dev);
local_info_t *local = iface->local;
struct lib80211_crypt_data **crypt;
void *sta_ptr;
int max_key_len, i;
struct iw_encode_ext *ext = (struct iw_encode_ext *) extra;
u8 *addr;
max_key_len = erq->length - sizeof(*ext);
if (max_key_len < 0)
return -EINVAL;
i = erq->flags & IW_ENCODE_INDEX;
if (i < 1 || i > WEP_KEYS)
i = local->crypt_info.tx_keyidx;
else
i--;
addr = ext->addr.sa_data;
if (is_broadcast_ether_addr(addr)) {
sta_ptr = NULL;
crypt = &local->crypt_info.crypt[i];
} else {
i = 0;
sta_ptr = ap_crypt_get_ptrs(local->ap, addr, 0, &crypt);
if (sta_ptr == NULL)
return -EINVAL;
}
erq->flags = i + 1;
memset(ext, 0, sizeof(*ext));
if (*crypt == NULL || (*crypt)->ops == NULL) {
ext->alg = IW_ENCODE_ALG_NONE;
ext->key_len = 0;
erq->flags |= IW_ENCODE_DISABLED;
} else {
if (strcmp((*crypt)->ops->name, "WEP") == 0)
ext->alg = IW_ENCODE_ALG_WEP;
else if (strcmp((*crypt)->ops->name, "TKIP") == 0)
ext->alg = IW_ENCODE_ALG_TKIP;
else if (strcmp((*crypt)->ops->name, "CCMP") == 0)
ext->alg = IW_ENCODE_ALG_CCMP;
else
return -EINVAL;
if ((*crypt)->ops->get_key) {
ext->key_len =
(*crypt)->ops->get_key(ext->key,
max_key_len,
ext->tx_seq,
(*crypt)->priv);
if (ext->key_len &&
(ext->alg == IW_ENCODE_ALG_TKIP ||
ext->alg == IW_ENCODE_ALG_CCMP))
ext->ext_flags |= IW_ENCODE_EXT_TX_SEQ_VALID;
}
}
if (sta_ptr)
hostap_handle_sta_release(sta_ptr);
return 0;
}
static int prism2_ioctl_set_encryption(local_info_t *local,
struct prism2_hostapd_param *param,
int param_len)
{
int ret = 0;
struct lib80211_crypto_ops *ops;
struct lib80211_crypt_data **crypt;
void *sta_ptr;
param->u.crypt.err = 0;
param->u.crypt.alg[HOSTAP_CRYPT_ALG_NAME_LEN - 1] = '\0';
if (param_len !=
(int) ((char *) param->u.crypt.key - (char *) param) +
param->u.crypt.key_len)
return -EINVAL;
if (is_broadcast_ether_addr(param->sta_addr)) {
if (param->u.crypt.idx >= WEP_KEYS)
return -EINVAL;
sta_ptr = NULL;
crypt = &local->crypt_info.crypt[param->u.crypt.idx];
} else {
if (param->u.crypt.idx)
return -EINVAL;
sta_ptr = ap_crypt_get_ptrs(
local->ap, param->sta_addr,
(param->u.crypt.flags & HOSTAP_CRYPT_FLAG_PERMANENT),
&crypt);
if (sta_ptr == NULL) {
param->u.crypt.err = HOSTAP_CRYPT_ERR_UNKNOWN_ADDR;
return -EINVAL;
}
}
if (strcmp(param->u.crypt.alg, "none") == 0) {
if (crypt)
lib80211_crypt_delayed_deinit(&local->crypt_info, crypt);
goto done;
}
ops = lib80211_get_crypto_ops(param->u.crypt.alg);
if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) {
request_module("lib80211_crypt_wep");
ops = lib80211_get_crypto_ops(param->u.crypt.alg);
} else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) {
request_module("lib80211_crypt_tkip");
ops = lib80211_get_crypto_ops(param->u.crypt.alg);
} else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) {
request_module("lib80211_crypt_ccmp");
ops = lib80211_get_crypto_ops(param->u.crypt.alg);
}
if (ops == NULL) {
printk(KERN_DEBUG "%s: unknown crypto alg '%s'\n",
local->dev->name, param->u.crypt.alg);
param->u.crypt.err = HOSTAP_CRYPT_ERR_UNKNOWN_ALG;
ret = -EINVAL;
goto done;
}
/* station-based encryption and algorithms other than WEP require
* host-based encryption, so force it on automatically */
local->host_decrypt = local->host_encrypt = 1;
if (*crypt == NULL || (*crypt)->ops != ops) {
struct lib80211_crypt_data *new_crypt;
lib80211_crypt_delayed_deinit(&local->crypt_info, crypt);
new_crypt = kzalloc(sizeof(struct lib80211_crypt_data),
GFP_KERNEL);
if (new_crypt == NULL) {
ret = -ENOMEM;
goto done;
}
new_crypt->ops = ops;
new_crypt->priv = new_crypt->ops->init(param->u.crypt.idx);
if (new_crypt->priv == NULL) {
kfree(new_crypt);
param->u.crypt.err =
HOSTAP_CRYPT_ERR_CRYPT_INIT_FAILED;
ret = -EINVAL;
goto done;
}
*crypt = new_crypt;
}
if ((!(param->u.crypt.flags & HOSTAP_CRYPT_FLAG_SET_TX_KEY) ||
param->u.crypt.key_len > 0) && (*crypt)->ops->set_key &&
(*crypt)->ops->set_key(param->u.crypt.key,
param->u.crypt.key_len, param->u.crypt.seq,
(*crypt)->priv) < 0) {
printk(KERN_DEBUG "%s: key setting failed\n",
local->dev->name);
param->u.crypt.err = HOSTAP_CRYPT_ERR_KEY_SET_FAILED;
ret = -EINVAL;
goto done;
}
if (param->u.crypt.flags & HOSTAP_CRYPT_FLAG_SET_TX_KEY) {
if (!sta_ptr)
local->crypt_info.tx_keyidx = param->u.crypt.idx;
else if (param->u.crypt.idx) {
printk(KERN_DEBUG "%s: TX key idx setting failed\n",
local->dev->name);
param->u.crypt.err =
HOSTAP_CRYPT_ERR_TX_KEY_SET_FAILED;
ret = -EINVAL;
goto done;
}
}
done:
if (sta_ptr)
hostap_handle_sta_release(sta_ptr);
/* Do not reset port0 if the card is in Managed mode, since resetting
* would trigger a new IEEE 802.11 authentication which may end up
* looping with IEEE 802.1X. Prism2 documentation seems to require a
* port reset after WEP configuration; however, keys are apparently
* changed even without one, at least in Managed mode. */
if (ret == 0 &&
(hostap_set_encryption(local) ||
(local->iw_mode != IW_MODE_INFRA &&
local->func->reset_port(local->dev)))) {
param->u.crypt.err = HOSTAP_CRYPT_ERR_CARD_CONF_FAILED;
return -EINVAL;
}
return ret;
}
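/*
* Sketch (compiled out) of the length check at the top of
* prism2_ioctl_set_encryption(): the pointer difference used there is
* just offsetof() spelled by hand, i.e. the payload must be exactly
* the fixed header up to u.crypt.key plus key_len bytes of key.
*/
#if 0
static int crypt_param_len_ok(const struct prism2_hostapd_param *param,
			      int param_len)
{
	size_t hdr = offsetof(struct prism2_hostapd_param, u.crypt.key);
	return param_len == (int) (hdr + param->u.crypt.key_len);
}
#endif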
static int prism2_ioctl_get_encryption(local_info_t *local,
struct prism2_hostapd_param *param,
int param_len)
{
struct lib80211_crypt_data **crypt;
void *sta_ptr;
int max_key_len;
param->u.crypt.err = 0;
max_key_len = param_len -
(int) ((char *) param->u.crypt.key - (char *) param);
if (max_key_len < 0)
return -EINVAL;
if (is_broadcast_ether_addr(param->sta_addr)) {
sta_ptr = NULL;
if (param->u.crypt.idx >= WEP_KEYS)
param->u.crypt.idx = local->crypt_info.tx_keyidx;
crypt = &local->crypt_info.crypt[param->u.crypt.idx];
} else {
param->u.crypt.idx = 0;
sta_ptr = ap_crypt_get_ptrs(local->ap, param->sta_addr, 0,
&crypt);
if (sta_ptr == NULL) {
param->u.crypt.err = HOSTAP_CRYPT_ERR_UNKNOWN_ADDR;
return -EINVAL;
}
}
if (*crypt == NULL || (*crypt)->ops == NULL) {
memcpy(param->u.crypt.alg, "none", 5);
param->u.crypt.key_len = 0;
param->u.crypt.idx = 0xff;
} else {
strncpy(param->u.crypt.alg, (*crypt)->ops->name,
HOSTAP_CRYPT_ALG_NAME_LEN);
param->u.crypt.key_len = 0;
memset(param->u.crypt.seq, 0, 8);
if ((*crypt)->ops->get_key) {
param->u.crypt.key_len =
(*crypt)->ops->get_key(param->u.crypt.key,
max_key_len,
param->u.crypt.seq,
(*crypt)->priv);
}
}
if (sta_ptr)
hostap_handle_sta_release(sta_ptr);
return 0;
}
static int prism2_ioctl_get_rid(local_info_t *local,
struct prism2_hostapd_param *param,
int param_len)
{
int max_len, res;
max_len = param_len - PRISM2_HOSTAPD_RID_HDR_LEN;
if (max_len < 0)
return -EINVAL;
res = local->func->get_rid(local->dev, param->u.rid.rid,
param->u.rid.data, param->u.rid.len, 0);
if (res >= 0) {
param->u.rid.len = res;
return 0;
}
return res;
}
static int prism2_ioctl_set_rid(local_info_t *local,
struct prism2_hostapd_param *param,
int param_len)
{
int max_len;
max_len = param_len - PRISM2_HOSTAPD_RID_HDR_LEN;
if (max_len < 0 || max_len < param->u.rid.len)
return -EINVAL;
return local->func->set_rid(local->dev, param->u.rid.rid,
param->u.rid.data, param->u.rid.len);
}
static int prism2_ioctl_set_assoc_ap_addr(local_info_t *local,
struct prism2_hostapd_param *param,
int param_len)
{
printk(KERN_DEBUG "%ssta: associated as client with AP %pM\n",
local->dev->name, param->sta_addr);
memcpy(local->assoc_ap_addr, param->sta_addr, ETH_ALEN);
return 0;
}
static int prism2_ioctl_siwgenie(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *data, char *extra)
{
return prism2_set_genericelement(dev, extra, data->length);
}
static int prism2_ioctl_giwgenie(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *data, char *extra)
{
struct hostap_interface *iface = netdev_priv(dev);
local_info_t *local = iface->local;
int len = local->generic_elem_len - 2;
if (len <= 0 || local->generic_elem == NULL) {
data->length = 0;
return 0;
}
if (data->length < len)
return -E2BIG;
data->length = len;
memcpy(extra, local->generic_elem + 2, len);
return 0;
}
static int prism2_ioctl_set_generic_element(local_info_t *local,
struct prism2_hostapd_param *param,
int param_len)
{
int max_len, len;
len = param->u.generic_elem.len;
max_len = param_len - PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN;
if (max_len < 0 || max_len < len)
return -EINVAL;
return prism2_set_genericelement(local->dev,
param->u.generic_elem.data, len);
}
static int prism2_ioctl_siwmlme(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *data, char *extra)
{
struct hostap_interface *iface = netdev_priv(dev);
local_info_t *local = iface->local;
struct iw_mlme *mlme = (struct iw_mlme *) extra;
__le16 reason;
reason = cpu_to_le16(mlme->reason_code);
switch (mlme->cmd) {
case IW_MLME_DEAUTH:
return prism2_sta_send_mgmt(local, mlme->addr.sa_data,
IEEE80211_STYPE_DEAUTH,
(u8 *) &reason, 2);
case IW_MLME_DISASSOC:
return prism2_sta_send_mgmt(local, mlme->addr.sa_data,
IEEE80211_STYPE_DISASSOC,
(u8 *) &reason, 2);
default:
return -EOPNOTSUPP;
}
}
static int prism2_ioctl_mlme(local_info_t *local,
struct prism2_hostapd_param *param)
{
__le16 reason;
reason = cpu_to_le16(param->u.mlme.reason_code);
switch (param->u.mlme.cmd) {
case MLME_STA_DEAUTH:
return prism2_sta_send_mgmt(local, param->sta_addr,
IEEE80211_STYPE_DEAUTH,
(u8 *) &reason, 2);
case MLME_STA_DISASSOC:
return prism2_sta_send_mgmt(local, param->sta_addr,
IEEE80211_STYPE_DISASSOC,
(u8 *) &reason, 2);
default:
return -EOPNOTSUPP;
}
}
static int prism2_ioctl_scan_req(local_info_t *local,
struct prism2_hostapd_param *param)
{
#ifndef PRISM2_NO_STATION_MODES
if ((local->iw_mode != IW_MODE_INFRA &&
local->iw_mode != IW_MODE_ADHOC) ||
(local->sta_fw_ver < PRISM2_FW_VER(1,3,1)))
return -EOPNOTSUPP;
if (!local->dev_enabled)
return -ENETDOWN;
return prism2_request_hostscan(local->dev, param->u.scan_req.ssid,
param->u.scan_req.ssid_len);
#else /* PRISM2_NO_STATION_MODES */
return -EOPNOTSUPP;
#endif /* PRISM2_NO_STATION_MODES */
}
static int prism2_ioctl_priv_hostapd(local_info_t *local, struct iw_point *p)
{
struct prism2_hostapd_param *param;
int ret = 0;
int ap_ioctl = 0;
if (p->length < sizeof(struct prism2_hostapd_param) ||
p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
return -EINVAL;
param = kmalloc(p->length, GFP_KERNEL);
if (param == NULL)
return -ENOMEM;
if (copy_from_user(param, p->pointer, p->length)) {
ret = -EFAULT;
goto out;
}
switch (param->cmd) {
case PRISM2_SET_ENCRYPTION:
ret = prism2_ioctl_set_encryption(local, param, p->length);
break;
case PRISM2_GET_ENCRYPTION:
ret = prism2_ioctl_get_encryption(local, param, p->length);
break;
case PRISM2_HOSTAPD_GET_RID:
ret = prism2_ioctl_get_rid(local, param, p->length);
break;
case PRISM2_HOSTAPD_SET_RID:
ret = prism2_ioctl_set_rid(local, param, p->length);
break;
case PRISM2_HOSTAPD_SET_ASSOC_AP_ADDR:
ret = prism2_ioctl_set_assoc_ap_addr(local, param, p->length);
break;
case PRISM2_HOSTAPD_SET_GENERIC_ELEMENT:
ret = prism2_ioctl_set_generic_element(local, param,
p->length);
break;
case PRISM2_HOSTAPD_MLME:
ret = prism2_ioctl_mlme(local, param);
break;
case PRISM2_HOSTAPD_SCAN_REQ:
ret = prism2_ioctl_scan_req(local, param);
break;
default:
ret = prism2_hostapd(local->ap, param);
ap_ioctl = 1;
break;
}
if (ret == 1 || !ap_ioctl) {
if (copy_to_user(p->pointer, param, p->length)) {
ret = -EFAULT;
goto out;
} else if (ap_ioctl)
ret = 0;
}
out:
kfree(param);
return ret;
}
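/*
* Sketch (compiled out) of how a hostapd-style daemon might drive the
* handler above: it fills a prism2_hostapd_param and points wrq.u.data
* at it. The struct and the PRISM2_* command codes come from
* hostap_common.h; the helper itself is an assumption for
* illustration.
*/
#if 0	/* example only - belongs in a user-space tool, not the kernel */
static int hostapd_get_rid(int sock, const char *ifname,
			   struct prism2_hostapd_param *param, size_t len)
{
	struct iwreq wrq;
	memset(&wrq, 0, sizeof(wrq));
	strncpy(wrq.ifr_name, ifname, IFNAMSIZ - 1);
	param->cmd = PRISM2_HOSTAPD_GET_RID;
	wrq.u.data.pointer = param;
	wrq.u.data.length = len;	/* bounded by PRISM2_HOSTAPD_MAX_BUF_SIZE above */
	return ioctl(sock, PRISM2_IOCTL_HOSTAPD, &wrq);
}
#endif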
static void prism2_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct hostap_interface *iface;
local_info_t *local;
iface = netdev_priv(dev);
local = iface->local;
strlcpy(info->driver, "hostap", sizeof(info->driver));
snprintf(info->fw_version, sizeof(info->fw_version),
"%d.%d.%d", (local->sta_fw_ver >> 16) & 0xff,
(local->sta_fw_ver >> 8) & 0xff,
local->sta_fw_ver & 0xff);
}
const struct ethtool_ops prism2_ethtool_ops = {
.get_drvinfo = prism2_get_drvinfo
};
/* Structures to export the Wireless Handlers */
static const iw_handler prism2_handler[] =
{
(iw_handler) NULL, /* SIOCSIWCOMMIT */
(iw_handler) prism2_get_name, /* SIOCGIWNAME */
(iw_handler) NULL, /* SIOCSIWNWID */
(iw_handler) NULL, /* SIOCGIWNWID */
(iw_handler) prism2_ioctl_siwfreq, /* SIOCSIWFREQ */
(iw_handler) prism2_ioctl_giwfreq, /* SIOCGIWFREQ */
(iw_handler) prism2_ioctl_siwmode, /* SIOCSIWMODE */
(iw_handler) prism2_ioctl_giwmode, /* SIOCGIWMODE */
(iw_handler) prism2_ioctl_siwsens, /* SIOCSIWSENS */
(iw_handler) prism2_ioctl_giwsens, /* SIOCGIWSENS */
(iw_handler) NULL /* not used */, /* SIOCSIWRANGE */
(iw_handler) prism2_ioctl_giwrange, /* SIOCGIWRANGE */
(iw_handler) NULL /* not used */, /* SIOCSIWPRIV */
(iw_handler) NULL /* kernel code */, /* SIOCGIWPRIV */
(iw_handler) NULL /* not used */, /* SIOCSIWSTATS */
(iw_handler) NULL /* kernel code */, /* SIOCGIWSTATS */
iw_handler_set_spy, /* SIOCSIWSPY */
iw_handler_get_spy, /* SIOCGIWSPY */
iw_handler_set_thrspy, /* SIOCSIWTHRSPY */
iw_handler_get_thrspy, /* SIOCGIWTHRSPY */
(iw_handler) prism2_ioctl_siwap, /* SIOCSIWAP */
(iw_handler) prism2_ioctl_giwap, /* SIOCGIWAP */
(iw_handler) prism2_ioctl_siwmlme, /* SIOCSIWMLME */
(iw_handler) prism2_ioctl_giwaplist, /* SIOCGIWAPLIST */
(iw_handler) prism2_ioctl_siwscan, /* SIOCSIWSCAN */
(iw_handler) prism2_ioctl_giwscan, /* SIOCGIWSCAN */
(iw_handler) prism2_ioctl_siwessid, /* SIOCSIWESSID */
(iw_handler) prism2_ioctl_giwessid, /* SIOCGIWESSID */
(iw_handler) prism2_ioctl_siwnickn, /* SIOCSIWNICKN */
(iw_handler) prism2_ioctl_giwnickn, /* SIOCGIWNICKN */
(iw_handler) NULL, /* -- hole -- */
(iw_handler) NULL, /* -- hole -- */
(iw_handler) prism2_ioctl_siwrate, /* SIOCSIWRATE */
(iw_handler) prism2_ioctl_giwrate, /* SIOCGIWRATE */
(iw_handler) prism2_ioctl_siwrts, /* SIOCSIWRTS */
(iw_handler) prism2_ioctl_giwrts, /* SIOCGIWRTS */
(iw_handler) prism2_ioctl_siwfrag, /* SIOCSIWFRAG */
(iw_handler) prism2_ioctl_giwfrag, /* SIOCGIWFRAG */
(iw_handler) prism2_ioctl_siwtxpow, /* SIOCSIWTXPOW */
(iw_handler) prism2_ioctl_giwtxpow, /* SIOCGIWTXPOW */
(iw_handler) prism2_ioctl_siwretry, /* SIOCSIWRETRY */
(iw_handler) prism2_ioctl_giwretry, /* SIOCGIWRETRY */
(iw_handler) prism2_ioctl_siwencode, /* SIOCSIWENCODE */
(iw_handler) prism2_ioctl_giwencode, /* SIOCGIWENCODE */
(iw_handler) prism2_ioctl_siwpower, /* SIOCSIWPOWER */
(iw_handler) prism2_ioctl_giwpower, /* SIOCGIWPOWER */
(iw_handler) NULL, /* -- hole -- */
(iw_handler) NULL, /* -- hole -- */
(iw_handler) prism2_ioctl_siwgenie, /* SIOCSIWGENIE */
(iw_handler) prism2_ioctl_giwgenie, /* SIOCGIWGENIE */
(iw_handler) prism2_ioctl_siwauth, /* SIOCSIWAUTH */
(iw_handler) prism2_ioctl_giwauth, /* SIOCGIWAUTH */
(iw_handler) prism2_ioctl_siwencodeext, /* SIOCSIWENCODEEXT */
(iw_handler) prism2_ioctl_giwencodeext, /* SIOCGIWENCODEEXT */
(iw_handler) NULL, /* SIOCSIWPMKSA */
(iw_handler) NULL, /* -- hole -- */
};
static const iw_handler prism2_private_handler[] =
{ /* SIOCIWFIRSTPRIV + */
(iw_handler) prism2_ioctl_priv_prism2_param, /* 0 */
(iw_handler) prism2_ioctl_priv_get_prism2_param, /* 1 */
(iw_handler) prism2_ioctl_priv_writemif, /* 2 */
(iw_handler) prism2_ioctl_priv_readmif, /* 3 */
};
const struct iw_handler_def hostap_iw_handler_def =
{
.num_standard = ARRAY_SIZE(prism2_handler),
.num_private = ARRAY_SIZE(prism2_private_handler),
.num_private_args = ARRAY_SIZE(prism2_priv),
.standard = (iw_handler *) prism2_handler,
.private = (iw_handler *) prism2_private_handler,
.private_args = (struct iw_priv_args *) prism2_priv,
.get_wireless_stats = hostap_get_wireless_stats,
};
int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct iwreq *wrq = (struct iwreq *) ifr;
struct hostap_interface *iface;
local_info_t *local;
int ret = 0;
iface = netdev_priv(dev);
local = iface->local;
switch (cmd) {
/* Private ioctls (iwpriv) that have not yet been converted
* into new wireless extensions API */
case PRISM2_IOCTL_INQUIRE:
if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
else ret = prism2_ioctl_priv_inquire(dev, (int *) wrq->u.name);
break;
case PRISM2_IOCTL_MONITOR:
if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
else ret = prism2_ioctl_priv_monitor(dev, (int *) wrq->u.name);
break;
case PRISM2_IOCTL_RESET:
if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
else ret = prism2_ioctl_priv_reset(dev, (int *) wrq->u.name);
break;
case PRISM2_IOCTL_WDS_ADD:
if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
else ret = prism2_wds_add(local, wrq->u.ap_addr.sa_data, 1);
break;
case PRISM2_IOCTL_WDS_DEL:
if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
else ret = prism2_wds_del(local, wrq->u.ap_addr.sa_data, 1, 0);
break;
case PRISM2_IOCTL_SET_RID_WORD:
if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
else ret = prism2_ioctl_priv_set_rid_word(dev,
(int *) wrq->u.name);
break;
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
case PRISM2_IOCTL_MACCMD:
if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
else ret = ap_mac_cmd_ioctl(local, (int *) wrq->u.name);
break;
case PRISM2_IOCTL_ADDMAC:
if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
else ret = ap_control_add_mac(&local->ap->mac_restrictions,
wrq->u.ap_addr.sa_data);
break;
case PRISM2_IOCTL_DELMAC:
if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
else ret = ap_control_del_mac(&local->ap->mac_restrictions,
wrq->u.ap_addr.sa_data);
break;
case PRISM2_IOCTL_KICKMAC:
if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
else ret = ap_control_kick_mac(local->ap, local->dev,
wrq->u.ap_addr.sa_data);
break;
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
/* Private ioctls that are not used with iwpriv;
* in SIOCDEVPRIVATE range */
#ifdef PRISM2_DOWNLOAD_SUPPORT
case PRISM2_IOCTL_DOWNLOAD:
if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
else ret = prism2_ioctl_priv_download(local, &wrq->u.data);
break;
#endif /* PRISM2_DOWNLOAD_SUPPORT */
case PRISM2_IOCTL_HOSTAPD:
if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
else ret = prism2_ioctl_priv_hostapd(local, &wrq->u.data);
break;
default:
ret = -EOPNOTSUPP;
break;
}
return ret;
}
| gpl-2.0 |
bheu/odroid_linux | drivers/net/s2io.c | 2375 | 248235 | /************************************************************************
* s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
* Copyright(c) 2002-2010 Exar Corp.
*
* This software may be used and distributed according to the terms of
* the GNU General Public License (GPL), incorporated herein by reference.
* Drivers based on or derived from this code fall under the GPL and must
* retain the authorship, copyright and license notice. This file is not
* a complete program and may only be used when the entire operating
* system is licensed under the GPL.
* See the file COPYING in this distribution for more information.
*
* Credits:
* Jeff Garzik : For pointing out the improper error condition
* check in the s2io_xmit routine and also some
* issues in the Tx watch dog function. Also for
* patiently answering all those innumerable
* questions regarding the 2.6 porting issues.
* Stephen Hemminger : Providing proper 2.6 porting mechanism for some
* macros available only in 2.6 Kernel.
* Francois Romieu : For pointing out all code part that were
* deprecated and also styling related comments.
* Grant Grundler : For helping me get rid of some Architecture
* dependent code.
* Christopher Hellwig : Some more 2.6 specific issues in the driver.
*
* The module loadable parameters that are supported by the driver and a brief
* explanation of all the variables.
*
* rx_ring_num : This can be used to program the number of receive rings used
* in the driver.
* rx_ring_sz: This defines the number of receive blocks each ring can have.
* This is also an array of size 8.
* rx_ring_mode: This defines the operation mode of all 8 rings. The valid
* values are 1, 2.
* tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
* tx_fifo_len: This too is an array of 8. Each element defines the number of
* Tx descriptors that can be associated with each corresponding FIFO.
* intr_type: This defines the type of interrupt. The values can be 0(INTA),
* 2(MSI_X). Default value is '2(MSI_X)'
* lro_max_pkts: This parameter defines the maximum number of packets that
* can be aggregated as a single large packet
* napi: This parameter is used to enable/disable NAPI (polling Rx)
* Possible values '1' for enable and '0' for disable. Default is '1'
* ufo: This parameter is used to enable/disable UDP Fragmentation Offload (UFO)
* Possible values '1' for enable and '0' for disable. Default is '0'
* vlan_tag_strip: This can be used to enable or disable vlan stripping.
* Possible values '1' for enable, '0' for disable.
* Default is '2' - which means disable in promiscuous mode
* and enable in non-promiscuous mode.
* multiq: This parameter is used to enable/disable MULTIQUEUE support.
* Possible values '1' for enable and '0' for disable. Default is '0'
************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mdio.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/stddef.h>
#include <linux/ioctl.h>
#include <linux/timex.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <net/tcp.h>
#include <asm/system.h>
#include <asm/div64.h>
#include <asm/irq.h>
/* local include */
#include "s2io.h"
#include "s2io-regs.h"
#define DRV_VERSION "2.0.26.28"
/* S2io Driver name & version. */
static const char s2io_driver_name[] = "Neterion";
static const char s2io_driver_version[] = DRV_VERSION;
static const int rxd_size[2] = {32, 48};
static const int rxd_count[2] = {127, 85};
static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
{
int ret;
ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
(GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
return ret;
}
/*
* Cards with the following subsystem_ids have a link state indication
* problem: 600B, 600C, 600D, 640B, 640C and 640D.
* The macro below identifies these cards given the subsystem_id.
*/
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
(dev_type == XFRAME_I_DEVICE) ? \
((((subid >= 0x600B) && (subid <= 0x600D)) || \
((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
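/*
* Worked example (illustrative): an Xframe I card with subsystem id
* 0x600C makes CARDS_WITH_FAULTY_LINK_INDICATORS(XFRAME_I_DEVICE,
* 0x600C) evaluate to 1, since 0x600B <= 0x600C <= 0x600D; any
* Xframe II device yields 0 regardless of subid.
*/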
static inline int is_s2io_card_up(const struct s2io_nic *sp)
{
return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
}
/* Ethtool related variables and Macros. */
static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
"Register test\t(offline)",
"Eeprom test\t(offline)",
"Link test\t(online)",
"RLDRAM test\t(offline)",
"BIST Test\t(offline)"
};
static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
{"tmac_frms"},
{"tmac_data_octets"},
{"tmac_drop_frms"},
{"tmac_mcst_frms"},
{"tmac_bcst_frms"},
{"tmac_pause_ctrl_frms"},
{"tmac_ttl_octets"},
{"tmac_ucst_frms"},
{"tmac_nucst_frms"},
{"tmac_any_err_frms"},
{"tmac_ttl_less_fb_octets"},
{"tmac_vld_ip_octets"},
{"tmac_vld_ip"},
{"tmac_drop_ip"},
{"tmac_icmp"},
{"tmac_rst_tcp"},
{"tmac_tcp"},
{"tmac_udp"},
{"rmac_vld_frms"},
{"rmac_data_octets"},
{"rmac_fcs_err_frms"},
{"rmac_drop_frms"},
{"rmac_vld_mcst_frms"},
{"rmac_vld_bcst_frms"},
{"rmac_in_rng_len_err_frms"},
{"rmac_out_rng_len_err_frms"},
{"rmac_long_frms"},
{"rmac_pause_ctrl_frms"},
{"rmac_unsup_ctrl_frms"},
{"rmac_ttl_octets"},
{"rmac_accepted_ucst_frms"},
{"rmac_accepted_nucst_frms"},
{"rmac_discarded_frms"},
{"rmac_drop_events"},
{"rmac_ttl_less_fb_octets"},
{"rmac_ttl_frms"},
{"rmac_usized_frms"},
{"rmac_osized_frms"},
{"rmac_frag_frms"},
{"rmac_jabber_frms"},
{"rmac_ttl_64_frms"},
{"rmac_ttl_65_127_frms"},
{"rmac_ttl_128_255_frms"},
{"rmac_ttl_256_511_frms"},
{"rmac_ttl_512_1023_frms"},
{"rmac_ttl_1024_1518_frms"},
{"rmac_ip"},
{"rmac_ip_octets"},
{"rmac_hdr_err_ip"},
{"rmac_drop_ip"},
{"rmac_icmp"},
{"rmac_tcp"},
{"rmac_udp"},
{"rmac_err_drp_udp"},
{"rmac_xgmii_err_sym"},
{"rmac_frms_q0"},
{"rmac_frms_q1"},
{"rmac_frms_q2"},
{"rmac_frms_q3"},
{"rmac_frms_q4"},
{"rmac_frms_q5"},
{"rmac_frms_q6"},
{"rmac_frms_q7"},
{"rmac_full_q0"},
{"rmac_full_q1"},
{"rmac_full_q2"},
{"rmac_full_q3"},
{"rmac_full_q4"},
{"rmac_full_q5"},
{"rmac_full_q6"},
{"rmac_full_q7"},
{"rmac_pause_cnt"},
{"rmac_xgmii_data_err_cnt"},
{"rmac_xgmii_ctrl_err_cnt"},
{"rmac_accepted_ip"},
{"rmac_err_tcp"},
{"rd_req_cnt"},
{"new_rd_req_cnt"},
{"new_rd_req_rtry_cnt"},
{"rd_rtry_cnt"},
{"wr_rtry_rd_ack_cnt"},
{"wr_req_cnt"},
{"new_wr_req_cnt"},
{"new_wr_req_rtry_cnt"},
{"wr_rtry_cnt"},
{"wr_disc_cnt"},
{"rd_rtry_wr_ack_cnt"},
{"txp_wr_cnt"},
{"txd_rd_cnt"},
{"txd_wr_cnt"},
{"rxd_rd_cnt"},
{"rxd_wr_cnt"},
{"txf_rd_cnt"},
{"rxf_wr_cnt"}
};
static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
{"rmac_ttl_1519_4095_frms"},
{"rmac_ttl_4096_8191_frms"},
{"rmac_ttl_8192_max_frms"},
{"rmac_ttl_gt_max_frms"},
{"rmac_osized_alt_frms"},
{"rmac_jabber_alt_frms"},
{"rmac_gt_max_alt_frms"},
{"rmac_vlan_frms"},
{"rmac_len_discard"},
{"rmac_fcs_discard"},
{"rmac_pf_discard"},
{"rmac_da_discard"},
{"rmac_red_discard"},
{"rmac_rts_discard"},
{"rmac_ingm_full_discard"},
{"link_fault_cnt"}
};
static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
{"\n DRIVER STATISTICS"},
{"single_bit_ecc_errs"},
{"double_bit_ecc_errs"},
{"parity_err_cnt"},
{"serious_err_cnt"},
{"soft_reset_cnt"},
{"fifo_full_cnt"},
{"ring_0_full_cnt"},
{"ring_1_full_cnt"},
{"ring_2_full_cnt"},
{"ring_3_full_cnt"},
{"ring_4_full_cnt"},
{"ring_5_full_cnt"},
{"ring_6_full_cnt"},
{"ring_7_full_cnt"},
{"alarm_transceiver_temp_high"},
{"alarm_transceiver_temp_low"},
{"alarm_laser_bias_current_high"},
{"alarm_laser_bias_current_low"},
{"alarm_laser_output_power_high"},
{"alarm_laser_output_power_low"},
{"warn_transceiver_temp_high"},
{"warn_transceiver_temp_low"},
{"warn_laser_bias_current_high"},
{"warn_laser_bias_current_low"},
{"warn_laser_output_power_high"},
{"warn_laser_output_power_low"},
{"lro_aggregated_pkts"},
{"lro_flush_both_count"},
{"lro_out_of_sequence_pkts"},
{"lro_flush_due_to_max_pkts"},
{"lro_avg_aggr_pkts"},
{"mem_alloc_fail_cnt"},
{"pci_map_fail_cnt"},
{"watchdog_timer_cnt"},
{"mem_allocated"},
{"mem_freed"},
{"link_up_cnt"},
{"link_down_cnt"},
{"link_up_time"},
{"link_down_time"},
{"tx_tcode_buf_abort_cnt"},
{"tx_tcode_desc_abort_cnt"},
{"tx_tcode_parity_err_cnt"},
{"tx_tcode_link_loss_cnt"},
{"tx_tcode_list_proc_err_cnt"},
{"rx_tcode_parity_err_cnt"},
{"rx_tcode_abort_cnt"},
{"rx_tcode_parity_abort_cnt"},
{"rx_tcode_rda_fail_cnt"},
{"rx_tcode_unkn_prot_cnt"},
{"rx_tcode_fcs_err_cnt"},
{"rx_tcode_buf_size_err_cnt"},
{"rx_tcode_rxd_corrupt_cnt"},
{"rx_tcode_unkn_err_cnt"},
{"tda_err_cnt"},
{"pfc_err_cnt"},
{"pcc_err_cnt"},
{"tti_err_cnt"},
{"tpa_err_cnt"},
{"sm_err_cnt"},
{"lso_err_cnt"},
{"mac_tmac_err_cnt"},
{"mac_rmac_err_cnt"},
{"xgxs_txgxs_err_cnt"},
{"xgxs_rxgxs_err_cnt"},
{"rc_err_cnt"},
{"prc_pcix_err_cnt"},
{"rpa_err_cnt"},
{"rda_err_cnt"},
{"rti_err_cnt"},
{"mc_err_cnt"}
};
#define S2IO_XENA_STAT_LEN ARRAY_SIZE(ethtool_xena_stats_keys)
#define S2IO_ENHANCED_STAT_LEN ARRAY_SIZE(ethtool_enhanced_stats_keys)
#define S2IO_DRIVER_STAT_LEN ARRAY_SIZE(ethtool_driver_stats_keys)
#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)
#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)
#define S2IO_TEST_LEN ARRAY_SIZE(s2io_gstrings)
#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
#define S2IO_TIMER_CONF(timer, handle, arg, exp) \
init_timer(&timer); \
timer.function = handle; \
timer.data = (unsigned long)arg; \
mod_timer(&timer, (jiffies + exp))
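/*
* Usage sketch (compiled out): the macro expands into four statements,
* so it is invoked as a statement of its own, e.g. arming a
* half-second timer (the handler name here is hypothetical):
*/
#if 0
	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ / 2));
#endif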
/* copy mac addr to def_mac_addr array */
static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
{
sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
}
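/*
* Sketch (compiled out) of the inverse operation: packing six MAC
* bytes back into the u64 layout consumed above (mac[0] lands in bits
* 47..40, mac[5] in bits 7..0). The helper name is illustrative.
*/
#if 0
static u64 s2io_mac_addr_to_u64(const u8 *mac)
{
	return ((u64)mac[0] << 40) | ((u64)mac[1] << 32) |
	       ((u64)mac[2] << 24) | ((u64)mac[3] << 16) |
	       ((u64)mac[4] << 8) | (u64)mac[5];
}
#endif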
/* Add the vlan */
static void s2io_vlan_rx_register(struct net_device *dev,
struct vlan_group *grp)
{
int i;
struct s2io_nic *nic = netdev_priv(dev);
unsigned long flags[MAX_TX_FIFOS];
struct config_param *config = &nic->config;
struct mac_info *mac_control = &nic->mac_control;
for (i = 0; i < config->tx_fifo_num; i++) {
struct fifo_info *fifo = &mac_control->fifos[i];
spin_lock_irqsave(&fifo->tx_lock, flags[i]);
}
nic->vlgrp = grp;
for (i = config->tx_fifo_num - 1; i >= 0; i--) {
struct fifo_info *fifo = &mac_control->fifos[i];
spin_unlock_irqrestore(&fifo->tx_lock, flags[i]);
}
}
/* Unregister the vlan */
static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
int i;
struct s2io_nic *nic = netdev_priv(dev);
unsigned long flags[MAX_TX_FIFOS];
struct config_param *config = &nic->config;
struct mac_info *mac_control = &nic->mac_control;
for (i = 0; i < config->tx_fifo_num; i++) {
struct fifo_info *fifo = &mac_control->fifos[i];
spin_lock_irqsave(&fifo->tx_lock, flags[i]);
}
if (nic->vlgrp)
vlan_group_set_device(nic->vlgrp, vid, NULL);
for (i = config->tx_fifo_num - 1; i >= 0; i--) {
struct fifo_info *fifo = &mac_control->fifos[i];
spin_unlock_irqrestore(&fifo->tx_lock, flags[i]);
}
}
/*
* Constants to be programmed into the Xena's registers, to configure
* the XAUI.
*/
#define END_SIGN 0x0
static const u64 herc_act_dtx_cfg[] = {
/* Set address */
0x8000051536750000ULL, 0x80000515367500E0ULL,
/* Write data */
0x8000051536750004ULL, 0x80000515367500E4ULL,
/* Set address */
0x80010515003F0000ULL, 0x80010515003F00E0ULL,
/* Write data */
0x80010515003F0004ULL, 0x80010515003F00E4ULL,
/* Set address */
0x801205150D440000ULL, 0x801205150D4400E0ULL,
/* Write data */
0x801205150D440004ULL, 0x801205150D4400E4ULL,
/* Set address */
0x80020515F2100000ULL, 0x80020515F21000E0ULL,
/* Write data */
0x80020515F2100004ULL, 0x80020515F21000E4ULL,
/* Done */
END_SIGN
};
static const u64 xena_dtx_cfg[] = {
/* Set address */
0x8000051500000000ULL, 0x80000515000000E0ULL,
/* Write data */
0x80000515D9350004ULL, 0x80000515D93500E4ULL,
/* Set address */
0x8001051500000000ULL, 0x80010515000000E0ULL,
/* Write data */
0x80010515001E0004ULL, 0x80010515001E00E4ULL,
/* Set address */
0x8002051500000000ULL, 0x80020515000000E0ULL,
/* Write data */
0x80020515F2100004ULL, 0x80020515F21000E4ULL,
END_SIGN
};
/*
* Constants for Fixing the MacAddress problem seen mostly on
* Alpha machines.
*/
static const u64 fix_mac[] = {
0x0060000000000000ULL, 0x0060600000000000ULL,
0x0040600000000000ULL, 0x0000600000000000ULL,
0x0020600000000000ULL, 0x0060600000000000ULL,
0x0020600000000000ULL, 0x0060600000000000ULL,
0x0020600000000000ULL, 0x0060600000000000ULL,
0x0020600000000000ULL, 0x0060600000000000ULL,
0x0020600000000000ULL, 0x0060600000000000ULL,
0x0020600000000000ULL, 0x0060600000000000ULL,
0x0020600000000000ULL, 0x0060600000000000ULL,
0x0020600000000000ULL, 0x0060600000000000ULL,
0x0020600000000000ULL, 0x0060600000000000ULL,
0x0020600000000000ULL, 0x0060600000000000ULL,
0x0020600000000000ULL, 0x0000600000000000ULL,
0x0040600000000000ULL, 0x0060600000000000ULL,
END_SIGN
};
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
S2IO_PARM_INT(rx_ring_num, 1);
S2IO_PARM_INT(multiq, 0);
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */
/* Max pkts to be aggregated by LRO at one time. If not specified,
* aggregation happens until we hit the max IP pkt size (64K)
*/
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);
S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = 0 };
module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
/*
* S2IO device table.
* This table lists all the devices that this driver supports.
*/
static DEFINE_PCI_DEVICE_TABLE(s2io_tbl) = {
{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
PCI_ANY_ID, PCI_ANY_ID},
{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
PCI_ANY_ID, PCI_ANY_ID},
{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
PCI_ANY_ID, PCI_ANY_ID},
{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
PCI_ANY_ID, PCI_ANY_ID},
{0,}
};
MODULE_DEVICE_TABLE(pci, s2io_tbl);
static struct pci_error_handlers s2io_err_handler = {
.error_detected = s2io_io_error_detected,
.slot_reset = s2io_io_slot_reset,
.resume = s2io_io_resume,
};
static struct pci_driver s2io_driver = {
.name = "S2IO",
.id_table = s2io_tbl,
.probe = s2io_init_nic,
.remove = __devexit_p(s2io_rem_nic),
.err_handler = &s2io_err_handler,
};
/* A helper macro used by both the init and free shared_mem functions. */
#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
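/*
* Worked example: TXD_MEM_PAGE_CNT() is a ceiling division, i.e.
* pages = ceil(len / per_each). With a hypothetical fifo_len of 100
* descriptor lists and 36 lists fitting per page,
* (100 + 36 - 1) / 36 = 3 pages are allocated.
*/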
/* netqueue manipulation helper functions */
static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
{
if (!sp->config.multiq) {
int i;
for (i = 0; i < sp->config.tx_fifo_num; i++)
sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
}
netif_tx_stop_all_queues(sp->dev);
}
static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
{
if (!sp->config.multiq)
sp->mac_control.fifos[fifo_no].queue_state =
FIFO_QUEUE_STOP;
netif_tx_stop_all_queues(sp->dev);
}
static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
{
if (!sp->config.multiq) {
int i;
for (i = 0; i < sp->config.tx_fifo_num; i++)
sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
}
netif_tx_start_all_queues(sp->dev);
}
static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
{
if (!sp->config.multiq)
sp->mac_control.fifos[fifo_no].queue_state =
FIFO_QUEUE_START;
netif_tx_start_all_queues(sp->dev);
}
static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
{
if (!sp->config.multiq) {
int i;
for (i = 0; i < sp->config.tx_fifo_num; i++)
sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
}
netif_tx_wake_all_queues(sp->dev);
}
static inline void s2io_wake_tx_queue(
struct fifo_info *fifo, int cnt, u8 multiq)
{
if (multiq) {
if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
netif_wake_subqueue(fifo->dev, fifo->fifo_no);
} else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
if (netif_queue_stopped(fifo->dev)) {
fifo->queue_state = FIFO_QUEUE_START;
netif_wake_queue(fifo->dev);
}
}
}
/**
* init_shared_mem - Allocation and Initialization of Memory
* @nic: Device private variable.
* Description: The function allocates all the memory areas shared
* between the NIC and the driver. This includes Tx descriptors,
* Rx descriptors and the statistics block.
*/
static int init_shared_mem(struct s2io_nic *nic)
{
u32 size;
void *tmp_v_addr, *tmp_v_addr_next;
dma_addr_t tmp_p_addr, tmp_p_addr_next;
struct RxD_block *pre_rxd_blk = NULL;
int i, j, blk_cnt;
int lst_size, lst_per_page;
struct net_device *dev = nic->dev;
unsigned long tmp;
struct buffAdd *ba;
struct config_param *config = &nic->config;
struct mac_info *mac_control = &nic->mac_control;
unsigned long long mem_allocated = 0;
/* Allocation and initialization of TXDLs in FIFOs */
size = 0;
for (i = 0; i < config->tx_fifo_num; i++) {
struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
size += tx_cfg->fifo_len;
}
if (size > MAX_AVAILABLE_TXDS) {
DBG_PRINT(ERR_DBG,
"Too many TxDs requested: %d, max supported: %d\n",
size, MAX_AVAILABLE_TXDS);
return -EINVAL;
}
size = 0;
for (i = 0; i < config->tx_fifo_num; i++) {
struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
size = tx_cfg->fifo_len;
/*
* Legal values are from 2 to 8192
*/
if (size < 2) {
DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
"Valid lengths are 2 through 8192\n",
i, size);
return -EINVAL;
}
}
lst_size = (sizeof(struct TxD) * config->max_txds);
lst_per_page = PAGE_SIZE / lst_size;
for (i = 0; i < config->tx_fifo_num; i++) {
struct fifo_info *fifo = &mac_control->fifos[i];
struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
int fifo_len = tx_cfg->fifo_len;
int list_holder_size = fifo_len * sizeof(struct list_info_hold);
fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
if (!fifo->list_info) {
DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
return -ENOMEM;
}
mem_allocated += list_holder_size;
}
for (i = 0; i < config->tx_fifo_num; i++) {
int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
lst_per_page);
struct fifo_info *fifo = &mac_control->fifos[i];
struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
fifo->tx_curr_put_info.offset = 0;
fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
fifo->tx_curr_get_info.offset = 0;
fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
fifo->fifo_no = i;
fifo->nic = nic;
fifo->max_txds = MAX_SKB_FRAGS + 2;
fifo->dev = dev;
for (j = 0; j < page_num; j++) {
int k = 0;
dma_addr_t tmp_p;
void *tmp_v;
tmp_v = pci_alloc_consistent(nic->pdev,
PAGE_SIZE, &tmp_p);
if (!tmp_v) {
DBG_PRINT(INFO_DBG,
"pci_alloc_consistent failed for TxDL\n");
return -ENOMEM;
}
/* If we got a zero DMA address (which can happen on
* certain platforms like PPC), reallocate.
* Store the virtual address of the page we don't want,
* to be freed later.
*/
if (!tmp_p) {
mac_control->zerodma_virt_addr = tmp_v;
DBG_PRINT(INIT_DBG,
"%s: Zero DMA address for TxDL. "
"Virtual address %p\n",
dev->name, tmp_v);
tmp_v = pci_alloc_consistent(nic->pdev,
PAGE_SIZE, &tmp_p);
if (!tmp_v) {
DBG_PRINT(INFO_DBG,
"pci_alloc_consistent failed for TxDL\n");
return -ENOMEM;
}
mem_allocated += PAGE_SIZE;
}
while (k < lst_per_page) {
int l = (j * lst_per_page) + k;
if (l == tx_cfg->fifo_len)
break;
fifo->list_info[l].list_virt_addr =
tmp_v + (k * lst_size);
fifo->list_info[l].list_phy_addr =
tmp_p + (k * lst_size);
k++;
}
}
}
for (i = 0; i < config->tx_fifo_num; i++) {
struct fifo_info *fifo = &mac_control->fifos[i];
struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
size = tx_cfg->fifo_len;
fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
if (!fifo->ufo_in_band_v)
return -ENOMEM;
mem_allocated += (size * sizeof(u64));
}
/* Allocation and initialization of RXDs in Rings */
size = 0;
for (i = 0; i < config->rx_ring_num; i++) {
struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
struct ring_info *ring = &mac_control->rings[i];
if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
"multiple of RxDs per Block\n",
dev->name, i);
return FAILURE;
}
size += rx_cfg->num_rxd;
ring->block_count = rx_cfg->num_rxd /
(rxd_count[nic->rxd_mode] + 1);
ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
}
if (nic->rxd_mode == RXD_MODE_1)
size = (size * (sizeof(struct RxD1)));
else
size = (size * (sizeof(struct RxD3)));
for (i = 0; i < config->rx_ring_num; i++) {
struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
struct ring_info *ring = &mac_control->rings[i];
ring->rx_curr_get_info.block_index = 0;
ring->rx_curr_get_info.offset = 0;
ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
ring->rx_curr_put_info.block_index = 0;
ring->rx_curr_put_info.offset = 0;
ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
ring->nic = nic;
ring->ring_no = i;
blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
/* Allocating all the Rx blocks */
for (j = 0; j < blk_cnt; j++) {
struct rx_block_info *rx_blocks;
int l;
rx_blocks = &ring->rx_blocks[j];
size = SIZE_OF_BLOCK; /* size is always page size */
tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
&tmp_p_addr);
if (tmp_v_addr == NULL) {
/*
* In case of failure, free_shared_mem()
* is called, which should free any
* memory that was allocated up to the
* point of failure.
*/
rx_blocks->block_virt_addr = tmp_v_addr;
return -ENOMEM;
}
mem_allocated += size;
memset(tmp_v_addr, 0, size);
size = sizeof(struct rxd_info) *
rxd_count[nic->rxd_mode];
rx_blocks->block_virt_addr = tmp_v_addr;
rx_blocks->block_dma_addr = tmp_p_addr;
rx_blocks->rxds = kmalloc(size, GFP_KERNEL);
if (!rx_blocks->rxds)
return -ENOMEM;
mem_allocated += size;
for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
rx_blocks->rxds[l].virt_addr =
rx_blocks->block_virt_addr +
(rxd_size[nic->rxd_mode] * l);
rx_blocks->rxds[l].dma_addr =
rx_blocks->block_dma_addr +
(rxd_size[nic->rxd_mode] * l);
}
}
/* Interlinking all Rx Blocks */
for (j = 0; j < blk_cnt; j++) {
int next = (j + 1) % blk_cnt;
tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;
pre_rxd_blk = (struct RxD_block *)tmp_v_addr;
pre_rxd_blk->reserved_2_pNext_RxD_block =
(unsigned long)tmp_v_addr_next;
pre_rxd_blk->pNext_RxD_Blk_physical =
(u64)tmp_p_addr_next;
}
}
if (nic->rxd_mode == RXD_MODE_3B) {
/*
* Allocation of Storages for buffer addresses in 2BUFF mode
* and the buffers as well.
*/
for (i = 0; i < config->rx_ring_num; i++) {
struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
struct ring_info *ring = &mac_control->rings[i];
blk_cnt = rx_cfg->num_rxd /
(rxd_count[nic->rxd_mode] + 1);
size = sizeof(struct buffAdd *) * blk_cnt;
ring->ba = kmalloc(size, GFP_KERNEL);
if (!ring->ba)
return -ENOMEM;
mem_allocated += size;
for (j = 0; j < blk_cnt; j++) {
int k = 0;
size = sizeof(struct buffAdd) *
(rxd_count[nic->rxd_mode] + 1);
ring->ba[j] = kmalloc(size, GFP_KERNEL);
if (!ring->ba[j])
return -ENOMEM;
mem_allocated += size;
while (k != rxd_count[nic->rxd_mode]) {
ba = &ring->ba[j][k];
size = BUF0_LEN + ALIGN_SIZE;
ba->ba_0_org = kmalloc(size, GFP_KERNEL);
if (!ba->ba_0_org)
return -ENOMEM;
mem_allocated += size;
tmp = (unsigned long)ba->ba_0_org;
tmp += ALIGN_SIZE;
tmp &= ~((unsigned long)ALIGN_SIZE);
ba->ba_0 = (void *)tmp;
size = BUF1_LEN + ALIGN_SIZE;
ba->ba_1_org = kmalloc(size, GFP_KERNEL);
if (!ba->ba_1_org)
return -ENOMEM;
mem_allocated += size;
tmp = (unsigned long)ba->ba_1_org;
tmp += ALIGN_SIZE;
tmp &= ~((unsigned long)ALIGN_SIZE);
ba->ba_1 = (void *)tmp;
k++;
}
}
}
}
/* Allocation and initialization of Statistics block */
size = sizeof(struct stat_block);
mac_control->stats_mem =
pci_alloc_consistent(nic->pdev, size,
&mac_control->stats_mem_phy);
if (!mac_control->stats_mem) {
/*
* In case of failure, free_shared_mem() is called, which
* should free any memory that was allocated up to the
* point of failure.
*/
return -ENOMEM;
}
mem_allocated += size;
mac_control->stats_mem_sz = size;
tmp_v_addr = mac_control->stats_mem;
mac_control->stats_info = (struct stat_block *)tmp_v_addr;
memset(tmp_v_addr, 0, size);
DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
return SUCCESS;
}
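/*
* Sketch (compiled out) of the pointer-alignment trick used for
* ba_0/ba_1 above: over-allocate by ALIGN_SIZE bytes, then round the
* pointer up and mask. This relies on ALIGN_SIZE being of the form
* 2^n - 1 (127 would give 128-byte alignment).
*/
#if 0
static void *align_up(void *org)
{
	unsigned long tmp = (unsigned long)org;
	tmp += ALIGN_SIZE;			/* step past the boundary */
	tmp &= ~((unsigned long)ALIGN_SIZE);	/* clear the low bits */
	return (void *)tmp;
}
#endif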
/**
* free_shared_mem - Free the allocated Memory
* @nic: Device private variable.
* Description: This function is to free all memory locations allocated by
* the init_shared_mem() function and return it to the kernel.
*/
static void free_shared_mem(struct s2io_nic *nic)
{
int i, j, blk_cnt, size;
void *tmp_v_addr;
dma_addr_t tmp_p_addr;
int lst_size, lst_per_page;
struct net_device *dev;
int page_num = 0;
struct config_param *config;
struct mac_info *mac_control;
struct stat_block *stats;
struct swStat *swstats;
if (!nic)
return;
dev = nic->dev;
config = &nic->config;
mac_control = &nic->mac_control;
stats = mac_control->stats_info;
swstats = &stats->sw_stat;
lst_size = sizeof(struct TxD) * config->max_txds;
lst_per_page = PAGE_SIZE / lst_size;
for (i = 0; i < config->tx_fifo_num; i++) {
struct fifo_info *fifo = &mac_control->fifos[i];
struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
for (j = 0; j < page_num; j++) {
int mem_blks = (j * lst_per_page);
struct list_info_hold *fli;
if (!fifo->list_info)
return;
fli = &fifo->list_info[mem_blks];
if (!fli->list_virt_addr)
break;
pci_free_consistent(nic->pdev, PAGE_SIZE,
fli->list_virt_addr,
fli->list_phy_addr);
swstats->mem_freed += PAGE_SIZE;
}
/* If we got a zero DMA address during allocation,
* free the page now
*/
if (mac_control->zerodma_virt_addr) {
pci_free_consistent(nic->pdev, PAGE_SIZE,
mac_control->zerodma_virt_addr,
(dma_addr_t)0);
DBG_PRINT(INIT_DBG,
"%s: Freeing TxDL with zero DMA address. "
"Virtual address %p\n",
dev->name, mac_control->zerodma_virt_addr);
swstats->mem_freed += PAGE_SIZE;
}
kfree(fifo->list_info);
swstats->mem_freed += tx_cfg->fifo_len *
sizeof(struct list_info_hold);
}
size = SIZE_OF_BLOCK;
for (i = 0; i < config->rx_ring_num; i++) {
struct ring_info *ring = &mac_control->rings[i];
blk_cnt = ring->block_count;
for (j = 0; j < blk_cnt; j++) {
tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
if (tmp_v_addr == NULL)
break;
pci_free_consistent(nic->pdev, size,
tmp_v_addr, tmp_p_addr);
swstats->mem_freed += size;
kfree(ring->rx_blocks[j].rxds);
swstats->mem_freed += sizeof(struct rxd_info) *
rxd_count[nic->rxd_mode];
}
}
if (nic->rxd_mode == RXD_MODE_3B) {
/* Freeing buffer storage addresses in 2BUFF mode. */
for (i = 0; i < config->rx_ring_num; i++) {
struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
struct ring_info *ring = &mac_control->rings[i];
blk_cnt = rx_cfg->num_rxd /
(rxd_count[nic->rxd_mode] + 1);
for (j = 0; j < blk_cnt; j++) {
int k = 0;
if (!ring->ba[j])
continue;
while (k != rxd_count[nic->rxd_mode]) {
struct buffAdd *ba = &ring->ba[j][k];
kfree(ba->ba_0_org);
swstats->mem_freed +=
BUF0_LEN + ALIGN_SIZE;
kfree(ba->ba_1_org);
swstats->mem_freed +=
BUF1_LEN + ALIGN_SIZE;
k++;
}
kfree(ring->ba[j]);
swstats->mem_freed += sizeof(struct buffAdd) *
(rxd_count[nic->rxd_mode] + 1);
}
kfree(ring->ba);
swstats->mem_freed += sizeof(struct buffAdd *) *
blk_cnt;
}
}
for (i = 0; i < nic->config.tx_fifo_num; i++) {
struct fifo_info *fifo = &mac_control->fifos[i];
struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
if (fifo->ufo_in_band_v) {
swstats->mem_freed += tx_cfg->fifo_len *
sizeof(u64);
kfree(fifo->ufo_in_band_v);
}
}
if (mac_control->stats_mem) {
swstats->mem_freed += mac_control->stats_mem_sz;
pci_free_consistent(nic->pdev,
mac_control->stats_mem_sz,
mac_control->stats_mem,
mac_control->stats_mem_phy);
}
}
/**
* s2io_verify_pci_mode -
*/
static int s2io_verify_pci_mode(struct s2io_nic *nic)
{
struct XENA_dev_config __iomem *bar0 = nic->bar0;
register u64 val64 = 0;
int mode;
val64 = readq(&bar0->pci_mode);
mode = (u8)GET_PCI_MODE(val64);
if (val64 & PCI_MODE_UNKNOWN_MODE)
return -1; /* Unknown PCI mode */
return mode;
}
#define NEC_VENID 0x1033
#define NEC_DEVID 0x0125
static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
{
struct pci_dev *tdev = NULL;
while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
if (tdev->bus == s2io_pdev->bus->parent) {
pci_dev_put(tdev);
return 1;
}
}
}
return 0;
}
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
/**
* s2io_print_pci_mode -
*/
static int s2io_print_pci_mode(struct s2io_nic *nic)
{
struct XENA_dev_config __iomem *bar0 = nic->bar0;
register u64 val64 = 0;
int mode;
struct config_param *config = &nic->config;
const char *pcimode;
val64 = readq(&bar0->pci_mode);
mode = (u8)GET_PCI_MODE(val64);
if (val64 & PCI_MODE_UNKNOWN_MODE)
return -1; /* Unknown PCI mode */
config->bus_speed = bus_speed[mode];
if (s2io_on_nec_bridge(nic->pdev)) {
DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
nic->dev->name);
return mode;
}
switch (mode) {
case PCI_MODE_PCI_33:
pcimode = "33MHz PCI bus";
break;
case PCI_MODE_PCI_66:
pcimode = "66MHz PCI bus";
break;
case PCI_MODE_PCIX_M1_66:
pcimode = "66MHz PCIX(M1) bus";
break;
case PCI_MODE_PCIX_M1_100:
pcimode = "100MHz PCIX(M1) bus";
break;
case PCI_MODE_PCIX_M1_133:
pcimode = "133MHz PCIX(M1) bus";
break;
case PCI_MODE_PCIX_M2_66:
pcimode = "133MHz PCIX(M2) bus";
break;
case PCI_MODE_PCIX_M2_100:
pcimode = "200MHz PCIX(M2) bus";
break;
case PCI_MODE_PCIX_M2_133:
pcimode = "266MHz PCIX(M2) bus";
break;
default:
pcimode = "unsupported bus!";
mode = -1;
}
DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);
return mode;
}
/**
* init_tti - Initialization transmit traffic interrupt scheme
* @nic: device private variable
* @link: link status (UP/DOWN) used to enable/disable continuous
* transmit interrupts
* Description: The function configures transmit traffic interrupts
* Return Value: SUCCESS on success and
* '-1' on failure
*/
static int init_tti(struct s2io_nic *nic, int link)
{
struct XENA_dev_config __iomem *bar0 = nic->bar0;
register u64 val64 = 0;
int i;
struct config_param *config = &nic->config;
for (i = 0; i < config->tx_fifo_num; i++) {
/*
* TTI Initialization. Default Tx timer gets us about
* 250 interrupts per sec. Continuous interrupts are enabled
* by default.
*/
if (nic->device_type == XFRAME_II_DEVICE) {
int count = (nic->config.bus_speed * 125)/2;
val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
} else
val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
TTI_DATA1_MEM_TX_URNG_B(0x10) |
TTI_DATA1_MEM_TX_URNG_C(0x30) |
TTI_DATA1_MEM_TX_TIMER_AC_EN;
if (i == 0)
if (use_continuous_tx_intrs && (link == LINK_UP))
val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
writeq(val64, &bar0->tti_data1_mem);
if (nic->config.intr_type == MSI_X) {
val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
TTI_DATA2_MEM_TX_UFC_B(0x100) |
TTI_DATA2_MEM_TX_UFC_C(0x200) |
TTI_DATA2_MEM_TX_UFC_D(0x300);
} else {
if ((nic->config.tx_steering_type ==
TX_DEFAULT_STEERING) &&
(config->tx_fifo_num > 1) &&
(i >= nic->udp_fifo_idx) &&
(i < (nic->udp_fifo_idx +
nic->total_udp_fifos)))
val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
TTI_DATA2_MEM_TX_UFC_B(0x80) |
TTI_DATA2_MEM_TX_UFC_C(0x100) |
TTI_DATA2_MEM_TX_UFC_D(0x120);
else
val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
TTI_DATA2_MEM_TX_UFC_B(0x20) |
TTI_DATA2_MEM_TX_UFC_C(0x40) |
TTI_DATA2_MEM_TX_UFC_D(0x80);
}
writeq(val64, &bar0->tti_data2_mem);
val64 = TTI_CMD_MEM_WE |
TTI_CMD_MEM_STROBE_NEW_CMD |
TTI_CMD_MEM_OFFSET(i);
writeq(val64, &bar0->tti_command_mem);
if (wait_for_cmd_complete(&bar0->tti_command_mem,
TTI_CMD_MEM_STROBE_NEW_CMD,
S2IO_BIT_RESET) != SUCCESS)
return FAILURE;
}
return SUCCESS;
}
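/*
 * Illustrative note on the TTI timer arithmetic above (a rough sketch,
 * assuming a Xframe II adapter on a 133MHz bus): with
 * config->bus_speed == 133 the timer value becomes
 *     count = (133 * 125) / 2 = 8312
 * which, per the comment in init_tti(), corresponds to roughly 250
 * transmit interrupts per second. On a 266MHz (PCI-X M2) bus the same
 * formula yields (266 * 125) / 2 = 16625.
 */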
/**
 * init_nic - Initialization of hardware
 * @nic: device private variable
 * Description: The function sequentially configures every block
 * of the H/W from their reset values.
 * Return Value: SUCCESS on success and a negative errno
 * (e.g. -EIO if the endian/swapper settings are incorrect) on failure.
 */
static int init_nic(struct s2io_nic *nic)
{
struct XENA_dev_config __iomem *bar0 = nic->bar0;
struct net_device *dev = nic->dev;
register u64 val64 = 0;
void __iomem *add;
u32 time;
int i, j;
int dtx_cnt = 0;
unsigned long long mem_share;
int mem_size;
struct config_param *config = &nic->config;
struct mac_info *mac_control = &nic->mac_control;
/* Set the swapper control on the card */
if (s2io_set_swapper(nic)) {
DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
return -EIO;
}
/*
 * Herc requires EOI to be removed from reset before XGXS.
 */
if (nic->device_type & XFRAME_II_DEVICE) {
val64 = 0xA500000000ULL;
writeq(val64, &bar0->sw_reset);
msleep(500);
val64 = readq(&bar0->sw_reset);
}
/* Remove XGXS from reset state */
val64 = 0;
writeq(val64, &bar0->sw_reset);
msleep(500);
val64 = readq(&bar0->sw_reset);
/* Ensure that it's safe to access registers by checking that the
 * RIC_RUNNING bit is reset. The check is valid only for Xframe II.
 */
if (nic->device_type == XFRAME_II_DEVICE) {
for (i = 0; i < 50; i++) {
val64 = readq(&bar0->adapter_status);
if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
break;
msleep(10);
}
if (i == 50)
return -ENODEV;
}
/* Enable Receiving broadcasts */
add = &bar0->mac_cfg;
val64 = readq(&bar0->mac_cfg);
val64 |= MAC_RMAC_BCAST_ENABLE;
writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
writel((u32)val64, add);
writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
writel((u32) (val64 >> 32), (add + 4));
/* Read registers in all blocks */
val64 = readq(&bar0->mac_int_mask);
val64 = readq(&bar0->mc_int_mask);
val64 = readq(&bar0->xgxs_int_mask);
/* Set MTU */
val64 = dev->mtu;
writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
if (nic->device_type & XFRAME_II_DEVICE) {
while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
&bar0->dtx_control, UF);
if (dtx_cnt & 0x1)
msleep(1); /* Necessary!! */
dtx_cnt++;
}
} else {
while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
&bar0->dtx_control, UF);
val64 = readq(&bar0->dtx_control);
dtx_cnt++;
}
}
/* Tx DMA Initialization */
val64 = 0;
writeq(val64, &bar0->tx_fifo_partition_0);
writeq(val64, &bar0->tx_fifo_partition_1);
writeq(val64, &bar0->tx_fifo_partition_2);
writeq(val64, &bar0->tx_fifo_partition_3);
for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
if (i == (config->tx_fifo_num - 1)) {
if (i % 2 == 0)
i++;
}
switch (i) {
case 1:
writeq(val64, &bar0->tx_fifo_partition_0);
val64 = 0;
j = 0;
break;
case 3:
writeq(val64, &bar0->tx_fifo_partition_1);
val64 = 0;
j = 0;
break;
case 5:
writeq(val64, &bar0->tx_fifo_partition_2);
val64 = 0;
j = 0;
break;
case 7:
writeq(val64, &bar0->tx_fifo_partition_3);
val64 = 0;
j = 0;
break;
default:
j++;
break;
}
}
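/*
 * Illustrative note: each 64-bit tx_fifo_partition register holds two
 * 32-bit lanes (selected by 'j' above). Assuming vBIT(val, loc, sz)
 * places 'val' at MSB-relative bit offset 'loc' as defined in s2io.h,
 * i.e. (u64)val << (64 - loc - sz), then for a FIFO of length 1024 with
 * priority 0 in lane j == 0:
 *     vBIT(1023, 19, 13) == (u64)1023 << (64 - 19 - 13) == 1023 << 32
 * so the length field occupies bits 19..31 of the upper lane.
 */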
/*
* Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
* SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
*/
if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
val64 = readq(&bar0->tx_fifo_partition_0);
DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
&bar0->tx_fifo_partition_0, (unsigned long long)val64);
/*
* Initialization of Tx_PA_CONFIG register to ignore packet
* integrity checking.
*/
val64 = readq(&bar0->tx_pa_cfg);
val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
TX_PA_CFG_IGNORE_SNAP_OUI |
TX_PA_CFG_IGNORE_LLC_CTRL |
TX_PA_CFG_IGNORE_L2_ERR;
writeq(val64, &bar0->tx_pa_cfg);
/* Rx DMA initialization. */
val64 = 0;
for (i = 0; i < config->rx_ring_num; i++) {
struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
}
writeq(val64, &bar0->rx_queue_priority);
/*
* Allocating equal share of memory to all the
* configured Rings.
*/
val64 = 0;
if (nic->device_type & XFRAME_II_DEVICE)
mem_size = 32;
else
mem_size = 64;
for (i = 0; i < config->rx_ring_num; i++) {
switch (i) {
case 0:
mem_share = (mem_size / config->rx_ring_num +
mem_size % config->rx_ring_num);
val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
continue;
case 1:
mem_share = (mem_size / config->rx_ring_num);
val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
continue;
case 2:
mem_share = (mem_size / config->rx_ring_num);
val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
continue;
case 3:
mem_share = (mem_size / config->rx_ring_num);
val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
continue;
case 4:
mem_share = (mem_size / config->rx_ring_num);
val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
continue;
case 5:
mem_share = (mem_size / config->rx_ring_num);
val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
continue;
case 6:
mem_share = (mem_size / config->rx_ring_num);
val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
continue;
case 7:
mem_share = (mem_size / config->rx_ring_num);
val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
continue;
}
}
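/*
 * Illustrative note: with the Xena value mem_size == 64 and, say,
 * rx_ring_num == 3, the split above works out to
 *     ring 0: 64 / 3 + 64 % 3 = 21 + 1 = 22 units
 *     rings 1..2: 64 / 3 = 21 units each
 * i.e. ring 0 absorbs the remainder so the shares always sum to 64.
 */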
writeq(val64, &bar0->rx_queue_cfg);
/*
* Filling Tx round robin registers
* as per the number of FIFOs for equal scheduling priority
*/
switch (config->tx_fifo_num) {
case 1:
val64 = 0x0;
writeq(val64, &bar0->tx_w_round_robin_0);
writeq(val64, &bar0->tx_w_round_robin_1);
writeq(val64, &bar0->tx_w_round_robin_2);
writeq(val64, &bar0->tx_w_round_robin_3);
writeq(val64, &bar0->tx_w_round_robin_4);
break;
case 2:
val64 = 0x0001000100010001ULL;
writeq(val64, &bar0->tx_w_round_robin_0);
writeq(val64, &bar0->tx_w_round_robin_1);
writeq(val64, &bar0->tx_w_round_robin_2);
writeq(val64, &bar0->tx_w_round_robin_3);
val64 = 0x0001000100000000ULL;
writeq(val64, &bar0->tx_w_round_robin_4);
break;
case 3:
val64 = 0x0001020001020001ULL;
writeq(val64, &bar0->tx_w_round_robin_0);
val64 = 0x0200010200010200ULL;
writeq(val64, &bar0->tx_w_round_robin_1);
val64 = 0x0102000102000102ULL;
writeq(val64, &bar0->tx_w_round_robin_2);
val64 = 0x0001020001020001ULL;
writeq(val64, &bar0->tx_w_round_robin_3);
val64 = 0x0200010200000000ULL;
writeq(val64, &bar0->tx_w_round_robin_4);
break;
case 4:
val64 = 0x0001020300010203ULL;
writeq(val64, &bar0->tx_w_round_robin_0);
writeq(val64, &bar0->tx_w_round_robin_1);
writeq(val64, &bar0->tx_w_round_robin_2);
writeq(val64, &bar0->tx_w_round_robin_3);
val64 = 0x0001020300000000ULL;
writeq(val64, &bar0->tx_w_round_robin_4);
break;
case 5:
val64 = 0x0001020304000102ULL;
writeq(val64, &bar0->tx_w_round_robin_0);
val64 = 0x0304000102030400ULL;
writeq(val64, &bar0->tx_w_round_robin_1);
val64 = 0x0102030400010203ULL;
writeq(val64, &bar0->tx_w_round_robin_2);
val64 = 0x0400010203040001ULL;
writeq(val64, &bar0->tx_w_round_robin_3);
val64 = 0x0203040000000000ULL;
writeq(val64, &bar0->tx_w_round_robin_4);
break;
case 6:
val64 = 0x0001020304050001ULL;
writeq(val64, &bar0->tx_w_round_robin_0);
val64 = 0x0203040500010203ULL;
writeq(val64, &bar0->tx_w_round_robin_1);
val64 = 0x0405000102030405ULL;
writeq(val64, &bar0->tx_w_round_robin_2);
val64 = 0x0001020304050001ULL;
writeq(val64, &bar0->tx_w_round_robin_3);
val64 = 0x0203040500000000ULL;
writeq(val64, &bar0->tx_w_round_robin_4);
break;
case 7:
val64 = 0x0001020304050600ULL;
writeq(val64, &bar0->tx_w_round_robin_0);
val64 = 0x0102030405060001ULL;
writeq(val64, &bar0->tx_w_round_robin_1);
val64 = 0x0203040506000102ULL;
writeq(val64, &bar0->tx_w_round_robin_2);
val64 = 0x0304050600010203ULL;
writeq(val64, &bar0->tx_w_round_robin_3);
val64 = 0x0405060000000000ULL;
writeq(val64, &bar0->tx_w_round_robin_4);
break;
case 8:
val64 = 0x0001020304050607ULL;
writeq(val64, &bar0->tx_w_round_robin_0);
writeq(val64, &bar0->tx_w_round_robin_1);
writeq(val64, &bar0->tx_w_round_robin_2);
writeq(val64, &bar0->tx_w_round_robin_3);
val64 = 0x0001020300000000ULL;
writeq(val64, &bar0->tx_w_round_robin_4);
break;
}
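/*
 * Illustrative note: the five tx_w_round_robin registers appear to form
 * a table of 8-byte slots where each byte names the FIFO served in that
 * time slot. E.g. for tx_fifo_num == 3 the bytes of register 0,
 * 0x0001020001020001, read as the slot sequence 0,1,2,0,1,2,0,1 --
 * the pattern 0,1,2 repeated so that all FIFOs get equal weight.
 */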
/* Enable all configured Tx FIFO partitions */
val64 = readq(&bar0->tx_fifo_partition_0);
val64 |= (TX_FIFO_PARTITION_EN);
writeq(val64, &bar0->tx_fifo_partition_0);
/* Filling the Rx round robin registers as per the
* number of Rings and steering based on QoS with
* equal priority.
*/
switch (config->rx_ring_num) {
case 1:
val64 = 0x0;
writeq(val64, &bar0->rx_w_round_robin_0);
writeq(val64, &bar0->rx_w_round_robin_1);
writeq(val64, &bar0->rx_w_round_robin_2);
writeq(val64, &bar0->rx_w_round_robin_3);
writeq(val64, &bar0->rx_w_round_robin_4);
val64 = 0x8080808080808080ULL;
writeq(val64, &bar0->rts_qos_steering);
break;
case 2:
val64 = 0x0001000100010001ULL;
writeq(val64, &bar0->rx_w_round_robin_0);
writeq(val64, &bar0->rx_w_round_robin_1);
writeq(val64, &bar0->rx_w_round_robin_2);
writeq(val64, &bar0->rx_w_round_robin_3);
val64 = 0x0001000100000000ULL;
writeq(val64, &bar0->rx_w_round_robin_4);
val64 = 0x8080808040404040ULL;
writeq(val64, &bar0->rts_qos_steering);
break;
case 3:
val64 = 0x0001020001020001ULL;
writeq(val64, &bar0->rx_w_round_robin_0);
val64 = 0x0200010200010200ULL;
writeq(val64, &bar0->rx_w_round_robin_1);
val64 = 0x0102000102000102ULL;
writeq(val64, &bar0->rx_w_round_robin_2);
val64 = 0x0001020001020001ULL;
writeq(val64, &bar0->rx_w_round_robin_3);
val64 = 0x0200010200000000ULL;
writeq(val64, &bar0->rx_w_round_robin_4);
val64 = 0x8080804040402020ULL;
writeq(val64, &bar0->rts_qos_steering);
break;
case 4:
val64 = 0x0001020300010203ULL;
writeq(val64, &bar0->rx_w_round_robin_0);
writeq(val64, &bar0->rx_w_round_robin_1);
writeq(val64, &bar0->rx_w_round_robin_2);
writeq(val64, &bar0->rx_w_round_robin_3);
val64 = 0x0001020300000000ULL;
writeq(val64, &bar0->rx_w_round_robin_4);
val64 = 0x8080404020201010ULL;
writeq(val64, &bar0->rts_qos_steering);
break;
case 5:
val64 = 0x0001020304000102ULL;
writeq(val64, &bar0->rx_w_round_robin_0);
val64 = 0x0304000102030400ULL;
writeq(val64, &bar0->rx_w_round_robin_1);
val64 = 0x0102030400010203ULL;
writeq(val64, &bar0->rx_w_round_robin_2);
val64 = 0x0400010203040001ULL;
writeq(val64, &bar0->rx_w_round_robin_3);
val64 = 0x0203040000000000ULL;
writeq(val64, &bar0->rx_w_round_robin_4);
val64 = 0x8080404020201008ULL;
writeq(val64, &bar0->rts_qos_steering);
break;
case 6:
val64 = 0x0001020304050001ULL;
writeq(val64, &bar0->rx_w_round_robin_0);
val64 = 0x0203040500010203ULL;
writeq(val64, &bar0->rx_w_round_robin_1);
val64 = 0x0405000102030405ULL;
writeq(val64, &bar0->rx_w_round_robin_2);
val64 = 0x0001020304050001ULL;
writeq(val64, &bar0->rx_w_round_robin_3);
val64 = 0x0203040500000000ULL;
writeq(val64, &bar0->rx_w_round_robin_4);
val64 = 0x8080404020100804ULL;
writeq(val64, &bar0->rts_qos_steering);
break;
case 7:
val64 = 0x0001020304050600ULL;
writeq(val64, &bar0->rx_w_round_robin_0);
val64 = 0x0102030405060001ULL;
writeq(val64, &bar0->rx_w_round_robin_1);
val64 = 0x0203040506000102ULL;
writeq(val64, &bar0->rx_w_round_robin_2);
val64 = 0x0304050600010203ULL;
writeq(val64, &bar0->rx_w_round_robin_3);
val64 = 0x0405060000000000ULL;
writeq(val64, &bar0->rx_w_round_robin_4);
val64 = 0x8080402010080402ULL;
writeq(val64, &bar0->rts_qos_steering);
break;
case 8:
val64 = 0x0001020304050607ULL;
writeq(val64, &bar0->rx_w_round_robin_0);
writeq(val64, &bar0->rx_w_round_robin_1);
writeq(val64, &bar0->rx_w_round_robin_2);
writeq(val64, &bar0->rx_w_round_robin_3);
val64 = 0x0001020300000000ULL;
writeq(val64, &bar0->rx_w_round_robin_4);
val64 = 0x8040201008040201ULL;
writeq(val64, &bar0->rts_qos_steering);
break;
}
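/*
 * Illustrative note: rts_qos_steering appears to hold one byte per QoS
 * priority (8 in all), with bit 7 of each byte selecting ring 0, bit 6
 * ring 1, and so on. Thus 0x8080808080808080 (1 ring) steers every
 * priority to ring 0, while 0x8040201008040201 (8 rings) steers
 * priority n to ring n.
 */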
/* UDP Fix */
val64 = 0;
for (i = 0; i < 8; i++)
writeq(val64, &bar0->rts_frm_len_n[i]);
/* Set the default rts frame length for the rings configured */
val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
for (i = 0 ; i < config->rx_ring_num ; i++)
writeq(val64, &bar0->rts_frm_len_n[i]);
/* Set the frame length for the configured rings
* desired by the user
*/
for (i = 0; i < config->rx_ring_num; i++) {
/* If rts_frm_len[i] == 0 then it is assumed that the user did not
 * specify frame length steering.
 * If the user provides the frame length then program
 * the rts_frm_len register for those values, or else
 * leave it as it is.
 */
if (rts_frm_len[i] != 0) {
writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
&bar0->rts_frm_len_n[i]);
}
}
/* Disable differentiated services steering logic */
for (i = 0; i < 64; i++) {
if (rts_ds_steer(nic, i, 0) == FAILURE) {
DBG_PRINT(ERR_DBG,
"%s: rts_ds_steer failed on codepoint %d\n",
dev->name, i);
return -ENODEV;
}
}
/* Program statistics memory */
writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
if (nic->device_type == XFRAME_II_DEVICE) {
val64 = STAT_BC(0x320);
writeq(val64, &bar0->stat_byte_cnt);
}
/*
* Initializing the sampling rate for the device to calculate the
* bandwidth utilization.
*/
val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
MAC_RX_LINK_UTIL_VAL(rmac_util_period);
writeq(val64, &bar0->mac_link_util);
/*
* Initializing the Transmit and Receive Traffic Interrupt
* Scheme.
*/
/* Initialize TTI */
if (SUCCESS != init_tti(nic, nic->last_link_state))
return -ENODEV;
/* RTI Initialization */
if (nic->device_type == XFRAME_II_DEVICE) {
/*
 * Programmed to generate approximately 500 interrupts per
 * second
 */
int count = (nic->config.bus_speed * 125)/4;
val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
} else
val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
RTI_DATA1_MEM_RX_URNG_B(0x10) |
RTI_DATA1_MEM_RX_URNG_C(0x30) |
RTI_DATA1_MEM_RX_TIMER_AC_EN;
writeq(val64, &bar0->rti_data1_mem);
val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
RTI_DATA2_MEM_RX_UFC_B(0x2) ;
if (nic->config.intr_type == MSI_X)
val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
RTI_DATA2_MEM_RX_UFC_D(0x40));
else
val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
RTI_DATA2_MEM_RX_UFC_D(0x80));
writeq(val64, &bar0->rti_data2_mem);
for (i = 0; i < config->rx_ring_num; i++) {
val64 = RTI_CMD_MEM_WE |
RTI_CMD_MEM_STROBE_NEW_CMD |
RTI_CMD_MEM_OFFSET(i);
writeq(val64, &bar0->rti_command_mem);
/*
 * Once the operation completes, the Strobe bit of the
 * command register will be reset. We poll for this
 * particular condition. We wait for a maximum of 500ms
 * for the operation to complete; if it's not complete
 * by then we return an error.
 */
time = 0;
while (true) {
val64 = readq(&bar0->rti_command_mem);
if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
break;
if (time > 10) {
DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
dev->name);
return -ENODEV;
}
time++;
msleep(50);
}
}
/*
* Initializing proper values as Pause threshold into all
* the 8 Queues on Rx side.
*/
writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
/* Disable RMAC PAD STRIPPING */
add = &bar0->mac_cfg;
val64 = readq(&bar0->mac_cfg);
val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
writel((u32) (val64), add);
writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
writel((u32) (val64 >> 32), (add + 4));
val64 = readq(&bar0->mac_cfg);
/* Enable FCS stripping by adapter */
add = &bar0->mac_cfg;
val64 = readq(&bar0->mac_cfg);
val64 |= MAC_CFG_RMAC_STRIP_FCS;
if (nic->device_type == XFRAME_II_DEVICE)
writeq(val64, &bar0->mac_cfg);
else {
writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
writel((u32) (val64), add);
writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
writel((u32) (val64 >> 32), (add + 4));
}
/*
* Set the time value to be inserted in the pause frame
* generated by xena.
*/
val64 = readq(&bar0->rmac_pause_cfg);
val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
writeq(val64, &bar0->rmac_pause_cfg);
/*
* Set the Threshold Limit for Generating the pause frame
* If the amount of data in any Queue exceeds ratio of
* (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
* pause frame is generated
*/
val64 = 0;
for (i = 0; i < 4; i++) {
val64 |= (((u64)0xFF00 |
nic->mac_control.mc_pause_threshold_q0q3)
<< (i * 2 * 8));
}
writeq(val64, &bar0->mc_pause_thresh_q0q3);
val64 = 0;
for (i = 0; i < 4; i++) {
val64 |= (((u64)0xFF00 |
nic->mac_control.mc_pause_threshold_q4q7)
<< (i * 2 * 8));
}
writeq(val64, &bar0->mc_pause_thresh_q4q7);
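/*
 * Illustrative note: each pause-threshold register packs four 16-bit
 * per-queue fields, high byte 0xFF and low byte the threshold. E.g.
 * with mc_pause_threshold_q0q3 == 0xBB the loop above builds
 * 0xFFBBFFBBFFBBFFBB, matching the reset value written to
 * mc_pause_thresh_q0q3 earlier in this function.
 */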
/*
 * TxDMA will stop issuing read requests if the number of read
 * splits exceeds the limit set by shared_splits
 */
val64 = readq(&bar0->pic_control);
val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
writeq(val64, &bar0->pic_control);
if (nic->config.bus_speed == 266) {
writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
writeq(0x0, &bar0->read_retry_delay);
writeq(0x0, &bar0->write_retry_delay);
}
/*
* Programming the Herc to split every write transaction
* that does not start on an ADB to reduce disconnects.
*/
if (nic->device_type == XFRAME_II_DEVICE) {
val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
MISC_LINK_STABILITY_PRD(3);
writeq(val64, &bar0->misc_control);
val64 = readq(&bar0->pic_control2);
val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
writeq(val64, &bar0->pic_control2);
}
if (strstr(nic->product_name, "CX4")) {
val64 = TMAC_AVG_IPG(0x17);
writeq(val64, &bar0->tmac_avg_ipg);
}
return SUCCESS;
}
#define LINK_UP_DOWN_INTERRUPT 1
#define MAC_RMAC_ERR_TIMER 2
static int s2io_link_fault_indication(struct s2io_nic *nic)
{
if (nic->device_type == XFRAME_II_DEVICE)
return LINK_UP_DOWN_INTERRUPT;
else
return MAC_RMAC_ERR_TIMER;
}
/**
* do_s2io_write_bits - update alarm bits in alarm register
* @value: alarm bits
* @flag: interrupt status
* @addr: address value
* Description: update alarm bits in alarm register
* Return Value:
* NONE.
*/
static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
{
u64 temp64;
temp64 = readq(addr);
if (flag == ENABLE_INTRS)
temp64 &= ~((u64)value);
else
temp64 |= ((u64)value);
writeq(temp64, addr);
}
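/*
 * Usage sketch for do_s2io_write_bits(): the alarm mask registers use
 * 0 == interrupt enabled, so passing ENABLE_INTRS clears the given bits
 * and anything else sets them. E.g.
 *
 *	do_s2io_write_bits(SM_SM_ERR_ALARM, ENABLE_INTRS,
 *			   &bar0->sm_err_mask);
 *
 * unmasks (enables) the state-machine error alarm, while the same call
 * with DISABLE_INTRS masks it again.
 */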
static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
struct XENA_dev_config __iomem *bar0 = nic->bar0;
register u64 gen_int_mask = 0;
u64 interruptible;
writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
if (mask & TX_DMA_INTR) {
gen_int_mask |= TXDMA_INT_M;
do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
TXDMA_PCC_INT | TXDMA_TTI_INT |
TXDMA_LSO_INT | TXDMA_TPA_INT |
TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
PFC_MISC_0_ERR | PFC_MISC_1_ERR |
PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
&bar0->pfc_err_mask);
do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
PCC_N_SERR | PCC_6_COF_OV_ERR |
PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
PCC_TXB_ECC_SG_ERR,
flag, &bar0->pcc_err_mask);
do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
flag, &bar0->lso_err_mask);
do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
flag, &bar0->tpa_err_mask);
do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
}
if (mask & TX_MAC_INTR) {
gen_int_mask |= TXMAC_INT_M;
do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
&bar0->mac_int_mask);
do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
flag, &bar0->mac_tmac_err_mask);
}
if (mask & TX_XGXS_INTR) {
gen_int_mask |= TXXGXS_INT_M;
do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
&bar0->xgxs_int_mask);
do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
flag, &bar0->xgxs_txgxs_err_mask);
}
if (mask & RX_DMA_INTR) {
gen_int_mask |= RXDMA_INT_M;
do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
flag, &bar0->rxdma_int_mask);
do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
&bar0->prc_pcix_err_mask);
do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
&bar0->rpa_err_mask);
do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
RDA_FRM_ECC_SG_ERR |
RDA_MISC_ERR|RDA_PCIX_ERR,
flag, &bar0->rda_err_mask);
do_s2io_write_bits(RTI_SM_ERR_ALARM |
RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
flag, &bar0->rti_err_mask);
}
if (mask & RX_MAC_INTR) {
gen_int_mask |= RXMAC_INT_M;
do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
&bar0->mac_int_mask);
interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
RMAC_DOUBLE_ECC_ERR);
if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
interruptible |= RMAC_LINK_STATE_CHANGE_INT;
do_s2io_write_bits(interruptible,
flag, &bar0->mac_rmac_err_mask);
}
if (mask & RX_XGXS_INTR) {
gen_int_mask |= RXXGXS_INT_M;
do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
&bar0->xgxs_int_mask);
do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
&bar0->xgxs_rxgxs_err_mask);
}
if (mask & MC_INTR) {
gen_int_mask |= MC_INT_M;
do_s2io_write_bits(MC_INT_MASK_MC_INT,
flag, &bar0->mc_int_mask);
do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
&bar0->mc_err_mask);
}
nic->general_int_mask = gen_int_mask;
/* Remove this line when alarm interrupts are enabled */
nic->general_int_mask = 0;
}
/**
 * en_dis_able_nic_intrs - Enable or Disable the interrupts
 * @nic: device private variable
 * @mask: A mask indicating which Intr block must be modified
 * @flag: A flag indicating whether to enable or disable the Intrs
 * Description: This function will either disable or enable the interrupts
 * depending on the flag argument. The mask argument can be used to
 * enable/disable any Intr block.
 * Return Value: NONE.
 */
static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
struct XENA_dev_config __iomem *bar0 = nic->bar0;
register u64 temp64 = 0, intr_mask = 0;
intr_mask = nic->general_int_mask;
/* Top level interrupt classification */
/* PIC Interrupts */
if (mask & TX_PIC_INTR) {
/* Enable PIC Intrs in the general intr mask register */
intr_mask |= TXPIC_INT_M;
if (flag == ENABLE_INTRS) {
/*
* If Hercules adapter enable GPIO otherwise
* disable all PCIX, Flash, MDIO, IIC and GPIO
* interrupts for now.
* TODO
*/
if (s2io_link_fault_indication(nic) ==
LINK_UP_DOWN_INTERRUPT) {
do_s2io_write_bits(PIC_INT_GPIO, flag,
&bar0->pic_int_mask);
do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
&bar0->gpio_int_mask);
} else
writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
} else if (flag == DISABLE_INTRS) {
/*
* Disable PIC Intrs in the general
* intr mask register
*/
writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
}
}
/* Tx traffic interrupts */
if (mask & TX_TRAFFIC_INTR) {
intr_mask |= TXTRAFFIC_INT_M;
if (flag == ENABLE_INTRS) {
/*
* Enable all the Tx side interrupts
* writing 0 Enables all 64 TX interrupt levels
*/
writeq(0x0, &bar0->tx_traffic_mask);
} else if (flag == DISABLE_INTRS) {
/*
* Disable Tx Traffic Intrs in the general intr mask
* register.
*/
writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
}
}
/* Rx traffic interrupts */
if (mask & RX_TRAFFIC_INTR) {
intr_mask |= RXTRAFFIC_INT_M;
if (flag == ENABLE_INTRS) {
/* writing 0 Enables all 8 RX interrupt levels */
writeq(0x0, &bar0->rx_traffic_mask);
} else if (flag == DISABLE_INTRS) {
/*
* Disable Rx Traffic Intrs in the general intr mask
* register.
*/
writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
}
}
temp64 = readq(&bar0->general_int_mask);
if (flag == ENABLE_INTRS)
temp64 &= ~((u64)intr_mask);
else
temp64 = DISABLE_ALL_INTRS;
writeq(temp64, &bar0->general_int_mask);
nic->general_int_mask = readq(&bar0->general_int_mask);
}
/**
 * verify_pcc_quiescent - Checks for PCC quiescent state
 * Return: 1 if the PCC is quiescent
 *         0 if the PCC is not quiescent
 */
static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
{
int ret = 0, herc;
struct XENA_dev_config __iomem *bar0 = sp->bar0;
u64 val64 = readq(&bar0->adapter_status);
herc = (sp->device_type == XFRAME_II_DEVICE);
if (flag == false) {
if ((!herc && (sp->pdev->revision >= 4)) || herc) {
if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
ret = 1;
} else {
if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
ret = 1;
}
} else {
if ((!herc && (sp->pdev->revision >= 4)) || herc) {
if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
ADAPTER_STATUS_RMAC_PCC_IDLE))
ret = 1;
} else {
if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
ret = 1;
}
}
return ret;
}
/**
* verify_xena_quiescence - Checks whether the H/W is ready
* Description: Returns whether the H/W is ready to go or not. Depending
* on whether adapter enable bit was written or not the comparison
* differs and the calling function passes the input argument flag to
* indicate this.
 * Return: 1 if Xena is quiescent
 * 0 if Xena is not quiescent
*/
static int verify_xena_quiescence(struct s2io_nic *sp)
{
int mode;
struct XENA_dev_config __iomem *bar0 = sp->bar0;
u64 val64 = readq(&bar0->adapter_status);
mode = s2io_verify_pci_mode(sp);
if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
return 0;
}
if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
return 0;
}
if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
return 0;
}
if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
return 0;
}
if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
return 0;
}
if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
return 0;
}
if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
return 0;
}
if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
return 0;
}
/*
 * In PCI 33 mode, the P_PLL is not used, and therefore,
 * the P_PLL_LOCK bit in the adapter_status register will
 * not be asserted.
 */
if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
sp->device_type == XFRAME_II_DEVICE &&
mode != PCI_MODE_PCI_33) {
DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
return 0;
}
if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
return 0;
}
return 1;
}
/**
* fix_mac_address - Fix for Mac addr problem on Alpha platforms
 * @sp: Pointer to device specific structure
* Description :
* New procedure to clear mac address reading problems on Alpha platforms
*
*/
static void fix_mac_address(struct s2io_nic *sp)
{
struct XENA_dev_config __iomem *bar0 = sp->bar0;
int i = 0;
while (fix_mac[i] != END_SIGN) {
writeq(fix_mac[i++], &bar0->gpio_control);
udelay(10);
(void) readq(&bar0->gpio_control);
}
}
/**
* start_nic - Turns the device on
* @nic : device private variable.
 * Description:
 * This function actually turns the device on. Before this function is
 * called, all registers are configured from their reset states
 * and shared memory is allocated but the NIC is still quiescent. On
 * calling this function, the device interrupts are cleared and the NIC is
 * literally switched on by writing into the adapter control register.
 * Return Value:
 * SUCCESS on success and FAILURE on failure.
*/
static int start_nic(struct s2io_nic *nic)
{
struct XENA_dev_config __iomem *bar0 = nic->bar0;
struct net_device *dev = nic->dev;
register u64 val64 = 0;
u16 subid, i;
struct config_param *config = &nic->config;
struct mac_info *mac_control = &nic->mac_control;
/* PRC Initialization and configuration */
for (i = 0; i < config->rx_ring_num; i++) {
struct ring_info *ring = &mac_control->rings[i];
writeq((u64)ring->rx_blocks[0].block_dma_addr,
&bar0->prc_rxd0_n[i]);
val64 = readq(&bar0->prc_ctrl_n[i]);
if (nic->rxd_mode == RXD_MODE_1)
val64 |= PRC_CTRL_RC_ENABLED;
else
val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
if (nic->device_type == XFRAME_II_DEVICE)
val64 |= PRC_CTRL_GROUP_READS;
val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
writeq(val64, &bar0->prc_ctrl_n[i]);
}
if (nic->rxd_mode == RXD_MODE_3B) {
/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
val64 = readq(&bar0->rx_pa_cfg);
val64 |= RX_PA_CFG_IGNORE_L2_ERR;
writeq(val64, &bar0->rx_pa_cfg);
}
if (vlan_tag_strip == 0) {
val64 = readq(&bar0->rx_pa_cfg);
val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
writeq(val64, &bar0->rx_pa_cfg);
nic->vlan_strip_flag = 0;
}
/*
 * Enabling MC-RLDRAM. After enabling the device, we wait
 * around 100ms, which is approximately the time required
 * for the device to be ready for operation.
 */
val64 = readq(&bar0->mc_rldram_mrs);
val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
val64 = readq(&bar0->mc_rldram_mrs);
msleep(100); /* Delay by around 100 ms. */
/* Enabling ECC Protection. */
val64 = readq(&bar0->adapter_control);
val64 &= ~ADAPTER_ECC_EN;
writeq(val64, &bar0->adapter_control);
/*
* Verify if the device is ready to be enabled, if so enable
* it.
*/
val64 = readq(&bar0->adapter_status);
if (!verify_xena_quiescence(nic)) {
DBG_PRINT(ERR_DBG, "%s: device is not ready, "
"Adapter status reads: 0x%llx\n",
dev->name, (unsigned long long)val64);
return FAILURE;
}
/*
* With some switches, link might be already up at this point.
* Because of this weird behavior, when we enable laser,
* we may not get link. We need to handle this. We cannot
* figure out which switch is misbehaving. So we are forced to
* make a global change.
*/
/* Enabling Laser. */
val64 = readq(&bar0->adapter_control);
val64 |= ADAPTER_EOI_TX_ON;
writeq(val64, &bar0->adapter_control);
if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
/*
 * We don't see link state interrupts initially on some switches,
 * so directly schedule the link state task here.
 */
schedule_work(&nic->set_link_task);
}
/* SXE-002: Initialize link and activity LED */
subid = nic->pdev->subsystem_device;
if (((subid & 0xFF) >= 0x07) &&
(nic->device_type == XFRAME_I_DEVICE)) {
val64 = readq(&bar0->gpio_control);
val64 |= 0x0000800000000000ULL;
writeq(val64, &bar0->gpio_control);
val64 = 0x0411040400000000ULL;
writeq(val64, (void __iomem *)bar0 + 0x2700);
}
return SUCCESS;
}
/**
 * s2io_txdl_getskb - Get the skb from the txdl, unmap it and return the skb
 * @fifo_data: fifo data of the fifo the TxD list belongs to
 * @txdlp: first TxD of the descriptor list
 * @get_off: offset of the list within the fifo
 */
static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
struct TxD *txdlp, int get_off)
{
struct s2io_nic *nic = fifo_data->nic;
struct sk_buff *skb;
struct TxD *txds;
u16 j, frg_cnt;
txds = txdlp;
if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
sizeof(u64), PCI_DMA_TODEVICE);
txds++;
}
skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
if (!skb) {
memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
return NULL;
}
pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
skb_headlen(skb), PCI_DMA_TODEVICE);
frg_cnt = skb_shinfo(skb)->nr_frags;
if (frg_cnt) {
txds++;
for (j = 0; j < frg_cnt; j++, txds++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
if (!txds->Buffer_Pointer)
break;
pci_unmap_page(nic->pdev,
(dma_addr_t)txds->Buffer_Pointer,
frag->size, PCI_DMA_TODEVICE);
}
}
memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
return skb;
}
/**
* free_tx_buffers - Free all queued Tx buffers
* @nic : device private variable.
* Description:
* Free all queued Tx buffers.
* Return Value: void
*/
static void free_tx_buffers(struct s2io_nic *nic)
{
struct net_device *dev = nic->dev;
struct sk_buff *skb;
struct TxD *txdp;
int i, j;
int cnt = 0;
struct config_param *config = &nic->config;
struct mac_info *mac_control = &nic->mac_control;
struct stat_block *stats = mac_control->stats_info;
struct swStat *swstats = &stats->sw_stat;
for (i = 0; i < config->tx_fifo_num; i++) {
struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
struct fifo_info *fifo = &mac_control->fifos[i];
unsigned long flags;
spin_lock_irqsave(&fifo->tx_lock, flags);
for (j = 0; j < tx_cfg->fifo_len; j++) {
txdp = (struct TxD *)fifo->list_info[j].list_virt_addr;
skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
if (skb) {
swstats->mem_freed += skb->truesize;
dev_kfree_skb(skb);
cnt++;
}
}
DBG_PRINT(INTR_DBG,
"%s: forcibly freeing %d skbs on FIFO%d\n",
dev->name, cnt, i);
fifo->tx_curr_get_info.offset = 0;
fifo->tx_curr_put_info.offset = 0;
spin_unlock_irqrestore(&fifo->tx_lock, flags);
}
}
/**
* stop_nic - To stop the nic
 * @nic : device private variable.
* Description:
* This function does exactly the opposite of what the start_nic()
* function does. This function is called to stop the device.
* Return Value:
* void.
*/
static void stop_nic(struct s2io_nic *nic)
{
struct XENA_dev_config __iomem *bar0 = nic->bar0;
register u64 val64 = 0;
u16 interruptible;
/* Disable all interrupts */
en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
interruptible |= TX_PIC_INTR;
en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
/* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
val64 = readq(&bar0->adapter_control);
val64 &= ~(ADAPTER_CNTL_EN);
writeq(val64, &bar0->adapter_control);
}
/**
 * fill_rx_buffers - Allocates the Rx side skbs
 * @nic: device private variable
 * @ring: per ring structure
 * @from_card_up: If this is true, we will map the buffer to get
 * the dma address for buf0 and buf1 to give it to the card.
 * Else we will sync the already mapped buffer to give it to the card.
* Description:
* The function allocates Rx side skbs and puts the physical
* address of these buffers into the RxD buffer pointers, so that the NIC
* can DMA the received frame into these locations.
 * The NIC supports 3 receive modes, viz
 * 1. single buffer,
 * 2. three buffer and
 * 3. five buffer modes.
 * Each mode defines how many fragments the received frame will be split
 * up into by the NIC. The frame is split into the L3 header, L4 header and
 * L4 payload in three buffer mode, and in five buffer mode the L4 payload
 * itself is split into 3 fragments. This driver currently supports the
 * single buffer (RXD_MODE_1) and two buffer (RXD_MODE_3B) modes.
* Return Value:
* SUCCESS on success or an appropriate -ve value on failure.
*/
static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
int from_card_up)
{
struct sk_buff *skb;
struct RxD_t *rxdp;
int off, size, block_no, block_no1;
u32 alloc_tab = 0;
u32 alloc_cnt;
u64 tmp;
struct buffAdd *ba;
struct RxD_t *first_rxdp = NULL;
u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
int rxd_index = 0;
struct RxD1 *rxdp1;
struct RxD3 *rxdp3;
struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;
alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
block_no1 = ring->rx_curr_get_info.block_index;
while (alloc_tab < alloc_cnt) {
block_no = ring->rx_curr_put_info.block_index;
off = ring->rx_curr_put_info.offset;
rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
rxd_index = off + 1;
if (block_no)
rxd_index += (block_no * ring->rxd_count);
if ((block_no == block_no1) &&
(off == ring->rx_curr_get_info.offset) &&
(rxdp->Host_Control)) {
DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
ring->dev->name);
goto end;
}
if (off && (off == ring->rxd_count)) {
ring->rx_curr_put_info.block_index++;
if (ring->rx_curr_put_info.block_index ==
ring->block_count)
ring->rx_curr_put_info.block_index = 0;
block_no = ring->rx_curr_put_info.block_index;
off = 0;
ring->rx_curr_put_info.offset = off;
rxdp = ring->rx_blocks[block_no].block_virt_addr;
DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
ring->dev->name, rxdp);
}
if ((rxdp->Control_1 & RXD_OWN_XENA) &&
((ring->rxd_mode == RXD_MODE_3B) &&
(rxdp->Control_2 & s2BIT(0)))) {
ring->rx_curr_put_info.offset = off;
goto end;
}
/* calculate size of skb based on ring mode */
size = ring->mtu +
HEADER_ETHERNET_II_802_3_SIZE +
HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
if (ring->rxd_mode == RXD_MODE_1)
size += NET_IP_ALIGN;
else
size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
/* allocate skb */
skb = dev_alloc_skb(size);
if (!skb) {
DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
ring->dev->name);
if (first_rxdp) {
wmb();
first_rxdp->Control_1 |= RXD_OWN_XENA;
}
swstats->mem_alloc_fail_cnt++;
return -ENOMEM;
}
swstats->mem_allocated += skb->truesize;
if (ring->rxd_mode == RXD_MODE_1) {
/* 1 buffer mode - normal operation mode */
rxdp1 = (struct RxD1 *)rxdp;
memset(rxdp, 0, sizeof(struct RxD1));
skb_reserve(skb, NET_IP_ALIGN);
rxdp1->Buffer0_ptr =
pci_map_single(ring->pdev, skb->data,
size - NET_IP_ALIGN,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(nic->pdev,
rxdp1->Buffer0_ptr))
goto pci_map_failed;
rxdp->Control_2 =
SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
rxdp->Host_Control = (unsigned long)skb;
} else if (ring->rxd_mode == RXD_MODE_3B) {
/*
* 2 buffer mode -
* 2 buffer mode provides 128
* byte aligned receive buffers.
*/
rxdp3 = (struct RxD3 *)rxdp;
/* save buffer pointers to avoid frequent dma mapping */
Buffer0_ptr = rxdp3->Buffer0_ptr;
Buffer1_ptr = rxdp3->Buffer1_ptr;
memset(rxdp, 0, sizeof(struct RxD3));
/* restore the buffer pointers for dma sync*/
rxdp3->Buffer0_ptr = Buffer0_ptr;
rxdp3->Buffer1_ptr = Buffer1_ptr;
ba = &ring->ba[block_no][off];
skb_reserve(skb, BUF0_LEN);
tmp = (u64)(unsigned long)skb->data;
tmp += ALIGN_SIZE;
tmp &= ~ALIGN_SIZE;
skb->data = (void *) (unsigned long)tmp;
skb_reset_tail_pointer(skb);
if (from_card_up) {
rxdp3->Buffer0_ptr =
pci_map_single(ring->pdev, ba->ba_0,
BUF0_LEN,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(nic->pdev,
rxdp3->Buffer0_ptr))
goto pci_map_failed;
} else
pci_dma_sync_single_for_device(ring->pdev,
(dma_addr_t)rxdp3->Buffer0_ptr,
BUF0_LEN,
PCI_DMA_FROMDEVICE);
rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
if (ring->rxd_mode == RXD_MODE_3B) {
/* Two buffer mode */
/*
* Buffer2 will have L3/L4 header plus
* L4 payload
*/
rxdp3->Buffer2_ptr = pci_map_single(ring->pdev,
skb->data,
ring->mtu + 4,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(nic->pdev,
rxdp3->Buffer2_ptr))
goto pci_map_failed;
if (from_card_up) {
rxdp3->Buffer1_ptr =
pci_map_single(ring->pdev,
ba->ba_1,
BUF1_LEN,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(nic->pdev,
rxdp3->Buffer1_ptr)) {
pci_unmap_single(ring->pdev,
(dma_addr_t)(unsigned long)
skb->data,
ring->mtu + 4,
PCI_DMA_FROMDEVICE);
goto pci_map_failed;
}
}
rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
rxdp->Control_2 |= SET_BUFFER2_SIZE_3
(ring->mtu + 4);
}
rxdp->Control_2 |= s2BIT(0);
rxdp->Host_Control = (unsigned long) (skb);
}
if (alloc_tab & ((1 << rxsync_frequency) - 1))
rxdp->Control_1 |= RXD_OWN_XENA;
off++;
if (off == (ring->rxd_count + 1))
off = 0;
ring->rx_curr_put_info.offset = off;
rxdp->Control_2 |= SET_RXD_MARKER;
if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
if (first_rxdp) {
wmb();
first_rxdp->Control_1 |= RXD_OWN_XENA;
}
first_rxdp = rxdp;
}
ring->rx_bufs_left += 1;
alloc_tab++;
}
end:
/* Transfer ownership of first descriptor to adapter just before
* exiting. Before that, use memory barrier so that ownership
* and other fields are seen by adapter correctly.
*/
if (first_rxdp) {
wmb();
first_rxdp->Control_1 |= RXD_OWN_XENA;
}
return SUCCESS;
pci_map_failed:
swstats->pci_map_fail_cnt++;
swstats->mem_freed += skb->truesize;
dev_kfree_skb_irq(skb);
return -ENOMEM;
}
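/*
 * Illustrative note on the ownership batching in fill_rx_buffers(): the
 * first descriptor of every batch of (1 << rxsync_frequency) RxDs is held
 * back (first_rxdp) while the rest are handed to the NIC immediately; once
 * the next batch starts -- or the function exits -- a wmb() is issued and
 * the held descriptor's RXD_OWN_XENA bit is set, so the adapter never sees
 * a partially initialized batch. E.g. with rxsync_frequency == 2 ownership
 * is published in groups of 4 descriptors.
 */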
static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
{
struct net_device *dev = sp->dev;
int j;
struct sk_buff *skb;
struct RxD_t *rxdp;
struct RxD1 *rxdp1;
struct RxD3 *rxdp3;
struct mac_info *mac_control = &sp->mac_control;
struct stat_block *stats = mac_control->stats_info;
struct swStat *swstats = &stats->sw_stat;
for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
rxdp = mac_control->rings[ring_no].
rx_blocks[blk].rxds[j].virt_addr;
skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
if (!skb)
continue;
if (sp->rxd_mode == RXD_MODE_1) {
rxdp1 = (struct RxD1 *)rxdp;
pci_unmap_single(sp->pdev,
(dma_addr_t)rxdp1->Buffer0_ptr,
dev->mtu +
HEADER_ETHERNET_II_802_3_SIZE +
HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
PCI_DMA_FROMDEVICE);
memset(rxdp, 0, sizeof(struct RxD1));
} else if (sp->rxd_mode == RXD_MODE_3B) {
rxdp3 = (struct RxD3 *)rxdp;
pci_unmap_single(sp->pdev,
(dma_addr_t)rxdp3->Buffer0_ptr,
BUF0_LEN,
PCI_DMA_FROMDEVICE);
pci_unmap_single(sp->pdev,
(dma_addr_t)rxdp3->Buffer1_ptr,
BUF1_LEN,
PCI_DMA_FROMDEVICE);
pci_unmap_single(sp->pdev,
(dma_addr_t)rxdp3->Buffer2_ptr,
dev->mtu + 4,
PCI_DMA_FROMDEVICE);
memset(rxdp, 0, sizeof(struct RxD3));
}
swstats->mem_freed += skb->truesize;
dev_kfree_skb(skb);
mac_control->rings[ring_no].rx_bufs_left -= 1;
}
}
/**
* free_rx_buffers - Frees all Rx buffers
* @sp: device private variable.
* Description:
* This function will free all Rx buffers allocated by host.
* Return Value:
* NONE.
*/
static void free_rx_buffers(struct s2io_nic *sp)
{
struct net_device *dev = sp->dev;
int i, blk = 0, buf_cnt = 0;
struct config_param *config = &sp->config;
struct mac_info *mac_control = &sp->mac_control;
for (i = 0; i < config->rx_ring_num; i++) {
struct ring_info *ring = &mac_control->rings[i];
for (blk = 0; blk < rx_ring_sz[i]; blk++)
free_rxd_blk(sp, i, blk);
ring->rx_curr_put_info.block_index = 0;
ring->rx_curr_get_info.block_index = 0;
ring->rx_curr_put_info.offset = 0;
ring->rx_curr_get_info.offset = 0;
ring->rx_bufs_left = 0;
DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
dev->name, buf_cnt, i);
}
}
static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
{
if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
ring->dev->name);
}
return 0;
}
/**
 * s2io_poll_msix - Rx interrupt handler for NAPI support
 * @napi : pointer to the napi structure.
 * @budget : The number of packets that were budgeted to be processed
 * during one pass through the 'Poll' function.
 * Description:
 * Comes into the picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in an interrupt context;
 * also, it will process only a given number of packets.
 * Return value:
 * Number of packets processed.
 */
static int s2io_poll_msix(struct napi_struct *napi, int budget)
{
struct ring_info *ring = container_of(napi, struct ring_info, napi);
struct net_device *dev = ring->dev;
int pkts_processed = 0;
u8 __iomem *addr = NULL;
u8 val8 = 0;
struct s2io_nic *nic = netdev_priv(dev);
struct XENA_dev_config __iomem *bar0 = nic->bar0;
int budget_org = budget;
if (unlikely(!is_s2io_card_up(nic)))
return 0;
pkts_processed = rx_intr_handler(ring, budget);
s2io_chk_rx_buffers(nic, ring);
if (pkts_processed < budget_org) {
napi_complete(napi);
/*Re Enable MSI-Rx Vector*/
addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
addr += 7 - ring->ring_no;
val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
writeb(val8, addr);
val8 = readb(addr);
}
return pkts_processed;
}
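/*
 * Usage note: like every NAPI poll routine, s2io_poll_msix() must return
 * the number of packets actually processed; returning a value smaller
 * than the budget (after calling napi_complete()) tells the NAPI core the
 * ring is drained, which is why the Rx vector is unmasked above only on
 * that path.
 */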
static int s2io_poll_inta(struct napi_struct *napi, int budget)
{
struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
int pkts_processed = 0;
int ring_pkts_processed, i;
struct XENA_dev_config __iomem *bar0 = nic->bar0;
int budget_org = budget;
struct config_param *config = &nic->config;
struct mac_info *mac_control = &nic->mac_control;
if (unlikely(!is_s2io_card_up(nic)))
return 0;
for (i = 0; i < config->rx_ring_num; i++) {
struct ring_info *ring = &mac_control->rings[i];
ring_pkts_processed = rx_intr_handler(ring, budget);
s2io_chk_rx_buffers(nic, ring);
pkts_processed += ring_pkts_processed;
budget -= ring_pkts_processed;
if (budget <= 0)
break;
}
if (pkts_processed < budget_org) {
napi_complete(napi);
/* Re enable the Rx interrupts for the ring */
writeq(0, &bar0->rx_traffic_mask);
readl(&bar0->rx_traffic_mask);
}
return pkts_processed;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
* s2io_netpoll - netpoll event handler entry point
* @dev : pointer to the device structure.
* Description:
* This function will be called by upper layer to check for events on the
* interface in situations where interrupts are disabled. It is used for
 * specific in-kernel networking tasks, such as remote consoles and kernel
 * debugging over the network (e.g. netdump in Red Hat).
*/
static void s2io_netpoll(struct net_device *dev)
{
struct s2io_nic *nic = netdev_priv(dev);
struct XENA_dev_config __iomem *bar0 = nic->bar0;
u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
int i;
struct config_param *config = &nic->config;
struct mac_info *mac_control = &nic->mac_control;
if (pci_channel_offline(nic->pdev))
return;
disable_irq(dev->irq);
writeq(val64, &bar0->rx_traffic_int);
writeq(val64, &bar0->tx_traffic_int);
/* We need to free up the transmitted skbs, or else netpoll will
 * run out of skbs and fail, and eventually a netpoll application such
 * as netdump will fail.
 */
for (i = 0; i < config->tx_fifo_num; i++)
tx_intr_handler(&mac_control->fifos[i]);
/* check for received packet and indicate up to network */
for (i = 0; i < config->rx_ring_num; i++) {
struct ring_info *ring = &mac_control->rings[i];
rx_intr_handler(ring, 0);
}
for (i = 0; i < config->rx_ring_num; i++) {
struct ring_info *ring = &mac_control->rings[i];
if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
DBG_PRINT(INFO_DBG,
"%s: Out of memory in Rx Netpoll!!\n",
dev->name);
break;
}
}
enable_irq(dev->irq);
}
#endif
/**
* rx_intr_handler - Rx interrupt handler
 * @ring_data: per ring structure.
 * @budget: budget for napi processing.
 * Description:
 * If the interrupt is because of a received frame, or if the
 * receive ring contains fresh, as yet un-processed frames, this function is
 * called. It picks out the RxD at which the last Rx processing had
 * stopped and sends the skb to the OSM's Rx handler and then increments
 * the offset.
* Return Value:
* No. of napi packets processed.
*/
static int rx_intr_handler(struct ring_info *ring_data, int budget)
{
int get_block, put_block;
struct rx_curr_get_info get_info, put_info;
struct RxD_t *rxdp;
struct sk_buff *skb;
int pkt_cnt = 0, napi_pkts = 0;
int i;
struct RxD1 *rxdp1;
struct RxD3 *rxdp3;
get_info = ring_data->rx_curr_get_info;
get_block = get_info.block_index;
memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
put_block = put_info.block_index;
rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
while (RXD_IS_UP2DT(rxdp)) {
/*
 * If you are next to the put index then it's a
 * FIFO full condition
 */
if ((get_block == put_block) &&
(get_info.offset + 1) == put_info.offset) {
DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
ring_data->dev->name);
break;
}
skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
if (skb == NULL) {
DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
ring_data->dev->name);
return 0;
}
if (ring_data->rxd_mode == RXD_MODE_1) {
rxdp1 = (struct RxD1 *)rxdp;
pci_unmap_single(ring_data->pdev, (dma_addr_t)
rxdp1->Buffer0_ptr,
ring_data->mtu +
HEADER_ETHERNET_II_802_3_SIZE +
HEADER_802_2_SIZE +
HEADER_SNAP_SIZE,
PCI_DMA_FROMDEVICE);
} else if (ring_data->rxd_mode == RXD_MODE_3B) {
rxdp3 = (struct RxD3 *)rxdp;
pci_dma_sync_single_for_cpu(ring_data->pdev,
(dma_addr_t)rxdp3->Buffer0_ptr,
BUF0_LEN,
PCI_DMA_FROMDEVICE);
pci_unmap_single(ring_data->pdev,
(dma_addr_t)rxdp3->Buffer2_ptr,
ring_data->mtu + 4,
PCI_DMA_FROMDEVICE);
}
prefetch(skb->data);
rx_osm_handler(ring_data, rxdp);
get_info.offset++;
ring_data->rx_curr_get_info.offset = get_info.offset;
rxdp = ring_data->rx_blocks[get_block].
rxds[get_info.offset].virt_addr;
if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
get_info.offset = 0;
ring_data->rx_curr_get_info.offset = get_info.offset;
get_block++;
if (get_block == ring_data->block_count)
get_block = 0;
ring_data->rx_curr_get_info.block_index = get_block;
rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
}
if (ring_data->nic->config.napi) {
budget--;
napi_pkts++;
if (!budget)
break;
}
pkt_cnt++;
if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
break;
}
if (ring_data->lro) {
/* Clear all LRO sessions before exiting */
for (i = 0; i < MAX_LRO_SESSIONS; i++) {
struct lro *lro = &ring_data->lro0_n[i];
if (lro->in_use) {
update_L3L4_header(ring_data->nic, lro);
queue_rx_frame(lro->parent, lro->vlan_tag);
clear_lro_session(lro);
}
}
}
return napi_pkts;
}
/**
 * tx_intr_handler - Transmit interrupt handler
 * @fifo_data : fifo data of the fifo on which the interrupt was raised
 * Description:
 * If an interrupt was raised to indicate DMA complete of the
 * Tx packet, this function is called. It identifies the last TxD
 * whose buffer was freed and frees all skbs whose data have already
 * been DMA'ed into the NIC's internal memory.
 * Return Value:
 * NONE
 */
static void tx_intr_handler(struct fifo_info *fifo_data)
{
struct s2io_nic *nic = fifo_data->nic;
struct tx_curr_get_info get_info, put_info;
struct sk_buff *skb = NULL;
struct TxD *txdlp;
int pkt_cnt = 0;
unsigned long flags = 0;
u8 err_mask;
struct stat_block *stats = nic->mac_control.stats_info;
struct swStat *swstats = &stats->sw_stat;
if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
return;
get_info = fifo_data->tx_curr_get_info;
memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
txdlp = (struct TxD *)
fifo_data->list_info[get_info.offset].list_virt_addr;
while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
(get_info.offset != put_info.offset) &&
(txdlp->Host_Control)) {
/* Check for TxD errors */
if (txdlp->Control_1 & TXD_T_CODE) {
unsigned long long err;
err = txdlp->Control_1 & TXD_T_CODE;
if (err & 0x1) {
swstats->parity_err_cnt++;
}
/* update t_code statistics */
err_mask = err >> 48;
switch (err_mask) {
case 2:
swstats->tx_buf_abort_cnt++;
break;
case 3:
swstats->tx_desc_abort_cnt++;
break;
case 7:
swstats->tx_parity_err_cnt++;
break;
case 10:
swstats->tx_link_loss_cnt++;
break;
case 15:
swstats->tx_list_proc_err_cnt++;
break;
}
}
skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
if (skb == NULL) {
spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
__func__);
return;
}
pkt_cnt++;
/* Updating the statistics block */
swstats->mem_freed += skb->truesize;
dev_kfree_skb_irq(skb);
get_info.offset++;
if (get_info.offset == get_info.fifo_len + 1)
get_info.offset = 0;
txdlp = (struct TxD *)
fifo_data->list_info[get_info.offset].list_virt_addr;
fifo_data->tx_curr_get_info.offset = get_info.offset;
}
s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
}
/**
* s2io_mdio_write - Function to write in to MDIO registers
* @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
* @addr : address value
* @value : data value
* @dev : pointer to net_device structure
 * Description:
 * This function is used to write values to the MDIO registers.
 * Return Value: NONE
*/
static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
struct net_device *dev)
{
u64 val64;
struct s2io_nic *sp = netdev_priv(dev);
struct XENA_dev_config __iomem *bar0 = sp->bar0;
/* address transaction */
val64 = MDIO_MMD_INDX_ADDR(addr) |
MDIO_MMD_DEV_ADDR(mmd_type) |
MDIO_MMS_PRT_ADDR(0x0);
writeq(val64, &bar0->mdio_control);
val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
writeq(val64, &bar0->mdio_control);
udelay(100);
/* Data transaction */
val64 = MDIO_MMD_INDX_ADDR(addr) |
MDIO_MMD_DEV_ADDR(mmd_type) |
MDIO_MMS_PRT_ADDR(0x0) |
MDIO_MDIO_DATA(value) |
MDIO_OP(MDIO_OP_WRITE_TRANS);
writeq(val64, &bar0->mdio_control);
val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
writeq(val64, &bar0->mdio_control);
udelay(100);
val64 = MDIO_MMD_INDX_ADDR(addr) |
MDIO_MMD_DEV_ADDR(mmd_type) |
MDIO_MMS_PRT_ADDR(0x0) |
MDIO_OP(MDIO_OP_READ_TRANS);
writeq(val64, &bar0->mdio_control);
val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
writeq(val64, &bar0->mdio_control);
udelay(100);
}
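/*
 * Illustrative note: s2io_mdio_write() performs three back-to-back MDIO
 * transactions through the single mdio_control register -- an address
 * frame, a write frame carrying the data, and a read-back frame -- each
 * latched by re-writing the word with MDIO_CTRL_START_TRANS(0xE) set and
 * followed by a 100us delay for the serial transfer to finish.
 */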
/**
 * s2io_mdio_read - Function to read from the MDIO registers
 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 * @addr : address value
 * @dev : pointer to net_device structure
 * Description:
 * This function is used to read values from the MDIO registers.
 * Return Value: the 16-bit value read from the MDIO register
 */
static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
{
u64 val64 = 0x0;
u64 rval64 = 0x0;
struct s2io_nic *sp = netdev_priv(dev);
struct XENA_dev_config __iomem *bar0 = sp->bar0;
/* address transaction */
val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
| MDIO_MMD_DEV_ADDR(mmd_type)
| MDIO_MMS_PRT_ADDR(0x0));
writeq(val64, &bar0->mdio_control);
val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
writeq(val64, &bar0->mdio_control);
udelay(100);
/* Data transaction */
val64 = MDIO_MMD_INDX_ADDR(addr) |
MDIO_MMD_DEV_ADDR(mmd_type) |
MDIO_MMS_PRT_ADDR(0x0) |
MDIO_OP(MDIO_OP_READ_TRANS);
writeq(val64, &bar0->mdio_control);
val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
writeq(val64, &bar0->mdio_control);
udelay(100);
/* Read the value from regs */
rval64 = readq(&bar0->mdio_control);
rval64 = rval64 & 0xFFFF0000;
rval64 = rval64 >> 16;
return rval64;
}
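/*
 * Illustrative note: the data returned by an MDIO read transaction lives
 * in bits 16..31 of the mdio_control readback, hence the extraction above:
 *     rval64 = (readq(&bar0->mdio_control) & 0xFFFF0000) >> 16;
 * leaving a 16-bit register value in the low word of the result.
 */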
/**
 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
 * @counter : counter value to be updated
 * @regs_stat : bit-packed status register holding a 2-bit field per counter
 * @index : index of this counter's 2-bit field within @regs_stat
 * @flag : flag to indicate the status
 * @type : counter type
 * Description:
 * This function checks the status of the xpak counter value.
 * Return Value: NONE
 */
static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
u16 flag, u16 type)
{
u64 mask = 0x3;
u64 val64;
int i;
for (i = 0; i < index; i++)
mask = mask << 0x2;
if (flag > 0) {
*counter = *counter + 1;
val64 = *regs_stat & mask;
val64 = val64 >> (index * 0x2);
val64 = val64 + 1;
if (val64 == 3) {
switch (type) {
case 1:
DBG_PRINT(ERR_DBG,
"Take Xframe NIC out of service.\n");
DBG_PRINT(ERR_DBG,
"Excessive temperatures may result in premature transceiver failure.\n");
break;
case 2:
DBG_PRINT(ERR_DBG,
"Take Xframe NIC out of service.\n");
DBG_PRINT(ERR_DBG,
"Excessive bias currents may indicate imminent laser diode failure.\n");
break;
case 3:
DBG_PRINT(ERR_DBG,
"Take Xframe NIC out of service.\n");
DBG_PRINT(ERR_DBG,
"Excessive laser output power may saturate far-end receiver.\n");
break;
default:
DBG_PRINT(ERR_DBG,
"Incorrect XPAK Alarm type\n");
}
val64 = 0x0;
}
val64 = val64 << (index * 0x2);
*regs_stat = (*regs_stat & (~mask)) | (val64);
} else {
*regs_stat = *regs_stat & (~mask);
}
}
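/*
 * Illustrative note: xpak_regs_stat packs a 2-bit saturating counter per
 * alarm at bit offset (index * 2). E.g. for index == 2 the mask built
 * above is 0x3 << 4 == 0x30; three consecutive samples with the alarm
 * flag set drive the field to 3, at which point the warning is printed
 * and the field is reset to 0, while a clear sample zeroes it directly.
 */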
/**
* s2io_updt_xpak_counter - Function to update the xpak counters
 * @dev : pointer to net_device struct
 * Description:
 * This function updates the status of the xpak counters.
 * Return Value: NONE
*/
static void s2io_updt_xpak_counter(struct net_device *dev)
{
u16 flag = 0x0;
u16 type = 0x0;
u16 val16 = 0x0;
u64 val64 = 0x0;
u64 addr = 0x0;
struct s2io_nic *sp = netdev_priv(dev);
struct stat_block *stats = sp->mac_control.stats_info;
struct xpakStat *xstats = &stats->xpak_stat;
/* Check the communication with the MDIO slave */
addr = MDIO_CTRL1;
val64 = 0x0;
val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
DBG_PRINT(ERR_DBG,
"ERR: MDIO slave access failed - Returned %llx\n",
(unsigned long long)val64);
return;
}
/* Check for the expected value of control reg 1 */
if (val64 != MDIO_CTRL1_SPEED10G) {
DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
"Returned: %llx- Expected: 0x%x\n",
(unsigned long long)val64, MDIO_CTRL1_SPEED10G);
return;
}
/* Loading the DOM register to MDIO register */
addr = 0xA100;
s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
/* Reading the Alarm flags */
addr = 0xA070;
val64 = 0x0;
val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
flag = CHECKBIT(val64, 0x7);
type = 1;
s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
&xstats->xpak_regs_stat,
0x0, flag, type);
if (CHECKBIT(val64, 0x6))
xstats->alarm_transceiver_temp_low++;
flag = CHECKBIT(val64, 0x3);
type = 2;
s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
&xstats->xpak_regs_stat,
0x2, flag, type);
if (CHECKBIT(val64, 0x2))
xstats->alarm_laser_bias_current_low++;
flag = CHECKBIT(val64, 0x1);
type = 3;
s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
&xstats->xpak_regs_stat,
0x4, flag, type);
if (CHECKBIT(val64, 0x0))
xstats->alarm_laser_output_power_low++;
/* Reading the Warning flags */
addr = 0xA074;
val64 = 0x0;
val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
if (CHECKBIT(val64, 0x7))
xstats->warn_transceiver_temp_high++;
if (CHECKBIT(val64, 0x6))
xstats->warn_transceiver_temp_low++;
if (CHECKBIT(val64, 0x3))
xstats->warn_laser_bias_current_high++;
if (CHECKBIT(val64, 0x2))
xstats->warn_laser_bias_current_low++;
if (CHECKBIT(val64, 0x1))
xstats->warn_laser_output_power_high++;
if (CHECKBIT(val64, 0x0))
xstats->warn_laser_output_power_low++;
}
/**
* wait_for_cmd_complete - waits for a command to complete.
* @addr : address of the register to poll.
* @busy_bit : bit(s) in the register that indicate the command is
* still executing.
* @bit_state : S2IO_BIT_RESET to wait for @busy_bit to clear,
* S2IO_BIT_SET to wait for it to be set.
* Description: Function that waits for a command written to the RMAC
* ADDR DATA registers to complete and returns either success or
* error depending on whether the command completed or not.
* Return value:
* SUCCESS on success and FAILURE on failure.
*/
static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
int bit_state)
{
int ret = FAILURE, cnt = 0, delay = 1;
u64 val64;
if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
return FAILURE;
do {
val64 = readq(addr);
if (bit_state == S2IO_BIT_RESET) {
if (!(val64 & busy_bit)) {
ret = SUCCESS;
break;
}
} else {
if (val64 & busy_bit) {
ret = SUCCESS;
break;
}
}
if (in_interrupt())
mdelay(delay);
else
msleep(delay);
if (++cnt >= 10)
delay = 50;
} while (cnt < 20);
return ret;
}
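/*
 * Hypothetical caller sketch (not built): this is the polling idiom used
 * throughout the driver, e.g. when waiting on the RMAC command memory
 * strobe after queueing a CAM command.
 */
#if 0
if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
S2IO_BIT_RESET) == FAILURE)
DBG_PRINT(ERR_DBG, "CAM command did not complete\n");
#endif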
/*
* check_pci_device_id - Checks if the device id is supported
* @id : device id
* Description: Function to check if the pci device id is supported by driver.
* Return value: Actual device id if supported else PCI_ANY_ID
*/
static u16 check_pci_device_id(u16 id)
{
switch (id) {
case PCI_DEVICE_ID_HERC_WIN:
case PCI_DEVICE_ID_HERC_UNI:
return XFRAME_II_DEVICE;
case PCI_DEVICE_ID_S2IO_UNI:
case PCI_DEVICE_ID_S2IO_WIN:
return XFRAME_I_DEVICE;
default:
return PCI_ANY_ID;
}
}
/**
* s2io_reset - Resets the card.
* @sp : private member of the device structure.
* Description: Function to Reset the card. This function then also
* restores the previously saved PCI configuration space registers as
* the card reset also resets the configuration space.
* Return value:
* void.
*/
static void s2io_reset(struct s2io_nic *sp)
{
struct XENA_dev_config __iomem *bar0 = sp->bar0;
u64 val64;
u16 subid, pci_cmd;
int i;
u16 val16;
unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
struct stat_block *stats;
struct swStat *swstats;
DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
__func__, pci_name(sp->pdev));
/* Back up the PCI-X CMD reg, don't want to lose MMRBC, OST settings */
pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
val64 = SW_RESET_ALL;
writeq(val64, &bar0->sw_reset);
if (strstr(sp->product_name, "CX4"))
msleep(750);
msleep(250);
for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
/* Restore the PCI state saved during initialization. */
pci_restore_state(sp->pdev);
pci_save_state(sp->pdev);
pci_read_config_word(sp->pdev, 0x2, &val16);
if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
break;
msleep(200);
}
if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);
pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
s2io_init_pci(sp);
/* Set swapper to enable I/O register access */
s2io_set_swapper(sp);
/* restore mac_addr entries */
do_s2io_restore_unicast_mc(sp);
/* Restore the MSIX table entries from local variables */
restore_xmsi_data(sp);
/* Clear certain PCI/PCI-X fields after reset */
if (sp->device_type == XFRAME_II_DEVICE) {
/* Clear "detected parity error" bit */
pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
/* Clearing PCIX Ecc status register */
pci_write_config_dword(sp->pdev, 0x68, 0x7C);
/* Clearing PCI_STATUS error reflected here */
writeq(s2BIT(62), &bar0->txpic_int_reg);
}
/* Reset device statistics maintained by OS */
memset(&sp->stats, 0, sizeof(struct net_device_stats));
stats = sp->mac_control.stats_info;
swstats = &stats->sw_stat;
/* save link up/down time/cnt, reset/memory/watchdog cnt */
up_cnt = swstats->link_up_cnt;
down_cnt = swstats->link_down_cnt;
up_time = swstats->link_up_time;
down_time = swstats->link_down_time;
reset_cnt = swstats->soft_reset_cnt;
mem_alloc_cnt = swstats->mem_allocated;
mem_free_cnt = swstats->mem_freed;
watchdog_cnt = swstats->watchdog_timer_cnt;
memset(stats, 0, sizeof(struct stat_block));
/* restore link up/down time/cnt, reset/memory/watchdog cnt */
swstats->link_up_cnt = up_cnt;
swstats->link_down_cnt = down_cnt;
swstats->link_up_time = up_time;
swstats->link_down_time = down_time;
swstats->soft_reset_cnt = reset_cnt;
swstats->mem_allocated = mem_alloc_cnt;
swstats->mem_freed = mem_free_cnt;
swstats->watchdog_timer_cnt = watchdog_cnt;
/* SXE-002: Configure link and activity LED to turn it off */
subid = sp->pdev->subsystem_device;
if (((subid & 0xFF) >= 0x07) &&
(sp->device_type == XFRAME_I_DEVICE)) {
val64 = readq(&bar0->gpio_control);
val64 |= 0x0000800000000000ULL;
writeq(val64, &bar0->gpio_control);
val64 = 0x0411040400000000ULL;
writeq(val64, (void __iomem *)bar0 + 0x2700);
}
/*
* Clear spurious ECC interrupts that would have occurred on
* XFRAME II cards after reset.
*/
if (sp->device_type == XFRAME_II_DEVICE) {
val64 = readq(&bar0->pcc_err_reg);
writeq(val64, &bar0->pcc_err_reg);
}
sp->device_enabled_once = false;
}
/**
* s2io_set_swapper - to set the swapper control on the card
* @sp : private member of the device structure,
* pointer to the s2io_nic structure.
* Description: Function to set the swapper control on the card
* correctly depending on the 'endianness' of the system.
* Return value:
* SUCCESS on success and FAILURE on failure.
*/
static int s2io_set_swapper(struct s2io_nic *sp)
{
struct net_device *dev = sp->dev;
struct XENA_dev_config __iomem *bar0 = sp->bar0;
u64 val64, valt, valr;
/*
* Set proper endian settings and verify the same by reading
* the PIF Feed-back register.
*/
val64 = readq(&bar0->pif_rd_swapper_fb);
if (val64 != 0x0123456789ABCDEFULL) {
int i = 0;
static const u64 value[] = {
0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
0x8100008181000081ULL, /* FE=1, SE=0 */
0x4200004242000042ULL, /* FE=0, SE=1 */
0 /* FE=0, SE=0 */
};
while (i < 4) {
writeq(value[i], &bar0->swapper_ctrl);
val64 = readq(&bar0->pif_rd_swapper_fb);
if (val64 == 0x0123456789ABCDEFULL)
break;
i++;
}
if (i == 4) {
DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
"feedback read %llx\n",
dev->name, (unsigned long long)val64);
return FAILURE;
}
valr = value[i];
} else {
valr = readq(&bar0->swapper_ctrl);
}
valt = 0x0123456789ABCDEFULL;
writeq(valt, &bar0->xmsi_address);
val64 = readq(&bar0->xmsi_address);
if (val64 != valt) {
int i = 0;
static const u64 value[] = {
0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
0x0081810000818100ULL, /* FE=1, SE=0 */
0x0042420000424200ULL, /* FE=0, SE=1 */
0 /* FE=0, SE=0 */
};
while (i < 4) {
writeq((value[i] | valr), &bar0->swapper_ctrl);
writeq(valt, &bar0->xmsi_address);
val64 = readq(&bar0->xmsi_address);
if (val64 == valt)
break;
i++;
}
if (i == 4) {
unsigned long long x = val64;
DBG_PRINT(ERR_DBG,
"Write failed, Xmsi_addr reads:0x%llx\n", x);
return FAILURE;
}
}
val64 = readq(&bar0->swapper_ctrl);
val64 &= 0xFFFF000000000000ULL;
#ifdef __BIG_ENDIAN
/*
* The device is set to big endian format by default, so a
* big endian driver need not set anything.
*/
val64 |= (SWAPPER_CTRL_TXP_FE |
SWAPPER_CTRL_TXP_SE |
SWAPPER_CTRL_TXD_R_FE |
SWAPPER_CTRL_TXD_W_FE |
SWAPPER_CTRL_TXF_R_FE |
SWAPPER_CTRL_RXD_R_FE |
SWAPPER_CTRL_RXD_W_FE |
SWAPPER_CTRL_RXF_W_FE |
SWAPPER_CTRL_XMSI_FE |
SWAPPER_CTRL_STATS_FE |
SWAPPER_CTRL_STATS_SE);
if (sp->config.intr_type == INTA)
val64 |= SWAPPER_CTRL_XMSI_SE;
writeq(val64, &bar0->swapper_ctrl);
#else
/*
* Initially we enable all bits to make it accessible by the
* driver, then we selectively enable only those bits that
* we want to set.
*/
val64 |= (SWAPPER_CTRL_TXP_FE |
SWAPPER_CTRL_TXP_SE |
SWAPPER_CTRL_TXD_R_FE |
SWAPPER_CTRL_TXD_R_SE |
SWAPPER_CTRL_TXD_W_FE |
SWAPPER_CTRL_TXD_W_SE |
SWAPPER_CTRL_TXF_R_FE |
SWAPPER_CTRL_RXD_R_FE |
SWAPPER_CTRL_RXD_R_SE |
SWAPPER_CTRL_RXD_W_FE |
SWAPPER_CTRL_RXD_W_SE |
SWAPPER_CTRL_RXF_W_FE |
SWAPPER_CTRL_XMSI_FE |
SWAPPER_CTRL_STATS_FE |
SWAPPER_CTRL_STATS_SE);
if (sp->config.intr_type == INTA)
val64 |= SWAPPER_CTRL_XMSI_SE;
writeq(val64, &bar0->swapper_ctrl);
#endif
val64 = readq(&bar0->swapper_ctrl);
/*
* Verifying if endian settings are accurate by reading a
* feedback register.
*/
val64 = readq(&bar0->pif_rd_swapper_fb);
if (val64 != 0x0123456789ABCDEFULL) {
/* Endian settings are incorrect, calls for another look. */
DBG_PRINT(ERR_DBG,
"%s: Endian settings are wrong, feedback read %llx\n",
dev->name, (unsigned long long)val64);
return FAILURE;
}
return SUCCESS;
}
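/*
 * Sketch of the probe pattern used above (not built): each candidate
 * swapper setting is written and then verified by reading back the fixed
 * feedback pattern; the first setting that echoes correctly wins.
 */
#if 0
for (i = 0; i < 4; i++) {
writeq(value[i], &bar0->swapper_ctrl);
if (readq(&bar0->pif_rd_swapper_fb) == 0x0123456789ABCDEFULL)
break; /* this endian setting is correct */
}
#endif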
static int wait_for_msix_trans(struct s2io_nic *nic, int i)
{
struct XENA_dev_config __iomem *bar0 = nic->bar0;
u64 val64;
int ret = 0, cnt = 0;
do {
val64 = readq(&bar0->xmsi_access);
if (!(val64 & s2BIT(15)))
break;
mdelay(1);
cnt++;
} while (cnt < 5);
if (cnt == 5) {
DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
ret = 1;
}
return ret;
}
static void restore_xmsi_data(struct s2io_nic *nic)
{
struct XENA_dev_config __iomem *bar0 = nic->bar0;
u64 val64;
int i, msix_index;
if (nic->device_type == XFRAME_I_DEVICE)
return;
for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
msix_index = (i) ? ((i-1) * 8 + 1) : 0;
writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
writeq(nic->msix_info[i].data, &bar0->xmsi_data);
val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
writeq(val64, &bar0->xmsi_access);
if (wait_for_msix_trans(nic, msix_index)) {
DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
__func__, msix_index);
continue;
}
}
}
static void store_xmsi_data(struct s2io_nic *nic)
{
struct XENA_dev_config __iomem *bar0 = nic->bar0;
u64 val64, addr, data;
int i, msix_index;
if (nic->device_type == XFRAME_I_DEVICE)
return;
/* Store and display */
for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
msix_index = (i) ? ((i-1) * 8 + 1) : 0;
val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
writeq(val64, &bar0->xmsi_access);
if (wait_for_msix_trans(nic, msix_index)) {
DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
__func__, msix_index);
continue;
}
addr = readq(&bar0->xmsi_address);
data = readq(&bar0->xmsi_data);
if (addr && data) {
nic->msix_info[i].addr = addr;
nic->msix_info[i].data = data;
}
}
}
static int s2io_enable_msi_x(struct s2io_nic *nic)
{
struct XENA_dev_config __iomem *bar0 = nic->bar0;
u64 rx_mat;
u16 msi_control; /* Temp variable */
int ret, i, j, msix_indx = 1;
int size;
struct stat_block *stats = nic->mac_control.stats_info;
struct swStat *swstats = &stats->sw_stat;
size = nic->num_entries * sizeof(struct msix_entry);
nic->entries = kzalloc(size, GFP_KERNEL);
if (!nic->entries) {
DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
__func__);
swstats->mem_alloc_fail_cnt++;
return -ENOMEM;
}
swstats->mem_allocated += size;
size = nic->num_entries * sizeof(struct s2io_msix_entry);
nic->s2io_entries = kzalloc(size, GFP_KERNEL);
if (!nic->s2io_entries) {
DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
__func__);
swstats->mem_alloc_fail_cnt++;
kfree(nic->entries);
swstats->mem_freed
+= (nic->num_entries * sizeof(struct msix_entry));
return -ENOMEM;
}
swstats->mem_allocated += size;
nic->entries[0].entry = 0;
nic->s2io_entries[0].entry = 0;
nic->s2io_entries[0].in_use = MSIX_FLG;
nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
nic->s2io_entries[0].arg = &nic->mac_control.fifos;
for (i = 1; i < nic->num_entries; i++) {
nic->entries[i].entry = ((i - 1) * 8) + 1;
nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
nic->s2io_entries[i].arg = NULL;
nic->s2io_entries[i].in_use = 0;
}
rx_mat = readq(&bar0->rx_mat);
for (j = 0; j < nic->config.rx_ring_num; j++) {
rx_mat |= RX_MAT_SET(j, msix_indx);
nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
nic->s2io_entries[j+1].in_use = MSIX_FLG;
msix_indx += 8;
}
writeq(rx_mat, &bar0->rx_mat);
readq(&bar0->rx_mat);
ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
/* We fail init if error or we get fewer vectors than the minimum required */
if (ret) {
DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
kfree(nic->entries);
swstats->mem_freed += nic->num_entries *
sizeof(struct msix_entry);
kfree(nic->s2io_entries);
swstats->mem_freed += nic->num_entries *
sizeof(struct s2io_msix_entry);
nic->entries = NULL;
nic->s2io_entries = NULL;
return -ENOMEM;
}
/*
* To enable MSI-X, MSI also needs to be enabled, due to a bug
* in the herc NIC. (Temp change, needs to be removed later)
*/
pci_read_config_word(nic->pdev, 0x42, &msi_control);
msi_control |= 0x1; /* Enable MSI */
pci_write_config_word(nic->pdev, 0x42, msi_control);
return 0;
}
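/*
 * Note on the mapping above: MSI-X table entry 0 carries the alarm/TX
 * interrupt, while ring i (1-based) is placed at table index
 * (i - 1) * 8 + 1, i.e. 1, 9, 17, ... - the same formula used by
 * store_xmsi_data() and restore_xmsi_data().
 */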
/* Handle software interrupt used during MSI(X) test */
static irqreturn_t s2io_test_intr(int irq, void *dev_id)
{
struct s2io_nic *sp = dev_id;
sp->msi_detected = 1;
wake_up(&sp->msi_wait);
return IRQ_HANDLED;
}
/* Test interrupt path by forcing a software IRQ */
static int s2io_test_msi(struct s2io_nic *sp)
{
struct pci_dev *pdev = sp->pdev;
struct XENA_dev_config __iomem *bar0 = sp->bar0;
int err;
u64 val64, saved64;
err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
sp->name, sp);
if (err) {
DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
sp->dev->name, pci_name(pdev), pdev->irq);
return err;
}
init_waitqueue_head(&sp->msi_wait);
sp->msi_detected = 0;
saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
val64 |= SCHED_INT_CTRL_ONE_SHOT;
val64 |= SCHED_INT_CTRL_TIMER_EN;
val64 |= SCHED_INT_CTRL_INT2MSI(1);
writeq(val64, &bar0->scheduled_int_ctrl);
wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
if (!sp->msi_detected) {
/* MSI(X) test failed, go back to INTx mode */
DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
"using MSI(X) during test\n",
sp->dev->name, pci_name(pdev));
err = -EOPNOTSUPP;
}
free_irq(sp->entries[1].vector, sp);
writeq(saved64, &bar0->scheduled_int_ctrl);
return err;
}
static void remove_msix_isr(struct s2io_nic *sp)
{
int i;
u16 msi_control;
for (i = 0; i < sp->num_entries; i++) {
if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
int vector = sp->entries[i].vector;
void *arg = sp->s2io_entries[i].arg;
free_irq(vector, arg);
}
}
kfree(sp->entries);
kfree(sp->s2io_entries);
sp->entries = NULL;
sp->s2io_entries = NULL;
pci_read_config_word(sp->pdev, 0x42, &msi_control);
msi_control &= 0xFFFE; /* Disable MSI */
pci_write_config_word(sp->pdev, 0x42, msi_control);
pci_disable_msix(sp->pdev);
}
static void remove_inta_isr(struct s2io_nic *sp)
{
struct net_device *dev = sp->dev;
free_irq(sp->pdev->irq, dev);
}
/* ********************************************************* *
* Functions defined below concern the OS part of the driver *
* ********************************************************* */
/**
* s2io_open - open entry point of the driver
* @dev : pointer to the device structure.
* Description:
* This function is the open entry point of the driver. It mainly calls a
* function to allocate Rx buffers and inserts them into the buffer
* descriptors and then enables the Rx part of the NIC.
* Return value:
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
static int s2io_open(struct net_device *dev)
{
struct s2io_nic *sp = netdev_priv(dev);
struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
int err = 0;
/*
* Make sure the link is off by default every time the
* NIC is initialized.
*/
netif_carrier_off(dev);
sp->last_link_state = 0;
/* Initialize H/W and enable interrupts */
err = s2io_card_up(sp);
if (err) {
DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
dev->name);
goto hw_init_failed;
}
if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
s2io_card_down(sp);
err = -ENODEV;
goto hw_init_failed;
}
s2io_start_all_tx_queue(sp);
return 0;
hw_init_failed:
if (sp->config.intr_type == MSI_X) {
if (sp->entries) {
kfree(sp->entries);
swstats->mem_freed += sp->num_entries *
sizeof(struct msix_entry);
}
if (sp->s2io_entries) {
kfree(sp->s2io_entries);
swstats->mem_freed += sp->num_entries *
sizeof(struct s2io_msix_entry);
}
}
return err;
}
/**
* s2io_close -close entry point of the driver
* @dev : device pointer.
* Description:
* This is the stop entry point of the driver. It needs to undo exactly
* whatever was done by the open entry point, thus it's usually referred to
* as the close function. Among other things this function mainly stops the
* Rx side of the NIC and frees all the Rx buffers in the Rx rings.
* Return value:
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
static int s2io_close(struct net_device *dev)
{
struct s2io_nic *sp = netdev_priv(dev);
struct config_param *config = &sp->config;
u64 tmp64;
int offset;
/*
* Return if the device is already closed.
* Can happen when s2io_card_up failed in change_mtu.
*/
if (!is_s2io_card_up(sp))
return 0;
s2io_stop_all_tx_queue(sp);
/* delete all populated mac entries */
for (offset = 1; offset < config->max_mc_addr; offset++) {
tmp64 = do_s2io_read_unicast_mc(sp, offset);
if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
do_s2io_delete_unicast_mc(sp, tmp64);
}
s2io_card_down(sp);
return 0;
}
/**
* s2io_xmit - Tx entry point of the driver
* @skb : the socket buffer containing the Tx data.
* @dev : device pointer.
* Description :
* This function is the Tx entry point of the driver. S2IO NIC supports
* certain protocol assist features on Tx side, namely CSO, S/G, LSO.
* NOTE: when the device cannot queue the pkt, just the trans_start variable
* will not be updated.
* Return value:
* NETDEV_TX_OK on success; NETDEV_TX_BUSY or NETDEV_TX_LOCKED when the
* packet could not be queued.
*/
static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct s2io_nic *sp = netdev_priv(dev);
u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
register u64 val64;
struct TxD *txdp;
struct TxFIFO_element __iomem *tx_fifo;
unsigned long flags = 0;
u16 vlan_tag = 0;
struct fifo_info *fifo = NULL;
int do_spin_lock = 1;
int offload_type;
int enable_per_list_interrupt = 0;
struct config_param *config = &sp->config;
struct mac_info *mac_control = &sp->mac_control;
struct stat_block *stats = mac_control->stats_info;
struct swStat *swstats = &stats->sw_stat;
DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
if (unlikely(skb->len <= 0)) {
DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
if (!is_s2io_card_up(sp)) {
DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
dev->name);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
queue = 0;
if (vlan_tx_tag_present(skb))
vlan_tag = vlan_tx_tag_get(skb);
if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *ip;
struct tcphdr *th;
ip = ip_hdr(skb);
if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
th = (struct tcphdr *)(((unsigned char *)ip) +
ip->ihl*4);
if (ip->protocol == IPPROTO_TCP) {
queue_len = sp->total_tcp_fifos;
queue = (ntohs(th->source) +
ntohs(th->dest)) &
sp->fifo_selector[queue_len - 1];
if (queue >= queue_len)
queue = queue_len - 1;
} else if (ip->protocol == IPPROTO_UDP) {
queue_len = sp->total_udp_fifos;
queue = (ntohs(th->source) +
ntohs(th->dest)) &
sp->fifo_selector[queue_len - 1];
if (queue >= queue_len)
queue = queue_len - 1;
queue += sp->udp_fifo_idx;
if (skb->len > 1024)
enable_per_list_interrupt = 1;
do_spin_lock = 0;
}
}
}
} else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
/* get fifo number based on skb->priority value */
queue = config->fifo_mapping
[skb->priority & (MAX_TX_FIFOS - 1)];
fifo = &mac_control->fifos[queue];
if (do_spin_lock)
spin_lock_irqsave(&fifo->tx_lock, flags);
else {
if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
return NETDEV_TX_LOCKED;
}
if (sp->config.multiq) {
if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
spin_unlock_irqrestore(&fifo->tx_lock, flags);
return NETDEV_TX_BUSY;
}
} else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
if (netif_queue_stopped(dev)) {
spin_unlock_irqrestore(&fifo->tx_lock, flags);
return NETDEV_TX_BUSY;
}
}
put_off = (u16)fifo->tx_curr_put_info.offset;
get_off = (u16)fifo->tx_curr_get_info.offset;
txdp = (struct TxD *)fifo->list_info[put_off].list_virt_addr;
queue_len = fifo->tx_curr_put_info.fifo_len + 1;
/* Avoid "put" pointer going beyond "get" pointer */
if (txdp->Host_Control ||
((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
s2io_stop_tx_queue(sp, fifo->fifo_no);
dev_kfree_skb(skb);
spin_unlock_irqrestore(&fifo->tx_lock, flags);
return NETDEV_TX_OK;
}
offload_type = s2io_offload_type(skb);
if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
txdp->Control_1 |= TXD_TCP_LSO_EN;
txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
}
if (skb->ip_summed == CHECKSUM_PARTIAL) {
txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
TXD_TX_CKO_TCP_EN |
TXD_TX_CKO_UDP_EN);
}
txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
txdp->Control_1 |= TXD_LIST_OWN_XENA;
txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
if (enable_per_list_interrupt)
if (put_off & (queue_len >> 5))
txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
if (vlan_tag) {
txdp->Control_2 |= TXD_VLAN_ENABLE;
txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
}
frg_len = skb_headlen(skb);
if (offload_type == SKB_GSO_UDP) {
int ufo_size;
ufo_size = s2io_udp_mss(skb);
ufo_size &= ~7;
txdp->Control_1 |= TXD_UFO_EN;
txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
/* both variants do cpu_to_be64(be32_to_cpu(...)) */
fifo->ufo_in_band_v[put_off] =
(__force u64)skb_shinfo(skb)->ip6_frag_id;
#else
fifo->ufo_in_band_v[put_off] =
(__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
txdp->Buffer_Pointer = pci_map_single(sp->pdev,
fifo->ufo_in_band_v,
sizeof(u64),
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
goto pci_map_failed;
txdp++;
}
txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data,
frg_len, PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
goto pci_map_failed;
txdp->Host_Control = (unsigned long)skb;
txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
if (offload_type == SKB_GSO_UDP)
txdp->Control_1 |= TXD_UFO_EN;
frg_cnt = skb_shinfo(skb)->nr_frags;
/* For fragmented SKB. */
for (i = 0; i < frg_cnt; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
/* A '0' length fragment will be ignored */
if (!frag->size)
continue;
txdp++;
txdp->Buffer_Pointer = (u64)pci_map_page(sp->pdev, frag->page,
frag->page_offset,
frag->size,
PCI_DMA_TODEVICE);
txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
if (offload_type == SKB_GSO_UDP)
txdp->Control_1 |= TXD_UFO_EN;
}
txdp->Control_1 |= TXD_GATHER_CODE_LAST;
if (offload_type == SKB_GSO_UDP)
frg_cnt++; /* as Txd0 was used for inband header */
tx_fifo = mac_control->tx_FIFO_start[queue];
val64 = fifo->list_info[put_off].list_phy_addr;
writeq(val64, &tx_fifo->TxDL_Pointer);
val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
TX_FIFO_LAST_LIST);
if (offload_type)
val64 |= TX_FIFO_SPECIAL_FUNC;
writeq(val64, &tx_fifo->List_Control);
mmiowb();
put_off++;
if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
put_off = 0;
fifo->tx_curr_put_info.offset = put_off;
/* Avoid "put" pointer going beyond "get" pointer */
if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
swstats->fifo_full_cnt++;
DBG_PRINT(TX_DBG,
"No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
put_off, get_off);
s2io_stop_tx_queue(sp, fifo->fifo_no);
}
swstats->mem_allocated += skb->truesize;
spin_unlock_irqrestore(&fifo->tx_lock, flags);
if (sp->config.intr_type == MSI_X)
tx_intr_handler(fifo);
return NETDEV_TX_OK;
pci_map_failed:
swstats->pci_map_fail_cnt++;
s2io_stop_tx_queue(sp, fifo->fifo_no);
swstats->mem_freed += skb->truesize;
dev_kfree_skb(skb);
spin_unlock_irqrestore(&fifo->tx_lock, flags);
return NETDEV_TX_OK;
}
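/*
 * Illustrative helper (not built): the two "ring full" tests above treat
 * the TxD list as full when advancing the "put" pointer by one slot, with
 * wraparound at queue_len, would land on the "get" pointer.
 */
#if 0
static inline int txd_ring_full(u16 put_off, u16 get_off, u16 queue_len)
{
return (((put_off + 1) == queue_len) ? 0 : (put_off + 1)) == get_off;
}
#endif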
static void
s2io_alarm_handle(unsigned long data)
{
struct s2io_nic *sp = (struct s2io_nic *)data;
struct net_device *dev = sp->dev;
s2io_handle_errors(dev);
mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
}
static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
{
struct ring_info *ring = (struct ring_info *)dev_id;
struct s2io_nic *sp = ring->nic;
struct XENA_dev_config __iomem *bar0 = sp->bar0;
if (unlikely(!is_s2io_card_up(sp)))
return IRQ_HANDLED;
if (sp->config.napi) {
u8 __iomem *addr = NULL;
u8 val8 = 0;
addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
addr += (7 - ring->ring_no);
val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
writeb(val8, addr);
val8 = readb(addr);
napi_schedule(&ring->napi);
} else {
rx_intr_handler(ring, 0);
s2io_chk_rx_buffers(sp, ring);
}
return IRQ_HANDLED;
}
static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
{
int i;
struct fifo_info *fifos = (struct fifo_info *)dev_id;
struct s2io_nic *sp = fifos->nic;
struct XENA_dev_config __iomem *bar0 = sp->bar0;
struct config_param *config = &sp->config;
u64 reason;
if (unlikely(!is_s2io_card_up(sp)))
return IRQ_NONE;
reason = readq(&bar0->general_int_status);
if (unlikely(reason == S2IO_MINUS_ONE))
/* Nothing much can be done. Get out */
return IRQ_HANDLED;
if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
if (reason & GEN_INTR_TXPIC)
s2io_txpic_intr_handle(sp);
if (reason & GEN_INTR_TXTRAFFIC)
writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
for (i = 0; i < config->tx_fifo_num; i++)
tx_intr_handler(&fifos[i]);
writeq(sp->general_int_mask, &bar0->general_int_mask);
readl(&bar0->general_int_status);
return IRQ_HANDLED;
}
/* The interrupt was not raised by us */
return IRQ_NONE;
}
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
struct XENA_dev_config __iomem *bar0 = sp->bar0;
u64 val64;
val64 = readq(&bar0->pic_int_status);
if (val64 & PIC_INT_GPIO) {
val64 = readq(&bar0->gpio_int_reg);
if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
(val64 & GPIO_INT_REG_LINK_UP)) {
/*
* This is an unstable state, so clear both the up and down
* interrupts and let the adapter re-evaluate the link state.
*/
val64 |= GPIO_INT_REG_LINK_DOWN;
val64 |= GPIO_INT_REG_LINK_UP;
writeq(val64, &bar0->gpio_int_reg);
val64 = readq(&bar0->gpio_int_mask);
val64 &= ~(GPIO_INT_MASK_LINK_UP |
GPIO_INT_MASK_LINK_DOWN);
writeq(val64, &bar0->gpio_int_mask);
} else if (val64 & GPIO_INT_REG_LINK_UP) {
val64 = readq(&bar0->adapter_status);
/* Enable Adapter */
val64 = readq(&bar0->adapter_control);
val64 |= ADAPTER_CNTL_EN;
writeq(val64, &bar0->adapter_control);
val64 |= ADAPTER_LED_ON;
writeq(val64, &bar0->adapter_control);
if (!sp->device_enabled_once)
sp->device_enabled_once = 1;
s2io_link(sp, LINK_UP);
/*
* unmask link down interrupt and mask link-up
* intr
*/
val64 = readq(&bar0->gpio_int_mask);
val64 &= ~GPIO_INT_MASK_LINK_DOWN;
val64 |= GPIO_INT_MASK_LINK_UP;
writeq(val64, &bar0->gpio_int_mask);
} else if (val64 & GPIO_INT_REG_LINK_DOWN) {
val64 = readq(&bar0->adapter_status);
s2io_link(sp, LINK_DOWN);
/* Link is down so unmask link up interrupt */
val64 = readq(&bar0->gpio_int_mask);
val64 &= ~GPIO_INT_MASK_LINK_UP;
val64 |= GPIO_INT_MASK_LINK_DOWN;
writeq(val64, &bar0->gpio_int_mask);
/* turn off LED */
val64 = readq(&bar0->adapter_control);
val64 = val64 & (~ADAPTER_LED_ON);
writeq(val64, &bar0->adapter_control);
}
}
val64 = readq(&bar0->gpio_int_mask);
}
/**
* do_s2io_chk_alarm_bit - Check for alarm and increment the counter
* @value: alarm bits
* @addr: address value
* @cnt: counter variable
* Description: Check for alarm and increment the counter
* Return Value:
* 1 - if alarm bit set
* 0 - if alarm bit is not set
*/
static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
unsigned long long *cnt)
{
u64 val64;
val64 = readq(addr);
if (val64 & value) {
writeq(val64, addr);
(*cnt)++;
return 1;
}
return 0;
}
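/*
 * Note: the write-back of val64 above relies on these alarm registers
 * being write-1-to-clear, so acknowledging the alarm and counting it
 * happen in one step. A hypothetical caller looks like the checks in
 * s2io_handle_errors() below:
 */
#if 0
if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
&sw_stat->serious_err_cnt))
goto reset; /* serious error: schedule a card reset */
#endif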
/**
* s2io_handle_errors - Xframe error indication handler
* @nic: device private variable
* Description: Handle alarms such as loss of link, single or
* double ECC errors, critical and serious errors.
* Return Value:
* NONE
*/
static void s2io_handle_errors(void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
struct s2io_nic *sp = netdev_priv(dev);
struct XENA_dev_config __iomem *bar0 = sp->bar0;
u64 temp64 = 0, val64 = 0;
int i = 0;
struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
if (!is_s2io_card_up(sp))
return;
if (pci_channel_offline(sp->pdev))
return;
memset(&sw_stat->ring_full_cnt, 0,
sizeof(sw_stat->ring_full_cnt));
/* Handling the XPAK counters update */
if (stats->xpak_timer_count < 72000) {
/* waiting for an hour */
stats->xpak_timer_count++;
} else {
s2io_updt_xpak_counter(dev);
/* reset the count to zero */
stats->xpak_timer_count = 0;
}
/* Handling link status change error Intr */
if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
val64 = readq(&bar0->mac_rmac_err_reg);
writeq(val64, &bar0->mac_rmac_err_reg);
if (val64 & RMAC_LINK_STATE_CHANGE_INT)
schedule_work(&sp->set_link_task);
}
/* In case of a serious error, the device will be Reset. */
if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
&sw_stat->serious_err_cnt))
goto reset;
/* Check for data parity error */
if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
&sw_stat->parity_err_cnt))
goto reset;
/* Check for ring full counter */
if (sp->device_type == XFRAME_II_DEVICE) {
val64 = readq(&bar0->ring_bump_counter1);
for (i = 0; i < 4; i++) {
temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
temp64 >>= 64 - ((i+1)*16);
sw_stat->ring_full_cnt[i] += temp64;
}
val64 = readq(&bar0->ring_bump_counter2);
for (i = 0; i < 4; i++) {
temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
temp64 >>= 64 - ((i+1)*16);
sw_stat->ring_full_cnt[i+4] += temp64;
}
}
val64 = readq(&bar0->txdma_int_status);
/* check for pfc_err */
if (val64 & TXDMA_PFC_INT) {
if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
PFC_MISC_0_ERR | PFC_MISC_1_ERR |
PFC_PCIX_ERR,
&bar0->pfc_err_reg,
&sw_stat->pfc_err_cnt))
goto reset;
do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
&bar0->pfc_err_reg,
&sw_stat->pfc_err_cnt);
}
/* check for tda_err */
if (val64 & TXDMA_TDA_INT) {
if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
TDA_SM0_ERR_ALARM |
TDA_SM1_ERR_ALARM,
&bar0->tda_err_reg,
&sw_stat->tda_err_cnt))
goto reset;
do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
&bar0->tda_err_reg,
&sw_stat->tda_err_cnt);
}
/* check for pcc_err */
if (val64 & TXDMA_PCC_INT) {
if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
PCC_N_SERR | PCC_6_COF_OV_ERR |
PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
PCC_TXB_ECC_DB_ERR,
&bar0->pcc_err_reg,
&sw_stat->pcc_err_cnt))
goto reset;
do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
&bar0->pcc_err_reg,
&sw_stat->pcc_err_cnt);
}
/* check for tti_err */
if (val64 & TXDMA_TTI_INT) {
if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
&bar0->tti_err_reg,
&sw_stat->tti_err_cnt))
goto reset;
do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
&bar0->tti_err_reg,
&sw_stat->tti_err_cnt);
}
/* check for lso_err */
if (val64 & TXDMA_LSO_INT) {
if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
&bar0->lso_err_reg,
&sw_stat->lso_err_cnt))
goto reset;
do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
&bar0->lso_err_reg,
&sw_stat->lso_err_cnt);
}
/* check for tpa_err */
if (val64 & TXDMA_TPA_INT) {
if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
&bar0->tpa_err_reg,
&sw_stat->tpa_err_cnt))
goto reset;
do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
&bar0->tpa_err_reg,
&sw_stat->tpa_err_cnt);
}
/* check for sm_err */
if (val64 & TXDMA_SM_INT) {
if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
&bar0->sm_err_reg,
&sw_stat->sm_err_cnt))
goto reset;
}
val64 = readq(&bar0->mac_int_status);
if (val64 & MAC_INT_STATUS_TMAC_INT) {
if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
&bar0->mac_tmac_err_reg,
&sw_stat->mac_tmac_err_cnt))
goto reset;
do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
TMAC_DESC_ECC_SG_ERR |
TMAC_DESC_ECC_DB_ERR,
&bar0->mac_tmac_err_reg,
&sw_stat->mac_tmac_err_cnt);
}
val64 = readq(&bar0->xgxs_int_status);
if (val64 & XGXS_INT_STATUS_TXGXS) {
if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
&bar0->xgxs_txgxs_err_reg,
&sw_stat->xgxs_txgxs_err_cnt))
goto reset;
do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
&bar0->xgxs_txgxs_err_reg,
&sw_stat->xgxs_txgxs_err_cnt);
}
val64 = readq(&bar0->rxdma_int_status);
if (val64 & RXDMA_INT_RC_INT_M) {
if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
RC_FTC_ECC_DB_ERR |
RC_PRCn_SM_ERR_ALARM |
RC_FTC_SM_ERR_ALARM,
&bar0->rc_err_reg,
&sw_stat->rc_err_cnt))
goto reset;
do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
RC_FTC_ECC_SG_ERR |
RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
&sw_stat->rc_err_cnt);
if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
PRC_PCI_AB_WR_Rn |
PRC_PCI_AB_F_WR_Rn,
&bar0->prc_pcix_err_reg,
&sw_stat->prc_pcix_err_cnt))
goto reset;
do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
PRC_PCI_DP_WR_Rn |
PRC_PCI_DP_F_WR_Rn,
&bar0->prc_pcix_err_reg,
&sw_stat->prc_pcix_err_cnt);
}
if (val64 & RXDMA_INT_RPA_INT_M) {
if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
&bar0->rpa_err_reg,
&sw_stat->rpa_err_cnt))
goto reset;
do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
&bar0->rpa_err_reg,
&sw_stat->rpa_err_cnt);
}
if (val64 & RXDMA_INT_RDA_INT_M) {
if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
RDA_FRM_ECC_DB_N_AERR |
RDA_SM1_ERR_ALARM |
RDA_SM0_ERR_ALARM |
RDA_RXD_ECC_DB_SERR,
&bar0->rda_err_reg,
&sw_stat->rda_err_cnt))
goto reset;
do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
RDA_FRM_ECC_SG_ERR |
RDA_MISC_ERR |
RDA_PCIX_ERR,
&bar0->rda_err_reg,
&sw_stat->rda_err_cnt);
}
if (val64 & RXDMA_INT_RTI_INT_M) {
if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
&bar0->rti_err_reg,
&sw_stat->rti_err_cnt))
goto reset;
do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
&bar0->rti_err_reg,
&sw_stat->rti_err_cnt);
}
val64 = readq(&bar0->mac_int_status);
if (val64 & MAC_INT_STATUS_RMAC_INT) {
if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
&bar0->mac_rmac_err_reg,
&sw_stat->mac_rmac_err_cnt))
goto reset;
do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
RMAC_SINGLE_ECC_ERR |
RMAC_DOUBLE_ECC_ERR,
&bar0->mac_rmac_err_reg,
&sw_stat->mac_rmac_err_cnt);
}
val64 = readq(&bar0->xgxs_int_status);
if (val64 & XGXS_INT_STATUS_RXGXS) {
if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
&bar0->xgxs_rxgxs_err_reg,
&sw_stat->xgxs_rxgxs_err_cnt))
goto reset;
}
val64 = readq(&bar0->mc_int_status);
if (val64 & MC_INT_STATUS_MC_INT) {
if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
&bar0->mc_err_reg,
&sw_stat->mc_err_cnt))
goto reset;
/* Handling Ecc errors */
if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
writeq(val64, &bar0->mc_err_reg);
if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
sw_stat->double_ecc_errs++;
if (sp->device_type != XFRAME_II_DEVICE) {
/*
* Reset XframeI only if critical error
*/
if (val64 &
(MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
MC_ERR_REG_MIRI_ECC_DB_ERR_1))
goto reset;
}
} else
sw_stat->single_ecc_errs++;
}
}
return;
reset:
s2io_stop_all_tx_queue(sp);
schedule_work(&sp->rst_timer_task);
sw_stat->soft_reset_cnt++;
}
/**
* s2io_isr - ISR handler of the device.
* @irq: the irq of the device.
* @dev_id: a void pointer to the dev structure of the NIC.
* Description: This function is the ISR handler of the device. It
* identifies the reason for the interrupt and calls the relevant
* service routines. As a contingency measure, this ISR allocates the
* recv buffers, if their numbers are below the panic value which is
* presently set to 25% of the original number of rcv buffers allocated.
* Return value:
* IRQ_HANDLED: will be returned if IRQ was handled by this routine
* IRQ_NONE: will be returned if interrupt is not from our device
*/
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
struct s2io_nic *sp = netdev_priv(dev);
struct XENA_dev_config __iomem *bar0 = sp->bar0;
int i;
u64 reason = 0;
struct mac_info *mac_control;
struct config_param *config;
/* Pretend we handled any irq's from a disconnected card */
if (pci_channel_offline(sp->pdev))
return IRQ_NONE;
if (!is_s2io_card_up(sp))
return IRQ_NONE;
config = &sp->config;
mac_control = &sp->mac_control;
/*
* Identify the cause for interrupt and call the appropriate
* interrupt handler. Causes for the interrupt could be;
* 1. Rx of packet.
* 2. Tx complete.
* 3. Link down.
*/
reason = readq(&bar0->general_int_status);
if (unlikely(reason == S2IO_MINUS_ONE))
return IRQ_HANDLED; /* Nothing much can be done. Get out */
if (reason &
(GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
if (config->napi) {
if (reason & GEN_INTR_RXTRAFFIC) {
napi_schedule(&sp->napi);
writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
readl(&bar0->rx_traffic_int);
}
} else {
/*
* rx_traffic_int reg is an R1 register, writing all 1's
* will ensure that the actual interrupt causing bit
* gets cleared and hence a read can be avoided.
*/
if (reason & GEN_INTR_RXTRAFFIC)
writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
for (i = 0; i < config->rx_ring_num; i++) {
struct ring_info *ring = &mac_control->rings[i];
rx_intr_handler(ring, 0);
}
}
/*
* tx_traffic_int reg is an R1 register, writing all 1's
* will ensure that the actual interrupt causing bit gets
* cleared and hence a read can be avoided.
*/
if (reason & GEN_INTR_TXTRAFFIC)
writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
for (i = 0; i < config->tx_fifo_num; i++)
tx_intr_handler(&mac_control->fifos[i]);
if (reason & GEN_INTR_TXPIC)
s2io_txpic_intr_handle(sp);
/*
* Reallocate the buffers from the interrupt handler itself.
*/
if (!config->napi) {
for (i = 0; i < config->rx_ring_num; i++) {
struct ring_info *ring = &mac_control->rings[i];
s2io_chk_rx_buffers(sp, ring);
}
}
writeq(sp->general_int_mask, &bar0->general_int_mask);
readl(&bar0->general_int_status);
return IRQ_HANDLED;
} else if (!reason) {
/* The interrupt was not raised by us */
return IRQ_NONE;
}
return IRQ_HANDLED;
}
/**
* s2io_updt_stats - Triggers an on-adapter statistics update.
* @sp : private member of the device structure.
* Description: Writes to the stat_cfg register to initiate a one-shot
* statistics update, then polls briefly for the update to complete.
*/
static void s2io_updt_stats(struct s2io_nic *sp)
{
struct XENA_dev_config __iomem *bar0 = sp->bar0;
u64 val64;
int cnt = 0;
if (is_s2io_card_up(sp)) {
/* Apprx 30us on a 133 MHz bus */
val64 = SET_UPDT_CLICKS(10) |
STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
writeq(val64, &bar0->stat_cfg);
do {
udelay(100);
val64 = readq(&bar0->stat_cfg);
if (!(val64 & s2BIT(0)))
break;
cnt++;
if (cnt == 5)
break; /* Update failed */
} while (1);
}
}
/**
* s2io_get_stats - Updates the device statistics structure.
* @dev : pointer to the device structure.
* Description:
* This function updates the device statistics structure in the s2io_nic
* structure and returns a pointer to the same.
* Return value:
* pointer to the updated net_device_stats structure.
*/
static struct net_device_stats *s2io_get_stats(struct net_device *dev)
{
struct s2io_nic *sp = netdev_priv(dev);
struct mac_info *mac_control = &sp->mac_control;
struct stat_block *stats = mac_control->stats_info;
u64 delta;
/* Configure Stats for immediate updt */
s2io_updt_stats(sp);
/* A device reset will cause the on-adapter statistics to be zero'ed.
* This can be done while running by changing the MTU. To prevent the
* system from having the stats zero'ed, the driver keeps a copy of the
* last update to the system (which is also zero'ed on reset). This
* enables the driver to accurately know the delta between the last
* update and the current update.
*/
delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
sp->stats.rx_packets += delta;
dev->stats.rx_packets += delta;
delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
sp->stats.tx_packets += delta;
dev->stats.tx_packets += delta;
delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
sp->stats.rx_bytes += delta;
dev->stats.rx_bytes += delta;
delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
sp->stats.tx_bytes += delta;
dev->stats.tx_bytes += delta;
delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
sp->stats.rx_errors += delta;
dev->stats.rx_errors += delta;
delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
sp->stats.tx_errors += delta;
dev->stats.tx_errors += delta;
delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
sp->stats.rx_dropped += delta;
dev->stats.rx_dropped += delta;
delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
sp->stats.tx_dropped += delta;
dev->stats.tx_dropped += delta;
/* The adapter MAC interprets pause frames as multicast packets, but
* does not pass them up. This erroneously increases the multicast
* packet count and needs to be deducted when the multicast frame count
* is queried.
*/
delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
le32_to_cpu(stats->rmac_vld_mcst_frms);
delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
delta -= sp->stats.multicast;
sp->stats.multicast += delta;
dev->stats.multicast += delta;
delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
le32_to_cpu(stats->rmac_usized_frms)) +
le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
sp->stats.rx_length_errors += delta;
dev->stats.rx_length_errors += delta;
delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
sp->stats.rx_crc_errors += delta;
dev->stats.rx_crc_errors += delta;
return &dev->stats;
}
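/*
 * Sketch (not built): most on-adapter counters are kept as a 32-bit value
 * plus a 32-bit overflow word; the delta logic above first reassembles
 * them into one 64-bit count, as in this hypothetical helper:
 */
#if 0
static inline u64 stat_from_halves(__le32 oflow, __le32 lo)
{
return ((u64)le32_to_cpu(oflow) << 32) | le32_to_cpu(lo);
}
#endif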
/**
* s2io_set_multicast - entry point for multicast address enable/disable.
* @dev : pointer to the device structure
* Description:
* This function is a driver entry point which gets called by the kernel
* whenever multicast addresses must be enabled/disabled. This also gets
* called to set/reset promiscuous mode. Depending on the device flags, we
* determine whether multicast addresses must be enabled or promiscuous mode
* is to be disabled, etc.
* Return value:
* void.
*/
static void s2io_set_multicast(struct net_device *dev)
{
int i, j, prev_cnt;
struct netdev_hw_addr *ha;
struct s2io_nic *sp = netdev_priv(dev);
struct XENA_dev_config __iomem *bar0 = sp->bar0;
u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
0xfeffffffffffULL;
u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
void __iomem *add;
struct config_param *config = &sp->config;
if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
/* Enable all Multicast addresses */
writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
&bar0->rmac_addr_data0_mem);
writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
&bar0->rmac_addr_data1_mem);
val64 = RMAC_ADDR_CMD_MEM_WE |
RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
writeq(val64, &bar0->rmac_addr_cmd_mem);
/* Wait till command completes */
wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
S2IO_BIT_RESET);
sp->m_cast_flg = 1;
sp->all_multi_pos = config->max_mc_addr - 1;
} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
/* Disable all Multicast addresses */
writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
&bar0->rmac_addr_data0_mem);
writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
&bar0->rmac_addr_data1_mem);
val64 = RMAC_ADDR_CMD_MEM_WE |
RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
writeq(val64, &bar0->rmac_addr_cmd_mem);
/* Wait till command completes */
wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
S2IO_BIT_RESET);
sp->m_cast_flg = 0;
sp->all_multi_pos = 0;
}
if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
/* Put the NIC into promiscuous mode */
add = &bar0->mac_cfg;
val64 = readq(&bar0->mac_cfg);
val64 |= MAC_CFG_RMAC_PROM_ENABLE;
writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
writel((u32)val64, add);
writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
writel((u32) (val64 >> 32), (add + 4));
if (vlan_tag_strip != 1) {
val64 = readq(&bar0->rx_pa_cfg);
val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
writeq(val64, &bar0->rx_pa_cfg);
sp->vlan_strip_flag = 0;
}
val64 = readq(&bar0->mac_cfg);
sp->promisc_flg = 1;
DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
dev->name);
} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
/* Remove the NIC from promiscuous mode */
add = &bar0->mac_cfg;
val64 = readq(&bar0->mac_cfg);
val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
writel((u32)val64, add);
writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
writel((u32) (val64 >> 32), (add + 4));
if (vlan_tag_strip != 0) {
val64 = readq(&bar0->rx_pa_cfg);
val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
writeq(val64, &bar0->rx_pa_cfg);
sp->vlan_strip_flag = 1;
}
val64 = readq(&bar0->mac_cfg);
sp->promisc_flg = 0;
DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
}
/* Update individual M_CAST address list */
if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
if (netdev_mc_count(dev) >
(config->max_mc_addr - config->max_mac_addr)) {
DBG_PRINT(ERR_DBG,
"%s: No more Rx filters can be added - "
"please enable ALL_MULTI instead\n",
dev->name);
return;
}
prev_cnt = sp->mc_addr_count;
sp->mc_addr_count = netdev_mc_count(dev);
/* Clear out the previous list of Mc in the H/W. */
for (i = 0; i < prev_cnt; i++) {
writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
&bar0->rmac_addr_data0_mem);
writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
&bar0->rmac_addr_data1_mem);
val64 = RMAC_ADDR_CMD_MEM_WE |
RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
RMAC_ADDR_CMD_MEM_OFFSET
(config->mc_start_offset + i);
writeq(val64, &bar0->rmac_addr_cmd_mem);
/* Wait till command completes */
if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
S2IO_BIT_RESET)) {
DBG_PRINT(ERR_DBG,
"%s: Adding Multicasts failed\n",
dev->name);
return;
}
}
/* Create the new Rx filter list and update the same in H/W. */
i = 0;
netdev_for_each_mc_addr(ha, dev) {
mac_addr = 0;
for (j = 0; j < ETH_ALEN; j++) {
mac_addr |= ha->addr[j];
mac_addr <<= 8;
}
mac_addr >>= 8;
writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
&bar0->rmac_addr_data0_mem);
writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
&bar0->rmac_addr_data1_mem);
val64 = RMAC_ADDR_CMD_MEM_WE |
RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
RMAC_ADDR_CMD_MEM_OFFSET
(i + config->mc_start_offset);
writeq(val64, &bar0->rmac_addr_cmd_mem);
/* Wait till command completes */
if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
S2IO_BIT_RESET)) {
DBG_PRINT(ERR_DBG,
"%s: Adding Multicasts failed\n",
dev->name);
return;
}
i++;
}
}
}
/* read unicast & multicast addresses from CAM and store them in the
* def_mac_addr structure
*/
static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
{
int offset;
u64 mac_addr = 0x0;
struct config_param *config = &sp->config;
/* store unicast & multicast mac addresses */
for (offset = 0; offset < config->max_mc_addr; offset++) {
mac_addr = do_s2io_read_unicast_mc(sp, offset);
/* if read fails disable the entry */
if (mac_addr == FAILURE)
mac_addr = S2IO_DISABLE_MAC_ENTRY;
do_s2io_copy_mac_addr(sp, offset, mac_addr);
}
}
/* restore unicast & multicast MAC to CAM from def_mac_addr structure */
static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
{
int offset;
struct config_param *config = &sp->config;
/* restore unicast mac address */
for (offset = 0; offset < config->max_mac_addr; offset++)
do_s2io_prog_unicast(sp->dev,
sp->def_mac_addr[offset].mac_addr);
/* restore multicast mac address */
for (offset = config->mc_start_offset;
offset < config->max_mc_addr; offset++)
do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
}
/* add a multicast MAC address to CAM */
static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
{
int i;
u64 mac_addr = 0;
struct config_param *config = &sp->config;
for (i = 0; i < ETH_ALEN; i++) {
mac_addr <<= 8;
mac_addr |= addr[i];
}
if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
return SUCCESS;
/* check if the multicast mac is already present in CAM */
for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
u64 tmp64;
tmp64 = do_s2io_read_unicast_mc(sp, i);
if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
break;
if (tmp64 == mac_addr)
return SUCCESS;
}
if (i == config->max_mc_addr) {
DBG_PRINT(ERR_DBG,
"CAM full no space left for multicast MAC\n");
return FAILURE;
}
/* Update the internal structure with this new mac address */
do_s2io_copy_mac_addr(sp, i, mac_addr);
return do_s2io_add_mac(sp, mac_addr, i);
}
/* add MAC address to CAM */
static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
{
u64 val64;
struct XENA_dev_config __iomem *bar0 = sp->bar0;
writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
&bar0->rmac_addr_data0_mem);
val64 = RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
RMAC_ADDR_CMD_MEM_OFFSET(off);
writeq(val64, &bar0->rmac_addr_cmd_mem);
/* Wait till command completes */
if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
S2IO_BIT_RESET)) {
DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
return FAILURE;
}
return SUCCESS;
}
/* deletes a specified unicast/multicast mac entry from CAM */
static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
{
int offset;
u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
struct config_param *config = &sp->config;
for (offset = 1;
offset < config->max_mc_addr; offset++) {
tmp64 = do_s2io_read_unicast_mc(sp, offset);
if (tmp64 == addr) {
/* disable the entry by writing 0xffffffffffffULL */
if (do_s2io_add_mac(sp, dis_addr, offset) == FAILURE)
return FAILURE;
/* store the new mac list from CAM */
do_s2io_store_unicast_mc(sp);
return SUCCESS;
}
}
DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
(unsigned long long)addr);
return FAILURE;
}
/* read mac entries from CAM */
static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
{
u64 tmp64 = 0xffffffffffff0000ULL, val64;
struct XENA_dev_config __iomem *bar0 = sp->bar0;
/* read mac addr */
val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
RMAC_ADDR_CMD_MEM_OFFSET(offset);
writeq(val64, &bar0->rmac_addr_cmd_mem);
/* Wait till command completes */
if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
S2IO_BIT_RESET)) {
DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
return FAILURE;
}
tmp64 = readq(&bar0->rmac_addr_data0_mem);
return tmp64 >> 16;
}
/**
* s2io_set_mac_addr - driver entry point for setting the MAC address
*/
static int s2io_set_mac_addr(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
return -EINVAL;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
/* store the MAC address in CAM */
return do_s2io_prog_unicast(dev, dev->dev_addr);
}
/**
* do_s2io_prog_unicast - Programs the Xframe mac address
* @dev : pointer to the device structure.
* @addr: a uchar pointer to the new mac address which is to be set.
* Description : This procedure will program the Xframe to receive
* frames with the new MAC address.
* Return value: SUCCESS on success and FAILURE on failure.
*/
static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
{
struct s2io_nic *sp = netdev_priv(dev);
register u64 mac_addr = 0, perm_addr = 0;
int i;
u64 tmp64;
struct config_param *config = &sp->config;
/*
* Set the new MAC address as the new unicast filter and reflect this
* change on the device address registered with the OS. It will be
* at offset 0.
*/
for (i = 0; i < ETH_ALEN; i++) {
mac_addr <<= 8;
mac_addr |= addr[i];
perm_addr <<= 8;
perm_addr |= sp->def_mac_addr[0].mac_addr[i];
}
/* check if the dev_addr is different than perm_addr */
if (mac_addr == perm_addr)
return SUCCESS;
/* check if the mac is already present in CAM */
for (i = 1; i < config->max_mac_addr; i++) {
tmp64 = do_s2io_read_unicast_mc(sp, i);
if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
break;
if (tmp64 == mac_addr) {
DBG_PRINT(INFO_DBG,
"MAC addr:0x%llx already present in CAM\n",
(unsigned long long)mac_addr);
return SUCCESS;
}
}
if (i == config->max_mac_addr) {
DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
return FAILURE;
}
/* Update the internal structure with this new mac address */
do_s2io_copy_mac_addr(sp, i, mac_addr);
return do_s2io_add_mac(sp, mac_addr, i);
}
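/*
 * Sketch (not built): both CAM programming paths fold the six address
 * bytes into one u64 with addr[0] ending up in the most significant of
 * the 48 used bits:
 */
#if 0
static u64 s2io_mac_to_u64(const u8 *addr)
{
u64 val = 0;
int i;

for (i = 0; i < ETH_ALEN; i++)
val = (val << 8) | addr[i];
return val;
}
#endif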
/**
* s2io_ethtool_sset - Sets different link parameters.
* @sp : private member of the device structure, which is a pointer to the
* s2io_nic structure.
* @info: pointer to the structure with parameters given by ethtool to set
* link information.
* Description:
* The function sets different link parameters provided by the user onto
* the NIC.
* Return value:
* 0 on success.
*/
static int s2io_ethtool_sset(struct net_device *dev,
struct ethtool_cmd *info)
{
struct s2io_nic *sp = netdev_priv(dev);
if ((info->autoneg == AUTONEG_ENABLE) ||
(ethtool_cmd_speed(info) != SPEED_10000) ||
(info->duplex != DUPLEX_FULL))
return -EINVAL;
s2io_close(sp->dev);
s2io_open(sp->dev);
return 0;
}
/**
* s2io_ethtool_gset - Return link specific information.
* @sp : private member of the device structure, pointer to the
* s2io_nic structure.
* @info : pointer to the structure with parameters given by ethtool
* to return link information.
* Description:
* Returns link specific information like speed, duplex etc.. to ethtool.
* Return value :
* return 0 on success.
*/
static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
{
struct s2io_nic *sp = netdev_priv(dev);
info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
info->port = PORT_FIBRE;
info->transceiver = XCVR_EXTERNAL;
if (netif_carrier_ok(sp->dev)) {
ethtool_cmd_speed_set(info, SPEED_10000);
info->duplex = DUPLEX_FULL;
} else {
ethtool_cmd_speed_set(info, -1);
info->duplex = -1;
}
info->autoneg = AUTONEG_DISABLE;
return 0;
}
/**
* s2io_ethtool_gdrvinfo - Returns driver specific information.
* @sp : private member of the device structure, which is a pointer to the
* s2io_nic structure.
* @info : pointer to the structure with parameters given by ethtool to
* return driver information.
* Description:
* Returns driver specific information like name, version etc. to ethtool.
* Return value:
* void
*/
static void s2io_ethtool_gdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct s2io_nic *sp = netdev_priv(dev);
strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
strncpy(info->version, s2io_driver_version, sizeof(info->version));
strncpy(info->fw_version, "", sizeof(info->fw_version));
strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
info->regdump_len = XENA_REG_SPACE;
info->eedump_len = XENA_EEPROM_SPACE;
}
/**
* s2io_ethtool_gregs - dumps the entire register space of Xframe into the buffer.
* @sp: private member of the device structure, which is a pointer to the
* s2io_nic structure.
* @regs : pointer to the structure with parameters given by ethtool for
* dumping the registers.
* @space : The input argument into which all the registers are dumped.
* Description:
* Dumps the entire register space of xFrame NIC into the user given
* buffer area.
* Return value :
* void .
*/
static void s2io_ethtool_gregs(struct net_device *dev,
struct ethtool_regs *regs, void *space)
{
int i;
u64 reg;
u8 *reg_space = (u8 *)space;
struct s2io_nic *sp = netdev_priv(dev);
regs->len = XENA_REG_SPACE;
regs->version = sp->pdev->subsystem_device;
for (i = 0; i < regs->len; i += 8) {
reg = readq(sp->bar0 + i);
memcpy((reg_space + i), &reg, 8);
}
}
/*
* s2io_set_led - control NIC led
*/
static void s2io_set_led(struct s2io_nic *sp, bool on)
{
struct XENA_dev_config __iomem *bar0 = sp->bar0;
u16 subid = sp->pdev->subsystem_device;
u64 val64;
if ((sp->device_type == XFRAME_II_DEVICE) ||
((subid & 0xFF) >= 0x07)) {
val64 = readq(&bar0->gpio_control);
if (on)
val64 |= GPIO_CTRL_GPIO_0;
else
val64 &= ~GPIO_CTRL_GPIO_0;
writeq(val64, &bar0->gpio_control);
} else {
val64 = readq(&bar0->adapter_control);
if (on)
val64 |= ADAPTER_LED_ON;
else
val64 &= ~ADAPTER_LED_ON;
writeq(val64, &bar0->adapter_control);
}
}
/**
* s2io_ethtool_set_led - To physically identify the nic on the system.
* @dev : network device
* @state: led setting
*
* Description: Used to physically identify the NIC on the system.
* The Link LED will blink for a time specified by the user for
* identification.
* NOTE: The Link has to be Up to be able to blink the LED. Hence
 * identification is possible only if its link is up.
*/
static int s2io_ethtool_set_led(struct net_device *dev,
enum ethtool_phys_id_state state)
{
struct s2io_nic *sp = netdev_priv(dev);
struct XENA_dev_config __iomem *bar0 = sp->bar0;
u16 subid = sp->pdev->subsystem_device;
if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
u64 val64 = readq(&bar0->adapter_control);
if (!(val64 & ADAPTER_CNTL_EN)) {
pr_err("Adapter Link down, cannot blink LED\n");
return -EAGAIN;
}
}
switch (state) {
case ETHTOOL_ID_ACTIVE:
sp->adapt_ctrl_org = readq(&bar0->gpio_control);
return 1; /* cycle on/off once per second */
case ETHTOOL_ID_ON:
s2io_set_led(sp, true);
break;
case ETHTOOL_ID_OFF:
s2io_set_led(sp, false);
break;
case ETHTOOL_ID_INACTIVE:
if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
}
return 0;
}
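/*
 * Usage note (illustrative): this backs "ethtool -p <dev> [N]", the
 * ETHTOOL_PHYS_ID identify operation. Returning 1 from the
 * ETHTOOL_ID_ACTIVE case asks the ethtool core to alternate between
 * the ON and OFF callbacks once per second for the requested duration.
 */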
static void s2io_ethtool_gringparam(struct net_device *dev,
struct ethtool_ringparam *ering)
{
struct s2io_nic *sp = netdev_priv(dev);
int i, tx_desc_count = 0, rx_desc_count = 0;
if (sp->rxd_mode == RXD_MODE_1) {
ering->rx_max_pending = MAX_RX_DESC_1;
ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
} else {
ering->rx_max_pending = MAX_RX_DESC_2;
ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
}
ering->rx_mini_max_pending = 0;
ering->tx_max_pending = MAX_TX_DESC;
for (i = 0; i < sp->config.rx_ring_num; i++)
rx_desc_count += sp->config.rx_cfg[i].num_rxd;
ering->rx_pending = rx_desc_count;
ering->rx_jumbo_pending = rx_desc_count;
ering->rx_mini_pending = 0;
for (i = 0; i < sp->config.tx_fifo_num; i++)
tx_desc_count += sp->config.tx_cfg[i].fifo_len;
ering->tx_pending = tx_desc_count;
DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
}
/**
 * s2io_ethtool_getpause_data - Pause frame generation and reception.
* @sp : private member of the device structure, which is a pointer to the
* s2io_nic structure.
* @ep : pointer to the structure with pause parameters given by ethtool.
* Description:
* Returns the Pause frame generation and reception capability of the NIC.
* Return value:
* void
*/
static void s2io_ethtool_getpause_data(struct net_device *dev,
struct ethtool_pauseparam *ep)
{
u64 val64;
struct s2io_nic *sp = netdev_priv(dev);
struct XENA_dev_config __iomem *bar0 = sp->bar0;
val64 = readq(&bar0->rmac_pause_cfg);
if (val64 & RMAC_PAUSE_GEN_ENABLE)
ep->tx_pause = true;
if (val64 & RMAC_PAUSE_RX_ENABLE)
ep->rx_pause = true;
ep->autoneg = false;
}
/**
* s2io_ethtool_setpause_data - set/reset pause frame generation.
* @sp : private member of the device structure, which is a pointer to the
* s2io_nic structure.
* @ep : pointer to the structure with pause parameters given by ethtool.
* Description:
* It can be used to set or reset Pause frame generation or reception
* support of the NIC.
* Return value:
* int, returns 0 on Success
*/
static int s2io_ethtool_setpause_data(struct net_device *dev,
struct ethtool_pauseparam *ep)
{
u64 val64;
struct s2io_nic *sp = netdev_priv(dev);
struct XENA_dev_config __iomem *bar0 = sp->bar0;
val64 = readq(&bar0->rmac_pause_cfg);
if (ep->tx_pause)
val64 |= RMAC_PAUSE_GEN_ENABLE;
else
val64 &= ~RMAC_PAUSE_GEN_ENABLE;
if (ep->rx_pause)
val64 |= RMAC_PAUSE_RX_ENABLE;
else
val64 &= ~RMAC_PAUSE_RX_ENABLE;
writeq(val64, &bar0->rmac_pause_cfg);
return 0;
}
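/*
 * Usage note (illustrative): this is the ETHTOOL_SPAUSEPARAM backend,
 * reached by "ethtool -A <dev> rx on|off tx on|off"; it performs a
 * read-modify-write of the RMAC pause configuration register.
 */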
/**
* read_eeprom - reads 4 bytes of data from user given offset.
* @sp : private member of the device structure, which is a pointer to the
* s2io_nic structure.
 * @off : offset at which the data must be read.
 * @data : It's an output parameter where the data read at the given
 * offset is stored.
* Description:
* Will read 4 bytes of data from the user given offset and return the
* read data.
* NOTE: Will allow to read only part of the EEPROM visible through the
* I2C bus.
* Return value:
* -1 on failure and 0 on success.
*/
#define S2IO_DEV_ID 5
static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
{
int ret = -1;
u32 exit_cnt = 0;
u64 val64;
struct XENA_dev_config __iomem *bar0 = sp->bar0;
if (sp->device_type == XFRAME_I_DEVICE) {
val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
I2C_CONTROL_ADDR(off) |
I2C_CONTROL_BYTE_CNT(0x3) |
I2C_CONTROL_READ |
I2C_CONTROL_CNTL_START;
SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
while (exit_cnt < 5) {
val64 = readq(&bar0->i2c_control);
if (I2C_CONTROL_CNTL_END(val64)) {
*data = I2C_CONTROL_GET_DATA(val64);
ret = 0;
break;
}
msleep(50);
exit_cnt++;
}
}
if (sp->device_type == XFRAME_II_DEVICE) {
val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
SPI_CONTROL_BYTECNT(0x3) |
SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
val64 |= SPI_CONTROL_REQ;
SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
while (exit_cnt < 5) {
val64 = readq(&bar0->spi_control);
if (val64 & SPI_CONTROL_NACK) {
ret = 1;
break;
} else if (val64 & SPI_CONTROL_DONE) {
*data = readq(&bar0->spi_data);
*data &= 0xffffff;
ret = 0;
break;
}
msleep(50);
exit_cnt++;
}
}
return ret;
}
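/*
 * A minimal call sketch (the offset 0x4F0 is only an example): read one
 * EEPROM word and report it. On Xframe II the result is explicitly
 * masked to its low 24 bits, matching the 3-byte transfer size.
 *
 *	u64 word;
 *
 *	if (!read_eeprom(sp, 0x4F0, &word))
 *		DBG_PRINT(INFO_DBG, "EEPROM[0x4F0] = 0x%llx\n",
 *			  (unsigned long long)word);
 */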
/**
* write_eeprom - actually writes the relevant part of the data value.
* @sp : private member of the device structure, which is a pointer to the
* s2io_nic structure.
* @off : offset at which the data must be written
* @data : The data that is to be written
* @cnt : Number of bytes of the data that are actually to be written into
* the Eeprom. (max of 3)
* Description:
* Actually writes the relevant part of the data value into the Eeprom
* through the I2C bus.
* Return value:
* 0 on success, -1 on failure.
*/
static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
{
int exit_cnt = 0, ret = -1;
u64 val64;
struct XENA_dev_config __iomem *bar0 = sp->bar0;
if (sp->device_type == XFRAME_I_DEVICE) {
val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
I2C_CONTROL_ADDR(off) |
I2C_CONTROL_BYTE_CNT(cnt) |
I2C_CONTROL_SET_DATA((u32)data) |
I2C_CONTROL_CNTL_START;
SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
while (exit_cnt < 5) {
val64 = readq(&bar0->i2c_control);
if (I2C_CONTROL_CNTL_END(val64)) {
if (!(val64 & I2C_CONTROL_NACK))
ret = 0;
break;
}
msleep(50);
exit_cnt++;
}
}
if (sp->device_type == XFRAME_II_DEVICE) {
int write_cnt = (cnt == 8) ? 0 : cnt;
writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);
val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
SPI_CONTROL_BYTECNT(write_cnt) |
SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
val64 |= SPI_CONTROL_REQ;
SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
while (exit_cnt < 5) {
val64 = readq(&bar0->spi_control);
if (val64 & SPI_CONTROL_NACK) {
ret = 1;
break;
} else if (val64 & SPI_CONTROL_DONE) {
ret = 0;
break;
}
msleep(50);
exit_cnt++;
}
}
return ret;
}
static void s2io_vpd_read(struct s2io_nic *nic)
{
u8 *vpd_data;
u8 data;
int i = 0, cnt, len, fail = 0;
int vpd_addr = 0x80;
struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;
if (nic->device_type == XFRAME_II_DEVICE) {
strcpy(nic->product_name, "Xframe II 10GbE network adapter");
vpd_addr = 0x80;
} else {
strcpy(nic->product_name, "Xframe I 10GbE network adapter");
vpd_addr = 0x50;
}
strcpy(nic->serial_num, "NOT AVAILABLE");
vpd_data = kmalloc(256, GFP_KERNEL);
if (!vpd_data) {
swstats->mem_alloc_fail_cnt++;
return;
}
swstats->mem_allocated += 256;
for (i = 0; i < 256; i += 4) {
pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
for (cnt = 0; cnt < 5; cnt++) {
msleep(2);
pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
if (data == 0x80)
break;
}
if (cnt >= 5) {
DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
fail = 1;
break;
}
pci_read_config_dword(nic->pdev, (vpd_addr + 4),
(u32 *)&vpd_data[i]);
}
if (!fail) {
/* read serial number of adapter */
for (cnt = 0; cnt < 252; cnt++) {
if ((vpd_data[cnt] == 'S') &&
(vpd_data[cnt+1] == 'N')) {
len = vpd_data[cnt+2];
if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
memcpy(nic->serial_num,
&vpd_data[cnt + 3],
len);
memset(nic->serial_num+len,
0,
VPD_STRING_LEN-len);
break;
}
}
}
}
if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
len = vpd_data[1];
memcpy(nic->product_name, &vpd_data[3], len);
nic->product_name[len] = 0;
}
kfree(vpd_data);
swstats->mem_freed += 256;
}
/**
* s2io_ethtool_geeprom - reads the value stored in the Eeprom.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
* @eeprom : pointer to the user level structure provided by ethtool,
* containing all relevant information.
 * @data_buf : buffer into which the read Eeprom contents are returned.
* Description: Reads the values stored in the Eeprom at given offset
 * for a given length. Stores these values in the input argument data
 * buffer 'data_buf' and returns these to the caller (ethtool).
* Return value:
* int 0 on success
*/
static int s2io_ethtool_geeprom(struct net_device *dev,
struct ethtool_eeprom *eeprom, u8 * data_buf)
{
u32 i, valid;
u64 data;
struct s2io_nic *sp = netdev_priv(dev);
eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
for (i = 0; i < eeprom->len; i += 4) {
if (read_eeprom(sp, (eeprom->offset + i), &data)) {
DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
return -EFAULT;
}
valid = INV(data);
memcpy((data_buf + i), &valid, 4);
}
return 0;
}
/**
* s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
* @sp : private member of the device structure, which is a pointer to the
* s2io_nic structure.
* @eeprom : pointer to the user level structure provided by ethtool,
* containing all relevant information.
 * @data_buf : user defined value to be written into Eeprom.
* Description:
* Tries to write the user provided value in the Eeprom, at the offset
* given by the user.
* Return value:
* 0 on success, -EFAULT on failure.
*/
static int s2io_ethtool_seeprom(struct net_device *dev,
struct ethtool_eeprom *eeprom,
u8 *data_buf)
{
int len = eeprom->len, cnt = 0;
u64 valid = 0, data;
struct s2io_nic *sp = netdev_priv(dev);
if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
		DBG_PRINT(ERR_DBG,
			  "ETHTOOL_WRITE_EEPROM Err: "
			  "Magic value is wrong, it is 0x%x should be 0x%x\n",
			  eeprom->magic,
			  (sp->pdev->vendor | (sp->pdev->device << 16)));
return -EFAULT;
}
while (len) {
data = (u32)data_buf[cnt] & 0x000000FF;
if (data)
valid = (u32)(data << 24);
else
valid = data;
if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
DBG_PRINT(ERR_DBG,
"ETHTOOL_WRITE_EEPROM Err: "
"Cannot write into the specified offset\n");
return -EFAULT;
}
cnt++;
len--;
}
return 0;
}
/**
* s2io_register_test - reads and writes into all clock domains.
* @sp : private member of the device structure, which is a pointer to the
* s2io_nic structure.
 * @data : variable that returns the result of each of the tests conducted
 * by the driver.
* Description:
* Read and write into all clock domains. The NIC has 3 clock domains,
* see that registers in all the three regions are accessible.
* Return value:
* 0 on success.
*/
static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
{
struct XENA_dev_config __iomem *bar0 = sp->bar0;
u64 val64 = 0, exp_val;
int fail = 0;
val64 = readq(&bar0->pif_rd_swapper_fb);
if (val64 != 0x123456789abcdefULL) {
fail = 1;
DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
}
val64 = readq(&bar0->rmac_pause_cfg);
if (val64 != 0xc000ffff00000000ULL) {
fail = 1;
DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
}
val64 = readq(&bar0->rx_queue_cfg);
if (sp->device_type == XFRAME_II_DEVICE)
exp_val = 0x0404040404040404ULL;
else
exp_val = 0x0808080808080808ULL;
if (val64 != exp_val) {
fail = 1;
DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
}
val64 = readq(&bar0->xgxs_efifo_cfg);
if (val64 != 0x000000001923141EULL) {
fail = 1;
DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
}
val64 = 0x5A5A5A5A5A5A5A5AULL;
writeq(val64, &bar0->xmsi_data);
val64 = readq(&bar0->xmsi_data);
if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
fail = 1;
DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
}
val64 = 0xA5A5A5A5A5A5A5A5ULL;
writeq(val64, &bar0->xmsi_data);
val64 = readq(&bar0->xmsi_data);
if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
fail = 1;
DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
}
*data = fail;
return fail;
}
/**
* s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
* @sp : private member of the device structure, which is a pointer to the
* s2io_nic structure.
* @data:variable that returns the result of each of the test conducted by
* the driver.
* Description:
* Verify that EEPROM in the xena can be programmed using I2C_CONTROL
* register.
* Return value:
* 0 on success.
*/
static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
{
int fail = 0;
u64 ret_data, org_4F0, org_7F0;
u8 saved_4F0 = 0, saved_7F0 = 0;
struct net_device *dev = sp->dev;
/* Test Write Error at offset 0 */
/* Note that SPI interface allows write access to all areas
* of EEPROM. Hence doing all negative testing only for Xframe I.
*/
if (sp->device_type == XFRAME_I_DEVICE)
if (!write_eeprom(sp, 0, 0, 3))
fail = 1;
/* Save current values at offsets 0x4F0 and 0x7F0 */
if (!read_eeprom(sp, 0x4F0, &org_4F0))
saved_4F0 = 1;
if (!read_eeprom(sp, 0x7F0, &org_7F0))
saved_7F0 = 1;
/* Test Write at offset 4f0 */
if (write_eeprom(sp, 0x4F0, 0x012345, 3))
fail = 1;
if (read_eeprom(sp, 0x4F0, &ret_data))
fail = 1;
if (ret_data != 0x012345) {
DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
"Data written %llx Data read %llx\n",
dev->name, (unsigned long long)0x12345,
(unsigned long long)ret_data);
fail = 1;
}
	/* Reset the EEPROM data to FFFF */
write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
/* Test Write Request Error at offset 0x7c */
if (sp->device_type == XFRAME_I_DEVICE)
if (!write_eeprom(sp, 0x07C, 0, 3))
fail = 1;
/* Test Write Request at offset 0x7f0 */
if (write_eeprom(sp, 0x7F0, 0x012345, 3))
fail = 1;
if (read_eeprom(sp, 0x7F0, &ret_data))
fail = 1;
if (ret_data != 0x012345) {
DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
"Data written %llx Data read %llx\n",
dev->name, (unsigned long long)0x12345,
(unsigned long long)ret_data);
fail = 1;
}
	/* Reset the EEPROM data to FFFF */
write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
if (sp->device_type == XFRAME_I_DEVICE) {
/* Test Write Error at offset 0x80 */
if (!write_eeprom(sp, 0x080, 0, 3))
fail = 1;
/* Test Write Error at offset 0xfc */
if (!write_eeprom(sp, 0x0FC, 0, 3))
fail = 1;
/* Test Write Error at offset 0x100 */
if (!write_eeprom(sp, 0x100, 0, 3))
fail = 1;
/* Test Write Error at offset 4ec */
if (!write_eeprom(sp, 0x4EC, 0, 3))
fail = 1;
}
/* Restore values at offsets 0x4F0 and 0x7F0 */
if (saved_4F0)
write_eeprom(sp, 0x4F0, org_4F0, 3);
if (saved_7F0)
write_eeprom(sp, 0x7F0, org_7F0, 3);
*data = fail;
return fail;
}
/**
 * s2io_bist_test - invokes the MemBist test of the card.
* @sp : private member of the device structure, which is a pointer to the
* s2io_nic structure.
* @data:variable that returns the result of each of the test conducted by
* the driver.
* Description:
* This invokes the MemBist test of the card. We give around
* 2 secs time for the Test to complete. If it's still not complete
 * within this period, we consider that the test failed.
* Return value:
* 0 on success and -1 on failure.
*/
static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
{
u8 bist = 0;
int cnt = 0, ret = -1;
pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
bist |= PCI_BIST_START;
pci_write_config_word(sp->pdev, PCI_BIST, bist);
while (cnt < 20) {
pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
if (!(bist & PCI_BIST_START)) {
*data = (bist & PCI_BIST_CODE_MASK);
ret = 0;
break;
}
msleep(100);
cnt++;
}
return ret;
}
/**
 * s2io_link_test - verifies the link state of the nic
 * @sp : private member of the device structure, which is a pointer to the
* s2io_nic structure.
* @data: variable that returns the result of each of the test conducted by
* the driver.
* Description:
* The function verifies the link state of the NIC and updates the input
* argument 'data' appropriately.
* Return value:
* 0 on success.
*/
static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
{
struct XENA_dev_config __iomem *bar0 = sp->bar0;
u64 val64;
val64 = readq(&bar0->adapter_status);
if (!(LINK_IS_UP(val64)))
*data = 1;
else
*data = 0;
return *data;
}
/**
* s2io_rldram_test - offline test for access to the RldRam chip on the NIC
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data : variable that returns the result of each of the test
 * conducted by the driver.
* Description:
* This is one of the offline test that tests the read and write
* access to the RldRam chip on the NIC.
* Return value:
* 0 on success.
*/
static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
{
struct XENA_dev_config __iomem *bar0 = sp->bar0;
u64 val64;
int cnt, iteration = 0, test_fail = 0;
val64 = readq(&bar0->adapter_control);
val64 &= ~ADAPTER_ECC_EN;
writeq(val64, &bar0->adapter_control);
val64 = readq(&bar0->mc_rldram_test_ctrl);
val64 |= MC_RLDRAM_TEST_MODE;
SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
val64 = readq(&bar0->mc_rldram_mrs);
val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
val64 |= MC_RLDRAM_MRS_ENABLE;
SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
while (iteration < 2) {
val64 = 0x55555555aaaa0000ULL;
if (iteration == 1)
val64 ^= 0xFFFFFFFFFFFF0000ULL;
writeq(val64, &bar0->mc_rldram_test_d0);
val64 = 0xaaaa5a5555550000ULL;
if (iteration == 1)
val64 ^= 0xFFFFFFFFFFFF0000ULL;
writeq(val64, &bar0->mc_rldram_test_d1);
val64 = 0x55aaaaaaaa5a0000ULL;
if (iteration == 1)
val64 ^= 0xFFFFFFFFFFFF0000ULL;
writeq(val64, &bar0->mc_rldram_test_d2);
val64 = (u64) (0x0000003ffffe0100ULL);
writeq(val64, &bar0->mc_rldram_test_add);
val64 = MC_RLDRAM_TEST_MODE |
MC_RLDRAM_TEST_WRITE |
MC_RLDRAM_TEST_GO;
SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
for (cnt = 0; cnt < 5; cnt++) {
val64 = readq(&bar0->mc_rldram_test_ctrl);
if (val64 & MC_RLDRAM_TEST_DONE)
break;
msleep(200);
}
if (cnt == 5)
break;
val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
for (cnt = 0; cnt < 5; cnt++) {
val64 = readq(&bar0->mc_rldram_test_ctrl);
if (val64 & MC_RLDRAM_TEST_DONE)
break;
msleep(500);
}
if (cnt == 5)
break;
val64 = readq(&bar0->mc_rldram_test_ctrl);
if (!(val64 & MC_RLDRAM_TEST_PASS))
test_fail = 1;
iteration++;
}
*data = test_fail;
/* Bring the adapter out of test mode */
SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
return test_fail;
}
/**
 * s2io_ethtool_test - conducts 6 tests to determine the health of the card.
* @sp : private member of the device structure, which is a pointer to the
* s2io_nic structure.
 * @ethtest : pointer to an ethtool command specific structure that will be
* returned to the user.
* @data : variable that returns the result of each of the test
* conducted by the driver.
* Description:
* This function conducts 6 tests ( 4 offline and 2 online) to determine
* the health of the card.
* Return value:
* void
*/
static void s2io_ethtool_test(struct net_device *dev,
struct ethtool_test *ethtest,
uint64_t *data)
{
struct s2io_nic *sp = netdev_priv(dev);
int orig_state = netif_running(sp->dev);
if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
/* Offline Tests. */
if (orig_state)
s2io_close(sp->dev);
if (s2io_register_test(sp, &data[0]))
ethtest->flags |= ETH_TEST_FL_FAILED;
s2io_reset(sp);
if (s2io_rldram_test(sp, &data[3]))
ethtest->flags |= ETH_TEST_FL_FAILED;
s2io_reset(sp);
if (s2io_eeprom_test(sp, &data[1]))
ethtest->flags |= ETH_TEST_FL_FAILED;
if (s2io_bist_test(sp, &data[4]))
ethtest->flags |= ETH_TEST_FL_FAILED;
if (orig_state)
s2io_open(sp->dev);
data[2] = 0;
} else {
/* Online Tests. */
if (!orig_state) {
DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
dev->name);
data[0] = -1;
data[1] = -1;
data[2] = -1;
data[3] = -1;
data[4] = -1;
}
if (s2io_link_test(sp, &data[2]))
ethtest->flags |= ETH_TEST_FL_FAILED;
data[0] = 0;
data[1] = 0;
data[3] = 0;
data[4] = 0;
}
}
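/*
 * Usage note (illustrative): "ethtool -t <dev>" selects the offline
 * variant by default, setting ETH_TEST_FL_OFFLINE and running the
 * register, EEPROM, BIST and RLDRAM tests above (briefly closing a
 * running interface), while "ethtool -t <dev> online" runs only the
 * link test.
 */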
static void s2io_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *estats,
u64 *tmp_stats)
{
int i = 0, k;
struct s2io_nic *sp = netdev_priv(dev);
struct stat_block *stats = sp->mac_control.stats_info;
struct swStat *swstats = &stats->sw_stat;
struct xpakStat *xstats = &stats->xpak_stat;
s2io_updt_stats(sp);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->tmac_frms_oflow) << 32 |
le32_to_cpu(stats->tmac_frms);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
le32_to_cpu(stats->tmac_data_octets);
tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
le32_to_cpu(stats->tmac_mcst_frms);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
le32_to_cpu(stats->tmac_bcst_frms);
tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
le32_to_cpu(stats->tmac_ttl_octets);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
le32_to_cpu(stats->tmac_ucst_frms);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
le32_to_cpu(stats->tmac_nucst_frms);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
le32_to_cpu(stats->tmac_any_err_frms);
tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
le32_to_cpu(stats->tmac_vld_ip);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
le32_to_cpu(stats->tmac_drop_ip);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
le32_to_cpu(stats->tmac_icmp);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
le32_to_cpu(stats->tmac_rst_tcp);
tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
le32_to_cpu(stats->tmac_udp);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
le32_to_cpu(stats->rmac_vld_frms);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
le32_to_cpu(stats->rmac_data_octets);
tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
le32_to_cpu(stats->rmac_vld_mcst_frms);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
le32_to_cpu(stats->rmac_vld_bcst_frms);
tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
le32_to_cpu(stats->rmac_ttl_octets);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
| le32_to_cpu(stats->rmac_accepted_ucst_frms);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
<< 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
le32_to_cpu(stats->rmac_discarded_frms);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->rmac_drop_events_oflow)
<< 32 | le32_to_cpu(stats->rmac_drop_events);
tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
le32_to_cpu(stats->rmac_usized_frms);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
le32_to_cpu(stats->rmac_osized_frms);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
le32_to_cpu(stats->rmac_frag_frms);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
le32_to_cpu(stats->rmac_jabber_frms);
tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
le32_to_cpu(stats->rmac_ip);
tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
le32_to_cpu(stats->rmac_drop_ip);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
le32_to_cpu(stats->rmac_icmp);
tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
le32_to_cpu(stats->rmac_udp);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
le32_to_cpu(stats->rmac_err_drp_udp);
tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
le32_to_cpu(stats->rmac_pause_cnt);
tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
tmp_stats[i++] =
(u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
le32_to_cpu(stats->rmac_accepted_ip);
tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);
tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);
/* Enhanced statistics exist only for Hercules */
if (sp->device_type == XFRAME_II_DEVICE) {
tmp_stats[i++] =
le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
tmp_stats[i++] =
le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
tmp_stats[i++] =
le64_to_cpu(stats->rmac_ttl_8192_max_frms);
tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
}
tmp_stats[i++] = 0;
tmp_stats[i++] = swstats->single_ecc_errs;
tmp_stats[i++] = swstats->double_ecc_errs;
tmp_stats[i++] = swstats->parity_err_cnt;
tmp_stats[i++] = swstats->serious_err_cnt;
tmp_stats[i++] = swstats->soft_reset_cnt;
tmp_stats[i++] = swstats->fifo_full_cnt;
for (k = 0; k < MAX_RX_RINGS; k++)
tmp_stats[i++] = swstats->ring_full_cnt[k];
tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
tmp_stats[i++] = xstats->alarm_laser_output_power_high;
tmp_stats[i++] = xstats->alarm_laser_output_power_low;
tmp_stats[i++] = xstats->warn_transceiver_temp_high;
tmp_stats[i++] = xstats->warn_transceiver_temp_low;
tmp_stats[i++] = xstats->warn_laser_bias_current_high;
tmp_stats[i++] = xstats->warn_laser_bias_current_low;
tmp_stats[i++] = xstats->warn_laser_output_power_high;
tmp_stats[i++] = xstats->warn_laser_output_power_low;
tmp_stats[i++] = swstats->clubbed_frms_cnt;
tmp_stats[i++] = swstats->sending_both;
tmp_stats[i++] = swstats->outof_sequence_pkts;
tmp_stats[i++] = swstats->flush_max_pkts;
if (swstats->num_aggregations) {
u64 tmp = swstats->sum_avg_pkts_aggregated;
int count = 0;
/*
* Since 64-bit divide does not work on all platforms,
* do repeated subtraction.
*/
while (tmp >= swstats->num_aggregations) {
tmp -= swstats->num_aggregations;
count++;
}
tmp_stats[i++] = count;
} else
tmp_stats[i++] = 0;
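	/*
	 * A hedged alternative sketch (not what this code does): the
	 * kernel's do_div() helper from <asm/div64.h> performs the same
	 * division portably in place, assuming num_aggregations fits in
	 * 32 bits:
	 *
	 *	u64 avg = swstats->sum_avg_pkts_aggregated;
	 *
	 *	do_div(avg, swstats->num_aggregations);
	 *
	 * (avg now holds the quotient; do_div() returns the remainder.)
	 */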
tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
tmp_stats[i++] = swstats->pci_map_fail_cnt;
tmp_stats[i++] = swstats->watchdog_timer_cnt;
tmp_stats[i++] = swstats->mem_allocated;
tmp_stats[i++] = swstats->mem_freed;
tmp_stats[i++] = swstats->link_up_cnt;
tmp_stats[i++] = swstats->link_down_cnt;
tmp_stats[i++] = swstats->link_up_time;
tmp_stats[i++] = swstats->link_down_time;
tmp_stats[i++] = swstats->tx_buf_abort_cnt;
tmp_stats[i++] = swstats->tx_desc_abort_cnt;
tmp_stats[i++] = swstats->tx_parity_err_cnt;
tmp_stats[i++] = swstats->tx_link_loss_cnt;
tmp_stats[i++] = swstats->tx_list_proc_err_cnt;
tmp_stats[i++] = swstats->rx_parity_err_cnt;
tmp_stats[i++] = swstats->rx_abort_cnt;
tmp_stats[i++] = swstats->rx_parity_abort_cnt;
tmp_stats[i++] = swstats->rx_rda_fail_cnt;
tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
tmp_stats[i++] = swstats->rx_fcs_err_cnt;
tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
tmp_stats[i++] = swstats->rx_unkn_err_cnt;
tmp_stats[i++] = swstats->tda_err_cnt;
tmp_stats[i++] = swstats->pfc_err_cnt;
tmp_stats[i++] = swstats->pcc_err_cnt;
tmp_stats[i++] = swstats->tti_err_cnt;
tmp_stats[i++] = swstats->tpa_err_cnt;
tmp_stats[i++] = swstats->sm_err_cnt;
tmp_stats[i++] = swstats->lso_err_cnt;
tmp_stats[i++] = swstats->mac_tmac_err_cnt;
tmp_stats[i++] = swstats->mac_rmac_err_cnt;
tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
tmp_stats[i++] = swstats->rc_err_cnt;
tmp_stats[i++] = swstats->prc_pcix_err_cnt;
tmp_stats[i++] = swstats->rpa_err_cnt;
tmp_stats[i++] = swstats->rda_err_cnt;
tmp_stats[i++] = swstats->rti_err_cnt;
tmp_stats[i++] = swstats->mc_err_cnt;
}
static int s2io_ethtool_get_regs_len(struct net_device *dev)
{
return XENA_REG_SPACE;
}
static int s2io_get_eeprom_len(struct net_device *dev)
{
return XENA_EEPROM_SPACE;
}
static int s2io_get_sset_count(struct net_device *dev, int sset)
{
struct s2io_nic *sp = netdev_priv(dev);
switch (sset) {
case ETH_SS_TEST:
return S2IO_TEST_LEN;
case ETH_SS_STATS:
switch (sp->device_type) {
case XFRAME_I_DEVICE:
return XFRAME_I_STAT_LEN;
case XFRAME_II_DEVICE:
return XFRAME_II_STAT_LEN;
default:
return 0;
}
default:
return -EOPNOTSUPP;
}
}
static void s2io_ethtool_get_strings(struct net_device *dev,
u32 stringset, u8 *data)
{
int stat_size = 0;
struct s2io_nic *sp = netdev_priv(dev);
switch (stringset) {
case ETH_SS_TEST:
memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
break;
case ETH_SS_STATS:
stat_size = sizeof(ethtool_xena_stats_keys);
		memcpy(data, &ethtool_xena_stats_keys, stat_size);
if (sp->device_type == XFRAME_II_DEVICE) {
memcpy(data + stat_size,
			       &ethtool_enhanced_stats_keys,
sizeof(ethtool_enhanced_stats_keys));
stat_size += sizeof(ethtool_enhanced_stats_keys);
}
		memcpy(data + stat_size, &ethtool_driver_stats_keys,
sizeof(ethtool_driver_stats_keys));
}
}
static int s2io_set_features(struct net_device *dev, u32 features)
{
struct s2io_nic *sp = netdev_priv(dev);
u32 changed = (features ^ dev->features) & NETIF_F_LRO;
if (changed && netif_running(dev)) {
int rc;
s2io_stop_all_tx_queue(sp);
s2io_card_down(sp);
dev->features = features;
rc = s2io_card_up(sp);
if (rc)
s2io_reset(sp);
else
s2io_start_all_tx_queue(sp);
return rc ? rc : 1;
}
return 0;
}
static const struct ethtool_ops netdev_ethtool_ops = {
.get_settings = s2io_ethtool_gset,
.set_settings = s2io_ethtool_sset,
.get_drvinfo = s2io_ethtool_gdrvinfo,
.get_regs_len = s2io_ethtool_get_regs_len,
.get_regs = s2io_ethtool_gregs,
.get_link = ethtool_op_get_link,
.get_eeprom_len = s2io_get_eeprom_len,
.get_eeprom = s2io_ethtool_geeprom,
.set_eeprom = s2io_ethtool_seeprom,
.get_ringparam = s2io_ethtool_gringparam,
.get_pauseparam = s2io_ethtool_getpause_data,
.set_pauseparam = s2io_ethtool_setpause_data,
.self_test = s2io_ethtool_test,
.get_strings = s2io_ethtool_get_strings,
.set_phys_id = s2io_ethtool_set_led,
.get_ethtool_stats = s2io_get_ethtool_stats,
.get_sset_count = s2io_get_sset_count,
};
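/*
 * These operations are attached to the net_device during probe; a
 * minimal sketch of that hookup with the kernel API of this era
 * (the probe path itself is further below):
 *
 *	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
 */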
/**
* s2io_ioctl - Entry point for the Ioctl
* @dev : Device pointer.
 * @ifr : An IOCTL specific structure, that can contain a pointer to
* a proprietary structure used to pass information to the driver.
* @cmd : This is used to distinguish between the different commands that
* can be passed to the IOCTL functions.
* Description:
 * Currently there is no special functionality supported in IOCTL, hence
 * the function always returns -EOPNOTSUPP.
*/
static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
return -EOPNOTSUPP;
}
/**
* s2io_change_mtu - entry point to change MTU size for the device.
* @dev : device pointer.
* @new_mtu : the new MTU size for the device.
* Description: A driver entry point to change MTU size for the device.
* Before changing the MTU the device must be stopped.
* Return value:
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
static int s2io_change_mtu(struct net_device *dev, int new_mtu)
{
struct s2io_nic *sp = netdev_priv(dev);
int ret = 0;
if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n", dev->name);
return -EPERM;
}
dev->mtu = new_mtu;
if (netif_running(dev)) {
s2io_stop_all_tx_queue(sp);
s2io_card_down(sp);
ret = s2io_card_up(sp);
if (ret) {
DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
__func__);
return ret;
}
s2io_wake_all_tx_queue(sp);
} else { /* Device is down */
struct XENA_dev_config __iomem *bar0 = sp->bar0;
u64 val64 = new_mtu;
writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
}
return ret;
}
/**
 * s2io_set_link - Set the Link status
 * @work: work_struct containing a pointer to the device private structure
* Description: Sets the link status for the adapter
*/
static void s2io_set_link(struct work_struct *work)
{
struct s2io_nic *nic = container_of(work, struct s2io_nic,
set_link_task);
struct net_device *dev = nic->dev;
struct XENA_dev_config __iomem *bar0 = nic->bar0;
register u64 val64;
u16 subid;
rtnl_lock();
if (!netif_running(dev))
goto out_unlock;
if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
/* The card is being reset, no point doing anything */
goto out_unlock;
}
subid = nic->pdev->subsystem_device;
if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
/*
* Allow a small delay for the NICs self initiated
* cleanup to complete.
*/
msleep(100);
}
val64 = readq(&bar0->adapter_status);
if (LINK_IS_UP(val64)) {
if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
if (verify_xena_quiescence(nic)) {
val64 = readq(&bar0->adapter_control);
val64 |= ADAPTER_CNTL_EN;
writeq(val64, &bar0->adapter_control);
if (CARDS_WITH_FAULTY_LINK_INDICATORS(
nic->device_type, subid)) {
val64 = readq(&bar0->gpio_control);
val64 |= GPIO_CTRL_GPIO_0;
writeq(val64, &bar0->gpio_control);
val64 = readq(&bar0->gpio_control);
} else {
val64 |= ADAPTER_LED_ON;
writeq(val64, &bar0->adapter_control);
}
nic->device_enabled_once = true;
} else {
DBG_PRINT(ERR_DBG,
"%s: Error: device is not Quiescent\n",
dev->name);
s2io_stop_all_tx_queue(nic);
}
}
val64 = readq(&bar0->adapter_control);
val64 |= ADAPTER_LED_ON;
writeq(val64, &bar0->adapter_control);
s2io_link(nic, LINK_UP);
} else {
if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
subid)) {
val64 = readq(&bar0->gpio_control);
val64 &= ~GPIO_CTRL_GPIO_0;
writeq(val64, &bar0->gpio_control);
val64 = readq(&bar0->gpio_control);
}
/* turn off LED */
val64 = readq(&bar0->adapter_control);
val64 = val64 & (~ADAPTER_LED_ON);
writeq(val64, &bar0->adapter_control);
s2io_link(nic, LINK_DOWN);
}
clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
out_unlock:
rtnl_unlock();
}
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
struct buffAdd *ba,
struct sk_buff **skb, u64 *temp0, u64 *temp1,
u64 *temp2, int size)
{
struct net_device *dev = sp->dev;
struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
/* allocate skb */
if (*skb) {
DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
/*
			 * As Rx frames are not going to be processed,
* using same mapped address for the Rxd
* buffer pointer
*/
rxdp1->Buffer0_ptr = *temp0;
} else {
*skb = dev_alloc_skb(size);
if (!(*skb)) {
DBG_PRINT(INFO_DBG,
"%s: Out of memory to allocate %s\n",
dev->name, "1 buf mode SKBs");
stats->mem_alloc_fail_cnt++;
return -ENOMEM ;
}
stats->mem_allocated += (*skb)->truesize;
/* storing the mapped addr in a temp variable
			 * such that it will be used for the next rxd whose
* Host Control is NULL
*/
rxdp1->Buffer0_ptr = *temp0 =
pci_map_single(sp->pdev, (*skb)->data,
size - NET_IP_ALIGN,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
goto memalloc_failed;
rxdp->Host_Control = (unsigned long) (*skb);
}
} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
/* Two buffer Mode */
if (*skb) {
rxdp3->Buffer2_ptr = *temp2;
rxdp3->Buffer0_ptr = *temp0;
rxdp3->Buffer1_ptr = *temp1;
} else {
*skb = dev_alloc_skb(size);
if (!(*skb)) {
DBG_PRINT(INFO_DBG,
"%s: Out of memory to allocate %s\n",
dev->name,
"2 buf mode SKBs");
stats->mem_alloc_fail_cnt++;
return -ENOMEM;
}
stats->mem_allocated += (*skb)->truesize;
rxdp3->Buffer2_ptr = *temp2 =
pci_map_single(sp->pdev, (*skb)->data,
dev->mtu + 4,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
goto memalloc_failed;
rxdp3->Buffer0_ptr = *temp0 =
pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(sp->pdev,
rxdp3->Buffer0_ptr)) {
pci_unmap_single(sp->pdev,
(dma_addr_t)rxdp3->Buffer2_ptr,
dev->mtu + 4,
PCI_DMA_FROMDEVICE);
goto memalloc_failed;
}
rxdp->Host_Control = (unsigned long) (*skb);
/* Buffer-1 will be dummy buffer not used */
rxdp3->Buffer1_ptr = *temp1 =
pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(sp->pdev,
rxdp3->Buffer1_ptr)) {
pci_unmap_single(sp->pdev,
(dma_addr_t)rxdp3->Buffer0_ptr,
BUF0_LEN, PCI_DMA_FROMDEVICE);
pci_unmap_single(sp->pdev,
(dma_addr_t)rxdp3->Buffer2_ptr,
dev->mtu + 4,
PCI_DMA_FROMDEVICE);
goto memalloc_failed;
}
}
}
return 0;
memalloc_failed:
stats->pci_map_fail_cnt++;
stats->mem_freed += (*skb)->truesize;
dev_kfree_skb(*skb);
return -ENOMEM;
}
static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
int size)
{
struct net_device *dev = sp->dev;
if (sp->rxd_mode == RXD_MODE_1) {
rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
} else if (sp->rxd_mode == RXD_MODE_3B) {
rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
}
}
static int rxd_owner_bit_reset(struct s2io_nic *sp)
{
int i, j, k, blk_cnt = 0, size;
struct config_param *config = &sp->config;
struct mac_info *mac_control = &sp->mac_control;
struct net_device *dev = sp->dev;
struct RxD_t *rxdp = NULL;
struct sk_buff *skb = NULL;
struct buffAdd *ba = NULL;
u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
/* Calculate the size based on ring mode */
size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
if (sp->rxd_mode == RXD_MODE_1)
size += NET_IP_ALIGN;
else if (sp->rxd_mode == RXD_MODE_3B)
size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
for (i = 0; i < config->rx_ring_num; i++) {
struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
struct ring_info *ring = &mac_control->rings[i];
blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);
for (j = 0; j < blk_cnt; j++) {
for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
if (sp->rxd_mode == RXD_MODE_3B)
ba = &ring->ba[j][k];
if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
(u64 *)&temp0_64,
(u64 *)&temp1_64,
(u64 *)&temp2_64,
size) == -ENOMEM) {
return 0;
}
set_rxd_buffer_size(sp, rxdp, size);
wmb();
/* flip the Ownership bit to Hardware */
rxdp->Control_1 |= RXD_OWN_XENA;
}
}
}
return 0;
}
static int s2io_add_isr(struct s2io_nic *sp)
{
int ret = 0;
struct net_device *dev = sp->dev;
int err = 0;
if (sp->config.intr_type == MSI_X)
ret = s2io_enable_msi_x(sp);
if (ret) {
DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
sp->config.intr_type = INTA;
}
/*
* Store the values of the MSIX table in
* the struct s2io_nic structure
*/
store_xmsi_data(sp);
/* After proper initialization of H/W, register ISR */
if (sp->config.intr_type == MSI_X) {
int i, msix_rx_cnt = 0;
for (i = 0; i < sp->num_entries; i++) {
if (sp->s2io_entries[i].in_use == MSIX_FLG) {
if (sp->s2io_entries[i].type ==
MSIX_RING_TYPE) {
sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
dev->name, i);
err = request_irq(sp->entries[i].vector,
s2io_msix_ring_handle,
0,
sp->desc[i],
sp->s2io_entries[i].arg);
} else if (sp->s2io_entries[i].type ==
MSIX_ALARM_TYPE) {
sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
dev->name, i);
err = request_irq(sp->entries[i].vector,
s2io_msix_fifo_handle,
0,
sp->desc[i],
sp->s2io_entries[i].arg);
}
/* if either data or addr is zero print it. */
if (!(sp->msix_info[i].addr &&
sp->msix_info[i].data)) {
DBG_PRINT(ERR_DBG,
"%s @Addr:0x%llx Data:0x%llx\n",
sp->desc[i],
(unsigned long long)
sp->msix_info[i].addr,
(unsigned long long)
ntohl(sp->msix_info[i].data));
} else
msix_rx_cnt++;
if (err) {
remove_msix_isr(sp);
DBG_PRINT(ERR_DBG,
"%s:MSI-X-%d registration "
"failed\n", dev->name, i);
DBG_PRINT(ERR_DBG,
"%s: Defaulting to INTA\n",
dev->name);
sp->config.intr_type = INTA;
break;
}
sp->s2io_entries[i].in_use =
MSIX_REGISTERED_SUCCESS;
}
}
if (!err) {
pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
DBG_PRINT(INFO_DBG,
"MSI-X-TX entries enabled through alarm vector\n");
}
}
if (sp->config.intr_type == INTA) {
err = request_irq((int)sp->pdev->irq, s2io_isr, IRQF_SHARED,
sp->name, dev);
if (err) {
DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
dev->name);
return -1;
}
}
return 0;
}
static void s2io_rem_isr(struct s2io_nic *sp)
{
if (sp->config.intr_type == MSI_X)
remove_msix_isr(sp);
else
remove_inta_isr(sp);
}
static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
{
int cnt = 0;
struct XENA_dev_config __iomem *bar0 = sp->bar0;
register u64 val64 = 0;
struct config_param *config;
config = &sp->config;
if (!is_s2io_card_up(sp))
return;
del_timer_sync(&sp->alarm_timer);
/* If s2io_set_link task is executing, wait till it completes. */
while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
msleep(50);
clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
/* Disable napi */
if (sp->config.napi) {
int off = 0;
if (config->intr_type == MSI_X) {
for (; off < sp->config.rx_ring_num; off++)
napi_disable(&sp->mac_control.rings[off].napi);
		} else
			napi_disable(&sp->napi);
}
/* disable Tx and Rx traffic on the NIC */
if (do_io)
stop_nic(sp);
s2io_rem_isr(sp);
/* stop the tx queue, indicate link down */
s2io_link(sp, LINK_DOWN);
/* Check if the device is Quiescent and then Reset the NIC */
while (do_io) {
/* As per the HW requirement we need to replenish the
* receive buffer to avoid the ring bump. Since there is
		 * no intention of processing the Rx frame at this point, we are
		 * just setting the ownership bit of rxd in each Rx
		 * ring to HW and setting the appropriate buffer size
* based on the ring mode
*/
rxd_owner_bit_reset(sp);
val64 = readq(&bar0->adapter_status);
if (verify_xena_quiescence(sp)) {
if (verify_pcc_quiescent(sp, sp->device_enabled_once))
break;
}
msleep(50);
cnt++;
if (cnt == 10) {
DBG_PRINT(ERR_DBG, "Device not Quiescent - "
"adapter status reads 0x%llx\n",
(unsigned long long)val64);
break;
}
}
if (do_io)
s2io_reset(sp);
/* Free all Tx buffers */
free_tx_buffers(sp);
/* Free all Rx buffers */
free_rx_buffers(sp);
clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
}
static void s2io_card_down(struct s2io_nic *sp)
{
do_s2io_card_down(sp, 1);
}
static int s2io_card_up(struct s2io_nic *sp)
{
int i, ret = 0;
struct config_param *config;
struct mac_info *mac_control;
struct net_device *dev = (struct net_device *)sp->dev;
u16 interruptible;
/* Initialize the H/W I/O registers */
ret = init_nic(sp);
if (ret != 0) {
DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
dev->name);
if (ret != -EIO)
s2io_reset(sp);
return ret;
}
/*
* Initializing the Rx buffers. For now we are considering only 1
* Rx ring and initializing buffers into 30 Rx blocks
*/
config = &sp->config;
mac_control = &sp->mac_control;
for (i = 0; i < config->rx_ring_num; i++) {
struct ring_info *ring = &mac_control->rings[i];
ring->mtu = dev->mtu;
ring->lro = !!(dev->features & NETIF_F_LRO);
ret = fill_rx_buffers(sp, ring, 1);
if (ret) {
DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
dev->name);
s2io_reset(sp);
free_rx_buffers(sp);
return -ENOMEM;
}
DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
ring->rx_bufs_left);
}
/* Initialise napi */
if (config->napi) {
if (config->intr_type == MSI_X) {
for (i = 0; i < sp->config.rx_ring_num; i++)
napi_enable(&sp->mac_control.rings[i].napi);
} else {
napi_enable(&sp->napi);
}
}
/* Maintain the state prior to the open */
if (sp->promisc_flg)
sp->promisc_flg = 0;
if (sp->m_cast_flg) {
sp->m_cast_flg = 0;
sp->all_multi_pos = 0;
}
/* Setting its receive mode */
s2io_set_multicast(dev);
if (dev->features & NETIF_F_LRO) {
/* Initialize max aggregatable pkts per session based on MTU */
sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
/* Check if we can use (if specified) user provided value */
if (lro_max_pkts < sp->lro_max_aggr_per_sess)
sp->lro_max_aggr_per_sess = lro_max_pkts;
}
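	/*
	 * Worked example: with the default Ethernet MTU of 1500 the
	 * computation above yields 65535 / 1500 = 43 aggregatable packets
	 * per session, keeping an aggregated frame under the 64KB IP
	 * length limit.
	 */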
/* Enable Rx Traffic and interrupts on the NIC */
if (start_nic(sp)) {
DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
s2io_reset(sp);
free_rx_buffers(sp);
return -ENODEV;
}
/* Add interrupt service routine */
if (s2io_add_isr(sp) != 0) {
if (sp->config.intr_type == MSI_X)
s2io_rem_isr(sp);
s2io_reset(sp);
free_rx_buffers(sp);
return -ENODEV;
}
S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
set_bit(__S2IO_STATE_CARD_UP, &sp->state);
/* Enable select interrupts */
en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
if (sp->config.intr_type != INTA) {
interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
} else {
interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
interruptible |= TX_PIC_INTR;
en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
}
return 0;
}
/**
* s2io_restart_nic - Resets the NIC.
 * @work : work_struct containing a pointer to the device private structure
* Description:
* This function is scheduled to be run by the s2io_tx_watchdog
* function after 0.5 secs to reset the NIC. The idea is to reduce
* the run time of the watch dog routine which is run holding a
* spin lock.
*/
static void s2io_restart_nic(struct work_struct *work)
{
struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
struct net_device *dev = sp->dev;
rtnl_lock();
if (!netif_running(dev))
goto out_unlock;
s2io_card_down(sp);
if (s2io_card_up(sp)) {
DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
}
s2io_wake_all_tx_queue(sp);
DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
out_unlock:
rtnl_unlock();
}
/**
* s2io_tx_watchdog - Watchdog for transmit side.
* @dev : Pointer to net device structure
* Description:
* This function is triggered if the Tx Queue is stopped
* for a pre-defined amount of time when the Interface is still up.
* If the Interface is jammed in such a situation, the hardware is
* reset (by s2io_close) and restarted again (by s2io_open) to
* overcome any problem that might have been caused in the hardware.
* Return value:
* void
*/
static void s2io_tx_watchdog(struct net_device *dev)
{
struct s2io_nic *sp = netdev_priv(dev);
struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
if (netif_carrier_ok(dev)) {
swstats->watchdog_timer_cnt++;
schedule_work(&sp->rst_timer_task);
swstats->soft_reset_cnt++;
}
}
/**
* rx_osm_handler - To perform some OS related operations on SKB.
 * @sp: private member of the device structure, pointer to the s2io_nic structure.
* @skb : the socket buffer pointer.
* @len : length of the packet
* @cksum : FCS checksum of the frame.
* @ring_no : the ring from which this RxD was extracted.
* Description:
 * This function is called by the Rx interrupt service routine to perform
* some OS related operations on the SKB before passing it to the upper
* layers. It mainly checks if the checksum is OK, if so adds it to the
 * SKB's cksum variable, increments the Rx packet count and passes the SKB
* to the upper layer. If the checksum is wrong, it increments the Rx
* packet error count, frees the SKB and returns error.
* Return value:
* SUCCESS on success and -1 on failure.
*/
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
struct s2io_nic *sp = ring_data->nic;
struct net_device *dev = (struct net_device *)ring_data->dev;
struct sk_buff *skb = (struct sk_buff *)
((unsigned long)rxdp->Host_Control);
int ring_no = ring_data->ring_no;
u16 l3_csum, l4_csum;
unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
struct lro *uninitialized_var(lro);
u8 err_mask;
struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
skb->dev = dev;
if (err) {
/* Check for parity error */
if (err & 0x1)
swstats->parity_err_cnt++;
err_mask = err >> 48;
switch (err_mask) {
case 1:
swstats->rx_parity_err_cnt++;
break;
case 2:
swstats->rx_abort_cnt++;
break;
case 3:
swstats->rx_parity_abort_cnt++;
break;
case 4:
swstats->rx_rda_fail_cnt++;
break;
case 5:
swstats->rx_unkn_prot_cnt++;
break;
case 6:
swstats->rx_fcs_err_cnt++;
break;
case 7:
swstats->rx_buf_size_err_cnt++;
break;
case 8:
swstats->rx_rxd_corrupt_cnt++;
break;
case 15:
swstats->rx_unkn_err_cnt++;
break;
}
/*
* Drop the packet if bad transfer code. Exception being
* 0x5, which could be due to unsupported IPv6 extension header.
* In this case, we let stack handle the packet.
* Note that in this case, since checksum will be incorrect,
* stack will validate the same.
*/
if (err_mask != 0x5) {
DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
dev->name, err_mask);
dev->stats.rx_crc_errors++;
swstats->mem_freed
+= skb->truesize;
dev_kfree_skb(skb);
ring_data->rx_bufs_left -= 1;
rxdp->Host_Control = 0;
return 0;
}
}
rxdp->Host_Control = 0;
if (sp->rxd_mode == RXD_MODE_1) {
int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
skb_put(skb, len);
} else if (sp->rxd_mode == RXD_MODE_3B) {
int get_block = ring_data->rx_curr_get_info.block_index;
int get_off = ring_data->rx_curr_get_info.offset;
int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
unsigned char *buff = skb_push(skb, buf0_len);
struct buffAdd *ba = &ring_data->ba[get_block][get_off];
memcpy(buff, ba->ba_0, buf0_len);
skb_put(skb, buf2_len);
}
if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
((!ring_data->lro) ||
(ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
(dev->features & NETIF_F_RXCSUM)) {
l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
/*
* NIC verifies if the Checksum of the received
* frame is Ok or not and accordingly returns
* a flag in the RxD.
*/
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (ring_data->lro) {
u32 tcp_len = 0;
u8 *tcp;
int ret = 0;
ret = s2io_club_tcp_session(ring_data,
skb->data, &tcp,
&tcp_len, &lro,
rxdp, sp);
switch (ret) {
case 3: /* Begin anew */
lro->parent = skb;
goto aggregate;
case 1: /* Aggregate */
lro_append_pkt(sp, lro, skb, tcp_len);
goto aggregate;
case 4: /* Flush session */
lro_append_pkt(sp, lro, skb, tcp_len);
queue_rx_frame(lro->parent,
lro->vlan_tag);
clear_lro_session(lro);
swstats->flush_max_pkts++;
goto aggregate;
case 2: /* Flush both */
lro->parent->data_len = lro->frags_len;
swstats->sending_both++;
queue_rx_frame(lro->parent,
lro->vlan_tag);
clear_lro_session(lro);
goto send_up;
case 0: /* sessions exceeded */
case -1: /* non-TCP or not L2 aggregatable */
case 5: /*
* First pkt in session not
* L3/L4 aggregatable
*/
break;
default:
DBG_PRINT(ERR_DBG,
"%s: Samadhana!!\n",
__func__);
BUG();
}
}
} else {
/*
* Packet with erroneous checksum, let the
* upper layers deal with it.
*/
skb_checksum_none_assert(skb);
}
} else
skb_checksum_none_assert(skb);
swstats->mem_freed += skb->truesize;
send_up:
skb_record_rx_queue(skb, ring_no);
queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
aggregate:
sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
return SUCCESS;
}
/**
* s2io_link - stops/starts the Tx queue.
* @sp : private member of the device structure, which is a pointer to the
* s2io_nic structure.
 * @link : indicates whether link is UP/DOWN.
 * Description:
 * This function stops/starts the Tx queue depending on whether the link
 * status of the NIC is down or up. This is called by the Alarm
* interrupt handler whenever a link change interrupt comes up.
* Return value:
* void.
*/
static void s2io_link(struct s2io_nic *sp, int link)
{
struct net_device *dev = (struct net_device *)sp->dev;
struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
if (link != sp->last_link_state) {
init_tti(sp, link);
if (link == LINK_DOWN) {
DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
s2io_stop_all_tx_queue(sp);
netif_carrier_off(dev);
if (swstats->link_up_cnt)
swstats->link_up_time =
jiffies - sp->start_time;
swstats->link_down_cnt++;
} else {
DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
if (swstats->link_down_cnt)
swstats->link_down_time =
jiffies - sp->start_time;
swstats->link_up_cnt++;
netif_carrier_on(dev);
s2io_wake_all_tx_queue(sp);
}
}
sp->last_link_state = link;
sp->start_time = jiffies;
}
/**
 * s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
* @sp : private member of the device structure, which is a pointer to the
* s2io_nic structure.
* Description:
* This function initializes a few of the PCI and PCI-X configuration registers
* with recommended values.
* Return value:
* void
*/
static void s2io_init_pci(struct s2io_nic *sp)
{
u16 pci_cmd = 0, pcix_cmd = 0;
/* Enable Data Parity Error Recovery in PCI-X command register. */
pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
&(pcix_cmd));
pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
(pcix_cmd | 1));
pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
&(pcix_cmd));
/* Set the PErr Response bit in PCI command register. */
pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
pci_write_config_word(sp->pdev, PCI_COMMAND,
(pci_cmd | PCI_COMMAND_PARITY));
pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
}
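/*
* Illustrative sketch (not part of this driver) of the read-modify-write
* pattern used above to set a single bit in a config-space word:
*
* u16 cmd;
* pci_read_config_word(pdev, PCI_COMMAND, &cmd);
* pci_write_config_word(pdev, PCI_COMMAND, cmd | PCI_COMMAND_PARITY);
*
* The trailing reads after each write above simply re-fetch the
* registers once the update has been applied.
*/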
static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
u8 *dev_multiq)
{
int i;
if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
"(%d) not supported\n", tx_fifo_num);
if (tx_fifo_num < 1)
tx_fifo_num = 1;
else
tx_fifo_num = MAX_TX_FIFOS;
DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
}
if (multiq)
*dev_multiq = multiq;
if (tx_steering_type && (1 == tx_fifo_num)) {
if (tx_steering_type != TX_DEFAULT_STEERING)
DBG_PRINT(ERR_DBG,
"Tx steering is not supported with "
"one fifo. Disabling Tx steering.\n");
tx_steering_type = NO_STEERING;
}
if ((tx_steering_type < NO_STEERING) ||
(tx_steering_type > TX_DEFAULT_STEERING)) {
DBG_PRINT(ERR_DBG,
"Requested transmit steering not supported\n");
DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
tx_steering_type = NO_STEERING;
}
if (rx_ring_num > MAX_RX_RINGS) {
DBG_PRINT(ERR_DBG,
"Requested number of rx rings not supported\n");
DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
MAX_RX_RINGS);
rx_ring_num = MAX_RX_RINGS;
}
if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
"Defaulting to INTA\n");
*dev_intr_type = INTA;
}
if ((*dev_intr_type == MSI_X) &&
((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
(pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
"Defaulting to INTA\n");
*dev_intr_type = INTA;
}
if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
rx_ring_mode = 1;
}
for (i = 0; i < MAX_RX_RINGS; i++)
if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
DBG_PRINT(ERR_DBG, "Requested rx ring size not "
"supported\nDefaulting to %d\n",
MAX_RX_BLOCKS_PER_RING);
rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
}
return SUCCESS;
}
/**
* rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
* or Traffic class respectively.
* @nic: device private variable
* Description: The function configures the receive steering to
* desired receive ring.
* Return Value: SUCCESS on success and
* '-1' on failure (endian settings incorrect).
*/
static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
{
struct XENA_dev_config __iomem *bar0 = nic->bar0;
register u64 val64 = 0;
if (ds_codepoint > 63)
return FAILURE;
val64 = RTS_DS_MEM_DATA(ring);
writeq(val64, &bar0->rts_ds_mem_data);
val64 = RTS_DS_MEM_CTRL_WE |
RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
writeq(val64, &bar0->rts_ds_mem_ctrl);
return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
S2IO_BIT_RESET);
}
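/*
* Hypothetical usage sketch (the values are examples only): steer IPv4
* frames carrying DSCP codepoint 46 (Expedited Forwarding) to receive
* ring 1:
*
* if (rts_ds_steer(nic, 46, 1) != SUCCESS)
* DBG_PRINT(ERR_DBG, "DS steering setup failed\n");
*/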
static const struct net_device_ops s2io_netdev_ops = {
.ndo_open = s2io_open,
.ndo_stop = s2io_close,
.ndo_get_stats = s2io_get_stats,
.ndo_start_xmit = s2io_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_multicast_list = s2io_set_multicast,
.ndo_do_ioctl = s2io_ioctl,
.ndo_set_mac_address = s2io_set_mac_addr,
.ndo_change_mtu = s2io_change_mtu,
.ndo_set_features = s2io_set_features,
.ndo_vlan_rx_register = s2io_vlan_rx_register,
.ndo_vlan_rx_kill_vid = s2io_vlan_rx_kill_vid,
.ndo_tx_timeout = s2io_tx_watchdog,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = s2io_netpoll,
#endif
};
/**
* s2io_init_nic - Initialization of the adapter .
* @pdev : structure containing the PCI related information of the device.
* @pre: List of PCI devices supported by the driver listed in s2io_tbl.
* Description:
* The function initializes an adapter identified by the pci_dev structure.
* All OS related initialization including memory and device structure and
* initialization of the device private variable is done. Also the swapper
* control register is initialized to enable read and write into the I/O
* registers of the device.
* Return value:
* returns 0 on success and negative on failure.
*/
static int __devinit
s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
{
struct s2io_nic *sp;
struct net_device *dev;
int i, j, ret;
int dma_flag = false;
u32 mac_up, mac_down;
u64 val64 = 0, tmp64 = 0;
struct XENA_dev_config __iomem *bar0 = NULL;
u16 subid;
struct config_param *config;
struct mac_info *mac_control;
int mode;
u8 dev_intr_type = intr_type;
u8 dev_multiq = 0;
ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
if (ret)
return ret;
ret = pci_enable_device(pdev);
if (ret) {
DBG_PRINT(ERR_DBG,
"%s: pci_enable_device failed\n", __func__);
return ret;
}
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
dma_flag = true;
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
DBG_PRINT(ERR_DBG,
"Unable to obtain 64bit DMA "
"for consistent allocations\n");
pci_disable_device(pdev);
return -ENOMEM;
}
} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
} else {
pci_disable_device(pdev);
return -ENOMEM;
}
ret = pci_request_regions(pdev, s2io_driver_name);
if (ret) {
DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
__func__, ret);
pci_disable_device(pdev);
return -ENODEV;
}
if (dev_multiq)
dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
else
dev = alloc_etherdev(sizeof(struct s2io_nic));
if (dev == NULL) {
DBG_PRINT(ERR_DBG, "Device allocation failed\n");
pci_disable_device(pdev);
pci_release_regions(pdev);
return -ENODEV;
}
pci_set_master(pdev);
pci_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
/* Private member variable initialized to s2io NIC structure */
sp = netdev_priv(dev);
sp->dev = dev;
sp->pdev = pdev;
sp->high_dma_flag = dma_flag;
sp->device_enabled_once = false;
if (rx_ring_mode == 1)
sp->rxd_mode = RXD_MODE_1;
if (rx_ring_mode == 2)
sp->rxd_mode = RXD_MODE_3B;
sp->config.intr_type = dev_intr_type;
if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
(pdev->device == PCI_DEVICE_ID_HERC_UNI))
sp->device_type = XFRAME_II_DEVICE;
else
sp->device_type = XFRAME_I_DEVICE;
/* Initialize some PCI/PCI-X fields of the NIC. */
s2io_init_pci(sp);
/*
* Setting the device configuration parameters.
* Most of these parameters can be specified by the user during
* module insertion as they are module loadable parameters. If
* these parameters are not specified during load time, they
* are initialized with default values.
*/
config = &sp->config;
mac_control = &sp->mac_control;
config->napi = napi;
config->tx_steering_type = tx_steering_type;
/* Tx side parameters. */
if (config->tx_steering_type == TX_PRIORITY_STEERING)
config->tx_fifo_num = MAX_TX_FIFOS;
else
config->tx_fifo_num = tx_fifo_num;
/* Initialize the fifos used for tx steering */
if (config->tx_fifo_num < 5) {
if (config->tx_fifo_num == 1)
sp->total_tcp_fifos = 1;
else
sp->total_tcp_fifos = config->tx_fifo_num - 1;
sp->udp_fifo_idx = config->tx_fifo_num - 1;
sp->total_udp_fifos = 1;
sp->other_fifo_idx = sp->total_tcp_fifos - 1;
} else {
sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
FIFO_OTHER_MAX_NUM);
sp->udp_fifo_idx = sp->total_tcp_fifos;
sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
}
config->multiq = dev_multiq;
for (i = 0; i < config->tx_fifo_num; i++) {
struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
tx_cfg->fifo_len = tx_fifo_len[i];
tx_cfg->fifo_priority = i;
}
/* mapping the QoS priority to the configured fifos */
for (i = 0; i < MAX_TX_FIFOS; i++)
config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
/* map the hashing selector table to the configured fifos */
for (i = 0; i < config->tx_fifo_num; i++)
sp->fifo_selector[i] = fifo_selector[i];
config->tx_intr_type = TXD_INT_TYPE_UTILZ;
for (i = 0; i < config->tx_fifo_num; i++) {
struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
if (tx_cfg->fifo_len < 65) {
config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
break;
}
}
/* + 2 because one Txd for skb->data and one Txd for UFO */
config->max_txds = MAX_SKB_FRAGS + 2;
/* Rx side parameters. */
config->rx_ring_num = rx_ring_num;
for (i = 0; i < config->rx_ring_num; i++) {
struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
struct ring_info *ring = &mac_control->rings[i];
rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
rx_cfg->ring_priority = i;
ring->rx_bufs_left = 0;
ring->rxd_mode = sp->rxd_mode;
ring->rxd_count = rxd_count[sp->rxd_mode];
ring->pdev = sp->pdev;
ring->dev = sp->dev;
}
for (i = 0; i < rx_ring_num; i++) {
struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
rx_cfg->ring_org = RING_ORG_BUFF1;
rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
}
/* Setting Mac Control parameters */
mac_control->rmac_pause_time = rmac_pause_time;
mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
/* initialize the shared memory used by the NIC and the host */
if (init_shared_mem(sp)) {
DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
ret = -ENOMEM;
goto mem_alloc_failed;
}
sp->bar0 = pci_ioremap_bar(pdev, 0);
if (!sp->bar0) {
DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
dev->name);
ret = -ENOMEM;
goto bar0_remap_failed;
}
sp->bar1 = pci_ioremap_bar(pdev, 2);
if (!sp->bar1) {
DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
dev->name);
ret = -ENOMEM;
goto bar1_remap_failed;
}
dev->irq = pdev->irq;
dev->base_addr = (unsigned long)sp->bar0;
/* Initializing the BAR1 address as the start of the FIFO pointer. */
for (j = 0; j < MAX_TX_FIFOS; j++) {
mac_control->tx_FIFO_start[j] =
(struct TxFIFO_element __iomem *)
(sp->bar1 + (j * 0x00020000));
}
/* Driver entry points */
dev->netdev_ops = &s2io_netdev_ops;
SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_RXCSUM | NETIF_F_LRO;
dev->features |= dev->hw_features |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
if (sp->device_type & XFRAME_II_DEVICE) {
dev->hw_features |= NETIF_F_UFO;
if (ufo)
dev->features |= NETIF_F_UFO;
}
if (sp->high_dma_flag == true)
dev->features |= NETIF_F_HIGHDMA;
dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
INIT_WORK(&sp->set_link_task, s2io_set_link);
pci_save_state(sp->pdev);
/* Setting swapper control on the NIC, for proper reset operation */
if (s2io_set_swapper(sp)) {
DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
dev->name);
ret = -EAGAIN;
goto set_swap_failed;
}
/* Verify if the Herc works in the slot it's placed into */
if (sp->device_type & XFRAME_II_DEVICE) {
mode = s2io_verify_pci_mode(sp);
if (mode < 0) {
DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
__func__);
ret = -EBADSLT;
goto set_swap_failed;
}
}
if (sp->config.intr_type == MSI_X) {
sp->num_entries = config->rx_ring_num + 1;
ret = s2io_enable_msi_x(sp);
if (!ret) {
ret = s2io_test_msi(sp);
/* rollback MSI-X, will re-enable during add_isr() */
remove_msix_isr(sp);
}
if (ret) {
DBG_PRINT(ERR_DBG,
"MSI-X requested but failed to enable\n");
sp->config.intr_type = INTA;
}
}
if (config->intr_type == MSI_X) {
for (i = 0; i < config->rx_ring_num ; i++) {
struct ring_info *ring = &mac_control->rings[i];
netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
}
} else {
netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
}
/* Not needed for Herc */
if (sp->device_type & XFRAME_I_DEVICE) {
/*
* Fix for all "FFs" MAC address problems observed on
* Alpha platforms
*/
fix_mac_address(sp);
s2io_reset(sp);
}
/*
* MAC address initialization.
* For now only one mac address will be read and used.
*/
bar0 = sp->bar0;
val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
writeq(val64, &bar0->rmac_addr_cmd_mem);
wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
S2IO_BIT_RESET);
tmp64 = readq(&bar0->rmac_addr_data0_mem);
mac_down = (u32)tmp64;
mac_up = (u32) (tmp64 >> 32);
sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
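/*
* Worked example with a made-up CAM value: tmp64 = 0x00E081ABCDEF0000
* gives mac_up = 0x00E081AB and mac_down = 0xCDEF0000, which the
* assignments above decode to the station address 00:E0:81:AB:CD:EF;
* the six address bytes occupy the upper six bytes of the 64-bit word.
*/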
/* Set the factory defined MAC address initially */
dev->addr_len = ETH_ALEN;
memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
/* initialize number of multicast & unicast MAC entries variables */
if (sp->device_type == XFRAME_I_DEVICE) {
config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
} else if (sp->device_type == XFRAME_II_DEVICE) {
config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
}
/* store mac addresses from CAM to s2io_nic structure */
do_s2io_store_unicast_mc(sp);
/* Configure MSIX vector for number of rings configured plus one */
if ((sp->device_type == XFRAME_II_DEVICE) &&
(config->intr_type == MSI_X))
sp->num_entries = config->rx_ring_num + 1;
/* Store the values of the MSIX table in the s2io_nic structure */
store_xmsi_data(sp);
/* reset Nic and bring it to known state */
s2io_reset(sp);
/*
* Initialize link state flags
* and the card state parameter
*/
sp->state = 0;
/* Initialize spinlocks */
for (i = 0; i < sp->config.tx_fifo_num; i++) {
struct fifo_info *fifo = &mac_control->fifos[i];
spin_lock_init(&fifo->tx_lock);
}
/*
* SXE-002: Configure link and activity LED to init state
* on driver load.
*/
subid = sp->pdev->subsystem_device;
if ((subid & 0xFF) >= 0x07) {
val64 = readq(&bar0->gpio_control);
val64 |= 0x0000800000000000ULL;
writeq(val64, &bar0->gpio_control);
val64 = 0x0411040400000000ULL;
writeq(val64, (void __iomem *)bar0 + 0x2700);
val64 = readq(&bar0->gpio_control);
}
sp->rx_csum = 1; /* Rx chksum verify enabled by default */
if (register_netdev(dev)) {
DBG_PRINT(ERR_DBG, "Device registration failed\n");
ret = -ENODEV;
goto register_failed;
}
s2io_vpd_read(sp);
DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
sp->product_name, pdev->revision);
DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
s2io_driver_version);
DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
if (sp->device_type & XFRAME_II_DEVICE) {
mode = s2io_print_pci_mode(sp);
if (mode < 0) {
ret = -EBADSLT;
unregister_netdev(dev);
goto set_swap_failed;
}
}
switch (sp->rxd_mode) {
case RXD_MODE_1:
DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
dev->name);
break;
case RXD_MODE_3B:
DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
dev->name);
break;
}
switch (sp->config.napi) {
case 0:
DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
break;
case 1:
DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
break;
}
DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
sp->config.tx_fifo_num);
DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
sp->config.rx_ring_num);
switch (sp->config.intr_type) {
case INTA:
DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
break;
case MSI_X:
DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
break;
}
if (sp->config.multiq) {
for (i = 0; i < sp->config.tx_fifo_num; i++) {
struct fifo_info *fifo = &mac_control->fifos[i];
fifo->multiq = config->multiq;
}
DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
dev->name);
} else
DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
dev->name);
switch (sp->config.tx_steering_type) {
case NO_STEERING:
DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
dev->name);
break;
case TX_PRIORITY_STEERING:
DBG_PRINT(ERR_DBG,
"%s: Priority steering enabled for transmit\n",
dev->name);
break;
case TX_DEFAULT_STEERING:
DBG_PRINT(ERR_DBG,
"%s: Default steering enabled for transmit\n",
dev->name);
}
DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
dev->name);
if (ufo)
DBG_PRINT(ERR_DBG,
"%s: UDP Fragmentation Offload(UFO) enabled\n",
dev->name);
/* Initialize device name */
sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
if (vlan_tag_strip)
sp->vlan_strip_flag = 1;
else
sp->vlan_strip_flag = 0;
/*
* Make Link state as off at this point, when the Link change
* interrupt comes the state will be automatically changed to
* the right state.
*/
netif_carrier_off(dev);
return 0;
register_failed:
set_swap_failed:
iounmap(sp->bar1);
bar1_remap_failed:
iounmap(sp->bar0);
bar0_remap_failed:
mem_alloc_failed:
free_shared_mem(sp);
pci_disable_device(pdev);
pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
free_netdev(dev);
return ret;
}
/**
* s2io_rem_nic - Free the PCI device
* @pdev: structure containing the PCI related information of the device.
* Description: This function is called by the PCI subsystem to release a
* PCI device and free up all resources held by the device. This could
* be in response to a Hot plug event or when the driver is to be removed
* from memory.
*/
static void __devexit s2io_rem_nic(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct s2io_nic *sp;
if (dev == NULL) {
DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
return;
}
sp = netdev_priv(dev);
cancel_work_sync(&sp->rst_timer_task);
cancel_work_sync(&sp->set_link_task);
unregister_netdev(dev);
free_shared_mem(sp);
iounmap(sp->bar0);
iounmap(sp->bar1);
pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
free_netdev(dev);
pci_disable_device(pdev);
}
/**
* s2io_starter - Entry point for the driver
* Description: This function is the entry point for the driver. It registers
* the driver with the PCI subsystem; parameter verification and device setup
* happen later, at probe time, in s2io_init_nic().
*/
static int __init s2io_starter(void)
{
return pci_register_driver(&s2io_driver);
}
/**
* s2io_closer - Cleanup routine for the driver
* Description: This function is the cleanup routine for the driver. It
* unregisters the driver.
*/
static __exit void s2io_closer(void)
{
pci_unregister_driver(&s2io_driver);
DBG_PRINT(INIT_DBG, "cleanup done\n");
}
module_init(s2io_starter);
module_exit(s2io_closer);
static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
struct tcphdr **tcp, struct RxD_t *rxdp,
struct s2io_nic *sp)
{
int ip_off;
u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
DBG_PRINT(INIT_DBG,
"%s: Non-TCP frames not supported for LRO\n",
__func__);
return -1;
}
/* Checking for DIX type or DIX type with VLAN */
if ((l2_type == 0) || (l2_type == 4)) {
ip_off = HEADER_ETHERNET_II_802_3_SIZE;
/*
* If vlan stripping is disabled and the frame is VLAN tagged,
* shift the offset by the VLAN header size bytes.
*/
if ((!sp->vlan_strip_flag) &&
(rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
ip_off += HEADER_VLAN_SIZE;
} else {
/* LLC, SNAP etc are considered non-mergeable */
return -1;
}
*ip = (struct iphdr *)((u8 *)buffer + ip_off);
ip_len = (u8)((*ip)->ihl);
ip_len <<= 2;
*tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
return 0;
}
static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
struct tcphdr *tcp)
{
DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
if ((lro->iph->saddr != ip->saddr) ||
(lro->iph->daddr != ip->daddr) ||
(lro->tcph->source != tcp->source) ||
(lro->tcph->dest != tcp->dest))
return -1;
return 0;
}
static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
{
return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
}
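/*
* Worked example: a full-sized Ethernet TCP segment with no IP or TCP
* options has tot_len = 1500, ihl = 5 (20 bytes) and doff = 5
* (20 bytes), giving a payload length of 1500 - 20 - 20 = 1460. A pure
* ACK with timestamps has tot_len = 52, ihl = 5 and doff = 8
* (32 bytes), giving 0.
*/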
static void initiate_new_session(struct lro *lro, u8 *l2h,
struct iphdr *ip, struct tcphdr *tcp,
u32 tcp_pyld_len, u16 vlan_tag)
{
DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
lro->l2h = l2h;
lro->iph = ip;
lro->tcph = tcp;
lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
lro->tcp_ack = tcp->ack_seq;
lro->sg_num = 1;
lro->total_len = ntohs(ip->tot_len);
lro->frags_len = 0;
lro->vlan_tag = vlan_tag;
/*
* Check if we saw TCP timestamp.
* Other consistency checks have already been done.
*/
if (tcp->doff == 8) {
__be32 *ptr;
ptr = (__be32 *)(tcp+1);
lro->saw_ts = 1;
lro->cur_tsval = ntohl(*(ptr+1));
lro->cur_tsecr = *(ptr+2);
}
lro->in_use = 1;
}
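/*
* Option layout assumed by the doff == 8 case above: a 20-byte base TCP
* header followed by 12 bytes of options in the canonical
* NOP, NOP, TIMESTAMP (kind 8, length 10) encoding. Viewed as __be32
* words starting at (tcp + 1):
*
* ptr[0] = 0x0101080a (NOP, NOP, kind, length)
* ptr[1] = TSval (read in host order via ntohl above)
* ptr[2] = TSecr (kept in network byte order)
*/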
static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
{
struct iphdr *ip = lro->iph;
struct tcphdr *tcp = lro->tcph;
__sum16 nchk;
struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
/* Update L3 header */
ip->tot_len = htons(lro->total_len);
ip->check = 0;
nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
ip->check = nchk;
/* Update L4 header */
tcp->ack_seq = lro->tcp_ack;
tcp->window = lro->window;
/* Update tsecr field if this session has timestamps enabled */
if (lro->saw_ts) {
__be32 *ptr = (__be32 *)(tcp + 1);
*(ptr+2) = lro->cur_tsecr;
}
/* Update counters required for calculation of
* average no. of packets aggregated.
*/
swstats->sum_avg_pkts_aggregated += lro->sg_num;
swstats->num_aggregations++;
}
static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
struct tcphdr *tcp, u32 l4_pyld)
{
DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
lro->total_len += l4_pyld;
lro->frags_len += l4_pyld;
lro->tcp_next_seq += l4_pyld;
lro->sg_num++;
/* Update ack seq no. and window advertisement (from this pkt) in LRO object */
lro->tcp_ack = tcp->ack_seq;
lro->window = tcp->window;
if (lro->saw_ts) {
__be32 *ptr;
/* Update tsecr and tsval from this packet */
ptr = (__be32 *)(tcp+1);
lro->cur_tsval = ntohl(*(ptr+1));
lro->cur_tsecr = *(ptr + 2);
}
}
static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
struct tcphdr *tcp, u32 tcp_pyld_len)
{
u8 *ptr;
DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
if (!tcp_pyld_len) {
/* Runt frame or a pure ack */
return -1;
}
if (ip->ihl != 5) /* IP has options */
return -1;
/* If we see CE codepoint in IP header, packet is not mergeable */
if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
return -1;
/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
if (tcp->urg || tcp->psh || tcp->rst ||
tcp->syn || tcp->fin ||
tcp->ece || tcp->cwr || !tcp->ack) {
/*
* Currently recognize only the ack control word and
* any other control field being set would result in
* flushing the LRO session
*/
return -1;
}
/*
* Allow only one TCP timestamp option. Don't aggregate if
* any other options are detected.
*/
if (tcp->doff != 5 && tcp->doff != 8)
return -1;
if (tcp->doff == 8) {
ptr = (u8 *)(tcp + 1);
while (*ptr == TCPOPT_NOP)
ptr++;
if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
return -1;
/* Ensure timestamp value increases monotonically */
if (l_lro)
if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
return -1;
/* timestamp echo reply should be non-zero */
if (*((__be32 *)(ptr+6)) == 0)
return -1;
}
return 0;
}
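/*
* Byte-level view of the option walk above, assuming the usual
* NOP, NOP, TIMESTAMP encoding: once the while loop has skipped the
* NOP bytes, ptr points at the kind byte (TCPOPT_TIMESTAMP == 8),
* ptr + 1 at the length byte (TCPOLEN_TIMESTAMP == 10), ptr + 2 at the
* 4-byte TSval and ptr + 6 at the 4-byte TSecr checked above.
*/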
static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
u8 **tcp, u32 *tcp_len, struct lro **lro,
struct RxD_t *rxdp, struct s2io_nic *sp)
{
struct iphdr *ip;
struct tcphdr *tcph;
int ret = 0, i;
u16 vlan_tag = 0;
struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
rxdp, sp);
if (ret)
return ret;
DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);
vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
tcph = (struct tcphdr *)*tcp;
*tcp_len = get_l4_pyld_length(ip, tcph);
for (i = 0; i < MAX_LRO_SESSIONS; i++) {
struct lro *l_lro = &ring_data->lro0_n[i];
if (l_lro->in_use) {
if (check_for_socket_match(l_lro, ip, tcph))
continue;
/* Sock pair matched */
*lro = l_lro;
if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
"expected 0x%x, actual 0x%x\n",
__func__,
(*lro)->tcp_next_seq,
ntohl(tcph->seq));
swstats->outof_sequence_pkts++;
ret = 2;
break;
}
if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
*tcp_len))
ret = 1; /* Aggregate */
else
ret = 2; /* Flush both */
break;
}
}
if (ret == 0) {
/* Before searching for available LRO objects,
* check if the pkt is L3/L4 aggregatable. If not
* don't create new LRO session. Just send this
* packet up.
*/
if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
return 5;
for (i = 0; i < MAX_LRO_SESSIONS; i++) {
struct lro *l_lro = &ring_data->lro0_n[i];
if (!(l_lro->in_use)) {
*lro = l_lro;
ret = 3; /* Begin anew */
break;
}
}
}
if (ret == 0) { /* sessions exceeded */
DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
__func__);
*lro = NULL;
return ret;
}
switch (ret) {
case 3:
initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
vlan_tag);
break;
case 2:
update_L3L4_header(sp, *lro);
break;
case 1:
aggregate_new_rx(*lro, ip, tcph, *tcp_len);
if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
update_L3L4_header(sp, *lro);
ret = 4; /* Flush the LRO */
}
break;
default:
DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
break;
}
return ret;
}
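/*
* Summary of the return codes above, as consumed by the switch in the
* receive path:
* -1: frame is non-TCP or the L2 type is not aggregatable; sent up as-is
*  0: all LRO sessions already in use; sent up as-is
*  1: packet aggregated into an existing session
*  2: flush both; an out-of-sequence or unmergeable packet ends the session
*  3: a new session was begun with this packet
*  4: session hit lro_max_aggr_per_sess; packet appended, session flushed
*  5: first packet of a would-be session is not L3/L4 aggregatable
*/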
static void clear_lro_session(struct lro *lro)
{
static u16 lro_struct_size = sizeof(struct lro);
memset(lro, 0, lro_struct_size);
}
static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
{
struct net_device *dev = skb->dev;
struct s2io_nic *sp = netdev_priv(dev);
skb->protocol = eth_type_trans(skb, dev);
if (sp->vlgrp && vlan_tag && (sp->vlan_strip_flag)) {
/* Queueing the vlan frame to the upper layer */
if (sp->config.napi)
vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag);
else
vlan_hwaccel_rx(skb, sp->vlgrp, vlan_tag);
} else {
if (sp->config.napi)
netif_receive_skb(skb);
else
netif_rx(skb);
}
}
static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
struct sk_buff *skb, u32 tcp_len)
{
struct sk_buff *first = lro->parent;
struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
first->len += tcp_len;
first->data_len = lro->frags_len;
skb_pull(skb, (skb->len - tcp_len));
if (skb_shinfo(first)->frag_list)
lro->last_frag->next = skb;
else
skb_shinfo(first)->frag_list = skb;
first->truesize += skb->truesize;
lro->last_frag = skb;
swstats->clubbed_frms_cnt++;
}
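/*
* The aggregated super-packet lives on a frag_list: lro->parent is the
* head skb and every clubbed segment is pulled down to its TCP payload
* before being chained behind it. For example, after three 1460-byte
* segments have been appended, first->len has grown by 3 * 1460 and
* first->data_len equals lro->frags_len, the sum of the appended
* payload bytes.
*/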
/**
* s2io_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
* @state: The current pci connection state
*
* This function is called after a PCI bus error affecting
* this device has been detected.
*/
static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct s2io_nic *sp = netdev_priv(netdev);
netif_device_detach(netdev);
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
if (netif_running(netdev)) {
/* Bring down the card, while avoiding PCI I/O */
do_s2io_card_down(sp, 0);
}
pci_disable_device(pdev);
return PCI_ERS_RESULT_NEED_RESET;
}
/**
* s2io_io_slot_reset - called after the pci bus has been reset.
* @pdev: Pointer to PCI device
*
* Restart the card from scratch, as if from a cold-boot.
* At this point, the card has experienced a hard reset,
* followed by fixups by BIOS, and has its config space
* set up identically to what it was at cold boot.
*/
static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct s2io_nic *sp = netdev_priv(netdev);
if (pci_enable_device(pdev)) {
pr_err("Cannot re-enable PCI device after reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
s2io_reset(sp);
return PCI_ERS_RESULT_RECOVERED;
}
/**
* s2io_io_resume - called when traffic can start flowing again.
* @pdev: Pointer to PCI device
*
* This callback is called when the error recovery driver tells
* us that it's OK to resume normal operation.
*/
static void s2io_io_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct s2io_nic *sp = netdev_priv(netdev);
if (netif_running(netdev)) {
if (s2io_card_up(sp)) {
pr_err("Can't bring device back up after reset.\n");
return;
}
if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
s2io_card_down(sp);
pr_err("Can't restore mac addr after reset.\n");
return;
}
}
netif_device_attach(netdev);
netif_tx_wake_all_queues(netdev);
}
| gpl-2.0 |
Ca1ne/Enoch | arch/sparc/kernel/sigutil_32.c | 2887 | 3237 | #include <linux/kernel.h>
#include <linux/types.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <asm/sigcontext.h>
#include <asm/fpumacro.h>
#include <asm/ptrace.h>
#include "sigutil.h"
int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
int err = 0;
#ifdef CONFIG_SMP
if (test_tsk_thread_flag(current, TIF_USEDFPU)) {
put_psr(get_psr() | PSR_EF);
fpsave(¤t->thread.float_regs[0], ¤t->thread.fsr,
¤t->thread.fpqueue[0], ¤t->thread.fpqdepth);
regs->psr &= ~(PSR_EF);
clear_tsk_thread_flag(current, TIF_USEDFPU);
}
#else
if (current == last_task_used_math) {
put_psr(get_psr() | PSR_EF);
fpsave(¤t->thread.float_regs[0], ¤t->thread.fsr,
¤t->thread.fpqueue[0], ¤t->thread.fpqdepth);
last_task_used_math = NULL;
regs->psr &= ~(PSR_EF);
}
#endif
err |= __copy_to_user(&fpu->si_float_regs[0],
¤t->thread.float_regs[0],
(sizeof(unsigned long) * 32));
err |= __put_user(current->thread.fsr, &fpu->si_fsr);
err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
if (current->thread.fpqdepth != 0)
err |= __copy_to_user(&fpu->si_fpqueue[0],
¤t->thread.fpqueue[0],
((sizeof(unsigned long) +
(sizeof(unsigned long *)))*16));
clear_used_math();
return err;
}
int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
int err;
#ifdef CONFIG_SMP
if (test_tsk_thread_flag(current, TIF_USEDFPU))
regs->psr &= ~PSR_EF;
#else
if (current == last_task_used_math) {
last_task_used_math = NULL;
regs->psr &= ~PSR_EF;
}
#endif
set_used_math();
clear_tsk_thread_flag(current, TIF_USEDFPU);
if (!access_ok(VERIFY_READ, fpu, sizeof(*fpu)))
return -EFAULT;
err = __copy_from_user(¤t->thread.float_regs[0], &fpu->si_float_regs[0],
(sizeof(unsigned long) * 32));
err |= __get_user(current->thread.fsr, &fpu->si_fsr);
err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
if (current->thread.fpqdepth != 0)
err |= __copy_from_user(¤t->thread.fpqueue[0],
&fpu->si_fpqueue[0],
((sizeof(unsigned long) +
(sizeof(unsigned long *)))*16));
return err;
}
int save_rwin_state(int wsaved, __siginfo_rwin_t __user *rwin)
{
int i, err = __put_user(wsaved, &rwin->wsaved);
for (i = 0; i < wsaved; i++) {
struct reg_window32 *rp;
unsigned long fp;
rp = ¤t_thread_info()->reg_window[i];
fp = current_thread_info()->rwbuf_stkptrs[i];
err |= copy_to_user(&rwin->reg_window[i], rp,
sizeof(struct reg_window32));
err |= __put_user(fp, &rwin->rwbuf_stkptrs[i]);
}
return err;
}
int restore_rwin_state(__siginfo_rwin_t __user *rp)
{
struct thread_info *t = current_thread_info();
int i, wsaved, err;
__get_user(wsaved, &rp->wsaved);
if (wsaved > NSWINS)
return -EFAULT;
err = 0;
for (i = 0; i < wsaved; i++) {
err |= copy_from_user(&t->reg_window[i],
&rp->reg_window[i],
sizeof(struct reg_window32));
err |= __get_user(t->rwbuf_stkptrs[i],
&rp->rwbuf_stkptrs[i]);
}
if (err)
return err;
t->w_saved = wsaved;
synchronize_user_stack();
if (t->w_saved)
return -EFAULT;
return 0;
}
| gpl-2.0 |
NStep/nx_bullhead | drivers/platform/x86/msi-wmi.c | 3143 | 9490 | /*
* MSI WMI hotkeys
*
* Copyright (C) 2009 Novell <trenn@suse.de>
*
* Most stuff taken over from hp-wmi
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/acpi.h>
#include <linux/backlight.h>
#include <linux/slab.h>
#include <linux/module.h>
MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
MODULE_DESCRIPTION("MSI laptop WMI hotkeys driver");
MODULE_LICENSE("GPL");
#define DRV_NAME "msi-wmi"
#define MSIWMI_BIOS_GUID "551A1F84-FBDD-4125-91DB-3EA8F44F1D45"
#define MSIWMI_MSI_EVENT_GUID "B6F3EEF2-3D2F-49DC-9DE3-85BCE18C62F2"
#define MSIWMI_WIND_EVENT_GUID "5B3CC38A-40D9-7245-8AE6-1145B751BE3F"
MODULE_ALIAS("wmi:" MSIWMI_BIOS_GUID);
MODULE_ALIAS("wmi:" MSIWMI_MSI_EVENT_GUID);
MODULE_ALIAS("wmi:" MSIWMI_WIND_EVENT_GUID);
enum msi_scancodes {
/* Generic MSI keys (not present on MSI Wind) */
MSI_KEY_BRIGHTNESSUP = 0xD0,
MSI_KEY_BRIGHTNESSDOWN,
MSI_KEY_VOLUMEUP,
MSI_KEY_VOLUMEDOWN,
MSI_KEY_MUTE,
/* MSI Wind keys */
WIND_KEY_TOUCHPAD = 0x08, /* Fn+F3 touchpad toggle */
WIND_KEY_BLUETOOTH = 0x56, /* Fn+F11 Bluetooth toggle */
WIND_KEY_CAMERA, /* Fn+F6 webcam toggle */
WIND_KEY_WLAN = 0x5f, /* Fn+F11 Wi-Fi toggle */
WIND_KEY_TURBO, /* Fn+F10 turbo mode toggle */
WIND_KEY_ECO = 0x69, /* Fn+F10 ECO mode toggle */
};
static struct key_entry msi_wmi_keymap[] = {
{ KE_KEY, MSI_KEY_BRIGHTNESSUP, {KEY_BRIGHTNESSUP} },
{ KE_KEY, MSI_KEY_BRIGHTNESSDOWN, {KEY_BRIGHTNESSDOWN} },
{ KE_KEY, MSI_KEY_VOLUMEUP, {KEY_VOLUMEUP} },
{ KE_KEY, MSI_KEY_VOLUMEDOWN, {KEY_VOLUMEDOWN} },
{ KE_KEY, MSI_KEY_MUTE, {KEY_MUTE} },
/* These keys work without WMI. Ignore them to avoid double keycodes */
{ KE_IGNORE, WIND_KEY_TOUCHPAD, {KEY_TOUCHPAD_TOGGLE} },
{ KE_IGNORE, WIND_KEY_BLUETOOTH, {KEY_BLUETOOTH} },
{ KE_IGNORE, WIND_KEY_CAMERA, {KEY_CAMERA} },
{ KE_IGNORE, WIND_KEY_WLAN, {KEY_WLAN} },
/* These are unknown WMI events found on MSI Wind */
{ KE_IGNORE, 0x00 },
{ KE_IGNORE, 0x62 },
{ KE_IGNORE, 0x63 },
/* These are MSI Wind keys that should be handled via WMI */
{ KE_KEY, WIND_KEY_TURBO, {KEY_PROG1} },
{ KE_KEY, WIND_KEY_ECO, {KEY_PROG2} },
{ KE_END, 0 }
};
static ktime_t last_pressed;
static const struct {
const char *guid;
bool quirk_last_pressed;
} *event_wmi, event_wmis[] = {
{ MSIWMI_MSI_EVENT_GUID, true },
{ MSIWMI_WIND_EVENT_GUID, false },
};
static struct backlight_device *backlight;
static int backlight_map[] = { 0x00, 0x33, 0x66, 0x99, 0xCC, 0xFF };
static struct input_dev *msi_wmi_input_dev;
static int msi_wmi_query_block(int instance, int *ret)
{
acpi_status status;
union acpi_object *obj;
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
status = wmi_query_block(MSIWMI_BIOS_GUID, instance, &output);
obj = output.pointer;
if (!obj || obj->type != ACPI_TYPE_INTEGER) {
if (obj) {
pr_err("query block returned object "
"type: %d - buffer length:%d\n", obj->type,
obj->type == ACPI_TYPE_BUFFER ?
obj->buffer.length : 0);
}
kfree(obj);
return -EINVAL;
}
*ret = obj->integer.value;
kfree(obj);
return 0;
}
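/*
* Usage sketch, mirroring bl_get() below: instance 1 of the BIOS GUID
* reports the current backlight level.
*
* int raw;
* if (!msi_wmi_query_block(1, &raw))
* pr_debug("raw backlight value: 0x%X\n", raw);
*/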
static int msi_wmi_set_block(int instance, int value)
{
acpi_status status;
struct acpi_buffer input = { sizeof(int), &value };
pr_debug("Going to set block of instance: %d - value: %d\n",
instance, value);
status = wmi_set_block(MSIWMI_BIOS_GUID, instance, &input);
return ACPI_SUCCESS(status) ? 0 : 1;
}
static int bl_get(struct backlight_device *bd)
{
int level, err, ret;
/* Instance 1 is "get backlight", cmp with DSDT */
err = msi_wmi_query_block(1, &ret);
if (err) {
pr_err("Could not query backlight: %d\n", err);
return -EINVAL;
}
pr_debug("Get: Query block returned: %d\n", ret);
for (level = 0; level < ARRAY_SIZE(backlight_map); level++) {
if (backlight_map[level] == ret) {
pr_debug("Current backlight level: 0x%X - index: %d\n",
backlight_map[level], level);
break;
}
}
if (level == ARRAY_SIZE(backlight_map)) {
pr_err("get: Invalid brightness value: 0x%X\n", ret);
return -EINVAL;
}
return level;
}
static int bl_set_status(struct backlight_device *bd)
{
int bright = bd->props.brightness;
if (bright >= ARRAY_SIZE(backlight_map) || bright < 0)
return -EINVAL;
/* Instance 0 is "set backlight" */
return msi_wmi_set_block(0, backlight_map[bright]);
}
static const struct backlight_ops msi_backlight_ops = {
.get_brightness = bl_get,
.update_status = bl_set_status,
};
static void msi_wmi_notify(u32 value, void *context)
{
struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
static struct key_entry *key;
union acpi_object *obj;
acpi_status status;
status = wmi_get_event_data(value, &response);
if (status != AE_OK) {
pr_info("bad event status 0x%x\n", status);
return;
}
obj = (union acpi_object *)response.pointer;
if (obj && obj->type == ACPI_TYPE_INTEGER) {
int eventcode = obj->integer.value;
pr_debug("Eventcode: 0x%x\n", eventcode);
key = sparse_keymap_entry_from_scancode(msi_wmi_input_dev,
eventcode);
if (!key) {
pr_info("Unknown key pressed - %x\n", eventcode);
goto msi_wmi_notify_exit;
}
if (event_wmi->quirk_last_pressed) {
ktime_t cur = ktime_get_real();
ktime_t diff = ktime_sub(cur, last_pressed);
/* Ignore event if any event happened in a 50 ms
timeframe -> Key press may result in 10-20 GPEs */
if (ktime_to_us(diff) < 1000 * 50) {
pr_debug("Suppressed key event 0x%X - "
"Last press was %lld us ago\n",
key->code, ktime_to_us(diff));
goto msi_wmi_notify_exit;
}
last_pressed = cur;
}
if (key->type == KE_KEY &&
/* Brightness is served via acpi video driver */
(backlight ||
(key->code != MSI_KEY_BRIGHTNESSUP &&
key->code != MSI_KEY_BRIGHTNESSDOWN))) {
pr_debug("Send key: 0x%X - Input layer keycode: %d\n",
key->code, key->keycode);
sparse_keymap_report_entry(msi_wmi_input_dev, key, 1,
true);
}
} else
pr_info("Unknown event received\n");
msi_wmi_notify_exit:
kfree(response.pointer);
}
static int __init msi_wmi_backlight_setup(void)
{
int err;
struct backlight_properties props;
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_PLATFORM;
props.max_brightness = ARRAY_SIZE(backlight_map) - 1;
backlight = backlight_device_register(DRV_NAME, NULL, NULL,
&msi_backlight_ops,
&props);
if (IS_ERR(backlight))
return PTR_ERR(backlight);
err = bl_get(NULL);
if (err < 0) {
backlight_device_unregister(backlight);
return err;
}
backlight->props.brightness = err;
return 0;
}
static int __init msi_wmi_input_setup(void)
{
int err;
msi_wmi_input_dev = input_allocate_device();
if (!msi_wmi_input_dev)
return -ENOMEM;
msi_wmi_input_dev->name = "MSI WMI hotkeys";
msi_wmi_input_dev->phys = "wmi/input0";
msi_wmi_input_dev->id.bustype = BUS_HOST;
err = sparse_keymap_setup(msi_wmi_input_dev, msi_wmi_keymap, NULL);
if (err)
goto err_free_dev;
err = input_register_device(msi_wmi_input_dev);
if (err)
goto err_free_keymap;
last_pressed = ktime_set(0, 0);
return 0;
err_free_keymap:
sparse_keymap_free(msi_wmi_input_dev);
err_free_dev:
input_free_device(msi_wmi_input_dev);
return err;
}
static int __init msi_wmi_init(void)
{
int err;
int i;
for (i = 0; i < ARRAY_SIZE(event_wmis); i++) {
if (!wmi_has_guid(event_wmis[i].guid))
continue;
err = msi_wmi_input_setup();
if (err) {
pr_err("Unable to setup input device\n");
return err;
}
err = wmi_install_notify_handler(event_wmis[i].guid,
msi_wmi_notify, NULL);
if (ACPI_FAILURE(err)) {
pr_err("Unable to setup WMI notify handler\n");
goto err_free_input;
}
pr_debug("Event handler installed\n");
event_wmi = &event_wmis[i];
break;
}
if (wmi_has_guid(MSIWMI_BIOS_GUID) && !acpi_video_backlight_support()) {
err = msi_wmi_backlight_setup();
if (err) {
pr_err("Unable to setup backlight device\n");
goto err_uninstall_handler;
}
pr_debug("Backlight device created\n");
}
if (!event_wmi && !backlight) {
pr_err("This machine doesn't have neither MSI-hotkeys nor backlight through WMI\n");
return -ENODEV;
}
return 0;
err_uninstall_handler:
if (event_wmi)
wmi_remove_notify_handler(event_wmi->guid);
err_free_input:
if (event_wmi) {
sparse_keymap_free(msi_wmi_input_dev);
input_unregister_device(msi_wmi_input_dev);
}
return err;
}
static void __exit msi_wmi_exit(void)
{
if (event_wmi) {
wmi_remove_notify_handler(event_wmi->guid);
sparse_keymap_free(msi_wmi_input_dev);
input_unregister_device(msi_wmi_input_dev);
}
if (backlight)
backlight_device_unregister(backlight);
}
module_init(msi_wmi_init);
module_exit(msi_wmi_exit);
| gpl-2.0 |
ghsr/android_kernel_samsung_galaxys2plus-common | arch/cris/arch-v32/drivers/i2c.c | 3143 | 14575 | /*!***************************************************************************
*!
*! FILE NAME : i2c.c
*!
*! DESCRIPTION: implements an interface for IIC/I2C, both directly from other
*! kernel modules (i2c_writereg/readreg) and from userspace using
*! ioctl()'s
*!
*! Nov 30 1998 Torbjorn Eliasson Initial version.
*! Bjorn Wesen Elinux kernel version.
*! Jan 14 2000 Johan Adolfsson Fixed PB shadow register stuff -
*! don't use PB_I2C if DS1302 uses same bits,
*! use PB.
*| June 23 2003 Pieter Grimmerink Added 'i2c_sendnack'. i2c_readreg now
*| generates nack on last received byte,
*| instead of ack.
*| i2c_getack changed data level while clock
*| was high, causing DS75 to see a stop condition
*!
*! ---------------------------------------------------------------------------
*!
*! (C) Copyright 1999-2007 Axis Communications AB, LUND, SWEDEN
*!
*!***************************************************************************/
/****************** INCLUDE FILES SECTION ***********************************/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <asm/etraxi2c.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/delay.h>
#include "i2c.h"
/****************** I2C DEFINITION SECTION *************************/
#define D(x)
#define I2C_MAJOR 123 /* LOCAL/EXPERIMENTAL */
static DEFINE_MUTEX(i2c_mutex);
static const char i2c_name[] = "i2c";
#define CLOCK_LOW_TIME 8
#define CLOCK_HIGH_TIME 8
#define START_CONDITION_HOLD_TIME 8
#define STOP_CONDITION_HOLD_TIME 8
#define ENABLE_OUTPUT 0x01
#define ENABLE_INPUT 0x00
#define I2C_CLOCK_HIGH 1
#define I2C_CLOCK_LOW 0
#define I2C_DATA_HIGH 1
#define I2C_DATA_LOW 0
#define i2c_enable()
#define i2c_disable()
/* enable or disable output-enable, to select output or input on the i2c bus */
#define i2c_dir_out() crisv32_io_set_dir(&cris_i2c_data, crisv32_io_dir_out)
#define i2c_dir_in() crisv32_io_set_dir(&cris_i2c_data, crisv32_io_dir_in)
/* control the i2c clock and data signals */
#define i2c_clk(x) crisv32_io_set(&cris_i2c_clk, x)
#define i2c_data(x) crisv32_io_set(&cris_i2c_data, x)
/* read a bit from the i2c interface */
#define i2c_getbit() crisv32_io_rd(&cris_i2c_data)
#define i2c_delay(usecs) udelay(usecs)
static DEFINE_SPINLOCK(i2c_lock); /* Protect directions etc */
/****************** VARIABLE SECTION ************************************/
static struct crisv32_iopin cris_i2c_clk;
static struct crisv32_iopin cris_i2c_data;
/****************** FUNCTION DEFINITION SECTION *************************/
/* generate i2c start condition */
void
i2c_start(void)
{
/*
* SCL=1 SDA=1
*/
i2c_dir_out();
i2c_delay(CLOCK_HIGH_TIME/6);
i2c_data(I2C_DATA_HIGH);
i2c_clk(I2C_CLOCK_HIGH);
i2c_delay(CLOCK_HIGH_TIME);
/*
* SCL=1 SDA=0
*/
i2c_data(I2C_DATA_LOW);
i2c_delay(START_CONDITION_HOLD_TIME);
/*
* SCL=0 SDA=0
*/
i2c_clk(I2C_CLOCK_LOW);
i2c_delay(CLOCK_LOW_TIME);
}
/* generate i2c stop condition */
void
i2c_stop(void)
{
i2c_dir_out();
/*
* SCL=0 SDA=0
*/
i2c_clk(I2C_CLOCK_LOW);
i2c_data(I2C_DATA_LOW);
i2c_delay(CLOCK_LOW_TIME*2);
/*
* SCL=1 SDA=0
*/
i2c_clk(I2C_CLOCK_HIGH);
i2c_delay(CLOCK_HIGH_TIME*2);
/*
* SCL=1 SDA=1
*/
i2c_data(I2C_DATA_HIGH);
i2c_delay(STOP_CONDITION_HOLD_TIME);
i2c_dir_in();
}
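/*
* Bus-level summary of the two conditions: a START is SDA falling while
* SCL is high (generated in i2c_start() above) and a STOP is SDA rising
* while SCL is high (the final transitions above). Everything else on
* the bus changes SDA only while SCL is low.
*/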
/* write a byte to the i2c interface */
void
i2c_outbyte(unsigned char x)
{
int i;
i2c_dir_out();
for (i = 0; i < 8; i++) {
if (x & 0x80) {
i2c_data(I2C_DATA_HIGH);
} else {
i2c_data(I2C_DATA_LOW);
}
i2c_delay(CLOCK_LOW_TIME/2);
i2c_clk(I2C_CLOCK_HIGH);
i2c_delay(CLOCK_HIGH_TIME);
i2c_clk(I2C_CLOCK_LOW);
i2c_delay(CLOCK_LOW_TIME/2);
x <<= 1;
}
i2c_data(I2C_DATA_LOW);
i2c_delay(CLOCK_LOW_TIME/2);
/*
* enable input
*/
i2c_dir_in();
}
/* read a byte from the i2c interface */
unsigned char
i2c_inbyte(void)
{
unsigned char aBitByte = 0;
int i;
/* Switch off I2C to get bit */
i2c_disable();
i2c_dir_in();
i2c_delay(CLOCK_HIGH_TIME/2);
/* Get bit */
aBitByte |= i2c_getbit();
/* Enable I2C */
i2c_enable();
i2c_delay(CLOCK_LOW_TIME/2);
for (i = 1; i < 8; i++) {
aBitByte <<= 1;
/* Clock pulse */
i2c_clk(I2C_CLOCK_HIGH);
i2c_delay(CLOCK_HIGH_TIME);
i2c_clk(I2C_CLOCK_LOW);
i2c_delay(CLOCK_LOW_TIME);
/* Switch off I2C to get bit */
i2c_disable();
i2c_dir_in();
i2c_delay(CLOCK_HIGH_TIME/2);
/* Get bit */
aBitByte |= i2c_getbit();
/* Enable I2C */
i2c_enable();
i2c_delay(CLOCK_LOW_TIME/2);
}
i2c_clk(I2C_CLOCK_HIGH);
i2c_delay(CLOCK_HIGH_TIME);
/*
* we leave the clock low, getbyte is usually followed
* by sendack/nack, they assume the clock to be low
*/
i2c_clk(I2C_CLOCK_LOW);
return aBitByte;
}
/*#---------------------------------------------------------------------------
*#
*# FUNCTION NAME: i2c_getack
*#
*# DESCRIPTION : checks if ack was received from i2c
*#
*#--------------------------------------------------------------------------*/
int
i2c_getack(void)
{
int ack = 1;
/*
* enable output
*/
i2c_dir_out();
/*
* Release data bus by setting
* data high
*/
i2c_data(I2C_DATA_HIGH);
/*
* enable input
*/
i2c_dir_in();
i2c_delay(CLOCK_HIGH_TIME/4);
/*
* generate ACK clock pulse
*/
i2c_clk(I2C_CLOCK_HIGH);
#if 0
/*
* Use PORT PB instead of I2C
* for input. (I2C not working)
*/
i2c_clk(1);
i2c_data(1);
/*
* switch off I2C
*/
i2c_data(1);
i2c_disable();
i2c_dir_in();
#endif
/*
* now wait for ack
*/
i2c_delay(CLOCK_HIGH_TIME/2);
/*
* check for ack
*/
if (i2c_getbit())
ack = 0;
i2c_delay(CLOCK_HIGH_TIME/2);
if (!ack) {
if (!i2c_getbit()) /* receiver pulled SDA low */
ack = 1;
i2c_delay(CLOCK_HIGH_TIME/2);
}
/*
* our clock is high now, make sure data is low
* before we enable our output. If we keep data high
* and enable output, we would generate a stop condition.
*/
#if 0
i2c_data(I2C_DATA_LOW);
/*
* end clock pulse
*/
i2c_enable();
i2c_dir_out();
#endif
i2c_clk(I2C_CLOCK_LOW);
i2c_delay(CLOCK_HIGH_TIME/4);
/*
* enable output
*/
i2c_dir_out();
/*
* remove ACK clock pulse
*/
i2c_data(I2C_DATA_HIGH);
i2c_delay(CLOCK_LOW_TIME/2);
return ack;
}
/*#---------------------------------------------------------------------------
*#
*# FUNCTION NAME: I2C::sendAck
*#
*# DESCRIPTION : Send ACK on received data
*#
*#--------------------------------------------------------------------------*/
void
i2c_sendack(void)
{
/*
* enable output
*/
i2c_delay(CLOCK_LOW_TIME);
i2c_dir_out();
/*
* pull data low to signal ack
*/
i2c_data(I2C_DATA_LOW);
/*
* generate clock pulse
*/
i2c_delay(CLOCK_HIGH_TIME/6);
i2c_clk(I2C_CLOCK_HIGH);
i2c_delay(CLOCK_HIGH_TIME);
i2c_clk(I2C_CLOCK_LOW);
i2c_delay(CLOCK_LOW_TIME/6);
/*
* reset data out
*/
i2c_data(I2C_DATA_HIGH);
i2c_delay(CLOCK_LOW_TIME);
i2c_dir_in();
}
/*#---------------------------------------------------------------------------
*#
*# FUNCTION NAME: i2c_sendnack
*#
*# DESCRIPTION : Sends NACK on received data
*#
*#--------------------------------------------------------------------------*/
void
i2c_sendnack(void)
{
/*
* enable output
*/
i2c_delay(CLOCK_LOW_TIME);
i2c_dir_out();
/*
* set data high
*/
i2c_data(I2C_DATA_HIGH);
/*
* generate clock pulse
*/
i2c_delay(CLOCK_HIGH_TIME/6);
i2c_clk(I2C_CLOCK_HIGH);
i2c_delay(CLOCK_HIGH_TIME);
i2c_clk(I2C_CLOCK_LOW);
i2c_delay(CLOCK_LOW_TIME);
i2c_dir_in();
}
/*#---------------------------------------------------------------------------
*#
*# FUNCTION NAME: i2c_write
*#
*# DESCRIPTION : Writes a value to an I2C device
*#
*#--------------------------------------------------------------------------*/
int
i2c_write(unsigned char theSlave, void *data, size_t nbytes)
{
int error, cntr = 3;
unsigned char bytes_wrote = 0;
unsigned char value;
unsigned long flags;
spin_lock_irqsave(&i2c_lock, flags);
do {
error = 0;
i2c_start();
/*
* send slave address
*/
i2c_outbyte((theSlave & 0xfe));
/*
* wait for ack
*/
if (!i2c_getack())
error = 1;
/*
* send data
*/
for (bytes_wrote = 0; bytes_wrote < nbytes; bytes_wrote++) {
memcpy(&value, data + bytes_wrote, sizeof value);
i2c_outbyte(value);
/*
* now it's time to wait for ack
*/
if (!i2c_getack())
error |= 4;
}
/*
* end byte stream
*/
i2c_stop();
} while (error && cntr--);
i2c_delay(CLOCK_LOW_TIME);
spin_unlock_irqrestore(&i2c_lock, flags);
return -error;
}
/*#---------------------------------------------------------------------------
*#
*# FUNCTION NAME: i2c_read
*#
*# DESCRIPTION : Reads a value from an I2C device
*#
*#--------------------------------------------------------------------------*/
int
i2c_read(unsigned char theSlave, void *data, size_t nbytes)
{
unsigned char b = 0;
unsigned char bytes_read = 0;
int error, cntr = 3;
unsigned long flags;
spin_lock_irqsave(&i2c_lock, flags);
do {
error = 0;
memset(data, 0, nbytes);
/*
* generate start condition
*/
i2c_start();
/*
* send slave address
*/
i2c_outbyte((theSlave | 0x01));
/*
* wait for ack
*/
if (!i2c_getack())
error = 1;
/*
* fetch data
*/
for (bytes_read = 0; bytes_read < nbytes; bytes_read++) {
b = i2c_inbyte();
memcpy(data + bytes_read, &b, sizeof b);
if (bytes_read < (nbytes - 1))
i2c_sendack();
}
/*
* last received byte needs to be nacked
* instead of acked
*/
i2c_sendnack();
/*
* end sequence
*/
i2c_stop();
} while (error && cntr--);
spin_unlock_irqrestore(&i2c_lock, flags);
return -error;
}
/*#---------------------------------------------------------------------------
*#
*# FUNCTION NAME: i2c_writereg
*#
*# DESCRIPTION : Writes a value to an I2C device
*#
*#--------------------------------------------------------------------------*/
int
i2c_writereg(unsigned char theSlave, unsigned char theReg,
unsigned char theValue)
{
int error, cntr = 3;
unsigned long flags;
spin_lock_irqsave(&i2c_lock, flags);
do {
error = 0;
i2c_start();
/*
* send slave address
*/
i2c_outbyte((theSlave & 0xfe));
/*
* wait for ack
*/
if(!i2c_getack())
error = 1;
/*
* now select register
*/
i2c_dir_out();
i2c_outbyte(theReg);
/*
* now it's time to wait for ack
*/
if(!i2c_getack())
error |= 2;
/*
* send register data
*/
i2c_outbyte(theValue);
/*
* now it's time to wait for ack
*/
if(!i2c_getack())
error |= 4;
/*
* end byte stream
*/
i2c_stop();
} while(error && cntr--);
i2c_delay(CLOCK_LOW_TIME);
spin_unlock_irqrestore(&i2c_lock, flags);
return -error;
}
/*#---------------------------------------------------------------------------
*#
*# FUNCTION NAME: i2c_readreg
*#
*# DESCRIPTION : Reads a value from the decoder registers.
*#
*#--------------------------------------------------------------------------*/
unsigned char
i2c_readreg(unsigned char theSlave, unsigned char theReg)
{
unsigned char b = 0;
int error, cntr = 3;
unsigned long flags;
spin_lock_irqsave(&i2c_lock, flags);
do {
error = 0;
/*
* generate start condition
*/
i2c_start();
/*
* send slave address
*/
i2c_outbyte((theSlave & 0xfe));
/*
* wait for ack
*/
if(!i2c_getack())
error = 1;
/*
* now select register
*/
i2c_dir_out();
i2c_outbyte(theReg);
/*
* now it's time to wait for ack
*/
if(!i2c_getack())
error |= 2;
/*
* repeat start condition
*/
i2c_delay(CLOCK_LOW_TIME);
i2c_start();
/*
* send slave address
*/
i2c_outbyte(theSlave | 0x01);
/*
* wait for ack
*/
if(!i2c_getack())
error |= 4;
/*
* fetch register
*/
b = i2c_inbyte();
/*
* last received byte needs to be nacked
* instead of acked
*/
i2c_sendnack();
/*
* end sequence
*/
i2c_stop();
} while(error && cntr--);
spin_unlock_irqrestore(&i2c_lock, flags);
return b;
}
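/*
* Usage sketch with made-up slave/register numbers: write 0x55 to
* register 0x01 of a slave at (8-bit) address 0x90, then read it back:
*
* if (i2c_writereg(0x90, 0x01, 0x55) == 0)
* value = i2c_readreg(0x90, 0x01);
*
* Note that i2c_readreg() has no error return of its own; callers that
* need one can use i2c_read()/i2c_write() instead.
*/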
static int
i2c_open(struct inode *inode, struct file *filp)
{
return 0;
}
static int
i2c_release(struct inode *inode, struct file *filp)
{
return 0;
}
/* Main device API. ioctl's to write or read to/from i2c registers.
*/
static long
i2c_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int ret;
if(_IOC_TYPE(cmd) != ETRAXI2C_IOCTYPE) {
return -ENOTTY;
}
switch (_IOC_NR(cmd)) {
case I2C_WRITEREG:
/* write to an i2c slave */
D(printk("i2cw %d %d %d\n",
I2C_ARGSLAVE(arg),
I2C_ARGREG(arg),
I2C_ARGVALUE(arg)));
mutex_lock(&i2c_mutex);
ret = i2c_writereg(I2C_ARGSLAVE(arg),
I2C_ARGREG(arg),
I2C_ARGVALUE(arg));
mutex_unlock(&i2c_mutex);
return ret;
case I2C_READREG:
{
unsigned char val;
/* read from an i2c slave */
D(printk("i2cr %d %d ",
I2C_ARGSLAVE(arg),
I2C_ARGREG(arg)));
mutex_lock(&i2c_mutex);
val = i2c_readreg(I2C_ARGSLAVE(arg), I2C_ARGREG(arg));
mutex_unlock(&i2c_mutex);
D(printk("= %d\n", val));
return val;
}
default:
return -EINVAL;
}
return 0;
}
static const struct file_operations i2c_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = i2c_ioctl,
.open = i2c_open,
.release = i2c_release,
.llseek = noop_llseek,
};
static int __init i2c_init(void)
{
static int res;
static int first = 1;
if (!first)
return res;
first = 0;
/* Setup and enable the DATA and CLK pins */
res = crisv32_io_get_name(&cris_i2c_data,
CONFIG_ETRAX_V32_I2C_DATA_PORT);
if (res < 0)
return res;
res = crisv32_io_get_name(&cris_i2c_clk, CONFIG_ETRAX_V32_I2C_CLK_PORT);
crisv32_io_set_dir(&cris_i2c_clk, crisv32_io_dir_out);
return res;
}
static int __init i2c_register(void)
{
int res;
res = i2c_init();
if (res < 0)
return res;
/* register char device */
res = register_chrdev(I2C_MAJOR, i2c_name, &i2c_fops);
if (res < 0) {
printk(KERN_ERR "i2c: couldn't get a major number.\n");
return res;
}
printk(KERN_INFO
"I2C driver v2.2, (c) 1999-2007 Axis Communications AB\n");
return 0;
}
/* this makes sure that i2c_init is called during boot */
module_init(i2c_register);
/****************** END OF FILE i2c.c ********************************/
| gpl-2.0 |
updateing/android_kernel_sony_msm8960 | drivers/leds/leds-msm-pdm.c | 3399 | 5732 | /* Copyright (c) 2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
#ifdef CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
/* Early-suspend level */
#define LED_SUSPEND_LEVEL 1
#endif
#define PDM_DUTY_MAXVAL BIT(16)
#define PDM_DUTY_REFVAL BIT(15)
struct pdm_led_data {
struct led_classdev cdev;
void __iomem *perph_base;
int pdm_offset;
#ifdef CONFIG_HAS_EARLYSUSPEND
struct early_suspend early_suspend;
#endif
};
static void msm_led_brightness_set_percent(struct pdm_led_data *led,
int duty_per)
{
u16 duty_val;
duty_val = PDM_DUTY_REFVAL - ((PDM_DUTY_MAXVAL * duty_per) / 100);
if (!duty_per)
duty_val--;
writel_relaxed(duty_val, led->perph_base + led->pdm_offset);
}
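/*
* Worked example of the duty computation: PDM_DUTY_MAXVAL is 0x10000
* and PDM_DUTY_REFVAL is 0x8000, so duty_per = 25 yields
* 0x8000 - 0x4000 = 0x4000; duty_per = 0 yields 0x8000 minus the extra
* decrement, i.e. 0x7FFF; and duty_per = 100 wraps the u16 result to
* 0x8000.
*/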
static void msm_led_brightness_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct pdm_led_data *led =
container_of(led_cdev, struct pdm_led_data, cdev);
msm_led_brightness_set_percent(led, (value * 100) / LED_FULL);
}
#ifdef CONFIG_PM_SLEEP
static int msm_led_pdm_suspend(struct device *dev)
{
struct pdm_led_data *led = dev_get_drvdata(dev);
msm_led_brightness_set_percent(led, 0);
return 0;
}
#ifdef CONFIG_HAS_EARLYSUSPEND
static void msm_led_pdm_early_suspend(struct early_suspend *h)
{
struct pdm_led_data *led = container_of(h,
struct pdm_led_data, early_suspend);
msm_led_pdm_suspend(led->cdev.dev->parent);
}
#endif
static const struct dev_pm_ops msm_led_pdm_pm_ops = {
#ifndef CONFIG_HAS_EARLYSUSPEND
.suspend = msm_led_pdm_suspend,
#endif
};
#endif
static int __devinit msm_pdm_led_probe(struct platform_device *pdev)
{
const struct led_info *pdata = pdev->dev.platform_data;
struct pdm_led_data *led;
struct resource *res, *ioregion;
u32 tcxo_pdm_ctl;
int rc;
if (!pdata) {
pr_err("platform data is invalid\n");
return -EINVAL;
}
if (pdev->id > 2) {
pr_err("pdm id is invalid\n");
return -EINVAL;
}
led = kzalloc(sizeof(struct pdm_led_data), GFP_KERNEL);
if (!led)
return -ENOMEM;
/* Enable runtime PM ops, start in ACTIVE mode */
rc = pm_runtime_set_active(&pdev->dev);
if (rc < 0)
dev_dbg(&pdev->dev, "unable to set runtime pm state\n");
pm_runtime_enable(&pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
pr_err("get resource failed\n");
rc = -EINVAL;
goto err_get_res;
}
ioregion = request_mem_region(res->start, resource_size(res),
pdev->name);
if (!ioregion) {
pr_err("request for mem region failed\n");
rc = -ENOMEM;
goto err_get_res;
}
led->perph_base = ioremap(res->start, resource_size(res));
if (!led->perph_base) {
pr_err("ioremap failed\n");
rc = -ENOMEM;
goto err_ioremap;
}
/* Pulse Density Modulation (PDM) ids start with 0 and
 * every PDM register takes 4 bytes. Offset 0 holds the
 * tcxo_pdm_ctl enable register, so PDM id N sits at
 * offset (N + 1) * 4.
 */
led->pdm_offset = ((pdev->id) + 1) * 4;
/* program tcxo_pdm_ctl register to enable pdm */
tcxo_pdm_ctl = readl_relaxed(led->perph_base);
tcxo_pdm_ctl |= (1 << pdev->id);
writel_relaxed(tcxo_pdm_ctl, led->perph_base);
/* Start with LED in off state */
msm_led_brightness_set_percent(led, 0);
led->cdev.brightness_set = msm_led_brightness_set;
led->cdev.name = pdata->name ? : "leds-msm-pdm";
rc = led_classdev_register(&pdev->dev, &led->cdev);
if (rc) {
pr_err("led class registration failed\n");
goto err_led_reg;
}
#ifdef CONFIG_HAS_EARLYSUSPEND
led->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN +
LED_SUSPEND_LEVEL;
led->early_suspend.suspend = msm_led_pdm_early_suspend;
register_early_suspend(&led->early_suspend);
#endif
platform_set_drvdata(pdev, led);
return 0;
err_led_reg:
iounmap(led->perph_base);
err_ioremap:
release_mem_region(res->start, resource_size(res));
err_get_res:
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_disable(&pdev->dev);
kfree(led);
return rc;
}
static int __devexit msm_pdm_led_remove(struct platform_device *pdev)
{
struct pdm_led_data *led = platform_get_drvdata(pdev);
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
#ifdef CONFIG_HAS_EARLYSUSPEND
unregister_early_suspend(&led->early_suspend);
#endif
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_disable(&pdev->dev);
led_classdev_unregister(&led->cdev);
msm_led_brightness_set_percent(led, 0);
iounmap(led->perph_base);
release_mem_region(res->start, resource_size(res));
kfree(led);
return 0;
}
static struct platform_driver msm_pdm_led_driver = {
.probe = msm_pdm_led_probe,
.remove = __devexit_p(msm_pdm_led_remove),
.driver = {
.name = "leds-msm-pdm",
.owner = THIS_MODULE,
#ifdef CONFIG_PM_SLEEP
.pm = &msm_led_pdm_pm_ops,
#endif
},
};
static int __init msm_pdm_led_init(void)
{
return platform_driver_register(&msm_pdm_led_driver);
}
module_init(msm_pdm_led_init);
static void __exit msm_pdm_led_exit(void)
{
platform_driver_unregister(&msm_pdm_led_driver);
}
module_exit(msm_pdm_led_exit);
MODULE_DESCRIPTION("MSM PDM LEDs driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:leds-msm-pdm");
| gpl-2.0 |
civato/Note8.0-StormBorn | drivers/hwmon/gl520sm.c | 3655 | 28635 | /*
gl520sm.c - Part of lm_sensors, Linux kernel modules for hardware
monitoring
Copyright (c) 1998, 1999 Frodo Looijaard <frodol@dds.nl>,
Kyösti Mälkki <kmalkki@cc.hut.fi>
Copyright (c) 2005 Maarten Deprez <maartendeprez@users.sourceforge.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/hwmon-vid.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
/* Type of the extra sensor */
static unsigned short extra_sensor_type;
module_param(extra_sensor_type, ushort, 0);
MODULE_PARM_DESC(extra_sensor_type, "Type of extra sensor (0=autodetect, 1=temperature, 2=voltage)");
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END };
/* Many GL520 constants specified below
One of the inputs can be configured as either temp or voltage.
That's why _TEMP2 and _IN4 access the same register
*/
/* The GL520 registers */
#define GL520_REG_CHIP_ID 0x00
#define GL520_REG_REVISION 0x01
#define GL520_REG_CONF 0x03
#define GL520_REG_MASK 0x11
#define GL520_REG_VID_INPUT 0x02
static const u8 GL520_REG_IN_INPUT[] = { 0x15, 0x14, 0x13, 0x0d, 0x0e };
static const u8 GL520_REG_IN_LIMIT[] = { 0x0c, 0x09, 0x0a, 0x0b };
static const u8 GL520_REG_IN_MIN[] = { 0x0c, 0x09, 0x0a, 0x0b, 0x18 };
static const u8 GL520_REG_IN_MAX[] = { 0x0c, 0x09, 0x0a, 0x0b, 0x17 };
static const u8 GL520_REG_TEMP_INPUT[] = { 0x04, 0x0e };
static const u8 GL520_REG_TEMP_MAX[] = { 0x05, 0x17 };
static const u8 GL520_REG_TEMP_MAX_HYST[] = { 0x06, 0x18 };
#define GL520_REG_FAN_INPUT 0x07
#define GL520_REG_FAN_MIN 0x08
#define GL520_REG_FAN_DIV 0x0f
#define GL520_REG_FAN_OFF GL520_REG_FAN_DIV
#define GL520_REG_ALARMS 0x12
#define GL520_REG_BEEP_MASK 0x10
#define GL520_REG_BEEP_ENABLE GL520_REG_CONF
/*
* Function declarations
*/
static int gl520_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static int gl520_detect(struct i2c_client *client, struct i2c_board_info *info);
static void gl520_init_client(struct i2c_client *client);
static int gl520_remove(struct i2c_client *client);
static int gl520_read_value(struct i2c_client *client, u8 reg);
static int gl520_write_value(struct i2c_client *client, u8 reg, u16 value);
static struct gl520_data *gl520_update_device(struct device *dev);
/* Driver data */
static const struct i2c_device_id gl520_id[] = {
{ "gl520sm", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, gl520_id);
static struct i2c_driver gl520_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "gl520sm",
},
.probe = gl520_probe,
.remove = gl520_remove,
.id_table = gl520_id,
.detect = gl520_detect,
.address_list = normal_i2c,
};
/* Client data */
struct gl520_data {
struct device *hwmon_dev;
struct mutex update_lock;
char valid; /* zero until the following fields are valid */
unsigned long last_updated; /* in jiffies */
u8 vid;
u8 vrm;
u8 in_input[5]; /* [0] = VDD */
u8 in_min[5]; /* [0] = VDD */
u8 in_max[5]; /* [0] = VDD */
u8 fan_input[2];
u8 fan_min[2];
u8 fan_div[2];
u8 fan_off;
u8 temp_input[2];
u8 temp_max[2];
u8 temp_max_hyst[2];
u8 alarms;
u8 beep_enable;
u8 beep_mask;
u8 alarm_mask;
u8 two_temps;
};
/*
* Sysfs stuff
*/
static ssize_t get_cpu_vid(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct gl520_data *data = gl520_update_device(dev);
return sprintf(buf, "%u\n", vid_from_reg(data->vid, data->vrm));
}
static DEVICE_ATTR(cpu0_vid, S_IRUGO, get_cpu_vid, NULL);
#define VDD_FROM_REG(val) (((val) * 95 + 2) / 4)
#define VDD_TO_REG(val) (SENSORS_LIMIT((((val) * 4 + 47) / 95), 0, 255))
#define IN_FROM_REG(val) ((val) * 19)
#define IN_TO_REG(val) (SENSORS_LIMIT((((val) + 9) / 19), 0, 255))
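/*
 * Worked example for the conversion macros above (sysfs voltages are in
 * millivolts; the raw values are illustrative only):
 *   IN_FROM_REG(100)  = 100 * 19           = 1900 mV
 *   IN_TO_REG(1900)   = (1900 + 9) / 19    = 100
 *   VDD_FROM_REG(100) = (100 * 95 + 2) / 4 = 2375 mV
 */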
static ssize_t get_in_input(struct device *dev, struct device_attribute *attr,
char *buf)
{
int n = to_sensor_dev_attr(attr)->index;
struct gl520_data *data = gl520_update_device(dev);
u8 r = data->in_input[n];
if (n == 0)
return sprintf(buf, "%d\n", VDD_FROM_REG(r));
else
return sprintf(buf, "%d\n", IN_FROM_REG(r));
}
static ssize_t get_in_min(struct device *dev, struct device_attribute *attr,
char *buf)
{
int n = to_sensor_dev_attr(attr)->index;
struct gl520_data *data = gl520_update_device(dev);
u8 r = data->in_min[n];
if (n == 0)
return sprintf(buf, "%d\n", VDD_FROM_REG(r));
else
return sprintf(buf, "%d\n", IN_FROM_REG(r));
}
static ssize_t get_in_max(struct device *dev, struct device_attribute *attr,
char *buf)
{
int n = to_sensor_dev_attr(attr)->index;
struct gl520_data *data = gl520_update_device(dev);
u8 r = data->in_max[n];
if (n == 0)
return sprintf(buf, "%d\n", VDD_FROM_REG(r));
else
return sprintf(buf, "%d\n", IN_FROM_REG(r));
}
static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct gl520_data *data = i2c_get_clientdata(client);
int n = to_sensor_dev_attr(attr)->index;
long v = simple_strtol(buf, NULL, 10);
u8 r;
mutex_lock(&data->update_lock);
if (n == 0)
r = VDD_TO_REG(v);
else
r = IN_TO_REG(v);
data->in_min[n] = r;
if (n < 4)
gl520_write_value(client, GL520_REG_IN_MIN[n],
(gl520_read_value(client, GL520_REG_IN_MIN[n])
& ~0xff) | r);
else
gl520_write_value(client, GL520_REG_IN_MIN[n], r);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct gl520_data *data = i2c_get_clientdata(client);
int n = to_sensor_dev_attr(attr)->index;
long v = simple_strtol(buf, NULL, 10);
u8 r;
if (n == 0)
r = VDD_TO_REG(v);
else
r = IN_TO_REG(v);
mutex_lock(&data->update_lock);
data->in_max[n] = r;
if (n < 4)
gl520_write_value(client, GL520_REG_IN_MAX[n],
(gl520_read_value(client, GL520_REG_IN_MAX[n])
& ~0xff00) | (r << 8));
else
gl520_write_value(client, GL520_REG_IN_MAX[n], r);
mutex_unlock(&data->update_lock);
return count;
}
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, get_in_input, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, get_in_input, NULL, 1);
static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, get_in_input, NULL, 2);
static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, get_in_input, NULL, 3);
static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, get_in_input, NULL, 4);
static SENSOR_DEVICE_ATTR(in0_min, S_IRUGO | S_IWUSR,
get_in_min, set_in_min, 0);
static SENSOR_DEVICE_ATTR(in1_min, S_IRUGO | S_IWUSR,
get_in_min, set_in_min, 1);
static SENSOR_DEVICE_ATTR(in2_min, S_IRUGO | S_IWUSR,
get_in_min, set_in_min, 2);
static SENSOR_DEVICE_ATTR(in3_min, S_IRUGO | S_IWUSR,
get_in_min, set_in_min, 3);
static SENSOR_DEVICE_ATTR(in4_min, S_IRUGO | S_IWUSR,
get_in_min, set_in_min, 4);
static SENSOR_DEVICE_ATTR(in0_max, S_IRUGO | S_IWUSR,
get_in_max, set_in_max, 0);
static SENSOR_DEVICE_ATTR(in1_max, S_IRUGO | S_IWUSR,
get_in_max, set_in_max, 1);
static SENSOR_DEVICE_ATTR(in2_max, S_IRUGO | S_IWUSR,
get_in_max, set_in_max, 2);
static SENSOR_DEVICE_ATTR(in3_max, S_IRUGO | S_IWUSR,
get_in_max, set_in_max, 3);
static SENSOR_DEVICE_ATTR(in4_max, S_IRUGO | S_IWUSR,
get_in_max, set_in_max, 4);
#define DIV_FROM_REG(val) (1 << (val))
#define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : (480000 / ((val) << (div))))
#define FAN_TO_REG(val, div) ((val) <= 0 ? 0 : SENSORS_LIMIT((480000 + ((val) << ((div) - 1))) / ((val) << (div)), 1, 255))
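/*
 * Worked example for the fan macros above (integer arithmetic, raw
 * values illustrative only):
 *   FAN_FROM_REG(64, 1) = 480000 / (64 << 1) = 3750 RPM
 *   FAN_TO_REG(3750, 1) = (480000 + (3750 << 0)) / (3750 << 1)
 *                       = 483750 / 7500 = 64
 */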
static ssize_t get_fan_input(struct device *dev, struct device_attribute *attr,
char *buf)
{
int n = to_sensor_dev_attr(attr)->index;
struct gl520_data *data = gl520_update_device(dev);
return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_input[n],
data->fan_div[n]));
}
static ssize_t get_fan_min(struct device *dev, struct device_attribute *attr,
char *buf)
{
int n = to_sensor_dev_attr(attr)->index;
struct gl520_data *data = gl520_update_device(dev);
return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_min[n],
data->fan_div[n]));
}
static ssize_t get_fan_div(struct device *dev, struct device_attribute *attr,
char *buf)
{
int n = to_sensor_dev_attr(attr)->index;
struct gl520_data *data = gl520_update_device(dev);
return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[n]));
}
static ssize_t get_fan_off(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct gl520_data *data = gl520_update_device(dev);
return sprintf(buf, "%d\n", data->fan_off);
}
static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct gl520_data *data = i2c_get_clientdata(client);
int n = to_sensor_dev_attr(attr)->index;
unsigned long v = simple_strtoul(buf, NULL, 10);
u8 r;
mutex_lock(&data->update_lock);
r = FAN_TO_REG(v, data->fan_div[n]);
data->fan_min[n] = r;
if (n == 0)
gl520_write_value(client, GL520_REG_FAN_MIN,
(gl520_read_value(client, GL520_REG_FAN_MIN)
& ~0xff00) | (r << 8));
else
gl520_write_value(client, GL520_REG_FAN_MIN,
(gl520_read_value(client, GL520_REG_FAN_MIN)
& ~0xff) | r);
data->beep_mask = gl520_read_value(client, GL520_REG_BEEP_MASK);
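/* A fan_min of 0 disables that fan's alarm: bit 0x20 covers fan1 and
 * 0x40 covers fan2; beep_mask is re-masked below so a disabled alarm
 * can never beep.
 */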
if (data->fan_min[n] == 0)
data->alarm_mask &= (n == 0) ? ~0x20 : ~0x40;
else
data->alarm_mask |= (n == 0) ? 0x20 : 0x40;
data->beep_mask &= data->alarm_mask;
gl520_write_value(client, GL520_REG_BEEP_MASK, data->beep_mask);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct gl520_data *data = i2c_get_clientdata(client);
int n = to_sensor_dev_attr(attr)->index;
unsigned long v = simple_strtoul(buf, NULL, 10);
u8 r;
switch (v) {
case 1: r = 0; break;
case 2: r = 1; break;
case 4: r = 2; break;
case 8: r = 3; break;
default:
dev_err(&client->dev, "fan_div value %ld not supported. Choose one of 1, 2, 4 or 8!\n", v);
return -EINVAL;
}
mutex_lock(&data->update_lock);
data->fan_div[n] = r;
if (n == 0)
gl520_write_value(client, GL520_REG_FAN_DIV,
(gl520_read_value(client, GL520_REG_FAN_DIV)
& ~0xc0) | (r << 6));
else
gl520_write_value(client, GL520_REG_FAN_DIV,
(gl520_read_value(client, GL520_REG_FAN_DIV)
& ~0x30) | (r << 4));
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t set_fan_off(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct gl520_data *data = i2c_get_clientdata(client);
u8 r = simple_strtoul(buf, NULL, 10) ? 1 : 0;
mutex_lock(&data->update_lock);
data->fan_off = r;
gl520_write_value(client, GL520_REG_FAN_OFF,
(gl520_read_value(client, GL520_REG_FAN_OFF)
& ~0x0c) | (r << 2));
mutex_unlock(&data->update_lock);
return count;
}
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, get_fan_input, NULL, 0);
static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, get_fan_input, NULL, 1);
static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO | S_IWUSR,
get_fan_min, set_fan_min, 0);
static SENSOR_DEVICE_ATTR(fan2_min, S_IRUGO | S_IWUSR,
get_fan_min, set_fan_min, 1);
static SENSOR_DEVICE_ATTR(fan1_div, S_IRUGO | S_IWUSR,
get_fan_div, set_fan_div, 0);
static SENSOR_DEVICE_ATTR(fan2_div, S_IRUGO | S_IWUSR,
get_fan_div, set_fan_div, 1);
static DEVICE_ATTR(fan1_off, S_IRUGO | S_IWUSR,
get_fan_off, set_fan_off);
#define TEMP_FROM_REG(val) (((val) - 130) * 1000)
#define TEMP_TO_REG(val) (SENSORS_LIMIT(((((val) < 0 ? (val) - 500 : (val) + 500) / 1000) + 130), 0, 255))
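/*
 * Worked example for the temperature macros above (milli-degrees C,
 * raw value illustrative only):
 *   TEMP_FROM_REG(155) = (155 - 130) * 1000           = 25000 (25 degC)
 *   TEMP_TO_REG(25000) = ((25000 + 500) / 1000) + 130 = 155
 */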
static ssize_t get_temp_input(struct device *dev, struct device_attribute *attr,
char *buf)
{
int n = to_sensor_dev_attr(attr)->index;
struct gl520_data *data = gl520_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_input[n]));
}
static ssize_t get_temp_max(struct device *dev, struct device_attribute *attr,
char *buf)
{
int n = to_sensor_dev_attr(attr)->index;
struct gl520_data *data = gl520_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[n]));
}
static ssize_t get_temp_max_hyst(struct device *dev, struct device_attribute
*attr, char *buf)
{
int n = to_sensor_dev_attr(attr)->index;
struct gl520_data *data = gl520_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max_hyst[n]));
}
static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct gl520_data *data = i2c_get_clientdata(client);
int n = to_sensor_dev_attr(attr)->index;
long v = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
data->temp_max[n] = TEMP_TO_REG(v);
gl520_write_value(client, GL520_REG_TEMP_MAX[n], data->temp_max[n]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t set_temp_max_hyst(struct device *dev, struct device_attribute
*attr, const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct gl520_data *data = i2c_get_clientdata(client);
int n = to_sensor_dev_attr(attr)->index;
long v = simple_strtol(buf, NULL, 10);
mutex_lock(&data->update_lock);
data->temp_max_hyst[n] = TEMP_TO_REG(v);
gl520_write_value(client, GL520_REG_TEMP_MAX_HYST[n],
data->temp_max_hyst[n]);
mutex_unlock(&data->update_lock);
return count;
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, get_temp_input, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, get_temp_input, NULL, 1);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR,
get_temp_max, set_temp_max, 0);
static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO | S_IWUSR,
get_temp_max, set_temp_max, 1);
static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR,
get_temp_max_hyst, set_temp_max_hyst, 0);
static SENSOR_DEVICE_ATTR(temp2_max_hyst, S_IRUGO | S_IWUSR,
get_temp_max_hyst, set_temp_max_hyst, 1);
static ssize_t get_alarms(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct gl520_data *data = gl520_update_device(dev);
return sprintf(buf, "%d\n", data->alarms);
}
static ssize_t get_beep_enable(struct device *dev, struct device_attribute
*attr, char *buf)
{
struct gl520_data *data = gl520_update_device(dev);
return sprintf(buf, "%d\n", data->beep_enable);
}
static ssize_t get_beep_mask(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct gl520_data *data = gl520_update_device(dev);
return sprintf(buf, "%d\n", data->beep_mask);
}
static ssize_t set_beep_enable(struct device *dev, struct device_attribute
*attr, const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct gl520_data *data = i2c_get_clientdata(client);
u8 r = simple_strtoul(buf, NULL, 10) ? 0 : 1;
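/* Config bit 2 means "beep disabled" (cf. gl520_update_device()), so
 * the user-visible value is inverted before being written back.
 */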
mutex_lock(&data->update_lock);
data->beep_enable = !r;
gl520_write_value(client, GL520_REG_BEEP_ENABLE,
(gl520_read_value(client, GL520_REG_BEEP_ENABLE)
& ~0x04) | (r << 2));
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t set_beep_mask(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct gl520_data *data = i2c_get_clientdata(client);
u8 r = simple_strtoul(buf, NULL, 10);
mutex_lock(&data->update_lock);
r &= data->alarm_mask;
data->beep_mask = r;
gl520_write_value(client, GL520_REG_BEEP_MASK, r);
mutex_unlock(&data->update_lock);
return count;
}
static DEVICE_ATTR(alarms, S_IRUGO, get_alarms, NULL);
static DEVICE_ATTR(beep_enable, S_IRUGO | S_IWUSR,
get_beep_enable, set_beep_enable);
static DEVICE_ATTR(beep_mask, S_IRUGO | S_IWUSR,
get_beep_mask, set_beep_mask);
static ssize_t get_alarm(struct device *dev, struct device_attribute *attr,
char *buf)
{
int bit_nr = to_sensor_dev_attr(attr)->index;
struct gl520_data *data = gl520_update_device(dev);
return sprintf(buf, "%d\n", (data->alarms >> bit_nr) & 1);
}
static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, get_alarm, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, get_alarm, NULL, 1);
static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, get_alarm, NULL, 2);
static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, get_alarm, NULL, 3);
static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, get_alarm, NULL, 4);
static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, get_alarm, NULL, 5);
static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, get_alarm, NULL, 6);
static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, get_alarm, NULL, 7);
static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, get_alarm, NULL, 7);
static ssize_t get_beep(struct device *dev, struct device_attribute *attr,
char *buf)
{
int bitnr = to_sensor_dev_attr(attr)->index;
struct gl520_data *data = gl520_update_device(dev);
return sprintf(buf, "%d\n", (data->beep_mask >> bitnr) & 1);
}
static ssize_t set_beep(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct gl520_data *data = i2c_get_clientdata(client);
int bitnr = to_sensor_dev_attr(attr)->index;
unsigned long bit;
bit = simple_strtoul(buf, NULL, 10);
if (bit & ~1)
return -EINVAL;
mutex_lock(&data->update_lock);
data->beep_mask = gl520_read_value(client, GL520_REG_BEEP_MASK);
if (bit)
data->beep_mask |= (1 << bitnr);
else
data->beep_mask &= ~(1 << bitnr);
gl520_write_value(client, GL520_REG_BEEP_MASK, data->beep_mask);
mutex_unlock(&data->update_lock);
return count;
}
static SENSOR_DEVICE_ATTR(in0_beep, S_IRUGO | S_IWUSR, get_beep, set_beep, 0);
static SENSOR_DEVICE_ATTR(in1_beep, S_IRUGO | S_IWUSR, get_beep, set_beep, 1);
static SENSOR_DEVICE_ATTR(in2_beep, S_IRUGO | S_IWUSR, get_beep, set_beep, 2);
static SENSOR_DEVICE_ATTR(in3_beep, S_IRUGO | S_IWUSR, get_beep, set_beep, 3);
static SENSOR_DEVICE_ATTR(temp1_beep, S_IRUGO | S_IWUSR, get_beep, set_beep, 4);
static SENSOR_DEVICE_ATTR(fan1_beep, S_IRUGO | S_IWUSR, get_beep, set_beep, 5);
static SENSOR_DEVICE_ATTR(fan2_beep, S_IRUGO | S_IWUSR, get_beep, set_beep, 6);
static SENSOR_DEVICE_ATTR(temp2_beep, S_IRUGO | S_IWUSR, get_beep, set_beep, 7);
static SENSOR_DEVICE_ATTR(in4_beep, S_IRUGO | S_IWUSR, get_beep, set_beep, 7);
static struct attribute *gl520_attributes[] = {
&dev_attr_cpu0_vid.attr,
&sensor_dev_attr_in0_input.dev_attr.attr,
&sensor_dev_attr_in0_min.dev_attr.attr,
&sensor_dev_attr_in0_max.dev_attr.attr,
&sensor_dev_attr_in0_alarm.dev_attr.attr,
&sensor_dev_attr_in0_beep.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in1_min.dev_attr.attr,
&sensor_dev_attr_in1_max.dev_attr.attr,
&sensor_dev_attr_in1_alarm.dev_attr.attr,
&sensor_dev_attr_in1_beep.dev_attr.attr,
&sensor_dev_attr_in2_input.dev_attr.attr,
&sensor_dev_attr_in2_min.dev_attr.attr,
&sensor_dev_attr_in2_max.dev_attr.attr,
&sensor_dev_attr_in2_alarm.dev_attr.attr,
&sensor_dev_attr_in2_beep.dev_attr.attr,
&sensor_dev_attr_in3_input.dev_attr.attr,
&sensor_dev_attr_in3_min.dev_attr.attr,
&sensor_dev_attr_in3_max.dev_attr.attr,
&sensor_dev_attr_in3_alarm.dev_attr.attr,
&sensor_dev_attr_in3_beep.dev_attr.attr,
&sensor_dev_attr_fan1_input.dev_attr.attr,
&sensor_dev_attr_fan1_min.dev_attr.attr,
&sensor_dev_attr_fan1_div.dev_attr.attr,
&sensor_dev_attr_fan1_alarm.dev_attr.attr,
&sensor_dev_attr_fan1_beep.dev_attr.attr,
&dev_attr_fan1_off.attr,
&sensor_dev_attr_fan2_input.dev_attr.attr,
&sensor_dev_attr_fan2_min.dev_attr.attr,
&sensor_dev_attr_fan2_div.dev_attr.attr,
&sensor_dev_attr_fan2_alarm.dev_attr.attr,
&sensor_dev_attr_fan2_beep.dev_attr.attr,
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
&sensor_dev_attr_temp1_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_beep.dev_attr.attr,
&dev_attr_alarms.attr,
&dev_attr_beep_enable.attr,
&dev_attr_beep_mask.attr,
NULL
};
static const struct attribute_group gl520_group = {
.attrs = gl520_attributes,
};
static struct attribute *gl520_attributes_opt[] = {
&sensor_dev_attr_in4_input.dev_attr.attr,
&sensor_dev_attr_in4_min.dev_attr.attr,
&sensor_dev_attr_in4_max.dev_attr.attr,
&sensor_dev_attr_in4_alarm.dev_attr.attr,
&sensor_dev_attr_in4_beep.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
&sensor_dev_attr_temp2_max.dev_attr.attr,
&sensor_dev_attr_temp2_max_hyst.dev_attr.attr,
&sensor_dev_attr_temp2_alarm.dev_attr.attr,
&sensor_dev_attr_temp2_beep.dev_attr.attr,
NULL
};
static const struct attribute_group gl520_group_opt = {
.attrs = gl520_attributes_opt,
};
/*
* Real code
*/
/* Return 0 if detection is successful, -ENODEV otherwise */
static int gl520_detect(struct i2c_client *client, struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
I2C_FUNC_SMBUS_WORD_DATA))
return -ENODEV;
/* Determine the chip type. */
if ((gl520_read_value(client, GL520_REG_CHIP_ID) != 0x20) ||
((gl520_read_value(client, GL520_REG_REVISION) & 0x7f) != 0x00) ||
((gl520_read_value(client, GL520_REG_CONF) & 0x80) != 0x00)) {
dev_dbg(&client->dev, "Unknown chip type, skipping\n");
return -ENODEV;
}
strlcpy(info->type, "gl520sm", I2C_NAME_SIZE);
return 0;
}
static int gl520_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct gl520_data *data;
int err;
data = kzalloc(sizeof(struct gl520_data), GFP_KERNEL);
if (!data) {
err = -ENOMEM;
goto exit;
}
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
/* Initialize the GL520SM chip */
gl520_init_client(client);
/* Register sysfs hooks */
if ((err = sysfs_create_group(&client->dev.kobj, &gl520_group)))
goto exit_free;
if (data->two_temps) {
if ((err = device_create_file(&client->dev,
&sensor_dev_attr_temp2_input.dev_attr))
|| (err = device_create_file(&client->dev,
&sensor_dev_attr_temp2_max.dev_attr))
|| (err = device_create_file(&client->dev,
&sensor_dev_attr_temp2_max_hyst.dev_attr))
|| (err = device_create_file(&client->dev,
&sensor_dev_attr_temp2_alarm.dev_attr))
|| (err = device_create_file(&client->dev,
&sensor_dev_attr_temp2_beep.dev_attr)))
goto exit_remove_files;
} else {
if ((err = device_create_file(&client->dev,
&sensor_dev_attr_in4_input.dev_attr))
|| (err = device_create_file(&client->dev,
&sensor_dev_attr_in4_min.dev_attr))
|| (err = device_create_file(&client->dev,
&sensor_dev_attr_in4_max.dev_attr))
|| (err = device_create_file(&client->dev,
&sensor_dev_attr_in4_alarm.dev_attr))
|| (err = device_create_file(&client->dev,
&sensor_dev_attr_in4_beep.dev_attr)))
goto exit_remove_files;
}
data->hwmon_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
goto exit_remove_files;
}
return 0;
exit_remove_files:
sysfs_remove_group(&client->dev.kobj, &gl520_group);
sysfs_remove_group(&client->dev.kobj, &gl520_group_opt);
exit_free:
kfree(data);
exit:
return err;
}
/* Called when we have found a new GL520SM. */
static void gl520_init_client(struct i2c_client *client)
{
struct gl520_data *data = i2c_get_clientdata(client);
u8 oldconf, conf;
conf = oldconf = gl520_read_value(client, GL520_REG_CONF);
data->alarm_mask = 0xff;
data->vrm = vid_which_vrm();
if (extra_sensor_type == 1)
conf &= ~0x10;
else if (extra_sensor_type == 2)
conf |= 0x10;
data->two_temps = !(conf & 0x10);
/* If IRQ# is disabled, we can safely force comparator mode */
if (!(conf & 0x20))
conf &= 0xf7;
/* Enable monitoring if needed */
conf |= 0x40;
if (conf != oldconf)
gl520_write_value(client, GL520_REG_CONF, conf);
gl520_update_device(&(client->dev));
if (data->fan_min[0] == 0)
data->alarm_mask &= ~0x20;
if (data->fan_min[1] == 0)
data->alarm_mask &= ~0x40;
data->beep_mask &= data->alarm_mask;
gl520_write_value(client, GL520_REG_BEEP_MASK, data->beep_mask);
}
static int gl520_remove(struct i2c_client *client)
{
struct gl520_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &gl520_group);
sysfs_remove_group(&client->dev.kobj, &gl520_group_opt);
kfree(data);
return 0;
}
/* Registers 0x07 to 0x0c are word-sized, others are byte-sized
GL520 uses a high-byte first convention */
static int gl520_read_value(struct i2c_client *client, u8 reg)
{
if ((reg >= 0x07) && (reg <= 0x0c))
return swab16(i2c_smbus_read_word_data(client, reg));
else
return i2c_smbus_read_byte_data(client, reg);
}
static int gl520_write_value(struct i2c_client *client, u8 reg, u16 value)
{
if ((reg >= 0x07) && (reg <= 0x0c))
return i2c_smbus_write_word_data(client, reg, swab16(value));
else
return i2c_smbus_write_byte_data(client, reg, value);
}
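/*
 * Byte-order example for the two helpers above (the register value is
 * illustrative only): if word register 0x08 holds 0x1234, the GL520
 * transmits the high byte 0x12 first, so i2c_smbus_read_word_data()
 * returns 0x3412 in SMBus low-byte-first order and swab16() restores
 * the chip's 0x1234.
 */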
static struct gl520_data *gl520_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct gl520_data *data = i2c_get_clientdata(client);
int val, i;
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + 2 * HZ) || !data->valid) {
dev_dbg(&client->dev, "Starting gl520sm update\n");
data->alarms = gl520_read_value(client, GL520_REG_ALARMS);
data->beep_mask = gl520_read_value(client, GL520_REG_BEEP_MASK);
data->vid = gl520_read_value(client, GL520_REG_VID_INPUT) & 0x1f;
for (i = 0; i < 4; i++) {
data->in_input[i] = gl520_read_value(client,
GL520_REG_IN_INPUT[i]);
val = gl520_read_value(client, GL520_REG_IN_LIMIT[i]);
data->in_min[i] = val & 0xff;
data->in_max[i] = (val >> 8) & 0xff;
}
val = gl520_read_value(client, GL520_REG_FAN_INPUT);
data->fan_input[0] = (val >> 8) & 0xff;
data->fan_input[1] = val & 0xff;
val = gl520_read_value(client, GL520_REG_FAN_MIN);
data->fan_min[0] = (val >> 8) & 0xff;
data->fan_min[1] = val & 0xff;
data->temp_input[0] = gl520_read_value(client,
GL520_REG_TEMP_INPUT[0]);
data->temp_max[0] = gl520_read_value(client,
GL520_REG_TEMP_MAX[0]);
data->temp_max_hyst[0] = gl520_read_value(client,
GL520_REG_TEMP_MAX_HYST[0]);
val = gl520_read_value(client, GL520_REG_FAN_DIV);
data->fan_div[0] = (val >> 6) & 0x03;
data->fan_div[1] = (val >> 4) & 0x03;
data->fan_off = (val >> 2) & 0x01;
data->alarms &= data->alarm_mask;
val = gl520_read_value(client, GL520_REG_CONF);
data->beep_enable = !((val >> 2) & 1);
/* Temp2 and Vin4 are the same input */
if (data->two_temps) {
data->temp_input[1] = gl520_read_value(client,
GL520_REG_TEMP_INPUT[1]);
data->temp_max[1] = gl520_read_value(client,
GL520_REG_TEMP_MAX[1]);
data->temp_max_hyst[1] = gl520_read_value(client,
GL520_REG_TEMP_MAX_HYST[1]);
} else {
data->in_input[4] = gl520_read_value(client,
GL520_REG_IN_INPUT[4]);
data->in_min[4] = gl520_read_value(client,
GL520_REG_IN_MIN[4]);
data->in_max[4] = gl520_read_value(client,
GL520_REG_IN_MAX[4]);
}
data->last_updated = jiffies;
data->valid = 1;
}
mutex_unlock(&data->update_lock);
return data;
}
static int __init sensors_gl520sm_init(void)
{
return i2c_add_driver(&gl520_driver);
}
static void __exit sensors_gl520sm_exit(void)
{
i2c_del_driver(&gl520_driver);
}
MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>, "
"Kyösti Mälkki <kmalkki@cc.hut.fi>, "
"Maarten Deprez <maartendeprez@users.sourceforge.net>");
MODULE_DESCRIPTION("GL520SM driver");
MODULE_LICENSE("GPL");
module_init(sensors_gl520sm_init);
module_exit(sensors_gl520sm_exit);
| gpl-2.0 |
SomethingExplosive/android_kernel_lge_hammerhead | drivers/mfd/lpc_sch.c | 4935 | 4779 | /*
* lpc_sch.c - LPC interface for Intel Poulsbo SCH
*
* LPC bridge function of the Intel SCH contains many other
* functional units, such as Interrupt controllers, Timers,
* Power Management, System Management, GPIO, RTC, and LPC
* Configuration Registers.
*
* Copyright (c) 2010 CompuLab Ltd
* Author: Denis Turischev <denis@compulab.co.il>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License 2 as published
* by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/mfd/core.h>
#define SMBASE 0x40
#define SMBUS_IO_SIZE 64
#define GPIOBASE 0x44
#define GPIO_IO_SIZE 64
#define WDTBASE 0x84
#define WDT_IO_SIZE 64
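/* Each *BASE config dword read in lpc_sch_probe() below is treated as an
 * I/O BAR: bit 31 is the decode-enable flag and the low 16 bits are the
 * port base, e.g. 0x80001180 -> enabled, base 0x1180 (example value only).
 */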
static struct resource smbus_sch_resource = {
.flags = IORESOURCE_IO,
};
static struct resource gpio_sch_resource = {
.flags = IORESOURCE_IO,
};
static struct mfd_cell lpc_sch_cells[] = {
{
.name = "isch_smbus",
.num_resources = 1,
.resources = &smbus_sch_resource,
},
{
.name = "sch_gpio",
.num_resources = 1,
.resources = &gpio_sch_resource,
},
};
static struct resource wdt_sch_resource = {
.flags = IORESOURCE_IO,
};
static struct mfd_cell tunnelcreek_cells[] = {
{
.name = "tunnelcreek_wdt",
.num_resources = 1,
.resources = &wdt_sch_resource,
},
};
static DEFINE_PCI_DEVICE_TABLE(lpc_sch_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SCH_LPC) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ITC_LPC) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, lpc_sch_ids);
static int __devinit lpc_sch_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
unsigned int base_addr_cfg;
unsigned short base_addr;
int i;
int ret;
pci_read_config_dword(dev, SMBASE, &base_addr_cfg);
if (!(base_addr_cfg & (1 << 31))) {
dev_err(&dev->dev, "Decode of the SMBus I/O range disabled\n");
return -ENODEV;
}
base_addr = (unsigned short)base_addr_cfg;
if (base_addr == 0) {
dev_err(&dev->dev, "I/O space for SMBus uninitialized\n");
return -ENODEV;
}
smbus_sch_resource.start = base_addr;
smbus_sch_resource.end = base_addr + SMBUS_IO_SIZE - 1;
pci_read_config_dword(dev, GPIOBASE, &base_addr_cfg);
if (!(base_addr_cfg & (1 << 31))) {
dev_err(&dev->dev, "Decode of the GPIO I/O range disabled\n");
return -ENODEV;
}
base_addr = (unsigned short)base_addr_cfg;
if (base_addr == 0) {
dev_err(&dev->dev, "I/O space for GPIO uninitialized\n");
return -ENODEV;
}
gpio_sch_resource.start = base_addr;
gpio_sch_resource.end = base_addr + GPIO_IO_SIZE - 1;
for (i = 0; i < ARRAY_SIZE(lpc_sch_cells); i++)
lpc_sch_cells[i].id = id->device;
ret = mfd_add_devices(&dev->dev, 0,
lpc_sch_cells, ARRAY_SIZE(lpc_sch_cells), NULL, 0);
if (ret)
goto out_dev;
if (id->device == PCI_DEVICE_ID_INTEL_ITC_LPC) {
pci_read_config_dword(dev, WDTBASE, &base_addr_cfg);
if (!(base_addr_cfg & (1 << 31))) {
dev_err(&dev->dev, "Decode of the WDT I/O range disabled\n");
ret = -ENODEV;
goto out_dev;
}
base_addr = (unsigned short)base_addr_cfg;
if (base_addr == 0) {
dev_err(&dev->dev, "I/O space for WDT uninitialized\n");
ret = -ENODEV;
goto out_dev;
}
wdt_sch_resource.start = base_addr;
wdt_sch_resource.end = base_addr + WDT_IO_SIZE - 1;
for (i = 0; i < ARRAY_SIZE(tunnelcreek_cells); i++)
tunnelcreek_cells[i].id = id->device;
ret = mfd_add_devices(&dev->dev, 0, tunnelcreek_cells,
ARRAY_SIZE(tunnelcreek_cells), NULL, 0);
}
return ret;
out_dev:
mfd_remove_devices(&dev->dev);
return ret;
}
static void __devexit lpc_sch_remove(struct pci_dev *dev)
{
mfd_remove_devices(&dev->dev);
}
static struct pci_driver lpc_sch_driver = {
.name = "lpc_sch",
.id_table = lpc_sch_ids,
.probe = lpc_sch_probe,
.remove = __devexit_p(lpc_sch_remove),
};
static int __init lpc_sch_init(void)
{
return pci_register_driver(&lpc_sch_driver);
}
static void __exit lpc_sch_exit(void)
{
pci_unregister_driver(&lpc_sch_driver);
}
module_init(lpc_sch_init);
module_exit(lpc_sch_exit);
MODULE_AUTHOR("Denis Turischev <denis@compulab.co.il>");
MODULE_DESCRIPTION("LPC interface for Intel Poulsbo SCH");
MODULE_LICENSE("GPL");
| gpl-2.0 |
zombah/android_kernel_nokia_msm8610 | drivers/mfd/lpc_sch.c | 4935 | 4779 | /*
* lpc_sch.c - LPC interface for Intel Poulsbo SCH
*
* LPC bridge function of the Intel SCH contains many other
* functional units, such as Interrupt controllers, Timers,
* Power Management, System Management, GPIO, RTC, and LPC
* Configuration Registers.
*
* Copyright (c) 2010 CompuLab Ltd
* Author: Denis Turischev <denis@compulab.co.il>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License 2 as published
* by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/mfd/core.h>
#define SMBASE 0x40
#define SMBUS_IO_SIZE 64
#define GPIOBASE 0x44
#define GPIO_IO_SIZE 64
#define WDTBASE 0x84
#define WDT_IO_SIZE 64
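/* Each *BASE config dword read in lpc_sch_probe() below is treated as an
 * I/O BAR: bit 31 is the decode-enable flag and the low 16 bits are the
 * port base, e.g. 0x80001180 -> enabled, base 0x1180 (example value only).
 */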
static struct resource smbus_sch_resource = {
.flags = IORESOURCE_IO,
};
static struct resource gpio_sch_resource = {
.flags = IORESOURCE_IO,
};
static struct mfd_cell lpc_sch_cells[] = {
{
.name = "isch_smbus",
.num_resources = 1,
.resources = &smbus_sch_resource,
},
{
.name = "sch_gpio",
.num_resources = 1,
.resources = &gpio_sch_resource,
},
};
static struct resource wdt_sch_resource = {
.flags = IORESOURCE_IO,
};
static struct mfd_cell tunnelcreek_cells[] = {
{
.name = "tunnelcreek_wdt",
.num_resources = 1,
.resources = &wdt_sch_resource,
},
};
static DEFINE_PCI_DEVICE_TABLE(lpc_sch_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SCH_LPC) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ITC_LPC) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, lpc_sch_ids);
static int __devinit lpc_sch_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
unsigned int base_addr_cfg;
unsigned short base_addr;
int i;
int ret;
pci_read_config_dword(dev, SMBASE, &base_addr_cfg);
if (!(base_addr_cfg & (1 << 31))) {
dev_err(&dev->dev, "Decode of the SMBus I/O range disabled\n");
return -ENODEV;
}
base_addr = (unsigned short)base_addr_cfg;
if (base_addr == 0) {
dev_err(&dev->dev, "I/O space for SMBus uninitialized\n");
return -ENODEV;
}
smbus_sch_resource.start = base_addr;
smbus_sch_resource.end = base_addr + SMBUS_IO_SIZE - 1;
pci_read_config_dword(dev, GPIOBASE, &base_addr_cfg);
if (!(base_addr_cfg & (1 << 31))) {
dev_err(&dev->dev, "Decode of the GPIO I/O range disabled\n");
return -ENODEV;
}
base_addr = (unsigned short)base_addr_cfg;
if (base_addr == 0) {
dev_err(&dev->dev, "I/O space for GPIO uninitialized\n");
return -ENODEV;
}
gpio_sch_resource.start = base_addr;
gpio_sch_resource.end = base_addr + GPIO_IO_SIZE - 1;
for (i = 0; i < ARRAY_SIZE(lpc_sch_cells); i++)
lpc_sch_cells[i].id = id->device;
ret = mfd_add_devices(&dev->dev, 0,
lpc_sch_cells, ARRAY_SIZE(lpc_sch_cells), NULL, 0);
if (ret)
goto out_dev;
if (id->device == PCI_DEVICE_ID_INTEL_ITC_LPC) {
pci_read_config_dword(dev, WDTBASE, &base_addr_cfg);
if (!(base_addr_cfg & (1 << 31))) {
dev_err(&dev->dev, "Decode of the WDT I/O range disabled\n");
ret = -ENODEV;
goto out_dev;
}
base_addr = (unsigned short)base_addr_cfg;
if (base_addr == 0) {
dev_err(&dev->dev, "I/O space for WDT uninitialized\n");
ret = -ENODEV;
goto out_dev;
}
wdt_sch_resource.start = base_addr;
wdt_sch_resource.end = base_addr + WDT_IO_SIZE - 1;
for (i = 0; i < ARRAY_SIZE(tunnelcreek_cells); i++)
tunnelcreek_cells[i].id = id->device;
ret = mfd_add_devices(&dev->dev, 0, tunnelcreek_cells,
ARRAY_SIZE(tunnelcreek_cells), NULL, 0);
}
return ret;
out_dev:
mfd_remove_devices(&dev->dev);
return ret;
}
static void __devexit lpc_sch_remove(struct pci_dev *dev)
{
mfd_remove_devices(&dev->dev);
}
static struct pci_driver lpc_sch_driver = {
.name = "lpc_sch",
.id_table = lpc_sch_ids,
.probe = lpc_sch_probe,
.remove = __devexit_p(lpc_sch_remove),
};
static int __init lpc_sch_init(void)
{
return pci_register_driver(&lpc_sch_driver);
}
static void __exit lpc_sch_exit(void)
{
pci_unregister_driver(&lpc_sch_driver);
}
module_init(lpc_sch_init);
module_exit(lpc_sch_exit);
MODULE_AUTHOR("Denis Turischev <denis@compulab.co.il>");
MODULE_DESCRIPTION("LPC interface for Intel Poulsbo SCH");
MODULE_LICENSE("GPL");
| gpl-2.0 |
12thmantec/linux-3.5 | sound/isa/wavefront/wavefront_synth.c | 5191 | 53541 | /* Copyright (C) by Paul Barton-Davis 1998-1999
*
* Some portions of this file are taken from work that is
* copyright (C) by Hannu Savolainen 1993-1996
*
* This program is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
* Version 2 (June 1991). See the "COPYING" file distributed with this software
* for more info.
*/
/*
* An ALSA lowlevel driver for Turtle Beach ICS2115 wavetable synth
* (Maui, Tropez, Tropez Plus)
*
* This driver supports the onboard wavetable synthesizer (an ICS2115),
* including patch, sample and program loading and unloading, conversion
* of GUS patches during loading, and full user-level access to all
* WaveFront commands. It tries to provide semi-intelligent patch and
* sample management as well.
*
*/
#include <asm/io.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/firmware.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/snd_wavefront.h>
#include <sound/initval.h>
static int wf_raw = 0; /* we normally check for "raw state" before firmware
loading. if non-zero, then during driver loading, the
state of the board is ignored, and we reset the
board and load the firmware anyway.
*/
static int fx_raw = 1; /* if this is zero, we'll leave the FX processor in
whatever state it is when the driver is loaded.
The default is to download the microprogram and
associated coefficients to set it up for "default"
operation, whatever that means.
*/
static int debug_default = 0; /* you can set this to control debugging
during driver loading. it takes any combination
of the WF_DEBUG_* flags defined in
wavefront.h
*/
/* XXX this needs to be made firmware and hardware version dependent */
#define DEFAULT_OSPATH "wavefront.os"
static char *ospath = DEFAULT_OSPATH; /* the firmware file name */
static int wait_usecs = 150; /* This magic number seems to give pretty optimal
throughput based on my limited experimentation.
If you want to play around with it and find a better
value, be my guest. Remember, the idea is to
get a number that causes us to just busy wait
for as many WaveFront commands as possible, without
coming up with a number so large that we hog the
whole CPU.
Specifically, with this number, out of about 134,000
status waits, only about 250 result in a sleep.
*/
static int sleep_interval = 100; /* each sleep lasts HZ/sleep_interval jiffies, i.e. 1/sleep_interval seconds */
static int sleep_tries = 50; /* number of times we'll try to sleep */
static int reset_time = 2; /* hundredths of a second we wait after a HW
reset for the expected interrupt.
*/
static int ramcheck_time = 20; /* time in seconds to wait while ROM code
checks on-board RAM.
*/
static int osrun_time = 10; /* time in seconds we wait for the OS to
start running.
*/
module_param(wf_raw, int, 0444);
MODULE_PARM_DESC(wf_raw, "if non-zero, assume that we need to boot the OS");
module_param(fx_raw, int, 0444);
MODULE_PARM_DESC(fx_raw, "if non-zero, assume that the FX process needs help");
module_param(debug_default, int, 0444);
MODULE_PARM_DESC(debug_default, "debug parameters for card initialization");
module_param(wait_usecs, int, 0444);
MODULE_PARM_DESC(wait_usecs, "how long to wait without sleeping, usecs");
module_param(sleep_interval, int, 0444);
MODULE_PARM_DESC(sleep_interval, "how long to sleep when waiting for reply");
module_param(sleep_tries, int, 0444);
MODULE_PARM_DESC(sleep_tries, "how many times to try sleeping during a wait");
module_param(ospath, charp, 0444);
MODULE_PARM_DESC(ospath, "pathname to processed ICS2115 OS firmware");
module_param(reset_time, int, 0444);
MODULE_PARM_DESC(reset_time, "how long to wait for a reset to take effect");
module_param(ramcheck_time, int, 0444);
MODULE_PARM_DESC(ramcheck_time, "how many seconds to wait for the RAM test");
module_param(osrun_time, int, 0444);
MODULE_PARM_DESC(osrun_time, "how many seconds to wait for the ICS2115 OS");
/* if WF_DEBUG not defined, no run-time debugging messages will
be available via the debug flag setting. Given the current
beta state of the driver, this will remain set until a future
version.
*/
#define WF_DEBUG 1
#ifdef WF_DEBUG
#define DPRINT(cond, ...) \
if ((dev->debug & (cond)) == (cond)) { \
snd_printk (__VA_ARGS__); \
}
#else
#define DPRINT(cond, args...)
#endif /* WF_DEBUG */
#define LOGNAME "WaveFront: "
/* bitmasks for WaveFront status port value */
#define STAT_RINTR_ENABLED 0x01
#define STAT_CAN_READ 0x02
#define STAT_INTR_READ 0x04
#define STAT_WINTR_ENABLED 0x10
#define STAT_CAN_WRITE 0x20
#define STAT_INTR_WRITE 0x40
static int wavefront_delete_sample (snd_wavefront_t *, int sampnum);
static int wavefront_find_free_sample (snd_wavefront_t *);
struct wavefront_command {
int cmd;
char *action;
unsigned int read_cnt;
unsigned int write_cnt;
int need_ack;
};
static struct {
int errno;
const char *errstr;
} wavefront_errors[] = {
{ 0x01, "Bad sample number" },
{ 0x02, "Out of sample memory" },
{ 0x03, "Bad patch number" },
{ 0x04, "Error in number of voices" },
{ 0x06, "Sample load already in progress" },
{ 0x0B, "No sample load request pending" },
{ 0x0E, "Bad MIDI channel number" },
{ 0x10, "Download Record Error" },
{ 0x80, "Success" },
{ 0x0 }
};
#define NEEDS_ACK 1
static struct wavefront_command wavefront_commands[] = {
{ WFC_SET_SYNTHVOL, "set synthesizer volume", 0, 1, NEEDS_ACK },
{ WFC_GET_SYNTHVOL, "get synthesizer volume", 1, 0, 0},
{ WFC_SET_NVOICES, "set number of voices", 0, 1, NEEDS_ACK },
{ WFC_GET_NVOICES, "get number of voices", 1, 0, 0 },
{ WFC_SET_TUNING, "set synthesizer tuning", 0, 2, NEEDS_ACK },
{ WFC_GET_TUNING, "get synthesizer tuning", 2, 0, 0 },
{ WFC_DISABLE_CHANNEL, "disable synth channel", 0, 1, NEEDS_ACK },
{ WFC_ENABLE_CHANNEL, "enable synth channel", 0, 1, NEEDS_ACK },
{ WFC_GET_CHANNEL_STATUS, "get synth channel status", 3, 0, 0 },
{ WFC_MISYNTH_OFF, "disable midi-in to synth", 0, 0, NEEDS_ACK },
{ WFC_MISYNTH_ON, "enable midi-in to synth", 0, 0, NEEDS_ACK },
{ WFC_VMIDI_ON, "enable virtual midi mode", 0, 0, NEEDS_ACK },
{ WFC_VMIDI_OFF, "disable virtual midi mode", 0, 0, NEEDS_ACK },
{ WFC_MIDI_STATUS, "report midi status", 1, 0, 0 },
{ WFC_FIRMWARE_VERSION, "report firmware version", 2, 0, 0 },
{ WFC_HARDWARE_VERSION, "report hardware version", 2, 0, 0 },
{ WFC_GET_NSAMPLES, "report number of samples", 2, 0, 0 },
{ WFC_INSTOUT_LEVELS, "report instantaneous output levels", 7, 0, 0 },
{ WFC_PEAKOUT_LEVELS, "report peak output levels", 7, 0, 0 },
{ WFC_DOWNLOAD_SAMPLE, "download sample",
0, WF_SAMPLE_BYTES, NEEDS_ACK },
{ WFC_DOWNLOAD_BLOCK, "download block", 0, 0, NEEDS_ACK},
{ WFC_DOWNLOAD_SAMPLE_HEADER, "download sample header",
0, WF_SAMPLE_HDR_BYTES, NEEDS_ACK },
{ WFC_UPLOAD_SAMPLE_HEADER, "upload sample header", 13, 2, 0 },
/* This command requires a variable number of bytes to be written.
There is a hack in snd_wavefront_cmd() to support this. The actual
count is passed in as the read buffer ptr, cast appropriately.
Ugh.
*/
{ WFC_DOWNLOAD_MULTISAMPLE, "download multisample", 0, 0, NEEDS_ACK },
/* This one is a hack as well. We just read the first byte of the
response, don't fetch an ACK, and leave the rest to the
calling function. Ugly, ugly, ugly.
*/
{ WFC_UPLOAD_MULTISAMPLE, "upload multisample", 2, 1, 0 },
{ WFC_DOWNLOAD_SAMPLE_ALIAS, "download sample alias",
0, WF_ALIAS_BYTES, NEEDS_ACK },
{ WFC_UPLOAD_SAMPLE_ALIAS, "upload sample alias", WF_ALIAS_BYTES, 2, 0},
{ WFC_DELETE_SAMPLE, "delete sample", 0, 2, NEEDS_ACK },
{ WFC_IDENTIFY_SAMPLE_TYPE, "identify sample type", 5, 2, 0 },
{ WFC_UPLOAD_SAMPLE_PARAMS, "upload sample parameters" },
{ WFC_REPORT_FREE_MEMORY, "report free memory", 4, 0, 0 },
{ WFC_DOWNLOAD_PATCH, "download patch", 0, 134, NEEDS_ACK },
{ WFC_UPLOAD_PATCH, "upload patch", 132, 2, 0 },
{ WFC_DOWNLOAD_PROGRAM, "download program", 0, 33, NEEDS_ACK },
{ WFC_UPLOAD_PROGRAM, "upload program", 32, 1, 0 },
{ WFC_DOWNLOAD_EDRUM_PROGRAM, "download enhanced drum program", 0, 9,
NEEDS_ACK},
{ WFC_UPLOAD_EDRUM_PROGRAM, "upload enhanced drum program", 8, 1, 0},
{ WFC_SET_EDRUM_CHANNEL, "set enhanced drum program channel",
0, 1, NEEDS_ACK },
{ WFC_DISABLE_DRUM_PROGRAM, "disable drum program", 0, 1, NEEDS_ACK },
{ WFC_REPORT_CHANNEL_PROGRAMS, "report channel program numbers",
32, 0, 0 },
{ WFC_NOOP, "the no-op command", 0, 0, NEEDS_ACK },
{ 0x00 }
};
static const char *
wavefront_errorstr (int errnum)
{
int i;
for (i = 0; wavefront_errors[i].errstr; i++) {
if (wavefront_errors[i].errno == errnum) {
return wavefront_errors[i].errstr;
}
}
return "Unknown WaveFront error";
}
static struct wavefront_command *
wavefront_get_command (int cmd)
{
int i;
for (i = 0; wavefront_commands[i].cmd != 0; i++) {
if (cmd == wavefront_commands[i].cmd) {
return &wavefront_commands[i];
}
}
return NULL;
}
static inline int
wavefront_status (snd_wavefront_t *dev)
{
return inb (dev->status_port);
}
static int
wavefront_sleep (int limit)
{
schedule_timeout_interruptible(limit);
return signal_pending(current);
}
static int
wavefront_wait (snd_wavefront_t *dev, int mask)
{
int i;
/* Spin for a short period of time, because >99% of all
requests to the WaveFront can be serviced inline like this.
*/
for (i = 0; i < wait_usecs; i += 5) {
if (wavefront_status (dev) & mask) {
return 1;
}
udelay(5);
}
for (i = 0; i < sleep_tries; i++) {
if (wavefront_status (dev) & mask) {
return 1;
}
if (wavefront_sleep (HZ/sleep_interval)) {
return (0);
}
}
return (0);
}
static int
wavefront_read (snd_wavefront_t *dev)
{
if (wavefront_wait (dev, STAT_CAN_READ))
return inb (dev->data_port);
DPRINT (WF_DEBUG_DATA, "read timeout.\n");
return -1;
}
static int
wavefront_write (snd_wavefront_t *dev, unsigned char data)
{
if (wavefront_wait (dev, STAT_CAN_WRITE)) {
outb (data, dev->data_port);
return 0;
}
DPRINT (WF_DEBUG_DATA, "write timeout.\n");
return -1;
}
int
snd_wavefront_cmd (snd_wavefront_t *dev,
int cmd, unsigned char *rbuf, unsigned char *wbuf)
{
int ack;
unsigned int i;
int c;
struct wavefront_command *wfcmd;
if ((wfcmd = wavefront_get_command (cmd)) == NULL) {
snd_printk ("command 0x%x not supported.\n",
cmd);
return 1;
}
/* Hack to handle the one variable-size write command. See
wavefront_send_multisample() for the other half of this
gross and ugly strategy.
*/
if (cmd == WFC_DOWNLOAD_MULTISAMPLE) {
wfcmd->write_cnt = (unsigned long) rbuf;
rbuf = NULL;
}
DPRINT (WF_DEBUG_CMD, "0x%x [%s] (%d,%d,%d)\n",
cmd, wfcmd->action, wfcmd->read_cnt,
wfcmd->write_cnt, wfcmd->need_ack);
if (wavefront_write (dev, cmd)) {
DPRINT ((WF_DEBUG_IO|WF_DEBUG_CMD), "cannot request "
"0x%x [%s].\n",
cmd, wfcmd->action);
return 1;
}
if (wfcmd->write_cnt > 0) {
DPRINT (WF_DEBUG_DATA, "writing %d bytes "
"for 0x%x\n",
wfcmd->write_cnt, cmd);
for (i = 0; i < wfcmd->write_cnt; i++) {
if (wavefront_write (dev, wbuf[i])) {
DPRINT (WF_DEBUG_IO, "bad write for byte "
"%d of 0x%x [%s].\n",
i, cmd, wfcmd->action);
return 1;
}
DPRINT (WF_DEBUG_DATA, "write[%d] = 0x%x\n",
i, wbuf[i]);
}
}
if (wfcmd->read_cnt > 0) {
DPRINT (WF_DEBUG_DATA, "reading %d ints "
"for 0x%x\n",
wfcmd->read_cnt, cmd);
for (i = 0; i < wfcmd->read_cnt; i++) {
if ((c = wavefront_read (dev)) == -1) {
DPRINT (WF_DEBUG_IO, "bad read for byte "
"%d of 0x%x [%s].\n",
i, cmd, wfcmd->action);
return 1;
}
/* Now handle errors. Lots of special cases here */
if (c == 0xff) {
if ((c = wavefront_read (dev)) == -1) {
DPRINT (WF_DEBUG_IO, "bad read for "
"error byte at "
"read byte %d "
"of 0x%x [%s].\n",
i, cmd,
wfcmd->action);
return 1;
}
/* Can you believe this madness ? */
if (c == 1 &&
wfcmd->cmd == WFC_IDENTIFY_SAMPLE_TYPE) {
rbuf[0] = WF_ST_EMPTY;
return (0);
} else if (c == 3 &&
wfcmd->cmd == WFC_UPLOAD_PATCH) {
return 3;
} else if (c == 1 &&
wfcmd->cmd == WFC_UPLOAD_PROGRAM) {
return 1;
} else {
DPRINT (WF_DEBUG_IO, "error %d (%s) "
"during "
"read for byte "
"%d of 0x%x "
"[%s].\n",
c,
wavefront_errorstr (c),
i, cmd,
wfcmd->action);
return 1;
}
} else {
rbuf[i] = c;
}
DPRINT (WF_DEBUG_DATA, "read[%d] = 0x%x\n",i, rbuf[i]);
}
}
if ((wfcmd->read_cnt == 0 && wfcmd->write_cnt == 0) || wfcmd->need_ack) {
DPRINT (WF_DEBUG_CMD, "reading ACK for 0x%x\n", cmd);
/* Some commands need an ACK, but return zero instead
of the standard value.
*/
if ((ack = wavefront_read (dev)) == 0) {
ack = WF_ACK;
}
if (ack != WF_ACK) {
if (ack == -1) {
DPRINT (WF_DEBUG_IO, "cannot read ack for "
"0x%x [%s].\n",
cmd, wfcmd->action);
return 1;
} else {
int err = -1; /* something unknown */
if (ack == 0xff) { /* explicit error */
if ((err = wavefront_read (dev)) == -1) {
DPRINT (WF_DEBUG_DATA,
"cannot read err "
"for 0x%x [%s].\n",
cmd, wfcmd->action);
}
}
DPRINT (WF_DEBUG_IO, "0x%x [%s] "
"failed (0x%x, 0x%x, %s)\n",
cmd, wfcmd->action, ack, err,
wavefront_errorstr (err));
return -err;
}
}
DPRINT (WF_DEBUG_DATA, "ack received "
"for 0x%x [%s]\n",
cmd, wfcmd->action);
} else {
DPRINT (WF_DEBUG_CMD, "0x%x [%s] does not need "
"ACK (%d,%d,%d)\n",
cmd, wfcmd->action, wfcmd->read_cnt,
wfcmd->write_cnt, wfcmd->need_ack);
}
return 0;
}
/***********************************************************************
WaveFront data munging
Things here are weird. All data written to the board cannot
have its most significant bit set. Any data item with values
potentially > 0x7F (127) must be split across multiple bytes.
Sometimes, we need to munge numeric values that are represented on
the x86 side as 8-32 bit values. Sometimes, we need to munge data
that is represented on the x86 side as an array of bytes. The most
efficient approach to handling both cases seems to be to use 2
different functions for munging and 2 for de-munging. This avoids
weird casting and worrying about bit-level offsets.
**********************************************************************/
static unsigned char *
munge_int32 (unsigned int src,
unsigned char *dst,
unsigned int dst_size)
{
unsigned int i;
for (i = 0; i < dst_size; i++) {
*dst = src & 0x7F; /* mask high bit of LSB */
src = src >> 7; /* shift right 7 bits (not a rotate) */
/* remaining high bits are consumed on later iterations */
dst++;
}
return dst;
}
static int
demunge_int32 (unsigned char* src, int src_size)
{
int i;
int outval = 0;
for (i = src_size - 1; i >= 0; i--) {
outval = (outval << 7) + src[i];
}
return outval;
}
static
unsigned char *
munge_buf (unsigned char *src, unsigned char *dst, unsigned int dst_size)
{
unsigned int i;
unsigned int last = dst_size / 2;
for (i = 0; i < last; i++) {
*dst++ = src[i] & 0x7f;
*dst++ = src[i] >> 7;
}
return dst;
}
static
unsigned char *
demunge_buf (unsigned char *src, unsigned char *dst, unsigned int src_bytes)
{
int i;
unsigned char *end = src + src_bytes;
/* NOTE: src and dst *CAN* point to the same address */
for (i = 0; src != end; i++) {
dst[i] = *src++;
dst[i] |= (*src++)<<7;
}
return dst;
}
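/*
 * Round-trip examples for the munge/demunge helpers above (all values
 * illustrative only):
 *   munge_int32 (300, buf, 2) -> buf = { 0x2C, 0x02 } (300 = 2*128 + 44)
 *   demunge_int32 (buf, 2)    -> (0x02 << 7) + 0x2C = 300
 *   munge_buf splits each source byte the same way: 0xC5 -> { 0x45, 0x01 },
 *   which demunge_buf recombines as 0x45 | (0x01 << 7) = 0xC5.
 */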
/***********************************************************************
WaveFront: sample, patch and program management.
***********************************************************************/
static int
wavefront_delete_sample (snd_wavefront_t *dev, int sample_num)
{
unsigned char wbuf[2];
int x;
wbuf[0] = sample_num & 0x7f;
wbuf[1] = sample_num >> 7;
if ((x = snd_wavefront_cmd (dev, WFC_DELETE_SAMPLE, NULL, wbuf)) == 0) {
dev->sample_status[sample_num] = WF_ST_EMPTY;
}
return x;
}
static int
wavefront_get_sample_status (snd_wavefront_t *dev, int assume_rom)
{
int i;
unsigned char rbuf[32], wbuf[32];
unsigned int sc_real, sc_alias, sc_multi;
/* check sample status */
if (snd_wavefront_cmd (dev, WFC_GET_NSAMPLES, rbuf, wbuf)) {
snd_printk ("cannot request sample count.\n");
return -1;
}
sc_real = sc_alias = sc_multi = dev->samples_used = 0;
for (i = 0; i < WF_MAX_SAMPLE; i++) {
wbuf[0] = i & 0x7f;
wbuf[1] = i >> 7;
if (snd_wavefront_cmd (dev, WFC_IDENTIFY_SAMPLE_TYPE, rbuf, wbuf)) {
snd_printk(KERN_WARNING "cannot identify sample "
"type of slot %d\n", i);
dev->sample_status[i] = WF_ST_EMPTY;
continue;
}
dev->sample_status[i] = (WF_SLOT_FILLED|rbuf[0]);
if (assume_rom) {
dev->sample_status[i] |= WF_SLOT_ROM;
}
switch (rbuf[0] & WF_ST_MASK) {
case WF_ST_SAMPLE:
sc_real++;
break;
case WF_ST_MULTISAMPLE:
sc_multi++;
break;
case WF_ST_ALIAS:
sc_alias++;
break;
case WF_ST_EMPTY:
break;
default:
snd_printk ("unknown sample type for "
"slot %d (0x%x)\n",
i, rbuf[0]);
}
if (rbuf[0] != WF_ST_EMPTY) {
dev->samples_used++;
}
}
snd_printk ("%d samples used (%d real, %d aliases, %d multi), "
"%d empty\n", dev->samples_used, sc_real, sc_alias, sc_multi,
WF_MAX_SAMPLE - dev->samples_used);
return (0);
}
static int
wavefront_get_patch_status (snd_wavefront_t *dev)
{
unsigned char patchbuf[WF_PATCH_BYTES];
unsigned char patchnum[2];
wavefront_patch *p;
int i, x, cnt, cnt2;
for (i = 0; i < WF_MAX_PATCH; i++) {
patchnum[0] = i & 0x7f;
patchnum[1] = i >> 7;
if ((x = snd_wavefront_cmd (dev, WFC_UPLOAD_PATCH, patchbuf,
patchnum)) == 0) {
dev->patch_status[i] |= WF_SLOT_FILLED;
p = (wavefront_patch *) patchbuf;
dev->sample_status
[p->sample_number|(p->sample_msb<<7)] |=
WF_SLOT_USED;
} else if (x == 3) { /* Bad patch number */
dev->patch_status[i] = 0;
} else {
snd_printk ("upload patch "
"error 0x%x\n", x);
dev->patch_status[i] = 0;
return 1;
}
}
/* program status has already filled in slot_used bits */
for (i = 0, cnt = 0, cnt2 = 0; i < WF_MAX_PATCH; i++) {
if (dev->patch_status[i] & WF_SLOT_FILLED) {
cnt++;
}
if (dev->patch_status[i] & WF_SLOT_USED) {
cnt2++;
}
}
snd_printk ("%d patch slots filled, %d in use\n", cnt, cnt2);
return (0);
}
static int
wavefront_get_program_status (snd_wavefront_t *dev)
{
unsigned char progbuf[WF_PROGRAM_BYTES];
wavefront_program prog;
unsigned char prognum;
int i, x, l, cnt;
for (i = 0; i < WF_MAX_PROGRAM; i++) {
prognum = i;
if ((x = snd_wavefront_cmd (dev, WFC_UPLOAD_PROGRAM, progbuf,
&prognum)) == 0) {
dev->prog_status[i] |= WF_SLOT_USED;
demunge_buf (progbuf, (unsigned char *) &prog,
WF_PROGRAM_BYTES);
for (l = 0; l < WF_NUM_LAYERS; l++) {
if (prog.layer[l].mute) {
dev->patch_status
[prog.layer[l].patch_number] |=
WF_SLOT_USED;
}
}
} else if (x == 1) { /* Bad program number */
dev->prog_status[i] = 0;
} else {
snd_printk ("upload program "
"error 0x%x\n", x);
dev->prog_status[i] = 0;
}
}
for (i = 0, cnt = 0; i < WF_MAX_PROGRAM; i++) {
if (dev->prog_status[i]) {
cnt++;
}
}
snd_printk ("%d programs slots in use\n", cnt);
return (0);
}
static int
wavefront_send_patch (snd_wavefront_t *dev, wavefront_patch_info *header)
{
unsigned char buf[WF_PATCH_BYTES+2];
unsigned char *bptr;
DPRINT (WF_DEBUG_LOAD_PATCH, "downloading patch %d\n",
header->number);
dev->patch_status[header->number] |= WF_SLOT_FILLED;
bptr = buf;
bptr = munge_int32 (header->number, buf, 2);
munge_buf ((unsigned char *)&header->hdr.p, bptr, WF_PATCH_BYTES);
if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_PATCH, NULL, buf)) {
snd_printk ("download patch failed\n");
return -(EIO);
}
return (0);
}
static int
wavefront_send_program (snd_wavefront_t *dev, wavefront_patch_info *header)
{
unsigned char buf[WF_PROGRAM_BYTES+1];
int i;
DPRINT (WF_DEBUG_LOAD_PATCH, "downloading program %d\n",
header->number);
dev->prog_status[header->number] = WF_SLOT_USED;
/* XXX need to zero existing SLOT_USED bit for program_status[i]
where `i' is the program that's being (potentially) overwritten.
*/
for (i = 0; i < WF_NUM_LAYERS; i++) {
if (header->hdr.pr.layer[i].mute) {
dev->patch_status[header->hdr.pr.layer[i].patch_number] |=
WF_SLOT_USED;
/* XXX need to mark SLOT_USED for sample used by
patch_number, but this means we have to load it. Ick.
*/
}
}
buf[0] = header->number;
munge_buf ((unsigned char *)&header->hdr.pr, &buf[1], WF_PROGRAM_BYTES);
if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_PROGRAM, NULL, buf)) {
snd_printk ("download patch failed\n");
return -(EIO);
}
return (0);
}
static int
wavefront_freemem (snd_wavefront_t *dev)
{
char rbuf[8];
if (snd_wavefront_cmd (dev, WFC_REPORT_FREE_MEMORY, rbuf, NULL)) {
snd_printk ("can't get memory stats.\n");
return -1;
} else {
return demunge_int32 (rbuf, 4);
}
}
static int
wavefront_send_sample (snd_wavefront_t *dev,
wavefront_patch_info *header,
u16 __user *dataptr,
int data_is_unsigned)
{
/* samples are downloaded via a 16-bit wide i/o port
(you could think of it as 2 adjacent 8-bit wide ports
but it's less efficient that way). therefore, all
the blocksizes and so forth listed in the documentation,
and used conventionally to refer to sample sizes,
which are given in 8-bit units (bytes), need to be
divided by 2.
*/
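/* For example, the 4KB block size used in the documentation becomes
4096/2 == 2048 16-bit writes here (see max_blksize below). */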
u16 sample_short = 0;
u32 length;
u16 __user *data_end = NULL;
unsigned int i;
const unsigned int max_blksize = 4096/2;
unsigned int written;
unsigned int blocksize;
int dma_ack;
int blocknum;
unsigned char sample_hdr[WF_SAMPLE_HDR_BYTES];
unsigned char *shptr;
int skip = 0;
int initial_skip = 0;
DPRINT (WF_DEBUG_LOAD_PATCH, "sample %sdownload for slot %d, "
"type %d, %d bytes from 0x%lx\n",
header->size ? "" : "header ",
header->number, header->subkey,
header->size,
(unsigned long) header->dataptr);
if (header->number == WAVEFRONT_FIND_FREE_SAMPLE_SLOT) {
int x;
if ((x = wavefront_find_free_sample (dev)) < 0) {
return -ENOMEM;
}
snd_printk ("unspecified sample => %d\n", x);
header->number = x;
}
if (header->size) {
/* XXX it's a debatable point whether or not RDONLY semantics
on the ROM samples should cover just the sample data or
the sample header. For now, it only covers the sample data,
so anyone is free at all times to rewrite sample headers.
My reason for this is that we have the sample headers
available in the WFB file for General MIDI, and so these
can always be reset if needed. The sample data, however,
cannot be recovered without a complete reset and firmware
reload of the ICS2115, which is a very expensive operation.
So, doing things this way allows us to honor the notion of
"RESETSAMPLES" reasonably cheaply. Note however, that this
is done purely at user level: there is no WFB parser in
this driver, and so a complete reset (back to General MIDI,
or theoretically some other configuration) is the
responsibility of the user level library.
To try to do this in the kernel would be a little
crazy: we'd need 158K of kernel space just to hold
a copy of the patch/program/sample header data.
*/
if (dev->rom_samples_rdonly) {
if (dev->sample_status[header->number] & WF_SLOT_ROM) {
snd_printk ("sample slot %d "
"write protected\n",
header->number);
return -EACCES;
}
}
wavefront_delete_sample (dev, header->number);
}
if (header->size) {
dev->freemem = wavefront_freemem (dev);
if (dev->freemem < (int)header->size) {
snd_printk ("insufficient memory to "
"load %d byte sample.\n",
header->size);
return -ENOMEM;
}
}
skip = WF_GET_CHANNEL(&header->hdr.s);
if (skip > 0 && header->hdr.s.SampleResolution != LINEAR_16BIT) {
snd_printk ("channel selection only "
"possible on 16-bit samples");
return -(EINVAL);
}
switch (skip) {
case 0:
initial_skip = 0;
skip = 1;
break;
case 1:
initial_skip = 0;
skip = 2;
break;
case 2:
initial_skip = 1;
skip = 2;
break;
case 3:
initial_skip = 2;
skip = 3;
break;
case 4:
initial_skip = 3;
skip = 4;
break;
case 5:
initial_skip = 4;
skip = 5;
break;
case 6:
initial_skip = 5;
skip = 6;
break;
}
DPRINT (WF_DEBUG_LOAD_PATCH, "channel selection: %d => "
"initial skip = %d, skip = %d\n",
WF_GET_CHANNEL (&header->hdr.s),
initial_skip, skip);
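/* Worked example (illustrative): a channel code of 2 gives
initial_skip = 1 and skip = 2, so the transfer loop below reads
dataptr[1], dataptr[3], dataptr[5], ... - every other 16-bit word,
starting one word in; presumably the second channel of interleaved
stereo data.
*/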
/* Be safe, and zero the "Unused" bits ... */
WF_SET_CHANNEL(&header->hdr.s, 0);
/* Adjust the size: we always send 16 bits per write, even for 8 bit
samples, so the length (in transfer units) is always half the size
of the sample data in bytes.
*/
length = header->size / 2;
/* the data we're sent has not been munged, and in fact, the
header we have to send isn't just a munged copy either.
so, build the sample header right here.
*/
shptr = &sample_hdr[0];
shptr = munge_int32 (header->number, shptr, 2);
if (header->size) {
shptr = munge_int32 (length, shptr, 4);
}
/* Yes, a 4 byte result doesn't contain all of the offset bits,
but the offset only uses 24 bits.
*/
shptr = munge_int32 (*((u32 *) &header->hdr.s.sampleStartOffset),
shptr, 4);
shptr = munge_int32 (*((u32 *) &header->hdr.s.loopStartOffset),
shptr, 4);
shptr = munge_int32 (*((u32 *) &header->hdr.s.loopEndOffset),
shptr, 4);
shptr = munge_int32 (*((u32 *) &header->hdr.s.sampleEndOffset),
shptr, 4);
/* This one is truly weird. What kind of weirdo decided that in
a system dominated by 16 and 32 bit integers, they would use
just 12 bits ?
*/
shptr = munge_int32 (header->hdr.s.FrequencyBias, shptr, 3);
/* Why is this nybblified, when the MSB is *always* zero ?
Anyway, we can't take the address of a bitfield, so make a
good-faith guess at where it starts.
*/
shptr = munge_int32 (*(&header->hdr.s.FrequencyBias+1),
shptr, 2);
if (snd_wavefront_cmd (dev,
header->size ?
WFC_DOWNLOAD_SAMPLE : WFC_DOWNLOAD_SAMPLE_HEADER,
NULL, sample_hdr)) {
snd_printk ("sample %sdownload refused.\n",
header->size ? "" : "header ");
return -(EIO);
}
if (header->size == 0) {
goto sent; /* Sorry. Just had to have one somewhere */
}
data_end = dataptr + length;
/* Do any initial skip over an unused channel's data */
dataptr += initial_skip;
for (written = 0, blocknum = 0;
written < length; written += max_blksize, blocknum++) {
if ((length - written) > max_blksize) {
blocksize = max_blksize;
} else {
/* round up to the nearest 8 16-bit words, i.e. 16 bytes */
blocksize = ALIGN(length - written, 8);
}
if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_BLOCK, NULL, NULL)) {
snd_printk ("download block "
"request refused.\n");
return -(EIO);
}
for (i = 0; i < blocksize; i++) {
if (dataptr < data_end) {
__get_user (sample_short, dataptr);
dataptr += skip;
if (data_is_unsigned) { /* GUS ? */
if (WF_SAMPLE_IS_8BIT(&header->hdr.s)) {
/* 8 bit sample
resolution, sign
extend both bytes.
*/
((unsigned char*)
&sample_short)[0] += 0x7f;
((unsigned char*)
&sample_short)[1] += 0x7f;
} else {
/* 16 bit sample
resolution, sign
extend the MSB.
*/
sample_short += 0x7fff;
}
}
} else {
/* In padding section of final block:
Don't fetch unsupplied data from
user space, just continue with
whatever the final value was.
*/
}
if (i < blocksize - 1) {
outw (sample_short, dev->block_port);
} else {
outw (sample_short, dev->last_block_port);
}
}
/* Get "DMA page acknowledge", even though its really
nothing to do with DMA at all.
*/
if ((dma_ack = wavefront_read (dev)) != WF_DMA_ACK) {
if (dma_ack == -1) {
snd_printk ("upload sample "
"DMA ack timeout\n");
return -(EIO);
} else {
snd_printk ("upload sample "
"DMA ack error 0x%x\n",
dma_ack);
return -(EIO);
}
}
}
dev->sample_status[header->number] = (WF_SLOT_FILLED|WF_ST_SAMPLE);
/* Note, label is here because sending the sample header shouldn't
alter the sample_status info at all.
*/
sent:
return (0);
}
static int
wavefront_send_alias (snd_wavefront_t *dev, wavefront_patch_info *header)
{
unsigned char alias_hdr[WF_ALIAS_BYTES];
DPRINT (WF_DEBUG_LOAD_PATCH, "download alias, %d is "
"alias for %d\n",
header->number,
header->hdr.a.OriginalSample);
munge_int32 (header->number, &alias_hdr[0], 2);
munge_int32 (header->hdr.a.OriginalSample, &alias_hdr[2], 2);
munge_int32 (*((unsigned int *)&header->hdr.a.sampleStartOffset),
&alias_hdr[4], 4);
munge_int32 (*((unsigned int *)&header->hdr.a.loopStartOffset),
&alias_hdr[8], 4);
munge_int32 (*((unsigned int *)&header->hdr.a.loopEndOffset),
&alias_hdr[12], 4);
munge_int32 (*((unsigned int *)&header->hdr.a.sampleEndOffset),
&alias_hdr[16], 4);
munge_int32 (header->hdr.a.FrequencyBias, &alias_hdr[20], 3);
munge_int32 (*(&header->hdr.a.FrequencyBias+1), &alias_hdr[23], 2);
if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_SAMPLE_ALIAS, NULL, alias_hdr)) {
snd_printk ("download alias failed.\n");
return -(EIO);
}
dev->sample_status[header->number] = (WF_SLOT_FILLED|WF_ST_ALIAS);
return (0);
}
static int
wavefront_send_multisample (snd_wavefront_t *dev, wavefront_patch_info *header)
{
int i;
int num_samples;
unsigned char *msample_hdr;
msample_hdr = kmalloc(WF_MSAMPLE_BYTES, GFP_KERNEL);
if (! msample_hdr)
return -ENOMEM;
munge_int32 (header->number, &msample_hdr[0], 2);
/* You'll recall at this point that the "number of samples" value
in a wavefront_multisample struct is actually the log2 of the
real number of samples.
*/
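/* e.g. NumberOfSamples == 3 means 1 << 3 == 8 actual sample slots */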
num_samples = (1<<(header->hdr.ms.NumberOfSamples&7));
msample_hdr[2] = (unsigned char) header->hdr.ms.NumberOfSamples;
DPRINT (WF_DEBUG_LOAD_PATCH, "multi %d with %d=%d samples\n",
header->number,
header->hdr.ms.NumberOfSamples,
num_samples);
for (i = 0; i < num_samples; i++) {
DPRINT(WF_DEBUG_LOAD_PATCH|WF_DEBUG_DATA, "sample[%d] = %d\n",
i, header->hdr.ms.SampleNumber[i]);
munge_int32 (header->hdr.ms.SampleNumber[i],
&msample_hdr[3+(i*2)], 2);
}
/* Need a hack here to pass in the number of bytes
to be written to the synth. This is ugly, and perhaps
one day, I'll fix it.
*/
if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_MULTISAMPLE,
(unsigned char *) (long) ((num_samples*2)+3),
msample_hdr)) {
snd_printk ("download of multisample failed.\n");
kfree(msample_hdr);
return -(EIO);
}
dev->sample_status[header->number] = (WF_SLOT_FILLED|WF_ST_MULTISAMPLE);
kfree(msample_hdr);
return (0);
}
static int
wavefront_fetch_multisample (snd_wavefront_t *dev,
wavefront_patch_info *header)
{
int i;
unsigned char log_ns[1];
unsigned char number[2];
int num_samples;
munge_int32 (header->number, number, 2);
if (snd_wavefront_cmd (dev, WFC_UPLOAD_MULTISAMPLE, log_ns, number)) {
snd_printk ("upload multisample failed.\n");
return -(EIO);
}
DPRINT (WF_DEBUG_DATA, "msample %d has %d samples\n",
header->number, log_ns[0]);
header->hdr.ms.NumberOfSamples = log_ns[0];
/* get the number of samples ... */
num_samples = (1 << log_ns[0]);
for (i = 0; i < num_samples; i++) {
char d[2];
int val;
if ((val = wavefront_read (dev)) == -1) {
snd_printk ("upload multisample failed "
"during sample loop.\n");
return -(EIO);
}
d[0] = val;
if ((val = wavefront_read (dev)) == -1) {
snd_printk ("upload multisample failed "
"during sample loop.\n");
return -(EIO);
}
d[1] = val;
header->hdr.ms.SampleNumber[i] =
demunge_int32 ((unsigned char *) d, 2);
DPRINT (WF_DEBUG_DATA, "msample sample[%d] = %d\n",
i, header->hdr.ms.SampleNumber[i]);
}
return (0);
}
static int
wavefront_send_drum (snd_wavefront_t *dev, wavefront_patch_info *header)
{
unsigned char drumbuf[WF_DRUM_BYTES];
wavefront_drum *drum = &header->hdr.d;
int i;
DPRINT (WF_DEBUG_LOAD_PATCH, "downloading edrum for MIDI "
"note %d, patch = %d\n",
header->number, drum->PatchNumber);
drumbuf[0] = header->number & 0x7f;
for (i = 0; i < 4; i++) {
munge_int32 (((unsigned char *)drum)[i], &drumbuf[1+(i*2)], 2);
}
if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_EDRUM_PROGRAM, NULL, drumbuf)) {
snd_printk ("download drum failed.\n");
return -(EIO);
}
return (0);
}
static int
wavefront_find_free_sample (snd_wavefront_t *dev)
{
int i;
for (i = 0; i < WF_MAX_SAMPLE; i++) {
if (!(dev->sample_status[i] & WF_SLOT_FILLED)) {
return i;
}
}
snd_printk ("no free sample slots!\n");
return -1;
}
#if 0
static int
wavefront_find_free_patch (snd_wavefront_t *dev)
{
int i;
for (i = 0; i < WF_MAX_PATCH; i++) {
if (!(dev->patch_status[i] & WF_SLOT_FILLED)) {
return i;
}
}
snd_printk ("no free patch slots!\n");
return -1;
}
#endif
static int
wavefront_load_patch (snd_wavefront_t *dev, const char __user *addr)
{
wavefront_patch_info *header;
int err;
header = kmalloc(sizeof(*header), GFP_KERNEL);
if (! header)
return -ENOMEM;
if (copy_from_user (header, addr, sizeof(wavefront_patch_info) -
sizeof(wavefront_any))) {
snd_printk ("bad address for load patch.\n");
err = -EFAULT;
goto __error;
}
DPRINT (WF_DEBUG_LOAD_PATCH, "download "
"Sample type: %d "
"Sample number: %d "
"Sample size: %d\n",
header->subkey,
header->number,
header->size);
switch (header->subkey) {
case WF_ST_SAMPLE: /* sample or sample_header, based on patch->size */
if (copy_from_user (&header->hdr.s, header->hdrptr,
sizeof (wavefront_sample))) {
err = -EFAULT;
break;
}
err = wavefront_send_sample (dev, header, header->dataptr, 0);
break;
case WF_ST_MULTISAMPLE:
if (copy_from_user (&header->hdr.s, header->hdrptr,
sizeof (wavefront_multisample))) {
err = -EFAULT;
break;
}
err = wavefront_send_multisample (dev, header);
break;
case WF_ST_ALIAS:
if (copy_from_user (&header->hdr.a, header->hdrptr,
sizeof (wavefront_alias))) {
err = -EFAULT;
break;
}
err = wavefront_send_alias (dev, header);
break;
case WF_ST_DRUM:
if (copy_from_user (&header->hdr.d, header->hdrptr,
sizeof (wavefront_drum))) {
err = -EFAULT;
break;
}
err = wavefront_send_drum (dev, header);
break;
case WF_ST_PATCH:
if (copy_from_user (&header->hdr.p, header->hdrptr,
sizeof (wavefront_patch))) {
err = -EFAULT;
break;
}
err = wavefront_send_patch (dev, header);
break;
case WF_ST_PROGRAM:
if (copy_from_user (&header->hdr.pr, header->hdrptr,
sizeof (wavefront_program))) {
err = -EFAULT;
break;
}
err = wavefront_send_program (dev, header);
break;
default:
snd_printk ("unknown patch type %d.\n",
header->subkey);
err = -EINVAL;
break;
}
__error:
kfree(header);
return err;
}
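/* Hedged usage sketch (illustrative, not part of the driver): from user
space, a loader fills a wavefront_patch_info and hands it to the hwdep
ioctl serviced below by snd_wavefront_synth_ioctl(). The device file
descriptor and error handling here are assumptions.
*/
#if 0
/* user-space side, run against the card's hwdep device */
static int example_load_patch (int hwdep_fd, wavefront_patch_info *info)
{
/* caller sets info->subkey, info->number, info->size, info->hdrptr ... */
return ioctl (hwdep_fd, WFCTL_LOAD_SPP, info);
}
#endif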
/***********************************************************************
WaveFront: hardware-dependent interface
***********************************************************************/
static void
process_sample_hdr (u8 *buf)
{
wavefront_sample s;
u8 *ptr;
ptr = buf;
/* The board doesn't send us an exact copy of a "wavefront_sample"
in response to an Upload Sample Header command. Instead, we
have to convert the data format back into our data structure,
just as in the Download Sample command, where we have to do
something very similar in the reverse direction.
*/
*((u32 *) &s.sampleStartOffset) = demunge_int32 (ptr, 4); ptr += 4;
*((u32 *) &s.loopStartOffset) = demunge_int32 (ptr, 4); ptr += 4;
*((u32 *) &s.loopEndOffset) = demunge_int32 (ptr, 4); ptr += 4;
*((u32 *) &s.sampleEndOffset) = demunge_int32 (ptr, 4); ptr += 4;
*((u32 *) &s.FrequencyBias) = demunge_int32 (ptr, 3); ptr += 3;
s.SampleResolution = *ptr & 0x3;
s.Loop = *ptr & 0x8;
s.Bidirectional = *ptr & 0x10;
s.Reverse = *ptr & 0x40;
/* Now copy it back to where it came from */
memcpy (buf, (unsigned char *) &s, sizeof (wavefront_sample));
}
static int
wavefront_synth_control (snd_wavefront_card_t *acard,
wavefront_control *wc)
{
snd_wavefront_t *dev = &acard->wavefront;
unsigned char patchnumbuf[2];
int i;
DPRINT (WF_DEBUG_CMD, "synth control with "
"cmd 0x%x\n", wc->cmd);
/* Pre-handling of or for various commands */
switch (wc->cmd) {
case WFC_DISABLE_INTERRUPTS:
snd_printk ("interrupts disabled.\n");
outb (0x80|0x20, dev->control_port);
dev->interrupts_are_midi = 1;
return 0;
case WFC_ENABLE_INTERRUPTS:
snd_printk ("interrupts enabled.\n");
outb (0x80|0x40|0x20, dev->control_port);
dev->interrupts_are_midi = 1;
return 0;
case WFC_INTERRUPT_STATUS:
wc->rbuf[0] = dev->interrupts_are_midi;
return 0;
case WFC_ROMSAMPLES_RDONLY:
dev->rom_samples_rdonly = wc->wbuf[0];
wc->status = 0;
return 0;
case WFC_IDENTIFY_SLOT_TYPE:
i = wc->wbuf[0] | (wc->wbuf[1] << 7);
if (i < 0 || i >= WF_MAX_SAMPLE) {
snd_printk ("invalid slot ID %d\n",
i);
wc->status = EINVAL;
return -EINVAL;
}
wc->rbuf[0] = dev->sample_status[i];
wc->status = 0;
return 0;
case WFC_DEBUG_DRIVER:
dev->debug = wc->wbuf[0];
snd_printk ("debug = 0x%x\n", dev->debug);
return 0;
case WFC_UPLOAD_PATCH:
munge_int32 (*((u32 *) wc->wbuf), patchnumbuf, 2);
memcpy (wc->wbuf, patchnumbuf, 2);
break;
case WFC_UPLOAD_MULTISAMPLE:
/* multisamples have to be handled differently, and
cannot be dealt with properly by snd_wavefront_cmd() alone.
*/
wc->status = wavefront_fetch_multisample
(dev, (wavefront_patch_info *) wc->rbuf);
return 0;
case WFC_UPLOAD_SAMPLE_ALIAS:
snd_printk ("support for sample alias upload "
"being considered.\n");
wc->status = EINVAL;
return -EINVAL;
}
wc->status = snd_wavefront_cmd (dev, wc->cmd, wc->rbuf, wc->wbuf);
/* Post-handling of certain commands.
In particular, if the command was an upload, demunge the data
so that the user-level doesn't have to think about it.
*/
if (wc->status == 0) {
switch (wc->cmd) {
/* intercept any freemem requests so that we know
we are always current with the user-level view
of things.
*/
case WFC_REPORT_FREE_MEMORY:
dev->freemem = demunge_int32 (wc->rbuf, 4);
break;
case WFC_UPLOAD_PATCH:
demunge_buf (wc->rbuf, wc->rbuf, WF_PATCH_BYTES);
break;
case WFC_UPLOAD_PROGRAM:
demunge_buf (wc->rbuf, wc->rbuf, WF_PROGRAM_BYTES);
break;
case WFC_UPLOAD_EDRUM_PROGRAM:
demunge_buf (wc->rbuf, wc->rbuf, WF_DRUM_BYTES - 1);
break;
case WFC_UPLOAD_SAMPLE_HEADER:
process_sample_hdr (wc->rbuf);
break;
case WFC_UPLOAD_SAMPLE_ALIAS:
snd_printk ("support for "
"sample aliases still "
"being considered.\n");
break;
case WFC_VMIDI_OFF:
snd_wavefront_midi_disable_virtual (acard);
break;
case WFC_VMIDI_ON:
snd_wavefront_midi_enable_virtual (acard);
break;
}
}
return 0;
}
int
snd_wavefront_synth_open (struct snd_hwdep *hw, struct file *file)
{
if (!try_module_get(hw->card->module))
return -EFAULT;
file->private_data = hw;
return 0;
}
int
snd_wavefront_synth_release (struct snd_hwdep *hw, struct file *file)
{
module_put(hw->card->module);
return 0;
}
int
snd_wavefront_synth_ioctl (struct snd_hwdep *hw, struct file *file,
unsigned int cmd, unsigned long arg)
{
struct snd_card *card;
snd_wavefront_t *dev;
snd_wavefront_card_t *acard;
wavefront_control *wc;
void __user *argp = (void __user *)arg;
int err;
card = (struct snd_card *) hw->card;
if (snd_BUG_ON(!card))
return -ENODEV;
if (snd_BUG_ON(!card->private_data))
return -ENODEV;
acard = card->private_data;
dev = &acard->wavefront;
switch (cmd) {
case WFCTL_LOAD_SPP:
if (wavefront_load_patch (dev, argp) != 0) {
return -EIO;
}
break;
case WFCTL_WFCMD:
wc = memdup_user(argp, sizeof(*wc));
if (IS_ERR(wc))
return PTR_ERR(wc);
if (wavefront_synth_control (acard, wc) < 0)
err = -EIO;
else if (copy_to_user (argp, wc, sizeof (*wc)))
err = -EFAULT;
else
err = 0;
kfree(wc);
return err;
default:
return -EINVAL;
}
return 0;
}
/***********************************************************************/
/* WaveFront: interface for card-level wavefront module */
/***********************************************************************/
void
snd_wavefront_internal_interrupt (snd_wavefront_card_t *card)
{
snd_wavefront_t *dev = &card->wavefront;
/*
Some comments on interrupts. I attempted a version of this
driver that used interrupts throughout the code instead of
doing busy and/or sleep-waiting. Alas, it appears that once
the Motorola firmware is downloaded, the card *never*
generates an RX interrupt. These are successfully generated
during firmware loading, and after that wavefront_status()
reports that an interrupt is pending on the card from time
to time, but it never seems to be delivered to this
driver. Note also that wavefront_status() continues to
report that RX interrupts are enabled, suggesting that I
didn't goof up and disable them by mistake.
Thus, I stepped back to a prior version of
wavefront_wait(), the only place where this really
matters. It's sad, but I've looked through the code to check
on things, and I really feel certain that the Motorola
firmware prevents RX-ready interrupts.
*/
if ((wavefront_status(dev) & (STAT_INTR_READ|STAT_INTR_WRITE)) == 0) {
return;
}
spin_lock(&dev->irq_lock);
dev->irq_ok = 1;
dev->irq_cnt++;
spin_unlock(&dev->irq_lock);
wake_up(&dev->interrupt_sleeper);
}
/* STATUS REGISTER
0 Host Rx Interrupt Enable (1=Enabled)
1 Host Rx Register Full (1=Full)
2 Host Rx Interrupt Pending (1=Interrupt)
3 Unused
4 Host Tx Interrupt Enable (1=Enabled)
5 Host Tx Register empty (1=Empty)
6 Host Tx Interrupt Pending (1=Interrupt)
7 Unused
*/
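/* Example: a status value of 0x22 = (1<<1)|(1<<5) means the Rx register
is full and the Tx register is empty - a byte is waiting to be read
and there is room to write one.
*/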
static int __devinit
snd_wavefront_interrupt_bits (int irq)
{
int bits;
switch (irq) {
case 9:
bits = 0x00;
break;
case 5:
bits = 0x08;
break;
case 12:
bits = 0x10;
break;
case 15:
bits = 0x18;
break;
default:
snd_printk ("invalid IRQ %d\n", irq);
bits = -1;
}
return bits;
}
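/* The value returned above is just the IRQ selection code placed in
bits 5:3 of the hardware initialization register (documented in
wavefront_reset_to_cleanliness() below): IRQ 9 -> 0, 5 -> 1,
12 -> 2, 15 -> 3, each shifted left by 3.
*/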
static void __devinit
wavefront_should_cause_interrupt (snd_wavefront_t *dev,
int val, int port, unsigned long timeout)
{
wait_queue_t wait;
init_waitqueue_entry(&wait, current);
spin_lock_irq(&dev->irq_lock);
add_wait_queue(&dev->interrupt_sleeper, &wait);
dev->irq_ok = 0;
outb (val,port);
spin_unlock_irq(&dev->irq_lock);
while (!dev->irq_ok && time_before(jiffies, timeout)) {
schedule_timeout_uninterruptible(1);
barrier();
}
}
static int __devinit
wavefront_reset_to_cleanliness (snd_wavefront_t *dev)
{
int bits;
int hwv[2];
/* IRQ already checked */
bits = snd_wavefront_interrupt_bits (dev->irq);
/* try reset of port */
outb (0x0, dev->control_port);
/* At this point, the board is in reset, and the H/W initialization
register is accessed at the same address as the data port.
Bit 7 - Enable IRQ Driver
0 - Tri-state the Wave-Board drivers for the PC Bus IRQs
1 - Enable IRQ selected by bits 5:3 to be driven onto the PC Bus.
Bit 6 - MIDI Interface Select
0 - Use the MIDI Input from the 26-pin WaveBlaster
compatible header as the serial MIDI source
1 - Use the MIDI Input from the 9-pin D connector as the
serial MIDI source.
Bits 5:3 - IRQ Selection
0 0 0 - IRQ 2/9
0 0 1 - IRQ 5
0 1 0 - IRQ 12
0 1 1 - IRQ 15
1 0 0 - Reserved
1 0 1 - Reserved
1 1 0 - Reserved
1 1 1 - Reserved
Bits 2:1 - Reserved
Bit 0 - Disable Boot ROM
0 - memory accesses to 03FC30-03FFFFH utilize the internal Boot ROM
1 - memory accesses to 03FC30-03FFFFH are directed to external
storage.
*/
/* configure hardware: IRQ, enable interrupts,
plus external 9-pin MIDI interface selected
*/
outb (0x80 | 0x40 | bits, dev->data_port);
/* CONTROL REGISTER
0 Host Rx Interrupt Enable (1=Enabled) 0x1
1 Unused 0x2
2 Unused 0x4
3 Unused 0x8
4 Host Tx Interrupt Enable 0x10
5 Mute (0=Mute; 1=Play) 0x20
6 Master Interrupt Enable (1=Enabled) 0x40
7 Master Reset (0=Reset; 1=Run) 0x80
Take us out of reset, mute output, master + TX + RX interrupts on.
We'll get an interrupt presumably to tell us that the TX
register is clear.
*/
wavefront_should_cause_interrupt(dev, 0x80|0x40|0x10|0x1,
dev->control_port,
(reset_time*HZ)/100);
/* Note: data port is now the data port, not the h/w initialization
port.
*/
if (!dev->irq_ok) {
snd_printk ("intr not received after h/w un-reset.\n");
goto gone_bad;
}
/* Note: data port is now the data port, not the h/w initialization
port.
At this point, only "HW VERSION" or "DOWNLOAD OS" commands
will work. So, issue one of them, and wait for TX
interrupt. This can take a *long* time after a cold boot,
while the ICS ROM does its RAM test. The SDK says up to 4
seconds - with 12MB of RAM on a Tropez+, it takes a lot
longer than that (~16secs). Note that the card understands
the difference between a warm and a cold boot, so
subsequent ICS2115 reboots (say, caused by module
reloading) will get through this much faster.
XXX Interesting question: why is no RX interrupt received first ?
*/
wavefront_should_cause_interrupt(dev, WFC_HARDWARE_VERSION,
dev->data_port, ramcheck_time*HZ);
if (!dev->irq_ok) {
snd_printk ("post-RAM-check interrupt not received.\n");
goto gone_bad;
}
if (!wavefront_wait (dev, STAT_CAN_READ)) {
snd_printk ("no response to HW version cmd.\n");
goto gone_bad;
}
if ((hwv[0] = wavefront_read (dev)) == -1) {
snd_printk ("board not responding correctly.\n");
goto gone_bad;
}
if (hwv[0] == 0xFF) { /* NAK */
/* Board's RAM test failed. Try to read error code,
and tell us about it either way.
*/
if ((hwv[0] = wavefront_read (dev)) == -1) {
snd_printk ("on-board RAM test failed "
"(bad error code).\n");
} else {
snd_printk ("on-board RAM test failed "
"(error code: 0x%x).\n",
hwv[0]);
}
goto gone_bad;
}
/* We're OK, just get the next byte of the HW version response */
if ((hwv[1] = wavefront_read (dev)) == -1) {
snd_printk ("incorrect h/w response.\n");
goto gone_bad;
}
snd_printk ("hardware version %d.%d\n",
hwv[0], hwv[1]);
return 0;
gone_bad:
return (1);
}
static int __devinit
wavefront_download_firmware (snd_wavefront_t *dev, char *path)
{
const unsigned char *buf;
int len, err;
int section_cnt_downloaded = 0;
const struct firmware *firmware;
err = request_firmware(&firmware, path, dev->card->dev);
if (err < 0) {
snd_printk(KERN_ERR "firmware (%s) download failed!!!\n", path);
return 1;
}
len = 0;
buf = firmware->data;
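/* The image parsed below is a simple sectioned format: each section is
a signed length byte (1..WF_SECTION_MAX) followed by that many
payload bytes; a zero length byte terminates the image.
*/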
for (;;) {
int section_length = *(signed char *)buf;
if (section_length == 0)
break;
if (section_length < 0 || section_length > WF_SECTION_MAX) {
snd_printk(KERN_ERR
"invalid firmware section length %d\n",
section_length);
goto failure;
}
buf++;
len++;
if (firmware->size < len + section_length) {
snd_printk(KERN_ERR "firmware section read error.\n");
goto failure;
}
/* Send command */
if (wavefront_write(dev, WFC_DOWNLOAD_OS))
goto failure;
for (; section_length; section_length--) {
if (wavefront_write(dev, *buf))
goto failure;
buf++;
len++;
}
/* get ACK */
if (!wavefront_wait(dev, STAT_CAN_READ)) {
snd_printk(KERN_ERR "time out for firmware ACK.\n");
goto failure;
}
err = inb(dev->data_port);
if (err != WF_ACK) {
snd_printk(KERN_ERR
"download of section #%d not "
"acknowledged, ack = 0x%x\n",
section_cnt_downloaded + 1, err);
goto failure;
}
section_cnt_downloaded++;
}
release_firmware(firmware);
return 0;
failure:
release_firmware(firmware);
snd_printk(KERN_ERR "firmware download failed!!!\n");
return 1;
}
static int __devinit
wavefront_do_reset (snd_wavefront_t *dev)
{
char voices[1];
if (wavefront_reset_to_cleanliness (dev)) {
snd_printk ("hw reset failed.\n");
goto gone_bad;
}
if (dev->israw) {
if (wavefront_download_firmware (dev, ospath)) {
goto gone_bad;
}
dev->israw = 0;
/* Wait for the OS to get running. The protocol for
this is non-obvious, and was determined by
using port-IO tracing in DOSemu and some
experimentation here.
Rather than using timed waits, use interrupts creatively.
*/
wavefront_should_cause_interrupt (dev, WFC_NOOP,
dev->data_port,
(osrun_time*HZ));
if (!dev->irq_ok) {
snd_printk ("no post-OS interrupt.\n");
goto gone_bad;
}
/* Now, do it again ! */
wavefront_should_cause_interrupt (dev, WFC_NOOP,
dev->data_port, (10*HZ));
if (!dev->irq_ok) {
snd_printk ("no post-OS interrupt(2).\n");
goto gone_bad;
}
/* OK, no (RX/TX) interrupts any more, but leave mute
in effect.
*/
outb (0x80|0x40, dev->control_port);
}
/* SETUPSND.EXE asks for sample memory config here, but since i
have no idea how to interpret the result, we'll forget
about it.
*/
if ((dev->freemem = wavefront_freemem (dev)) < 0) {
goto gone_bad;
}
snd_printk ("available DRAM %dk\n", dev->freemem / 1024);
if (wavefront_write (dev, 0xf0) ||
wavefront_write (dev, 1) ||
(wavefront_read (dev) < 0)) {
dev->debug = 0;
snd_printk ("MPU emulation mode not set.\n");
goto gone_bad;
}
voices[0] = 32;
if (snd_wavefront_cmd (dev, WFC_SET_NVOICES, NULL, voices)) {
snd_printk ("cannot set number of voices to 32.\n");
goto gone_bad;
}
return 0;
gone_bad:
/* reset that sucker so that it doesn't bother us. */
outb (0x0, dev->control_port);
dev->interrupts_are_midi = 0;
return 1;
}
int __devinit
snd_wavefront_start (snd_wavefront_t *dev)
{
int samples_are_from_rom;
/* IMPORTANT: assumes that snd_wavefront_detect() and/or
wavefront_reset_to_cleanliness() has already been called
*/
if (dev->israw) {
samples_are_from_rom = 1;
} else {
/* XXX is this always true ? */
samples_are_from_rom = 0;
}
if (dev->israw || fx_raw) {
if (wavefront_do_reset (dev)) {
return -1;
}
}
/* Check for FX device, present only on Tropez+ */
dev->has_fx = (snd_wavefront_fx_detect (dev) == 0);
if (dev->has_fx && fx_raw) {
snd_wavefront_fx_start (dev);
}
wavefront_get_sample_status (dev, samples_are_from_rom);
wavefront_get_program_status (dev);
wavefront_get_patch_status (dev);
/* Start normal operation: unreset, master interrupt enabled, no mute
*/
outb (0x80|0x40|0x20, dev->control_port);
return (0);
}
int __devinit
snd_wavefront_detect (snd_wavefront_card_t *card)
{
unsigned char rbuf[4], wbuf[4];
snd_wavefront_t *dev = &card->wavefront;
/* returns zero if a WaveFront card is successfully detected.
negative otherwise.
*/
dev->israw = 0;
dev->has_fx = 0;
dev->debug = debug_default;
dev->interrupts_are_midi = 0;
dev->irq_cnt = 0;
dev->rom_samples_rdonly = 1;
if (snd_wavefront_cmd (dev, WFC_FIRMWARE_VERSION, rbuf, wbuf) == 0) {
dev->fw_version[0] = rbuf[0];
dev->fw_version[1] = rbuf[1];
snd_printk ("firmware %d.%d already loaded.\n",
rbuf[0], rbuf[1]);
/* check that a command actually works */
if (snd_wavefront_cmd (dev, WFC_HARDWARE_VERSION,
rbuf, wbuf) == 0) {
dev->hw_version[0] = rbuf[0];
dev->hw_version[1] = rbuf[1];
} else {
snd_printk ("not raw, but no "
"hardware version!\n");
return -1;
}
if (!wf_raw) {
return 0;
} else {
snd_printk ("reloading firmware as you requested.\n");
dev->israw = 1;
}
} else {
dev->israw = 1;
snd_printk ("no response to firmware probe, assume raw.\n");
}
return 0;
}
MODULE_FIRMWARE(DEFAULT_OSPATH);
| gpl-2.0 |
wooshy1/android-tegra-nv-3.1 | drivers/platform/x86/intel_pmic_gpio.c | 8007 | 8320 | /* Moorestown PMIC GPIO (access through IPC) driver
* Copyright (c) 2008 - 2009, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* Supports:
* Moorestown platform PMIC chip
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <asm/intel_scu_ipc.h>
#include <linux/device.h>
#include <linux/intel_pmic_gpio.h>
#include <linux/platform_device.h>
#define DRIVER_NAME "pmic_gpio"
/* register offset that IPC driver should use
* 8 GPIO + 8 GPOSW (6 controllable) + 8 GPO
*/
enum pmic_gpio_register {
GPIO0 = 0xE0,
GPIO7 = 0xE7,
GPIOINT = 0xE8,
GPOSWCTL0 = 0xEC,
GPOSWCTL5 = 0xF1,
GPO = 0xF4,
};
/* bits definition for GPIO & GPOSW */
#define GPIO_DRV 0x01
#define GPIO_DIR 0x02
#define GPIO_DIN 0x04
#define GPIO_DOU 0x08
#define GPIO_INTCTL 0x30
#define GPIO_DBC 0xc0
#define GPOSW_DRV 0x01
#define GPOSW_DOU 0x08
#define GPOSW_RDRV 0x30
#define GPIO_UPDATE_TYPE 0x80000000
#define NUM_GPIO 24
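/* gpiolib offset map implied by the handlers below: offsets 0-7 are the
GPIO pins (the only input-capable ones), 8-15 the GPOSW pins and
16-23 the GPO pins.
*/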
struct pmic_gpio {
struct mutex buslock;
struct gpio_chip chip;
void *gpiointr;
int irq;
unsigned irq_base;
unsigned int update_type;
u32 trigger_type;
};
static void pmic_program_irqtype(int gpio, int type)
{
if (type & IRQ_TYPE_EDGE_RISING)
intel_scu_ipc_update_register(GPIO0 + gpio, 0x20, 0x20);
else
intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x20);
if (type & IRQ_TYPE_EDGE_FALLING)
intel_scu_ipc_update_register(GPIO0 + gpio, 0x10, 0x10);
else
intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x10);
}
static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
if (offset >= 8) {
pr_err("only pins 0-7 support input\n");
return -1;/* we only have 8 GPIOs we can use as input */
}
return intel_scu_ipc_update_register(GPIO0 + offset,
GPIO_DIR, GPIO_DIR);
}
static int pmic_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
int rc = 0;
if (offset < 8)/* it is GPIO */
rc = intel_scu_ipc_update_register(GPIO0 + offset,
GPIO_DRV | (value ? GPIO_DOU : 0),
GPIO_DRV | GPIO_DOU | GPIO_DIR);
else if (offset < 16)/* it is GPOSW */
rc = intel_scu_ipc_update_register(GPOSWCTL0 + offset - 8,
GPOSW_DRV | (value ? GPOSW_DOU : 0),
GPOSW_DRV | GPOSW_DOU | GPOSW_RDRV);
else if (offset > 15 && offset < 24)/* it is GPO */
rc = intel_scu_ipc_update_register(GPO,
value ? 1 << (offset - 16) : 0,
1 << (offset - 16));
else {
pr_err("invalid PMIC GPIO pin %d!\n", offset);
WARN_ON(1);
}
return rc;
}
static int pmic_gpio_get(struct gpio_chip *chip, unsigned offset)
{
u8 r;
int ret;
/* we only have 8 GPIO pins we can use as input */
if (offset >= 8)
return -EOPNOTSUPP;
ret = intel_scu_ipc_ioread8(GPIO0 + offset, &r);
if (ret < 0)
return ret;
return r & GPIO_DIN;
}
static void pmic_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
if (offset < 8)/* it is GPIO */
intel_scu_ipc_update_register(GPIO0 + offset,
GPIO_DRV | (value ? GPIO_DOU : 0),
GPIO_DRV | GPIO_DOU);
else if (offset < 16)/* it is GPOSW */
intel_scu_ipc_update_register(GPOSWCTL0 + offset - 8,
GPOSW_DRV | (value ? GPOSW_DOU : 0),
GPOSW_DRV | GPOSW_DOU | GPOSW_RDRV);
else if (offset > 15 && offset < 24) /* it is GPO */
intel_scu_ipc_update_register(GPO,
value ? 1 << (offset - 16) : 0,
1 << (offset - 16));
}
/*
* This is called from genirq with pg->buslock locked and
* irq_desc->lock held. We can not access the scu bus here, so we
* store the change and update in the bus_sync_unlock() function below
*/
static int pmic_irq_type(struct irq_data *data, unsigned type)
{
struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);
u32 gpio = data->irq - pg->irq_base;
if (gpio >= pg->chip.ngpio)
return -EINVAL;
pg->trigger_type = type;
pg->update_type = gpio | GPIO_UPDATE_TYPE;
return 0;
}
static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct pmic_gpio *pg = container_of(chip, struct pmic_gpio, chip);
return pg->irq_base + offset;
}
static void pmic_bus_lock(struct irq_data *data)
{
struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);
mutex_lock(&pg->buslock);
}
static void pmic_bus_sync_unlock(struct irq_data *data)
{
struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);
if (pg->update_type) {
unsigned int gpio = pg->update_type & ~GPIO_UPDATE_TYPE;
pmic_program_irqtype(gpio, pg->trigger_type);
pg->update_type = 0;
}
mutex_unlock(&pg->buslock);
}
/* the gpiointr register is read-clear, so just do nothing. */
static void pmic_irq_unmask(struct irq_data *data) { }
static void pmic_irq_mask(struct irq_data *data) { }
static struct irq_chip pmic_irqchip = {
.name = "PMIC-GPIO",
.irq_mask = pmic_irq_mask,
.irq_unmask = pmic_irq_unmask,
.irq_set_type = pmic_irq_type,
.irq_bus_lock = pmic_bus_lock,
.irq_bus_sync_unlock = pmic_bus_sync_unlock,
};
static irqreturn_t pmic_irq_handler(int irq, void *data)
{
struct pmic_gpio *pg = data;
u8 intsts = *((u8 *)pg->gpiointr + 4);
int gpio;
irqreturn_t ret = IRQ_NONE;
for (gpio = 0; gpio < 8; gpio++) {
if (intsts & (1 << gpio)) {
pr_debug("pmic pin %d triggered\n", gpio);
generic_handle_irq(pg->irq_base + gpio);
ret = IRQ_HANDLED;
}
}
return ret;
}
static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int irq = platform_get_irq(pdev, 0);
struct intel_pmic_gpio_platform_data *pdata = dev->platform_data;
struct pmic_gpio *pg;
int retval;
int i;
if (irq < 0) {
dev_dbg(dev, "no IRQ line\n");
return -EINVAL;
}
if (!pdata || !pdata->gpio_base || !pdata->irq_base) {
dev_dbg(dev, "incorrect or missing platform data\n");
return -EINVAL;
}
pg = kzalloc(sizeof(*pg), GFP_KERNEL);
if (!pg)
return -ENOMEM;
dev_set_drvdata(dev, pg);
pg->irq = irq;
/* setting up SRAM mapping for GPIOINT register */
pg->gpiointr = ioremap_nocache(pdata->gpiointr, 8);
if (!pg->gpiointr) {
pr_err("Can not map GPIOINT\n");
retval = -EINVAL;
goto err2;
}
pg->irq_base = pdata->irq_base;
pg->chip.label = "intel_pmic";
pg->chip.direction_input = pmic_gpio_direction_input;
pg->chip.direction_output = pmic_gpio_direction_output;
pg->chip.get = pmic_gpio_get;
pg->chip.set = pmic_gpio_set;
pg->chip.to_irq = pmic_gpio_to_irq;
pg->chip.base = pdata->gpio_base;
pg->chip.ngpio = NUM_GPIO;
pg->chip.can_sleep = 1;
pg->chip.dev = dev;
mutex_init(&pg->buslock);
retval = gpiochip_add(&pg->chip);
if (retval) {
pr_err("Can not add pmic gpio chip\n");
goto err;
}
retval = request_irq(pg->irq, pmic_irq_handler, 0, "pmic", pg);
if (retval) {
pr_warn("Interrupt request failed\n");
goto err;
}
for (i = 0; i < 8; i++) {
irq_set_chip_and_handler_name(i + pg->irq_base,
&pmic_irqchip,
handle_simple_irq,
"demux");
irq_set_chip_data(i + pg->irq_base, pg);
}
return 0;
err:
iounmap(pg->gpiointr);
err2:
kfree(pg);
return retval;
}
/* at the same time, register a platform driver
* this supports the sfi 0.81 fw */
static struct platform_driver platform_pmic_gpio_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
},
.probe = platform_pmic_gpio_probe,
};
static int __init platform_pmic_gpio_init(void)
{
return platform_driver_register(&platform_pmic_gpio_driver);
}
subsys_initcall(platform_pmic_gpio_init);
MODULE_AUTHOR("Alek Du <alek.du@intel.com>");
MODULE_DESCRIPTION("Intel Moorestown PMIC GPIO driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
transi/kernel_amazon_bowser-common | drivers/mtd/maps/ck804xrom.c | 8007 | 10906 | /*
* ck804xrom.c
*
* Normal mappings of chips in physical memory
*
* Dave Olsen <dolsen@lnxi.com>
* Ryan Jackson <rjackson@lnxi.com>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/flashchip.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/list.h>
#define MOD_NAME KBUILD_BASENAME
#define ADDRESS_NAME_LEN 18
#define ROM_PROBE_STEP_SIZE (64*1024)
#define DEV_CK804 1
#define DEV_MCP55 2
struct ck804xrom_window {
void __iomem *virt;
unsigned long phys;
unsigned long size;
struct list_head maps;
struct resource rsrc;
struct pci_dev *pdev;
};
struct ck804xrom_map_info {
struct list_head list;
struct map_info map;
struct mtd_info *mtd;
struct resource rsrc;
char map_name[sizeof(MOD_NAME) + 2 + ADDRESS_NAME_LEN];
};
/*
* The following applies to ck804 only:
* The 2 bits controlling the window size are often set to allow reading
* the BIOS, but too small to allow writing, since the lock registers are
* 4MiB lower in the address space than the data.
*
* This is intended to prevent flashing the bios, perhaps accidentally.
*
* This parameter allows the normal driver to override the BIOS settings.
*
* The bits are 6 and 7. If both bits are set, it is a 5MiB window.
* If only bit 7 is set, it is a 4MiB window. Otherwise, a
* 64KiB window.
*
* The following applies to mcp55 only:
* The 15 bits controlling the window size are distributed as follows:
* byte @0x88: bit 0..7
* byte @0x8c: bit 8..15
* word @0x90: bit 16..30
* If all bits are enabled, we have a 16? MiB window
* Please set win_size_bits to 0x7fffffff if you actually want to do something
*/
static uint win_size_bits = 0;
module_param(win_size_bits, uint, 0);
MODULE_PARM_DESC(win_size_bits, "ROM window size bits override, normally set by BIOS.");
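/* Worked example (illustrative): on a ck804, loading the module with
win_size_bits=0xc0 sets bits 6 and 7, which the probe code below maps
to the full 5MiB window at physical address 0xffb00000.
*/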
static struct ck804xrom_window ck804xrom_window = {
.maps = LIST_HEAD_INIT(ck804xrom_window.maps),
};
static void ck804xrom_cleanup(struct ck804xrom_window *window)
{
struct ck804xrom_map_info *map, *scratch;
u8 byte;
if (window->pdev) {
/* Disable writes through the rom window */
pci_read_config_byte(window->pdev, 0x6d, &byte);
pci_write_config_byte(window->pdev, 0x6d, byte & ~1);
}
/* Free all of the mtd devices */
list_for_each_entry_safe(map, scratch, &window->maps, list) {
if (map->rsrc.parent)
release_resource(&map->rsrc);
mtd_device_unregister(map->mtd);
map_destroy(map->mtd);
list_del(&map->list);
kfree(map);
}
if (window->rsrc.parent)
release_resource(&window->rsrc);
if (window->virt) {
iounmap(window->virt);
window->virt = NULL;
window->phys = 0;
window->size = 0;
}
pci_dev_put(window->pdev);
}
static int __devinit ck804xrom_init_one (struct pci_dev *pdev,
const struct pci_device_id *ent)
{
static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
u8 byte;
u16 word;
struct ck804xrom_window *window = &ck804xrom_window;
struct ck804xrom_map_info *map = NULL;
unsigned long map_top;
/* Remember the pci dev I find the window in */
window->pdev = pci_dev_get(pdev);
switch (ent->driver_data) {
case DEV_CK804:
/* Enable the selected rom window. This is often incorrectly
* set up by the BIOS, and the 4MiB offset for the lock registers
* requires the full 5MiB of window space.
*
* This 'write, then read' approach leaves the bits for
* other uses of the hardware info.
*/
pci_read_config_byte(pdev, 0x88, &byte);
pci_write_config_byte(pdev, 0x88, byte | win_size_bits );
/* Assume the rom window is properly setup, and find its size */
pci_read_config_byte(pdev, 0x88, &byte);
if ((byte & ((1<<7)|(1<<6))) == ((1<<7)|(1<<6)))
window->phys = 0xffb00000; /* 5MiB */
else if ((byte & (1<<7)) == (1<<7))
window->phys = 0xffc00000; /* 4MiB */
else
window->phys = 0xffff0000; /* 64KiB */
break;
case DEV_MCP55:
pci_read_config_byte(pdev, 0x88, &byte);
pci_write_config_byte(pdev, 0x88, byte | (win_size_bits & 0xff));
pci_read_config_byte(pdev, 0x8c, &byte);
pci_write_config_byte(pdev, 0x8c, byte | ((win_size_bits & 0xff00) >> 8));
pci_read_config_word(pdev, 0x90, &word);
pci_write_config_word(pdev, 0x90, word | ((win_size_bits & 0x7fff0000) >> 16));
window->phys = 0xff000000; /* 16MiB, hardcoded for now */
break;
}
window->size = 0xffffffffUL - window->phys + 1UL;
/*
* Try to reserve the window mem region. If this fails then
* it is likely due to a fragment of the window being
* "reserved" by the BIOS. In the case that the
* request_mem_region() fails then once the rom size is
* discovered we will try to reserve the unreserved fragment.
*/
window->rsrc.name = MOD_NAME;
window->rsrc.start = window->phys;
window->rsrc.end = window->phys + window->size - 1;
window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
if (request_resource(&iomem_resource, &window->rsrc)) {
window->rsrc.parent = NULL;
printk(KERN_ERR MOD_NAME
" %s(): Unable to register resource %pR - kernel bug?\n",
__func__, &window->rsrc);
}
/* Enable writes through the rom window */
pci_read_config_byte(pdev, 0x6d, &byte);
pci_write_config_byte(pdev, 0x6d, byte | 1);
/* FIXME handle registers 0x80 - 0x8C the bios region locks */
/* For write accesses caches are useless */
window->virt = ioremap_nocache(window->phys, window->size);
if (!window->virt) {
printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
window->phys, window->size);
goto out;
}
/* Get the first address to look for a rom chip at */
map_top = window->phys;
#if 1
/* The probe sequence run over the firmware hub lock
* registers sets them to 0x7 (no access).
* Probe at most the last 4MiB of the address space.
*/
if (map_top < 0xffc00000)
map_top = 0xffc00000;
#endif
/* Loop through and look for rom chips. Since we don't know the
* starting address for each chip, probe every ROM_PROBE_STEP_SIZE
* bytes from the starting address of the window.
*/
while((map_top - 1) < 0xffffffffUL) {
struct cfi_private *cfi;
unsigned long offset;
int i;
if (!map)
map = kmalloc(sizeof(*map), GFP_KERNEL);
if (!map) {
printk(KERN_ERR MOD_NAME ": kmalloc failed");
goto out;
}
memset(map, 0, sizeof(*map));
INIT_LIST_HEAD(&map->list);
map->map.name = map->map_name;
map->map.phys = map_top;
offset = map_top - window->phys;
map->map.virt = (void __iomem *)
(((unsigned long)(window->virt)) + offset);
map->map.size = 0xffffffffUL - map_top + 1UL;
/* Set the name of the map to the address I am trying */
sprintf(map->map_name, "%s @%08Lx",
MOD_NAME, (unsigned long long)map->map.phys);
/* There is no generic VPP support */
for(map->map.bankwidth = 32; map->map.bankwidth;
map->map.bankwidth >>= 1)
{
char **probe_type;
/* Skip bankwidths that are not supported */
if (!map_bankwidth_supported(map->map.bankwidth))
continue;
/* Setup the map methods */
simple_map_init(&map->map);
/* Try all of the probe methods */
probe_type = rom_probe_types;
for(; *probe_type; probe_type++) {
map->mtd = do_map_probe(*probe_type, &map->map);
if (map->mtd)
goto found;
}
}
map_top += ROM_PROBE_STEP_SIZE;
continue;
found:
/* Trim the size if we are larger than the map */
if (map->mtd->size > map->map.size) {
printk(KERN_WARNING MOD_NAME
" rom(%llu) larger than window(%lu). fixing...\n",
(unsigned long long)map->mtd->size, map->map.size);
map->mtd->size = map->map.size;
}
if (window->rsrc.parent) {
/*
* Registering the MTD device in iomem may not be possible
* if there is a BIOS "reserved" and BUSY range. If this
* fails then continue anyway.
*/
map->rsrc.name = map->map_name;
map->rsrc.start = map->map.phys;
map->rsrc.end = map->map.phys + map->mtd->size - 1;
map->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
if (request_resource(&window->rsrc, &map->rsrc)) {
printk(KERN_ERR MOD_NAME
": cannot reserve MTD resource\n");
map->rsrc.parent = NULL;
}
}
/* Make the whole region visible in the map */
map->map.virt = window->virt;
map->map.phys = window->phys;
cfi = map->map.fldrv_priv;
for(i = 0; i < cfi->numchips; i++)
cfi->chips[i].start += offset;
/* Now that the mtd devices is complete claim and export it */
map->mtd->owner = THIS_MODULE;
if (mtd_device_register(map->mtd, NULL, 0)) {
map_destroy(map->mtd);
map->mtd = NULL;
goto out;
}
/* Calculate the new value of map_top */
map_top += map->mtd->size;
/* File away the map structure */
list_add(&map->list, &window->maps);
map = NULL;
}
out:
/* Free any left over map structures */
if (map)
kfree(map);
/* See if I have any map structures */
if (list_empty(&window->maps)) {
ck804xrom_cleanup(window);
return -ENODEV;
}
return 0;
}
static void __devexit ck804xrom_remove_one (struct pci_dev *pdev)
{
struct ck804xrom_window *window = &ck804xrom_window;
ck804xrom_cleanup(window);
}
static struct pci_device_id ck804xrom_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0051), .driver_data = DEV_CK804 },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0360), .driver_data = DEV_MCP55 },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0361), .driver_data = DEV_MCP55 },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0362), .driver_data = DEV_MCP55 },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0363), .driver_data = DEV_MCP55 },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0364), .driver_data = DEV_MCP55 },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0365), .driver_data = DEV_MCP55 },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0366), .driver_data = DEV_MCP55 },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, 0x0367), .driver_data = DEV_MCP55 },
{ 0, }
};
#if 0
MODULE_DEVICE_TABLE(pci, ck804xrom_pci_tbl);
static struct pci_driver ck804xrom_driver = {
.name = MOD_NAME,
.id_table = ck804xrom_pci_tbl,
.probe = ck804xrom_init_one,
.remove = ck804xrom_remove_one,
};
#endif
static int __init init_ck804xrom(void)
{
struct pci_dev *pdev;
struct pci_device_id *id;
int retVal;
pdev = NULL;
for(id = ck804xrom_pci_tbl; id->vendor; id++) {
pdev = pci_get_device(id->vendor, id->device, NULL);
if (pdev)
break;
}
if (pdev) {
retVal = ck804xrom_init_one(pdev, id);
pci_dev_put(pdev);
return retVal;
}
return -ENXIO;
#if 0
return pci_register_driver(&ck804xrom_driver);
#endif
}
static void __exit cleanup_ck804xrom(void)
{
ck804xrom_remove_one(ck804xrom_window.pdev);
}
module_init(init_ck804xrom);
module_exit(cleanup_ck804xrom);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Eric Biederman <ebiederman@lnxi.com>, Dave Olsen <dolsen@lnxi.com>");
MODULE_DESCRIPTION("MTD map driver for BIOS chips on the Nvidia ck804 southbridge");
| gpl-2.0 |
bryan2894/D851_Kernel | drivers/parisc/pdc_stable.c | 8263 | 30951 | /*
* Interfaces to retrieve and set PDC Stable options (firmware)
*
* Copyright (C) 2005-2006 Thibaut VARENE <varenet@parisc-linux.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*
* DEV NOTE: the PDC Procedures reference states that:
* "A minimum of 96 bytes of Stable Storage is required. Providing more than
* 96 bytes of Stable Storage is optional [...]. Failure to provide the
* optional locations from 96 to 192 results in the loss of certain
* functionality during boot."
*
* Since locations between 96 and 192 are the various paths, most (if not
* all) PA-RISC machines should have them. Anyway, for safety reasons, the
* following code can deal with just 96 bytes of Stable Storage, and all
* sizes between 96 and 192 bytes (provided they are multiple of struct
* device_path size, eg: 128, 160 and 192) to provide full information.
* One last word: there's one path we can always count on: the primary path.
* Anything above 224 bytes is used for 'osdep2' OS-dependent storage area.
*
* The first OS-dependent area should always be available. Obviously, this is
* not true for the other one. Also bear in mind that reading/writing from/to
* osdep2 is much more expensive than from/to osdep1.
* NOTE: We do not handle the 2 bytes OS-dep area at 0x5D, nor the first
* 2 bytes of storage available right after OSID. That's a total of 4 bytes
* sacrificed: -ETOOLAZY :P
*
* The current policy wrt file permissions is:
* - write: root only
* - read: (reading triggers PDC calls) ? root only : everyone
* The rationale is that PDC calls could hog (DoS) the machine.
*
* TODO:
* - timer/fastsize write calls
*/
#undef PDCS_DEBUG
#ifdef PDCS_DEBUG
#define DPRINTK(fmt, args...) printk(KERN_DEBUG fmt, ## args)
#else
#define DPRINTK(fmt, args...)
#endif
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <asm/pdc.h>
#include <asm/page.h>
#include <asm/uaccess.h>
#include <asm/hardware.h>
#define PDCS_VERSION "0.30"
#define PDCS_PREFIX "PDC Stable Storage"
#define PDCS_ADDR_PPRI 0x00
#define PDCS_ADDR_OSID 0x40
#define PDCS_ADDR_OSD1 0x48
#define PDCS_ADDR_DIAG 0x58
#define PDCS_ADDR_FSIZ 0x5C
#define PDCS_ADDR_PCON 0x60
#define PDCS_ADDR_PALT 0x80
#define PDCS_ADDR_PKBD 0xA0
#define PDCS_ADDR_OSD2 0xE0
MODULE_AUTHOR("Thibaut VARENE <varenet@parisc-linux.org>");
MODULE_DESCRIPTION("sysfs interface to HP PDC Stable Storage data");
MODULE_LICENSE("GPL");
MODULE_VERSION(PDCS_VERSION);
/* holds Stable Storage size. Initialized once and for all, no lock needed */
static unsigned long pdcs_size __read_mostly;
/* holds OS ID. Initialized once and for all, hopefully to 0x0006 */
static u16 pdcs_osid __read_mostly;
/* This struct defines what we need to deal with a parisc pdc path entry */
struct pdcspath_entry {
rwlock_t rw_lock; /* to protect path entry access */
short ready; /* entry record is valid if != 0 */
unsigned long addr; /* entry address in stable storage */
char *name; /* entry name */
struct device_path devpath; /* device path in parisc representation */
struct device *dev; /* corresponding device */
struct kobject kobj;
};
struct pdcspath_attribute {
struct attribute attr;
ssize_t (*show)(struct pdcspath_entry *entry, char *buf);
ssize_t (*store)(struct pdcspath_entry *entry, const char *buf, size_t count);
};
#define PDCSPATH_ENTRY(_addr, _name) \
struct pdcspath_entry pdcspath_entry_##_name = { \
.ready = 0, \
.addr = _addr, \
.name = __stringify(_name), \
};
#define PDCS_ATTR(_name, _mode, _show, _store) \
struct kobj_attribute pdcs_attr_##_name = { \
.attr = {.name = __stringify(_name), .mode = _mode}, \
.show = _show, \
.store = _store, \
};
#define PATHS_ATTR(_name, _mode, _show, _store) \
struct pdcspath_attribute paths_attr_##_name = { \
.attr = {.name = __stringify(_name), .mode = _mode}, \
.show = _show, \
.store = _store, \
};
#define to_pdcspath_attribute(_attr) container_of(_attr, struct pdcspath_attribute, attr)
#define to_pdcspath_entry(obj) container_of(obj, struct pdcspath_entry, kobj)
/**
* pdcspath_fetch - This function populates the path entry structs.
* @entry: A pointer to an allocated pdcspath_entry.
*
* The general idea is that you don't read from the Stable Storage every time
* you access the files provided by the facilities. We store a copy of the
* content of the stable storage WRT various paths in these structs. We read
* these structs when reading the files, and we will write to these structs when
* writing to the files, and only then write them back to the Stable Storage.
*
* This function expects to be called with @entry->rw_lock write-hold.
*/
static int
pdcspath_fetch(struct pdcspath_entry *entry)
{
struct device_path *devpath;
if (!entry)
return -EINVAL;
devpath = &entry->devpath;
DPRINTK("%s: fetch: 0x%p, 0x%p, addr: 0x%lx\n", __func__,
entry, devpath, entry->addr);
/* addr, devpath and count must be word aligned */
if (pdc_stable_read(entry->addr, devpath, sizeof(*devpath)) != PDC_OK)
return -EIO;
/* Find the matching device.
NOTE: hardware_path overlays with device_path, so the nice cast can
be used */
entry->dev = hwpath_to_device((struct hardware_path *)devpath);
entry->ready = 1;
DPRINTK("%s: device: 0x%p\n", __func__, entry->dev);
return 0;
}
/**
* pdcspath_store - This function writes a path to stable storage.
* @entry: A pointer to an allocated pdcspath_entry.
*
* It can be used in two ways: either by passing it a preset devpath struct
* containing an already computed hardware path, or by passing it a device
* pointer, from which it'll find out the corresponding hardware path.
* For now we do not handle the case where there's an error in writing to the
* Stable Storage area, so you'd better not mess up the data :P
*
* This function expects to be called with @entry->rw_lock write-hold.
*/
static void
pdcspath_store(struct pdcspath_entry *entry)
{
struct device_path *devpath;
BUG_ON(!entry);
devpath = &entry->devpath;
/* We expect the caller to set the ready flag to 0 if the hardware
path struct provided is invalid, so that we know we have to fill it.
First case, we don't have a preset hwpath... */
if (!entry->ready) {
/* ...but we have a device, map it */
BUG_ON(!entry->dev);
device_to_hwpath(entry->dev, (struct hardware_path *)devpath);
}
/* else, we expect the provided hwpath to be valid. */
DPRINTK("%s: store: 0x%p, 0x%p, addr: 0x%lx\n", __func__,
entry, devpath, entry->addr);
/* addr, devpath and count must be word aligned */
if (pdc_stable_write(entry->addr, devpath, sizeof(*devpath)) != PDC_OK) {
printk(KERN_ERR "%s: an error occurred when writing to PDC.\n"
"It is likely that the Stable Storage data has been corrupted.\n"
"Please check it carefully upon next reboot.\n", __func__);
WARN_ON(1);
}
/* kobject is already registered */
entry->ready = 2;
DPRINTK("%s: device: 0x%p\n", __func__, entry->dev);
}
/**
* pdcspath_hwpath_read - This function handles hardware path pretty printing.
* @entry: An allocated and populated pdcspath_entry struct.
* @buf: The output buffer to write to.
*
* We will call this function to format the output of the hwpath attribute file.
*/
static ssize_t
pdcspath_hwpath_read(struct pdcspath_entry *entry, char *buf)
{
char *out = buf;
struct device_path *devpath;
short i;
if (!entry || !buf)
return -EINVAL;
read_lock(&entry->rw_lock);
devpath = &entry->devpath;
i = entry->ready;
read_unlock(&entry->rw_lock);
if (!i) /* entry is not ready */
return -ENODATA;
for (i = 0; i < 6; i++) {
if (devpath->bc[i] >= 128)
continue;
out += sprintf(out, "%u/", (unsigned char)devpath->bc[i]);
}
out += sprintf(out, "%u\n", (unsigned char)devpath->mod);
return out - buf;
}
/**
* pdcspath_hwpath_write - This function handles hardware path modifying.
* @entry: An allocated and populated pdcspath_entry struct.
* @buf: The input buffer to read from.
* @count: The number of bytes to be read.
*
* We will call this function to change the current hardware path.
* Hardware paths are to be given '/'-delimited, without brackets.
* We make sure that the provided path actually maps to an existing
* device, BUT nothing would prevent some foolish user from setting the path to
* some PCI bridge or even a CPU...
* A better workaround would be to make sure we are at the end of a device tree,
* for instance, but that is IMHO beyond the simple scope of this driver.
* The aim is to provide a facility. Data correctness is left to userland.
*/
static ssize_t
pdcspath_hwpath_write(struct pdcspath_entry *entry, const char *buf, size_t count)
{
struct hardware_path hwpath;
unsigned short i;
char in[count+1], *temp;
struct device *dev;
int ret;
if (!entry || !buf || !count)
return -EINVAL;
/* We'll use a local copy of buf */
memset(in, 0, count+1);
strncpy(in, buf, count);
/* Let's clean up the target. 0xff is a blank pattern */
memset(&hwpath, 0xff, sizeof(hwpath));
/* First, pick the mod field (the last one of the input string) */
if (!(temp = strrchr(in, '/')))
return -EINVAL;
hwpath.mod = simple_strtoul(temp+1, NULL, 10);
in[temp-in] = '\0'; /* truncate the remaining string, just as a precaution */
DPRINTK("%s: mod: %d\n", __func__, hwpath.mod);
/* Then, loop for each delimiter, making sure we don't have too many.
we write the bc fields bottom-up. No matter what, we stop
before writing the last field. If there are too many fields anyway,
then the user is a moron and it'll be caught later when we
check the consistency of the given hwpath. */
for (i=5; ((temp = strrchr(in, '/'))) && (temp-in > 0) && (likely(i)); i--) {
hwpath.bc[i] = simple_strtoul(temp+1, NULL, 10);
in[temp-in] = '\0';
DPRINTK("%s: bc[%d]: %d\n", __func__, i, hwpath.bc[i]);
}
/* Store the final field */
hwpath.bc[i] = simple_strtoul(in, NULL, 10);
DPRINTK("%s: bc[%d]: %d\n", __func__, i, hwpath.bc[i]);
/* Now we check that the user isn't trying to lure us */
if (!(dev = hwpath_to_device((struct hardware_path *)&hwpath))) {
printk(KERN_WARNING "%s: attempt to set invalid \"%s\" "
"hardware path: %s\n", __func__, entry->name, buf);
return -EINVAL;
}
/* So far so good, let's get in deep */
write_lock(&entry->rw_lock);
entry->ready = 0;
entry->dev = dev;
/* Now, dive in. Write back to the hardware */
pdcspath_store(entry);
/* Update the symlink to the real device */
sysfs_remove_link(&entry->kobj, "device");
ret = sysfs_create_link(&entry->kobj, &entry->dev->kobj, "device");
WARN_ON(ret);
write_unlock(&entry->rw_lock);
printk(KERN_INFO PDCS_PREFIX ": changed \"%s\" path to \"%s\"\n",
entry->name, buf);
return count;
}
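/*
 * Illustrative usage from userland (not part of the original source; it
 * assumes the /sys/firmware/stable tree created in pdc_stable_init()
 * below, and the path values shown are made up):
 *
 *	$ cat /sys/firmware/stable/paths/primary/hwpath
 *	10/0/1/0
 *	$ echo "10/0/2/0" > /sys/firmware/stable/paths/primary/hwpath
 *
 * Only '/'-delimited decimal fields are parsed, and the write is rejected
 * with -EINVAL unless the resulting path maps to an existing device.
 */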
/**
* pdcspath_layer_read - Extended layer (eg. SCSI ids) pretty printing.
* @entry: An allocated and populated pdcspath_entry struct.
* @buf: The output buffer to write to.
*
* We will call this function to format the output of the layer attribute file.
*/
static ssize_t
pdcspath_layer_read(struct pdcspath_entry *entry, char *buf)
{
char *out = buf;
struct device_path *devpath;
short i;
if (!entry || !buf)
return -EINVAL;
read_lock(&entry->rw_lock);
devpath = &entry->devpath;
i = entry->ready;
read_unlock(&entry->rw_lock);
if (!i) /* entry is not ready */
return -ENODATA;
for (i = 0; i < 6 && devpath->layers[i]; i++)
out += sprintf(out, "%u ", devpath->layers[i]);
out += sprintf(out, "\n");
return out - buf;
}
/**
* pdcspath_layer_write - This function handles extended layer modifying.
* @entry: An allocated and populated pdcspath_entry struct.
* @buf: The input buffer to read from.
* @count: The number of bytes to be read.
*
* We will call this function to change the current layer value.
* Layers are to be given '.'-delimited, without brackets.
* XXX beware: we are far less strict WRT the input data here than for hwpath.
* Potential harm can be done, since there's no way to check the validity of
* the layer fields.
*/
static ssize_t
pdcspath_layer_write(struct pdcspath_entry *entry, const char *buf, size_t count)
{
unsigned int layers[6]; /* device-specific info (ctlr#, unit#, ...) */
unsigned short i;
char in[count+1], *temp;
if (!entry || !buf || !count)
return -EINVAL;
/* We'll use a local copy of buf */
memset(in, 0, count+1);
strncpy(in, buf, count);
/* Let's clean up the target. 0 is a blank pattern */
memset(&layers, 0, sizeof(layers));
/* First, pick the first layer */
if (unlikely(!isdigit(*in)))
return -EINVAL;
layers[0] = simple_strtoul(in, NULL, 10);
DPRINTK("%s: layer[0]: %d\n", __func__, layers[0]);
temp = in;
for (i=1; ((temp = strchr(temp, '.'))) && (likely(i<6)); i++) {
if (unlikely(!isdigit(*(++temp))))
return -EINVAL;
layers[i] = simple_strtoul(temp, NULL, 10);
DPRINTK("%s: layer[%d]: %d\n", __func__, i, layers[i]);
}
/* So far so good, let's get in deep */
write_lock(&entry->rw_lock);
/* First, overwrite the current layers with the new ones, not touching
the hardware path. */
memcpy(&entry->devpath.layers, &layers, sizeof(layers));
/* Now, dive in. Write back to the hardware */
pdcspath_store(entry);
write_unlock(&entry->rw_lock);
printk(KERN_INFO PDCS_PREFIX ": changed \"%s\" layers to \"%s\"\n",
entry->name, buf);
return count;
}
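/*
 * Illustrative usage (not part of the original source, values made up):
 *
 *	$ echo "6.0" > /sys/firmware/stable/paths/primary/layer
 *
 * would set layers[0] = 6 and layers[1] = 0. As noted above, the layer
 * fields themselves cannot be validated.
 */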
/**
* pdcspath_attr_show - Generic read function call wrapper.
* @kobj: The kobject to get info from.
* @attr: The attribute looked upon.
* @buf: The output buffer.
*/
static ssize_t
pdcspath_attr_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
struct pdcspath_entry *entry = to_pdcspath_entry(kobj);
struct pdcspath_attribute *pdcs_attr = to_pdcspath_attribute(attr);
ssize_t ret = 0;
if (pdcs_attr->show)
ret = pdcs_attr->show(entry, buf);
return ret;
}
/**
* pdcspath_attr_store - Generic write function call wrapper.
* @kobj: The kobject to write info to.
* @attr: The attribute to be modified.
* @buf: The input buffer.
* @count: The size of the buffer.
*/
static ssize_t
pdcspath_attr_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
struct pdcspath_entry *entry = to_pdcspath_entry(kobj);
struct pdcspath_attribute *pdcs_attr = to_pdcspath_attribute(attr);
ssize_t ret = 0;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (pdcs_attr->store)
ret = pdcs_attr->store(entry, buf, count);
return ret;
}
static const struct sysfs_ops pdcspath_attr_ops = {
.show = pdcspath_attr_show,
.store = pdcspath_attr_store,
};
/* These are the two attributes of any PDC path. */
static PATHS_ATTR(hwpath, 0644, pdcspath_hwpath_read, pdcspath_hwpath_write);
static PATHS_ATTR(layer, 0644, pdcspath_layer_read, pdcspath_layer_write);
static struct attribute *paths_subsys_attrs[] = {
&paths_attr_hwpath.attr,
&paths_attr_layer.attr,
NULL,
};
/* Specific kobject type for our PDC paths */
static struct kobj_type ktype_pdcspath = {
.sysfs_ops = &pdcspath_attr_ops,
.default_attrs = paths_subsys_attrs,
};
/* We hard define the 4 types of path we expect to find */
static PDCSPATH_ENTRY(PDCS_ADDR_PPRI, primary);
static PDCSPATH_ENTRY(PDCS_ADDR_PCON, console);
static PDCSPATH_ENTRY(PDCS_ADDR_PALT, alternative);
static PDCSPATH_ENTRY(PDCS_ADDR_PKBD, keyboard);
/* An array containing all PDC paths we will deal with */
static struct pdcspath_entry *pdcspath_entries[] = {
&pdcspath_entry_primary,
&pdcspath_entry_alternative,
&pdcspath_entry_console,
&pdcspath_entry_keyboard,
NULL,
};
/* For more insight of what's going on here, refer to PDC Procedures doc,
* Section PDC_STABLE */
/**
* pdcs_size_read - Stable Storage size output.
* @buf: The output buffer to write to.
*/
static ssize_t pdcs_size_read(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
char *out = buf;
if (!buf)
return -EINVAL;
/* show the size of the stable storage */
out += sprintf(out, "%ld\n", pdcs_size);
return out - buf;
}
/**
* pdcs_auto_read - Stable Storage autoboot/search flag output.
* @buf: The output buffer to write to.
* @knob: The PF_AUTOBOOT or PF_AUTOSEARCH flag
*/
static ssize_t pdcs_auto_read(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf, int knob)
{
char *out = buf;
struct pdcspath_entry *pathentry;
if (!buf)
return -EINVAL;
/* Current flags are stored in primary boot path entry */
pathentry = &pdcspath_entry_primary;
read_lock(&pathentry->rw_lock);
out += sprintf(out, "%s\n", (pathentry->devpath.flags & knob) ?
"On" : "Off");
read_unlock(&pathentry->rw_lock);
return out - buf;
}
/**
* pdcs_autoboot_read - Stable Storage autoboot flag output.
* @buf: The output buffer to write to.
*/
static ssize_t pdcs_autoboot_read(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return pdcs_auto_read(kobj, attr, buf, PF_AUTOBOOT);
}
/**
* pdcs_autosearch_read - Stable Storage autosearch flag output.
* @buf: The output buffer to write to.
*/
static ssize_t pdcs_autosearch_read(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return pdcs_auto_read(kobj, attr, buf, PF_AUTOSEARCH);
}
/**
* pdcs_timer_read - Stable Storage timer count output (in seconds).
* @buf: The output buffer to write to.
*
* The value of the timer field corresponds to a number of seconds in powers of 2.
*/
static ssize_t pdcs_timer_read(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
char *out = buf;
struct pdcspath_entry *pathentry;
if (!buf)
return -EINVAL;
/* Current flags are stored in primary boot path entry */
pathentry = &pdcspath_entry_primary;
/* print the timer value in seconds */
read_lock(&pathentry->rw_lock);
out += sprintf(out, "%u\n", (pathentry->devpath.flags & PF_TIMER) ?
(1 << (pathentry->devpath.flags & PF_TIMER)) : 0);
read_unlock(&pathentry->rw_lock);
return out - buf;
}
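/*
 * Worked example for the read above: a PF_TIMER field of 3 makes the
 * file read "8" (2^3 seconds), while a zero field reads "0".
 */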
/**
* pdcs_osid_read - Stable Storage OS ID register output.
* @buf: The output buffer to write to.
*/
static ssize_t pdcs_osid_read(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
char *out = buf;
if (!buf)
return -EINVAL;
out += sprintf(out, "%s dependent data (0x%.4x)\n",
os_id_to_string(pdcs_osid), pdcs_osid);
return out - buf;
}
/**
* pdcs_osdep1_read - Stable Storage OS-Dependent data area 1 output.
* @buf: The output buffer to write to.
*
* This can hold 16 bytes of OS-Dependent data.
*/
static ssize_t pdcs_osdep1_read(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
char *out = buf;
u32 result[4];
if (!buf)
return -EINVAL;
if (pdc_stable_read(PDCS_ADDR_OSD1, &result, sizeof(result)) != PDC_OK)
return -EIO;
out += sprintf(out, "0x%.8x\n", result[0]);
out += sprintf(out, "0x%.8x\n", result[1]);
out += sprintf(out, "0x%.8x\n", result[2]);
out += sprintf(out, "0x%.8x\n", result[3]);
return out - buf;
}
/**
* pdcs_diagnostic_read - Stable Storage Diagnostic register output.
* @buf: The output buffer to write to.
*
* I have NFC how to interpret the content of that register ;-).
*/
static ssize_t pdcs_diagnostic_read(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
char *out = buf;
u32 result;
if (!buf)
return -EINVAL;
/* get diagnostic */
if (pdc_stable_read(PDCS_ADDR_DIAG, &result, sizeof(result)) != PDC_OK)
return -EIO;
out += sprintf(out, "0x%.4x\n", (result >> 16));
return out - buf;
}
/**
* pdcs_fastsize_read - Stable Storage FastSize register output.
* @buf: The output buffer to write to.
*
* This register holds the amount of system RAM to be tested during boot sequence.
*/
static ssize_t pdcs_fastsize_read(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
char *out = buf;
u32 result;
if (!buf)
return -EINVAL;
/* get fast-size */
if (pdc_stable_read(PDCS_ADDR_FSIZ, &result, sizeof(result)) != PDC_OK)
return -EIO;
if ((result & 0x0F) < 0x0E)
out += sprintf(out, "%d kB", (1<<(result & 0x0F))*256);
else
out += sprintf(out, "All");
out += sprintf(out, "\n");
return out - buf;
}
/**
* pdcs_osdep2_read - Stable Storage OS-Dependent data area 2 output.
* @buf: The output buffer to write to.
*
* This can hold pdcs_size - 224 bytes of OS-Dependent data, when available.
*/
static ssize_t pdcs_osdep2_read(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
char *out = buf;
unsigned long size;
unsigned short i;
u32 result;
if (unlikely(pdcs_size <= 224))
return -ENODATA;
size = pdcs_size - 224;
if (!buf)
return -EINVAL;
for (i=0; i<size; i+=4) {
if (unlikely(pdc_stable_read(PDCS_ADDR_OSD2 + i, &result,
sizeof(result)) != PDC_OK))
return -EIO;
out += sprintf(out, "0x%.8x\n", result);
}
return out - buf;
}
/**
* pdcs_auto_write - This function handles autoboot/search flag modifying.
* @buf: The input buffer to read from.
* @count: The number of bytes to be read.
* @knob: The PF_AUTOBOOT or PF_AUTOSEARCH flag
*
* We will call this function to change the current autoboot flag.
* We expect a precise syntax:
* \"n\" (n == 0 or 1) to toggle AutoBoot Off or On
*/
static ssize_t pdcs_auto_write(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf,
size_t count, int knob)
{
struct pdcspath_entry *pathentry;
unsigned char flags;
char in[count+1], *temp;
char c;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (!buf || !count)
return -EINVAL;
/* We'll use a local copy of buf */
memset(in, 0, count+1);
strncpy(in, buf, count);
/* Current flags are stored in primary boot path entry */
pathentry = &pdcspath_entry_primary;
/* Be nice to the existing flag record */
read_lock(&pathentry->rw_lock);
flags = pathentry->devpath.flags;
read_unlock(&pathentry->rw_lock);
DPRINTK("%s: flags before: 0x%X\n", __func__, flags);
temp = skip_spaces(in);
c = *temp++ - '0';
if ((c != 0) && (c != 1))
goto parse_error;
if (c == 0)
flags &= ~knob;
else
flags |= knob;
DPRINTK("%s: flags after: 0x%X\n", __func__, flags);
/* So far so good, let's get in deep */
write_lock(&pathentry->rw_lock);
/* Change the path entry flags first */
pathentry->devpath.flags = flags;
/* Now, dive in. Write back to the hardware */
pdcspath_store(pathentry);
write_unlock(&pathentry->rw_lock);
printk(KERN_INFO PDCS_PREFIX ": changed \"%s\" to \"%s\"\n",
(knob & PF_AUTOBOOT) ? "autoboot" : "autosearch",
(flags & knob) ? "On" : "Off");
return count;
parse_error:
printk(KERN_WARNING "%s: Parse error: expect \"n\" (n == 0 or 1)\n", __func__);
return -EINVAL;
}
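/*
 * Illustrative usage (not part of the original source, assuming the
 * default sysfs mount point):
 *
 *	$ echo 1 > /sys/firmware/stable/autoboot	<- AutoBoot On
 *	$ echo 0 > /sys/firmware/stable/autosearch	<- AutoSearch Off
 *
 * Anything but a leading '0' or '1' (after optional whitespace) is
 * rejected with -EINVAL.
 */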
/**
* pdcs_autoboot_write - This function handles autoboot flag modifying.
* @buf: The input buffer to read from.
* @count: The number of bytes to be read.
*
* We will call this function to change the current boot flags.
* We expect a precise syntax:
* \"n\" (n == 0 or 1) to toggle AutoSearch Off or On
*/
static ssize_t pdcs_autoboot_write(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
return pdcs_auto_write(kobj, attr, buf, count, PF_AUTOBOOT);
}
/**
* pdcs_autosearch_write - This function handles autosearch flag modifying.
* @buf: The input buffer to read from.
* @count: The number of bytes to be read.
*
* We will call this function to change the current boot flags.
* We expect a precise syntax:
* \"n\" (n == 0 or 1) to toggle AutoSearch Off or On
*/
static ssize_t pdcs_autosearch_write(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
return pdcs_auto_write(kobj, attr, buf, count, PF_AUTOSEARCH);
}
/**
* pdcs_osdep1_write - Stable Storage OS-Dependent data area 1 input.
* @buf: The input buffer to read from.
* @count: The number of bytes to be read.
*
* This can store 16 bytes of OS-Dependent data. We use a byte-by-byte
* write approach. It's up to userspace to deal with it when constructing
* its input buffer.
*/
static ssize_t pdcs_osdep1_write(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
u8 in[16];
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (!buf || !count)
return -EINVAL;
if (unlikely(pdcs_osid != OS_ID_LINUX))
return -EPERM;
if (count > 16)
return -EMSGSIZE;
/* We'll use a local copy of buf */
memset(in, 0, 16);
memcpy(in, buf, count);
if (pdc_stable_write(PDCS_ADDR_OSD1, &in, sizeof(in)) != PDC_OK)
return -EIO;
return count;
}
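/*
 * Illustrative usage (not part of the original source): the area takes
 * up to 16 raw bytes which are zero-padded before being written back,
 * so e.g.
 *
 *	$ printf 'linux-data' > /sys/firmware/stable/osdep1
 *
 * stores those 10 bytes followed by 6 zero bytes. Writes are refused
 * with -EPERM unless the OS ID is OS_ID_LINUX.
 */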
/**
* pdcs_osdep2_write - Stable Storage OS-Dependent data area 2 input.
* @buf: The input buffer to read from.
* @count: The number of bytes to be read.
*
* This can store pdcs_size - 224 bytes of OS-Dependent data. We use a
* byte-by-byte write approach. It's up to userspace to deal with it when
* constructing its input buffer.
*/
static ssize_t pdcs_osdep2_write(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
unsigned long size;
unsigned short i;
u8 in[4];
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (!buf || !count)
return -EINVAL;
if (unlikely(pdcs_size <= 224))
return -ENOSYS;
if (unlikely(pdcs_osid != OS_ID_LINUX))
return -EPERM;
size = pdcs_size - 224;
if (count > size)
return -EMSGSIZE;
/* We'll use a local copy of buf */
for (i=0; i<count; i+=4) {
memset(in, 0, 4);
memcpy(in, buf+i, (count-i < 4) ? count-i : 4);
if (unlikely(pdc_stable_write(PDCS_ADDR_OSD2 + i, &in,
sizeof(in)) != PDC_OK))
return -EIO;
}
return count;
}
/* The remaining attributes. */
static PDCS_ATTR(size, 0444, pdcs_size_read, NULL);
static PDCS_ATTR(autoboot, 0644, pdcs_autoboot_read, pdcs_autoboot_write);
static PDCS_ATTR(autosearch, 0644, pdcs_autosearch_read, pdcs_autosearch_write);
static PDCS_ATTR(timer, 0444, pdcs_timer_read, NULL);
static PDCS_ATTR(osid, 0444, pdcs_osid_read, NULL);
static PDCS_ATTR(osdep1, 0600, pdcs_osdep1_read, pdcs_osdep1_write);
static PDCS_ATTR(diagnostic, 0400, pdcs_diagnostic_read, NULL);
static PDCS_ATTR(fastsize, 0400, pdcs_fastsize_read, NULL);
static PDCS_ATTR(osdep2, 0600, pdcs_osdep2_read, pdcs_osdep2_write);
static struct attribute *pdcs_subsys_attrs[] = {
&pdcs_attr_size.attr,
&pdcs_attr_autoboot.attr,
&pdcs_attr_autosearch.attr,
&pdcs_attr_timer.attr,
&pdcs_attr_osid.attr,
&pdcs_attr_osdep1.attr,
&pdcs_attr_diagnostic.attr,
&pdcs_attr_fastsize.attr,
&pdcs_attr_osdep2.attr,
NULL,
};
static struct attribute_group pdcs_attr_group = {
.attrs = pdcs_subsys_attrs,
};
static struct kobject *stable_kobj;
static struct kset *paths_kset;
/**
* pdcs_register_pathentries - Prepares path entries kobjects for sysfs usage.
*
* It creates kobjects corresponding to each path entry with nice sysfs
* links to the real device. This is where the magic takes place: when
* registering the subsystem attributes during module init, each kobject hereby
* created will show in the sysfs tree as a folder containing files as defined
* by paths_subsys_attrs[].
*/
static inline int __init
pdcs_register_pathentries(void)
{
unsigned short i;
struct pdcspath_entry *entry;
int err;
/* Initialize the entries rw_lock before anything else */
for (i = 0; (entry = pdcspath_entries[i]); i++)
rwlock_init(&entry->rw_lock);
for (i = 0; (entry = pdcspath_entries[i]); i++) {
write_lock(&entry->rw_lock);
err = pdcspath_fetch(entry);
write_unlock(&entry->rw_lock);
if (err < 0)
continue;
entry->kobj.kset = paths_kset;
err = kobject_init_and_add(&entry->kobj, &ktype_pdcspath, NULL,
"%s", entry->name);
if (err)
return err;
/* kobject is now registered */
write_lock(&entry->rw_lock);
entry->ready = 2;
/* Add a nice symlink to the real device */
if (entry->dev) {
err = sysfs_create_link(&entry->kobj, &entry->dev->kobj, "device");
WARN_ON(err);
}
write_unlock(&entry->rw_lock);
kobject_uevent(&entry->kobj, KOBJ_ADD);
}
return 0;
}
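/*
 * Illustrative result (not part of the original source): after a
 * successful registration the tree looks roughly like
 *
 *	/sys/firmware/stable/paths/primary/hwpath
 *	/sys/firmware/stable/paths/primary/layer
 *	/sys/firmware/stable/paths/primary/device -> (symlink to the device)
 *
 * and likewise for console, alternative and keyboard. Entries whose
 * initial fetch failed are silently skipped.
 */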
/**
* pdcs_unregister_pathentries - Routine called when unregistering the module.
*/
static inline void
pdcs_unregister_pathentries(void)
{
unsigned short i;
struct pdcspath_entry *entry;
for (i = 0; (entry = pdcspath_entries[i]); i++) {
read_lock(&entry->rw_lock);
if (entry->ready >= 2)
kobject_put(&entry->kobj);
read_unlock(&entry->rw_lock);
}
}
/*
* For now we register the stable subsystem with the firmware subsystem
* and the paths subsystem with the stable subsystem
*/
static int __init
pdc_stable_init(void)
{
int rc = 0, error = 0;
u32 result;
/* find the size of the stable storage */
if (pdc_stable_get_size(&pdcs_size) != PDC_OK)
return -ENODEV;
/* make sure we have enough data */
if (pdcs_size < 96)
return -ENODATA;
printk(KERN_INFO PDCS_PREFIX " facility v%s\n", PDCS_VERSION);
/* get OSID */
if (pdc_stable_read(PDCS_ADDR_OSID, &result, sizeof(result)) != PDC_OK)
return -EIO;
/* the actual result is 16 bits away */
pdcs_osid = (u16)(result >> 16);
/* For now we'll register the directory at /sys/firmware/stable */
stable_kobj = kobject_create_and_add("stable", firmware_kobj);
if (!stable_kobj) {
rc = -ENOMEM;
goto fail_firmreg;
}
/* Don't forget the root entries */
error = sysfs_create_group(stable_kobj, &pdcs_attr_group);
/* register the paths kset as a child of the stable kset */
paths_kset = kset_create_and_add("paths", NULL, stable_kobj);
if (!paths_kset) {
rc = -ENOMEM;
goto fail_ksetreg;
}
/* now we create all "files" for the paths kset */
if ((rc = pdcs_register_pathentries()))
goto fail_pdcsreg;
return rc;
fail_pdcsreg:
pdcs_unregister_pathentries();
kset_unregister(paths_kset);
fail_ksetreg:
kobject_put(stable_kobj);
fail_firmreg:
printk(KERN_INFO PDCS_PREFIX " bailing out\n");
return rc;
}
static void __exit
pdc_stable_exit(void)
{
pdcs_unregister_pathentries();
kset_unregister(paths_kset);
kobject_put(stable_kobj);
}
module_init(pdc_stable_init);
module_exit(pdc_stable_exit);
| gpl-2.0 |
1N4148/android_kernel_samsung_smdk4412 | arch/mips/sgi-ip27/ip27-berr.c | 9287 | 3064 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994, 1995, 1996, 1999, 2000 by Ralf Baechle
* Copyright (C) 1999, 2000 by Silicon Graphics
* Copyright (C) 2002 Maciej W. Rozycki
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/signal.h> /* for SIGBUS */
#include <linux/sched.h> /* show_regs(), force_sig() */
#include <asm/module.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/sn0/hub.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
static void dump_hub_information(unsigned long errst0, unsigned long errst1)
{
static char *err_type[2][8] = {
{ NULL, "Uncached Partial Read PRERR", "DERR", "Read Timeout",
NULL, NULL, NULL, NULL },
{ "WERR", "Uncached Partial Write", "PWERR", "Write Timeout",
NULL, NULL, NULL, NULL }
};
int wrb = errst1 & PI_ERR_ST1_WRBRRB_MASK;
if (!(errst0 & PI_ERR_ST0_VALID_MASK)) {
printk("Hub does not contain valid error information\n");
return;
}
printk("Hub has valid error information:\n");
if (errst0 & PI_ERR_ST0_OVERRUN_MASK)
printk("Overrun is set. Error stack may contain additional "
"information.\n");
printk("Hub error address is %08lx\n",
(errst0 & PI_ERR_ST0_ADDR_MASK) >> (PI_ERR_ST0_ADDR_SHFT - 3));
printk("Incoming message command 0x%lx\n",
(errst0 & PI_ERR_ST0_CMD_MASK) >> PI_ERR_ST0_CMD_SHFT);
printk("Supplemental field of incoming message is 0x%lx\n",
(errst0 & PI_ERR_ST0_SUPPL_MASK) >> PI_ERR_ST0_SUPPL_SHFT);
printk("T5 Rn (for RRB only) is 0x%lx\n",
(errst0 & PI_ERR_ST0_REQNUM_MASK) >> PI_ERR_ST0_REQNUM_SHFT);
printk("Error type is %s\n", err_type[wrb]
[(errst0 & PI_ERR_ST0_TYPE_MASK) >> PI_ERR_ST0_TYPE_SHFT]
? : "invalid");
}
int ip27_be_handler(struct pt_regs *regs, int is_fixup)
{
unsigned long errst0, errst1;
int data = regs->cp0_cause & 4;
int cpu = LOCAL_HUB_L(PI_CPU_NUM);
if (is_fixup)
return MIPS_BE_FIXUP;
printk("Slice %c got %cbe at 0x%lx\n", 'A' + cpu, data ? 'd' : 'i',
regs->cp0_epc);
printk("Hub information:\n");
printk("ERR_INT_PEND = 0x%06llx\n", LOCAL_HUB_L(PI_ERR_INT_PEND));
errst0 = LOCAL_HUB_L(cpu ? PI_ERR_STATUS0_B : PI_ERR_STATUS0_A);
errst1 = LOCAL_HUB_L(cpu ? PI_ERR_STATUS1_B : PI_ERR_STATUS1_A);
dump_hub_information(errst0, errst1);
show_regs(regs);
dump_tlb_all();
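/* XXX loops forever; the force_sig() below is currently unreachable */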
while(1);
force_sig(SIGBUS, current);
}
void __init ip27_be_init(void)
{
/* XXX Initialize all the Hub & Bridge error handling here. */
int cpu = LOCAL_HUB_L(PI_CPU_NUM);
int cpuoff = cpu << 8;
board_be_handler = ip27_be_handler;
LOCAL_HUB_S(PI_ERR_INT_PEND,
cpu ? PI_ERR_CLEAR_ALL_B : PI_ERR_CLEAR_ALL_A);
LOCAL_HUB_S(PI_ERR_INT_MASK_A + cpuoff, 0);
LOCAL_HUB_S(PI_ERR_STACK_ADDR_A + cpuoff, 0);
LOCAL_HUB_S(PI_ERR_STACK_SIZE, 0); /* Disable error stack */
LOCAL_HUB_S(PI_SYSAD_ERRCHK_EN, PI_SYSAD_CHECK_ALL);
}
| gpl-2.0 |
96boards/linux | drivers/media/rc/keymaps/rc-terratec-slim-2.c | 9543 | 2242 | /*
* TerraTec remote controller keytable
*
* Copyright (C) 2011 Martin Groszhauser <mgroszhauser@gmail.com>
* Copyright (C) 2011 Antti Palosaari <crope@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <media/rc-map.h>
#include <linux/module.h>
/*
* TerraTec slim remote, 6 rows, 3 columns.
* Keytable from Martin Groszhauser <mgroszhauser@gmail.com>
*/
static struct rc_map_table terratec_slim_2[] = {
{ 0x8001, KEY_MUTE }, /* MUTE */
{ 0x8002, KEY_VOLUMEDOWN },
{ 0x8003, KEY_CHANNELDOWN },
{ 0x8004, KEY_1 },
{ 0x8005, KEY_2 },
{ 0x8006, KEY_3 },
{ 0x8007, KEY_4 },
{ 0x8008, KEY_5 },
{ 0x8009, KEY_6 },
{ 0x800a, KEY_7 },
{ 0x800c, KEY_ZOOM }, /* [fullscreen] */
{ 0x800d, KEY_0 },
{ 0x800e, KEY_AGAIN }, /* [two arrows forming a circle] */
{ 0x8012, KEY_POWER2 }, /* [red power button] */
{ 0x801a, KEY_VOLUMEUP },
{ 0x801b, KEY_8 },
{ 0x801e, KEY_CHANNELUP },
{ 0x801f, KEY_9 },
};
static struct rc_map_list terratec_slim_2_map = {
.map = {
.scan = terratec_slim_2,
.size = ARRAY_SIZE(terratec_slim_2),
.rc_type = RC_TYPE_NEC,
.name = RC_MAP_TERRATEC_SLIM_2,
}
};
static int __init init_rc_map_terratec_slim_2(void)
{
return rc_map_register(&terratec_slim_2_map);
}
static void __exit exit_rc_map_terratec_slim_2(void)
{
rc_map_unregister(&terratec_slim_2_map);
}
module_init(init_rc_map_terratec_slim_2)
module_exit(exit_rc_map_terratec_slim_2)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
| gpl-2.0 |
F4uzan/lge-kernel-lproj | fs/lockd/grace.c | 9799 | 1594 | /*
* Common code for control of lockd and nfsv4 grace periods.
*/
#include <linux/module.h>
#include <linux/lockd/bind.h>
static LIST_HEAD(grace_list);
static DEFINE_SPINLOCK(grace_lock);
/**
* locks_start_grace
* @lm: who this grace period is for
*
* A grace period is a period during which locks should not be given
* out. Currently grace periods are only enforced by the two lock
* managers (lockd and nfsd), using the locks_in_grace() function to
* check when they are in a grace period.
*
* This function is called to start a grace period.
*/
void locks_start_grace(struct lock_manager *lm)
{
spin_lock(&grace_lock);
list_add(&lm->list, &grace_list);
spin_unlock(&grace_lock);
}
EXPORT_SYMBOL_GPL(locks_start_grace);
/**
* locks_end_grace
* @lm: who this grace period is for
*
* Call this function to state that the given lock manager is ready to
* resume regular locking. The grace period will not end until all lock
* managers that called locks_start_grace() also call locks_end_grace().
* Note that callers count on it being safe to call this more than once,
* and the second call should be a no-op.
*/
void locks_end_grace(struct lock_manager *lm)
{
spin_lock(&grace_lock);
list_del_init(&lm->list);
spin_unlock(&grace_lock);
}
EXPORT_SYMBOL_GPL(locks_end_grace);
/**
* locks_in_grace
*
* Lock managers call this function to determine when it is OK for them
* to answer ordinary lock requests, and when they should accept only
* lock reclaims.
*/
int locks_in_grace(void)
{
return !list_empty(&grace_list);
}
EXPORT_SYMBOL_GPL(locks_in_grace);
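/*
 * Illustrative sketch (not part of the original file; 'my_lm' and the
 * function names are made up): a lock manager brackets its reclaim
 * window like this:
 *
 *	static struct lock_manager my_lm;
 *
 *	void my_manager_startup(void)
 *	{
 *		locks_start_grace(&my_lm);
 *	}
 *
 *	void my_grace_expired(void)
 *	{
 *		locks_end_grace(&my_lm);
 *	}
 *
 * While any manager is still in its grace period, locks_in_grace()
 * returns non-zero and ordinary lock requests should be refused.
 */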
| gpl-2.0 |
bmsitech/linux-imx6 | drivers/net/fddi/skfp/fplustm.c | 10567 | 39704 | /******************************************************************************
*
* (C)Copyright 1998,1999 SysKonnect,
* a business unit of Schneider & Koch & Co. Datensysteme GmbH.
*
* See the file "skfddi.c" for further information.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* The information in this file is provided "AS IS" without warranty.
*
******************************************************************************/
/*
* FORMAC+ Driver for tag mode
*/
#include "h/types.h"
#include "h/fddi.h"
#include "h/smc.h"
#include "h/supern_2.h"
#include <linux/bitrev.h>
#ifndef lint
static const char ID_sccs[] = "@(#)fplustm.c 1.32 99/02/23 (C) SK " ;
#endif
#ifndef UNUSED
#ifdef lint
#define UNUSED(x) (x) = (x)
#else
#define UNUSED(x)
#endif
#endif
#define FM_ADDRX (FM_ADDET|FM_EXGPA0|FM_EXGPA1)
#define MS2BCLK(x) ((x)*12500L)
#define US2BCLK(x) ((x)*1250L)
/*
* prototypes for static function
*/
static void build_claim_beacon(struct s_smc *smc, u_long t_request);
static int init_mac(struct s_smc *smc, int all);
static void rtm_init(struct s_smc *smc);
static void smt_split_up_fifo(struct s_smc *smc);
#if (!defined(NO_SMT_PANIC) || defined(DEBUG))
static char write_mdr_warning [] = "E350 write_mdr() FM_SNPPND is set\n";
static char cam_warning [] = "E_SMT_004: CAM still busy\n";
#endif
#define DUMMY_READ() smc->hw.mc_dummy = (u_short) inp(ADDR(B0_RAP))
#define CHECK_NPP() { unsigned k = 10000 ;\
while ((inpw(FM_A(FM_STMCHN)) & FM_SNPPND) && k) k--;\
if (!k) { \
SMT_PANIC(smc,SMT_E0130, SMT_E0130_MSG) ; \
} \
}
#define CHECK_CAM() { unsigned k = 10 ;\
while (!(inpw(FM_A(FM_AFSTAT)) & FM_DONE) && k) k--;\
if (!k) { \
SMT_PANIC(smc,SMT_E0131, SMT_E0131_MSG) ; \
} \
}
const struct fddi_addr fddi_broadcast = {{0xff,0xff,0xff,0xff,0xff,0xff}};
static const struct fddi_addr null_addr = {{0,0,0,0,0,0}};
static const struct fddi_addr dbeacon_multi = {{0x01,0x80,0xc2,0x00,0x01,0x00}};
static const u_short my_said = 0xffff ; /* short address (n.u.) */
static const u_short my_sagp = 0xffff ; /* short group address (n.u.) */
/*
* define my address
*/
#ifdef USE_CAN_ADDR
#define MA smc->hw.fddi_canon_addr
#else
#define MA smc->hw.fddi_home_addr
#endif
/*
* useful interrupt bits
*/
static const int mac_imsk1u = FM_STXABRS | FM_STXABRA0 | FM_SXMTABT ;
static const int mac_imsk1l = FM_SQLCKS | FM_SQLCKA0 | FM_SPCEPDS | FM_SPCEPDA0|
FM_STBURS | FM_STBURA0 ;
/* delete FM_SRBFL after tests */
static const int mac_imsk2u = FM_SERRSF | FM_SNFSLD | FM_SRCVOVR | FM_SRBFL |
FM_SMYCLM ;
static const int mac_imsk2l = FM_STRTEXR | FM_SDUPCLM | FM_SFRMCTR |
FM_SERRCTR | FM_SLSTCTR |
FM_STRTEXP | FM_SMULTDA | FM_SRNGOP ;
static const int mac_imsk3u = FM_SRCVOVR2 | FM_SRBFL2 ;
static const int mac_imsk3l = FM_SRPERRQ2 | FM_SRPERRQ1 ;
static const int mac_beacon_imsk2u = FM_SOTRBEC | FM_SMYBEC | FM_SBEC |
FM_SLOCLM | FM_SHICLM | FM_SMYCLM | FM_SCLM ;
static u_long mac_get_tneg(struct s_smc *smc)
{
u_long tneg ;
tneg = (u_long)((long)inpw(FM_A(FM_TNEG))<<5) ;
return (u_long)((tneg + ((inpw(FM_A(FM_TMRS))>>10)&0x1f)) |
0xffe00000L) ;
}
void mac_update_counter(struct s_smc *smc)
{
smc->mib.m[MAC0].fddiMACFrame_Ct =
(smc->mib.m[MAC0].fddiMACFrame_Ct & 0xffff0000L)
+ (u_short) inpw(FM_A(FM_FCNTR)) ;
smc->mib.m[MAC0].fddiMACLost_Ct =
(smc->mib.m[MAC0].fddiMACLost_Ct & 0xffff0000L)
+ (u_short) inpw(FM_A(FM_LCNTR)) ;
smc->mib.m[MAC0].fddiMACError_Ct =
(smc->mib.m[MAC0].fddiMACError_Ct & 0xffff0000L)
+ (u_short) inpw(FM_A(FM_ECNTR)) ;
smc->mib.m[MAC0].fddiMACT_Neg = mac_get_tneg(smc) ;
#ifdef SMT_REAL_TOKEN_CT
/*
* If the token counter is emulated it is updated in smt_event.
*/
TBD
#else
smt_emulate_token_ct( smc, MAC0 );
#endif
}
/*
* write long value into buffer memory over memory data register (MDR),
*/
static void write_mdr(struct s_smc *smc, u_long val)
{
CHECK_NPP() ;
MDRW(val) ;
}
#if 0
/*
* read long value from buffer memory over memory data register (MDR),
*/
static u_long read_mdr(struct s_smc *smc, unsigned int addr)
{
long p ;
CHECK_NPP() ;
MARR(addr) ;
outpw(FM_A(FM_CMDREG1),FM_IRMEMWO) ;
CHECK_NPP() ; /* needed for PCI to prevent timing violations */
/* p = MDRR() ; */ /* bad read values if the workaround */
/* smc->hw.mc_dummy = *((short volatile far *)(addr)))*/
/* is used */
p = (u_long)inpw(FM_A(FM_MDRU))<<16 ;
p += (u_long)inpw(FM_A(FM_MDRL)) ;
return p;
}
#endif
/*
* clear buffer memory
*/
static void init_ram(struct s_smc *smc)
{
u_short i ;
smc->hw.fp.fifo.rbc_ram_start = 0 ;
smc->hw.fp.fifo.rbc_ram_end =
smc->hw.fp.fifo.rbc_ram_start + RBC_MEM_SIZE ;
CHECK_NPP() ;
MARW(smc->hw.fp.fifo.rbc_ram_start) ;
for (i = smc->hw.fp.fifo.rbc_ram_start;
i < (u_short) (smc->hw.fp.fifo.rbc_ram_end-1); i++)
write_mdr(smc,0L) ;
/* Erase the last byte too */
write_mdr(smc,0L) ;
}
/*
* set receive FIFO pointer
*/
static void set_recvptr(struct s_smc *smc)
{
/*
* initialize the pointer for receive queue 1
*/
outpw(FM_A(FM_RPR1),smc->hw.fp.fifo.rx1_fifo_start) ; /* RPR1 */
outpw(FM_A(FM_SWPR1),smc->hw.fp.fifo.rx1_fifo_start) ; /* SWPR1 */
outpw(FM_A(FM_WPR1),smc->hw.fp.fifo.rx1_fifo_start) ; /* WPR1 */
outpw(FM_A(FM_EARV1),smc->hw.fp.fifo.tx_s_start-1) ; /* EARV1 */
/*
* initialize the pointer for receive queue 2
*/
if (smc->hw.fp.fifo.rx2_fifo_size) {
outpw(FM_A(FM_RPR2),smc->hw.fp.fifo.rx2_fifo_start) ;
outpw(FM_A(FM_SWPR2),smc->hw.fp.fifo.rx2_fifo_start) ;
outpw(FM_A(FM_WPR2),smc->hw.fp.fifo.rx2_fifo_start) ;
outpw(FM_A(FM_EARV2),smc->hw.fp.fifo.rbc_ram_end-1) ;
}
else {
outpw(FM_A(FM_RPR2),smc->hw.fp.fifo.rbc_ram_end-1) ;
outpw(FM_A(FM_SWPR2),smc->hw.fp.fifo.rbc_ram_end-1) ;
outpw(FM_A(FM_WPR2),smc->hw.fp.fifo.rbc_ram_end-1) ;
outpw(FM_A(FM_EARV2),smc->hw.fp.fifo.rbc_ram_end-1) ;
}
}
/*
* set transmit FIFO pointer
*/
static void set_txptr(struct s_smc *smc)
{
outpw(FM_A(FM_CMDREG2),FM_IRSTQ) ; /* reset transmit queues */
/*
* initialize the pointer for asynchronous transmit queue
*/
outpw(FM_A(FM_RPXA0),smc->hw.fp.fifo.tx_a0_start) ; /* RPXA0 */
outpw(FM_A(FM_SWPXA0),smc->hw.fp.fifo.tx_a0_start) ; /* SWPXA0 */
outpw(FM_A(FM_WPXA0),smc->hw.fp.fifo.tx_a0_start) ; /* WPXA0 */
outpw(FM_A(FM_EAA0),smc->hw.fp.fifo.rx2_fifo_start-1) ; /* EAA0 */
/*
* initialize the pointer for synchronous transmit queue
*/
if (smc->hw.fp.fifo.tx_s_size) {
outpw(FM_A(FM_RPXS),smc->hw.fp.fifo.tx_s_start) ;
outpw(FM_A(FM_SWPXS),smc->hw.fp.fifo.tx_s_start) ;
outpw(FM_A(FM_WPXS),smc->hw.fp.fifo.tx_s_start) ;
outpw(FM_A(FM_EAS),smc->hw.fp.fifo.tx_a0_start-1) ;
}
else {
outpw(FM_A(FM_RPXS),smc->hw.fp.fifo.tx_a0_start-1) ;
outpw(FM_A(FM_SWPXS),smc->hw.fp.fifo.tx_a0_start-1) ;
outpw(FM_A(FM_WPXS),smc->hw.fp.fifo.tx_a0_start-1) ;
outpw(FM_A(FM_EAS),smc->hw.fp.fifo.tx_a0_start-1) ;
}
}
/*
* init memory buffer management registers
*/
static void init_rbc(struct s_smc *smc)
{
u_short rbc_ram_addr ;
/*
* set unused pointers or permanent pointers
*/
rbc_ram_addr = smc->hw.fp.fifo.rx2_fifo_start - 1 ;
outpw(FM_A(FM_RPXA1),rbc_ram_addr) ; /* a1-send pointer */
outpw(FM_A(FM_WPXA1),rbc_ram_addr) ;
outpw(FM_A(FM_SWPXA1),rbc_ram_addr) ;
outpw(FM_A(FM_EAA1),rbc_ram_addr) ;
set_recvptr(smc) ;
set_txptr(smc) ;
}
/*
* init rx pointer
*/
static void init_rx(struct s_smc *smc)
{
struct s_smt_rx_queue *queue ;
/*
* init all tx data structures for receive queue 1
*/
smc->hw.fp.rx[QUEUE_R1] = queue = &smc->hw.fp.rx_q[QUEUE_R1] ;
queue->rx_bmu_ctl = (HW_PTR) ADDR(B0_R1_CSR) ;
queue->rx_bmu_dsc = (HW_PTR) ADDR(B4_R1_DA) ;
/*
* init all tx data structures for receive queue 2
*/
smc->hw.fp.rx[QUEUE_R2] = queue = &smc->hw.fp.rx_q[QUEUE_R2] ;
queue->rx_bmu_ctl = (HW_PTR) ADDR(B0_R2_CSR) ;
queue->rx_bmu_dsc = (HW_PTR) ADDR(B4_R2_DA) ;
}
/*
* set the TSYNC register of the FORMAC to regulate synchronous transmission
*/
void set_formac_tsync(struct s_smc *smc, long sync_bw)
{
outpw(FM_A(FM_TSYNC),(unsigned int) (((-sync_bw) >> 5) & 0xffff) ) ;
}
/*
* init all tx data structures
*/
static void init_tx(struct s_smc *smc)
{
struct s_smt_tx_queue *queue ;
/*
* init all tx data structures for the synchronous queue
*/
smc->hw.fp.tx[QUEUE_S] = queue = &smc->hw.fp.tx_q[QUEUE_S] ;
queue->tx_bmu_ctl = (HW_PTR) ADDR(B0_XS_CSR) ;
queue->tx_bmu_dsc = (HW_PTR) ADDR(B5_XS_DA) ;
#ifdef ESS
set_formac_tsync(smc,smc->ess.sync_bw) ;
#endif
/*
* init all tx data structures for the asynchronous queue 0
*/
smc->hw.fp.tx[QUEUE_A0] = queue = &smc->hw.fp.tx_q[QUEUE_A0] ;
queue->tx_bmu_ctl = (HW_PTR) ADDR(B0_XA_CSR) ;
queue->tx_bmu_dsc = (HW_PTR) ADDR(B5_XA_DA) ;
llc_recover_tx(smc) ;
}
static void mac_counter_init(struct s_smc *smc)
{
int i ;
u_long *ec ;
/*
* clear FORMAC+ frame-, lost- and error counter
*/
outpw(FM_A(FM_FCNTR),0) ;
outpw(FM_A(FM_LCNTR),0) ;
outpw(FM_A(FM_ECNTR),0) ;
/*
* clear internal error counter structure
*/
ec = (u_long *)&smc->hw.fp.err_stats ;
for (i = (sizeof(struct err_st)/sizeof(long)) ; i ; i--)
*ec++ = 0L ;
smc->mib.m[MAC0].fddiMACRingOp_Ct = 0 ;
}
/*
* set FORMAC address, and t_request
*/
static void set_formac_addr(struct s_smc *smc)
{
long t_requ = smc->mib.m[MAC0].fddiMACT_Req ;
outpw(FM_A(FM_SAID),my_said) ; /* set short address */
outpw(FM_A(FM_LAIL),(unsigned)((smc->hw.fddi_home_addr.a[4]<<8) +
smc->hw.fddi_home_addr.a[5])) ;
outpw(FM_A(FM_LAIC),(unsigned)((smc->hw.fddi_home_addr.a[2]<<8) +
smc->hw.fddi_home_addr.a[3])) ;
outpw(FM_A(FM_LAIM),(unsigned)((smc->hw.fddi_home_addr.a[0]<<8) +
smc->hw.fddi_home_addr.a[1])) ;
outpw(FM_A(FM_SAGP),my_sagp) ; /* set short group address */
outpw(FM_A(FM_LAGL),(unsigned)((smc->hw.fp.group_addr.a[4]<<8) +
smc->hw.fp.group_addr.a[5])) ;
outpw(FM_A(FM_LAGC),(unsigned)((smc->hw.fp.group_addr.a[2]<<8) +
smc->hw.fp.group_addr.a[3])) ;
outpw(FM_A(FM_LAGM),(unsigned)((smc->hw.fp.group_addr.a[0]<<8) +
smc->hw.fp.group_addr.a[1])) ;
/* set r_request regs. (MSW & LSW of TRT ) */
outpw(FM_A(FM_TREQ1),(unsigned)(t_requ>>16)) ;
outpw(FM_A(FM_TREQ0),(unsigned)t_requ) ;
}
static void set_int(char *p, int l)
{
p[0] = (char)(l >> 24) ;
p[1] = (char)(l >> 16) ;
p[2] = (char)(l >> 8) ;
p[3] = (char)(l >> 0) ;
}
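/*
 * Worked example: set_int(p,0x12345678) stores p[0]=0x12, p[1]=0x34,
 * p[2]=0x56, p[3]=0x78, i.e. the value in big endian byte order as
 * needed for the frame info fields built below.
 */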
/*
* copy TX descriptor to buffer mem
* append FC field and MAC frame
* if more bit is set in descr
* append pointer to descriptor (endless loop)
* else
* append 'end of chain' pointer
*/
static void copy_tx_mac(struct s_smc *smc, u_long td, struct fddi_mac *mac,
unsigned off, int len)
/* u_long td; transmit descriptor */
/* struct fddi_mac *mac; mac frame pointer */
/* unsigned off; start address within buffer memory */
/* int len ; length of the frame including the FC */
{
int i ;
__le32 *p ;
CHECK_NPP() ;
MARW(off) ; /* set memory address reg for writes */
p = (__le32 *) mac ;
for (i = (len + 3)/4 ; i ; i--) {
if (i == 1) {
/* last word, set the tag bit */
outpw(FM_A(FM_CMDREG2),FM_ISTTB) ;
}
write_mdr(smc,le32_to_cpu(*p)) ;
p++ ;
}
outpw(FM_A(FM_CMDREG2),FM_ISTTB) ; /* set the tag bit */
write_mdr(smc,td) ; /* write over memory data reg to buffer */
}
/*
BEGIN_MANUAL_ENTRY(module;tests;3)
How to test directed beacon frames
----------------------------------------------------------------
o Insert a break point in the function build_claim_beacon()
before calling copy_tx_mac() for building the claim frame.
o Modify the RM3_DETECT case so that the RM6_DETECT state
will always be entered from the RM3_DETECT state (function rmt_fsm(),
rmt.c)
o Compile the driver.
o Set the parameter TREQ in the protocol.ini or net.cfg to a
small value to make sure your station will win the claim
process.
o Start the driver.
o When you reach the break point, modify the SA and DA address
of the claim frame (e.g. SA = DA = 10005affffff).
o When you see RM3_DETECT and RM6_DETECT, observe the direct
beacon frames on the UPPSLANA.
END_MANUAL_ENTRY
*/
static void directed_beacon(struct s_smc *smc)
{
SK_LOC_DECL(__le32,a[2]) ;
/*
* set UNA in frame
* enable FORMAC to send endless queue of directed beacon
* important: the UNA starts at byte 1 (not at byte 0)
*/
* (char *) a = (char) ((long)DBEACON_INFO<<24L) ;
a[1] = 0 ;
memcpy((char *)a+1,(char *) &smc->mib.m[MAC0].fddiMACUpstreamNbr,6) ;
CHECK_NPP() ;
/* set memory address reg for writes */
MARW(smc->hw.fp.fifo.rbc_ram_start+DBEACON_FRAME_OFF+4) ;
write_mdr(smc,le32_to_cpu(a[0])) ;
outpw(FM_A(FM_CMDREG2),FM_ISTTB) ; /* set the tag bit */
write_mdr(smc,le32_to_cpu(a[1])) ;
outpw(FM_A(FM_SABC),smc->hw.fp.fifo.rbc_ram_start + DBEACON_FRAME_OFF) ;
}
/*
setup claim & beacon pointer
NOTE :
special frame packets end with a pointer to their own
descriptor, and the MORE bit is set in the descriptor
*/
static void build_claim_beacon(struct s_smc *smc, u_long t_request)
{
u_int td ;
int len ;
struct fddi_mac_sf *mac ;
/*
* build claim packet
*/
len = 17 ;
td = TX_DESCRIPTOR | ((((u_int)len-1)&3)<<27) ;
mac = &smc->hw.fp.mac_sfb ;
mac->mac_fc = FC_CLAIM ;
/* DA == SA in claim frame */
mac->mac_source = mac->mac_dest = MA ;
/* 2's complement */
set_int((char *)mac->mac_info,(int)t_request) ;
copy_tx_mac(smc,td,(struct fddi_mac *)mac,
smc->hw.fp.fifo.rbc_ram_start + CLAIM_FRAME_OFF,len) ;
/* set CLAIM start pointer */
outpw(FM_A(FM_SACL),smc->hw.fp.fifo.rbc_ram_start + CLAIM_FRAME_OFF) ;
/*
* build beacon packet
*/
len = 17 ;
td = TX_DESCRIPTOR | ((((u_int)len-1)&3)<<27) ;
mac->mac_fc = FC_BEACON ;
mac->mac_source = MA ;
mac->mac_dest = null_addr ; /* DA == 0 in beacon frame */
set_int((char *) mac->mac_info,((int)BEACON_INFO<<24) + 0 ) ;
copy_tx_mac(smc,td,(struct fddi_mac *)mac,
smc->hw.fp.fifo.rbc_ram_start + BEACON_FRAME_OFF,len) ;
/* set beacon start pointer */
outpw(FM_A(FM_SABC),smc->hw.fp.fifo.rbc_ram_start + BEACON_FRAME_OFF) ;
/*
* build directed beacon packet
* contains optional UNA
*/
len = 23 ;
td = TX_DESCRIPTOR | ((((u_int)len-1)&3)<<27) ;
mac->mac_fc = FC_BEACON ;
mac->mac_source = MA ;
mac->mac_dest = dbeacon_multi ; /* multicast */
set_int((char *) mac->mac_info,((int)DBEACON_INFO<<24) + 0 ) ;
set_int((char *) mac->mac_info+4,0) ;
set_int((char *) mac->mac_info+8,0) ;
copy_tx_mac(smc,td,(struct fddi_mac *)mac,
smc->hw.fp.fifo.rbc_ram_start + DBEACON_FRAME_OFF,len) ;
/* end of claim/beacon queue */
outpw(FM_A(FM_EACB),smc->hw.fp.fifo.rx1_fifo_start-1) ;
outpw(FM_A(FM_WPXSF),0) ;
outpw(FM_A(FM_RPXSF),0) ;
}
static void formac_rcv_restart(struct s_smc *smc)
{
/* enable receive function */
SETMASK(FM_A(FM_MDREG1),smc->hw.fp.rx_mode,FM_ADDRX) ;
outpw(FM_A(FM_CMDREG1),FM_ICLLR) ; /* clear receive lock */
}
void formac_tx_restart(struct s_smc *smc)
{
outpw(FM_A(FM_CMDREG1),FM_ICLLS) ; /* clear s-frame lock */
outpw(FM_A(FM_CMDREG1),FM_ICLLA0) ; /* clear a-frame lock */
}
static void enable_formac(struct s_smc *smc)
{
/* set formac IMSK : 0 enables irq */
outpw(FM_A(FM_IMSK1U),(unsigned short)~mac_imsk1u);
outpw(FM_A(FM_IMSK1L),(unsigned short)~mac_imsk1l);
outpw(FM_A(FM_IMSK2U),(unsigned short)~mac_imsk2u);
outpw(FM_A(FM_IMSK2L),(unsigned short)~mac_imsk2l);
outpw(FM_A(FM_IMSK3U),(unsigned short)~mac_imsk3u);
outpw(FM_A(FM_IMSK3L),(unsigned short)~mac_imsk3l);
}
#if 0 /* Removed because the driver should use the ASICs TX complete IRQ. */
/* The FORMACs tx complete IRQ should be used any longer */
/*
BEGIN_MANUAL_ENTRY(if,func;others;4)
void enable_tx_irq(smc, queue)
struct s_smc *smc ;
u_short queue ;
Function DOWNCALL (SMT, fplustm.c)
enable_tx_irq() enables the FORMACs transmit complete
interrupt of the queue.
Para queue = QUEUE_S: synchronous queue
= QUEUE_A0: asynchronous queue
Note After any ring operational change the transmit complete
interrupts are disabled.
The operating system dependent module must enable
the transmit complete interrupt of a queue,
- when it queues the first frame,
because no transmit resources are
available, and
- when it escapes from the function llc_restart_tx
while some frames are still queued.
END_MANUAL_ENTRY
*/
void enable_tx_irq(struct s_smc *smc, u_short queue)
/* u_short queue; 0 = synchronous queue, 1 = asynchronous queue 0 */
{
u_short imask ;
imask = ~(inpw(FM_A(FM_IMSK1U))) ;
if (queue == 0) {
outpw(FM_A(FM_IMSK1U),~(imask|FM_STEFRMS)) ;
}
if (queue == 1) {
outpw(FM_A(FM_IMSK1U),~(imask|FM_STEFRMA0)) ;
}
}
/*
BEGIN_MANUAL_ENTRY(if,func;others;4)
void disable_tx_irq(smc, queue)
struct s_smc *smc ;
u_short queue ;
Function DOWNCALL (SMT, fplustm.c)
disable_tx_irq disables the FORMACs transmit complete
interrupt of the queue
Para queue = QUEUE_S: synchronous queue
= QUEUE_A0: asynchronous queue
Note The operating system dependent module should disable
the transmit complete interrupts if it escapes from the
function llc_restart_tx and no frames are queued.
END_MANUAL_ENTRY
*/
void disable_tx_irq(struct s_smc *smc, u_short queue)
/* u_short queue; 0 = synchronous queue, 1 = asynchronous queue 0 */
{
u_short imask ;
imask = ~(inpw(FM_A(FM_IMSK1U))) ;
if (queue == 0) {
outpw(FM_A(FM_IMSK1U),~(imask&~FM_STEFRMS)) ;
}
if (queue == 1) {
outpw(FM_A(FM_IMSK1U),~(imask&~FM_STEFRMA0)) ;
}
}
#endif
static void disable_formac(struct s_smc *smc)
{
/* clear formac IMSK : 1 disables irq */
outpw(FM_A(FM_IMSK1U),MW) ;
outpw(FM_A(FM_IMSK1L),MW) ;
outpw(FM_A(FM_IMSK2U),MW) ;
outpw(FM_A(FM_IMSK2L),MW) ;
outpw(FM_A(FM_IMSK3U),MW) ;
outpw(FM_A(FM_IMSK3L),MW) ;
}
static void mac_ring_up(struct s_smc *smc, int up)
{
if (up) {
formac_rcv_restart(smc) ; /* enable receive function */
smc->hw.mac_ring_is_up = TRUE ;
llc_restart_tx(smc) ; /* TX queue */
}
else {
/* disable receive function */
SETMASK(FM_A(FM_MDREG1),FM_MDISRCV,FM_ADDET) ;
/* abort current transmit activity */
outpw(FM_A(FM_CMDREG2),FM_IACTR) ;
smc->hw.mac_ring_is_up = FALSE ;
}
}
/*--------------------------- ISR handling ----------------------------------*/
/*
* mac1_irq is in drvfbi.c
*/
/*
* mac2_irq: status bits for the receive queue 1, and ring status
* ring status indication bits
*/
void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l)
{
u_short change_s2l ;
u_short change_s2u ;
/* (jd) 22-Feb-1999
* Restart 2_DMax Timer after end of claiming or beaconing
*/
if (code_s2u & (FM_SCLM|FM_SHICLM|FM_SBEC|FM_SOTRBEC)) {
queue_event(smc,EVENT_RMT,RM_TX_STATE_CHANGE) ;
}
else if (code_s2l & (FM_STKISS)) {
queue_event(smc,EVENT_RMT,RM_TX_STATE_CHANGE) ;
}
/*
* XOR current st bits with the last to avoid useless RMT event queuing
*/
change_s2l = smc->hw.fp.s2l ^ code_s2l ;
change_s2u = smc->hw.fp.s2u ^ code_s2u ;
if ((change_s2l & FM_SRNGOP) ||
(!smc->hw.mac_ring_is_up && ((code_s2l & FM_SRNGOP)))) {
if (code_s2l & FM_SRNGOP) {
mac_ring_up(smc,1) ;
queue_event(smc,EVENT_RMT,RM_RING_OP) ;
smc->mib.m[MAC0].fddiMACRingOp_Ct++ ;
}
else {
mac_ring_up(smc,0) ;
queue_event(smc,EVENT_RMT,RM_RING_NON_OP) ;
}
goto mac2_end ;
}
if (code_s2l & FM_SMISFRM) { /* missed frame */
smc->mib.m[MAC0].fddiMACNotCopied_Ct++ ;
}
if (code_s2u & (FM_SRCVOVR | /* recv. FIFO overflow */
FM_SRBFL)) { /* recv. buffer full */
smc->hw.mac_ct.mac_r_restart_counter++ ;
/* formac_rcv_restart(smc) ; */
smt_stat_counter(smc,1) ;
/* goto mac2_end ; */
}
if (code_s2u & FM_SOTRBEC)
queue_event(smc,EVENT_RMT,RM_OTHER_BEACON) ;
if (code_s2u & FM_SMYBEC)
queue_event(smc,EVENT_RMT,RM_MY_BEACON) ;
if (change_s2u & code_s2u & FM_SLOCLM) {
DB_RMTN(2,"RMT : lower claim received\n",0,0) ;
}
if ((code_s2u & FM_SMYCLM) && !(code_s2l & FM_SDUPCLM)) {
/*
* This is my claim and that claim is not detected as a
* duplicate one.
*/
queue_event(smc,EVENT_RMT,RM_MY_CLAIM) ;
}
if (code_s2l & FM_SDUPCLM) {
/*
* If a duplicate claim frame (same SA but T_Bid != T_Req)
* is received, this flag will be set.
* In the RMT state machine we need a RM_VALID_CLAIM event
* to do the appropriate state change.
* RM(34c)
*/
queue_event(smc,EVENT_RMT,RM_VALID_CLAIM) ;
}
if (change_s2u & code_s2u & FM_SHICLM) {
DB_RMTN(2,"RMT : higher claim received\n",0,0) ;
}
if ( (code_s2l & FM_STRTEXP) ||
(code_s2l & FM_STRTEXR) )
queue_event(smc,EVENT_RMT,RM_TRT_EXP) ;
if (code_s2l & FM_SMULTDA) {
/*
* The MAC has found a second MAC with the same address.
* Signal dup_addr_test = failed to RMT state machine.
* RM(25)
*/
smc->r.dup_addr_test = DA_FAILED ;
queue_event(smc,EVENT_RMT,RM_DUP_ADDR) ;
}
if (code_s2u & FM_SBEC)
smc->hw.fp.err_stats.err_bec_stat++ ;
if (code_s2u & FM_SCLM)
smc->hw.fp.err_stats.err_clm_stat++ ;
if (code_s2l & FM_STVXEXP)
smc->mib.m[MAC0].fddiMACTvxExpired_Ct++ ;
if ((code_s2u & (FM_SBEC|FM_SCLM))) {
if (!(change_s2l & FM_SRNGOP) && (smc->hw.fp.s2l & FM_SRNGOP)) {
mac_ring_up(smc,0) ;
queue_event(smc,EVENT_RMT,RM_RING_NON_OP) ;
mac_ring_up(smc,1) ;
queue_event(smc,EVENT_RMT,RM_RING_OP) ;
smc->mib.m[MAC0].fddiMACRingOp_Ct++ ;
}
}
if (code_s2l & FM_SPHINV)
smc->hw.fp.err_stats.err_phinv++ ;
if (code_s2l & FM_SSIFG)
smc->hw.fp.err_stats.err_sifg_det++ ;
if (code_s2l & FM_STKISS)
smc->hw.fp.err_stats.err_tkiss++ ;
if (code_s2l & FM_STKERR)
smc->hw.fp.err_stats.err_tkerr++ ;
if (code_s2l & FM_SFRMCTR)
smc->mib.m[MAC0].fddiMACFrame_Ct += 0x10000L ;
if (code_s2l & FM_SERRCTR)
smc->mib.m[MAC0].fddiMACError_Ct += 0x10000L ;
if (code_s2l & FM_SLSTCTR)
smc->mib.m[MAC0].fddiMACLost_Ct += 0x10000L ;
if (code_s2u & FM_SERRSF) {
SMT_PANIC(smc,SMT_E0114, SMT_E0114_MSG) ;
}
mac2_end:
/* notice old status */
smc->hw.fp.s2l = code_s2l ;
smc->hw.fp.s2u = code_s2u ;
outpw(FM_A(FM_IMSK2U),~mac_imsk2u) ;
}
/*
* mac3_irq: receive queue 2 bits and address detection bits
*/
void mac3_irq(struct s_smc *smc, u_short code_s3u, u_short code_s3l)
{
UNUSED(code_s3l) ;
if (code_s3u & (FM_SRCVOVR2 | /* recv. FIFO overflow */
FM_SRBFL2)) { /* recv. buffer full */
smc->hw.mac_ct.mac_r_restart_counter++ ;
smt_stat_counter(smc,1);
}
if (code_s3u & FM_SRPERRQ2) { /* parity error receive queue 2 */
SMT_PANIC(smc,SMT_E0115, SMT_E0115_MSG) ;
}
if (code_s3u & FM_SRPERRQ1) { /* parity error receive queue 1 */
SMT_PANIC(smc,SMT_E0116, SMT_E0116_MSG) ;
}
}
/*
* take formac offline
*/
static void formac_offline(struct s_smc *smc)
{
outpw(FM_A(FM_CMDREG2),FM_IACTR) ;/* abort current transmit activity */
/* disable receive function */
SETMASK(FM_A(FM_MDREG1),FM_MDISRCV,FM_ADDET) ;
/* FORMAC+ 'Initialize Mode' */
SETMASK(FM_A(FM_MDREG1),FM_MINIT,FM_MMODE) ;
disable_formac(smc) ;
smc->hw.mac_ring_is_up = FALSE ;
smc->hw.hw_state = STOPPED ;
}
/*
* bring formac online
*/
static void formac_online(struct s_smc *smc)
{
enable_formac(smc) ;
SETMASK(FM_A(FM_MDREG1),FM_MONLINE | FM_SELRA | MDR1INIT |
smc->hw.fp.rx_mode, FM_MMODE | FM_SELRA | FM_ADDRX) ;
}
/*
* FORMAC+ full init. (tx, rx, timer, counter, claim & beacon)
*/
int init_fplus(struct s_smc *smc)
{
smc->hw.fp.nsa_mode = FM_MRNNSAFNMA ;
smc->hw.fp.rx_mode = FM_MDAMA ;
smc->hw.fp.group_addr = fddi_broadcast ;
smc->hw.fp.func_addr = 0 ;
smc->hw.fp.frselreg_init = 0 ;
init_driver_fplus(smc) ;
if (smc->s.sas == SMT_DAS)
smc->hw.fp.mdr3init |= FM_MENDAS ;
smc->hw.mac_ct.mac_nobuf_counter = 0 ;
smc->hw.mac_ct.mac_r_restart_counter = 0 ;
smc->hw.fp.fm_st1u = (HW_PTR) ADDR(B0_ST1U) ;
smc->hw.fp.fm_st1l = (HW_PTR) ADDR(B0_ST1L) ;
smc->hw.fp.fm_st2u = (HW_PTR) ADDR(B0_ST2U) ;
smc->hw.fp.fm_st2l = (HW_PTR) ADDR(B0_ST2L) ;
smc->hw.fp.fm_st3u = (HW_PTR) ADDR(B0_ST3U) ;
smc->hw.fp.fm_st3l = (HW_PTR) ADDR(B0_ST3L) ;
smc->hw.fp.s2l = smc->hw.fp.s2u = 0 ;
smc->hw.mac_ring_is_up = 0 ;
mac_counter_init(smc) ;
/* convert BCKL units to symbol time */
smc->hw.mac_pa.t_neg = (u_long)0 ;
smc->hw.mac_pa.t_pri = (u_long)0 ;
/* make sure all PCI settings are correct */
mac_do_pci_fix(smc) ;
return init_mac(smc, 1);
/* enable_formac(smc) ; */
}
static int init_mac(struct s_smc *smc, int all)
{
u_short t_max,x ;
u_long time=0 ;
/*
* clear memory
*/
outpw(FM_A(FM_MDREG1),FM_MINIT) ; /* FORMAC+ init mode */
set_formac_addr(smc) ;
outpw(FM_A(FM_MDREG1),FM_MMEMACT) ; /* FORMAC+ memory active mode */
/* Note: Mode register 2 is set here, in case parity is enabled. */
outpw(FM_A(FM_MDREG2),smc->hw.fp.mdr2init) ;
if (all) {
init_ram(smc) ;
}
else {
/*
* reset the HPI, the Master and the BMUs
*/
outp(ADDR(B0_CTRL), CTRL_HPI_SET) ;
time = hwt_quick_read(smc) ;
}
/*
* set all pointers, frames etc
*/
smt_split_up_fifo(smc) ;
init_tx(smc) ;
init_rx(smc) ;
init_rbc(smc) ;
build_claim_beacon(smc,smc->mib.m[MAC0].fddiMACT_Req) ;
/* set RX threshold */
/* see Errata #SN2 Phantom receive overflow */
outpw(FM_A(FM_FRMTHR),14<<12) ; /* switch on */
/* set formac work mode */
outpw(FM_A(FM_MDREG1),MDR1INIT | FM_SELRA | smc->hw.fp.rx_mode) ;
outpw(FM_A(FM_MDREG2),smc->hw.fp.mdr2init) ;
outpw(FM_A(FM_MDREG3),smc->hw.fp.mdr3init) ;
outpw(FM_A(FM_FRSELREG),smc->hw.fp.frselreg_init) ;
/* set timer */
/*
* errata #22 fplus:
* T_MAX must not be FFFE
* or one of FFDF, FFB8, FF91 (-0x27 etc..)
*/
t_max = (u_short)(smc->mib.m[MAC0].fddiMACT_Max/32) ;
x = t_max/0x27 ;
x *= 0x27 ;
if ((t_max == 0xfffe) || (t_max - x == 0x16))
t_max-- ;
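/*
 * Worked example for the errata check above: t_max = 0xffdf gives
 * x = (0xffdf/0x27)*0x27 = 0xffc9 and t_max-x = 0x16, so t_max is
 * decremented to 0xffde; 0xffb8 and 0xff91 are caught the same way,
 * all being congruent 0x16 modulo 0x27.
 */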
outpw(FM_A(FM_TMAX),(u_short)t_max) ;
/* BugFix for report #10204 */
if (smc->mib.m[MAC0].fddiMACTvxValue < (u_long) (- US2BCLK(52))) {
outpw(FM_A(FM_TVX), (u_short) (- US2BCLK(52))/255 & MB) ;
} else {
outpw(FM_A(FM_TVX),
(u_short)((smc->mib.m[MAC0].fddiMACTvxValue/255) & MB)) ;
}
outpw(FM_A(FM_CMDREG1),FM_ICLLS) ; /* clear s-frame lock */
outpw(FM_A(FM_CMDREG1),FM_ICLLA0) ; /* clear a-frame lock */
outpw(FM_A(FM_CMDREG1),FM_ICLLR); /* clear receive lock */
/* Auto unlock receive threshold for receive queue 1 and 2 */
outpw(FM_A(FM_UNLCKDLY),(0xff|(0xff<<8))) ;
rtm_init(smc) ; /* RT-Monitor */
if (!all) {
/*
* after 10ms, reset the BMUs and repair the rings
*/
hwt_wait_time(smc,time,MS2BCLK(10)) ;
outpd(ADDR(B0_R1_CSR),CSR_SET_RESET) ;
outpd(ADDR(B0_XA_CSR),CSR_SET_RESET) ;
outpd(ADDR(B0_XS_CSR),CSR_SET_RESET) ;
outp(ADDR(B0_CTRL), CTRL_HPI_CLR) ;
outpd(ADDR(B0_R1_CSR),CSR_CLR_RESET) ;
outpd(ADDR(B0_XA_CSR),CSR_CLR_RESET) ;
outpd(ADDR(B0_XS_CSR),CSR_CLR_RESET) ;
if (!smc->hw.hw_is_64bit) {
outpd(ADDR(B4_R1_F), RX_WATERMARK) ;
outpd(ADDR(B5_XA_F), TX_WATERMARK) ;
outpd(ADDR(B5_XS_F), TX_WATERMARK) ;
}
smc->hw.hw_state = STOPPED ;
mac_drv_repair_descr(smc) ;
}
smc->hw.hw_state = STARTED ;
return 0;
}
/*
* called by CFM
*/
void config_mux(struct s_smc *smc, int mux)
{
plc_config_mux(smc,mux) ;
SETMASK(FM_A(FM_MDREG1),FM_SELRA,FM_SELRA) ;
}
/*
* called by RMT
* enable CLAIM/BEACON interrupts
* (only called if these events are of interest, e.g. in DETECT state
* the interrupt must not be permanently enabled)
* RMT calls this function periodically (timer driven polling)
*/
void sm_mac_check_beacon_claim(struct s_smc *smc)
{
/* set formac IMSK : 0 enables irq */
outpw(FM_A(FM_IMSK2U),~(mac_imsk2u | mac_beacon_imsk2u)) ;
/* the driver must receive the directed beacons */
formac_rcv_restart(smc) ;
process_receive(smc) ;
}
/*-------------------------- interface functions ----------------------------*/
/*
* control MAC layer (called by RMT)
*/
void sm_ma_control(struct s_smc *smc, int mode)
{
switch(mode) {
case MA_OFFLINE :
/* Add to make the MAC offline in RM0_ISOLATED state */
formac_offline(smc) ;
break ;
case MA_RESET :
(void)init_mac(smc,0) ;
break ;
case MA_BEACON :
formac_online(smc) ;
break ;
case MA_DIRECTED :
directed_beacon(smc) ;
break ;
case MA_TREQ :
/*
* no actions necessary, TREQ is already set
*/
break ;
}
}
int sm_mac_get_tx_state(struct s_smc *smc)
{
return (inpw(FM_A(FM_STMCHN))>>4) & 7;
}
/*
* multicast functions
*/
static struct s_fpmc* mac_get_mc_table(struct s_smc *smc,
struct fddi_addr *user,
struct fddi_addr *own,
int del, int can)
{
struct s_fpmc *tb ;
struct s_fpmc *slot ;
u_char *p ;
int i ;
/*
* set own = can(user)
*/
*own = *user ;
if (can) {
p = own->a ;
for (i = 0 ; i < 6 ; i++, p++)
*p = bitrev8(*p);
}
slot = NULL;
for (i = 0, tb = smc->hw.fp.mc.table ; i < FPMAX_MULTICAST ; i++, tb++){
if (!tb->n) { /* not used */
if (!del && !slot) /* if !del save first free */
slot = tb ;
continue ;
}
if (memcmp((char *)&tb->a,(char *)own,6))
continue ;
return tb;
}
return slot; /* return first free or NULL */
}
/*
BEGIN_MANUAL_ENTRY(if,func;others;2)
void mac_clear_multicast(smc)
struct s_smc *smc ;
Function DOWNCALL (SMT, fplustm.c)
Clear all multicast entries
END_MANUAL_ENTRY()
*/
void mac_clear_multicast(struct s_smc *smc)
{
struct s_fpmc *tb ;
int i ;
	smc->hw.fp.os_slots_used = 0 ;	/* note: the SMT addresses will not be deleted */
for (i = 0, tb = smc->hw.fp.mc.table ; i < FPMAX_MULTICAST ; i++, tb++){
if (!tb->perm) {
tb->n = 0 ;
}
}
}
/*
BEGIN_MANUAL_ENTRY(if,func;others;2)
int mac_add_multicast(smc,addr,can)
struct s_smc *smc ;
struct fddi_addr *addr ;
int can ;
	Function	DOWNCALL	(SMT, fplustm.c)
Add an entry to the multicast table
Para addr pointer to a multicast address
can = 0: the multicast address has the physical format
= 1: the multicast address has the canonical format
| 0x80 permanent
Returns 0: success
1: address table full
Note After a 'driver reset' or a 'station set address' all
entries of the multicast table are cleared.
In this case the driver has to fill the multicast table again.
After the operating system dependent module filled
the multicast table it must call mac_update_multicast
to activate the new multicast addresses!
END_MANUAL_ENTRY()
*/
int mac_add_multicast(struct s_smc *smc, struct fddi_addr *addr, int can)
{
SK_LOC_DECL(struct fddi_addr,own) ;
struct s_fpmc *tb ;
/*
* check if there are free table entries
*/
if (can & 0x80) {
if (smc->hw.fp.smt_slots_used >= SMT_MAX_MULTI) {
return 1;
}
}
else {
if (smc->hw.fp.os_slots_used >= FPMAX_MULTICAST-SMT_MAX_MULTI) {
return 1;
}
}
/*
* find empty slot
*/
if (!(tb = mac_get_mc_table(smc,addr,&own,0,can & ~0x80)))
return 1;
tb->n++ ;
tb->a = own ;
tb->perm = (can & 0x80) ? 1 : 0 ;
if (can & 0x80)
smc->hw.fp.smt_slots_used++ ;
else
smc->hw.fp.os_slots_used++ ;
return 0;
}
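/*
 * Usage sketch (illustration only, following the manual entry above):
 * after a 'driver reset' the OS dependent module refills the multicast
 * table and then activates it. "list" and "n" are hypothetical caller
 * variables.
 */
#if 0
static void example_refill_multicast(struct s_smc *smc,
				     struct fddi_addr *list, int n)
{
	int i ;

	mac_clear_multicast(smc) ;
	for (i = 0 ; i < n ; i++) {
		if (mac_add_multicast(smc,&list[i],1))	/* canonical format */
			break ;				/* table full */
	}
	mac_update_multicast(smc) ;	/* activate the new addresses */
}
#endif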
/*
* mode
*/
#define RX_MODE_PROM 0x1
#define RX_MODE_ALL_MULTI 0x2
/*
BEGIN_MANUAL_ENTRY(if,func;others;2)
void mac_update_multicast(smc)
struct s_smc *smc ;
Function DOWNCALL (SMT, fplustm.c)
Update FORMAC multicast registers
END_MANUAL_ENTRY()
*/
void mac_update_multicast(struct s_smc *smc)
{
struct s_fpmc *tb ;
u_char *fu ;
int i ;
/*
* invalidate the CAM
*/
outpw(FM_A(FM_AFCMD),FM_IINV_CAM) ;
/*
* set the functional address
*/
if (smc->hw.fp.func_addr) {
fu = (u_char *) &smc->hw.fp.func_addr ;
outpw(FM_A(FM_AFMASK2),0xffff) ;
outpw(FM_A(FM_AFMASK1),(u_short) ~((fu[0] << 8) + fu[1])) ;
outpw(FM_A(FM_AFMASK0),(u_short) ~((fu[2] << 8) + fu[3])) ;
outpw(FM_A(FM_AFPERS),FM_VALID|FM_DA) ;
outpw(FM_A(FM_AFCOMP2), 0xc000) ;
outpw(FM_A(FM_AFCOMP1), 0x0000) ;
outpw(FM_A(FM_AFCOMP0), 0x0000) ;
outpw(FM_A(FM_AFCMD),FM_IWRITE_CAM) ;
}
/*
* set the mask and the personality register(s)
*/
outpw(FM_A(FM_AFMASK0),0xffff) ;
outpw(FM_A(FM_AFMASK1),0xffff) ;
outpw(FM_A(FM_AFMASK2),0xffff) ;
outpw(FM_A(FM_AFPERS),FM_VALID|FM_DA) ;
for (i = 0, tb = smc->hw.fp.mc.table; i < FPMAX_MULTICAST; i++, tb++) {
if (tb->n) {
CHECK_CAM() ;
/*
* write the multicast address into the CAM
*/
outpw(FM_A(FM_AFCOMP2),
(u_short)((tb->a.a[0]<<8)+tb->a.a[1])) ;
outpw(FM_A(FM_AFCOMP1),
(u_short)((tb->a.a[2]<<8)+tb->a.a[3])) ;
outpw(FM_A(FM_AFCOMP0),
(u_short)((tb->a.a[4]<<8)+tb->a.a[5])) ;
outpw(FM_A(FM_AFCMD),FM_IWRITE_CAM) ;
}
}
}
/*
BEGIN_MANUAL_ENTRY(if,func;others;3)
void mac_set_rx_mode(smc,mode)
struct s_smc *smc ;
int mode ;
Function DOWNCALL/INTERN (SMT, fplustm.c)
	Function	DOWNCALL/INTERN	(SMT, fplustm.c)
			This function enables / disables the selected receive mode.
			Don't call this function if the hardware module is
			used -- use mac_drv_rx_mode() instead.
Para mode = 1 RX_ENABLE_ALLMULTI enable all multicasts
2 RX_DISABLE_ALLMULTI disable "enable all multicasts"
3 RX_ENABLE_PROMISC enable promiscuous
4 RX_DISABLE_PROMISC disable promiscuous
5 RX_ENABLE_NSA enable reception of NSA frames
6 RX_DISABLE_NSA disable reception of NSA frames
Note The selected receive modes will be lost after 'driver reset'
or 'set station address'
END_MANUAL_ENTRY
*/
void mac_set_rx_mode(struct s_smc *smc, int mode)
{
switch (mode) {
case RX_ENABLE_ALLMULTI :
smc->hw.fp.rx_prom |= RX_MODE_ALL_MULTI ;
break ;
case RX_DISABLE_ALLMULTI :
smc->hw.fp.rx_prom &= ~RX_MODE_ALL_MULTI ;
break ;
case RX_ENABLE_PROMISC :
smc->hw.fp.rx_prom |= RX_MODE_PROM ;
break ;
case RX_DISABLE_PROMISC :
smc->hw.fp.rx_prom &= ~RX_MODE_PROM ;
break ;
case RX_ENABLE_NSA :
smc->hw.fp.nsa_mode = FM_MDAMA ;
smc->hw.fp.rx_mode = (smc->hw.fp.rx_mode & ~FM_ADDET) |
smc->hw.fp.nsa_mode ;
break ;
case RX_DISABLE_NSA :
smc->hw.fp.nsa_mode = FM_MRNNSAFNMA ;
smc->hw.fp.rx_mode = (smc->hw.fp.rx_mode & ~FM_ADDET) |
smc->hw.fp.nsa_mode ;
break ;
}
if (smc->hw.fp.rx_prom & RX_MODE_PROM) {
smc->hw.fp.rx_mode = FM_MLIMPROM ;
}
else if (smc->hw.fp.rx_prom & RX_MODE_ALL_MULTI) {
smc->hw.fp.rx_mode = smc->hw.fp.nsa_mode | FM_EXGPA0 ;
}
else
smc->hw.fp.rx_mode = smc->hw.fp.nsa_mode ;
SETMASK(FM_A(FM_MDREG1),smc->hw.fp.rx_mode,FM_ADDRX) ;
mac_update_multicast(smc) ;
}
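/*
 * Usage sketch (illustration only): entering and leaving promiscuous
 * reception with the mode values documented above.
 */
#if 0
static void example_promisc(struct s_smc *smc, int on)
{
	mac_set_rx_mode(smc,on ? RX_ENABLE_PROMISC : RX_DISABLE_PROMISC) ;
}
#endif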
/*
BEGIN_MANUAL_ENTRY(module;tests;3)
How to test the Restricted Token Monitor
----------------------------------------------------------------
o Insert a break point in the function rtm_irq()
o Remove all stations with a restricted token monitor from the
network.
o Connect a UPPS ISA or EISA station to the network.
o Give the FORMAC of UPPS station the command to send
	  restricted tokens until the ring becomes unstable.
	o Now connect your test client.
	o The restricted token monitor should detect the restricted token,
	  and your break point will be reached.
	o You can observe how the station will clean the ring.
END_MANUAL_ENTRY
*/
void rtm_irq(struct s_smc *smc)
{
outpw(ADDR(B2_RTM_CRTL),TIM_CL_IRQ) ; /* clear IRQ */
if (inpw(ADDR(B2_RTM_CRTL)) & TIM_RES_TOK) {
outpw(FM_A(FM_CMDREG1),FM_ICL) ; /* force claim */
DB_RMT("RMT: fddiPATHT_Rmode expired\n",0,0) ;
AIX_EVENT(smc, (u_long) FDDI_RING_STATUS,
(u_long) FDDI_SMT_EVENT,
(u_long) FDDI_RTT, smt_get_event_word(smc));
}
outpw(ADDR(B2_RTM_CRTL),TIM_START) ; /* enable RTM monitoring */
}
static void rtm_init(struct s_smc *smc)
{
outpd(ADDR(B2_RTM_INI),0) ; /* timer = 0 */
outpw(ADDR(B2_RTM_CRTL),TIM_START) ; /* enable IRQ */
}
void rtm_set_timer(struct s_smc *smc)
{
/*
	 * MIB timer and hardware timer have the same resolution of 80 ns
*/
DB_RMT("RMT: setting new fddiPATHT_Rmode, t = %d ns\n",
(int) smc->mib.a[PATH0].fddiPATHT_Rmode,0) ;
outpd(ADDR(B2_RTM_INI),smc->mib.a[PATH0].fddiPATHT_Rmode) ;
}
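/*
 * Worked example (based on the 80 ns resolution noted above): a
 * fddiPATHT_Rmode of 100 ms corresponds to 100000000 ns / 80 ns =
 * 1250000 timer ticks written to B2_RTM_INI.
 */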
static void smt_split_up_fifo(struct s_smc *smc)
{
/*
BEGIN_MANUAL_ENTRY(module;mem;1)
-------------------------------------------------------------
RECEIVE BUFFER MEMORY DIVERSION
-------------------------------------------------------------
R1_RxD == SMT_R1_RXD_COUNT
R2_RxD == SMT_R2_RXD_COUNT
	SMT_R1_RXD_COUNT must not be zero
		 | R1_RxD R2_RxD |R1_RxD R2_RxD | R1_RxD R2_RxD
		 |   x     0     |  x     1-3   |  x     > 3
----------------------------------------------------------------------
		 | 63,75 kB	 | 54,75 kB	| R1_RxD
rx queue 1 | RX_FIFO_SPACE | RX_LARGE_FIFO| ------------- * 63,75 kB
| | | R1_RxD+R2_RxD
----------------------------------------------------------------------
| | 9 kB | R2_RxD
rx queue 2 | 0 kB | RX_SMALL_FIFO| ------------- * 63,75 kB
| (not used) | | R1_RxD+R2_RxD
END_MANUAL_ENTRY
*/
if (SMT_R1_RXD_COUNT == 0) {
SMT_PANIC(smc,SMT_E0117, SMT_E0117_MSG) ;
}
switch(SMT_R2_RXD_COUNT) {
case 0:
smc->hw.fp.fifo.rx1_fifo_size = RX_FIFO_SPACE ;
smc->hw.fp.fifo.rx2_fifo_size = 0 ;
break ;
case 1:
case 2:
case 3:
smc->hw.fp.fifo.rx1_fifo_size = RX_LARGE_FIFO ;
smc->hw.fp.fifo.rx2_fifo_size = RX_SMALL_FIFO ;
break ;
	default:	/* this is not the real default */
smc->hw.fp.fifo.rx1_fifo_size = RX_FIFO_SPACE *
SMT_R1_RXD_COUNT/(SMT_R1_RXD_COUNT+SMT_R2_RXD_COUNT) ;
smc->hw.fp.fifo.rx2_fifo_size = RX_FIFO_SPACE *
SMT_R2_RXD_COUNT/(SMT_R1_RXD_COUNT+SMT_R2_RXD_COUNT) ;
break ;
}
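	/*
	 * Worked example for the default case above (assumed counts, for
	 * illustration): with SMT_R1_RXD_COUNT = 2 and SMT_R2_RXD_COUNT = 6,
	 * rx queue 1 gets RX_FIFO_SPACE * 2/8 and rx queue 2 gets
	 * RX_FIFO_SPACE * 6/8 of the 63,75 kB receive space in the table.
	 */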
/*
BEGIN_MANUAL_ENTRY(module;mem;1)
-------------------------------------------------------------
TRANSMIT BUFFER MEMORY DIVERSION
-------------------------------------------------------------
| no sync bw | sync bw available and | sync bw available and
| available | SynchTxMode = SPLIT | SynchTxMode = ALL
-----------------------------------------------------------------------
sync tx | 0 kB | 32 kB | 55 kB
queue | | TX_MEDIUM_FIFO | TX_LARGE_FIFO
-----------------------------------------------------------------------
	async tx |    64 kB	| 32 kB		       | 9 kB
queue | TX_FIFO_SPACE| TX_MEDIUM_FIFO | TX_SMALL_FIFO
END_MANUAL_ENTRY
*/
/*
* set the tx mode bits
*/
if (smc->mib.a[PATH0].fddiPATHSbaPayload) {
#ifdef ESS
smc->hw.fp.fifo.fifo_config_mode |=
smc->mib.fddiESSSynchTxMode | SYNC_TRAFFIC_ON ;
#endif
}
else {
smc->hw.fp.fifo.fifo_config_mode &=
~(SEND_ASYNC_AS_SYNC|SYNC_TRAFFIC_ON) ;
}
/*
* split up the FIFO
*/
if (smc->hw.fp.fifo.fifo_config_mode & SYNC_TRAFFIC_ON) {
if (smc->hw.fp.fifo.fifo_config_mode & SEND_ASYNC_AS_SYNC) {
smc->hw.fp.fifo.tx_s_size = TX_LARGE_FIFO ;
smc->hw.fp.fifo.tx_a0_size = TX_SMALL_FIFO ;
}
else {
smc->hw.fp.fifo.tx_s_size = TX_MEDIUM_FIFO ;
smc->hw.fp.fifo.tx_a0_size = TX_MEDIUM_FIFO ;
}
}
else {
smc->hw.fp.fifo.tx_s_size = 0 ;
smc->hw.fp.fifo.tx_a0_size = TX_FIFO_SPACE ;
}
smc->hw.fp.fifo.rx1_fifo_start = smc->hw.fp.fifo.rbc_ram_start +
RX_FIFO_OFF ;
smc->hw.fp.fifo.tx_s_start = smc->hw.fp.fifo.rx1_fifo_start +
smc->hw.fp.fifo.rx1_fifo_size ;
smc->hw.fp.fifo.tx_a0_start = smc->hw.fp.fifo.tx_s_start +
smc->hw.fp.fifo.tx_s_size ;
smc->hw.fp.fifo.rx2_fifo_start = smc->hw.fp.fifo.tx_a0_start +
smc->hw.fp.fifo.tx_a0_size ;
DB_SMT("FIFO split: mode = %x\n",smc->hw.fp.fifo.fifo_config_mode,0) ;
DB_SMT("rbc_ram_start = %x rbc_ram_end = %x\n",
smc->hw.fp.fifo.rbc_ram_start, smc->hw.fp.fifo.rbc_ram_end) ;
DB_SMT("rx1_fifo_start = %x tx_s_start = %x\n",
smc->hw.fp.fifo.rx1_fifo_start, smc->hw.fp.fifo.tx_s_start) ;
DB_SMT("tx_a0_start = %x rx2_fifo_start = %x\n",
smc->hw.fp.fifo.tx_a0_start, smc->hw.fp.fifo.rx2_fifo_start) ;
}
void formac_reinit_tx(struct s_smc *smc)
{
/*
* Split up the FIFO and reinitialize the MAC if synchronous
* bandwidth becomes available but no synchronous queue is
* configured.
*/
if (!smc->hw.fp.fifo.tx_s_size && smc->mib.a[PATH0].fddiPATHSbaPayload){
(void)init_mac(smc,0) ;
}
}
| gpl-2.0 |
EPDCenter/android_kernel_woxter_nimbus_98q | drivers/media/video/cx18/cx18-audio.c | 14407 | 2589 | /*
* cx18 audio-related functions
*
* Derived from ivtv-audio.c
*
* Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
* 02111-1307 USA
*/
#include "cx18-driver.h"
#include "cx18-io.h"
#include "cx18-cards.h"
#include "cx18-audio.h"
#define CX18_AUDIO_ENABLE 0xc72014
#define CX18_AI1_MUX_MASK 0x30
#define CX18_AI1_MUX_I2S1 0x00
#define CX18_AI1_MUX_I2S2 0x10
#define CX18_AI1_MUX_843_I2S 0x20
/* Selects the audio input and output according to the current
settings. */
int cx18_audio_set_io(struct cx18 *cx)
{
const struct cx18_card_audio_input *in;
u32 u, v;
int err;
/* Determine which input to use */
if (test_bit(CX18_F_I_RADIO_USER, &cx->i_flags))
in = &cx->card->radio_input;
else
in = &cx->card->audio_inputs[cx->audio_input];
/* handle muxer chips */
v4l2_subdev_call(cx->sd_extmux, audio, s_routing,
(u32) in->muxer_input, 0, 0);
err = cx18_call_hw_err(cx, cx->card->hw_audio_ctrl,
audio, s_routing, in->audio_input, 0, 0);
if (err)
return err;
/* FIXME - this internal mux should be abstracted to a subdev */
u = cx18_read_reg(cx, CX18_AUDIO_ENABLE);
v = u & ~CX18_AI1_MUX_MASK;
switch (in->audio_input) {
case CX18_AV_AUDIO_SERIAL1:
v |= CX18_AI1_MUX_I2S1;
break;
case CX18_AV_AUDIO_SERIAL2:
v |= CX18_AI1_MUX_I2S2;
break;
default:
v |= CX18_AI1_MUX_843_I2S;
break;
}
if (v == u) {
/* force a toggle of some AI1 MUX control bits */
u &= ~CX18_AI1_MUX_MASK;
switch (in->audio_input) {
case CX18_AV_AUDIO_SERIAL1:
u |= CX18_AI1_MUX_843_I2S;
break;
case CX18_AV_AUDIO_SERIAL2:
u |= CX18_AI1_MUX_843_I2S;
break;
default:
u |= CX18_AI1_MUX_I2S1;
break;
}
cx18_write_reg_expect(cx, u | 0xb00, CX18_AUDIO_ENABLE,
u, CX18_AI1_MUX_MASK);
}
cx18_write_reg_expect(cx, v | 0xb00, CX18_AUDIO_ENABLE,
v, CX18_AI1_MUX_MASK);
return 0;
}
| gpl-2.0 |
ikpb/android_kernel_blu_studio6lte | drivers/scsi/scsi_module.c | 14919 | 1688 | /*
* Copyright (C) 2003 Christoph Hellwig.
* Released under GPL v2.
*
* Support for old-style host templates.
*
* NOTE: Do not use this for new drivers ever.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <scsi/scsi_host.h>
static int __init init_this_scsi_driver(void)
{
struct scsi_host_template *sht = &driver_template;
struct Scsi_Host *shost;
struct list_head *l;
int error;
if (!sht->release) {
printk(KERN_ERR
"scsi HBA driver %s didn't set a release method.\n",
sht->name);
return -EINVAL;
}
sht->module = THIS_MODULE;
INIT_LIST_HEAD(&sht->legacy_hosts);
sht->detect(sht);
if (list_empty(&sht->legacy_hosts))
return -ENODEV;
list_for_each_entry(shost, &sht->legacy_hosts, sht_legacy_list) {
error = scsi_add_host(shost, NULL);
if (error)
goto fail;
scsi_scan_host(shost);
}
return 0;
fail:
l = &shost->sht_legacy_list;
while ((l = l->prev) != &sht->legacy_hosts)
scsi_remove_host(list_entry(l, struct Scsi_Host, sht_legacy_list));
return error;
}
static void __exit exit_this_scsi_driver(void)
{
struct scsi_host_template *sht = &driver_template;
struct Scsi_Host *shost, *s;
list_for_each_entry(shost, &sht->legacy_hosts, sht_legacy_list)
scsi_remove_host(shost);
list_for_each_entry_safe(shost, s, &sht->legacy_hosts, sht_legacy_list)
sht->release(shost);
if (list_empty(&sht->legacy_hosts))
return;
printk(KERN_WARNING "%s did not call scsi_unregister\n", sht->name);
dump_stack();
list_for_each_entry_safe(shost, s, &sht->legacy_hosts, sht_legacy_list)
scsi_unregister(shost);
}
module_init(init_this_scsi_driver);
module_exit(exit_this_scsi_driver);
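/*
 * Usage sketch (assumption, not part of this file): a legacy HBA driver
 * defined its own driver_template and then textually included this file,
 * roughly as follows. All "example_*" names are hypothetical.
 */
#if 0
static struct scsi_host_template driver_template = {
	.name		= "example_legacy_hba",
	.detect		= example_detect,	/* scans and adds hosts */
	.release	= example_release,	/* tears a host down */
	.queuecommand	= example_queuecommand,
	.this_id	= -1,
};
#include "scsi_module.c"
#endif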
| gpl-2.0 |
sk806/N5_Kernel | drivers/misc/modem_v1/link_device_lli.c | 72 | 20267 | /*
* Copyright (C) 2010 Samsung Electronics.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/wakelock.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/platform_device.h>
#include <linux/kallsyms.h>
#include <linux/suspend.h>
#include <plat/gpio-cfg.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/mipi-lli.h>
#include "modem_prj.h"
#include "modem_utils.h"
#include "link_device_memory.h"
static int sleep_timeout = 100;
module_param(sleep_timeout, int, S_IRUGO);
MODULE_PARM_DESC(sleep_timeout, "LLI sleep timeout");
static int pm_enable = 1;
module_param(pm_enable, int, S_IRUGO);
MODULE_PARM_DESC(pm_enable, "LLI PM enable");
static inline void send_ap2cp_irq(struct mem_link_device *mld, u16 mask)
{
#ifdef CONFIG_EXYNOS_MIPI_LLI_GPIO_SIDEBAND
int val;
unsigned long flags;
spin_lock_irqsave(&mld->sig_lock, flags);
mipi_lli_send_interrupt(mask);
/* invert previous signal level */
val = gpio_get_value(mld->gpio_ipc_int2cp);
val = 1 - val;
gpio_set_value(mld->gpio_ipc_int2cp, val);
trace_send_sig(mask, val);
spin_unlock_irqrestore(&mld->sig_lock, flags);
#else
mipi_lli_send_interrupt(mask);
#endif
}
#ifdef CONFIG_LINK_POWER_MANAGEMENT
#ifdef CONFIG_LINK_POWER_MANAGEMENT_WITH_FSM
/**
@brief forbid CP from going to sleep
Wakes up a CP if it can sleep and increases the "ref_cnt" counter in the
mem_link_device instance.
@param mld the pointer to a mem_link_device instance
@remark CAUTION!!! permit_cp_sleep() MUST be invoked after
forbid_cp_sleep() success to decrease the "ref_cnt" counter.
*/
static void forbid_cp_sleep(struct mem_link_device *mld)
{
struct modem_link_pm *pm = &mld->link_dev.pm;
int ref_cnt;
ref_cnt = atomic_inc_return(&mld->ref_cnt);
mif_debug("ref_cnt %d\n", ref_cnt);
if (ref_cnt > 1)
return;
if (pm->request_hold)
pm->request_hold(pm);
}
/**
@brief		permit CP to go to sleep
Decreases the "ref_cnt" counter in the mem_link_device instance and allows
CP to go to sleep only if the value of the "ref_cnt" counter is less than
or equal to 0.
@param mld the pointer to a mem_link_device instance
@remark MUST be invoked after forbid_cp_sleep() success to decrease the
"ref_cnt" counter.
*/
static void permit_cp_sleep(struct mem_link_device *mld)
{
struct modem_link_pm *pm = &mld->link_dev.pm;
int ref_cnt;
ref_cnt = atomic_dec_return(&mld->ref_cnt);
if (ref_cnt > 0)
return;
if (ref_cnt < 0) {
mif_info("WARNING! ref_cnt %d < 0\n", ref_cnt);
atomic_set(&mld->ref_cnt, 0);
ref_cnt = 0;
}
if (pm->release_hold)
pm->release_hold(pm);
}
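/*
 * Pairing sketch (illustration only): every forbid_cp_sleep() call must
 * be matched by exactly one permit_cp_sleep() call, bracketing the code
 * that needs the link awake. xmit_ipc() is a hypothetical TX helper.
 */
#if 0
static int example_tx(struct mem_link_device *mld, struct sk_buff *skb)
{
	int err;

	forbid_cp_sleep(mld);		/* ref_cnt++, hold the link */
	err = xmit_ipc(mld, skb);	/* hypothetical transmit step */
	permit_cp_sleep(mld);		/* ref_cnt--, release at 0 */
	return err;
}
#endif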
static bool check_link_status(struct mem_link_device *mld)
{
struct link_device *ld = &mld->link_dev;
struct modem_ctl *mc = ld->mc;
struct modem_link_pm *pm = &ld->pm;
if (gpio_get_value(mld->gpio_cp_status) == 0)
return false;
if (mipi_lli_get_link_status() != LLI_MOUNTED)
return false;
if (cp_online(mc))
return pm->link_active ? pm->link_active(pm) : true;
return true;
}
static void pm_fail_cb(struct modem_link_pm *pm)
{
mipi_lli_debug_info();
modemctl_notify_event(MDM_CRASH_PM_FAIL);
}
static void pm_cp_fail_cb(struct modem_link_pm *pm)
{
struct link_device *ld = pm_to_link_device(pm);
struct mem_link_device *mld = ld_to_mem_link_device(ld);
struct modem_ctl *mc = ld->mc;
struct io_device *iod = mc->iod;
unsigned long flags;
mipi_lli_debug_info();
spin_lock_irqsave(&mc->lock, flags);
if (cp_online(mc)) {
spin_unlock_irqrestore(&mc->lock, flags);
if (mld->stop_pm)
mld->stop_pm(mld);
modemctl_notify_event(MDM_CRASH_PM_CP_FAIL);
return;
}
if (cp_booting(mc)) {
iod->modem_state_changed(iod, STATE_OFFLINE);
ld->reset(ld);
spin_unlock_irqrestore(&mc->lock, flags);
return;
}
spin_unlock_irqrestore(&mc->lock, flags);
}
static void start_pm(struct mem_link_device *mld)
{
struct link_device *ld = &mld->link_dev;
struct modem_link_pm *pm = &ld->pm;
if (!pm->start)
return;
if (pm_enable) {
if (mld->iosm)
pm->start(pm, PM_EVENT_NO_EVENT);
else
pm->start(pm, PM_EVENT_CP_BOOTING);
} else {
pm->start(pm, PM_EVENT_LOCK_ON);
}
}
static void stop_pm(struct mem_link_device *mld)
{
struct modem_link_pm *pm = &mld->link_dev.pm;
if (pm->stop)
pm->stop(pm);
}
static int init_pm(struct mem_link_device *mld)
{
struct link_device *ld = &mld->link_dev;
struct modem_link_pm *pm = &ld->pm;
struct link_pm_svc *pm_svc;
int ret;
spin_lock_init(&mld->sig_lock);
atomic_set(&mld->ref_cnt, 0);
pm_svc = NULL;
ret = init_link_device_pm(ld, pm, pm_svc, pm_fail_cb, pm_cp_fail_cb);
if (ret < 0)
return ret;
return 0;
}
#else
static inline void change_irq_type(unsigned int irq, unsigned int value)
{
unsigned int type;
type = value ? IRQ_TYPE_LEVEL_LOW : IRQ_TYPE_LEVEL_HIGH;
irq_set_irq_type(irq, type);
}
/**
@brief finalize handling the PHONE_START command from CP
@param mld the pointer to a mem_link_device instance
*/
static void finalize_cp_start(struct mem_link_device *mld)
{
int ap_wakeup = gpio_get_value(mld->gpio_ap_wakeup);
int cp_status = gpio_get_value(mld->gpio_cp_status);
change_irq_type(mld->irq_ap_wakeup.num, ap_wakeup);
change_irq_type(mld->irq_cp_status.num, cp_status);
if (ap_wakeup) {
if (wake_lock_active(&mld->ap_wlock))
wake_lock(&mld->ap_wlock);
} else {
if (wake_lock_active(&mld->ap_wlock))
wake_unlock(&mld->ap_wlock);
}
if (cp_status) {
if (!wake_lock_active(&mld->ap_wlock))
wake_lock(&mld->cp_wlock);
} else {
if (wake_lock_active(&mld->ap_wlock))
wake_unlock(&mld->cp_wlock);
}
print_pm_status(mld);
}
static bool check_link_status(struct mem_link_device *mld)
{
if (mipi_lli_get_link_status() != LLI_MOUNTED)
return false;
if (gpio_get_value(mld->gpio_cp_status) == 0)
return false;
return true;
}
static void release_cp_wakeup(struct work_struct *ws)
{
struct mem_link_device *mld;
int i;
unsigned long flags;
mld = container_of(ws, struct mem_link_device, cp_sleep_dwork.work);
if (work_pending(&mld->cp_sleep_dwork.work))
cancel_delayed_work(&mld->cp_sleep_dwork);
spin_lock_irqsave(&mld->pm_lock, flags);
i = atomic_read(&mld->ref_cnt);
spin_unlock_irqrestore(&mld->pm_lock, flags);
if (i > 0)
goto reschedule;
if (gpio_get_value(mld->gpio_ap_wakeup) == 0) {
gpio_set_value(mld->gpio_cp_wakeup, 0);
gpio_set_value(mld->gpio_ap_status, 0);
}
#if 1
print_pm_status(mld);
#endif
return;
reschedule:
queue_delayed_work(system_nrt_wq, &mld->cp_sleep_dwork,
msecs_to_jiffies(sleep_timeout));
}
/**
@brief forbid CP from going to sleep
Wakes up a CP if it can sleep and increases the "ref_cnt" counter in the
mem_link_device instance.
@param mld the pointer to a mem_link_device instance
@remark CAUTION!!! permit_cp_sleep() MUST be invoked after
forbid_cp_sleep() success to decrease the "ref_cnt" counter.
*/
static void forbid_cp_sleep(struct mem_link_device *mld)
{
int ref_cnt;
unsigned long flags;
int cp_wakeup;
spin_lock_irqsave(&mld->pm_lock, flags);
if (work_pending(&mld->cp_sleep_dwork.work))
cancel_delayed_work(&mld->cp_sleep_dwork);
ref_cnt = atomic_inc_return(&mld->ref_cnt);
mif_debug("ref_cnt %d\n", ref_cnt);
cp_wakeup = gpio_get_value(mld->gpio_cp_wakeup);
gpio_set_value(mld->gpio_cp_wakeup, 1);
if (cp_wakeup == 0)
print_pm_status(mld);
spin_unlock_irqrestore(&mld->pm_lock, flags);
}
/**
@brief		permit CP to go to sleep
Decreases the "ref_cnt" counter in the mem_link_device instance and allows
CP to go to sleep only if the value of the "ref_cnt" counter is less than
or equal to 0.
@param mld the pointer to a mem_link_device instance
@remark MUST be invoked after forbid_cp_sleep() success to decrease the
"ref_cnt" counter.
*/
static void permit_cp_sleep(struct mem_link_device *mld)
{
int ref_cnt;
unsigned long flags;
spin_lock_irqsave(&mld->pm_lock, flags);
ref_cnt = atomic_dec_return(&mld->ref_cnt);
if (ref_cnt > 0)
goto exit;
if (ref_cnt < 0) {
mif_info("WARNING! ref_cnt %d < 0\n", ref_cnt);
atomic_set(&mld->ref_cnt, 0);
}
exit:
spin_unlock_irqrestore(&mld->pm_lock, flags);
}
/**
@brief interrupt handler for a wakeup interrupt
1) Reads the interrupt value\n
2) Performs interrupt handling\n
@param irq the IRQ number
@param data the pointer to a data
*/
static irqreturn_t ap_wakeup_interrupt(int irq, void *data)
{
struct mem_link_device *mld = (struct mem_link_device *)data;
int ap_wakeup = gpio_get_value(mld->gpio_ap_wakeup);
int cp_wakeup = gpio_get_value(mld->gpio_cp_wakeup);
int cpu = raw_smp_processor_id();
change_irq_type(irq, ap_wakeup);
if (work_pending(&mld->cp_sleep_dwork.work))
cancel_delayed_work(&mld->cp_sleep_dwork);
if (ap_wakeup) {
mld->last_cp2ap_intr = cpu_clock(cpu);
if (!cp_wakeup)
gpio_set_value(mld->gpio_cp_wakeup, 1);
if (!wake_lock_active(&mld->ap_wlock))
wake_lock(&mld->ap_wlock);
if (mipi_lli_get_link_status() == LLI_UNMOUNTED)
mipi_lli_set_link_status(LLI_WAITFORMOUNT);
if (!mipi_lli_suspended())
gpio_set_value(mld->gpio_ap_status, 1);
} else {
if (wake_lock_active(&mld->ap_wlock))
wake_unlock(&mld->ap_wlock);
if (mipi_lli_get_link_status() & LLI_WAITFORMOUNT)
mipi_lli_set_link_status(LLI_UNMOUNTED);
queue_delayed_work(system_nrt_wq, &mld->cp_sleep_dwork,
msecs_to_jiffies(sleep_timeout));
}
print_pm_status(mld);
return IRQ_HANDLED;
}
static irqreturn_t cp_status_handler(int irq, void *data)
{
struct mem_link_device *mld = (struct mem_link_device *)data;
struct link_device *ld = &mld->link_dev;
struct modem_ctl *mc = ld->mc;
int cp_status = gpio_get_value(mld->gpio_cp_status);
unsigned long flags;
spin_lock_irqsave(&mld->pm_lock, flags);
change_irq_type(irq, cp_status);
if (!cp_online(mc))
goto exit;
if (cp_status) {
if (!wake_lock_active(&mld->cp_wlock))
wake_lock(&mld->cp_wlock);
} else {
gpio_set_value(mld->gpio_ap_status, 0);
if (wake_lock_active(&mld->cp_wlock))
wake_unlock(&mld->cp_wlock);
}
exit:
print_pm_status(mld);
spin_unlock_irqrestore(&mld->pm_lock, flags);
return IRQ_HANDLED;
}
static void start_pm(struct mem_link_device *mld)
{
if (pm_enable) {
int ap_wakeup = gpio_get_value(mld->gpio_ap_wakeup);
int cp_status = gpio_get_value(mld->gpio_cp_status);
print_pm_status(mld);
change_irq_type(mld->irq_ap_wakeup.num, ap_wakeup);
mif_enable_irq(&mld->irq_ap_wakeup);
change_irq_type(mld->irq_cp_status.num, cp_status);
mif_enable_irq(&mld->irq_cp_status);
} else {
wake_lock(&mld->ap_wlock);
}
}
static void stop_pm(struct mem_link_device *mld)
{
print_pm_status(mld);
mif_disable_irq(&mld->irq_ap_wakeup);
mif_disable_irq(&mld->irq_cp_status);
}
static int init_pm(struct mem_link_device *mld)
{
int err;
unsigned int gpio;
unsigned int irq_ap_wakeup;
unsigned int irq_cp_status;
unsigned long flags;
gpio_set_value(mld->gpio_ap_status, 0);
/*
Retrieve GPIO#, IRQ#, and IRQ flags for PM
*/
gpio = mld->gpio_ap_wakeup;
irq_ap_wakeup = gpio_to_irq(gpio);
mif_err("CP2AP_WAKEUP GPIO:%d IRQ:%d\n", gpio, irq_ap_wakeup);
gpio = mld->gpio_cp_wakeup;
mif_err("AP2CP_WAKEUP GPIO:%d\n", gpio);
gpio = mld->gpio_cp_status;
irq_cp_status = gpio_to_irq(gpio);
mif_err("CP2AP_STATUS GPIO:%d IRQ:%d\n", gpio, irq_cp_status);
gpio = mld->gpio_ap_status;
mif_err("AP2CP_STATUS GPIO:%d\n", gpio);
/*
Initialize locks, completions, bottom halves, etc.
*/
wake_lock_init(&mld->ap_wlock, WAKE_LOCK_SUSPEND, "lli_ap_wlock");
wake_lock_init(&mld->cp_wlock, WAKE_LOCK_SUSPEND, "lli_cp_wlock");
INIT_DELAYED_WORK(&mld->cp_sleep_dwork, release_cp_wakeup);
spin_lock_init(&mld->pm_lock);
spin_lock_init(&mld->sig_lock);
atomic_set(&mld->ref_cnt, 0);
/*
Enable IRQs for PM
*/
print_pm_status(mld);
flags = (IRQF_TRIGGER_HIGH | IRQF_ONESHOT);
mif_init_irq(&mld->irq_ap_wakeup, irq_ap_wakeup,
"lli_cp2ap_wakeup", flags);
err = mif_request_irq(&mld->irq_ap_wakeup, ap_wakeup_interrupt, mld);
if (err)
return err;
mif_disable_irq(&mld->irq_ap_wakeup);
mif_init_irq(&mld->irq_cp_status, irq_cp_status,
"lli_cp2ap_status", flags);
err = mif_request_irq(&mld->irq_cp_status, cp_status_handler, mld);
if (err)
return err;
mif_disable_irq(&mld->irq_cp_status);
return 0;
}
#endif
#endif
static void lli_link_reset(struct link_device *ld)
{
mif_err("%s: PM %s <%pf>\n", ld->name, FUNC, CALLER);
mipi_lli_intr_enable();
mipi_lli_reset();
}
static void lli_link_reload(struct link_device *ld)
{
mif_err("%s: PM %s <%pf>\n", ld->name, FUNC, CALLER);
mipi_lli_reload();
}
static void lli_link_off(struct link_device *ld)
{
mif_err("%s: PM %s <%pf>\n", ld->name, FUNC, CALLER);
mipi_lli_intr_disable();
stop_pm(ld_to_mem_link_device(ld));
}
static bool lli_link_unmounted(struct link_device *ld)
{
return (mipi_lli_get_link_status() == LLI_UNMOUNTED);
}
static bool lli_link_suspended(struct link_device *ld)
{
return mipi_lli_suspended() ? true : false;
}
static void lli_disable_irq(struct link_device *ld)
{
mipi_lli_mask_sb_intr(true);
}
static void lli_enable_irq(struct link_device *ld)
{
mipi_lli_mask_sb_intr(false);
}
/**
@brief interrupt handler for a MIPI-LLI IPC interrupt
1) Get a free mst buffer\n
2) Reads the RXQ status and saves the status to the mst buffer\n
3) Saves the interrupt value to the mst buffer\n
4) Invokes mem_irq_handler that is common to all memory-type interfaces\n
@param data the pointer to a mem_link_device instance
@param intr the interrupt value
*/
static void lli_irq_handler(void *data, enum mipi_lli_event event, u32 intr)
{
struct mem_link_device *mld = (struct mem_link_device *)data;
struct mst_buff *msb;
if (event == LLI_EVENT_SIG) {
msb = mem_take_snapshot(mld, RX);
if (!msb)
return;
msb->snapshot.int2ap = (u16)intr;
mem_irq_handler(mld, msb);
} else {
struct link_device *ld = &mld->link_dev;
struct modem_link_pm *pm = &ld->pm;
check_lli_irq(pm, event);
}
}
static struct mem_link_device *g_mld;
#ifdef DEBUG_MODEM_IF
#define DEBUGFS_BUF_SIZE (SZ_32K - SZ_256)
/*
 * Due to the limited size of the allocated memory,
 * some hard-coded values are used to limit the line and row sizes.
 * A neater and cleaner way still needs to be invented.
*/
#if 0
static ssize_t dump_rb_frame(char *buf, size_t size, struct sbd_ring_buffer *rb)
{
int idx;
u32 i, j, nr, nc, len = 0;
nr = min_t(u32, rb->len, sipc_ps_ch(rb->ch) ? 48 : 32);
/* 52 Bytes = ip header(20) + TCP header(32) */
nc = sipc_ps_ch(rb->ch) ? 52 : 16;
/* dumps recent n frames */
for (i = 0; i < nr; i++) {
idx = *rb->wp - i - 1;
if (idx < 0)
idx = rb->len + idx;
/*
len += snprintf((buf + len), (size - len), "rb[%03d] ", idx);
*/
for (j = 0; j < nc; j++)
len += snprintf((buf + len), (size - len),
"%02x", rb->buff[idx][j]);
len += snprintf((buf + len), (size - len), "\n");
}
return len;
}
static ssize_t dbgfs_frame(struct file *file,
char __user *user_buf, size_t count, loff_t *ppos)
{
char *buf;
ssize_t size;
u32 i, dir, len = 0;
struct mem_link_device *mld;
struct sbd_link_device *sl;
mld = file->private_data;
sl = &mld->sbd_link_dev;
if (!mld || !sl)
return 0;
buf = kzalloc(DEBUGFS_BUF_SIZE, GFP_KERNEL);
if (!buf) {
mif_err("not enough memory...\n");
return 0;
}
for (i = 0; i < sl->num_channels; i++)
for (dir = UL; dir <= DL; dir++) {
struct sbd_ring_buffer *rb = sbd_id2rb(sl, i, dir);
if (!rb || !sipc_major_ch(rb->ch))
break;
len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
">> ch:%d len:%d size:%d [%s w:%d r:%d]\n",
rb->ch, rb->len, rb->buff_size,
udl_str(rb->dir), *rb->wp, *rb->rp);
len += dump_rb_frame((buf + len),
(DEBUGFS_BUF_SIZE - len), rb);
len += snprintf((buf + len),
(DEBUGFS_BUF_SIZE - len), "\n");
}
len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), "\n");
mif_info("Total output length = %d\n", len);
size = simple_read_from_buffer(user_buf, count, ppos, buf, len);
kfree(buf);
return size;
}
static const struct file_operations dbgfs_frame_fops = {
.open = simple_open,
.read = dbgfs_frame,
.owner = THIS_MODULE
};
#endif
static inline void dev_debugfs_add(struct mem_link_device *mld)
{
mld->dbgfs_dir = debugfs_create_dir("svnet", NULL);
mld->mem_dump_blob.data = mld->base;
mld->mem_dump_blob.size = mld->size;
debugfs_create_blob("mem_dump", S_IRUGO, mld->dbgfs_dir,
&mld->mem_dump_blob);
/* mld->dbgfs_frame = debugfs_create_file("frame", S_IRUGO,
mld->dbgfs_dir, mld, &dbgfs_frame_fops);
*/
}
#else
static inline void dev_debugfs_add(struct mem_link_device *mld) {}
#endif
struct link_device *lli_create_link_device(struct platform_device *pdev)
{
struct modem_data *modem;
struct mem_link_device *mld;
struct link_device *ld;
int err;
unsigned long start;
unsigned long size;
/**
* Get the modem (platform) data
*/
modem = (struct modem_data *)pdev->dev.platform_data;
if (!modem) {
mif_err("ERR! modem == NULL\n");
return NULL;
}
if (!modem->gpio_ap_wakeup) {
mif_err("ERR! no gpio_ap_wakeup\n");
return NULL;
}
if (!modem->gpio_cp_status) {
mif_err("ERR! no gpio_cp_status\n");
return NULL;
}
mif_err("MODEM:%s LINK:%s\n", modem->name, modem->link_name);
/**
* Create a MEMORY link device instance
*/
mld = mem_create_link_device(MEM_LLI_SHMEM, modem);
if (!mld) {
mif_err("%s: ERR! create_link_device fail\n", modem->link_name);
return NULL;
}
g_mld = mld;
ld = &mld->link_dev;
ld->reset = lli_link_reset;
ld->reload = lli_link_reload;
ld->off = lli_link_off;
ld->unmounted = lli_link_unmounted;
ld->suspended = lli_link_suspended;
ld->enable_irq = lli_enable_irq;
ld->disable_irq = lli_disable_irq;
/**
* Link local functions to the corresponding function pointers that are
* mandatory for all memory-type link devices
*/
mld->send_ap2cp_irq = send_ap2cp_irq;
/*
** Link local functions to the corresponding function pointers
*/
#ifndef CONFIG_LINK_POWER_MANAGEMENT_WITH_FSM
mld->finalize_cp_start = finalize_cp_start;
#endif
#ifdef CONFIG_LINK_POWER_MANAGEMENT
mld->start_pm = start_pm;
mld->stop_pm = stop_pm;
mld->forbid_cp_sleep = forbid_cp_sleep;
mld->permit_cp_sleep = permit_cp_sleep;
mld->link_active = check_link_status;
#endif
#ifdef DEBUG_MODEM_IF
mld->debug_info = mipi_lli_debug_info;
#endif
/**
* Initialize SHMEM maps for IPC (physical map -> logical map)
*/
start = mipi_lli_get_phys_base();
size = mipi_lli_get_phys_size();
err = mem_register_ipc_rgn(mld, start, size);
if (err < 0) {
mif_err("%s: ERR! register_ipc_rgn fail (%d)\n", ld->name, err);
goto error;
}
err = mem_setup_ipc_map(mld);
if (err < 0) {
mif_err("%s: ERR! setup_ipc_map fail (%d)\n", ld->name, err);
mem_unregister_ipc_rgn(mld);
goto error;
}
#ifdef CONFIG_LINK_DEVICE_WITH_SBD_ARCH
if (ld->sbd_ipc) {
struct sbd_link_device *sld = &mld->sbd_link_dev;
err = create_sbd_link_device(ld, sld, mld->base, mld->size);
if (err < 0)
goto error;
}
#endif
/**
* Register interrupt handlers
*/
err = mipi_lli_register_handler(lli_irq_handler, mld);
if (err) {
mif_err("%s: ERR! register_handler fail (%d)\n", ld->name, err);
goto error;
}
/*
** Retrieve GPIO#, IRQ#, and IRQ flags for PM
*/
mld->gpio_ap_wakeup = modem->gpio_ap_wakeup;
mld->gpio_cp_wakeup = modem->gpio_cp_wakeup;
mld->gpio_cp_status = modem->gpio_cp_status;
mld->gpio_ap_status = modem->gpio_ap_status;
mld->gpio_ipc_int2cp = modem->gpio_ipc_int2cp;
#ifdef CONFIG_LINK_POWER_MANAGEMENT
err = init_pm(mld);
if (err)
goto error;
#endif
#ifdef DEBUG_MODEM_IF
dev_debugfs_add(mld);
#endif
return ld;
error:
kfree(mld);
mif_err("xxx\n");
return NULL;
}
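/*
 * Creation sketch (illustration only): a hypothetical probe path in the
 * modem interface core could bring the link device up like this.
 */
#if 0
static int example_probe(struct platform_device *pdev)
{
	struct link_device *ld = lli_create_link_device(pdev);

	if (!ld)
		return -ENODEV;
	/* register ld with the modem interface core here */
	return 0;
}
#endif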
| gpl-2.0 |
dan-and/linux-sunxi | drivers/power/axp_power/axp18-sply.c | 72 | 35817 | /*
* Battery charger driver for Dialog Semiconductor DA9030
*
* Copyright (C) 2008 Compulab, Ltd.
* Mike Rapoport <mike@compulab.co.il>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/sched.h>
#include <linux/input.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/mfd/axp-mfd.h>
#include "axp-cfg.h"
#include "axp-sply.h"
static inline int axp18_vbat_to_vbat(uint8_t reg)
{
return reg * 8 + 2500;
}
static inline int axp18_vbat_to_reg(int vbat)
{
return (vbat - 2500) / 8;
}
static inline int axp18_vac_to_vbat(uint8_t reg)
{
return reg * 12 + 3750;
}
static inline int axp18_vac_to_reg(int vbat)
{
return (vbat - 3750) / 12;
}
static inline int axp18_i_to_ibat(uint8_t reg)
{
return reg * 2000 / 300 ;
}
static inline int axp18_i_to_reg(int ibat)
{
return ibat * 300 / 2000;
}
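/*
 * Worked conversions (from the helpers above, for illustration): a VBAT
 * ADC reading of 0x80 (128) maps to 128 * 8 + 2500 = 3524 mV, and a
 * current ADC reading of 75 maps to 75 * 2000 / 300 = 500 mA.
 */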
static inline void axp_read_adc(struct axp_charger *charger,
struct axp_adc_res *adc)
{
uint8_t tmp;
	//axp_reads(charger->master, AXP18_VBAT_RES,sizeof(*adc), (uint8_t *)adc);//axp18 can't support multi-reads
axp_read(charger->master,AXP18_VBAT_RES,&tmp);
adc->vbat_res = tmp;
axp_read(charger->master,AXP18_IBAT_RES,&tmp);
adc->ibat_res = tmp;
axp_read(charger->master,AXP18_VAC_RES,&tmp);
adc->vac_res = tmp;
axp_read(charger->master,AXP18_IAC_RES,&tmp);
adc->iac_res = tmp;
}
static void axp_charger_update_state(struct axp_charger *charger)
{
uint8_t val,tmp;
axp_read(charger->master, AXP18_CHARGE_STATUS, &val);
charger->is_on = (val & AXP18_IN_CHARGE) ? 1 : 0;
axp_read(charger->master,AXP18_FAULT_LOG1,&charger->fault);
axp_read(charger->master, AXP18_FAULT_LOG2, &val);
charger->is_finish = (val & AXP18_FINISH_CHARGE) ? 1 : 0;
tmp = val & 0x22;
val = tmp >> 5 | tmp << 5;
charger->fault |= val;
axp_read(charger->master, AXP18_STATUS, &val);
charger->bat_det = (val & AXP18_STATUS_BATEN) ? 1 : 0;
charger->ac_det = (val & AXP18_STATUS_DCIEN) ? 1 : 0;
charger->usb_det = (val & AXP18_STATUS_USBEN) ? 1 : 0;
charger->ext_valid = (val & AXP18_STATUS_EXTVA) ? 1 : 0;
}
static void axp_charger_update(struct axp_charger *charger)
{
uint8_t tmp;
struct axp_adc_res adc;
charger->adc = &adc;
axp_read_adc(charger, &adc);
tmp = charger->adc->vbat_res;
charger->vbat = axp18_vbat_to_vbat(tmp);
tmp = charger->adc->ibat_res;
charger->ibat = axp18_i_to_ibat(tmp);
tmp = charger->adc->vac_res;
charger->vac = axp18_vac_to_vbat(tmp);
tmp = charger->adc->iac_res;
charger->iac = axp18_i_to_ibat(tmp);
}
#if defined (CONFIG_AXP_CHARGEINIT)
static void axp_set_charge(struct axp_charger *charger)
{
uint8_t val,tmp;
val = 0x00;
if(charger->chgvol < 4200)
val &= ~(3 << 5);
else if (charger->chgvol<4360){
val &= ~(3 << 5);
val |= 1 << 6;
}
else
val |= 3 << 5;
if(charger->limit_on)
val |= ((charger->chgcur - 100) / 200) | (1 << 3);
else
val |= ((charger->chgcur - 100) / 200) ;
val &= 0x7F;
val |= charger->chgen << 7;
axp_read(charger->master, AXP18_CHARGE_CONTROL2, &tmp);
tmp &= 0x3C;
if(charger->chgpretime < 30)
charger->chgpretime = 30;
if(charger->chgcsttime < 420)
charger->chgcsttime = 420;
tmp |= ((charger->chgpretime - 30) / 10) << 6 \
| (charger->chgcsttime - 420) / 60;
axp_write(charger->master, AXP18_CHARGE_CONTROL1, val);
axp_write(charger->master, AXP18_CHARGE_CONTROL2, tmp);
axp_read(charger->master, AXP18_CHARGE_STATUS, &val);
if(charger ->chgend == 10)
val &= ~(1 << 6);
else
val |= 1 << 6;
axp_write(charger->master, AXP18_CHARGE_STATUS, val);
}
#else
static void axp_set_charge(struct axp_charger *charger)
{
}
#endif
static enum power_supply_property axp_battery_props[] = {
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
};
static enum power_supply_property axp_ac_props[] = {
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CURRENT_NOW,
};
static enum power_supply_property axp_usb_props[] = {
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_ONLINE,
};
static void axp_battery_check_status(struct axp_charger *charger,
union power_supply_propval *val)
{
if (charger->bat_det) {
if (charger->is_on)
val->intval = POWER_SUPPLY_STATUS_CHARGING;
else if (charger->rest_vol == 100 && charger->ext_valid)
val->intval = POWER_SUPPLY_STATUS_FULL;
else if (charger->ext_valid)
val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
else
val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
}
else
val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
}
static void axp_battery_check_health(struct axp_charger *charger,
union power_supply_propval *val)
{
if (charger->fault & AXP18_FAULT_LOG_BATINACT)
val->intval = POWER_SUPPLY_HEALTH_DEAD;
else if (charger->fault & AXP18_FAULT_LOG_OVER_TEMP)
val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
else if (charger->fault & AXP18_FAULT_LOG_COLD)
val->intval = POWER_SUPPLY_HEALTH_COLD;
	/* low voltage warning */
else if (charger->fault & AXP18_FAULT_LOG_VBAT_LOW)
val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
else if (charger->fault & AXP18_FAULT_LOG_VBAT_OVER)
val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
else
val->intval = POWER_SUPPLY_HEALTH_GOOD;
}
static int axp_battery_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
struct axp_charger *charger;
int ret = 0;
charger = container_of(psy, struct axp_charger, batt);
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
axp_battery_check_status(charger, val);
break;
case POWER_SUPPLY_PROP_HEALTH:
axp_battery_check_health(charger, val);
break;
case POWER_SUPPLY_PROP_TECHNOLOGY:
val->intval = charger->battery_info->technology;
break;
case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
val->intval = charger->battery_info->voltage_max_design;
break;
case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
val->intval = charger->battery_info->voltage_min_design;
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
val->intval = charger->vbat * 1000;
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
val->intval = charger->ibat * 1000;
break;
case POWER_SUPPLY_PROP_MODEL_NAME:
val->strval = charger->battery_info->name;
break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
val->intval = charger->battery_info->charge_full_design;
break;
case POWER_SUPPLY_PROP_CHARGE_FULL:
val->intval = charger->battery_info->charge_full_design;
break;
case POWER_SUPPLY_PROP_CAPACITY:
val->intval = charger->rest_vol;
break;
case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
if(charger->bat_det && !(charger->is_on) && !(charger->ext_valid))
val->intval = charger->rest_time;
else
val->intval = 0;
break;
case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW:
if(charger->bat_det && charger->is_on)
val->intval = charger->rest_time;
else
val->intval = 0;
break;
case POWER_SUPPLY_PROP_PRESENT:
val->intval = charger->bat_det;
break;
case POWER_SUPPLY_PROP_ONLINE:
val->intval = (!charger->is_on) && (charger->bat_det)&& (! charger->ext_valid);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
static int axp_ac_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
struct axp_charger *charger;
int ret = 0;
charger = container_of(psy, struct axp_charger, ac);
switch(psp){
case POWER_SUPPLY_PROP_MODEL_NAME:
val->strval = charger->ac.name;
break;
case POWER_SUPPLY_PROP_ONLINE:
val->intval = (charger->ac_det) && (charger->ext_valid);
break;
case POWER_SUPPLY_PROP_PRESENT:
val->intval = charger->ac_det;
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
val->intval = charger->vac;
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
val->intval = charger->iac;
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
static int axp_usb_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
struct axp_charger *charger;
int ret = 0;
charger = container_of(psy, struct axp_charger, usb);
switch(psp){
case POWER_SUPPLY_PROP_MODEL_NAME:
val->strval = charger->usb.name;
break;
case POWER_SUPPLY_PROP_PRESENT:
val->intval = charger->usb_det;
break;
case POWER_SUPPLY_PROP_ONLINE:
val->intval =(charger->usb_det)&&(charger->ext_valid);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
static int axp_battery_event(struct notifier_block *nb, unsigned long event,
void *data)
{
struct axp_charger *charger =
container_of(nb, struct axp_charger, nb);
switch (event) {
case AXP18_IRQ_BATIN:
case AXP18_IRQ_EXTIN:
axp_set_bits(charger->master, AXP18_CHARGE_CONTROL1, 0x80);
break;
case AXP18_IRQ_BATRE:
case AXP18_IRQ_EXTOV:
case AXP18_IRQ_EXTRE:
case AXP18_IRQ_TEMOV:
case AXP18_IRQ_TEMLO:
axp_clr_bits(charger->master, AXP18_CHARGE_CONTROL1, 0x80);
break;
default:
break;
}
return 0;
}
static void axp_battery_setup_psy(struct axp_charger *charger)
{
struct power_supply *batt = &charger->batt;
struct power_supply *ac = &charger->ac;
struct power_supply *usb = &charger->usb;
struct power_supply_info *info = charger->battery_info;
batt->name = "battery";
batt->type = POWER_SUPPLY_TYPE_BATTERY;
batt->get_property = axp_battery_get_property;
batt->use_for_apm = info->use_for_apm;
batt->properties = axp_battery_props;
batt->num_properties = ARRAY_SIZE(axp_battery_props);
ac->name = "ac";
ac->type = POWER_SUPPLY_TYPE_MAINS;
ac->get_property = axp_ac_get_property;
ac->properties = axp_ac_props;
ac->num_properties = ARRAY_SIZE(axp_ac_props);
usb->name = "usb";
usb->type = POWER_SUPPLY_TYPE_USB;
usb->get_property = axp_usb_get_property;
usb->properties = axp_usb_props;
usb->num_properties = ARRAY_SIZE(axp_usb_props);
}
#if defined (CONFIG_AXP_CHARGEINIT)
static int axp_battery_adc_set(struct axp_charger *charger)
{
int ret ;
uint8_t val;
/*enable adc and set adc */
val=(charger->sample_time / 8 - 1) << 2 | AXP18_ADC_BATVOL_ENABLE
| AXP18_ADC_BATCUR_ENABLE | AXP18_ADC_ACCUR_ENABLE
| AXP18_ADC_ACVOL_ENABLE;
ret = axp_write(charger->master, AXP18_ADC_CONTROL, val);
return ret;
}
#else
static int axp_battery_adc_set(struct axp_charger *charger)
{
return 0;
}
#endif
static int axp_battery_first_init(struct axp_charger *charger)
{
int ret;
axp_set_charge(charger);
ret = axp_battery_adc_set(charger);
return ret;
}
static int axp_get_rdc(struct axp_charger *charger)
{
uint8_t val[3];
unsigned int i,temp,pre_temp;
int averPreVol = 0, averPreCur = 0,averNextVol = 0,averNextCur = 0;
//axp_reads(charger->master,AXP18_DATA_BUFFER1,2,val);
axp_read(charger->master,AXP18_DATA_BUFFER1,val);
axp_read(charger->master,AXP18_DATA_BUFFER2,val+1);
pre_temp = (((val[0] & 0x7F) << 8 ) + val[1]);
printk("%d:pre_temp = %d\n",__LINE__,pre_temp);
if( charger->is_on){
for(i = 0; i< AXP18_RDC_COUNT; i++){
axp_charger_update(charger);
averPreVol += charger->vbat;
averPreCur += charger->ibat;
msleep(50);
}
averPreVol /= AXP18_RDC_COUNT;
averPreCur /= AXP18_RDC_COUNT;
axp_clr_bits(charger->master,AXP18_CHARGE_CONTROL2,0x80);
msleep(500);
for(i = 0; i< AXP18_RDC_COUNT; i++){
axp_charger_update(charger);
averNextVol += charger->vbat;
averNextCur += charger->ibat;
msleep(50);
}
averNextVol /= AXP18_RDC_COUNT;
averNextVol /= AXP18_RDC_COUNT;
axp_set_bits(charger->master,AXP18_CHARGE_CONTROL2,0x80);
msleep(500);
if(ABS(averPreCur - averNextCur) > 200){
temp = 1000 * ABS(averPreVol - averNextVol) / ABS(averPreCur);
if((temp < 5) || (temp > 5000)){
return pre_temp;
}
else {
temp += pre_temp;
temp >>= 1;
val[0] = ((temp & 0xFF00) | 0x8000) >> 8;
val[1] = AXP18_DATA_BUFFER2;
val[2] = temp & 0x00FF;
axp_writes(charger->master,AXP18_DATA_BUFFER1,3,val );
return temp;
}
}
else
return pre_temp;
}
else
return pre_temp;
}
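/*
 * Measurement sketch: the routine above samples V/I with charging on,
 * disables the charger, samples again, and estimates the battery's
 * internal resistance as rdc[mOhm] = 1000 * |dV| / |dI|, approximating
 * |dI| with the charging-phase current (the off-phase current is
 * assumed to be near zero). E.g. dV = 40 mV at 400 mA gives 100 mOhm.
 */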
static int axp_cal_restvol(int vol)
{
if(vol > 4150)
{
return 100;
}
else if(vol < 2700)
{
return 0;
}
else if(vol < 3200)
{
return (10 * (vol - 2700) / 5000);
}
else if(vol < 3650)
{
return (1500+ 17000 * (vol - 3200) / 450)/1000;
}
else if(vol < 3750)
{
return (18500 + 1500 * (vol - 3650) / 10)/1000; //20%改为18%
}
else if(vol < 3830)
{
return (33500 + (1500 * (vol - 3750)/(383 - 375)))/1000;
}
else if(vol < 4000)
{
		return (48500 + (4000 * (vol - 3830)/(400 - 383)))/1000; /* 40% changed to 37% */
}
else
{
if(vol > 4150)
{
vol = 4150;
}
		return (855 + (150 * (vol - 4000)/150))/10; /* 4200 - 3950 = 250; 13% changed to 15% */
}
}
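/*
 * Worked example (from the piecewise map above): vol = 3700 mV falls
 * into the 3650..3750 branch, so the remaining capacity is
 * (18500 + 1500 * (3700 - 3650) / 10) / 1000 = 26 percent.
 */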
int Bat_Pre_Cur = 1;
static void axp_cal_rest(struct axp_charger *charger, int this_rdc)
{
int battery_cap;
uint16_t Iconst_current = 1;
uint8_t DCIN_Presence, DCIN_Pre_Presence = 0;
battery_cap = charger->battery_info->charge_full_design;
if(charger->vac < 4200){
charger->ac_not_enough = 1;
}
else {
charger->ac_not_enough = 0;
}
if(charger->bat_det){
int Ichgset, total_vol = 0, Iendchg, Tcv_Rest_Vol, Tcv = 0;
int Internal_Ibat = 1;
if(charger->ibat == 0){
charger->ibat = 1;
}
total_vol = charger->vbat;
Internal_Ibat = charger->ibat;
Ichgset = charger->chgcur;
Iendchg = Ichgset * charger->chgend/100;
DCIN_Presence = charger->ac_det;
if((charger->vac < charger->vbat + 200) || (charger->vac < 4200)){
if((charger->ibat < (3 * Ichgset / 5)) && (charger->ext_valid)){
charger->ac_not_enough = 1;
}
else {
charger->ac_not_enough = 0;
}
}
else {
charger->ac_not_enough = 0;
}
if(charger->ext_valid){
total_vol -= charger->ibat * this_rdc * CHG_RDC_RATE / 100000;
charger->vbat = total_vol;
}
else {
charger->ibat *= DISCHARGE_CUR_RATE / 10;
if(charger->ibat > (MAX_BAT_CUR * Ichgset / 10)){
charger->ibat = 10 * charger->ibat / DISCHARGE_CUR_RATE;
}
charger->ibat = (charger->ibat + Bat_Pre_Cur)/2;
if(DCIN_Pre_Presence != DCIN_Presence){
charger->ibat = Internal_Ibat;
}
total_vol += charger->ibat * (this_rdc - DISCHARGE_RDC_CAL) / 1000;
charger->vbat = total_vol;
}
Bat_Pre_Cur = charger->ibat;
DCIN_Pre_Presence = DCIN_Presence;
charger->rest_vol = axp_cal_restvol(total_vol);
if(charger->ext_valid && charger->is_on){
if(charger->vbat < 4190){
Tcv_Rest_Vol = axp_cal_restvol(4200 - charger->ibat * this_rdc / 1000);
Iconst_current = charger->ibat;
if(Tcv_Rest_Vol < 70){
Tcv = 60 * (100 - Tcv_Rest_Vol) * battery_cap / (45 * charger->ibat);
}
else {
Tcv = 60 * (100 - Tcv_Rest_Vol) * battery_cap / (35 * charger->ibat);
}
charger->rest_time = 6 * battery_cap * ABS(Tcv_Rest_Vol - charger->rest_vol) \
/ charger->ibat / 10 + Tcv ;
}
else {
if(Iconst_current == 1){
Iconst_current = Ichgset;
}
if(Tcv == 0){
Tcv_Rest_Vol = axp_cal_restvol(4200 - charger->ibat * this_rdc / 1000);
if(Tcv_Rest_Vol < 70){
Tcv = 60 * (100 - Tcv_Rest_Vol) * battery_cap / (45 * charger->ibat);
}
else {
Tcv = 60 * (100 - Tcv_Rest_Vol) * battery_cap / (35 * charger->ibat);
}
}
if(charger->ibat < Iendchg){
charger->rest_time = 1;
}
else {
charger->rest_time = Tcv * (90 + 100 * Iendchg / charger->ibat) * \
(90 + 100 * Iendchg / charger->ibat) * ABS(charger->ibat - Iendchg) \
/ Iconst_current / 10000;
}
}
}
else {
if(total_vol < 3000){
charger->rest_time = 0;
}
else {
charger->rest_time = (60 * battery_cap * ABS(charger->rest_vol - 6) / charger->ibat \
+ 50) / 102;
}
}
}
else {
charger->vbat = 2500;
charger->ibat = 0;
charger->rest_time = 0;
charger->rest_vol = 0;
}
}
static int axp_main_task(void *arg)
{
struct axp_charger *charger = arg;
int batcap_count = 0, battime_count = 0;
uint16_t batcap[AXP18_VOL_MAX], battime[AXP18_TIME_MAX];
uint16_t pre_batcap = 0;
uint8_t rdc_flag = 0, tmp_value[2];
uint8_t pre_charge_status = 0;
uint16_t batcap_index = 0, battime_index = 0;
int total_vol = 0, total_time = 0;
int this_rdc;
uint8_t v[3] = {0, 0, 0};
uint8_t w[5] = {0, 0, 0, 0, 0};
int events;
bool peklong;
bool pekshort;
uint8_t long_cnt = 0;
bool status_usb, pre_status_usb;
bool status_ac, pre_status_ac;
bool status_bat, pre_status_bat;
bool pre_rdcflag;
status_usb = 0;
pre_status_usb = 0;
status_ac = 0;
pre_status_ac = 0;
status_bat = 0;
pre_status_bat =0;
//axp_reads(charger->master,AXP18_DATA_BUFFER1,2,tmp_value);
axp_read(charger->master,AXP18_DATA_BUFFER1,tmp_value);
axp_read(charger->master,AXP18_DATA_BUFFER2,tmp_value+1);
	this_rdc = ((tmp_value[0] & 0x7F) << 8) + tmp_value[1];
pre_rdcflag = tmp_value[0] >> 7;
if(this_rdc > 5000 || pre_rdcflag == 0)
this_rdc = BATRDC;
while(1){
if(kthread_should_stop()) break;
axp_charger_update_state(charger);
axp_charger_update(charger);
//axp_reads(charger->master,POWER18_INTSTS1, 3, v);
axp_read(charger->master,POWER18_INTSTS1,v);
axp_read(charger->master,POWER18_INTSTS2,v+1);
axp_read(charger->master,POWER18_INTSTS3,v+2);
events = (v[2] << 16) | (v[1] << 8) | v[0];
w[0] = v[0];
w[1] = POWER18_INTSTS2;
w[2] = v[1];
w[3] = POWER18_INTSTS3;
w[4] = v[2];
peklong = (events & AXP18_IRQ_PEKLO)? 1 : 0;
pekshort = (events & AXP18_IRQ_PEKSH )? 1 : 0;
status_ac = charger->ac_det;
status_usb = charger->usb_det;
status_bat = (!charger->is_on)&&(charger->bat_det);
if(status_usb != pre_status_usb || status_ac != pre_status_ac || status_bat != pre_status_bat )
{
power_supply_changed(&charger->batt);
pre_status_ac = status_ac;
pre_status_usb = status_usb;
pre_status_bat = status_bat;
}
		/* simulate a key_up after peklong */
if(long_cnt)
{
long_cnt--;
if(long_cnt == 0 )
{
printk("press long up\n");
input_report_key(powerkeydev, KEY_POWER, 0);
input_sync(powerkeydev);
}
}
if(peklong)
{
printk("press long\n");
axp_writes(charger->master,POWER18_INTSTS1,5,w);
input_report_key(powerkeydev, KEY_POWER, 1);
input_sync(powerkeydev);
long_cnt = 5;
//msleep(100);
//input_report_key(powerkeydev, KEY_POWER, 0);
//input_sync(powerkeydev);
}
if(pekshort)
{
printk("press short\n");
axp_writes(charger->master,POWER18_INTSTS1,5,w);
input_report_key(powerkeydev, KEY_POWER, 1);
input_sync(powerkeydev);
msleep(100);
input_report_key(powerkeydev, KEY_POWER, 0);
input_sync(powerkeydev);
}
if((charger->is_on)&&(!rdc_flag)){
if(charger->ibat > 220){
rdc_flag = 1;
this_rdc = axp_get_rdc(charger);
}
}
if(charger->bat_det == 0){
charger->rest_time = 0;
charger->rest_vol = 0;
}
else{
axp_cal_rest(charger, this_rdc);
if(battime_index == AXP18_TIME_MAX){
battime_index = 0;
}
if(battime_count < AXP18_TIME_MAX){
battime[battime_index ++ ] = charger->rest_time;
total_time += charger->rest_time;
battime_count ++;
}
else{
total_time -= battime[battime_index];
total_time += charger->rest_time;
battime[battime_index ++ ] = charger->rest_time;
}
charger->rest_time = total_time / battime_count;
if(batcap_index == AXP18_VOL_MAX){
batcap_index = 0;
}
if(batcap_count < AXP18_VOL_MAX){
batcap[batcap_index ++ ] = charger->rest_vol;
total_vol += charger->rest_vol;
batcap_count ++;
}
else{
total_vol -= batcap[batcap_index];
total_vol += charger->rest_vol;
batcap[batcap_index ++ ] = charger->rest_vol;
}
charger->rest_vol = total_vol / batcap_count;
//printk("charger->rest_vol = %d\n",charger->rest_vol);
if((charger->is_on) && (charger->rest_vol == 100)){
charger->rest_vol = 99;
}
if((charger->is_on) && (batcap_count == AXP18_VOL_MAX)){
if(charger->rest_vol < pre_batcap){
charger->rest_vol = pre_batcap;
}
}
if((!charger->is_on) && (batcap_count == AXP18_VOL_MAX)){
if(charger->rest_vol > pre_batcap){
charger->rest_vol = pre_batcap;
}
}
			if((pre_charge_status == 1) && (!charger->is_on) && (charger->bat_det) && (charger->ext_valid)){ /* refresh to 100 when charging finishes */
charger->rest_vol = total_vol / batcap_count;
}
pre_charge_status = charger->is_on;
//printk("charger->rest_vol = %d\n",charger->rest_vol);
			/* if the battery level changed, send a uevent */
if(charger->rest_vol - pre_batcap)
{
printk("battery vol change: %d, %d \n", pre_batcap, charger->rest_vol);
pre_batcap = charger->rest_vol;
power_supply_changed(&charger->batt);
}
}
ssleep(1);
}
return 0;
}
static ssize_t chgen_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct axp_charger *charger = dev_get_drvdata(dev);
uint8_t val;
axp_read(charger->master, AXP18_CHARGE_CONTROL1, &val);
charger->chgen = val >> 7;
return sprintf(buf, "%d\n",charger->chgen);
}
static ssize_t chgen_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct axp_charger *charger = dev_get_drvdata(dev);
int var;
var = simple_strtoul(buf, NULL, 10);
if(var){
charger->chgen = 1;
axp_set_bits(charger->master, AXP18_CHARGE_CONTROL1, 0x80);
}
else{
charger->chgen = 0;
axp_clr_bits(charger->master, AXP18_CHARGE_CONTROL1, 0x80);
}
return count;
}
static ssize_t chgcurlimen_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct axp_charger *charger = dev_get_drvdata(dev);
	uint8_t val;
axp_read(charger->master, AXP18_CHARGE_CONTROL1, &val);
charger->limit_on = val >> 3 & 0x01;
return sprintf(buf, "%d\n",charger->limit_on);
}
static ssize_t chgcurlimen_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct axp_charger *charger = dev_get_drvdata(dev);
int var;
var = simple_strtoul(buf, NULL, 10);
if(var){
charger->limit_on = 1;
axp_set_bits(charger->master, AXP18_CHARGE_CONTROL1, 0x08);
}
else{
charger->limit_on = 0;
axp_clr_bits(charger->master, AXP18_CHARGE_CONTROL1, 0x08);
}
return count;
}
static ssize_t chgmicrovol_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct axp_charger *charger = dev_get_drvdata(dev);
uint8_t val;
axp_read(charger->master, AXP18_CHARGE_CONTROL1, &val);
switch ((val >> 5) & 0x03){
case 0: charger->chgvol = 4100000;break;
case 1: charger->chgvol = 4200000;break;
case 2: charger->chgvol = 4200000;break;
case 3: charger->chgvol = 4360000;break;
default:break;
}
return sprintf(buf, "%d\n",charger->chgvol);
}
static ssize_t chgmicrovol_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct axp_charger *charger = dev_get_drvdata(dev);
int var;
uint8_t tmp, val;
var = simple_strtoul(buf, NULL, 10);
switch(var){
case 4100000:tmp = 0;break;
case 4200000:tmp = 2;break;
case 4360000:tmp = 3;break;
default: tmp = 4;break;
}
if(tmp < 4){
charger->chgvol = var;
axp_read(charger->master, AXP18_CHARGE_CONTROL1, &val);
val &= 0x9F;
val |= tmp << 5;
axp_write(charger->master, AXP18_CHARGE_CONTROL1, val);
}
return count;
}
static ssize_t chgmicrocur_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct axp_charger *charger = dev_get_drvdata(dev);
uint8_t val;
axp_read(charger->master, AXP18_CHARGE_CONTROL1, &val);
charger->chgcur = (val & 0x07) * 200000 +100000;
return sprintf(buf, "%d\n",charger->chgcur);
}
static ssize_t chgmicrocur_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct axp_charger *charger = dev_get_drvdata(dev);
int var;
	uint8_t val, tmp;
	var = simple_strtoul(buf, NULL, 10);
	if(var >= 100000 && var <= 1500000){
		tmp = (var - 100000)/200000;
		charger->chgcur = tmp * 200000 + 100000;
		axp_read(charger->master, AXP18_CHARGE_CONTROL1, &val);
		val &= 0xF8;
		val |= tmp;
		axp_write(charger->master, AXP18_CHARGE_CONTROL1, val);
}
return count;
}
static ssize_t chgendcur_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct axp_charger *charger = dev_get_drvdata(dev);
uint8_t val;
axp_read(charger->master, AXP18_CHARGE_STATUS, &val);
charger->chgend = ((val >> 6)& 0x01)? 15 : 10;
return sprintf(buf, "%d\n",charger->chgend);
}
static ssize_t chgendcur_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct axp_charger *charger = dev_get_drvdata(dev);
int var;
var = simple_strtoul(buf, NULL, 10);
if(var == 10 ){
charger->chgend = var;
axp_clr_bits(charger->master ,AXP18_CHARGE_STATUS,0x40);
}
else if (var == 15){
charger->chgend = var;
axp_set_bits(charger->master ,AXP18_CHARGE_STATUS,0x40);
}
return count;
}
static ssize_t chgpretimemin_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct axp_charger *charger = dev_get_drvdata(dev);
uint8_t val;
axp_read(charger->master,AXP18_CHARGE_CONTROL2, &val);
charger->chgpretime = (val >> 6) * 10 +30;
return sprintf(buf, "%d\n",charger->chgpretime);
}
static ssize_t chgpretimemin_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct axp_charger *charger = dev_get_drvdata(dev);
int var;
uint8_t val,tmp;
var = simple_strtoul(buf, NULL, 10);
if(var >= 30 && var <= 60){
tmp = (var - 30)/10;
charger->chgpretime = tmp * 10 + 30;
axp_read(charger->master,AXP18_CHARGE_CONTROL2,&val);
val &= 0x3F;
val |= (tmp << 6);
axp_write(charger->master,AXP18_CHARGE_CONTROL2,val);
}
return count;
}
static ssize_t chgcsttimemin_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct axp_charger *charger = dev_get_drvdata(dev);
uint8_t val;
axp_read(charger->master,AXP18_CHARGE_CONTROL2, &val);
charger->chgcsttime = (val & 0x03) *60 + 420;
return sprintf(buf, "%d\n",charger->chgcsttime);
}
static ssize_t chgcsttimemin_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct axp_charger *charger = dev_get_drvdata(dev);
int var;
uint8_t val,tmp;
var = simple_strtoul(buf, NULL, 10);
if(var >= 420 && var <= 600){
tmp = (var - 420)/60;
charger->chgcsttime = tmp * 60 + 420;
axp_read(charger->master,AXP18_CHARGE_CONTROL2,&val);
val &= 0xFC;
val |= tmp;
axp_write(charger->master,AXP18_CHARGE_CONTROL2,val);
}
return count;
}
static ssize_t adcfreq_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct axp_charger *charger = dev_get_drvdata(dev);
uint8_t val;
axp_read(charger->master, AXP18_ADC_CONTROL, &val);
switch ((val >> 2) & 0x03){
case 0: charger->sample_time = 8;break;
case 1: charger->sample_time = 16;break;
case 2: charger->sample_time = 25;break;
case 3: charger->sample_time = 32;break;
default:break;
}
return sprintf(buf, "%d\n",charger->sample_time);
}
static ssize_t adcfreq_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct axp_charger *charger = dev_get_drvdata(dev);
int var;
uint8_t val;
var = simple_strtoul(buf, NULL, 10);
axp_read(charger->master, AXP18_ADC_CONTROL, &val);
switch (var){
case 8: val &= ~(3 << 2);charger->sample_time = 8;break;
case 16: val &= ~(3 << 2);val |= 1 << 2;charger->sample_time = 16;break;
case 25: val &= ~(3 << 2);val |= 2 << 2;charger->sample_time = 25;break;
case 32: val |= 3 << 2;charger->sample_time = 32;break;
default: break;
}
axp_write(charger->master, AXP18_ADC_CONTROL, val);
return count;
}
static ssize_t vholden_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct axp_charger *charger = dev_get_drvdata(dev);
uint8_t val;
axp_read(charger->master,AXP18_CHARGE_VBUS, &val);
val = (val>>6) & 0x01;
return sprintf(buf, "%d\n",val);
}
static ssize_t vholden_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct axp_charger *charger = dev_get_drvdata(dev);
int var;
var = simple_strtoul(buf, NULL, 10);
if(var)
axp_set_bits(charger->master, AXP18_CHARGE_VBUS, 0x40);
else
axp_clr_bits(charger->master, AXP18_CHARGE_VBUS, 0x40);
return count;
}
static ssize_t vhold_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct axp_charger *charger = dev_get_drvdata(dev);
uint8_t val;
int vhold;
axp_read(charger->master,AXP18_CHARGE_VBUS, &val);
switch((val>>4)& 0x03)
{
case 0: vhold = 4220000;break;
case 1: vhold = 4400000;break;
case 2: vhold = 4550000;break;
case 3: vhold = 4700000;break;
default:return -EINVAL;
}
return sprintf(buf, "%d\n",vhold);
}
static ssize_t vhold_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct axp_charger *charger = dev_get_drvdata(dev);
int var;
uint8_t val,tmp;
var = simple_strtoul(buf, NULL, 10);
if(var >= 4220000 && var <=4700000){
if(var == 4220000)
tmp = 0;
else if(var <= 4400000)
tmp = 1;
else if(var <= 4550000)
tmp = 2;
else
tmp = 3;
axp_read(charger->master, AXP18_CHARGE_VBUS, &val);
val &= 0xCF;
val |= tmp << 4;
axp_write(charger->master, AXP18_CHARGE_VBUS, val);
}
return count;
}
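/*
 * Illustrative mapping (assumption based on the decode in vhold_show
 * above): requested VBUS hold voltages round up to the next supported
 * threshold, e.g. 4300000 -> 4400000 (tmp = 1) and 4500000 -> 4550000
 * (tmp = 2); only 4220000, 4400000, 4550000 and 4700000 uV are
 * representable in bits 5:4 of the register.
 */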
static struct device_attribute axp_charger_attrs[] = {
AXP_CHG_ATTR(chgen),
AXP_CHG_ATTR(chgcurlimen),
AXP_CHG_ATTR(chgmicrovol),
AXP_CHG_ATTR(chgmicrocur),
AXP_CHG_ATTR(chgendcur),
AXP_CHG_ATTR(chgpretimemin),
AXP_CHG_ATTR(chgcsttimemin),
AXP_CHG_ATTR(adcfreq),
AXP_CHG_ATTR(vholden),
AXP_CHG_ATTR(vhold),
};
int axp_charger_create_attrs(struct power_supply *psy)
{
int j, ret = 0;
for (j = 0; j < ARRAY_SIZE(axp_charger_attrs); j++) {
ret = device_create_file(psy->dev,
&axp_charger_attrs[j]);
if (ret)
goto sysfs_failed;
}
goto succeed;
sysfs_failed:
while (j--)
device_remove_file(psy->dev,
&axp_charger_attrs[j]);
succeed:
return ret;
}
static int axp_battery_probe(struct platform_device *pdev)
{
struct axp_charger *charger;
struct axp_supply_init_data *pdata = pdev->dev.platform_data;
int ret;
powerkeydev = input_allocate_device();
if (!powerkeydev)
return -ENOMEM;
powerkeydev->name = pdev->name;
powerkeydev->phys = "m1kbd/input2";
powerkeydev->id.bustype = BUS_HOST;
powerkeydev->id.vendor = 0x0001;
powerkeydev->id.product = 0x0001;
powerkeydev->id.version = 0x0100;
powerkeydev->open = NULL;
powerkeydev->close = NULL;
powerkeydev->dev.parent = &pdev->dev;
set_bit(EV_KEY, powerkeydev->evbit);
set_bit(EV_REL, powerkeydev->evbit);
set_bit(KEY_POWER, powerkeydev->keybit);
ret = input_register_device(powerkeydev);
if (ret) {
printk(KERN_ERR "Unable to register the power key\n");
input_free_device(powerkeydev);
return ret;
}
if (pdata == NULL) {
ret = -EINVAL;
goto err_pdata;
}
if (pdata->chgcur > 1500 ||
pdata->chgvol < 4100 ||
pdata->chgvol > 4360){
printk(KERN_ERR "charge current too high or target voltage out of range\n");
ret = -EINVAL;
goto err_pdata;
}
if (pdata->chgpretime < 30 || pdata->chgpretime > 60 ||
pdata->chgcsttime < 420 || pdata->chgcsttime > 600){
printk(KERN_ERR "precharge time or constant-current charge time out of range\n");
ret = -EINVAL;
goto err_pdata;
}
charger = kzalloc(sizeof(*charger), GFP_KERNEL);
if (charger == NULL) {
ret = -ENOMEM;
goto err_pdata;
}
charger->master = pdev->dev.parent;
charger->chgcur = pdata->chgcur;
charger->chgvol = pdata->chgvol;
charger->chgend = pdata->chgend;
charger->sample_time = pdata->sample_time;
charger->chgen = pdata->chgen;
charger->limit_on = pdata->limit_on;
charger->chgpretime = pdata->chgpretime;
charger->chgcsttime = pdata->chgcsttime;
charger->battery_info = pdata->battery_info;
charger->battery_low = pdata->battery_low;
charger->battery_critical = pdata->battery_critical;
ret = axp_battery_first_init(charger);
if (ret)
goto err_charger_init;
charger->nb.notifier_call = axp_battery_event;
ret = axp_register_notifier(charger->master, &charger->nb, AXP18_NOTIFIER_ON);
if (ret)
goto err_notifier;
axp_battery_setup_psy(charger);
ret = power_supply_register(&pdev->dev, &charger->batt);
if (ret)
goto err_ps_register;
ret = power_supply_register(&pdev->dev, &charger->ac);
if (ret){
power_supply_unregister(&charger->batt);
goto err_ps_register;
}
ret = power_supply_register(&pdev->dev, &charger->usb);
if (ret){
power_supply_unregister(&charger->ac);
power_supply_unregister(&charger->batt);
goto err_ps_register;
}
ret = axp_charger_create_attrs(&charger->batt);
if (ret)
goto err_attrs;
platform_set_drvdata(pdev, charger);
main_task = kthread_run(axp_main_task, charger, "kaxp18");
if (IS_ERR(main_task)) {
printk(KERN_ERR "Unable to start main task.\n");
ret = PTR_ERR(main_task);
main_task = NULL;
goto err_attrs;
}
return 0;
err_attrs:
power_supply_unregister(&charger->usb);
power_supply_unregister(&charger->ac);
power_supply_unregister(&charger->batt);
err_ps_register:
axp_unregister_notifier(charger->master, &charger->nb, AXP18_NOTIFIER_ON);
err_notifier:
//cancel_delayed_work(&charger->work);
err_charger_init:
kfree(charger);
err_pdata:
input_unregister_device(powerkeydev);
return ret;
}
static int axp_battery_remove(struct platform_device *dev)
{
struct axp_charger *charger = platform_get_drvdata(dev);
if(main_task){
kthread_stop(main_task);
main_task = NULL;
}
axp_unregister_notifier(charger->master, &charger->nb, AXP18_NOTIFIER_ON);
//cancel_delayed_work(&charger->work);
power_supply_unregister(&charger->usb);
power_supply_unregister(&charger->ac);
power_supply_unregister(&charger->batt);
kfree(charger);
input_unregister_device(powerkeydev);
return 0;
}
static struct platform_driver axp_battery_driver = {
.driver = {
.name = "axp18-supplyer",
.owner = THIS_MODULE,
},
.probe = axp_battery_probe,
.remove = axp_battery_remove,
};
static int axp_battery_init(void)
{
return platform_driver_register(&axp_battery_driver);
}
static void axp_battery_exit(void)
{
platform_driver_unregister(&axp_battery_driver);
}
module_init(axp_battery_init);
module_exit(axp_battery_exit);
MODULE_DESCRIPTION("AXP18 battery charger driver");
MODULE_AUTHOR("Donglu Zhang, Krosspower");
MODULE_LICENSE("GPL");
| gpl-2.0 |
bbrezillon/linux-sunxi | net/openvswitch/flow_table.c | 328 | 19322 | /*
* Copyright (c) 2007-2014 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
*/
#include "flow.h"
#include "datapath.h"
#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>
#define TBL_MIN_BUCKETS 1024
#define REHASH_INTERVAL (10 * 60 * HZ)
static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;
static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
return range->end - range->start;
}
void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
bool full, const struct sw_flow_mask *mask)
{
int start = full ? 0 : mask->range.start;
int len = full ? sizeof *dst : range_n_bytes(&mask->range);
const long *m = (const long *)((const u8 *)&mask->key + start);
const long *s = (const long *)((const u8 *)src + start);
long *d = (long *)((u8 *)dst + start);
int i;
/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
* if 'full' is false the memory outside of the 'mask->range' is left
* uninitialized. This can be used as an optimization when further
* operations on 'dst' only use contents within 'mask->range'.
*/
for (i = 0; i < len; i += sizeof(long))
*d++ = *s++ & *m++;
}
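/*
 * Worked example (illustrative, not from the original source): with
 * mask->range = { .start = 8, .end = 24 } and full == false, only
 * bytes 8..23 of 'src' are ANDed with the corresponding mask bytes and
 * stored into 'dst', sizeof(long) bytes at a time; bytes outside the
 * range keep whatever 'dst' already contained.
 */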
struct sw_flow *ovs_flow_alloc(void)
{
struct sw_flow *flow;
struct flow_stats *stats;
int node;
flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
if (!flow)
return ERR_PTR(-ENOMEM);
flow->sf_acts = NULL;
flow->mask = NULL;
flow->id.unmasked_key = NULL;
flow->id.ufid_len = 0;
flow->stats_last_writer = NUMA_NO_NODE;
/* Initialize the default stat node. */
stats = kmem_cache_alloc_node(flow_stats_cache,
GFP_KERNEL | __GFP_ZERO,
node_online(0) ? 0 : NUMA_NO_NODE);
if (!stats)
goto err;
spin_lock_init(&stats->lock);
RCU_INIT_POINTER(flow->stats[0], stats);
for_each_node(node)
if (node != 0)
RCU_INIT_POINTER(flow->stats[node], NULL);
return flow;
err:
kmem_cache_free(flow_cache, flow);
return ERR_PTR(-ENOMEM);
}
int ovs_flow_tbl_count(const struct flow_table *table)
{
return table->count;
}
static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
struct flex_array *buckets;
int i, err;
buckets = flex_array_alloc(sizeof(struct hlist_head),
n_buckets, GFP_KERNEL);
if (!buckets)
return NULL;
err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
if (err) {
flex_array_free(buckets);
return NULL;
}
for (i = 0; i < n_buckets; i++)
INIT_HLIST_HEAD((struct hlist_head *)
flex_array_get(buckets, i));
return buckets;
}
static void flow_free(struct sw_flow *flow)
{
int node;
if (ovs_identifier_is_key(&flow->id))
kfree(flow->id.unmasked_key);
if (flow->sf_acts)
ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
for_each_node(node)
if (flow->stats[node])
kmem_cache_free(flow_stats_cache,
(struct flow_stats __force *)flow->stats[node]);
kmem_cache_free(flow_cache, flow);
}
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
flow_free(flow);
}
void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
if (!flow)
return;
if (deferred)
call_rcu(&flow->rcu, rcu_free_flow_callback);
else
flow_free(flow);
}
static void free_buckets(struct flex_array *buckets)
{
flex_array_free(buckets);
}
static void __table_instance_destroy(struct table_instance *ti)
{
free_buckets(ti->buckets);
kfree(ti);
}
static struct table_instance *table_instance_alloc(int new_size)
{
struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
if (!ti)
return NULL;
ti->buckets = alloc_buckets(new_size);
if (!ti->buckets) {
kfree(ti);
return NULL;
}
ti->n_buckets = new_size;
ti->node_ver = 0;
ti->keep_flows = false;
get_random_bytes(&ti->hash_seed, sizeof(u32));
return ti;
}
int ovs_flow_tbl_init(struct flow_table *table)
{
struct table_instance *ti, *ufid_ti;
ti = table_instance_alloc(TBL_MIN_BUCKETS);
if (!ti)
return -ENOMEM;
ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
if (!ufid_ti)
goto free_ti;
rcu_assign_pointer(table->ti, ti);
rcu_assign_pointer(table->ufid_ti, ufid_ti);
INIT_LIST_HEAD(&table->mask_list);
table->last_rehash = jiffies;
table->count = 0;
table->ufid_count = 0;
return 0;
free_ti:
__table_instance_destroy(ti);
return -ENOMEM;
}
static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
struct table_instance *ti = container_of(rcu, struct table_instance, rcu);
__table_instance_destroy(ti);
}
static void table_instance_destroy(struct table_instance *ti,
struct table_instance *ufid_ti,
bool deferred)
{
int i;
if (!ti)
return;
BUG_ON(!ufid_ti);
if (ti->keep_flows)
goto skip_flows;
for (i = 0; i < ti->n_buckets; i++) {
struct sw_flow *flow;
struct hlist_head *head = flex_array_get(ti->buckets, i);
struct hlist_node *n;
int ver = ti->node_ver;
int ufid_ver = ufid_ti->node_ver;
hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
hlist_del_rcu(&flow->flow_table.node[ver]);
if (ovs_identifier_is_ufid(&flow->id))
hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
ovs_flow_free(flow, deferred);
}
}
skip_flows:
if (deferred) {
call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
} else {
__table_instance_destroy(ti);
__table_instance_destroy(ufid_ti);
}
}
/* No need for locking: this function is called from an RCU callback
 * or from the error path.
*/
void ovs_flow_tbl_destroy(struct flow_table *table)
{
struct table_instance *ti = rcu_dereference_raw(table->ti);
struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
table_instance_destroy(ti, ufid_ti, false);
}
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
u32 *bucket, u32 *last)
{
struct sw_flow *flow;
struct hlist_head *head;
int ver;
int i;
ver = ti->node_ver;
while (*bucket < ti->n_buckets) {
i = 0;
head = flex_array_get(ti->buckets, *bucket);
hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
if (i < *last) {
i++;
continue;
}
*last = i + 1;
return flow;
}
(*bucket)++;
*last = 0;
}
return NULL;
}
static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
hash = jhash_1word(hash, ti->hash_seed);
return flex_array_get(ti->buckets,
(hash & (ti->n_buckets - 1)));
}
static void table_instance_insert(struct table_instance *ti,
struct sw_flow *flow)
{
struct hlist_head *head;
head = find_bucket(ti, flow->flow_table.hash);
hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}
static void ufid_table_instance_insert(struct table_instance *ti,
struct sw_flow *flow)
{
struct hlist_head *head;
head = find_bucket(ti, flow->ufid_table.hash);
hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}
static void flow_table_copy_flows(struct table_instance *old,
struct table_instance *new, bool ufid)
{
int old_ver;
int i;
old_ver = old->node_ver;
new->node_ver = !old_ver;
/* Insert in new table. */
for (i = 0; i < old->n_buckets; i++) {
struct sw_flow *flow;
struct hlist_head *head;
head = flex_array_get(old->buckets, i);
if (ufid)
hlist_for_each_entry(flow, head,
ufid_table.node[old_ver])
ufid_table_instance_insert(new, flow);
else
hlist_for_each_entry(flow, head,
flow_table.node[old_ver])
table_instance_insert(new, flow);
}
old->keep_flows = true;
}
static struct table_instance *table_instance_rehash(struct table_instance *ti,
int n_buckets, bool ufid)
{
struct table_instance *new_ti;
new_ti = table_instance_alloc(n_buckets);
if (!new_ti)
return NULL;
flow_table_copy_flows(ti, new_ti, ufid);
return new_ti;
}
int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
struct table_instance *old_ti, *new_ti;
struct table_instance *old_ufid_ti, *new_ufid_ti;
new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
if (!new_ti)
return -ENOMEM;
new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
if (!new_ufid_ti)
goto err_free_ti;
old_ti = ovsl_dereference(flow_table->ti);
old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);
rcu_assign_pointer(flow_table->ti, new_ti);
rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
flow_table->last_rehash = jiffies;
flow_table->count = 0;
flow_table->ufid_count = 0;
table_instance_destroy(old_ti, old_ufid_ti, true);
return 0;
err_free_ti:
__table_instance_destroy(new_ti);
return -ENOMEM;
}
static u32 flow_hash(const struct sw_flow_key *key,
const struct sw_flow_key_range *range)
{
int key_start = range->start;
int key_end = range->end;
const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
int hash_u32s = (key_end - key_start) >> 2;
/* Make sure the number of hash bytes is a multiple of u32. */
BUILD_BUG_ON(sizeof(long) % sizeof(u32));
return jhash2(hash_key, hash_u32s, 0);
}
static int flow_key_start(const struct sw_flow_key *key)
{
if (key->tun_proto)
return 0;
else
return rounddown(offsetof(struct sw_flow_key, phy),
sizeof(long));
}
static bool cmp_key(const struct sw_flow_key *key1,
const struct sw_flow_key *key2,
int key_start, int key_end)
{
const long *cp1 = (const long *)((const u8 *)key1 + key_start);
const long *cp2 = (const long *)((const u8 *)key2 + key_start);
long diffs = 0;
int i;
for (i = key_start; i < key_end; i += sizeof(long))
diffs |= *cp1++ ^ *cp2++;
return diffs == 0;
}
static bool flow_cmp_masked_key(const struct sw_flow *flow,
const struct sw_flow_key *key,
const struct sw_flow_key_range *range)
{
return cmp_key(&flow->key, key, range->start, range->end);
}
static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
const struct sw_flow_match *match)
{
struct sw_flow_key *key = match->key;
int key_start = flow_key_start(key);
int key_end = match->range.end;
BUG_ON(ovs_identifier_is_ufid(&flow->id));
return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
const struct sw_flow_key *unmasked,
const struct sw_flow_mask *mask)
{
struct sw_flow *flow;
struct hlist_head *head;
u32 hash;
struct sw_flow_key masked_key;
ovs_flow_mask_key(&masked_key, unmasked, false, mask);
hash = flow_hash(&masked_key, &mask->range);
head = find_bucket(ti, hash);
hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
if (flow->mask == mask && flow->flow_table.hash == hash &&
flow_cmp_masked_key(flow, &masked_key, &mask->range))
return flow;
}
return NULL;
}
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
const struct sw_flow_key *key,
u32 *n_mask_hit)
{
struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
struct sw_flow_mask *mask;
struct sw_flow *flow;
*n_mask_hit = 0;
list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
(*n_mask_hit)++;
flow = masked_flow_lookup(ti, key, mask);
if (flow) /* Found */
return flow;
}
return NULL;
}
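/*
 * Note (added commentary): the lookup above is linear in the number of
 * masks on tbl->mask_list, so a miss costs one masked hash lookup per
 * mask. *n_mask_hit counts how many masks were tried, which the caller
 * can report as a megaflow cache statistic.
 */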
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
const struct sw_flow_key *key)
{
u32 __always_unused n_mask_hit;
return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
}
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
const struct sw_flow_match *match)
{
struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
struct sw_flow_mask *mask;
struct sw_flow *flow;
/* Always called under ovs-mutex. */
list_for_each_entry(mask, &tbl->mask_list, list) {
flow = masked_flow_lookup(ti, match->key, mask);
if (flow && ovs_identifier_is_key(&flow->id) &&
ovs_flow_cmp_unmasked_key(flow, match))
return flow;
}
return NULL;
}
static u32 ufid_hash(const struct sw_flow_id *sfid)
{
return jhash(sfid->ufid, sfid->ufid_len, 0);
}
static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
const struct sw_flow_id *sfid)
{
if (flow->id.ufid_len != sfid->ufid_len)
return false;
return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}
bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
{
if (ovs_identifier_is_ufid(&flow->id))
return flow_cmp_masked_key(flow, match->key, &match->range);
return ovs_flow_cmp_unmasked_key(flow, match);
}
struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
const struct sw_flow_id *ufid)
{
struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
struct sw_flow *flow;
struct hlist_head *head;
u32 hash;
hash = ufid_hash(ufid);
head = find_bucket(ti, hash);
hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
if (flow->ufid_table.hash == hash &&
ovs_flow_cmp_ufid(flow, ufid))
return flow;
}
return NULL;
}
int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
struct sw_flow_mask *mask;
int num = 0;
list_for_each_entry(mask, &table->mask_list, list)
num++;
return num;
}
static struct table_instance *table_instance_expand(struct table_instance *ti,
bool ufid)
{
return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}
/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
if (mask) {
/* ovs-lock is required to protect mask-refcount and
* mask list.
*/
ASSERT_OVSL();
BUG_ON(!mask->ref_count);
mask->ref_count--;
if (!mask->ref_count) {
list_del_rcu(&mask->list);
kfree_rcu(mask, rcu);
}
}
}
/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
struct table_instance *ti = ovsl_dereference(table->ti);
struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);
BUG_ON(table->count == 0);
hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
table->count--;
if (ovs_identifier_is_ufid(&flow->id)) {
hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
table->ufid_count--;
}
/* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
* accessible as long as the RCU read lock is held.
*/
flow_mask_remove(table, flow->mask);
}
static struct sw_flow_mask *mask_alloc(void)
{
struct sw_flow_mask *mask;
mask = kmalloc(sizeof(*mask), GFP_KERNEL);
if (mask)
mask->ref_count = 1;
return mask;
}
static bool mask_equal(const struct sw_flow_mask *a,
const struct sw_flow_mask *b)
{
const u8 *a_ = (const u8 *)&a->key + a->range.start;
const u8 *b_ = (const u8 *)&b->key + b->range.start;
return (a->range.end == b->range.end)
&& (a->range.start == b->range.start)
&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}
static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
const struct sw_flow_mask *mask)
{
struct list_head *ml;
list_for_each(ml, &tbl->mask_list) {
struct sw_flow_mask *m;
m = container_of(ml, struct sw_flow_mask, list);
if (mask_equal(mask, m))
return m;
}
return NULL;
}
/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
const struct sw_flow_mask *new)
{
struct sw_flow_mask *mask;
mask = flow_mask_find(tbl, new);
if (!mask) {
/* Allocate a new mask if none exists. */
mask = mask_alloc();
if (!mask)
return -ENOMEM;
mask->key = new->key;
mask->range = new->range;
list_add_rcu(&mask->list, &tbl->mask_list);
} else {
BUG_ON(!mask->ref_count);
mask->ref_count++;
}
flow->mask = mask;
return 0;
}
/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
struct table_instance *new_ti = NULL;
struct table_instance *ti;
flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
ti = ovsl_dereference(table->ti);
table_instance_insert(ti, flow);
table->count++;
/* Expand table, if necessary, to make room. */
if (table->count > ti->n_buckets)
new_ti = table_instance_expand(ti, false);
else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
new_ti = table_instance_rehash(ti, ti->n_buckets, false);
if (new_ti) {
rcu_assign_pointer(table->ti, new_ti);
call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
table->last_rehash = jiffies;
}
}
/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
struct table_instance *ti;
flow->ufid_table.hash = ufid_hash(&flow->id);
ti = ovsl_dereference(table->ufid_ti);
ufid_table_instance_insert(ti, flow);
table->ufid_count++;
/* Expand table, if necessary, to make room. */
if (table->ufid_count > ti->n_buckets) {
struct table_instance *new_ti;
new_ti = table_instance_expand(ti, true);
if (new_ti) {
rcu_assign_pointer(table->ufid_ti, new_ti);
call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
}
}
}
/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
const struct sw_flow_mask *mask)
{
int err;
err = flow_mask_insert(table, flow, mask);
if (err)
return err;
flow_key_insert(table, flow);
if (ovs_identifier_is_ufid(&flow->id))
flow_ufid_insert(table, flow);
return 0;
}
/* Initializes the flow module.
* Returns zero if successful or a negative error code. */
int ovs_flow_init(void)
{
BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
+ (nr_node_ids
* sizeof(struct flow_stats *)),
0, 0, NULL);
if (flow_cache == NULL)
return -ENOMEM;
flow_stats_cache
= kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
0, SLAB_HWCACHE_ALIGN, NULL);
if (flow_stats_cache == NULL) {
kmem_cache_destroy(flow_cache);
flow_cache = NULL;
return -ENOMEM;
}
return 0;
}
/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
kmem_cache_destroy(flow_stats_cache);
kmem_cache_destroy(flow_cache);
}
| gpl-2.0 |
deadman96385/android_kernel_leeco_msm8996 | drivers/s390/cio/eadm_sch.c | 328 | 9412 | /*
* Driver for s390 eadm subchannels
*
* Copyright IBM Corp. 2012
* Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
*/
#include <linux/kernel_stat.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/css_chars.h>
#include <asm/debug.h>
#include <asm/isc.h>
#include <asm/cio.h>
#include <asm/scsw.h>
#include <asm/eadm.h>
#include "eadm_sch.h"
#include "ioasm.h"
#include "cio.h"
#include "css.h"
#include "orb.h"
MODULE_DESCRIPTION("driver for s390 eadm subchannels");
MODULE_LICENSE("GPL");
#define EADM_TIMEOUT (5 * HZ)
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(eadm_list);
static debug_info_t *eadm_debug;
#define EADM_LOG(imp, txt) do { \
debug_text_event(eadm_debug, imp, txt); \
} while (0)
static void EADM_LOG_HEX(int level, void *data, int length)
{
if (!debug_level_enabled(eadm_debug, level))
return;
while (length > 0) {
debug_event(eadm_debug, level, data, length);
length -= eadm_debug->buf_size;
data += eadm_debug->buf_size;
}
}
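/*
 * Note (added commentary): debug_event() records at most buf_size
 * bytes per trace entry (16 here, per the debug_register() call in
 * eadm_sch_init()), so the loop above splits a larger buffer such as
 * an irb into consecutive 16-byte chunks.
 */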
static void orb_init(union orb *orb)
{
memset(orb, 0, sizeof(union orb));
orb->eadm.compat1 = 1;
orb->eadm.compat2 = 1;
orb->eadm.fmt = 1;
orb->eadm.x = 1;
}
static int eadm_subchannel_start(struct subchannel *sch, struct aob *aob)
{
union orb *orb = &get_eadm_private(sch)->orb;
int cc;
orb_init(orb);
orb->eadm.aob = (u32)__pa(aob);
orb->eadm.intparm = (u32)(addr_t)sch;
orb->eadm.key = PAGE_DEFAULT_KEY >> 4;
EADM_LOG(6, "start");
EADM_LOG_HEX(6, &sch->schid, sizeof(sch->schid));
cc = ssch(sch->schid, orb);
switch (cc) {
case 0:
sch->schib.scsw.eadm.actl |= SCSW_ACTL_START_PEND;
break;
case 1: /* status pending */
case 2: /* busy */
return -EBUSY;
case 3: /* not operational */
return -ENODEV;
}
return 0;
}
static int eadm_subchannel_clear(struct subchannel *sch)
{
int cc;
cc = csch(sch->schid);
if (cc)
return -ENODEV;
sch->schib.scsw.eadm.actl |= SCSW_ACTL_CLEAR_PEND;
return 0;
}
static void eadm_subchannel_timeout(unsigned long data)
{
struct subchannel *sch = (struct subchannel *) data;
spin_lock_irq(sch->lock);
EADM_LOG(1, "timeout");
EADM_LOG_HEX(1, &sch->schid, sizeof(sch->schid));
if (eadm_subchannel_clear(sch))
EADM_LOG(0, "clear failed");
spin_unlock_irq(sch->lock);
}
static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires)
{
struct eadm_private *private = get_eadm_private(sch);
if (expires == 0) {
del_timer(&private->timer);
return;
}
if (timer_pending(&private->timer)) {
if (mod_timer(&private->timer, jiffies + expires))
return;
}
private->timer.function = eadm_subchannel_timeout;
private->timer.data = (unsigned long) sch;
private->timer.expires = jiffies + expires;
add_timer(&private->timer);
}
static void eadm_subchannel_irq(struct subchannel *sch)
{
struct eadm_private *private = get_eadm_private(sch);
struct eadm_scsw *scsw = &sch->schib.scsw.eadm;
struct irb *irb = this_cpu_ptr(&cio_irb);
int error = 0;
EADM_LOG(6, "irq");
EADM_LOG_HEX(6, irb, sizeof(*irb));
inc_irq_stat(IRQIO_ADM);
if ((scsw->stctl & (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))
&& scsw->eswf == 1 && irb->esw.eadm.erw.r)
error = -EIO;
if (scsw->fctl & SCSW_FCTL_CLEAR_FUNC)
error = -ETIMEDOUT;
eadm_subchannel_set_timeout(sch, 0);
if (private->state != EADM_BUSY) {
EADM_LOG(1, "irq unsol");
EADM_LOG_HEX(1, irb, sizeof(*irb));
private->state = EADM_NOT_OPER;
css_sched_sch_todo(sch, SCH_TODO_EVAL);
return;
}
scm_irq_handler((struct aob *)(unsigned long)scsw->aob, error);
private->state = EADM_IDLE;
if (private->completion)
complete(private->completion);
}
static struct subchannel *eadm_get_idle_sch(void)
{
struct eadm_private *private;
struct subchannel *sch;
unsigned long flags;
spin_lock_irqsave(&list_lock, flags);
list_for_each_entry(private, &eadm_list, head) {
sch = private->sch;
spin_lock(sch->lock);
if (private->state == EADM_IDLE) {
private->state = EADM_BUSY;
list_move_tail(&private->head, &eadm_list);
spin_unlock(sch->lock);
spin_unlock_irqrestore(&list_lock, flags);
return sch;
}
spin_unlock(sch->lock);
}
spin_unlock_irqrestore(&list_lock, flags);
return NULL;
}
int eadm_start_aob(struct aob *aob)
{
struct eadm_private *private;
struct subchannel *sch;
unsigned long flags;
int ret;
sch = eadm_get_idle_sch();
if (!sch)
return -EBUSY;
spin_lock_irqsave(sch->lock, flags);
eadm_subchannel_set_timeout(sch, EADM_TIMEOUT);
ret = eadm_subchannel_start(sch, aob);
if (!ret)
goto out_unlock;
/* Handle start subchannel failure. */
eadm_subchannel_set_timeout(sch, 0);
private = get_eadm_private(sch);
private->state = EADM_NOT_OPER;
css_sched_sch_todo(sch, SCH_TODO_EVAL);
out_unlock:
spin_unlock_irqrestore(sch->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(eadm_start_aob);
static int eadm_subchannel_probe(struct subchannel *sch)
{
struct eadm_private *private;
int ret;
private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
if (!private)
return -ENOMEM;
INIT_LIST_HEAD(&private->head);
init_timer(&private->timer);
spin_lock_irq(sch->lock);
set_eadm_private(sch, private);
private->state = EADM_IDLE;
private->sch = sch;
sch->isc = EADM_SCH_ISC;
ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
if (ret) {
set_eadm_private(sch, NULL);
spin_unlock_irq(sch->lock);
kfree(private);
goto out;
}
spin_unlock_irq(sch->lock);
spin_lock_irq(&list_lock);
list_add(&private->head, &eadm_list);
spin_unlock_irq(&list_lock);
if (dev_get_uevent_suppress(&sch->dev)) {
dev_set_uevent_suppress(&sch->dev, 0);
kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
}
out:
return ret;
}
static void eadm_quiesce(struct subchannel *sch)
{
struct eadm_private *private = get_eadm_private(sch);
DECLARE_COMPLETION_ONSTACK(completion);
int ret;
spin_lock_irq(sch->lock);
if (private->state != EADM_BUSY)
goto disable;
if (eadm_subchannel_clear(sch))
goto disable;
private->completion = &completion;
spin_unlock_irq(sch->lock);
wait_for_completion_io(&completion);
spin_lock_irq(sch->lock);
private->completion = NULL;
disable:
eadm_subchannel_set_timeout(sch, 0);
do {
ret = cio_disable_subchannel(sch);
} while (ret == -EBUSY);
spin_unlock_irq(sch->lock);
}
static int eadm_subchannel_remove(struct subchannel *sch)
{
struct eadm_private *private = get_eadm_private(sch);
spin_lock_irq(&list_lock);
list_del(&private->head);
spin_unlock_irq(&list_lock);
eadm_quiesce(sch);
spin_lock_irq(sch->lock);
set_eadm_private(sch, NULL);
spin_unlock_irq(sch->lock);
kfree(private);
return 0;
}
static void eadm_subchannel_shutdown(struct subchannel *sch)
{
eadm_quiesce(sch);
}
static int eadm_subchannel_freeze(struct subchannel *sch)
{
return cio_disable_subchannel(sch);
}
static int eadm_subchannel_restore(struct subchannel *sch)
{
return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
}
/**
* eadm_subchannel_sch_event - process subchannel event
* @sch: subchannel
* @process: non-zero if function is called in process context
*
* An unspecified event occurred for this subchannel. Adjust data according
* to the current operational state of the subchannel. Return zero when the
* event has been handled sufficiently or -EAGAIN when this function should
* be called again in process context.
*/
static int eadm_subchannel_sch_event(struct subchannel *sch, int process)
{
struct eadm_private *private;
unsigned long flags;
int ret = 0;
spin_lock_irqsave(sch->lock, flags);
if (!device_is_registered(&sch->dev))
goto out_unlock;
if (work_pending(&sch->todo_work))
goto out_unlock;
if (cio_update_schib(sch)) {
css_sched_sch_todo(sch, SCH_TODO_UNREG);
goto out_unlock;
}
private = get_eadm_private(sch);
if (private->state == EADM_NOT_OPER)
private->state = EADM_IDLE;
out_unlock:
spin_unlock_irqrestore(sch->lock, flags);
return ret;
}
static struct css_device_id eadm_subchannel_ids[] = {
{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_ADM, },
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, eadm_subchannel_ids);
static struct css_driver eadm_subchannel_driver = {
.drv = {
.name = "eadm_subchannel",
.owner = THIS_MODULE,
},
.subchannel_type = eadm_subchannel_ids,
.irq = eadm_subchannel_irq,
.probe = eadm_subchannel_probe,
.remove = eadm_subchannel_remove,
.shutdown = eadm_subchannel_shutdown,
.sch_event = eadm_subchannel_sch_event,
.freeze = eadm_subchannel_freeze,
.thaw = eadm_subchannel_restore,
.restore = eadm_subchannel_restore,
};
static int __init eadm_sch_init(void)
{
int ret;
if (!css_general_characteristics.eadm)
return -ENXIO;
eadm_debug = debug_register("eadm_log", 16, 1, 16);
if (!eadm_debug)
return -ENOMEM;
debug_register_view(eadm_debug, &debug_hex_ascii_view);
debug_set_level(eadm_debug, 2);
isc_register(EADM_SCH_ISC);
ret = css_driver_register(&eadm_subchannel_driver);
if (ret)
goto cleanup;
return ret;
cleanup:
isc_unregister(EADM_SCH_ISC);
debug_unregister(eadm_debug);
return ret;
}
static void __exit eadm_sch_exit(void)
{
css_driver_unregister(&eadm_subchannel_driver);
isc_unregister(EADM_SCH_ISC);
debug_unregister(eadm_debug);
}
module_init(eadm_sch_init);
module_exit(eadm_sch_exit);
| gpl-2.0 |
tq-systems/linux-mxs | drivers/media/video/adv7343.c | 1096 | 12605 | /*
* adv7343 - ADV7343 Video Encoder Driver
*
* The encoder hardware does not support SECAM.
*
* Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/videodev2.h>
#include <linux/uaccess.h>
#include <media/adv7343.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
#include "adv7343_regs.h"
MODULE_DESCRIPTION("ADV7343 video encoder driver");
MODULE_LICENSE("GPL");
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug level 0-1");
struct adv7343_state {
struct v4l2_subdev sd;
u8 reg00;
u8 reg01;
u8 reg02;
u8 reg35;
u8 reg80;
u8 reg82;
int bright;
int hue;
int gain;
u32 output;
v4l2_std_id std;
};
static inline struct adv7343_state *to_state(struct v4l2_subdev *sd)
{
return container_of(sd, struct adv7343_state, sd);
}
static inline int adv7343_write(struct v4l2_subdev *sd, u8 reg, u8 value)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
return i2c_smbus_write_byte_data(client, reg, value);
}
static const u8 adv7343_init_reg_val[] = {
ADV7343_SOFT_RESET, ADV7343_SOFT_RESET_DEFAULT,
ADV7343_POWER_MODE_REG, ADV7343_POWER_MODE_REG_DEFAULT,
ADV7343_HD_MODE_REG1, ADV7343_HD_MODE_REG1_DEFAULT,
ADV7343_HD_MODE_REG2, ADV7343_HD_MODE_REG2_DEFAULT,
ADV7343_HD_MODE_REG3, ADV7343_HD_MODE_REG3_DEFAULT,
ADV7343_HD_MODE_REG4, ADV7343_HD_MODE_REG4_DEFAULT,
ADV7343_HD_MODE_REG5, ADV7343_HD_MODE_REG5_DEFAULT,
ADV7343_HD_MODE_REG6, ADV7343_HD_MODE_REG6_DEFAULT,
ADV7343_HD_MODE_REG7, ADV7343_HD_MODE_REG7_DEFAULT,
ADV7343_SD_MODE_REG1, ADV7343_SD_MODE_REG1_DEFAULT,
ADV7343_SD_MODE_REG2, ADV7343_SD_MODE_REG2_DEFAULT,
ADV7343_SD_MODE_REG3, ADV7343_SD_MODE_REG3_DEFAULT,
ADV7343_SD_MODE_REG4, ADV7343_SD_MODE_REG4_DEFAULT,
ADV7343_SD_MODE_REG5, ADV7343_SD_MODE_REG5_DEFAULT,
ADV7343_SD_MODE_REG6, ADV7343_SD_MODE_REG6_DEFAULT,
ADV7343_SD_MODE_REG7, ADV7343_SD_MODE_REG7_DEFAULT,
ADV7343_SD_MODE_REG8, ADV7343_SD_MODE_REG8_DEFAULT,
ADV7343_SD_HUE_REG, ADV7343_SD_HUE_REG_DEFAULT,
ADV7343_SD_CGMS_WSS0, ADV7343_SD_CGMS_WSS0_DEFAULT,
ADV7343_SD_BRIGHTNESS_WSS, ADV7343_SD_BRIGHTNESS_WSS_DEFAULT,
};
/*
 * FSC(reg) = FSC(Hz) * 2^32 / 27000000
 */
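/*
 * Worked example (illustrative): for NTSC, FSC = 3,579,545.45 Hz, so
 * FSC(reg) = 3579545.45 * 2^32 / 27000000 ~= 569408542, matching the
 * SD_STD_NTSC entry below; likewise 4,433,618.75 Hz yields 705268427
 * for the PAL-B/D/G/H/I and NTSC-443 entries.
 */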
static const struct adv7343_std_info stdinfo[] = {
{
/* FSC(Hz) = 3,579,545.45 Hz */
SD_STD_NTSC, 569408542, V4L2_STD_NTSC,
}, {
/* FSC(Hz) = 3,575,611.00 Hz */
SD_STD_PAL_M, 568782678, V4L2_STD_PAL_M,
}, {
/* FSC(Hz) = 3,582,056.00 */
SD_STD_PAL_N, 569807903, V4L2_STD_PAL_Nc,
}, {
/* FSC(Hz) = 4,433,618.75 Hz */
SD_STD_PAL_N, 705268427, V4L2_STD_PAL_N,
}, {
/* FSC(Hz) = 4,433,618.75 Hz */
SD_STD_PAL_BDGHI, 705268427, V4L2_STD_PAL,
}, {
/* FSC(Hz) = 4,433,618.75 Hz */
SD_STD_NTSC, 705268427, V4L2_STD_NTSC_443,
}, {
/* FSC(Hz) = 4,433,618.75 Hz */
SD_STD_PAL_M, 705268427, V4L2_STD_PAL_60,
},
};
static int adv7343_setstd(struct v4l2_subdev *sd, v4l2_std_id std)
{
struct adv7343_state *state = to_state(sd);
struct adv7343_std_info *std_info;
int output_idx, num_std;
char *fsc_ptr;
u8 reg, val;
int err = 0;
int i = 0;
output_idx = state->output;
std_info = (struct adv7343_std_info *)stdinfo;
num_std = ARRAY_SIZE(stdinfo);
for (i = 0; i < num_std; i++) {
if (std_info[i].stdid & std)
break;
}
if (i == num_std) {
v4l2_dbg(1, debug, sd,
"Invalid std or std is not supported: %llx\n",
(unsigned long long)std);
return -EINVAL;
}
/* Set the standard */
val = state->reg80 & (~(SD_STD_MASK));
val |= std_info[i].standard_val3;
err = adv7343_write(sd, ADV7343_SD_MODE_REG1, val);
if (err < 0)
goto setstd_exit;
state->reg80 = val;
/* Configure the input mode register */
val = state->reg01 & (~((u8) INPUT_MODE_MASK));
val |= SD_INPUT_MODE;
err = adv7343_write(sd, ADV7343_MODE_SELECT_REG, val);
if (err < 0)
goto setstd_exit;
state->reg01 = val;
/* Program the sub carrier frequency registers */
fsc_ptr = (unsigned char *)&std_info[i].fsc_val;
reg = ADV7343_FSC_REG0;
for (i = 0; i < 4; i++, reg++, fsc_ptr++) {
err = adv7343_write(sd, reg, *fsc_ptr);
if (err < 0)
goto setstd_exit;
}
val = state->reg80;
/* Filter settings */
if (std & (V4L2_STD_NTSC | V4L2_STD_NTSC_443))
val &= 0x03;
else if (std & ~V4L2_STD_SECAM)
val |= 0x04;
err = adv7343_write(sd, ADV7343_SD_MODE_REG1, val);
if (err < 0)
goto setstd_exit;
state->reg80 = val;
setstd_exit:
if (err != 0)
v4l2_err(sd, "Error setting std, write failed\n");
return err;
}
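/*
 * Note (added commentary): the FSC programming loop above walks a u8
 * pointer over the 32-bit fsc_val and writes one byte per register
 * starting at ADV7343_FSC_REG0, which assumes a little-endian CPU so
 * that the least significant subcarrier byte lands in FSC register 0.
 */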
static int adv7343_setoutput(struct v4l2_subdev *sd, u32 output_type)
{
struct adv7343_state *state = to_state(sd);
unsigned char val;
int err = 0;
if (output_type > ADV7343_SVIDEO_ID) {
v4l2_dbg(1, debug, sd,
"Invalid output type or output type not supported:%d\n",
output_type);
return -EINVAL;
}
/* Enable Appropriate DAC */
val = state->reg00 & 0x03;
if (output_type == ADV7343_COMPOSITE_ID)
val |= ADV7343_COMPOSITE_POWER_VALUE;
else if (output_type == ADV7343_COMPONENT_ID)
val |= ADV7343_COMPONENT_POWER_VALUE;
else
val |= ADV7343_SVIDEO_POWER_VALUE;
err = adv7343_write(sd, ADV7343_POWER_MODE_REG, val);
if (err < 0)
goto setoutput_exit;
state->reg00 = val;
/* Enable YUV output */
val = state->reg02 | YUV_OUTPUT_SELECT;
err = adv7343_write(sd, ADV7343_MODE_REG0, val);
if (err < 0)
goto setoutput_exit;
state->reg02 = val;
/* configure SD DAC Output 2 and SD DAC Output 1 bit to zero */
val = state->reg82 & (SD_DAC_1_DI & SD_DAC_2_DI);
err = adv7343_write(sd, ADV7343_SD_MODE_REG2, val);
if (err < 0)
goto setoutput_exit;
state->reg82 = val;
/* configure ED/HD Color DAC Swap and ED/HD RGB Input Enable bit to
* zero */
val = state->reg35 & (HD_RGB_INPUT_DI & HD_DAC_SWAP_DI);
err = adv7343_write(sd, ADV7343_HD_MODE_REG6, val);
if (err < 0)
goto setoutput_exit;
state->reg35 = val;
setoutput_exit:
if (err != 0)
v4l2_err(sd, "Error setting output, write failed\n");
return err;
}
static int adv7343_log_status(struct v4l2_subdev *sd)
{
struct adv7343_state *state = to_state(sd);
v4l2_info(sd, "Standard: %llx\n", (unsigned long long)state->std);
v4l2_info(sd, "Output: %s\n", (state->output == 0) ? "Composite" :
((state->output == 1) ? "Component" : "S-Video"));
return 0;
}
static int adv7343_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
{
switch (qc->id) {
case V4L2_CID_BRIGHTNESS:
return v4l2_ctrl_query_fill(qc, ADV7343_BRIGHTNESS_MIN,
ADV7343_BRIGHTNESS_MAX, 1,
ADV7343_BRIGHTNESS_DEF);
case V4L2_CID_HUE:
return v4l2_ctrl_query_fill(qc, ADV7343_HUE_MIN,
ADV7343_HUE_MAX, 1,
ADV7343_HUE_DEF);
case V4L2_CID_GAIN:
return v4l2_ctrl_query_fill(qc, ADV7343_GAIN_MIN,
ADV7343_GAIN_MAX, 1,
ADV7343_GAIN_DEF);
default:
break;
}
return 0;
}
static int adv7343_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
struct adv7343_state *state = to_state(sd);
int err = 0;
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
if (ctrl->value < ADV7343_BRIGHTNESS_MIN ||
ctrl->value > ADV7343_BRIGHTNESS_MAX) {
v4l2_dbg(1, debug, sd,
"invalid brightness settings %d\n",
ctrl->value);
return -ERANGE;
}
state->bright = ctrl->value;
err = adv7343_write(sd, ADV7343_SD_BRIGHTNESS_WSS,
state->bright);
break;
case V4L2_CID_HUE:
if (ctrl->value < ADV7343_HUE_MIN ||
ctrl->value > ADV7343_HUE_MAX) {
v4l2_dbg(1, debug, sd, "invalid hue settings %d\n",
ctrl->value);
return -ERANGE;
}
state->hue = ctrl->value;
err = adv7343_write(sd, ADV7343_SD_HUE_REG, state->hue);
break;
case V4L2_CID_GAIN:
if (ctrl->value < ADV7343_GAIN_MIN ||
ctrl->value > ADV7343_GAIN_MAX) {
v4l2_dbg(1, debug, sd, "invalid gain settings %d\n",
ctrl->value);
return -ERANGE;
}
if ((ctrl->value > POSITIVE_GAIN_MAX) &&
(ctrl->value < NEGATIVE_GAIN_MIN)) {
v4l2_dbg(1, debug, sd,
"gain settings not within the specified range\n");
return -ERANGE;
}
state->gain = ctrl->value;
err = adv7343_write(sd, ADV7343_DAC2_OUTPUT_LEVEL, state->gain);
break;
default:
return -EINVAL;
}
if (err < 0)
v4l2_err(sd, "Failed to set the encoder controls\n");
return err;
}
static int adv7343_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
struct adv7343_state *state = to_state(sd);
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
ctrl->value = state->bright;
break;
case V4L2_CID_HUE:
ctrl->value = state->hue;
break;
case V4L2_CID_GAIN:
ctrl->value = state->gain;
break;
default:
return -EINVAL;
}
return 0;
}
static int adv7343_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *chip)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_ADV7343, 0);
}
static const struct v4l2_subdev_core_ops adv7343_core_ops = {
.log_status = adv7343_log_status,
.g_chip_ident = adv7343_g_chip_ident,
.g_ctrl = adv7343_g_ctrl,
.s_ctrl = adv7343_s_ctrl,
.queryctrl = adv7343_queryctrl,
};
static int adv7343_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
{
struct adv7343_state *state = to_state(sd);
int err = 0;
if (state->std == std)
return 0;
err = adv7343_setstd(sd, std);
if (!err)
state->std = std;
return err;
}
static int adv7343_s_routing(struct v4l2_subdev *sd,
u32 input, u32 output, u32 config)
{
struct adv7343_state *state = to_state(sd);
int err = 0;
if (state->output == output)
return 0;
err = adv7343_setoutput(sd, output);
if (!err)
state->output = output;
return err;
}
static const struct v4l2_subdev_video_ops adv7343_video_ops = {
.s_std_output = adv7343_s_std_output,
.s_routing = adv7343_s_routing,
};
static const struct v4l2_subdev_ops adv7343_ops = {
.core = &adv7343_core_ops,
.video = &adv7343_video_ops,
};
static int adv7343_initialize(struct v4l2_subdev *sd)
{
struct adv7343_state *state = to_state(sd);
int err = 0;
int i;
for (i = 0; i < ARRAY_SIZE(adv7343_init_reg_val); i += 2) {
err = adv7343_write(sd, adv7343_init_reg_val[i],
adv7343_init_reg_val[i+1]);
if (err) {
v4l2_err(sd, "Error initializing\n");
return err;
}
}
/* Configure for default video standard */
err = adv7343_setoutput(sd, state->output);
if (err < 0) {
v4l2_err(sd, "Error setting output during init\n");
return -EINVAL;
}
err = adv7343_setstd(sd, state->std);
if (err < 0) {
v4l2_err(sd, "Error setting std during init\n");
return -EINVAL;
}
return err;
}
static int adv7343_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct adv7343_state *state;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
v4l_info(client, "chip found @ 0x%x (%s)\n",
client->addr << 1, client->adapter->name);
state = kzalloc(sizeof(struct adv7343_state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
state->reg00 = 0x80;
state->reg01 = 0x00;
state->reg02 = 0x20;
state->reg35 = 0x00;
state->reg80 = ADV7343_SD_MODE_REG1_DEFAULT;
state->reg82 = ADV7343_SD_MODE_REG2_DEFAULT;
state->output = ADV7343_COMPOSITE_ID;
state->std = V4L2_STD_NTSC;
v4l2_i2c_subdev_init(&state->sd, client, &adv7343_ops);
return adv7343_initialize(&state->sd);
}
static int adv7343_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
kfree(to_state(sd));
return 0;
}
static const struct i2c_device_id adv7343_id[] = {
{"adv7343", 0},
{},
};
MODULE_DEVICE_TABLE(i2c, adv7343_id);
static struct i2c_driver adv7343_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "adv7343",
},
.probe = adv7343_probe,
.remove = adv7343_remove,
.id_table = adv7343_id,
};
static __init int init_adv7343(void)
{
return i2c_add_driver(&adv7343_driver);
}
static __exit void exit_adv7343(void)
{
i2c_del_driver(&adv7343_driver);
}
module_init(init_adv7343);
module_exit(exit_adv7343);
| gpl-2.0 |
xdajog/kernel_fx3q_aosp | net/core/rtnetlink.c | 1096 | 52137 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Routing netlink socket interface: protocol independent part.
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Fixes:
* Vitaly E. Lavrov RTA_OK arithmetics was wrong.
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/if_addr.h>
#include <linux/pci.h>
#include <asm/uaccess.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/udp.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/fib_rules.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
struct rtnl_link {
rtnl_doit_func doit;
rtnl_dumpit_func dumpit;
rtnl_calcit_func calcit;
};
static DEFINE_MUTEX(rtnl_mutex);
void rtnl_lock(void)
{
mutex_lock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock);
void __rtnl_unlock(void)
{
mutex_unlock(&rtnl_mutex);
}
void rtnl_unlock(void)
{
/* This fellow will unlock it for us. */
netdev_run_todo();
}
EXPORT_SYMBOL(rtnl_unlock);
int rtnl_trylock(void)
{
return mutex_trylock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_trylock);
int rtnl_is_locked(void)
{
return mutex_is_locked(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_is_locked);
#ifdef CONFIG_PROVE_LOCKING
int lockdep_rtnl_is_held(void)
{
return lockdep_is_held(&rtnl_mutex);
}
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
static struct rtnl_link *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
static inline int rtm_msgindex(int msgtype)
{
int msgindex = msgtype - RTM_BASE;
/*
* msgindex < 0 implies someone tried to register a netlink
* control code. msgindex >= RTM_NR_MSGTYPES may indicate that
* the message type has not been added to linux/rtnetlink.h
*/
BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);
return msgindex;
}
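/*
 * Example (added commentary): RTM_NEWLINK equals RTM_BASE (16), so
 * rtm_msgindex(RTM_NEWLINK) == 0, and RTM_GETROUTE (RTM_BASE + 10)
 * maps to index 10; the index selects a slot in a protocol's
 * rtnl_link handler table.
 */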
static rtnl_doit_func rtnl_get_doit(int protocol, int msgindex)
{
struct rtnl_link *tab;
if (protocol <= RTNL_FAMILY_MAX)
tab = rtnl_msg_handlers[protocol];
else
tab = NULL;
if (tab == NULL || tab[msgindex].doit == NULL)
tab = rtnl_msg_handlers[PF_UNSPEC];
return tab ? tab[msgindex].doit : NULL;
}
static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex)
{
struct rtnl_link *tab;
if (protocol <= RTNL_FAMILY_MAX)
tab = rtnl_msg_handlers[protocol];
else
tab = NULL;
if (tab == NULL || tab[msgindex].dumpit == NULL)
tab = rtnl_msg_handlers[PF_UNSPEC];
return tab ? tab[msgindex].dumpit : NULL;
}
static rtnl_calcit_func rtnl_get_calcit(int protocol, int msgindex)
{
struct rtnl_link *tab;
if (protocol <= RTNL_FAMILY_MAX)
tab = rtnl_msg_handlers[protocol];
else
tab = NULL;
if (tab == NULL || tab[msgindex].calcit == NULL)
tab = rtnl_msg_handlers[PF_UNSPEC];
return tab ? tab[msgindex].calcit : NULL;
}
/**
* __rtnl_register - Register a rtnetlink message type
* @protocol: Protocol family or PF_UNSPEC
* @msgtype: rtnetlink message type
* @doit: Function pointer called for each request message
* @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
* @calcit: Function pointer to calc size of dump message
*
* Registers the specified function pointers (at least one of them has
* to be non-NULL) to be called whenever a request message for the
* specified protocol family and message type is received.
*
* The special protocol family PF_UNSPEC may be used to define fallback
* function pointers for the case when no entry for the specific protocol
* family exists.
*
* Returns 0 on success or a negative error code.
*/
int __rtnl_register(int protocol, int msgtype,
rtnl_doit_func doit, rtnl_dumpit_func dumpit,
rtnl_calcit_func calcit)
{
struct rtnl_link *tab;
int msgindex;
BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
msgindex = rtm_msgindex(msgtype);
tab = rtnl_msg_handlers[protocol];
if (tab == NULL) {
tab = kcalloc(RTM_NR_MSGTYPES, sizeof(*tab), GFP_KERNEL);
if (tab == NULL)
return -ENOBUFS;
rtnl_msg_handlers[protocol] = tab;
}
if (doit)
tab[msgindex].doit = doit;
if (dumpit)
tab[msgindex].dumpit = dumpit;
if (calcit)
tab[msgindex].calcit = calcit;
return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_register);
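/*
 * Usage sketch (illustrative, not part of this file): a protocol
 * typically registers its handlers from its init code, e.g.
 *
 *	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute,
 *		      inet_dump_fib, NULL);
 *
 * where inet_rtm_getroute and inet_dump_fib name that protocol's doit
 * and dumpit implementations.
 */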
/**
* rtnl_register - Register a rtnetlink message type
*
* Identical to __rtnl_register() but panics on failure. This is useful
* as failure of this function is very unlikely, it can only happen due
* to lack of memory when allocating the chain to store all message
* handlers for a protocol. Meant for use in init functions where lack
* of memory implies no sense in continuing.
*/
void rtnl_register(int protocol, int msgtype,
rtnl_doit_func doit, rtnl_dumpit_func dumpit,
rtnl_calcit_func calcit)
{
if (__rtnl_register(protocol, msgtype, doit, dumpit, calcit) < 0)
panic("Unable to register rtnetlink message handler, "
"protocol = %d, message type = %d\n",
protocol, msgtype);
}
EXPORT_SYMBOL_GPL(rtnl_register);
/**
* rtnl_unregister - Unregister a rtnetlink message type
* @protocol: Protocol family or PF_UNSPEC
* @msgtype: rtnetlink message type
*
* Returns 0 on success or a negative error code.
*/
int rtnl_unregister(int protocol, int msgtype)
{
int msgindex;
BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
msgindex = rtm_msgindex(msgtype);
if (rtnl_msg_handlers[protocol] == NULL)
return -ENOENT;
rtnl_msg_handlers[protocol][msgindex].doit = NULL;
rtnl_msg_handlers[protocol][msgindex].dumpit = NULL;
return 0;
}
EXPORT_SYMBOL_GPL(rtnl_unregister);
/**
* rtnl_unregister_all - Unregister all rtnetlink message type of a protocol
* @protocol : Protocol family or PF_UNSPEC
*
* Identical to calling rtnl_unregister() for all registered message types
* of a certain protocol family.
*/
void rtnl_unregister_all(int protocol)
{
BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
kfree(rtnl_msg_handlers[protocol]);
rtnl_msg_handlers[protocol] = NULL;
}
EXPORT_SYMBOL_GPL(rtnl_unregister_all);
static LIST_HEAD(link_ops);
static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
{
const struct rtnl_link_ops *ops;
list_for_each_entry(ops, &link_ops, list) {
if (!strcmp(ops->kind, kind))
return ops;
}
return NULL;
}
/**
* __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
* @ops: struct rtnl_link_ops * to register
*
* The caller must hold the rtnl_mutex. This function should be used
* by drivers that create devices during module initialization. It
* must be called before registering the devices.
*
* Returns 0 on success or a negative error code.
*/
int __rtnl_link_register(struct rtnl_link_ops *ops)
{
if (rtnl_link_ops_get(ops->kind))
return -EEXIST;
if (!ops->dellink)
ops->dellink = unregister_netdevice_queue;
list_add_tail(&ops->list, &link_ops);
return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_link_register);
/**
* rtnl_link_register - Register rtnl_link_ops with rtnetlink.
* @ops: struct rtnl_link_ops * to register
*
* Returns 0 on success or a negative error code.
*/
int rtnl_link_register(struct rtnl_link_ops *ops)
{
int err;
rtnl_lock();
err = __rtnl_link_register(ops);
rtnl_unlock();
return err;
}
EXPORT_SYMBOL_GPL(rtnl_link_register);
static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
{
struct net_device *dev;
LIST_HEAD(list_kill);
for_each_netdev(net, dev) {
if (dev->rtnl_link_ops == ops)
ops->dellink(dev, &list_kill);
}
unregister_netdevice_many(&list_kill);
}
/**
* __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
* @ops: struct rtnl_link_ops * to unregister
*
* The caller must hold the rtnl_mutex.
*/
void __rtnl_link_unregister(struct rtnl_link_ops *ops)
{
struct net *net;
for_each_net(net) {
__rtnl_kill_links(net, ops);
}
list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
/**
* rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
* @ops: struct rtnl_link_ops * to unregister
*/
void rtnl_link_unregister(struct rtnl_link_ops *ops)
{
rtnl_lock();
__rtnl_link_unregister(ops);
rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_link_unregister);
static size_t rtnl_link_get_size(const struct net_device *dev)
{
const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
size_t size;
if (!ops)
return 0;
size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
nla_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */
if (ops->get_size)
/* IFLA_INFO_DATA + nested data */
size += nla_total_size(sizeof(struct nlattr)) +
ops->get_size(dev);
if (ops->get_xstats_size)
/* IFLA_INFO_XSTATS */
size += nla_total_size(ops->get_xstats_size(dev));
return size;
}
static LIST_HEAD(rtnl_af_ops);
static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
{
const struct rtnl_af_ops *ops;
list_for_each_entry(ops, &rtnl_af_ops, list) {
if (ops->family == family)
return ops;
}
return NULL;
}
/**
* __rtnl_af_register - Register rtnl_af_ops with rtnetlink.
* @ops: struct rtnl_af_ops * to register
*
* The caller must hold the rtnl_mutex.
*
* Returns 0 on success or a negative error code.
*/
int __rtnl_af_register(struct rtnl_af_ops *ops)
{
list_add_tail(&ops->list, &rtnl_af_ops);
return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_af_register);
/**
* rtnl_af_register - Register rtnl_af_ops with rtnetlink.
* @ops: struct rtnl_af_ops * to register
*
* Returns 0 on success or a negative error code.
*/
int rtnl_af_register(struct rtnl_af_ops *ops)
{
int err;
rtnl_lock();
err = __rtnl_af_register(ops);
rtnl_unlock();
return err;
}
EXPORT_SYMBOL_GPL(rtnl_af_register);
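/*
* Usage sketch (illustrative; the callback names are hypothetical): an
* address family registers its IFLA_AF_SPEC handlers once at init time,
* much as the IPv4/IPv6 code does.
*
*	static struct rtnl_af_ops my_af_ops = {
*		.family		  = AF_INET,
*		.fill_link_af	  = my_fill_link_af,
*		.get_link_af_size = my_get_link_af_size,
*		.set_link_af	  = my_set_link_af,
*	};
*
*	rtnl_af_register(&my_af_ops);
*/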
/**
* __rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
* @ops: struct rtnl_af_ops * to unregister
*
* The caller must hold the rtnl_mutex.
*/
void __rtnl_af_unregister(struct rtnl_af_ops *ops)
{
list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_af_unregister);
/**
* rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
* @ops: struct rtnl_af_ops * to unregister
*/
void rtnl_af_unregister(struct rtnl_af_ops *ops)
{
rtnl_lock();
__rtnl_af_unregister(ops);
rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_af_unregister);
static size_t rtnl_link_get_af_size(const struct net_device *dev)
{
struct rtnl_af_ops *af_ops;
size_t size;
/* IFLA_AF_SPEC */
size = nla_total_size(sizeof(struct nlattr));
list_for_each_entry(af_ops, &rtnl_af_ops, list) {
if (af_ops->get_link_af_size) {
/* AF_* + nested data */
size += nla_total_size(sizeof(struct nlattr)) +
af_ops->get_link_af_size(dev);
}
}
return size;
}
static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
{
const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
struct nlattr *linkinfo, *data;
int err = -EMSGSIZE;
linkinfo = nla_nest_start(skb, IFLA_LINKINFO);
if (linkinfo == NULL)
goto out;
if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
goto err_cancel_link;
if (ops->fill_xstats) {
err = ops->fill_xstats(skb, dev);
if (err < 0)
goto err_cancel_link;
}
if (ops->fill_info) {
data = nla_nest_start(skb, IFLA_INFO_DATA);
if (data == NULL)
goto err_cancel_link;
err = ops->fill_info(skb, dev);
if (err < 0)
goto err_cancel_data;
nla_nest_end(skb, data);
}
nla_nest_end(skb, linkinfo);
return 0;
err_cancel_data:
nla_nest_cancel(skb, data);
err_cancel_link:
nla_nest_cancel(skb, linkinfo);
out:
return err;
}
static const int rtm_min[RTM_NR_FAMILIES] =
{
[RTM_FAM(RTM_NEWLINK)] = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
[RTM_FAM(RTM_NEWADDR)] = NLMSG_LENGTH(sizeof(struct ifaddrmsg)),
[RTM_FAM(RTM_NEWROUTE)] = NLMSG_LENGTH(sizeof(struct rtmsg)),
[RTM_FAM(RTM_NEWRULE)] = NLMSG_LENGTH(sizeof(struct fib_rule_hdr)),
[RTM_FAM(RTM_NEWQDISC)] = NLMSG_LENGTH(sizeof(struct tcmsg)),
[RTM_FAM(RTM_NEWTCLASS)] = NLMSG_LENGTH(sizeof(struct tcmsg)),
[RTM_FAM(RTM_NEWTFILTER)] = NLMSG_LENGTH(sizeof(struct tcmsg)),
[RTM_FAM(RTM_NEWACTION)] = NLMSG_LENGTH(sizeof(struct tcamsg)),
[RTM_FAM(RTM_GETMULTICAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
[RTM_FAM(RTM_GETANYCAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
};
static const int rta_max[RTM_NR_FAMILIES] =
{
[RTM_FAM(RTM_NEWLINK)] = IFLA_MAX,
[RTM_FAM(RTM_NEWADDR)] = IFA_MAX,
[RTM_FAM(RTM_NEWROUTE)] = RTA_MAX,
[RTM_FAM(RTM_NEWRULE)] = FRA_MAX,
[RTM_FAM(RTM_NEWQDISC)] = TCA_MAX,
[RTM_FAM(RTM_NEWTCLASS)] = TCA_MAX,
[RTM_FAM(RTM_NEWTFILTER)] = TCA_MAX,
[RTM_FAM(RTM_NEWACTION)] = TCAA_MAX,
};
void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
{
struct rtattr *rta;
int size = RTA_LENGTH(attrlen);
rta = (struct rtattr *)skb_put(skb, RTA_ALIGN(size));
rta->rta_type = attrtype;
rta->rta_len = size;
memcpy(RTA_DATA(rta), data, attrlen);
memset(RTA_DATA(rta) + attrlen, 0, RTA_ALIGN(size) - size);
}
EXPORT_SYMBOL(__rta_fill);
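/*
* Worked example of the size arithmetic above (assuming the usual 4-byte
* struct rtattr header): for a 6-byte payload such as a MAC address,
* RTA_LENGTH(6) = RTA_ALIGN(sizeof(struct rtattr)) + 6 = 4 + 6 = 10, and
* skb_put() reserves RTA_ALIGN(10) = 12 bytes; the final memset() zeroes
* the 2 trailing padding bytes so no uninitialised data reaches userspace.
*/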
int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group, int echo)
{
struct sock *rtnl = net->rtnl;
int err = 0;
NETLINK_CB(skb).dst_group = group;
if (echo)
atomic_inc(&skb->users);
netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
if (echo)
err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
return err;
}
int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
{
struct sock *rtnl = net->rtnl;
return nlmsg_unicast(rtnl, skb, pid);
}
EXPORT_SYMBOL(rtnl_unicast);
void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
struct nlmsghdr *nlh, gfp_t flags)
{
struct sock *rtnl = net->rtnl;
int report = 0;
if (nlh)
report = nlmsg_report(nlh);
nlmsg_notify(rtnl, skb, pid, group, report, flags);
}
EXPORT_SYMBOL(rtnl_notify);
void rtnl_set_sk_err(struct net *net, u32 group, int error)
{
struct sock *rtnl = net->rtnl;
netlink_set_err(rtnl, 0, group, error);
}
EXPORT_SYMBOL(rtnl_set_sk_err);
int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
{
struct nlattr *mx;
int i, valid = 0;
mx = nla_nest_start(skb, RTA_METRICS);
if (mx == NULL)
return -ENOBUFS;
for (i = 0; i < RTAX_MAX; i++) {
if (metrics[i]) {
valid++;
NLA_PUT_U32(skb, i+1, metrics[i]);
}
}
if (!valid) {
nla_nest_cancel(skb, mx);
return 0;
}
return nla_nest_end(skb, mx);
nla_put_failure:
nla_nest_cancel(skb, mx);
return -EMSGSIZE;
}
EXPORT_SYMBOL(rtnetlink_put_metrics);
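/*
* Usage sketch (illustrative; the MTU value is hypothetical): a route
* dumper fills a 0-based metrics array and lets this helper emit the
* nested RTA_METRICS attribute; the attribute type i+1 above compensates
* for the 1-based RTAX_* numbering.
*
*	u32 metrics[RTAX_MAX] = {};
*	metrics[RTAX_MTU - 1] = 1500;
*	if (rtnetlink_put_metrics(skb, metrics) < 0)
*		goto nla_put_failure;
*/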
int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
u32 ts, u32 tsage, long expires, u32 error)
{
struct rta_cacheinfo ci = {
.rta_lastuse = jiffies_to_clock_t(jiffies - dst->lastuse),
.rta_used = dst->__use,
.rta_clntref = atomic_read(&(dst->__refcnt)),
.rta_error = error,
.rta_id = id,
.rta_ts = ts,
.rta_tsage = tsage,
};
if (expires)
ci.rta_expires = jiffies_to_clock_t(expires);
return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
static void set_operstate(struct net_device *dev, unsigned char transition)
{
unsigned char operstate = dev->operstate;
switch (transition) {
case IF_OPER_UP:
if ((operstate == IF_OPER_DORMANT ||
operstate == IF_OPER_UNKNOWN) &&
!netif_dormant(dev))
operstate = IF_OPER_UP;
break;
case IF_OPER_DORMANT:
if (operstate == IF_OPER_UP ||
operstate == IF_OPER_UNKNOWN)
operstate = IF_OPER_DORMANT;
break;
}
if (dev->operstate != operstate) {
write_lock_bh(&dev_base_lock);
dev->operstate = operstate;
write_unlock_bh(&dev_base_lock);
netdev_state_change(dev);
}
}
static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
const struct ifinfomsg *ifm)
{
unsigned int flags = ifm->ifi_flags;
/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
if (ifm->ifi_change)
flags = (flags & ifm->ifi_change) |
(dev->flags & ~ifm->ifi_change);
return flags;
}
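/*
* Worked example for the combine above: with dev->flags = IFF_UP |
* IFF_MULTICAST, a request carrying ifi_flags = 0 and ifi_change = IFF_UP
* clears only IFF_UP and keeps IFF_MULTICAST from the device, while
* ifi_change == 0 makes ifi_flags replace the device flags wholesale
* (the historic "treated as ~0" behaviour).
*/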
static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
const struct rtnl_link_stats64 *b)
{
a->rx_packets = b->rx_packets;
a->tx_packets = b->tx_packets;
a->rx_bytes = b->rx_bytes;
a->tx_bytes = b->tx_bytes;
a->rx_errors = b->rx_errors;
a->tx_errors = b->tx_errors;
a->rx_dropped = b->rx_dropped;
a->tx_dropped = b->tx_dropped;
a->multicast = b->multicast;
a->collisions = b->collisions;
a->rx_length_errors = b->rx_length_errors;
a->rx_over_errors = b->rx_over_errors;
a->rx_crc_errors = b->rx_crc_errors;
a->rx_frame_errors = b->rx_frame_errors;
a->rx_fifo_errors = b->rx_fifo_errors;
a->rx_missed_errors = b->rx_missed_errors;
a->tx_aborted_errors = b->tx_aborted_errors;
a->tx_carrier_errors = b->tx_carrier_errors;
a->tx_fifo_errors = b->tx_fifo_errors;
a->tx_heartbeat_errors = b->tx_heartbeat_errors;
a->tx_window_errors = b->tx_window_errors;
a->rx_compressed = b->rx_compressed;
a->tx_compressed = b->tx_compressed;
}
static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b)
{
memcpy(v, b, sizeof(*b));
}
/* All VF info */
static inline int rtnl_vfinfo_size(const struct net_device *dev,
u32 ext_filter_mask)
{
if (dev->dev.parent && dev_is_pci(dev->dev.parent) &&
(ext_filter_mask & RTEXT_FILTER_VF)) {
int num_vfs = dev_num_vf(dev->dev.parent);
size_t size = nla_total_size(sizeof(struct nlattr));
size += nla_total_size(num_vfs * sizeof(struct nlattr));
size += num_vfs *
(nla_total_size(sizeof(struct ifla_vf_mac)) +
nla_total_size(sizeof(struct ifla_vf_vlan)) +
nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
nla_total_size(sizeof(struct ifla_vf_spoofchk)));
return size;
} else
return 0;
}
static size_t rtnl_port_size(const struct net_device *dev)
{
size_t port_size = nla_total_size(4) /* PORT_VF */
+ nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */
+ nla_total_size(sizeof(struct ifla_port_vsi))
/* PORT_VSI_TYPE */
+ nla_total_size(PORT_UUID_MAX) /* PORT_INSTANCE_UUID */
+ nla_total_size(PORT_UUID_MAX) /* PORT_HOST_UUID */
+ nla_total_size(1) /* PORT_VDP_REQUEST */
+ nla_total_size(2); /* PORT_VDP_RESPONSE */
size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
+ port_size;
size_t port_self_size = nla_total_size(sizeof(struct nlattr))
+ port_size;
if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent)
return 0;
if (dev_num_vf(dev->dev.parent))
return port_self_size + vf_ports_size +
vf_port_size * dev_num_vf(dev->dev.parent);
else
return port_self_size;
}
static noinline size_t if_nlmsg_size(const struct net_device *dev,
u32 ext_filter_mask)
{
return NLMSG_ALIGN(sizeof(struct ifinfomsg))
+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
+ nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
+ nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
+ nla_total_size(sizeof(struct rtnl_link_ifmap))
+ nla_total_size(sizeof(struct rtnl_link_stats))
+ nla_total_size(sizeof(struct rtnl_link_stats64))
+ nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
+ nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
+ nla_total_size(4) /* IFLA_TXQLEN */
+ nla_total_size(4) /* IFLA_WEIGHT */
+ nla_total_size(4) /* IFLA_MTU */
+ nla_total_size(4) /* IFLA_LINK */
+ nla_total_size(4) /* IFLA_MASTER */
+ nla_total_size(1) /* IFLA_OPERSTATE */
+ nla_total_size(1) /* IFLA_LINKMODE */
+ nla_total_size(ext_filter_mask
& RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
+ rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
+ rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
+ rtnl_link_get_size(dev) /* IFLA_LINKINFO */
+ rtnl_link_get_af_size(dev); /* IFLA_AF_SPEC */
}
static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
{
struct nlattr *vf_ports;
struct nlattr *vf_port;
int vf;
int err;
vf_ports = nla_nest_start(skb, IFLA_VF_PORTS);
if (!vf_ports)
return -EMSGSIZE;
for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
vf_port = nla_nest_start(skb, IFLA_VF_PORT);
if (!vf_port)
goto nla_put_failure;
NLA_PUT_U32(skb, IFLA_PORT_VF, vf);
err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
if (err == -EMSGSIZE)
goto nla_put_failure;
if (err) {
nla_nest_cancel(skb, vf_port);
continue;
}
nla_nest_end(skb, vf_port);
}
nla_nest_end(skb, vf_ports);
return 0;
nla_put_failure:
nla_nest_cancel(skb, vf_ports);
return -EMSGSIZE;
}
static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
{
struct nlattr *port_self;
int err;
port_self = nla_nest_start(skb, IFLA_PORT_SELF);
if (!port_self)
return -EMSGSIZE;
err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
if (err) {
nla_nest_cancel(skb, port_self);
return (err == -EMSGSIZE) ? err : 0;
}
nla_nest_end(skb, port_self);
return 0;
}
static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev)
{
int err;
if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent)
return 0;
err = rtnl_port_self_fill(skb, dev);
if (err)
return err;
if (dev_num_vf(dev->dev.parent)) {
err = rtnl_vf_ports_fill(skb, dev);
if (err)
return err;
}
return 0;
}
static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
int type, u32 pid, u32 seq, u32 change,
unsigned int flags, u32 ext_filter_mask)
{
struct ifinfomsg *ifm;
struct nlmsghdr *nlh;
struct rtnl_link_stats64 temp;
const struct rtnl_link_stats64 *stats;
struct nlattr *attr, *af_spec;
struct rtnl_af_ops *af_ops;
ASSERT_RTNL();
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
if (nlh == NULL)
return -EMSGSIZE;
ifm = nlmsg_data(nlh);
ifm->ifi_family = AF_UNSPEC;
ifm->__ifi_pad = 0;
ifm->ifi_type = dev->type;
ifm->ifi_index = dev->ifindex;
ifm->ifi_flags = dev_get_flags(dev);
ifm->ifi_change = change;
NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
NLA_PUT_U32(skb, IFLA_TXQLEN, dev->tx_queue_len);
NLA_PUT_U8(skb, IFLA_OPERSTATE,
netif_running(dev) ? dev->operstate : IF_OPER_DOWN);
NLA_PUT_U8(skb, IFLA_LINKMODE, dev->link_mode);
NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
NLA_PUT_U32(skb, IFLA_GROUP, dev->group);
if (dev->ifindex != dev->iflink)
NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);
if (dev->master)
NLA_PUT_U32(skb, IFLA_MASTER, dev->master->ifindex);
if (dev->qdisc)
NLA_PUT_STRING(skb, IFLA_QDISC, dev->qdisc->ops->id);
if (dev->ifalias)
NLA_PUT_STRING(skb, IFLA_IFALIAS, dev->ifalias);
if (1) {
struct rtnl_link_ifmap map = {
.mem_start = dev->mem_start,
.mem_end = dev->mem_end,
.base_addr = dev->base_addr,
.irq = dev->irq,
.dma = dev->dma,
.port = dev->if_port,
};
NLA_PUT(skb, IFLA_MAP, sizeof(map), &map);
}
if (dev->addr_len) {
NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);
NLA_PUT(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast);
}
attr = nla_reserve(skb, IFLA_STATS,
sizeof(struct rtnl_link_stats));
if (attr == NULL)
goto nla_put_failure;
stats = dev_get_stats(dev, &temp);
copy_rtnl_link_stats(nla_data(attr), stats);
attr = nla_reserve(skb, IFLA_STATS64,
sizeof(struct rtnl_link_stats64));
if (attr == NULL)
goto nla_put_failure;
copy_rtnl_link_stats64(nla_data(attr), stats);
if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF))
NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent));
if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent
&& (ext_filter_mask & RTEXT_FILTER_VF)) {
int i;
struct nlattr *vfinfo, *vf;
int num_vfs = dev_num_vf(dev->dev.parent);
vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
if (!vfinfo)
goto nla_put_failure;
for (i = 0; i < num_vfs; i++) {
struct ifla_vf_info ivi;
struct ifla_vf_mac vf_mac;
struct ifla_vf_vlan vf_vlan;
struct ifla_vf_tx_rate vf_tx_rate;
struct ifla_vf_spoofchk vf_spoofchk;
/*
* Not all SR-IOV capable drivers support the
* spoofcheck query. Preset to -1 so the user
* space tool can detect that the driver didn't
* report anything.
*/
ivi.spoofchk = -1;
if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
break;
vf_mac.vf =
vf_vlan.vf =
vf_tx_rate.vf =
vf_spoofchk.vf = ivi.vf;
memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
vf_vlan.vlan = ivi.vlan;
vf_vlan.qos = ivi.qos;
vf_tx_rate.rate = ivi.tx_rate;
vf_spoofchk.setting = ivi.spoofchk;
vf = nla_nest_start(skb, IFLA_VF_INFO);
if (!vf) {
nla_nest_cancel(skb, vfinfo);
goto nla_put_failure;
}
NLA_PUT(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac);
NLA_PUT(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan);
NLA_PUT(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
&vf_tx_rate);
NLA_PUT(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
&vf_spoofchk);
nla_nest_end(skb, vf);
}
nla_nest_end(skb, vfinfo);
}
if (rtnl_port_fill(skb, dev))
goto nla_put_failure;
if (dev->rtnl_link_ops) {
if (rtnl_link_fill(skb, dev) < 0)
goto nla_put_failure;
}
if (!(af_spec = nla_nest_start(skb, IFLA_AF_SPEC)))
goto nla_put_failure;
list_for_each_entry(af_ops, &rtnl_af_ops, list) {
if (af_ops->fill_link_af) {
struct nlattr *af;
int err;
if (!(af = nla_nest_start(skb, af_ops->family)))
goto nla_put_failure;
err = af_ops->fill_link_af(skb, dev);
/*
* Caller may return ENODATA to indicate that there
* was no data to be dumped. This is not an error, it
* means we should trim the attribute header and
* continue.
*/
if (err == -ENODATA)
nla_nest_cancel(skb, af);
else if (err < 0)
goto nla_put_failure;
nla_nest_end(skb, af);
}
}
nla_nest_end(skb, af_spec);
return nlmsg_end(skb, nlh);
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
int h, s_h;
int idx = 0, s_idx;
struct net_device *dev;
struct hlist_head *head;
struct hlist_node *node;
struct nlattr *tb[IFLA_MAX+1];
u32 ext_filter_mask = 0;
s_h = cb->args[0];
s_idx = cb->args[1];
rcu_read_lock();
cb->seq = net->dev_base_seq;
if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
ifla_policy) >= 0) {
if (tb[IFLA_EXT_MASK])
ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
}
for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
idx = 0;
head = &net->dev_index_head[h];
hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
if (idx < s_idx)
goto cont;
if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, 0,
NLM_F_MULTI,
ext_filter_mask) <= 0)
goto out;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
idx++;
}
}
out:
rcu_read_unlock();
cb->args[1] = idx;
cb->args[0] = h;
return skb->len;
}
const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 },
[IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
[IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
[IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) },
[IFLA_MTU] = { .type = NLA_U32 },
[IFLA_LINK] = { .type = NLA_U32 },
[IFLA_MASTER] = { .type = NLA_U32 },
[IFLA_TXQLEN] = { .type = NLA_U32 },
[IFLA_WEIGHT] = { .type = NLA_U32 },
[IFLA_OPERSTATE] = { .type = NLA_U8 },
[IFLA_LINKMODE] = { .type = NLA_U8 },
[IFLA_LINKINFO] = { .type = NLA_NESTED },
[IFLA_NET_NS_PID] = { .type = NLA_U32 },
[IFLA_NET_NS_FD] = { .type = NLA_U32 },
[IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 },
[IFLA_VFINFO_LIST] = { .type = NLA_NESTED },
[IFLA_VF_PORTS] = { .type = NLA_NESTED },
[IFLA_PORT_SELF] = { .type = NLA_NESTED },
[IFLA_AF_SPEC] = { .type = NLA_NESTED },
[IFLA_EXT_MASK] = { .type = NLA_U32 },
};
EXPORT_SYMBOL(ifla_policy);
static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
[IFLA_INFO_KIND] = { .type = NLA_STRING },
[IFLA_INFO_DATA] = { .type = NLA_NESTED },
};
static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = {
[IFLA_VF_INFO] = { .type = NLA_NESTED },
};
static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
[IFLA_VF_MAC] = { .type = NLA_BINARY,
.len = sizeof(struct ifla_vf_mac) },
[IFLA_VF_VLAN] = { .type = NLA_BINARY,
.len = sizeof(struct ifla_vf_vlan) },
[IFLA_VF_TX_RATE] = { .type = NLA_BINARY,
.len = sizeof(struct ifla_vf_tx_rate) },
[IFLA_VF_SPOOFCHK] = { .type = NLA_BINARY,
.len = sizeof(struct ifla_vf_spoofchk) },
};
static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
[IFLA_PORT_VF] = { .type = NLA_U32 },
[IFLA_PORT_PROFILE] = { .type = NLA_STRING,
.len = PORT_PROFILE_MAX },
[IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY,
.len = sizeof(struct ifla_port_vsi)},
[IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
.len = PORT_UUID_MAX },
[IFLA_PORT_HOST_UUID] = { .type = NLA_STRING,
.len = PORT_UUID_MAX },
[IFLA_PORT_REQUEST] = { .type = NLA_U8, },
[IFLA_PORT_RESPONSE] = { .type = NLA_U16, },
};
struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
{
struct net *net;
/* Examine the link attributes and figure out which
* network namespace we are talking about.
*/
if (tb[IFLA_NET_NS_PID])
net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
else if (tb[IFLA_NET_NS_FD])
net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
else
net = get_net(src_net);
return net;
}
EXPORT_SYMBOL(rtnl_link_get_net);
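/*
* Usage sketch (illustrative): callers must balance the reference taken
* here with put_net(), exactly as do_setlink() below does:
*
*	struct net *net = rtnl_link_get_net(dev_net(dev), tb);
*	if (IS_ERR(net))
*		return PTR_ERR(net);
*	err = dev_change_net_namespace(dev, net, ifname);
*	put_net(net);
*/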
static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
{
if (dev) {
if (tb[IFLA_ADDRESS] &&
nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
return -EINVAL;
if (tb[IFLA_BROADCAST] &&
nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
return -EINVAL;
}
if (tb[IFLA_AF_SPEC]) {
struct nlattr *af;
int rem, err;
nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
const struct rtnl_af_ops *af_ops;
if (!(af_ops = rtnl_af_lookup(nla_type(af))))
return -EAFNOSUPPORT;
if (!af_ops->set_link_af)
return -EOPNOTSUPP;
if (af_ops->validate_link_af) {
err = af_ops->validate_link_af(dev, af);
if (err < 0)
return err;
}
}
}
return 0;
}
static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
{
int rem, err = -EINVAL;
struct nlattr *vf;
const struct net_device_ops *ops = dev->netdev_ops;
nla_for_each_nested(vf, attr, rem) {
switch (nla_type(vf)) {
case IFLA_VF_MAC: {
struct ifla_vf_mac *ivm;
ivm = nla_data(vf);
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_mac)
err = ops->ndo_set_vf_mac(dev, ivm->vf,
ivm->mac);
break;
}
case IFLA_VF_VLAN: {
struct ifla_vf_vlan *ivv;
ivv = nla_data(vf);
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_vlan)
err = ops->ndo_set_vf_vlan(dev, ivv->vf,
ivv->vlan,
ivv->qos);
break;
}
case IFLA_VF_TX_RATE: {
struct ifla_vf_tx_rate *ivt;
ivt = nla_data(vf);
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_tx_rate)
err = ops->ndo_set_vf_tx_rate(dev, ivt->vf,
ivt->rate);
break;
}
case IFLA_VF_SPOOFCHK: {
struct ifla_vf_spoofchk *ivs;
ivs = nla_data(vf);
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_spoofchk)
err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
ivs->setting);
break;
}
default:
err = -EINVAL;
break;
}
if (err)
break;
}
return err;
}
static int do_set_master(struct net_device *dev, int ifindex)
{
struct net_device *master_dev;
const struct net_device_ops *ops;
int err;
if (dev->master) {
if (dev->master->ifindex == ifindex)
return 0;
ops = dev->master->netdev_ops;
if (ops->ndo_del_slave) {
err = ops->ndo_del_slave(dev->master, dev);
if (err)
return err;
} else {
return -EOPNOTSUPP;
}
}
if (ifindex) {
master_dev = __dev_get_by_index(dev_net(dev), ifindex);
if (!master_dev)
return -EINVAL;
ops = master_dev->netdev_ops;
if (ops->ndo_add_slave) {
err = ops->ndo_add_slave(master_dev, dev);
if (err)
return err;
} else {
return -EOPNOTSUPP;
}
}
return 0;
}
static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
struct nlattr **tb, char *ifname, int modified)
{
const struct net_device_ops *ops = dev->netdev_ops;
int send_addr_notify = 0;
int err;
if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) {
struct net *net = rtnl_link_get_net(dev_net(dev), tb);
if (IS_ERR(net)) {
err = PTR_ERR(net);
goto errout;
}
err = dev_change_net_namespace(dev, net, ifname);
put_net(net);
if (err)
goto errout;
modified = 1;
}
if (tb[IFLA_MAP]) {
struct rtnl_link_ifmap *u_map;
struct ifmap k_map;
if (!ops->ndo_set_config) {
err = -EOPNOTSUPP;
goto errout;
}
if (!netif_device_present(dev)) {
err = -ENODEV;
goto errout;
}
u_map = nla_data(tb[IFLA_MAP]);
k_map.mem_start = (unsigned long) u_map->mem_start;
k_map.mem_end = (unsigned long) u_map->mem_end;
k_map.base_addr = (unsigned short) u_map->base_addr;
k_map.irq = (unsigned char) u_map->irq;
k_map.dma = (unsigned char) u_map->dma;
k_map.port = (unsigned char) u_map->port;
err = ops->ndo_set_config(dev, &k_map);
if (err < 0)
goto errout;
modified = 1;
}
if (tb[IFLA_ADDRESS]) {
struct sockaddr *sa;
int len;
if (!ops->ndo_set_mac_address) {
err = -EOPNOTSUPP;
goto errout;
}
if (!netif_device_present(dev)) {
err = -ENODEV;
goto errout;
}
len = sizeof(sa_family_t) + dev->addr_len;
sa = kmalloc(len, GFP_KERNEL);
if (!sa) {
err = -ENOMEM;
goto errout;
}
sa->sa_family = dev->type;
memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
dev->addr_len);
err = ops->ndo_set_mac_address(dev, sa);
kfree(sa);
if (err)
goto errout;
send_addr_notify = 1;
modified = 1;
}
if (tb[IFLA_MTU]) {
err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
if (err < 0)
goto errout;
modified = 1;
}
if (tb[IFLA_GROUP]) {
dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
modified = 1;
}
/*
* Interface selected by interface index but interface
* name provided implies that a name change has been
* requested.
*/
if (ifm->ifi_index > 0 && ifname[0]) {
err = dev_change_name(dev, ifname);
if (err < 0)
goto errout;
modified = 1;
}
if (tb[IFLA_IFALIAS]) {
err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
nla_len(tb[IFLA_IFALIAS]));
if (err < 0)
goto errout;
modified = 1;
}
if (tb[IFLA_BROADCAST]) {
nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
send_addr_notify = 1;
}
if (ifm->ifi_flags || ifm->ifi_change) {
err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
if (err < 0)
goto errout;
}
if (tb[IFLA_MASTER]) {
err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]));
if (err)
goto errout;
modified = 1;
}
if (tb[IFLA_TXQLEN])
dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
if (tb[IFLA_OPERSTATE])
set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
if (tb[IFLA_LINKMODE]) {
write_lock_bh(&dev_base_lock);
dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
write_unlock_bh(&dev_base_lock);
}
if (tb[IFLA_VFINFO_LIST]) {
struct nlattr *attr;
int rem;
nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
if (nla_type(attr) != IFLA_VF_INFO) {
err = -EINVAL;
goto errout;
}
err = do_setvfinfo(dev, attr);
if (err < 0)
goto errout;
modified = 1;
}
}
err = 0;
if (tb[IFLA_VF_PORTS]) {
struct nlattr *port[IFLA_PORT_MAX+1];
struct nlattr *attr;
int vf;
int rem;
err = -EOPNOTSUPP;
if (!ops->ndo_set_vf_port)
goto errout;
nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
if (nla_type(attr) != IFLA_VF_PORT)
continue;
err = nla_parse_nested(port, IFLA_PORT_MAX,
attr, ifla_port_policy);
if (err < 0)
goto errout;
if (!port[IFLA_PORT_VF]) {
err = -EOPNOTSUPP;
goto errout;
}
vf = nla_get_u32(port[IFLA_PORT_VF]);
err = ops->ndo_set_vf_port(dev, vf, port);
if (err < 0)
goto errout;
modified = 1;
}
}
err = 0;
if (tb[IFLA_PORT_SELF]) {
struct nlattr *port[IFLA_PORT_MAX+1];
err = nla_parse_nested(port, IFLA_PORT_MAX,
tb[IFLA_PORT_SELF], ifla_port_policy);
if (err < 0)
goto errout;
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_port)
err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
if (err < 0)
goto errout;
modified = 1;
}
if (tb[IFLA_AF_SPEC]) {
struct nlattr *af;
int rem;
nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
const struct rtnl_af_ops *af_ops;
if (!(af_ops = rtnl_af_lookup(nla_type(af))))
BUG();
err = af_ops->set_link_af(dev, af);
if (err < 0)
goto errout;
modified = 1;
}
}
err = 0;
errout:
if (err < 0 && modified && net_ratelimit())
printk(KERN_WARNING "A link change request failed with "
"some changes committed already. Interface %s may "
"have been left with an inconsistent configuration, "
"please check.\n", dev->name);
if (send_addr_notify)
call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
return err;
}
static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
struct net *net = sock_net(skb->sk);
struct ifinfomsg *ifm;
struct net_device *dev;
int err;
struct nlattr *tb[IFLA_MAX+1];
char ifname[IFNAMSIZ];
err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
if (err < 0)
goto errout;
if (tb[IFLA_IFNAME])
nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
else
ifname[0] = '\0';
err = -EINVAL;
ifm = nlmsg_data(nlh);
if (ifm->ifi_index > 0)
dev = __dev_get_by_index(net, ifm->ifi_index);
else if (tb[IFLA_IFNAME])
dev = __dev_get_by_name(net, ifname);
else
goto errout;
if (dev == NULL) {
err = -ENODEV;
goto errout;
}
err = validate_linkmsg(dev, tb);
if (err < 0)
goto errout;
err = do_setlink(dev, ifm, tb, ifname, 0);
errout:
return err;
}
static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
struct net *net = sock_net(skb->sk);
const struct rtnl_link_ops *ops;
struct net_device *dev;
struct ifinfomsg *ifm;
char ifname[IFNAMSIZ];
struct nlattr *tb[IFLA_MAX+1];
int err;
LIST_HEAD(list_kill);
err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
if (err < 0)
return err;
if (tb[IFLA_IFNAME])
nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
ifm = nlmsg_data(nlh);
if (ifm->ifi_index > 0)
dev = __dev_get_by_index(net, ifm->ifi_index);
else if (tb[IFLA_IFNAME])
dev = __dev_get_by_name(net, ifname);
else
return -EINVAL;
if (!dev)
return -ENODEV;
ops = dev->rtnl_link_ops;
if (!ops)
return -EOPNOTSUPP;
ops->dellink(dev, &list_kill);
unregister_netdevice_many(&list_kill);
list_del(&list_kill);
return 0;
}
int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
{
unsigned int old_flags;
int err;
old_flags = dev->flags;
if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
if (err < 0)
return err;
}
dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
__dev_notify_flags(dev, old_flags);
return 0;
}
EXPORT_SYMBOL(rtnl_configure_link);
struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[])
{
int err;
struct net_device *dev;
unsigned int num_queues = 1;
unsigned int real_num_queues = 1;
if (ops->get_tx_queues) {
err = ops->get_tx_queues(src_net, tb, &num_queues,
&real_num_queues);
if (err)
goto err;
}
err = -ENOMEM;
dev = alloc_netdev_mq(ops->priv_size, ifname, ops->setup, num_queues);
if (!dev)
goto err;
dev_net_set(dev, net);
dev->rtnl_link_ops = ops;
dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
if (tb[IFLA_MTU])
dev->mtu = nla_get_u32(tb[IFLA_MTU]);
if (tb[IFLA_ADDRESS])
memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
nla_len(tb[IFLA_ADDRESS]));
if (tb[IFLA_BROADCAST])
memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
nla_len(tb[IFLA_BROADCAST]));
if (tb[IFLA_TXQLEN])
dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
if (tb[IFLA_OPERSTATE])
set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
if (tb[IFLA_LINKMODE])
dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
if (tb[IFLA_GROUP])
dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
return dev;
err:
return ERR_PTR(err);
}
EXPORT_SYMBOL(rtnl_create_link);
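/*
* Usage sketch (illustrative; mirrors the rtnl_newlink() path below): a
* link type without a ->newlink() callback has its device allocated here
* and then registered directly:
*
*	dev = rtnl_create_link(net, dest_net, ifname, ops, tb);
*	if (IS_ERR(dev))
*		return PTR_ERR(dev);
*	err = register_netdevice(dev);
*	if (err < 0)
*		free_netdev(dev);
*/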
static int rtnl_group_changelink(struct net *net, int group,
struct ifinfomsg *ifm,
struct nlattr **tb)
{
struct net_device *dev;
int err;
for_each_netdev(net, dev) {
if (dev->group == group) {
err = do_setlink(dev, ifm, tb, NULL, 0);
if (err < 0)
return err;
}
}
return 0;
}
static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
struct net *net = sock_net(skb->sk);
const struct rtnl_link_ops *ops;
struct net_device *dev;
struct ifinfomsg *ifm;
char kind[MODULE_NAME_LEN];
char ifname[IFNAMSIZ];
struct nlattr *tb[IFLA_MAX+1];
struct nlattr *linkinfo[IFLA_INFO_MAX+1];
int err;
#ifdef CONFIG_MODULES
replay:
#endif
err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
if (err < 0)
return err;
if (tb[IFLA_IFNAME])
nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
else
ifname[0] = '\0';
ifm = nlmsg_data(nlh);
if (ifm->ifi_index > 0)
dev = __dev_get_by_index(net, ifm->ifi_index);
else {
if (ifname[0])
dev = __dev_get_by_name(net, ifname);
else
dev = NULL;
}
err = validate_linkmsg(dev, tb);
if (err < 0)
return err;
if (tb[IFLA_LINKINFO]) {
err = nla_parse_nested(linkinfo, IFLA_INFO_MAX,
tb[IFLA_LINKINFO], ifla_info_policy);
if (err < 0)
return err;
} else
memset(linkinfo, 0, sizeof(linkinfo));
if (linkinfo[IFLA_INFO_KIND]) {
nla_strlcpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
ops = rtnl_link_ops_get(kind);
} else {
kind[0] = '\0';
ops = NULL;
}
if (1) {
struct nlattr *attr[ops ? ops->maxtype + 1 : 0], **data = NULL;
struct net *dest_net;
if (ops) {
if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
err = nla_parse_nested(attr, ops->maxtype,
linkinfo[IFLA_INFO_DATA],
ops->policy);
if (err < 0)
return err;
data = attr;
}
if (ops->validate) {
err = ops->validate(tb, data);
if (err < 0)
return err;
}
}
if (dev) {
int modified = 0;
if (nlh->nlmsg_flags & NLM_F_EXCL)
return -EEXIST;
if (nlh->nlmsg_flags & NLM_F_REPLACE)
return -EOPNOTSUPP;
if (linkinfo[IFLA_INFO_DATA]) {
if (!ops || ops != dev->rtnl_link_ops ||
!ops->changelink)
return -EOPNOTSUPP;
err = ops->changelink(dev, tb, data);
if (err < 0)
return err;
modified = 1;
}
return do_setlink(dev, ifm, tb, ifname, modified);
}
if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
return rtnl_group_changelink(net,
nla_get_u32(tb[IFLA_GROUP]),
ifm, tb);
return -ENODEV;
}
if (ifm->ifi_index)
return -EOPNOTSUPP;
if (tb[IFLA_MAP] || tb[IFLA_MASTER] || tb[IFLA_PROTINFO])
return -EOPNOTSUPP;
if (!ops) {
#ifdef CONFIG_MODULES
if (kind[0]) {
__rtnl_unlock();
request_module("rtnl-link-%s", kind);
rtnl_lock();
ops = rtnl_link_ops_get(kind);
if (ops)
goto replay;
}
#endif
return -EOPNOTSUPP;
}
if (!ifname[0])
snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
dest_net = rtnl_link_get_net(net, tb);
if (IS_ERR(dest_net))
return PTR_ERR(dest_net);
dev = rtnl_create_link(net, dest_net, ifname, ops, tb);
if (IS_ERR(dev))
err = PTR_ERR(dev);
else if (ops->newlink)
err = ops->newlink(net, dev, tb, data);
else
err = register_netdevice(dev);
if (err < 0 && !IS_ERR(dev))
free_netdev(dev);
if (err < 0)
goto out;
err = rtnl_configure_link(dev, ifm);
if (err < 0)
unregister_netdevice(dev);
out:
put_net(dest_net);
return err;
}
}
static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
{
struct net *net = sock_net(skb->sk);
struct ifinfomsg *ifm;
char ifname[IFNAMSIZ];
struct nlattr *tb[IFLA_MAX+1];
struct net_device *dev = NULL;
struct sk_buff *nskb;
int err;
u32 ext_filter_mask = 0;
err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
if (err < 0)
return err;
if (tb[IFLA_IFNAME])
nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
if (tb[IFLA_EXT_MASK])
ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
ifm = nlmsg_data(nlh);
if (ifm->ifi_index > 0)
dev = __dev_get_by_index(net, ifm->ifi_index);
else if (tb[IFLA_IFNAME])
dev = __dev_get_by_name(net, ifname);
else
return -EINVAL;
if (dev == NULL)
return -ENODEV;
nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
if (nskb == NULL)
return -ENOBUFS;
err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).pid,
nlh->nlmsg_seq, 0, 0, ext_filter_mask);
if (err < 0) {
/* -EMSGSIZE implies BUG in if_nlmsg_size */
WARN_ON(err == -EMSGSIZE);
kfree_skb(nskb);
} else
err = rtnl_unicast(nskb, net, NETLINK_CB(skb).pid);
return err;
}
static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
struct net_device *dev;
struct nlattr *tb[IFLA_MAX+1];
u32 ext_filter_mask = 0;
u16 min_ifinfo_dump_size = 0;
if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
ifla_policy) >= 0) {
if (tb[IFLA_EXT_MASK])
ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
}
if (!ext_filter_mask)
return NLMSG_GOODSIZE;
/*
* traverse the list of net devices and compute the minimum
* required dump buffer size: the largest if_nlmsg_size() over
* all devices, based upon the filter mask.
*/
list_for_each_entry(dev, &net->dev_base_head, dev_list) {
min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size,
if_nlmsg_size(dev,
ext_filter_mask));
}
return min_ifinfo_dump_size;
}
static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
{
int idx;
int s_idx = cb->family;
if (s_idx == 0)
s_idx = 1;
for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
int type = cb->nlh->nlmsg_type-RTM_BASE;
if (idx < s_idx || idx == PF_PACKET)
continue;
if (rtnl_msg_handlers[idx] == NULL ||
rtnl_msg_handlers[idx][type].dumpit == NULL)
continue;
if (idx > s_idx)
memset(&cb->args[0], 0, sizeof(cb->args));
if (rtnl_msg_handlers[idx][type].dumpit(skb, cb))
break;
}
cb->family = idx;
return skb->len;
}
void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
{
struct net *net = dev_net(dev);
struct sk_buff *skb;
int err = -ENOBUFS;
size_t if_info_size;
skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), GFP_KERNEL);
if (skb == NULL)
goto errout;
err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0, 0);
if (err < 0) {
/* -EMSGSIZE implies BUG in if_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
goto errout;
}
rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
return;
errout:
if (err < 0)
rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}
/* Protected by RTNL semaphore. */
static struct rtattr **rta_buf;
static int rtattr_max;
/* Process one rtnetlink message. */
static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
rtnl_doit_func doit;
int sz_idx, kind;
int min_len;
int family;
int type;
int err;
type = nlh->nlmsg_type;
if (type > RTM_MAX)
return -EOPNOTSUPP;
type -= RTM_BASE;
/* All the messages must have at least 1 byte length */
if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(struct rtgenmsg)))
return 0;
family = ((struct rtgenmsg *)NLMSG_DATA(nlh))->rtgen_family;
sz_idx = type>>2;
kind = type&3;
if (kind != 2 && !capable(CAP_NET_ADMIN))
return -EPERM;
if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
struct sock *rtnl;
rtnl_dumpit_func dumpit;
rtnl_calcit_func calcit;
u16 min_dump_alloc = 0;
dumpit = rtnl_get_dumpit(family, type);
if (dumpit == NULL)
return -EOPNOTSUPP;
calcit = rtnl_get_calcit(family, type);
if (calcit)
min_dump_alloc = calcit(skb, nlh);
__rtnl_unlock();
rtnl = net->rtnl;
{
struct netlink_dump_control c = {
.dump = dumpit,
.min_dump_alloc = min_dump_alloc,
};
err = netlink_dump_start(rtnl, skb, nlh, &c);
}
rtnl_lock();
return err;
}
memset(rta_buf, 0, (rtattr_max * sizeof(struct rtattr *)));
min_len = rtm_min[sz_idx];
if (nlh->nlmsg_len < min_len)
return -EINVAL;
if (nlh->nlmsg_len > min_len) {
int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len);
struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len);
while (RTA_OK(attr, attrlen)) {
unsigned flavor = attr->rta_type;
if (flavor) {
if (flavor > rta_max[sz_idx])
return -EINVAL;
rta_buf[flavor-1] = attr;
}
attr = RTA_NEXT(attr, attrlen);
}
}
doit = rtnl_get_doit(family, type);
if (doit == NULL)
return -EOPNOTSUPP;
return doit(skb, nlh, (void *)&rta_buf[0]);
}
static void rtnetlink_rcv(struct sk_buff *skb)
{
rtnl_lock();
netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
rtnl_unlock();
}
static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct net_device *dev = ptr;
switch (event) {
case NETDEV_UP:
case NETDEV_DOWN:
case NETDEV_PRE_UP:
case NETDEV_POST_INIT:
case NETDEV_REGISTER:
case NETDEV_CHANGE:
case NETDEV_PRE_TYPE_CHANGE:
case NETDEV_GOING_DOWN:
case NETDEV_UNREGISTER:
case NETDEV_UNREGISTER_BATCH:
case NETDEV_RELEASE:
case NETDEV_JOIN:
break;
default:
rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
break;
}
return NOTIFY_DONE;
}
static struct notifier_block rtnetlink_dev_notifier = {
.notifier_call = rtnetlink_event,
};
static int __net_init rtnetlink_net_init(struct net *net)
{
struct sock *sk;
sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX,
rtnetlink_rcv, &rtnl_mutex, THIS_MODULE);
if (!sk)
return -ENOMEM;
net->rtnl = sk;
return 0;
}
static void __net_exit rtnetlink_net_exit(struct net *net)
{
netlink_kernel_release(net->rtnl);
net->rtnl = NULL;
}
static struct pernet_operations rtnetlink_net_ops = {
.init = rtnetlink_net_init,
.exit = rtnetlink_net_exit,
};
void __init rtnetlink_init(void)
{
int i;
rtattr_max = 0;
for (i = 0; i < ARRAY_SIZE(rta_max); i++)
if (rta_max[i] > rtattr_max)
rtattr_max = rta_max[i];
rta_buf = kmalloc(rtattr_max * sizeof(struct rtattr *), GFP_KERNEL);
if (!rta_buf)
panic("rtnetlink_init: cannot allocate rta_buf\n");
if (register_pernet_subsys(&rtnetlink_net_ops))
panic("rtnetlink_init: cannot initialize rtnetlink\n");
netlink_set_nonroot(NETLINK_ROUTE, NL_NONROOT_RECV);
register_netdevice_notifier(&rtnetlink_dev_notifier);
rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
rtnl_dump_ifinfo, rtnl_calcit);
rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, NULL);
rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, NULL);
rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, NULL);
rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, NULL);
rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, NULL);
}
| gpl-2.0 |
Elite-Kernels/elite_bullhead | net/bridge/br_fdb.c | 1864 | 19883 | /*
* Forwarding database
* Linux ethernet bridge
*
* Authors:
* Lennert Buytenhek <buytenh@gnu.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/times.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <asm/unaligned.h>
#include <linux/if_vlan.h>
#include "br_private.h"
static struct kmem_cache *br_fdb_cache __read_mostly;
static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
const unsigned char *addr, u16 vid);
static void fdb_notify(struct net_bridge *br,
const struct net_bridge_fdb_entry *, int);
static u32 fdb_salt __read_mostly;
int __init br_fdb_init(void)
{
br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
sizeof(struct net_bridge_fdb_entry),
0,
SLAB_HWCACHE_ALIGN, NULL);
if (!br_fdb_cache)
return -ENOMEM;
get_random_bytes(&fdb_salt, sizeof(fdb_salt));
return 0;
}
void br_fdb_fini(void)
{
kmem_cache_destroy(br_fdb_cache);
}
/* if topology_changing then use forward_delay (default 15 sec)
* otherwise keep longer (default 5 minutes)
*/
static inline unsigned long hold_time(const struct net_bridge *br)
{
return br->topology_change ? br->forward_delay : br->ageing_time;
}
static inline int has_expired(const struct net_bridge *br,
const struct net_bridge_fdb_entry *fdb)
{
return !fdb->is_static &&
time_before_eq(fdb->updated + hold_time(br), jiffies);
}
static inline int br_mac_hash(const unsigned char *mac, __u16 vid)
{
/* use 1 byte of OUI and 3 bytes of NIC */
u32 key = get_unaligned((u32 *)(mac + 2));
return jhash_2words(key, vid, fdb_salt) & (BR_HASH_SIZE - 1);
}
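/*
* Worked example for the hash above: for MAC 00:11:22:33:44:55 the key is
* the unaligned 32-bit load at mac + 2, i.e. bytes 22:33:44:55 (the last
* OUI byte plus the 3-byte NIC part), mixed with the VLAN id and the
* boot-time fdb_salt, then masked down to one of BR_HASH_SIZE buckets.
*/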
static void fdb_rcu_free(struct rcu_head *head)
{
struct net_bridge_fdb_entry *ent
= container_of(head, struct net_bridge_fdb_entry, rcu);
kmem_cache_free(br_fdb_cache, ent);
}
static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
{
hlist_del_rcu(&f->hlist);
fdb_notify(br, f, RTM_DELNEIGH);
call_rcu(&f->rcu, fdb_rcu_free);
}
void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
{
struct net_bridge *br = p->br;
bool no_vlan = (nbp_get_vlan_info(p) == NULL) ? true : false;
int i;
spin_lock_bh(&br->hash_lock);
/* Search all chains since old address/hash is unknown */
for (i = 0; i < BR_HASH_SIZE; i++) {
struct hlist_node *h;
hlist_for_each(h, &br->hash[i]) {
struct net_bridge_fdb_entry *f;
f = hlist_entry(h, struct net_bridge_fdb_entry, hlist);
if (f->dst == p && f->is_local) {
/* maybe another port has same hw addr? */
struct net_bridge_port *op;
u16 vid = f->vlan_id;
list_for_each_entry(op, &br->port_list, list) {
if (op != p &&
ether_addr_equal(op->dev->dev_addr,
f->addr.addr) &&
nbp_vlan_find(op, vid)) {
f->dst = op;
goto insert;
}
}
/* delete old one */
fdb_delete(br, f);
insert:
/* insert new address, may fail if invalid
* address or dup.
*/
fdb_insert(br, p, newaddr, vid);
/* if this port has no vlan information
* configured, we can safely be done at
* this point.
*/
if (no_vlan)
goto done;
}
}
}
done:
spin_unlock_bh(&br->hash_lock);
}
void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
{
struct net_bridge_fdb_entry *f;
struct net_port_vlans *pv;
u16 vid = 0;
/* If old entry was unassociated with any port, then delete it. */
f = __br_fdb_get(br, br->dev->dev_addr, 0);
if (f && f->is_local && !f->dst)
fdb_delete(br, f);
fdb_insert(br, NULL, newaddr, 0);
/* Now remove and add entries for every VLAN configured on the
* bridge. This function runs under RTNL so the bitmap will not
* change from under us.
*/
pv = br_get_vlan_info(br);
if (!pv)
return;
for_each_set_bit_from(vid, pv->vlan_bitmap, VLAN_N_VID) {
f = __br_fdb_get(br, br->dev->dev_addr, vid);
if (f && f->is_local && !f->dst)
fdb_delete(br, f);
fdb_insert(br, NULL, newaddr, vid);
}
}
void br_fdb_cleanup(unsigned long _data)
{
struct net_bridge *br = (struct net_bridge *)_data;
unsigned long delay = hold_time(br);
unsigned long next_timer = jiffies + br->ageing_time;
int i;
spin_lock(&br->hash_lock);
for (i = 0; i < BR_HASH_SIZE; i++) {
struct net_bridge_fdb_entry *f;
struct hlist_node *n;
hlist_for_each_entry_safe(f, n, &br->hash[i], hlist) {
unsigned long this_timer;
if (f->is_static)
continue;
this_timer = f->updated + delay;
if (time_before_eq(this_timer, jiffies))
fdb_delete(br, f);
else if (time_before(this_timer, next_timer))
next_timer = this_timer;
}
}
spin_unlock(&br->hash_lock);
mod_timer(&br->gc_timer, round_jiffies_up(next_timer));
}
/* Completely flush all dynamic entries in forwarding database. */
void br_fdb_flush(struct net_bridge *br)
{
int i;
spin_lock_bh(&br->hash_lock);
for (i = 0; i < BR_HASH_SIZE; i++) {
struct net_bridge_fdb_entry *f;
struct hlist_node *n;
hlist_for_each_entry_safe(f, n, &br->hash[i], hlist) {
if (!f->is_static)
fdb_delete(br, f);
}
}
spin_unlock_bh(&br->hash_lock);
}
/* Flush all entries referring to a specific port.
* if do_all is set also flush static entries
*/
void br_fdb_delete_by_port(struct net_bridge *br,
const struct net_bridge_port *p,
int do_all)
{
int i;
spin_lock_bh(&br->hash_lock);
for (i = 0; i < BR_HASH_SIZE; i++) {
struct hlist_node *h, *g;
hlist_for_each_safe(h, g, &br->hash[i]) {
struct net_bridge_fdb_entry *f
= hlist_entry(h, struct net_bridge_fdb_entry, hlist);
if (f->dst != p)
continue;
if (f->is_static && !do_all)
continue;
/*
* if multiple ports all have the same device address
* then when one port is deleted, assign
* the local entry to another port
*/
if (f->is_local) {
struct net_bridge_port *op;
list_for_each_entry(op, &br->port_list, list) {
if (op != p &&
ether_addr_equal(op->dev->dev_addr,
f->addr.addr)) {
f->dst = op;
goto skip_delete;
}
}
}
fdb_delete(br, f);
skip_delete: ;
}
}
spin_unlock_bh(&br->hash_lock);
}
/* No locking or refcounting, assumes caller has rcu_read_lock */
struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
const unsigned char *addr,
__u16 vid)
{
struct net_bridge_fdb_entry *fdb;
hlist_for_each_entry_rcu(fdb,
&br->hash[br_mac_hash(addr, vid)], hlist) {
if (ether_addr_equal(fdb->addr.addr, addr) &&
fdb->vlan_id == vid) {
if (unlikely(has_expired(br, fdb)))
break;
return fdb;
}
}
return NULL;
}
#if IS_ENABLED(CONFIG_ATM_LANE)
/* Interface used by ATM LANE hook to test
* if an addr is on some other bridge port */
int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
{
struct net_bridge_fdb_entry *fdb;
struct net_bridge_port *port;
int ret;
rcu_read_lock();
port = br_port_get_rcu(dev);
if (!port)
ret = 0;
else {
fdb = __br_fdb_get(port->br, addr, 0);
ret = fdb && fdb->dst && fdb->dst->dev != dev &&
fdb->dst->state == BR_STATE_FORWARDING;
}
rcu_read_unlock();
return ret;
}
#endif /* CONFIG_ATM_LANE */
/*
* Fill buffer with forwarding table records in
* the API format.
*/
int br_fdb_fillbuf(struct net_bridge *br, void *buf,
unsigned long maxnum, unsigned long skip)
{
struct __fdb_entry *fe = buf;
int i, num = 0;
struct net_bridge_fdb_entry *f;
memset(buf, 0, maxnum*sizeof(struct __fdb_entry));
rcu_read_lock();
for (i = 0; i < BR_HASH_SIZE; i++) {
hlist_for_each_entry_rcu(f, &br->hash[i], hlist) {
if (num >= maxnum)
goto out;
if (has_expired(br, f))
continue;
/* ignore pseudo entry for local MAC address */
if (!f->dst)
continue;
if (skip) {
--skip;
continue;
}
/* convert from internal format to API */
memcpy(fe->mac_addr, f->addr.addr, ETH_ALEN);
/* due to ABI compat need to split into hi/lo */
fe->port_no = f->dst->port_no;
fe->port_hi = f->dst->port_no >> 8;
fe->is_local = f->is_local;
if (!f->is_static)
fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated);
++fe;
++num;
}
}
out:
rcu_read_unlock();
return num;
}
static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
const unsigned char *addr,
__u16 vid)
{
struct net_bridge_fdb_entry *fdb;
hlist_for_each_entry(fdb, head, hlist) {
if (ether_addr_equal(fdb->addr.addr, addr) &&
fdb->vlan_id == vid)
return fdb;
}
return NULL;
}
static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head,
const unsigned char *addr,
__u16 vid)
{
struct net_bridge_fdb_entry *fdb;
hlist_for_each_entry_rcu(fdb, head, hlist) {
if (ether_addr_equal(fdb->addr.addr, addr) &&
fdb->vlan_id == vid)
return fdb;
}
return NULL;
}
static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
struct net_bridge_port *source,
const unsigned char *addr,
__u16 vid)
{
struct net_bridge_fdb_entry *fdb;
fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
if (fdb) {
memcpy(fdb->addr.addr, addr, ETH_ALEN);
fdb->dst = source;
fdb->vlan_id = vid;
fdb->is_local = 0;
fdb->is_static = 0;
fdb->updated = fdb->used = jiffies;
hlist_add_head_rcu(&fdb->hlist, head);
}
return fdb;
}
static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
const unsigned char *addr, u16 vid)
{
struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
struct net_bridge_fdb_entry *fdb;
if (!is_valid_ether_addr(addr))
return -EINVAL;
fdb = fdb_find(head, addr, vid);
if (fdb) {
/* it is okay to have multiple ports with same
* address, just use the first one.
*/
if (fdb->is_local)
return 0;
br_warn(br, "adding interface %s with same address "
"as a received packet\n",
source ? source->dev->name : br->dev->name);
fdb_delete(br, fdb);
}
fdb = fdb_create(head, source, addr, vid);
if (!fdb)
return -ENOMEM;
fdb->is_local = fdb->is_static = 1;
fdb_notify(br, fdb, RTM_NEWNEIGH);
return 0;
}
/* Add entry for local address of interface */
int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
const unsigned char *addr, u16 vid)
{
int ret;
spin_lock_bh(&br->hash_lock);
ret = fdb_insert(br, source, addr, vid);
spin_unlock_bh(&br->hash_lock);
return ret;
}
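/*
* Usage sketch (illustrative): the bridge calls this when a port is
* added, so the port's own MAC address appears as a local/static entry:
*
*	err = br_fdb_insert(br, p, p->dev->dev_addr, 0);
*/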
void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
const unsigned char *addr, u16 vid)
{
struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
struct net_bridge_fdb_entry *fdb;
/* some users want to always flood. */
if (hold_time(br) == 0)
return;
/* ignore packets unless we are using this port */
if (!(source->state == BR_STATE_LEARNING ||
source->state == BR_STATE_FORWARDING))
return;
fdb = fdb_find_rcu(head, addr, vid);
if (likely(fdb)) {
/* attempt to update an entry for a local interface */
if (unlikely(fdb->is_local)) {
if (net_ratelimit())
br_warn(br, "received packet on %s with "
"own address as source address\n",
source->dev->name);
} else {
/* fastpath: update of existing entry */
fdb->dst = source;
fdb->updated = jiffies;
}
} else {
spin_lock(&br->hash_lock);
if (likely(!fdb_find(head, addr, vid))) {
fdb = fdb_create(head, source, addr, vid);
if (fdb)
fdb_notify(br, fdb, RTM_NEWNEIGH);
}
/* else we lost the race and someone else inserted
* it first; don't bother updating
*/
spin_unlock(&br->hash_lock);
}
}
static int fdb_to_nud(const struct net_bridge_fdb_entry *fdb)
{
if (fdb->is_local)
return NUD_PERMANENT;
else if (fdb->is_static)
return NUD_NOARP;
else if (has_expired(fdb->dst->br, fdb))
return NUD_STALE;
else
return NUD_REACHABLE;
}
static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
const struct net_bridge_fdb_entry *fdb,
u32 portid, u32 seq, int type, unsigned int flags)
{
unsigned long now = jiffies;
struct nda_cacheinfo ci;
struct nlmsghdr *nlh;
struct ndmsg *ndm;
nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
if (nlh == NULL)
return -EMSGSIZE;
ndm = nlmsg_data(nlh);
ndm->ndm_family = AF_BRIDGE;
ndm->ndm_pad1 = 0;
ndm->ndm_pad2 = 0;
ndm->ndm_flags = 0;
ndm->ndm_type = 0;
ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex;
ndm->ndm_state = fdb_to_nud(fdb);
if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr))
goto nla_put_failure;
ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
ci.ndm_confirmed = 0;
ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
ci.ndm_refcnt = 0;
if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
goto nla_put_failure;
if (nla_put(skb, NDA_VLAN, sizeof(u16), &fdb->vlan_id))
goto nla_put_failure;
return nlmsg_end(skb, nlh);
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
static inline size_t fdb_nlmsg_size(void)
{
return NLMSG_ALIGN(sizeof(struct ndmsg))
+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
+ nla_total_size(sizeof(u16)) /* NDA_VLAN */
+ nla_total_size(sizeof(struct nda_cacheinfo));
}
static void fdb_notify(struct net_bridge *br,
const struct net_bridge_fdb_entry *fdb, int type)
{
struct net *net = dev_net(br->dev);
struct sk_buff *skb;
int err = -ENOBUFS;
skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
if (skb == NULL)
goto errout;
err = fdb_fill_info(skb, br, fdb, 0, 0, type, 0);
if (err < 0) {
/* -EMSGSIZE implies BUG in fdb_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
goto errout;
}
rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
return;
errout:
if (err < 0)
rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
/* Dump information about entries, in response to GETNEIGH */
int br_fdb_dump(struct sk_buff *skb,
struct netlink_callback *cb,
struct net_device *dev,
int idx)
{
struct net_bridge *br = netdev_priv(dev);
int i;
if (!(dev->priv_flags & IFF_EBRIDGE))
goto out;
for (i = 0; i < BR_HASH_SIZE; i++) {
struct net_bridge_fdb_entry *f;
hlist_for_each_entry_rcu(f, &br->hash[i], hlist) {
if (idx < cb->args[0])
goto skip;
if (fdb_fill_info(skb, br, f,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
RTM_NEWNEIGH,
NLM_F_MULTI) < 0)
break;
skip:
++idx;
}
}
out:
return idx;
}
/* Update (create or replace) forwarding database entry */
static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
__u16 state, __u16 flags, __u16 vid)
{
struct net_bridge *br = source->br;
struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
struct net_bridge_fdb_entry *fdb;
bool modified = false;
fdb = fdb_find(head, addr, vid);
if (fdb == NULL) {
if (!(flags & NLM_F_CREATE))
return -ENOENT;
fdb = fdb_create(head, source, addr, vid);
if (!fdb)
return -ENOMEM;
modified = true;
} else {
if (flags & NLM_F_EXCL)
return -EEXIST;
if (fdb->dst != source) {
fdb->dst = source;
modified = true;
}
}
if (fdb_to_nud(fdb) != state) {
if (state & NUD_PERMANENT)
fdb->is_local = fdb->is_static = 1;
else if (state & NUD_NOARP) {
fdb->is_local = 0;
fdb->is_static = 1;
} else
fdb->is_local = fdb->is_static = 0;
modified = true;
}
fdb->used = jiffies;
if (modified) {
fdb->updated = jiffies;
fdb_notify(br, fdb, RTM_NEWNEIGH);
}
return 0;
}
static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge_port *p,
const unsigned char *addr, u16 nlh_flags, u16 vid)
{
int err = 0;
if (ndm->ndm_flags & NTF_USE) {
rcu_read_lock();
br_fdb_update(p->br, p, addr, vid);
rcu_read_unlock();
} else {
spin_lock_bh(&p->br->hash_lock);
err = fdb_add_entry(p, addr, ndm->ndm_state,
nlh_flags, vid);
spin_unlock_bh(&p->br->hash_lock);
}
return err;
}
/* Add new permanent fdb entry with RTM_NEWNEIGH */
int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 nlh_flags)
{
struct net_bridge_port *p;
int err = 0;
struct net_port_vlans *pv;
unsigned short vid = VLAN_N_VID;
if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
return -EINVAL;
}
if (tb[NDA_VLAN]) {
if (nla_len(tb[NDA_VLAN]) != sizeof(unsigned short)) {
pr_info("bridge: RTM_NEWNEIGH with invalid vlan\n");
return -EINVAL;
}
vid = nla_get_u16(tb[NDA_VLAN]);
if (vid >= VLAN_N_VID) {
pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n",
vid);
return -EINVAL;
}
}
p = br_port_get_rtnl(dev);
if (p == NULL) {
pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
dev->name);
return -EINVAL;
}
pv = nbp_get_vlan_info(p);
if (vid != VLAN_N_VID) {
if (!pv || !test_bit(vid, pv->vlan_bitmap)) {
pr_info("bridge: RTM_NEWNEIGH with unconfigured "
"vlan %d on port %s\n", vid, dev->name);
return -EINVAL;
}
/* VID was specified, so use it. */
err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
} else {
if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
goto out;
}
/* We have vlans configured on this port and user didn't
* specify a VLAN. To be nice, add/update entry for every
* vlan on this port.
*/
for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
if (err)
goto out;
}
}
out:
return err;
}
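/*
 * Usage sketch (illustrative, addresses and names are made up): a
 * userspace command along the lines of
 *
 *	bridge fdb add 00:11:22:33:44:55 dev eth0 vlan 10
 *
 * (iproute2) arrives here as RTM_NEWNEIGH with NDA_VLAN set and takes
 * the vid != VLAN_N_VID branch above; without a vlan keyword the entry
 * is added on every vlan configured on the port.
 */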
int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr,
u16 vlan)
{
struct hlist_head *head = &br->hash[br_mac_hash(addr, vlan)];
struct net_bridge_fdb_entry *fdb;
fdb = fdb_find(head, addr, vlan);
if (!fdb)
return -ENOENT;
fdb_delete(br, fdb);
return 0;
}
static int __br_fdb_delete(struct net_bridge_port *p,
const unsigned char *addr, u16 vid)
{
int err;
spin_lock_bh(&p->br->hash_lock);
err = fdb_delete_by_addr(p->br, addr, vid);
spin_unlock_bh(&p->br->hash_lock);
return err;
}
/* Remove neighbor entry with RTM_DELNEIGH */
int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr)
{
struct net_bridge_port *p;
int err;
struct net_port_vlans *pv;
unsigned short vid = VLAN_N_VID;
if (tb[NDA_VLAN]) {
if (nla_len(tb[NDA_VLAN]) != sizeof(unsigned short)) {
pr_info("bridge: RTM_NEWNEIGH with invalid vlan\n");
return -EINVAL;
}
vid = nla_get_u16(tb[NDA_VLAN]);
if (vid >= VLAN_N_VID) {
pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n",
vid);
return -EINVAL;
}
}
p = br_port_get_rtnl(dev);
if (p == NULL) {
pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
dev->name);
return -EINVAL;
}
pv = nbp_get_vlan_info(p);
if (vid != VLAN_N_VID) {
if (!pv || !test_bit(vid, pv->vlan_bitmap)) {
pr_info("bridge: RTM_DELNEIGH with unconfigured "
"vlan %d on port %s\n", vid, dev->name);
return -EINVAL;
}
err = __br_fdb_delete(p, addr, vid);
} else {
if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
err = __br_fdb_delete(p, addr, 0);
goto out;
}
/* We have vlans configured on this port and user didn't
* specify a VLAN. To be nice, delete the entry for every
* vlan on this port.
*/
/* err is cleared to 0 by the first vlan on which the
 * deletion succeeds; report -ENOENT only if it failed
 * on every vlan.
 */
err = -ENOENT;
for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
err &= __br_fdb_delete(p, addr, vid);
}
}
out:
return err;
}
| gpl-2.0 |
Tesla-Redux-Devices/hells-Core-N6 | net/dccp/proto.c | 2376 | 30699 | /*
* net/dccp/proto.c
*
* An implementation of the DCCP protocol
* Arnaldo Carvalho de Melo <acme@conectiva.com.br>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/dccp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/xfrm.h>
#include <asm/ioctls.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/poll.h>
#include "ccid.h"
#include "dccp.h"
#include "feat.h"
DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;
EXPORT_SYMBOL_GPL(dccp_statistics);
struct percpu_counter dccp_orphan_count;
EXPORT_SYMBOL_GPL(dccp_orphan_count);
struct inet_hashinfo dccp_hashinfo;
EXPORT_SYMBOL_GPL(dccp_hashinfo);
/* the maximum queue length for tx in packets. 0 is no limit */
int sysctl_dccp_tx_qlen __read_mostly = 5;
#ifdef CONFIG_IP_DCCP_DEBUG
static const char *dccp_state_name(const int state)
{
static const char *const dccp_state_names[] = {
[DCCP_OPEN] = "OPEN",
[DCCP_REQUESTING] = "REQUESTING",
[DCCP_PARTOPEN] = "PARTOPEN",
[DCCP_LISTEN] = "LISTEN",
[DCCP_RESPOND] = "RESPOND",
[DCCP_CLOSING] = "CLOSING",
[DCCP_ACTIVE_CLOSEREQ] = "CLOSEREQ",
[DCCP_PASSIVE_CLOSE] = "PASSIVE_CLOSE",
[DCCP_PASSIVE_CLOSEREQ] = "PASSIVE_CLOSEREQ",
[DCCP_TIME_WAIT] = "TIME_WAIT",
[DCCP_CLOSED] = "CLOSED",
};
if (state >= DCCP_MAX_STATES)
return "INVALID STATE!";
else
return dccp_state_names[state];
}
#endif
void dccp_set_state(struct sock *sk, const int state)
{
const int oldstate = sk->sk_state;
dccp_pr_debug("%s(%p) %s --> %s\n", dccp_role(sk), sk,
dccp_state_name(oldstate), dccp_state_name(state));
WARN_ON(state == oldstate);
switch (state) {
case DCCP_OPEN:
if (oldstate != DCCP_OPEN)
DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
/* Client retransmits all Confirm options until entering OPEN */
if (oldstate == DCCP_PARTOPEN)
dccp_feat_list_purge(&dccp_sk(sk)->dccps_featneg);
break;
case DCCP_CLOSED:
if (oldstate == DCCP_OPEN || oldstate == DCCP_ACTIVE_CLOSEREQ ||
oldstate == DCCP_CLOSING)
DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);
sk->sk_prot->unhash(sk);
if (inet_csk(sk)->icsk_bind_hash != NULL &&
!(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
inet_put_port(sk);
/* fall through */
default:
if (oldstate == DCCP_OPEN)
DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
}
/* Change state AFTER socket is unhashed to avoid closed
* socket sitting in hash tables.
*/
sk->sk_state = state;
}
EXPORT_SYMBOL_GPL(dccp_set_state);
static void dccp_finish_passive_close(struct sock *sk)
{
switch (sk->sk_state) {
case DCCP_PASSIVE_CLOSE:
/* Node (client or server) has received Close packet. */
dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
dccp_set_state(sk, DCCP_CLOSED);
break;
case DCCP_PASSIVE_CLOSEREQ:
/*
* Client received CloseReq. We set the `active' flag so that
* dccp_send_close() retransmits the Close as per RFC 4340, 8.3.
*/
dccp_send_close(sk, 1);
dccp_set_state(sk, DCCP_CLOSING);
}
}
void dccp_done(struct sock *sk)
{
dccp_set_state(sk, DCCP_CLOSED);
dccp_clear_xmit_timers(sk);
sk->sk_shutdown = SHUTDOWN_MASK;
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_state_change(sk);
else
inet_csk_destroy_sock(sk);
}
EXPORT_SYMBOL_GPL(dccp_done);
const char *dccp_packet_name(const int type)
{
static const char *const dccp_packet_names[] = {
[DCCP_PKT_REQUEST] = "REQUEST",
[DCCP_PKT_RESPONSE] = "RESPONSE",
[DCCP_PKT_DATA] = "DATA",
[DCCP_PKT_ACK] = "ACK",
[DCCP_PKT_DATAACK] = "DATAACK",
[DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
[DCCP_PKT_CLOSE] = "CLOSE",
[DCCP_PKT_RESET] = "RESET",
[DCCP_PKT_SYNC] = "SYNC",
[DCCP_PKT_SYNCACK] = "SYNCACK",
};
if (type >= DCCP_NR_PKT_TYPES)
return "INVALID";
else
return dccp_packet_names[type];
}
EXPORT_SYMBOL_GPL(dccp_packet_name);
int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
{
struct dccp_sock *dp = dccp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
icsk->icsk_rto = DCCP_TIMEOUT_INIT;
icsk->icsk_syn_retries = sysctl_dccp_request_retries;
sk->sk_state = DCCP_CLOSED;
sk->sk_write_space = dccp_write_space;
icsk->icsk_sync_mss = dccp_sync_mss;
dp->dccps_mss_cache = 536;
dp->dccps_rate_last = jiffies;
dp->dccps_role = DCCP_ROLE_UNDEFINED;
dp->dccps_service = DCCP_SERVICE_CODE_IS_ABSENT;
dp->dccps_tx_qlen = sysctl_dccp_tx_qlen;
dccp_init_xmit_timers(sk);
INIT_LIST_HEAD(&dp->dccps_featneg);
/* control socket doesn't need feat nego */
if (likely(ctl_sock_initialized))
return dccp_feat_init(sk);
return 0;
}
EXPORT_SYMBOL_GPL(dccp_init_sock);
void dccp_destroy_sock(struct sock *sk)
{
struct dccp_sock *dp = dccp_sk(sk);
/*
* DCCP doesn't use sk_write_queue, just sk_send_head
* for retransmissions
*/
if (sk->sk_send_head != NULL) {
kfree_skb(sk->sk_send_head);
sk->sk_send_head = NULL;
}
/* Clean up a referenced DCCP bind bucket. */
if (inet_csk(sk)->icsk_bind_hash != NULL)
inet_put_port(sk);
kfree(dp->dccps_service_list);
dp->dccps_service_list = NULL;
if (dp->dccps_hc_rx_ackvec != NULL) {
dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
dp->dccps_hc_rx_ackvec = NULL;
}
ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;
/* clean up feature negotiation state */
dccp_feat_list_purge(&dp->dccps_featneg);
}
EXPORT_SYMBOL_GPL(dccp_destroy_sock);
static inline int dccp_listen_start(struct sock *sk, int backlog)
{
struct dccp_sock *dp = dccp_sk(sk);
dp->dccps_role = DCCP_ROLE_LISTEN;
/* do not start to listen if feature negotiation setup fails */
if (dccp_feat_finalise_settings(dp))
return -EPROTO;
return inet_csk_listen_start(sk, backlog);
}
static inline int dccp_need_reset(int state)
{
return state != DCCP_CLOSED && state != DCCP_LISTEN &&
state != DCCP_REQUESTING;
}
int dccp_disconnect(struct sock *sk, int flags)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct inet_sock *inet = inet_sk(sk);
int err = 0;
const int old_state = sk->sk_state;
if (old_state != DCCP_CLOSED)
dccp_set_state(sk, DCCP_CLOSED);
/*
* This corresponds to the ABORT function of RFC793, sec. 3.8
* TCP uses a RST segment, DCCP a Reset packet with Code 2, "Aborted".
*/
if (old_state == DCCP_LISTEN) {
inet_csk_listen_stop(sk);
} else if (dccp_need_reset(old_state)) {
dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
sk->sk_err = ECONNRESET;
} else if (old_state == DCCP_REQUESTING)
sk->sk_err = ECONNRESET;
dccp_clear_xmit_timers(sk);
__skb_queue_purge(&sk->sk_receive_queue);
__skb_queue_purge(&sk->sk_write_queue);
if (sk->sk_send_head != NULL) {
__kfree_skb(sk->sk_send_head);
sk->sk_send_head = NULL;
}
inet->inet_dport = 0;
if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
inet_reset_saddr(sk);
sk->sk_shutdown = 0;
sock_reset_flag(sk, SOCK_DONE);
icsk->icsk_backoff = 0;
inet_csk_delack_init(sk);
__sk_dst_reset(sk);
WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
sk->sk_error_report(sk);
return err;
}
EXPORT_SYMBOL_GPL(dccp_disconnect);
/*
* Wait for a DCCP event.
*
* Note that we don't need to lock the socket, as the upper poll layers
* take care of normal races (between the test and the event) and we don't
* go look at any of the socket buffers directly.
*/
unsigned int dccp_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
unsigned int mask;
struct sock *sk = sock->sk;
sock_poll_wait(file, sk_sleep(sk), wait);
if (sk->sk_state == DCCP_LISTEN)
return inet_csk_listen_poll(sk);
/* Socket is not locked. We are protected from async events
 * by poll logic, and correct handling of state changes
 * made by other threads is impossible in any case.
 */
mask = 0;
if (sk->sk_err)
mask = POLLERR;
if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
mask |= POLLHUP;
if (sk->sk_shutdown & RCV_SHUTDOWN)
mask |= POLLIN | POLLRDNORM | POLLRDHUP;
/* Connected? */
if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
if (atomic_read(&sk->sk_rmem_alloc) > 0)
mask |= POLLIN | POLLRDNORM;
if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
mask |= POLLOUT | POLLWRNORM;
} else { /* send SIGIO later */
set_bit(SOCK_ASYNC_NOSPACE,
&sk->sk_socket->flags);
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
/* Race breaker. If space is freed after
* wspace test but before the flags are set,
* IO signal will be lost.
*/
if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
mask |= POLLOUT | POLLWRNORM;
}
}
}
return mask;
}
EXPORT_SYMBOL_GPL(dccp_poll);
int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
int rc = -ENOTCONN;
lock_sock(sk);
if (sk->sk_state == DCCP_LISTEN)
goto out;
switch (cmd) {
case SIOCINQ: {
struct sk_buff *skb;
unsigned long amount = 0;
skb = skb_peek(&sk->sk_receive_queue);
if (skb != NULL) {
/*
* We will only return the amount of this packet since
* that is all that will be read.
*/
amount = skb->len;
}
rc = put_user(amount, (int __user *)arg);
}
break;
default:
rc = -ENOIOCTLCMD;
break;
}
out:
release_sock(sk);
return rc;
}
EXPORT_SYMBOL_GPL(dccp_ioctl);
static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
char __user *optval, unsigned int optlen)
{
struct dccp_sock *dp = dccp_sk(sk);
struct dccp_service_list *sl = NULL;
if (service == DCCP_SERVICE_INVALID_VALUE ||
optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
return -EINVAL;
if (optlen > sizeof(service)) {
sl = kmalloc(optlen, GFP_KERNEL);
if (sl == NULL)
return -ENOMEM;
sl->dccpsl_nr = optlen / sizeof(u32) - 1;
if (copy_from_user(sl->dccpsl_list,
optval + sizeof(service),
optlen - sizeof(service)) ||
dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) {
kfree(sl);
return -EFAULT;
}
}
lock_sock(sk);
dp->dccps_service = service;
kfree(dp->dccps_service_list);
dp->dccps_service_list = sl;
release_sock(sk);
return 0;
}
static int dccp_setsockopt_cscov(struct sock *sk, int cscov, bool rx)
{
u8 *list, len;
int i, rc;
if (cscov < 0 || cscov > 15)
return -EINVAL;
/*
* Populate a list of permissible values, in the range cscov...15. This
* is necessary since feature negotiation of single values only works if
* both sides incidentally choose the same value. Since the list starts
* lowest-value first, negotiation will pick the smallest shared value.
*/
if (cscov == 0)
return 0;
len = 16 - cscov;
list = kmalloc(len, GFP_KERNEL);
if (list == NULL)
return -ENOBUFS;
for (i = 0; i < len; i++)
list[i] = cscov++;
rc = dccp_feat_register_sp(sk, DCCPF_MIN_CSUM_COVER, rx, list, len);
if (rc == 0) {
if (rx)
dccp_sk(sk)->dccps_pcrlen = cscov;
else
dccp_sk(sk)->dccps_pcslen = cscov;
}
kfree(list);
return rc;
}
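/*
 * Illustrative userspace fragment (hedged; constants are the ones from
 * <linux/dccp.h>): an application requests partial checksum coverage on
 * the send side with
 *
 *	int cscov = 4;
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SEND_CSCOV,
 *		   &cscov, sizeof(cscov));
 *
 * Per RFC 4340, a CsCov value n > 1 covers the header plus the initial
 * (n - 1) * 4 bytes of application data; the list built above lets the
 * peers negotiate a common minimum.
 */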
static int dccp_setsockopt_ccid(struct sock *sk, int type,
char __user *optval, unsigned int optlen)
{
u8 *val;
int rc = 0;
if (optlen < 1 || optlen > DCCP_FEAT_MAX_SP_VALS)
return -EINVAL;
val = memdup_user(optval, optlen);
if (IS_ERR(val))
return PTR_ERR(val);
lock_sock(sk);
if (type == DCCP_SOCKOPT_TX_CCID || type == DCCP_SOCKOPT_CCID)
rc = dccp_feat_register_sp(sk, DCCPF_CCID, 1, val, optlen);
if (!rc && (type == DCCP_SOCKOPT_RX_CCID || type == DCCP_SOCKOPT_CCID))
rc = dccp_feat_register_sp(sk, DCCPF_CCID, 0, val, optlen);
release_sock(sk);
kfree(val);
return rc;
}
static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct dccp_sock *dp = dccp_sk(sk);
int val, err = 0;
switch (optname) {
case DCCP_SOCKOPT_PACKET_SIZE:
DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
return 0;
case DCCP_SOCKOPT_CHANGE_L:
case DCCP_SOCKOPT_CHANGE_R:
DCCP_WARN("sockopt(CHANGE_L/R) is deprecated: fix your app\n");
return 0;
case DCCP_SOCKOPT_CCID:
case DCCP_SOCKOPT_RX_CCID:
case DCCP_SOCKOPT_TX_CCID:
return dccp_setsockopt_ccid(sk, optname, optval, optlen);
}
if (optlen < (int)sizeof(int))
return -EINVAL;
if (get_user(val, (int __user *)optval))
return -EFAULT;
if (optname == DCCP_SOCKOPT_SERVICE)
return dccp_setsockopt_service(sk, val, optval, optlen);
lock_sock(sk);
switch (optname) {
case DCCP_SOCKOPT_SERVER_TIMEWAIT:
if (dp->dccps_role != DCCP_ROLE_SERVER)
err = -EOPNOTSUPP;
else
dp->dccps_server_timewait = (val != 0);
break;
case DCCP_SOCKOPT_SEND_CSCOV:
err = dccp_setsockopt_cscov(sk, val, false);
break;
case DCCP_SOCKOPT_RECV_CSCOV:
err = dccp_setsockopt_cscov(sk, val, true);
break;
case DCCP_SOCKOPT_QPOLICY_ID:
if (sk->sk_state != DCCP_CLOSED)
err = -EISCONN;
else if (val < 0 || val >= DCCPQ_POLICY_MAX)
err = -EINVAL;
else
dp->dccps_qpolicy = val;
break;
case DCCP_SOCKOPT_QPOLICY_TXQLEN:
if (val < 0)
err = -EINVAL;
else
dp->dccps_tx_qlen = val;
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
int dccp_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
if (level != SOL_DCCP)
return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
optname, optval,
optlen);
return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL_GPL(dccp_setsockopt);
#ifdef CONFIG_COMPAT
int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
if (level != SOL_DCCP)
return inet_csk_compat_setsockopt(sk, level, optname,
optval, optlen);
return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL_GPL(compat_dccp_setsockopt);
#endif
static int dccp_getsockopt_service(struct sock *sk, int len,
__be32 __user *optval,
int __user *optlen)
{
const struct dccp_sock *dp = dccp_sk(sk);
const struct dccp_service_list *sl;
int err = -ENOENT, slen = 0, total_len = sizeof(u32);
lock_sock(sk);
if ((sl = dp->dccps_service_list) != NULL) {
slen = sl->dccpsl_nr * sizeof(u32);
total_len += slen;
}
err = -EINVAL;
if (total_len > len)
goto out;
err = 0;
if (put_user(total_len, optlen) ||
put_user(dp->dccps_service, optval) ||
(sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen)))
err = -EFAULT;
out:
release_sock(sk);
return err;
}
static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
struct dccp_sock *dp;
int val, len;
if (get_user(len, optlen))
return -EFAULT;
if (len < (int)sizeof(int))
return -EINVAL;
dp = dccp_sk(sk);
switch (optname) {
case DCCP_SOCKOPT_PACKET_SIZE:
DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
return 0;
case DCCP_SOCKOPT_SERVICE:
return dccp_getsockopt_service(sk, len,
(__be32 __user *)optval, optlen);
case DCCP_SOCKOPT_GET_CUR_MPS:
val = dp->dccps_mss_cache;
break;
case DCCP_SOCKOPT_AVAILABLE_CCIDS:
return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen);
case DCCP_SOCKOPT_TX_CCID:
val = ccid_get_current_tx_ccid(dp);
if (val < 0)
return -ENOPROTOOPT;
break;
case DCCP_SOCKOPT_RX_CCID:
val = ccid_get_current_rx_ccid(dp);
if (val < 0)
return -ENOPROTOOPT;
break;
case DCCP_SOCKOPT_SERVER_TIMEWAIT:
val = dp->dccps_server_timewait;
break;
case DCCP_SOCKOPT_SEND_CSCOV:
val = dp->dccps_pcslen;
break;
case DCCP_SOCKOPT_RECV_CSCOV:
val = dp->dccps_pcrlen;
break;
case DCCP_SOCKOPT_QPOLICY_ID:
val = dp->dccps_qpolicy;
break;
case DCCP_SOCKOPT_QPOLICY_TXQLEN:
val = dp->dccps_tx_qlen;
break;
case 128 ... 191:
return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
len, (u32 __user *)optval, optlen);
case 192 ... 255:
return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
len, (u32 __user *)optval, optlen);
default:
return -ENOPROTOOPT;
}
len = sizeof(val);
if (put_user(len, optlen) || copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
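/*
 * Note (illustrative): option names 128..191 are reserved for the RX
 * CCID and 192..255 for the TX CCID, so e.g.
 *
 *	getsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_CCID_RX_INFO, &info, &len);
 *
 * (DCCP_SOCKOPT_CCID_RX_INFO == 128 in <linux/dccp.h>) is routed to
 * ccid_hc_rx_getsockopt() above rather than handled here.
 */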
int dccp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
if (level != SOL_DCCP)
return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
optname, optval,
optlen);
return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL_GPL(dccp_getsockopt);
#ifdef CONFIG_COMPAT
int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
if (level != SOL_DCCP)
return inet_csk_compat_getsockopt(sk, level, optname,
optval, optlen);
return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL_GPL(compat_dccp_getsockopt);
#endif
static int dccp_msghdr_parse(struct msghdr *msg, struct sk_buff *skb)
{
struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg);
/*
* Assign an (opaque) qpolicy priority value to skb->priority.
*
* We are overloading this skb field for use with the qpolicy subsystem.
* The skb->priority is normally used for the SO_PRIORITY option, which
* is initialised from sk_priority. Since the assignment of sk_priority
* to skb->priority happens later (on layer 3), we overload this field
* for use with queueing priorities as long as the skb is on layer 4.
* The default priority value (if nothing is set) is 0.
*/
skb->priority = 0;
for (; cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) {
if (!CMSG_OK(msg, cmsg))
return -EINVAL;
if (cmsg->cmsg_level != SOL_DCCP)
continue;
if (cmsg->cmsg_type <= DCCP_SCM_QPOLICY_MAX &&
!dccp_qpolicy_param_ok(skb->sk, cmsg->cmsg_type))
return -EINVAL;
switch (cmsg->cmsg_type) {
case DCCP_SCM_PRIORITY:
if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u32)))
return -EINVAL;
skb->priority = *(__u32 *)CMSG_DATA(cmsg);
break;
default:
return -EINVAL;
}
}
return 0;
}
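/*
 * Illustrative sender fragment (assumes the socket selected a
 * priority-aware qpolicy via DCCP_SOCKOPT_QPOLICY_ID; the data iovec is
 * omitted for brevity): a per-packet priority travels as ancillary data
 * and is picked up by dccp_msghdr_parse() above.
 *
 *	char buf[CMSG_SPACE(sizeof(__u32))];
 *	struct msghdr msg = {
 *		.msg_control	= buf,
 *		.msg_controllen	= sizeof(buf),
 *	};
 *	struct cmsghdr *c = CMSG_FIRSTHDR(&msg);
 *
 *	c->cmsg_level = SOL_DCCP;
 *	c->cmsg_type  = DCCP_SCM_PRIORITY;
 *	c->cmsg_len   = CMSG_LEN(sizeof(__u32));
 *	*(__u32 *)CMSG_DATA(c) = prio;
 *	sendmsg(fd, &msg, 0);
 */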
int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len)
{
const struct dccp_sock *dp = dccp_sk(sk);
const int flags = msg->msg_flags;
const int noblock = flags & MSG_DONTWAIT;
struct sk_buff *skb;
int rc, size;
long timeo;
if (len > dp->dccps_mss_cache)
return -EMSGSIZE;
lock_sock(sk);
if (dccp_qpolicy_full(sk)) {
rc = -EAGAIN;
goto out_release;
}
timeo = sock_sndtimeo(sk, noblock);
/*
* We have to use sk_stream_wait_connect here to set sk_write_pending,
* so that the trick in dccp_rcv_request_sent_state_process works.
*/
/* Wait for a connection to finish. */
if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN))
if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
goto out_release;
size = sk->sk_prot->max_header + len;
release_sock(sk);
skb = sock_alloc_send_skb(sk, size, noblock, &rc);
lock_sock(sk);
if (skb == NULL)
goto out_release;
skb_reserve(skb, sk->sk_prot->max_header);
rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
if (rc != 0)
goto out_discard;
rc = dccp_msghdr_parse(msg, skb);
if (rc != 0)
goto out_discard;
dccp_qpolicy_push(sk, skb);
/*
* The xmit_timer is set if the TX CCID is rate-based and will expire
* when congestion control permits to release further packets into the
* network. Window-based CCIDs do not use this timer.
*/
if (!timer_pending(&dp->dccps_xmit_timer))
dccp_write_xmit(sk);
out_release:
release_sock(sk);
return rc ? : len;
out_discard:
kfree_skb(skb);
goto out_release;
}
EXPORT_SYMBOL_GPL(dccp_sendmsg);
int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len, int nonblock, int flags, int *addr_len)
{
const struct dccp_hdr *dh;
long timeo;
lock_sock(sk);
if (sk->sk_state == DCCP_LISTEN) {
len = -ENOTCONN;
goto out;
}
timeo = sock_rcvtimeo(sk, nonblock);
do {
struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
if (skb == NULL)
goto verify_sock_status;
dh = dccp_hdr(skb);
switch (dh->dccph_type) {
case DCCP_PKT_DATA:
case DCCP_PKT_DATAACK:
goto found_ok_skb;
case DCCP_PKT_CLOSE:
case DCCP_PKT_CLOSEREQ:
if (!(flags & MSG_PEEK))
dccp_finish_passive_close(sk);
/* fall through */
case DCCP_PKT_RESET:
dccp_pr_debug("found fin (%s) ok!\n",
dccp_packet_name(dh->dccph_type));
len = 0;
goto found_fin_ok;
default:
dccp_pr_debug("packet_type=%s\n",
dccp_packet_name(dh->dccph_type));
sk_eat_skb(sk, skb, false);
}
verify_sock_status:
if (sock_flag(sk, SOCK_DONE)) {
len = 0;
break;
}
if (sk->sk_err) {
len = sock_error(sk);
break;
}
if (sk->sk_shutdown & RCV_SHUTDOWN) {
len = 0;
break;
}
if (sk->sk_state == DCCP_CLOSED) {
if (!sock_flag(sk, SOCK_DONE)) {
/* This occurs when the user tries to read
 * from a socket that was never connected.
 */
len = -ENOTCONN;
break;
}
len = 0;
break;
}
if (!timeo) {
len = -EAGAIN;
break;
}
if (signal_pending(current)) {
len = sock_intr_errno(timeo);
break;
}
sk_wait_data(sk, &timeo);
continue;
found_ok_skb:
if (len > skb->len)
len = skb->len;
else if (len < skb->len)
msg->msg_flags |= MSG_TRUNC;
if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len)) {
/* Exception. Bailout! */
len = -EFAULT;
break;
}
if (flags & MSG_TRUNC)
len = skb->len;
found_fin_ok:
if (!(flags & MSG_PEEK))
sk_eat_skb(sk, skb, false);
break;
} while (1);
out:
release_sock(sk);
return len;
}
EXPORT_SYMBOL_GPL(dccp_recvmsg);
int inet_dccp_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
unsigned char old_state;
int err;
lock_sock(sk);
err = -EINVAL;
if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
goto out;
old_state = sk->sk_state;
if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
goto out;
/* Really, if the socket is already in listen state
* we can only allow the backlog to be adjusted.
*/
if (old_state != DCCP_LISTEN) {
/*
* FIXME: here it probably should be sk->sk_prot->listen_start
* see tcp_listen_start
*/
err = dccp_listen_start(sk, backlog);
if (err)
goto out;
}
sk->sk_max_ack_backlog = backlog;
err = 0;
out:
release_sock(sk);
return err;
}
EXPORT_SYMBOL_GPL(inet_dccp_listen);
static void dccp_terminate_connection(struct sock *sk)
{
u8 next_state = DCCP_CLOSED;
switch (sk->sk_state) {
case DCCP_PASSIVE_CLOSE:
case DCCP_PASSIVE_CLOSEREQ:
dccp_finish_passive_close(sk);
break;
case DCCP_PARTOPEN:
dccp_pr_debug("Stop PARTOPEN timer (%p)\n", sk);
inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
/* fall through */
case DCCP_OPEN:
dccp_send_close(sk, 1);
if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER &&
!dccp_sk(sk)->dccps_server_timewait)
next_state = DCCP_ACTIVE_CLOSEREQ;
else
next_state = DCCP_CLOSING;
/* fall through */
default:
dccp_set_state(sk, next_state);
}
}
void dccp_close(struct sock *sk, long timeout)
{
struct dccp_sock *dp = dccp_sk(sk);
struct sk_buff *skb;
u32 data_was_unread = 0;
int state;
lock_sock(sk);
sk->sk_shutdown = SHUTDOWN_MASK;
if (sk->sk_state == DCCP_LISTEN) {
dccp_set_state(sk, DCCP_CLOSED);
/* Special case. */
inet_csk_listen_stop(sk);
goto adjudge_to_death;
}
sk_stop_timer(sk, &dp->dccps_xmit_timer);
/*
* We need to flush the recv. buffs. We do this only on the
* descriptor close, not protocol-sourced closes, because the
* reader process may not have drained the data yet!
*/
while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
data_was_unread += skb->len;
__kfree_skb(skb);
}
if (data_was_unread) {
/* Unread data was tossed, send an appropriate Reset Code */
DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread);
dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
dccp_set_state(sk, DCCP_CLOSED);
} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
/* Check zero linger _after_ checking for unread data. */
sk->sk_prot->disconnect(sk, 0);
} else if (sk->sk_state != DCCP_CLOSED) {
/*
* Normal connection termination. May need to wait if there are
* still packets in the TX queue that are delayed by the CCID.
*/
dccp_flush_write_queue(sk, &timeout);
dccp_terminate_connection(sk);
}
/*
* Flush write queue. This may be necessary in several cases:
* - we have been closed by the peer but still have application data;
* - abortive termination (unread data or zero linger time),
* - normal termination but queue could not be flushed within time limit
*/
__skb_queue_purge(&sk->sk_write_queue);
sk_stream_wait_close(sk, timeout);
adjudge_to_death:
state = sk->sk_state;
sock_hold(sk);
sock_orphan(sk);
/*
* It is the last release_sock in its life. It will remove backlog.
*/
release_sock(sk);
/*
* Now socket is owned by kernel and we acquire BH lock
* to finish close. No need to check for user refs.
*/
local_bh_disable();
bh_lock_sock(sk);
WARN_ON(sock_owned_by_user(sk));
percpu_counter_inc(sk->sk_prot->orphan_count);
/* Have we already been destroyed by a softirq or backlog? */
if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
goto out;
if (sk->sk_state == DCCP_CLOSED)
inet_csk_destroy_sock(sk);
/* Otherwise, socket is reprieved until protocol close. */
out:
bh_unlock_sock(sk);
local_bh_enable();
sock_put(sk);
}
EXPORT_SYMBOL_GPL(dccp_close);
void dccp_shutdown(struct sock *sk, int how)
{
dccp_pr_debug("called shutdown(%x)\n", how);
}
EXPORT_SYMBOL_GPL(dccp_shutdown);
static inline int dccp_mib_init(void)
{
return snmp_mib_init((void __percpu **)dccp_statistics,
sizeof(struct dccp_mib),
__alignof__(struct dccp_mib));
}
static inline void dccp_mib_exit(void)
{
snmp_mib_free((void __percpu **)dccp_statistics);
}
static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");
#ifdef CONFIG_IP_DCCP_DEBUG
bool dccp_debug;
module_param(dccp_debug, bool, 0644);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");
EXPORT_SYMBOL_GPL(dccp_debug);
#endif
static int __init dccp_init(void)
{
unsigned long goal;
int ehash_order, bhash_order, i;
int rc;
BUILD_BUG_ON(sizeof(struct dccp_skb_cb) >
FIELD_SIZEOF(struct sk_buff, cb));
rc = percpu_counter_init(&dccp_orphan_count, 0);
if (rc)
goto out_fail;
rc = -ENOBUFS;
inet_hashinfo_init(&dccp_hashinfo);
dccp_hashinfo.bind_bucket_cachep =
kmem_cache_create("dccp_bind_bucket",
sizeof(struct inet_bind_bucket), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!dccp_hashinfo.bind_bucket_cachep)
goto out_free_percpu;
/*
* Size and allocate the main established and bind bucket
* hash tables.
*
* The methodology is similar to that of the buffer cache.
*/
if (totalram_pages >= (128 * 1024))
goal = totalram_pages >> (21 - PAGE_SHIFT);
else
goal = totalram_pages >> (23 - PAGE_SHIFT);
if (thash_entries)
goal = (thash_entries *
sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
;
do {
unsigned long hash_size = (1UL << ehash_order) * PAGE_SIZE /
sizeof(struct inet_ehash_bucket);
while (hash_size & (hash_size - 1))
hash_size--;
dccp_hashinfo.ehash_mask = hash_size - 1;
dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, ehash_order);
} while (!dccp_hashinfo.ehash && --ehash_order > 0);
if (!dccp_hashinfo.ehash) {
DCCP_CRIT("Failed to allocate DCCP established hash table");
goto out_free_bind_bucket_cachep;
}
for (i = 0; i <= dccp_hashinfo.ehash_mask; i++) {
INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i);
INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].twchain, i);
}
if (inet_ehash_locks_alloc(&dccp_hashinfo))
goto out_free_dccp_ehash;
bhash_order = ehash_order;
do {
dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
sizeof(struct inet_bind_hashbucket);
if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
bhash_order > 0)
continue;
dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, bhash_order);
} while (!dccp_hashinfo.bhash && --bhash_order >= 0);
if (!dccp_hashinfo.bhash) {
DCCP_CRIT("Failed to allocate DCCP bind hash table");
goto out_free_dccp_locks;
}
for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
spin_lock_init(&dccp_hashinfo.bhash[i].lock);
INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
}
rc = dccp_mib_init();
if (rc)
goto out_free_dccp_bhash;
rc = dccp_ackvec_init();
if (rc)
goto out_free_dccp_mib;
rc = dccp_sysctl_init();
if (rc)
goto out_ackvec_exit;
rc = ccid_initialize_builtins();
if (rc)
goto out_sysctl_exit;
dccp_timestamping_init();
return 0;
out_sysctl_exit:
dccp_sysctl_exit();
out_ackvec_exit:
dccp_ackvec_exit();
out_free_dccp_mib:
dccp_mib_exit();
out_free_dccp_bhash:
free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
out_free_dccp_locks:
inet_ehash_locks_free(&dccp_hashinfo);
out_free_dccp_ehash:
free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
out_free_bind_bucket_cachep:
kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
out_free_percpu:
percpu_counter_destroy(&dccp_orphan_count);
out_fail:
dccp_hashinfo.bhash = NULL;
dccp_hashinfo.ehash = NULL;
dccp_hashinfo.bind_bucket_cachep = NULL;
return rc;
}
static void __exit dccp_fini(void)
{
ccid_cleanup_builtins();
dccp_mib_exit();
free_pages((unsigned long)dccp_hashinfo.bhash,
get_order(dccp_hashinfo.bhash_size *
sizeof(struct inet_bind_hashbucket)));
free_pages((unsigned long)dccp_hashinfo.ehash,
get_order((dccp_hashinfo.ehash_mask + 1) *
sizeof(struct inet_ehash_bucket)));
inet_ehash_locks_free(&dccp_hashinfo);
kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
dccp_ackvec_exit();
dccp_sysctl_exit();
percpu_counter_destroy(&dccp_orphan_count);
}
module_init(dccp_init);
module_exit(dccp_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");
| gpl-2.0 |
Perferom/android_kernel_samsung_msm | arch/h8300/kernel/ptrace.c | 3144 | 3802 | /*
* linux/arch/h8300/kernel/ptrace.c
*
* Yoshinori Sato <ysato@users.sourceforge.jp>
*
* Based on:
* linux/arch/m68k/kernel/ptrace.c
*
* Copyright (C) 1994 by Hamish Macdonald
* Taken from linux/kernel/ptrace.c and modified for M680x0.
* linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of
* this archive for more details.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/signal.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/signal.h>
/* cpu depend functions */
extern long h8300_get_reg(struct task_struct *task, int regno);
extern int h8300_put_reg(struct task_struct *task, int regno, unsigned long data);
void user_disable_single_step(struct task_struct *child)
{
}
/*
 * Does not yet catch signals sent when the child dies;
 * that would need to be done in exit.c or in signal.c.
*/
void ptrace_disable(struct task_struct *child)
{
user_disable_single_step(child);
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
int ret;
int regno = addr >> 2;
unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
unsigned long tmp = 0;
if ((addr & 3) || addr >= sizeof(struct user)) {
ret = -EIO;
break ;
}
ret = 0; /* Default return condition */
if (regno < H8300_REGS_NO)
tmp = h8300_get_reg(child, regno);
else {
switch (regno) {
case 49:
tmp = child->mm->start_code;
break ;
case 50:
tmp = child->mm->start_data;
break ;
case 51:
tmp = child->mm->end_code;
break ;
case 52:
tmp = child->mm->end_data;
break ;
default:
ret = -EIO;
}
}
if (!ret)
ret = put_user(tmp, datap);
break ;
}
/* when I and D space are separate, this will have to be fixed. */
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
if ((addr & 3) || addr >= sizeof(struct user)) {
ret = -EIO;
break ;
}
if (regno == PT_ORIG_ER0) {
ret = -EIO;
break ;
}
if (regno < H8300_REGS_NO) {
ret = h8300_put_reg(child, regno, data);
break ;
}
ret = -EIO;
break ;
case PTRACE_GETREGS: { /* Get all gp regs from the child. */
int i;
unsigned long tmp;
for (i = 0; i < H8300_REGS_NO; i++) {
tmp = h8300_get_reg(child, i);
if (put_user(tmp, datap)) {
ret = -EFAULT;
break;
}
datap++;
}
ret = 0;
break;
}
case PTRACE_SETREGS: { /* Set all gp regs in the child. */
int i;
unsigned long tmp;
for (i = 0; i < H8300_REGS_NO; i++) {
if (get_user(tmp, datap)) {
ret = -EFAULT;
break;
}
h8300_put_reg(child, i, tmp);
datap++;
}
ret = 0;
break;
}
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
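/*
 * Illustrative tracer fragment (pid and regno are hypothetical): USER-area
 * offsets are word aligned, addr = regno << 2, as decoded above.
 *
 *	errno = 0;
 *	long val = ptrace(PTRACE_PEEKUSR, pid, (void *)(regno << 2), 0);
 *	if (val == -1 && errno)
 *		perror("PTRACE_PEEKUSR");
 *
 * Offsets 49..52 map to start_code, start_data, end_code and end_data
 * of the traced process, as handled in the switch above.
 */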
asmlinkage void do_syscall_trace(void)
{
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return;
if (!(current->ptrace & PT_PTRACED))
return;
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
? 0x80 : 0));
/*
* this isn't the same as continuing with a signal, but it will do
* for normal use. strace only continues with a signal if the
* stopping signal is not SIGTRAP. -brl
*/
if (current->exit_code) {
send_sig(current->exit_code, current, 1);
current->exit_code = 0;
}
}
| gpl-2.0 |
cmartinbaughman/shooter-ics-sense | arch/arm/mach-omap1/id.c | 4168 | 6447 | /*
* linux/arch/arm/mach-omap1/id.c
*
* OMAP1 CPU identification code
*
* Copyright (C) 2004 Nokia Corporation
* Written by Tony Lindgren <tony@atomide.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <plat/cpu.h>
#define OMAP_DIE_ID_0 0xfffe1800
#define OMAP_DIE_ID_1 0xfffe1804
#define OMAP_PRODUCTION_ID_0 0xfffe2000
#define OMAP_PRODUCTION_ID_1 0xfffe2004
#define OMAP32_ID_0 0xfffed400
#define OMAP32_ID_1 0xfffed404
struct omap_id {
u16 jtag_id; /* Used to determine OMAP type */
u8 die_rev; /* Processor revision */
u32 omap_id; /* OMAP revision */
u32 type; /* Cpu id bits [31:08], cpu class bits [07:00] */
};
static unsigned int omap_revision;
/* Register values to detect the OMAP version */
static struct omap_id omap_ids[] __initdata = {
{ .jtag_id = 0xb574, .die_rev = 0x2, .omap_id = 0x03310315, .type = 0x03100000},
{ .jtag_id = 0x355f, .die_rev = 0x0, .omap_id = 0x03320000, .type = 0x07300100},
{ .jtag_id = 0xb55f, .die_rev = 0x0, .omap_id = 0x03320000, .type = 0x07300300},
{ .jtag_id = 0xb62c, .die_rev = 0x1, .omap_id = 0x03320500, .type = 0x08500000},
{ .jtag_id = 0xb470, .die_rev = 0x0, .omap_id = 0x03310100, .type = 0x15100000},
{ .jtag_id = 0xb576, .die_rev = 0x0, .omap_id = 0x03320000, .type = 0x16100000},
{ .jtag_id = 0xb576, .die_rev = 0x2, .omap_id = 0x03320100, .type = 0x16110000},
{ .jtag_id = 0xb576, .die_rev = 0x3, .omap_id = 0x03320100, .type = 0x16100c00},
{ .jtag_id = 0xb576, .die_rev = 0x0, .omap_id = 0x03320200, .type = 0x16100d00},
{ .jtag_id = 0xb613, .die_rev = 0x0, .omap_id = 0x03320300, .type = 0x1610ef00},
{ .jtag_id = 0xb613, .die_rev = 0x0, .omap_id = 0x03320300, .type = 0x1610ef00},
{ .jtag_id = 0xb576, .die_rev = 0x1, .omap_id = 0x03320100, .type = 0x16110000},
{ .jtag_id = 0xb58c, .die_rev = 0x2, .omap_id = 0x03320200, .type = 0x16110b00},
{ .jtag_id = 0xb58c, .die_rev = 0x3, .omap_id = 0x03320200, .type = 0x16110c00},
{ .jtag_id = 0xb65f, .die_rev = 0x0, .omap_id = 0x03320400, .type = 0x16212300},
{ .jtag_id = 0xb65f, .die_rev = 0x1, .omap_id = 0x03320400, .type = 0x16212300},
{ .jtag_id = 0xb65f, .die_rev = 0x1, .omap_id = 0x03320500, .type = 0x16212300},
{ .jtag_id = 0xb5f7, .die_rev = 0x0, .omap_id = 0x03330000, .type = 0x17100000},
{ .jtag_id = 0xb5f7, .die_rev = 0x1, .omap_id = 0x03330100, .type = 0x17100000},
{ .jtag_id = 0xb5f7, .die_rev = 0x2, .omap_id = 0x03330100, .type = 0x17100000},
};
unsigned int omap_rev(void)
{
return omap_revision;
}
EXPORT_SYMBOL(omap_rev);
/*
* Get OMAP type from PROD_ID.
* 1710 has the PROD_ID in bits 15:00, not in 16:01 as documented in TRM.
* 1510 PROD_ID is empty, and 1610 PROD_ID does not make sense.
 * Undocumented register in TEST BLOCK is used as fallback; this seems to
* work on 1510, 1610 & 1710. The official way hopefully will work in future
* processors.
*/
static u16 __init omap_get_jtag_id(void)
{
u32 prod_id, omap_id;
prod_id = omap_readl(OMAP_PRODUCTION_ID_1);
omap_id = omap_readl(OMAP32_ID_1);
/* Check for unusable OMAP_PRODUCTION_ID_1 on 1611B/5912 and 730/850 */
if (((prod_id >> 20) == 0) || (prod_id == omap_id))
prod_id = 0;
else
prod_id &= 0xffff;
if (prod_id)
return prod_id;
/* Use OMAP32_ID_1 as fallback */
prod_id = ((omap_id >> 12) & 0xffff);
return prod_id;
}
/*
* Get OMAP revision from DIE_REV.
* Early 1710 processors may have broken OMAP_DIE_ID, it contains PROD_ID.
* Undocumented register in the TEST BLOCK is used as fallback.
* REVISIT: This does not seem to work on 1510
*/
static u8 __init omap_get_die_rev(void)
{
u32 die_rev;
die_rev = omap_readl(OMAP_DIE_ID_1);
/* Check for broken OMAP_DIE_ID on early 1710 */
if (((die_rev >> 12) & 0xffff) == omap_get_jtag_id())
die_rev = 0;
die_rev = (die_rev >> 17) & 0xf;
if (die_rev)
return die_rev;
die_rev = (omap_readl(OMAP32_ID_1) >> 28) & 0xf;
return die_rev;
}
void __init omap_check_revision(void)
{
int i;
u16 jtag_id;
u8 die_rev;
u32 omap_id;
u8 cpu_type;
jtag_id = omap_get_jtag_id();
die_rev = omap_get_die_rev();
omap_id = omap_readl(OMAP32_ID_0);
#ifdef DEBUG
printk(KERN_DEBUG "OMAP_DIE_ID_0: 0x%08x\n", omap_readl(OMAP_DIE_ID_0));
printk(KERN_DEBUG "OMAP_DIE_ID_1: 0x%08x DIE_REV: %i\n",
omap_readl(OMAP_DIE_ID_1),
(omap_readl(OMAP_DIE_ID_1) >> 17) & 0xf);
printk(KERN_DEBUG "OMAP_PRODUCTION_ID_0: 0x%08x\n",
omap_readl(OMAP_PRODUCTION_ID_0));
printk(KERN_DEBUG "OMAP_PRODUCTION_ID_1: 0x%08x JTAG_ID: 0x%04x\n",
omap_readl(OMAP_PRODUCTION_ID_1),
omap_readl(OMAP_PRODUCTION_ID_1) & 0xffff);
printk(KERN_DEBUG "OMAP32_ID_0: 0x%08x\n", omap_readl(OMAP32_ID_0));
printk(KERN_DEBUG "OMAP32_ID_1: 0x%08x\n", omap_readl(OMAP32_ID_1));
printk(KERN_DEBUG "JTAG_ID: 0x%04x DIE_REV: %i\n", jtag_id, die_rev);
#endif
system_serial_high = omap_readl(OMAP_DIE_ID_0);
system_serial_low = omap_readl(OMAP_DIE_ID_1);
/* First check only the major version in a safe way */
for (i = 0; i < ARRAY_SIZE(omap_ids); i++) {
if (jtag_id == (omap_ids[i].jtag_id)) {
omap_revision = omap_ids[i].type;
break;
}
}
/* Check if we can find the die revision */
for (i = 0; i < ARRAY_SIZE(omap_ids); i++) {
if (jtag_id == omap_ids[i].jtag_id && die_rev == omap_ids[i].die_rev) {
omap_revision = omap_ids[i].type;
break;
}
}
/* Finally check also the omap_id */
for (i = 0; i < ARRAY_SIZE(omap_ids); i++) {
if (jtag_id == omap_ids[i].jtag_id
&& die_rev == omap_ids[i].die_rev
&& omap_id == omap_ids[i].omap_id) {
omap_revision = omap_ids[i].type;
break;
}
}
/* Add the cpu class info (7xx, 15xx, 16xx, 24xx) */
cpu_type = omap_revision >> 24;
switch (cpu_type) {
case 0x07:
case 0x08:
omap_revision |= 0x07;
break;
case 0x03:
case 0x15:
omap_revision |= 0x15;
break;
case 0x16:
case 0x17:
omap_revision |= 0x16;
break;
default:
printk(KERN_INFO "Unknown OMAP cpu type: 0x%02x\n", cpu_type);
}
printk(KERN_INFO "OMAP%04x", omap_revision >> 16);
if ((omap_revision >> 8) & 0xff)
printk(KERN_INFO "%x", (omap_revision >> 8) & 0xff);
printk(KERN_INFO " revision %i handled as %02xxx id: %08x%08x\n",
die_rev, omap_revision & 0xff, system_serial_low,
system_serial_high);
}
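/*
 * Note (for reference): omap_revision ends up packed as
 *
 *	bits [31:16]	OMAP number, printed as "OMAP%04x" (e.g. 0x1610)
 *	bits [15:08]	revision variant, appended when non-zero
 *	bits [07:00]	cpu class (0x07, 0x15 or 0x16, patched in above)
 */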
| gpl-2.0 |
0x7678/SJKernel-gn2 | drivers/input/touchscreen/da9034-ts.c | 4168 | 9081 | /*
* Touchscreen driver for Dialog Semiconductor DA9034
*
* Copyright (C) 2006-2008 Marvell International Ltd.
* Fengwei Yin <fengwei.yin@marvell.com>
* Bin Yang <bin.yang@marvell.com>
* Eric Miao <eric.miao@marvell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/workqueue.h>
#include <linux/mfd/da903x.h>
#include <linux/slab.h>
#define DA9034_MANUAL_CTRL 0x50
#define DA9034_LDO_ADC_EN (1 << 4)
#define DA9034_AUTO_CTRL1 0x51
#define DA9034_AUTO_CTRL2 0x52
#define DA9034_AUTO_TSI_EN (1 << 3)
#define DA9034_PEN_DETECT (1 << 4)
#define DA9034_TSI_CTRL1 0x53
#define DA9034_TSI_CTRL2 0x54
#define DA9034_TSI_X_MSB 0x6c
#define DA9034_TSI_Y_MSB 0x6d
#define DA9034_TSI_XY_LSB 0x6e
enum {
STATE_IDLE, /* wait for pendown */
STATE_BUSY, /* TSI busy sampling */
STATE_STOP, /* sample available */
STATE_WAIT, /* Wait to start next sample */
};
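/*
 * Sampling state machine (summary of da9034_event_handler() below):
 *
 *	IDLE --PEN_DOWN--> BUSY --TSI_READY--> STOP
 *	STOP --PEN_DOWN--> WAIT	(sample reported, resample timer armed)
 *	STOP --PEN_UP----> IDLE
 *	WAIT --TIMEDOUT--> BUSY	(pen still down) or IDLE (pen released)
 */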
enum {
EVENT_PEN_DOWN,
EVENT_PEN_UP,
EVENT_TSI_READY,
EVENT_TIMEDOUT,
};
struct da9034_touch {
struct device *da9034_dev;
struct input_dev *input_dev;
struct delayed_work tsi_work;
struct notifier_block notifier;
int state;
int interval_ms;
int x_inverted;
int y_inverted;
int last_x;
int last_y;
};
static inline int is_pen_down(struct da9034_touch *touch)
{
return da903x_query_status(touch->da9034_dev, DA9034_STATUS_PEN_DOWN);
}
static inline int detect_pen_down(struct da9034_touch *touch, int on)
{
if (on)
return da903x_set_bits(touch->da9034_dev,
DA9034_AUTO_CTRL2, DA9034_PEN_DETECT);
else
return da903x_clr_bits(touch->da9034_dev,
DA9034_AUTO_CTRL2, DA9034_PEN_DETECT);
}
static int read_tsi(struct da9034_touch *touch)
{
uint8_t _x, _y, _v;
int ret;
ret = da903x_read(touch->da9034_dev, DA9034_TSI_X_MSB, &_x);
if (ret)
return ret;
ret = da903x_read(touch->da9034_dev, DA9034_TSI_Y_MSB, &_y);
if (ret)
return ret;
ret = da903x_read(touch->da9034_dev, DA9034_TSI_XY_LSB, &_v);
if (ret)
return ret;
touch->last_x = ((_x << 2) & 0x3fc) | (_v & 0x3);
touch->last_y = ((_y << 2) & 0x3fc) | ((_v & 0xc) >> 2);
return 0;
}
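/*
 * For reference: the TSI reports 10-bit samples split across registers;
 * read_tsi() assembles x = X_MSB[7:0] << 2 | XY_LSB[1:0] and
 * y = Y_MSB[7:0] << 2 | XY_LSB[3:2].
 */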
static inline int start_tsi(struct da9034_touch *touch)
{
return da903x_set_bits(touch->da9034_dev,
DA9034_AUTO_CTRL2, DA9034_AUTO_TSI_EN);
}
static inline int stop_tsi(struct da9034_touch *touch)
{
return da903x_clr_bits(touch->da9034_dev,
DA9034_AUTO_CTRL2, DA9034_AUTO_TSI_EN);
}
static inline void report_pen_down(struct da9034_touch *touch)
{
int x = touch->last_x;
int y = touch->last_y;
x &= 0xfff;
if (touch->x_inverted)
x = 1024 - x;
y &= 0xfff;
if (touch->y_inverted)
y = 1024 - y;
input_report_abs(touch->input_dev, ABS_X, x);
input_report_abs(touch->input_dev, ABS_Y, y);
input_report_key(touch->input_dev, BTN_TOUCH, 1);
input_sync(touch->input_dev);
}
static inline void report_pen_up(struct da9034_touch *touch)
{
input_report_key(touch->input_dev, BTN_TOUCH, 0);
input_sync(touch->input_dev);
}
static void da9034_event_handler(struct da9034_touch *touch, int event)
{
int err;
switch (touch->state) {
case STATE_IDLE:
if (event != EVENT_PEN_DOWN)
break;
/* Enable auto measurement of the TSI, this will
* automatically disable pen down detection
*/
err = start_tsi(touch);
if (err)
goto err_reset;
touch->state = STATE_BUSY;
break;
case STATE_BUSY:
if (event != EVENT_TSI_READY)
break;
err = read_tsi(touch);
if (err)
goto err_reset;
/* Disable auto measurement of the TSI, so that
* pen down status will be available
*/
err = stop_tsi(touch);
if (err)
goto err_reset;
touch->state = STATE_STOP;
/* FIXME: PEN_{UP/DOWN} events are expected to become
 * available once the TSI is stopped, but this is found
 * not to always be true; delaying and simulating such
 * an event here is more reliable
*/
mdelay(1);
da9034_event_handler(touch,
is_pen_down(touch) ? EVENT_PEN_DOWN :
EVENT_PEN_UP);
break;
case STATE_STOP:
if (event == EVENT_PEN_DOWN) {
report_pen_down(touch);
schedule_delayed_work(&touch->tsi_work,
msecs_to_jiffies(touch->interval_ms));
touch->state = STATE_WAIT;
}
if (event == EVENT_PEN_UP) {
report_pen_up(touch);
touch->state = STATE_IDLE;
}
break;
case STATE_WAIT:
if (event != EVENT_TIMEDOUT)
break;
if (is_pen_down(touch)) {
start_tsi(touch);
touch->state = STATE_BUSY;
} else {
report_pen_up(touch);
touch->state = STATE_IDLE;
}
break;
}
return;
err_reset:
touch->state = STATE_IDLE;
stop_tsi(touch);
detect_pen_down(touch, 1);
}
static void da9034_tsi_work(struct work_struct *work)
{
struct da9034_touch *touch =
container_of(work, struct da9034_touch, tsi_work.work);
da9034_event_handler(touch, EVENT_TIMEDOUT);
}
static int da9034_touch_notifier(struct notifier_block *nb,
unsigned long event, void *data)
{
struct da9034_touch *touch =
container_of(nb, struct da9034_touch, notifier);
if (event & DA9034_EVENT_TSI_READY)
da9034_event_handler(touch, EVENT_TSI_READY);
if ((event & DA9034_EVENT_PEN_DOWN) && touch->state == STATE_IDLE)
da9034_event_handler(touch, EVENT_PEN_DOWN);
return 0;
}
static int da9034_touch_open(struct input_dev *dev)
{
struct da9034_touch *touch = input_get_drvdata(dev);
int ret;
ret = da903x_register_notifier(touch->da9034_dev, &touch->notifier,
DA9034_EVENT_PEN_DOWN | DA9034_EVENT_TSI_READY);
if (ret)
return -EBUSY;
/* Enable ADC LDO */
ret = da903x_set_bits(touch->da9034_dev,
DA9034_MANUAL_CTRL, DA9034_LDO_ADC_EN);
if (ret)
return ret;
/* TSI_DELAY: 3 slots, TSI_SKIP: 3 slots */
ret = da903x_write(touch->da9034_dev, DA9034_TSI_CTRL1, 0x1b);
if (ret)
return ret;
ret = da903x_write(touch->da9034_dev, DA9034_TSI_CTRL2, 0x00);
if (ret)
return ret;
touch->state = STATE_IDLE;
detect_pen_down(touch, 1);
return 0;
}
static void da9034_touch_close(struct input_dev *dev)
{
struct da9034_touch *touch = input_get_drvdata(dev);
da903x_unregister_notifier(touch->da9034_dev, &touch->notifier,
DA9034_EVENT_PEN_DOWN | DA9034_EVENT_TSI_READY);
cancel_delayed_work_sync(&touch->tsi_work);
touch->state = STATE_IDLE;
stop_tsi(touch);
detect_pen_down(touch, 0);
/* Disable ADC LDO */
da903x_clr_bits(touch->da9034_dev,
DA9034_MANUAL_CTRL, DA9034_LDO_ADC_EN);
}
static int __devinit da9034_touch_probe(struct platform_device *pdev)
{
struct da9034_touch_pdata *pdata = pdev->dev.platform_data;
struct da9034_touch *touch;
struct input_dev *input_dev;
int ret;
touch = kzalloc(sizeof(struct da9034_touch), GFP_KERNEL);
if (touch == NULL) {
dev_err(&pdev->dev, "failed to allocate driver data\n");
return -ENOMEM;
}
touch->da9034_dev = pdev->dev.parent;
if (pdata) {
touch->interval_ms = pdata->interval_ms;
touch->x_inverted = pdata->x_inverted;
touch->y_inverted = pdata->y_inverted;
} else
/* fallback into default */
touch->interval_ms = 10;
INIT_DELAYED_WORK(&touch->tsi_work, da9034_tsi_work);
touch->notifier.notifier_call = da9034_touch_notifier;
input_dev = input_allocate_device();
if (!input_dev) {
dev_err(&pdev->dev, "failed to allocate input device\n");
ret = -ENOMEM;
goto err_free_touch;
}
input_dev->name = pdev->name;
input_dev->open = da9034_touch_open;
input_dev->close = da9034_touch_close;
input_dev->dev.parent = &pdev->dev;
__set_bit(EV_ABS, input_dev->evbit);
__set_bit(ABS_X, input_dev->absbit);
__set_bit(ABS_Y, input_dev->absbit);
input_set_abs_params(input_dev, ABS_X, 0, 1023, 0, 0);
input_set_abs_params(input_dev, ABS_Y, 0, 1023, 0, 0);
__set_bit(EV_KEY, input_dev->evbit);
__set_bit(BTN_TOUCH, input_dev->keybit);
touch->input_dev = input_dev;
input_set_drvdata(input_dev, touch);
ret = input_register_device(input_dev);
if (ret)
goto err_free_input;
platform_set_drvdata(pdev, touch);
return 0;
err_free_input:
input_free_device(input_dev);
err_free_touch:
kfree(touch);
return ret;
}
static int __devexit da9034_touch_remove(struct platform_device *pdev)
{
struct da9034_touch *touch = platform_get_drvdata(pdev);
input_unregister_device(touch->input_dev);
kfree(touch);
return 0;
}
static struct platform_driver da9034_touch_driver = {
.driver = {
.name = "da9034-touch",
.owner = THIS_MODULE,
},
.probe = da9034_touch_probe,
.remove = __devexit_p(da9034_touch_remove),
};
static int __init da9034_touch_init(void)
{
return platform_driver_register(&da9034_touch_driver);
}
module_init(da9034_touch_init);
static void __exit da9034_touch_exit(void)
{
platform_driver_unregister(&da9034_touch_driver);
}
module_exit(da9034_touch_exit);
MODULE_DESCRIPTION("Touchscreen driver for Dialog Semiconductor DA9034");
MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>, Bin Yang <bin.yang@marvell.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:da9034-touch");
| gpl-2.0 |
mikeljim/fb-mptcp | drivers/xen/xen-stub.c | 4680 | 2857 | /*
* xen-stub.c - stub drivers to reserve space for Xen
*
* Copyright (C) 2012 Intel Corporation
* Author: Liu Jinsong <jinsong.liu@intel.com>
* Author: Jiang Yunhong <yunhong.jiang@intel.com>
*
* Copyright (C) 2012 Oracle Inc
* Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/acpi.h>
#include <xen/acpi.h>
#ifdef CONFIG_ACPI
/*--------------------------------------------
stub driver for Xen memory hotplug
--------------------------------------------*/
static const struct acpi_device_id memory_device_ids[] = {
{ACPI_MEMORY_DEVICE_HID, 0},
{"", 0},
};
static struct acpi_driver xen_stub_memory_device_driver = {
/* same name as native memory driver to block native loaded */
.name = "acpi_memhotplug",
.class = ACPI_MEMORY_DEVICE_CLASS,
.ids = memory_device_ids,
};
int xen_stub_memory_device_init(void)
{
if (!xen_initial_domain())
return -ENODEV;
/* just reserve space for Xen, block native driver loaded */
return acpi_bus_register_driver(&xen_stub_memory_device_driver);
}
EXPORT_SYMBOL_GPL(xen_stub_memory_device_init);
subsys_initcall(xen_stub_memory_device_init);
void xen_stub_memory_device_exit(void)
{
acpi_bus_unregister_driver(&xen_stub_memory_device_driver);
}
EXPORT_SYMBOL_GPL(xen_stub_memory_device_exit);
/*--------------------------------------------
stub driver for Xen cpu hotplug
--------------------------------------------*/
static const struct acpi_device_id processor_device_ids[] = {
{ACPI_PROCESSOR_OBJECT_HID, 0},
{ACPI_PROCESSOR_DEVICE_HID, 0},
{"", 0},
};
static struct acpi_driver xen_stub_processor_driver = {
/* same name as native processor driver to block native loaded */
.name = "processor",
.class = ACPI_PROCESSOR_CLASS,
.ids = processor_device_ids,
};
int xen_stub_processor_init(void)
{
if (!xen_initial_domain())
return -ENODEV;
/* just reserve space for Xen, block native driver loaded */
return acpi_bus_register_driver(&xen_stub_processor_driver);
}
EXPORT_SYMBOL_GPL(xen_stub_processor_init);
subsys_initcall(xen_stub_processor_init);
void xen_stub_processor_exit(void)
{
acpi_bus_unregister_driver(&xen_stub_processor_driver);
}
EXPORT_SYMBOL_GPL(xen_stub_processor_exit);
#endif
| gpl-2.0 |
jfdsmabalot/kernel_mako | drivers/i2c/busses/i2c-sis630.c | 4936 | 13905 | /*
Copyright (c) 2002,2003 Alexander Malysh <amalysh@web.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Changes:
24.08.2002
Fixed the typo in sis630_access (Thanks to Mark M. Hoffman)
Changed sis630_transaction.(Thanks to Mark M. Hoffman)
18.09.2002
Added SIS730 as supported.
21.09.2002
Added high_clock module option. If this option is set, the 56KHz
Host Master Clock is used (default 14KHz). For now we save the old
Host Master Clock and restore it after the transaction completes
(otherwise it confuses the BIOS and hangs the machine).
24.09.2002
Fixed typo in sis630_access
Fixed logical error by restoring of Host Master Clock
31.07.2003
Added block data read/write support.
*/
/*
Status: beta
Supports:
SIS 630
SIS 730
Note: we assume there can only be one device, with one SMBus interface.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/acpi.h>
#include <linux/io.h>
/* SIS630 SMBus registers */
#define SMB_STS 0x80 /* status */
#define SMB_EN 0x81 /* status enable */
#define SMB_CNT 0x82
#define SMBHOST_CNT 0x83
#define SMB_ADDR 0x84
#define SMB_CMD 0x85
#define SMB_PCOUNT 0x86 /* processed count */
#define SMB_COUNT 0x87
#define SMB_BYTE 0x88 /* ~0x8F data byte field */
#define SMBDEV_ADDR 0x90
#define SMB_DB0 0x91
#define SMB_DB1 0x92
#define SMB_SAA 0x93
/* register count for request_region */
#define SIS630_SMB_IOREGION 20
/* PCI address constants */
/* acpi base address register */
#define SIS630_ACPI_BASE_REG 0x74
/* bios control register */
#define SIS630_BIOS_CTL_REG 0x40
/* Other settings */
#define MAX_TIMEOUT 500
/* SIS630 constants */
#define SIS630_QUICK 0x00
#define SIS630_BYTE 0x01
#define SIS630_BYTE_DATA 0x02
#define SIS630_WORD_DATA 0x03
#define SIS630_PCALL 0x04
#define SIS630_BLOCK_DATA 0x05
static struct pci_driver sis630_driver;
/* insmod parameters */
static bool high_clock;
static bool force;
module_param(high_clock, bool, 0);
MODULE_PARM_DESC(high_clock, "Set Host Master Clock to 56KHz (default 14KHz).");
module_param(force, bool, 0);
MODULE_PARM_DESC(force, "Forcibly enable the SIS630. DANGEROUS!");
/* acpi base address */
static unsigned short acpi_base;
/* supported chips */
static int supported[] = {
PCI_DEVICE_ID_SI_630,
PCI_DEVICE_ID_SI_730,
0 /* terminates the list */
};
static inline u8 sis630_read(u8 reg)
{
return inb(acpi_base + reg);
}
static inline void sis630_write(u8 reg, u8 data)
{
outb(data, acpi_base + reg);
}
static int sis630_transaction_start(struct i2c_adapter *adap, int size, u8 *oldclock)
{
int temp;
/* Make sure the SMBus host is ready to start transmitting. */
if ((temp = sis630_read(SMB_CNT) & 0x03) != 0x00) {
dev_dbg(&adap->dev, "SMBus busy (%02x).Resetting...\n",temp);
/* kill smbus transaction */
sis630_write(SMBHOST_CNT, 0x20);
if ((temp = sis630_read(SMB_CNT) & 0x03) != 0x00) {
dev_dbg(&adap->dev, "Failed! (%02x)\n", temp);
return -EBUSY;
} else {
dev_dbg(&adap->dev, "Successful!\n");
}
}
/* save the old clock, so we can keep the machine from hanging */
*oldclock = sis630_read(SMB_CNT);
dev_dbg(&adap->dev, "saved clock 0x%02x\n", *oldclock);
/* disable timeout interrupt, set Host Master Clock to 56KHz if requested */
if (high_clock)
sis630_write(SMB_CNT, 0x20);
else
sis630_write(SMB_CNT, (*oldclock & ~0x40));
/* clear all sticky bits */
temp = sis630_read(SMB_STS);
sis630_write(SMB_STS, temp & 0x1e);
/* start the transaction by setting bit 4 and size */
sis630_write(SMBHOST_CNT,0x10 | (size & 0x07));
return 0;
}
static int sis630_transaction_wait(struct i2c_adapter *adap, int size)
{
int temp, result = 0, timeout = 0;
/* We will always wait for a fraction of a second! */
do {
msleep(1);
temp = sis630_read(SMB_STS);
/* check if block transmitted */
if (size == SIS630_BLOCK_DATA && (temp & 0x10))
break;
} while (!(temp & 0x0e) && (timeout++ < MAX_TIMEOUT));
/* If the SMBus is still busy, we give up */
if (timeout > MAX_TIMEOUT) {
dev_dbg(&adap->dev, "SMBus Timeout!\n");
result = -ETIMEDOUT;
}
if (temp & 0x02) {
dev_dbg(&adap->dev, "Error: Failed bus transaction\n");
result = -ENXIO;
}
if (temp & 0x04) {
dev_err(&adap->dev, "Bus collision!\n");
result = -EIO;
/*
TBD: the datasheet says the software should clear this bit
and restart the SMBus operation. Should we do that here, or
leave it to the user to retry the request?
*/
}
return result;
}
static void sis630_transaction_end(struct i2c_adapter *adap, u8 oldclock)
{
int temp = 0;
/* clear all status "sticky" bits */
sis630_write(SMB_STS, temp);
dev_dbg(&adap->dev, "SMB_CNT before clock restore 0x%02x\n", sis630_read(SMB_CNT));
/*
* restore old Host Master Clock if high_clock is set
* and oldclock was not 56KHz
*/
if (high_clock && !(oldclock & 0x20))
sis630_write(SMB_CNT, (sis630_read(SMB_CNT) & ~0x20));
dev_dbg(&adap->dev, "SMB_CNT after clock restore 0x%02x\n", sis630_read(SMB_CNT));
}
static int sis630_transaction(struct i2c_adapter *adap, int size)
{
int result = 0;
u8 oldclock = 0;
result = sis630_transaction_start(adap, size, &oldclock);
if (!result) {
result = sis630_transaction_wait(adap, size);
sis630_transaction_end(adap, oldclock);
}
return result;
}
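/*
 * Illustrative sketch (not part of the driver): a byte-data read from a
 * hypothetical slave at 0x48, register 0x00, is driven by programming the
 * address/command registers and running one transaction, mirroring the
 * sequence in sis630_access() below.
 *
 *	sis630_write(SMB_ADDR, (0x48 << 1) | 1);	// slave 0x48, read
 *	sis630_write(SMB_CMD, 0x00);			// register offset
 *	if (sis630_transaction(adap, SIS630_BYTE_DATA) == 0)
 *		value = sis630_read(SMB_BYTE);		// result byte
 */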
static int sis630_block_data(struct i2c_adapter *adap, union i2c_smbus_data *data, int read_write)
{
int i, len = 0, rc = 0;
u8 oldclock = 0;
if (read_write == I2C_SMBUS_WRITE) {
len = data->block[0];
if (len < 0)
len = 0;
else if (len > 32)
len = 32;
sis630_write(SMB_COUNT, len);
for (i = 1; i <= len; i++) {
dev_dbg(&adap->dev, "set data 0x%02x\n", data->block[i]);
/* set data */
sis630_write(SMB_BYTE + (i - 1) % 8, data->block[i]);
if (i == 8 || (len < 8 && i == len)) {
dev_dbg(&adap->dev, "start trans len=%d i=%d\n", len, i);
/* first transaction */
rc = sis630_transaction_start(adap,
SIS630_BLOCK_DATA, &oldclock);
if (rc)
return rc;
}
else if ((i - 1) % 8 == 7 || i == len) {
dev_dbg(&adap->dev, "trans_wait len=%d i=%d\n", len, i);
if (i > 8) {
dev_dbg(&adap->dev, "clear smbary_sts len=%d i=%d\n", len, i);
/*
If this is not the first transaction,
we must clear the sticky SMBARY_STS bit.
*/
sis630_write(SMB_STS, 0x10);
}
rc = sis630_transaction_wait(adap,
SIS630_BLOCK_DATA);
if (rc) {
dev_dbg(&adap->dev, "trans_wait failed\n");
break;
}
}
}
}
else {
/* read request */
data->block[0] = len = 0;
rc = sis630_transaction_start(adap,
SIS630_BLOCK_DATA, &oldclock);
if (rc)
return rc;
do {
rc = sis630_transaction_wait(adap, SIS630_BLOCK_DATA);
if (rc) {
dev_dbg(&adap->dev, "trans_wait failed\n");
break;
}
/* if this is the first transaction, read the byte count */
if (len == 0)
data->block[0] = sis630_read(SMB_COUNT);
/* just to be sure */
if (data->block[0] > 32)
data->block[0] = 32;
dev_dbg(&adap->dev, "block data read len=0x%x\n", data->block[0]);
for (i = 0; i < 8 && len < data->block[0]; i++, len++) {
dev_dbg(&adap->dev, "read i=%d len=%d\n", i, len);
data->block[len + 1] = sis630_read(SMB_BYTE + i);
}
dev_dbg(&adap->dev, "clear smbary_sts len=%d i=%d\n", len, i);
/* clear SMBARY_STS */
sis630_write(SMB_STS, 0x10);
} while (len < data->block[0]);
}
sis630_transaction_end(adap, oldclock);
return rc;
}
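/*
 * Note on the windowing above: the controller exposes only an 8-byte data
 * window (SMB_BYTE .. SMB_BYTE+7), so a block of up to 32 bytes is moved
 * in chunks of 8, starting or continuing the transaction and clearing the
 * sticky SMBARY_STS bit between chunks.
 */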
/* Return negative errno on error. */
static s32 sis630_access(struct i2c_adapter *adap, u16 addr,
unsigned short flags, char read_write,
u8 command, int size, union i2c_smbus_data *data)
{
int status;
switch (size) {
case I2C_SMBUS_QUICK:
sis630_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01));
size = SIS630_QUICK;
break;
case I2C_SMBUS_BYTE:
sis630_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01));
if (read_write == I2C_SMBUS_WRITE)
sis630_write(SMB_CMD, command);
size = SIS630_BYTE;
break;
case I2C_SMBUS_BYTE_DATA:
sis630_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01));
sis630_write(SMB_CMD, command);
if (read_write == I2C_SMBUS_WRITE)
sis630_write(SMB_BYTE, data->byte);
size = SIS630_BYTE_DATA;
break;
case I2C_SMBUS_PROC_CALL:
case I2C_SMBUS_WORD_DATA:
sis630_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01));
sis630_write(SMB_CMD, command);
if (read_write == I2C_SMBUS_WRITE) {
sis630_write(SMB_BYTE, data->word & 0xff);
sis630_write(SMB_BYTE + 1, (data->word & 0xff00) >> 8);
}
size = (size == I2C_SMBUS_PROC_CALL ? SIS630_PCALL : SIS630_WORD_DATA);
break;
case I2C_SMBUS_BLOCK_DATA:
sis630_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01));
sis630_write(SMB_CMD, command);
size = SIS630_BLOCK_DATA;
return sis630_block_data(adap, data, read_write);
default:
dev_warn(&adap->dev, "Unsupported transaction %d\n",
size);
return -EOPNOTSUPP;
}
status = sis630_transaction(adap, size);
if (status)
return status;
if ((size != SIS630_PCALL) &&
((read_write == I2C_SMBUS_WRITE) || (size == SIS630_QUICK))) {
return 0;
}
switch (size) {
case SIS630_BYTE:
case SIS630_BYTE_DATA:
data->byte = sis630_read(SMB_BYTE);
break;
case SIS630_PCALL:
case SIS630_WORD_DATA:
data->word = sis630_read(SMB_BYTE) + (sis630_read(SMB_BYTE + 1) << 8);
break;
}
return 0;
}
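/*
 * From a client driver this adapter is reached through the generic SMBus
 * helpers; a minimal, hypothetical consumer might do:
 *
 *	s32 v = i2c_smbus_read_byte_data(client, 0x00);
 *	if (v >= 0)
 *		dev_info(&client->dev, "reg 0 = 0x%02x\n", v);
 *
 * The i2c core routes this to sis630_access() with size set to
 * I2C_SMBUS_BYTE_DATA.
 */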
static u32 sis630_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA |
I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_PROC_CALL |
I2C_FUNC_SMBUS_BLOCK_DATA;
}
static int __devinit sis630_setup(struct pci_dev *sis630_dev)
{
unsigned char b;
struct pci_dev *dummy = NULL;
int retval, i;
/* check for supported SiS devices */
for (i = 0; supported[i] > 0; i++) {
if ((dummy = pci_get_device(PCI_VENDOR_ID_SI, supported[i], dummy)))
break; /* found */
}
if (dummy) {
pci_dev_put(dummy);
}
else if (force) {
dev_err(&sis630_dev->dev, "WARNING: Can't detect SIS630 compatible device, but "
"loading anyway because the force option is enabled\n");
}
else {
return -ENODEV;
}
/*
Enable ACPI first, so we can access regs 0x74-0x75
in ACPI I/O space and read the ACPI base address.
*/
if (pci_read_config_byte(sis630_dev, SIS630_BIOS_CTL_REG, &b)) {
dev_err(&sis630_dev->dev, "Error: Can't read bios ctl reg\n");
retval = -ENODEV;
goto exit;
}
/* if ACPI is already enabled, do nothing */
if (!(b & 0x80) &&
pci_write_config_byte(sis630_dev, SIS630_BIOS_CTL_REG, b | 0x80)) {
dev_err(&sis630_dev->dev, "Error: Can't enable ACPI\n");
retval = -ENODEV;
goto exit;
}
/* Determine the ACPI base address */
if (pci_read_config_word(sis630_dev, SIS630_ACPI_BASE_REG, &acpi_base)) {
dev_err(&sis630_dev->dev, "Error: Can't determine ACPI base address\n");
retval = -ENODEV;
goto exit;
}
dev_dbg(&sis630_dev->dev, "ACPI base at 0x%04x\n", acpi_base);
retval = acpi_check_region(acpi_base + SMB_STS, SIS630_SMB_IOREGION,
sis630_driver.name);
if (retval)
goto exit;
/* Everything is happy, let's grab the memory and set things up. */
if (!request_region(acpi_base + SMB_STS, SIS630_SMB_IOREGION,
sis630_driver.name)) {
dev_err(&sis630_dev->dev, "SMBus registers 0x%04x-0x%04x already "
"in use!\n", acpi_base + SMB_STS, acpi_base + SMB_SAA);
retval = -EBUSY;
goto exit;
}
retval = 0;
exit:
if (retval)
acpi_base = 0;
return retval;
}
static const struct i2c_algorithm smbus_algorithm = {
.smbus_xfer = sis630_access,
.functionality = sis630_func,
};
static struct i2c_adapter sis630_adapter = {
.owner = THIS_MODULE,
.class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
.algo = &smbus_algorithm,
};
static DEFINE_PCI_DEVICE_TABLE(sis630_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, sis630_ids);
static int __devinit sis630_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
if (sis630_setup(dev)) {
dev_err(&dev->dev, "SIS630 comp. bus not detected, module not inserted.\n");
return -ENODEV;
}
/* set up the sysfs linkage to our parent device */
sis630_adapter.dev.parent = &dev->dev;
snprintf(sis630_adapter.name, sizeof(sis630_adapter.name),
"SMBus SIS630 adapter at %04x", acpi_base + SMB_STS);
return i2c_add_adapter(&sis630_adapter);
}
static void __devexit sis630_remove(struct pci_dev *dev)
{
if (acpi_base) {
i2c_del_adapter(&sis630_adapter);
release_region(acpi_base + SMB_STS, SIS630_SMB_IOREGION);
acpi_base = 0;
}
}
static struct pci_driver sis630_driver = {
.name = "sis630_smbus",
.id_table = sis630_ids,
.probe = sis630_probe,
.remove = __devexit_p(sis630_remove),
};
static int __init i2c_sis630_init(void)
{
return pci_register_driver(&sis630_driver);
}
static void __exit i2c_sis630_exit(void)
{
pci_unregister_driver(&sis630_driver);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Malysh <amalysh@web.de>");
MODULE_DESCRIPTION("SIS630 SMBus driver");
module_init(i2c_sis630_init);
module_exit(i2c_sis630_exit);
| gpl-2.0 |
cameron581/android_kernel_lge_msm8974 | drivers/platform/x86/amilo-rfkill.c | 4936 | 4252 | /*
* Support for rfkill on some Fujitsu-Siemens Amilo laptops.
* Copyright 2011 Ben Hutchings.
*
* Based in part on the fsam7440 driver, which is:
* Copyright 2005 Alejandro Vidal Mata & Javier Vidal Mata.
* and on the fsaa1655g driver, which is:
* Copyright 2006 Martin Večeřa.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/dmi.h>
#include <linux/i8042.h>
#include <linux/io.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/rfkill.h>
/*
* These values were obtained from disassembling and debugging the
* PM.exe program installed in the Fujitsu-Siemens AMILO A1655G
*/
#define A1655_WIFI_COMMAND 0x10C5
#define A1655_WIFI_ON 0x25
#define A1655_WIFI_OFF 0x45
static int amilo_a1655_rfkill_set_block(void *data, bool blocked)
{
u8 param = blocked ? A1655_WIFI_OFF : A1655_WIFI_ON;
int rc;
i8042_lock_chip();
rc = i8042_command(&param, A1655_WIFI_COMMAND);
i8042_unlock_chip();
return rc;
}
static const struct rfkill_ops amilo_a1655_rfkill_ops = {
.set_block = amilo_a1655_rfkill_set_block
};
/*
* These values were obtained from disassembling the PM.exe program
* installed in the Fujitsu-Siemens AMILO M 7440
*/
#define M7440_PORT1 0x118f
#define M7440_PORT2 0x118e
#define M7440_RADIO_ON1 0x12
#define M7440_RADIO_ON2 0x80
#define M7440_RADIO_OFF1 0x10
#define M7440_RADIO_OFF2 0x00
static int amilo_m7440_rfkill_set_block(void *data, bool blocked)
{
u8 val1 = blocked ? M7440_RADIO_OFF1 : M7440_RADIO_ON1;
u8 val2 = blocked ? M7440_RADIO_OFF2 : M7440_RADIO_ON2;
outb(val1, M7440_PORT1);
outb(val2, M7440_PORT2);
/* Check whether the state has changed correctly */
if (inb(M7440_PORT1) != val1 || inb(M7440_PORT2) != val2)
return -EIO;
return 0;
}
static const struct rfkill_ops amilo_m7440_rfkill_ops = {
.set_block = amilo_m7440_rfkill_set_block
};
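/*
 * Rough sketch of the rfkill core's use of these ops (hypothetical call,
 * not from this file): a userspace soft-block resolves to .set_block with
 * blocked == true, which on the M 7440 writes M7440_RADIO_OFF1/OFF2 to the
 * two ports and verifies them by reading back, returning -EIO on mismatch.
 *
 *	err = amilo_m7440_rfkill_set_block(NULL, true);	// block the radio
 */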
static const struct dmi_system_id __devinitdata amilo_rfkill_id_table[] = {
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
DMI_MATCH(DMI_BOARD_NAME, "AMILO A1655"),
},
.driver_data = (void *)&amilo_a1655_rfkill_ops
},
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
DMI_MATCH(DMI_BOARD_NAME, "AMILO M7440"),
},
.driver_data = (void *)&amilo_m7440_rfkill_ops
},
{}
};
static struct platform_device *amilo_rfkill_pdev;
static struct rfkill *amilo_rfkill_dev;
static int __devinit amilo_rfkill_probe(struct platform_device *device)
{
int rc;
const struct dmi_system_id *system_id =
dmi_first_match(amilo_rfkill_id_table);
if (!system_id)
return -ENXIO;
amilo_rfkill_dev = rfkill_alloc(KBUILD_MODNAME, &device->dev,
RFKILL_TYPE_WLAN,
system_id->driver_data, NULL);
if (!amilo_rfkill_dev)
return -ENOMEM;
rc = rfkill_register(amilo_rfkill_dev);
if (rc)
goto fail;
return 0;
fail:
rfkill_destroy(amilo_rfkill_dev);
return rc;
}
static int amilo_rfkill_remove(struct platform_device *device)
{
rfkill_unregister(amilo_rfkill_dev);
rfkill_destroy(amilo_rfkill_dev);
return 0;
}
static struct platform_driver amilo_rfkill_driver = {
.driver = {
.name = KBUILD_MODNAME,
.owner = THIS_MODULE,
},
.probe = amilo_rfkill_probe,
.remove = amilo_rfkill_remove,
};
static int __init amilo_rfkill_init(void)
{
int rc;
if (dmi_first_match(amilo_rfkill_id_table) == NULL)
return -ENODEV;
rc = platform_driver_register(&amilo_rfkill_driver);
if (rc)
return rc;
amilo_rfkill_pdev = platform_device_register_simple(KBUILD_MODNAME, -1,
NULL, 0);
if (IS_ERR(amilo_rfkill_pdev)) {
rc = PTR_ERR(amilo_rfkill_pdev);
goto fail;
}
return 0;
fail:
platform_driver_unregister(&amilo_rfkill_driver);
return rc;
}
static void __exit amilo_rfkill_exit(void)
{
platform_device_unregister(amilo_rfkill_pdev);
platform_driver_unregister(&amilo_rfkill_driver);
}
MODULE_AUTHOR("Ben Hutchings <ben@decadent.org.uk>");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(dmi, amilo_rfkill_id_table);
module_init(amilo_rfkill_init);
module_exit(amilo_rfkill_exit);
| gpl-2.0 |
lyapota/m8_sense_lollipop | drivers/xen/xenbus/xenbus_client.c | 4936 | 20349 | /******************************************************************************
* Client-facing interface for the Xenbus driver. In other words, the
* interface between the Xenbus and the device-specific code, be it the
* frontend or the backend of that driver.
*
* Copyright (C) 2005 XenSource Ltd
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/xen.h>
#include "xenbus_probe.h"
struct xenbus_map_node {
struct list_head next;
union {
struct vm_struct *area; /* PV */
struct page *page; /* HVM */
};
grant_handle_t handle;
};
static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);
struct xenbus_ring_ops {
int (*map)(struct xenbus_device *dev, int gnt, void **vaddr);
int (*unmap)(struct xenbus_device *dev, void *vaddr);
};
static const struct xenbus_ring_ops *ring_ops __read_mostly;
const char *xenbus_strstate(enum xenbus_state state)
{
static const char *const name[] = {
[ XenbusStateUnknown ] = "Unknown",
[ XenbusStateInitialising ] = "Initialising",
[ XenbusStateInitWait ] = "InitWait",
[ XenbusStateInitialised ] = "Initialised",
[ XenbusStateConnected ] = "Connected",
[ XenbusStateClosing ] = "Closing",
[ XenbusStateClosed ] = "Closed",
[XenbusStateReconfiguring] = "Reconfiguring",
[XenbusStateReconfigured] = "Reconfigured",
};
return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
}
EXPORT_SYMBOL_GPL(xenbus_strstate);
/**
* xenbus_watch_path - register a watch
* @dev: xenbus device
* @path: path to watch
* @watch: watch to register
* @callback: callback to register
*
* Register a @watch on the given path, using the given xenbus_watch structure
* for storage, and the given @callback function as the callback. Return 0 on
* success, or -errno on error. On success, the given @path will be saved as
* @watch->node, and remains the caller's to free. On error, @watch->node will
* be NULL, the device will switch to %XenbusStateClosing, and the error will
* be saved in the store.
*/
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
struct xenbus_watch *watch,
void (*callback)(struct xenbus_watch *,
const char **, unsigned int))
{
int err;
watch->node = path;
watch->callback = callback;
err = register_xenbus_watch(watch);
if (err) {
watch->node = NULL;
watch->callback = NULL;
xenbus_dev_fatal(dev, err, "adding watch on %s", path);
}
return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);
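/*
 * A hedged usage sketch (not from this file): a frontend might watch the
 * backend's state node, where otherend_changed is a hypothetical callback
 * with the signature expected above.
 *
 *	err = xenbus_watch_path(dev, dev->otherend, &priv->watch,
 *				otherend_changed);
 *	if (err)
 *		return err;	// device already switched to Closing
 */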
/**
* xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
* @dev: xenbus device
* @watch: watch to register
* @callback: callback to register
* @pathfmt: format of path to watch
*
* Register a watch on the given @path, using the given xenbus_watch
* structure for storage, and the given @callback function as the callback.
* Return 0 on success, or -errno on error. On success, the watched path
* (constructed from @pathfmt) will be saved as @watch->node, and becomes the caller's to
* kfree(). On error, watch->node will be NULL, so the caller has nothing to
* free, the device will switch to %XenbusStateClosing, and the error will be
* saved in the store.
*/
int xenbus_watch_pathfmt(struct xenbus_device *dev,
struct xenbus_watch *watch,
void (*callback)(struct xenbus_watch *,
const char **, unsigned int),
const char *pathfmt, ...)
{
int err;
va_list ap;
char *path;
va_start(ap, pathfmt);
path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
va_end(ap);
if (!path) {
xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
return -ENOMEM;
}
err = xenbus_watch_path(dev, path, watch, callback);
if (err)
kfree(path);
return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
static void xenbus_switch_fatal(struct xenbus_device *, int, int,
const char *, ...);
static int
__xenbus_switch_state(struct xenbus_device *dev,
enum xenbus_state state, int depth)
{
/* We check whether the state is currently set to the given value, and
if not, then the state is set. We don't want to unconditionally
write the given state, because we don't want to fire watches
unnecessarily. Furthermore, if the node has gone, we don't write
to it, as the device will be tearing down, and we don't want to
resurrect that directory.
Note that, because of this cached value of our state, this
function will not take a caller's Xenstore transaction
(something it was trying to in the past) because dev->state
would not get reset if the transaction was aborted.
*/
struct xenbus_transaction xbt;
int current_state;
int err, abort;
if (state == dev->state)
return 0;
again:
abort = 1;
err = xenbus_transaction_start(&xbt);
if (err) {
xenbus_switch_fatal(dev, depth, err, "starting transaction");
return 0;
}
err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
if (err != 1)
goto abort;
err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
if (err) {
xenbus_switch_fatal(dev, depth, err, "writing new state");
goto abort;
}
abort = 0;
abort:
err = xenbus_transaction_end(xbt, abort);
if (err) {
if (err == -EAGAIN && !abort)
goto again;
xenbus_switch_fatal(dev, depth, err, "ending transaction");
} else
dev->state = state;
return 0;
}
/**
* xenbus_switch_state
* @dev: xenbus device
* @state: new state
*
* Advertise in the store a change of the given driver to the given new_state.
* Return 0 on success, or -errno on error. On error, the device will switch
* to XenbusStateClosing, and the error will be saved in the store.
*/
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
return __xenbus_switch_state(dev, state, 0);
}
EXPORT_SYMBOL_GPL(xenbus_switch_state);
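/*
 * For example, a driver that has finished initialisation would typically
 * advertise it with:
 *
 *	xenbus_switch_state(dev, XenbusStateConnected);
 */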
int xenbus_frontend_closed(struct xenbus_device *dev)
{
xenbus_switch_state(dev, XenbusStateClosed);
complete(&dev->down);
return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);
/**
* Return the path to the error node for the given device, or NULL on failure.
* If the value returned is non-NULL, then it is the caller's to kfree.
*/
static char *error_path(struct xenbus_device *dev)
{
return kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
}
static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
const char *fmt, va_list ap)
{
int ret;
unsigned int len;
char *printf_buffer = NULL;
char *path_buffer = NULL;
#define PRINTF_BUFFER_SIZE 4096
printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
if (printf_buffer == NULL)
goto fail;
len = sprintf(printf_buffer, "%i ", -err);
ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);
BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);
dev_err(&dev->dev, "%s\n", printf_buffer);
path_buffer = error_path(dev);
if (path_buffer == NULL) {
dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
dev->nodename, printf_buffer);
goto fail;
}
if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
dev->nodename, printf_buffer);
goto fail;
}
fail:
kfree(printf_buffer);
kfree(path_buffer);
}
/**
* xenbus_dev_error
* @dev: xenbus device
* @err: error to report
* @fmt: error message format
*
* Report the given negative errno into the store, along with the given
* formatted message.
*/
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
xenbus_va_dev_error(dev, err, fmt, ap);
va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);
/**
* xenbus_dev_fatal
* @dev: xenbus device
* @err: error to report
* @fmt: error message format
*
* Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
* xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
* closedown of this driver and its peer.
*/
void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
xenbus_va_dev_error(dev, err, fmt, ap);
va_end(ap);
xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
/**
* Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
* avoiding recursion within xenbus_switch_state.
*/
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
xenbus_va_dev_error(dev, err, fmt, ap);
va_end(ap);
if (!depth)
__xenbus_switch_state(dev, XenbusStateClosing, 1);
}
/**
* xenbus_grant_ring
* @dev: xenbus device
* @ring_mfn: mfn of ring to grant
* Grant access to the given @ring_mfn to the peer of the given device. Return
* 0 on success, or -errno on error. On error, the device will switch to
* XenbusStateClosing, and the error will be saved in the store.
*/
int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
{
int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
if (err < 0)
xenbus_dev_fatal(dev, err, "granting access to ring page");
return err;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);
/**
* Allocate an event channel for the given xenbus_device, assigning the newly
* created local port to *port. Return 0 on success, or -errno on error. On
* error, the device will switch to XenbusStateClosing, and the error will be
* saved in the store.
*/
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
{
struct evtchn_alloc_unbound alloc_unbound;
int err;
alloc_unbound.dom = DOMID_SELF;
alloc_unbound.remote_dom = dev->otherend_id;
err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
&alloc_unbound);
if (err)
xenbus_dev_fatal(dev, err, "allocating event channel");
else
*port = alloc_unbound.port;
return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
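/*
 * Typical frontend setup pairs the two helpers above: grant the shared
 * ring page to the peer, then allocate an unbound event channel for it.
 * A rough sketch, assuming "ring" is a page the caller allocated:
 *
 *	err = xenbus_grant_ring(dev, virt_to_mfn(ring));
 *	if (err < 0)
 *		return err;
 *	ring_ref = err;			// grant reference on success
 *	err = xenbus_alloc_evtchn(dev, &evtchn);
 *	if (err)
 *		return err;
 *	// then publish "ring-ref" and "event-channel" in XenStore
 */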
/**
* Bind to an existing interdomain event channel in another domain. Returns 0
* on success and stores the local port in *port. On error, returns -errno,
* switches the device to XenbusStateClosing, and saves the error in XenStore.
*/
int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port)
{
struct evtchn_bind_interdomain bind_interdomain;
int err;
bind_interdomain.remote_dom = dev->otherend_id;
bind_interdomain.remote_port = remote_port;
err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
&bind_interdomain);
if (err)
xenbus_dev_fatal(dev, err,
"binding to event channel %d from domain %d",
remote_port, dev->otherend_id);
else
*port = bind_interdomain.local_port;
return err;
}
EXPORT_SYMBOL_GPL(xenbus_bind_evtchn);
/**
* Free an existing event channel. Returns 0 on success or -errno on error.
*/
int xenbus_free_evtchn(struct xenbus_device *dev, int port)
{
struct evtchn_close close;
int err;
close.port = port;
err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
if (err)
xenbus_dev_error(dev, err, "freeing event channel %d", port);
return err;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
/**
* xenbus_map_ring_valloc
* @dev: xenbus device
* @gnt_ref: grant reference
* @vaddr: pointer to address to be filled out by mapping
*
* Based on Rusty Russell's skeleton driver's map_page.
* Map a page of memory into this domain from another domain's grant table.
* xenbus_map_ring_valloc allocates a page of virtual address space, maps the
* page to that address, and sets *vaddr to that address.
* Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
* or -ENOMEM on error. If an error is returned, device will switch to
* XenbusStateClosing and the error message will be saved in XenStore.
*/
int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
{
return ring_ops->map(dev, gnt_ref, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
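/*
 * Backend-side counterpart, as a hedged sketch: map the frontend's grant
 * reference (read from XenStore) and tear the mapping down with the vfree
 * variant when done.
 *
 *	void *ring;
 *	err = xenbus_map_ring_valloc(dev, ring_ref, &ring);
 *	if (err)
 *		return err;
 *	...
 *	xenbus_unmap_ring_vfree(dev, ring);
 */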
static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
int gnt_ref, void **vaddr)
{
struct gnttab_map_grant_ref op = {
.flags = GNTMAP_host_map | GNTMAP_contains_pte,
.ref = gnt_ref,
.dom = dev->otherend_id,
};
struct xenbus_map_node *node;
struct vm_struct *area;
pte_t *pte;
*vaddr = NULL;
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return -ENOMEM;
area = alloc_vm_area(PAGE_SIZE, &pte);
if (!area) {
kfree(node);
return -ENOMEM;
}
op.host_addr = arbitrary_virt_to_machine(pte).maddr;
if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
BUG();
if (op.status != GNTST_okay) {
free_vm_area(area);
kfree(node);
xenbus_dev_fatal(dev, op.status,
"mapping in shared page %d from domain %d",
gnt_ref, dev->otherend_id);
return op.status;
}
node->handle = op.handle;
node->area = area;
spin_lock(&xenbus_valloc_lock);
list_add(&node->next, &xenbus_valloc_pages);
spin_unlock(&xenbus_valloc_lock);
*vaddr = area->addr;
return 0;
}
static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
int gnt_ref, void **vaddr)
{
struct xenbus_map_node *node;
int err;
void *addr;
*vaddr = NULL;
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return -ENOMEM;
err = alloc_xenballooned_pages(1, &node->page, false /* lowmem */);
if (err)
goto out_err;
addr = pfn_to_kaddr(page_to_pfn(node->page));
err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr);
if (err)
goto out_err;
spin_lock(&xenbus_valloc_lock);
list_add(&node->next, &xenbus_valloc_pages);
spin_unlock(&xenbus_valloc_lock);
*vaddr = addr;
return 0;
out_err:
free_xenballooned_pages(1, &node->page);
kfree(node);
return err;
}
/**
* xenbus_map_ring
* @dev: xenbus device
* @gnt_ref: grant reference
* @handle: pointer to grant handle to be filled
* @vaddr: address to be mapped to
*
* Map a page of memory into this domain from another domain's grant table.
* xenbus_map_ring does not allocate the virtual address space (you must do
* this yourself!). It only maps in the page to the specified address.
* Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
* or -ENOMEM on error. If an error is returned, device will switch to
* XenbusStateClosing and the error message will be saved in XenStore.
*/
int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
grant_handle_t *handle, void *vaddr)
{
struct gnttab_map_grant_ref op;
gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map, gnt_ref,
dev->otherend_id);
if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
BUG();
if (op.status != GNTST_okay) {
xenbus_dev_fatal(dev, op.status,
"mapping in shared page %d from domain %d",
gnt_ref, dev->otherend_id);
} else
*handle = op.handle;
return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring);
/**
* xenbus_unmap_ring_vfree
* @dev: xenbus device
* @vaddr: addr to unmap
*
* Based on Rusty Russell's skeleton driver's unmap_page.
* Unmap a page of memory in this domain that was imported from another domain.
* Use xenbus_unmap_ring_vfree if you mapped in your memory with
* xenbus_map_ring_valloc (it will free the virtual address space).
* Returns 0 on success and returns GNTST_* on error
* (see xen/include/interface/grant_table.h).
*/
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
return ring_ops->unmap(dev, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
{
struct xenbus_map_node *node;
struct gnttab_unmap_grant_ref op = {
.host_addr = (unsigned long)vaddr,
};
unsigned int level;
spin_lock(&xenbus_valloc_lock);
list_for_each_entry(node, &xenbus_valloc_pages, next) {
if (node->area->addr == vaddr) {
list_del(&node->next);
goto found;
}
}
node = NULL;
found:
spin_unlock(&xenbus_valloc_lock);
if (!node) {
xenbus_dev_error(dev, -ENOENT,
"can't find mapped virtual address %p", vaddr);
return GNTST_bad_virt_addr;
}
op.handle = node->handle;
op.host_addr = arbitrary_virt_to_machine(
lookup_address((unsigned long)vaddr, &level)).maddr;
if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
BUG();
if (op.status == GNTST_okay)
free_vm_area(node->area);
else
xenbus_dev_error(dev, op.status,
"unmapping page at handle %d error %d",
node->handle, op.status);
kfree(node);
return op.status;
}
static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
{
int rv;
struct xenbus_map_node *node;
void *addr;
spin_lock(&xenbus_valloc_lock);
list_for_each_entry(node, &xenbus_valloc_pages, next) {
addr = pfn_to_kaddr(page_to_pfn(node->page));
if (addr == vaddr) {
list_del(&node->next);
goto found;
}
}
node = addr = NULL;
found:
spin_unlock(&xenbus_valloc_lock);
if (!node) {
xenbus_dev_error(dev, -ENOENT,
"can't find mapped virtual address %p", vaddr);
return GNTST_bad_virt_addr;
}
rv = xenbus_unmap_ring(dev, node->handle, addr);
if (!rv)
free_xenballooned_pages(1, &node->page);
else
WARN(1, "Leaking %p\n", vaddr);
kfree(node);
return rv;
}
/**
* xenbus_unmap_ring
* @dev: xenbus device
* @handle: grant handle
* @vaddr: addr to unmap
*
* Unmap a page of memory in this domain that was imported from another domain.
* Returns 0 on success and returns GNTST_* on error
* (see xen/include/interface/grant_table.h).
*/
int xenbus_unmap_ring(struct xenbus_device *dev,
grant_handle_t handle, void *vaddr)
{
struct gnttab_unmap_grant_ref op;
gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map, handle);
if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
BUG();
if (op.status != GNTST_okay)
xenbus_dev_error(dev, op.status,
"unmapping page at handle %d error %d",
handle, op.status);
return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
/**
* xenbus_read_driver_state
* @path: path for driver
*
* Return the state of the driver rooted at the given store path, or
* XenbusStateUnknown if no state can be read.
*/
enum xenbus_state xenbus_read_driver_state(const char *path)
{
enum xenbus_state result;
int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
if (err)
result = XenbusStateUnknown;
return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
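/*
 * For example, a driver waiting for its peer can poll the other end:
 *
 *	if (xenbus_read_driver_state(dev->otherend) == XenbusStateConnected)
 *		;	// peer is up
 */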
static const struct xenbus_ring_ops ring_ops_pv = {
.map = xenbus_map_ring_valloc_pv,
.unmap = xenbus_unmap_ring_vfree_pv,
};
static const struct xenbus_ring_ops ring_ops_hvm = {
.map = xenbus_map_ring_valloc_hvm,
.unmap = xenbus_unmap_ring_vfree_hvm,
};
void __init xenbus_ring_ops_init(void)
{
if (xen_pv_domain())
ring_ops = &ring_ops_pv;
else
ring_ops = &ring_ops_hvm;
}
| gpl-2.0 |
SlimForce/kernel_lge_hammerhead | drivers/staging/vt6656/wpactl.c | 4936 | 23925 | /*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*
* File: wpactl.c
*
* Purpose: handle wpa supplicant ioctl input/out functions
*
* Author: Lyndon Chen
*
* Date: July 28, 2006
*
* Functions:
*
* Revision History:
*
*/
#include "wpactl.h"
#include "key.h"
#include "mac.h"
#include "device.h"
#include "wmgr.h"
#include "iocmd.h"
#include "iowpa.h"
#include "control.h"
#include "rndis.h"
#include "rf.h"
/*--------------------- Static Definitions -------------------------*/
#define VIAWGET_WPA_MAX_BUF_SIZE 1024
static const int frequency_list[] = {
2412, 2417, 2422, 2427, 2432, 2437, 2442,
2447, 2452, 2457, 2462, 2467, 2472, 2484
};
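/*
 * Index i maps 2.4GHz channel (i + 1) to its centre frequency in MHz,
 * e.g. channel 1 -> 2412, channel 11 -> 2462, channel 14 -> 2484; see the
 * frequency_list[pBSS->uChannel - 1] lookup in wpa_get_scan() below.
 */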
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
static int msglevel = MSG_LEVEL_INFO;
/*--------------------- Static Functions --------------------------*/
/*--------------------- Export Variables --------------------------*/
static void wpadev_setup(struct net_device *dev)
{
dev->type = ARPHRD_IEEE80211;
dev->hard_header_len = ETH_HLEN;
dev->mtu = 2048;
dev->addr_len = ETH_ALEN;
dev->tx_queue_len = 1000;
memset(dev->broadcast, 0xFF, ETH_ALEN);
dev->flags = IFF_BROADCAST | IFF_MULTICAST;
}
/*
* Description:
* register netdev for the wpa supplicant daemon
*
* Parameters:
* In:
* pDevice -
* enable -
* Out:
*
* Return Value:
*
*/
static int wpa_init_wpadev(PSDevice pDevice)
{
PSDevice wpadev_priv;
struct net_device *dev = pDevice->dev;
int ret = 0;
pDevice->wpadev = alloc_netdev(sizeof(PSDevice), "vntwpa", wpadev_setup);
if (pDevice->wpadev == NULL)
return -ENOMEM;
wpadev_priv = netdev_priv(pDevice->wpadev);
*wpadev_priv = *pDevice;
memcpy(pDevice->wpadev->dev_addr, dev->dev_addr, ETH_ALEN);
pDevice->wpadev->base_addr = dev->base_addr;
pDevice->wpadev->irq = dev->irq;
pDevice->wpadev->mem_start = dev->mem_start;
pDevice->wpadev->mem_end = dev->mem_end;
ret = register_netdev(pDevice->wpadev);
if (ret) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: register_netdev(WPA) failed!\n",
dev->name);
free_netdev(pDevice->wpadev);
return -1;
}
if (pDevice->skb == NULL) {
pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
if (pDevice->skb == NULL)
return -ENOMEM;
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Registered netdev %s for WPA management\n",
dev->name, pDevice->wpadev->name);
return 0;
}
/*
* Description:
* unregister net_device (wpadev)
*
* Parameters:
* In:
* pDevice -
* Out:
*
* Return Value:
*
*/
static int wpa_release_wpadev(PSDevice pDevice)
{
if (pDevice->skb) {
dev_kfree_skb(pDevice->skb);
pDevice->skb = NULL;
}
if (pDevice->wpadev) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n",
pDevice->dev->name, pDevice->wpadev->name);
unregister_netdev(pDevice->wpadev);
free_netdev(pDevice->wpadev);
pDevice->wpadev = NULL;
}
return 0;
}
/*
* Description:
* Enable/disable the dev for the wpa supplicant daemon
*
* Parameters:
* In:
* pDevice -
* val -
* Out:
*
* Return Value:
*
*/
int wpa_set_wpadev(PSDevice pDevice, int val)
{
if (val)
return wpa_init_wpadev(pDevice);
return wpa_release_wpadev(pDevice);
}
/*
* Description:
* Set WPA algorithm & keys
*
* Parameters:
* In:
* pDevice -
* param -
* Out:
*
* Return Value:
*
*/
int wpa_set_keys(PSDevice pDevice, void *ctx, BOOL fcpfkernel)
{
struct viawget_wpa_param *param = ctx;
PSMgmtObject pMgmt = &pDevice->sMgmtObj;
DWORD dwKeyIndex = 0;
BYTE abyKey[MAX_KEY_LEN];
BYTE abySeq[MAX_KEY_LEN];
QWORD KeyRSC;
BYTE byKeyDecMode = KEY_CTL_WEP;
int ret = 0;
int uu;
int ii;
if (param->u.wpa_key.alg_name > WPA_ALG_CCMP)
return -EINVAL;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "param->u.wpa_key.alg_name = %d \n",
param->u.wpa_key.alg_name);
if (param->u.wpa_key.alg_name == WPA_ALG_NONE) {
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
pDevice->bEncryptionEnable = FALSE;
pDevice->byKeyIndex = 0;
pDevice->bTransmitKey = FALSE;
for (uu = 0; uu < MAX_KEY_TABLE; uu++) {
MACvDisableKeyEntry(pDevice, uu);
}
return ret;
}
if (param->u.wpa_key.key && param->u.wpa_key.key_len > sizeof(abyKey))
return -EINVAL;
spin_unlock_irq(&pDevice->lock);
if (param->u.wpa_key.key && fcpfkernel) {
memcpy(&abyKey[0], param->u.wpa_key.key, param->u.wpa_key.key_len);
} else {
if (param->u.wpa_key.key &&
copy_from_user(&abyKey[0], param->u.wpa_key.key,
param->u.wpa_key.key_len)) {
spin_lock_irq(&pDevice->lock);
return -EINVAL;
}
}
spin_lock_irq(&pDevice->lock);
dwKeyIndex = (DWORD)(param->u.wpa_key.key_index);
if (param->u.wpa_key.alg_name == WPA_ALG_WEP) {
if (dwKeyIndex > 3) {
return -EINVAL;
} else {
if (param->u.wpa_key.set_tx) {
pDevice->byKeyIndex = (BYTE)dwKeyIndex;
pDevice->bTransmitKey = TRUE;
dwKeyIndex |= (1 << 31);
}
KeybSetDefaultKey( pDevice,
&(pDevice->sKey),
dwKeyIndex & ~(BIT30 | USE_KEYRSC),
param->u.wpa_key.key_len,
NULL,
abyKey,
KEY_CTL_WEP
);
}
pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
pDevice->bEncryptionEnable = TRUE;
return ret;
}
if (param->u.wpa_key.seq && param->u.wpa_key.seq_len > sizeof(abySeq))
return -EINVAL;
spin_unlock_irq(&pDevice->lock);
if (param->u.wpa_key.seq && fcpfkernel) {
memcpy(&abySeq[0], param->u.wpa_key.seq, param->u.wpa_key.seq_len);
} else {
if (param->u.wpa_key.seq &&
copy_from_user(&abySeq[0], param->u.wpa_key.seq,
param->u.wpa_key.seq_len)) {
spin_lock_irq(&pDevice->lock);
return -EINVAL;
}
}
spin_lock_irq(&pDevice->lock);
if (param->u.wpa_key.seq_len > 0) {
for (ii = 0; ii < param->u.wpa_key.seq_len; ii++) {
if (ii < 4)
LODWORD(KeyRSC) |= (abySeq[ii] << (ii * 8));
else
HIDWORD(KeyRSC) |= (abySeq[ii] << ((ii-4) * 8));
}
dwKeyIndex |= 1 << 29;
}
if (param->u.wpa_key.key_index >= MAX_GROUP_KEY) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return dwKeyIndex > 3\n");
return -EINVAL;
}
if (param->u.wpa_key.alg_name == WPA_ALG_TKIP) {
pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
}
if (param->u.wpa_key.alg_name == WPA_ALG_CCMP) {
pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
}
if (param->u.wpa_key.set_tx)
dwKeyIndex |= (1 << 31);
if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled)
byKeyDecMode = KEY_CTL_CCMP;
else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled)
byKeyDecMode = KEY_CTL_TKIP;
else
byKeyDecMode = KEY_CTL_WEP;
// Fix HCT test that set 256 bits KEY and Ndis802_11Encryption3Enabled
if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
if (param->u.wpa_key.key_len == MAX_KEY_LEN)
byKeyDecMode = KEY_CTL_TKIP;
else if (param->u.wpa_key.key_len == WLAN_WEP40_KEYLEN)
byKeyDecMode = KEY_CTL_WEP;
else if (param->u.wpa_key.key_len == WLAN_WEP104_KEYLEN)
byKeyDecMode = KEY_CTL_WEP;
} else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
if (param->u.wpa_key.key_len == WLAN_WEP40_KEYLEN)
byKeyDecMode = KEY_CTL_WEP;
else if (param->u.wpa_key.key_len == WLAN_WEP104_KEYLEN)
byKeyDecMode = KEY_CTL_WEP;
}
// Check TKIP key length
if ((byKeyDecMode == KEY_CTL_TKIP) &&
(param->u.wpa_key.key_len != MAX_KEY_LEN)) {
// TKIP Key must be 256 bits
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return- TKIP Key must be 256 bits!\n");
return -EINVAL;
}
// Check AES key length
if ((byKeyDecMode == KEY_CTL_CCMP) &&
(param->u.wpa_key.key_len != AES_KEY_LEN)) {
// AES Key must be 128 bits
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return - AES Key must be 128 bits\n");
return -EINVAL;
}
if (is_broadcast_ether_addr(&param->addr[0]) || (param->addr == NULL)) {
/* if broadcast, set the key as every key entry's group key */
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Group Key Assign.\n");
if ((KeybSetAllGroupKey(pDevice, &(pDevice->sKey), dwKeyIndex,
param->u.wpa_key.key_len,
(PQWORD) &(KeyRSC),
(PBYTE)abyKey,
byKeyDecMode
) == TRUE) &&
(KeybSetDefaultKey(pDevice,
&(pDevice->sKey),
dwKeyIndex,
param->u.wpa_key.key_len,
(PQWORD) &(KeyRSC),
(PBYTE)abyKey,
byKeyDecMode
) == TRUE) ) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "GROUP Key Assign.\n");
} else {
return -EINVAL;
}
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key Assign.\n");
// BSSID not 0xffffffffffff
// Pairwise Key can't be WEP
if (byKeyDecMode == KEY_CTL_WEP) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key can't be WEP\n");
return -EINVAL;
}
dwKeyIndex |= (1 << 30); // set pairwise key
if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) {
//DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA - WMAC_CONFIG_IBSS_STA\n"));
return -EINVAL;
}
if (KeybSetKey(pDevice, &(pDevice->sKey), &param->addr[0],
dwKeyIndex, param->u.wpa_key.key_len,
(PQWORD) &(KeyRSC), (PBYTE)abyKey, byKeyDecMode
) == TRUE) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key Set\n");
} else {
// Key Table Full
if (!compare_ether_addr(&param->addr[0], pDevice->abyBSSID)) {
//DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA -Key Table Full.2\n"));
return -EINVAL;
} else {
// Save Key and configure just before associate/reassociate to BSSID
// we do not implement now
return -EINVAL;
}
}
} // BSSID not 0xffffffffffff
if ((ret == 0) && ((param->u.wpa_key.set_tx) != 0)) {
pDevice->byKeyIndex = (BYTE)param->u.wpa_key.key_index;
pDevice->bTransmitKey = TRUE;
}
pDevice->bEncryptionEnable = TRUE;
return ret;
}
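/*
 * Hedged sketch of the caller's side: the supplicant fills in a
 * viawget_wpa_param and the ioctl path below invokes wpa_set_keys() under
 * the device lock. Field values here are illustrative only:
 *
 *	param->cmd = VIAWGET_SET_KEY;
 *	param->u.wpa_key.alg_name = WPA_ALG_TKIP;
 *	param->u.wpa_key.key_index = 0;
 *	param->u.wpa_key.set_tx = 1;
 *	// key/key_len and seq/seq_len point into user memory
 */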
/*
* Description:
* enable wpa auth & mode
*
* Parameters:
* In:
* pDevice -
* param -
* Out:
*
* Return Value:
*
*/
static int wpa_set_wpa(PSDevice pDevice, struct viawget_wpa_param *param)
{
PSMgmtObject pMgmt = &pDevice->sMgmtObj;
int ret = 0;
pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
pMgmt->bShareKeyAlgorithm = FALSE;
return ret;
}
/*
* Description:
* set disassociate
*
* Parameters:
* In:
* pDevice -
* param -
* Out:
*
* Return Value:
*
*/
static int wpa_set_disassociate(PSDevice pDevice, struct viawget_wpa_param *param)
{
PSMgmtObject pMgmt = &pDevice->sMgmtObj;
int ret = 0;
spin_lock_irq(&pDevice->lock);
if (pDevice->bLinkPass) {
if (!memcmp(param->addr, pMgmt->abyCurrBSSID, 6))
bScheduleCommand((void *)pDevice, WLAN_CMD_DISASSOCIATE, NULL);
}
spin_unlock_irq(&pDevice->lock);
return ret;
}
/*
* Description:
* enable scan process
*
* Parameters:
* In:
* pDevice -
* param -
* Out:
*
* Return Value:
*
*/
static int wpa_set_scan(PSDevice pDevice, struct viawget_wpa_param *param)
{
int ret = 0;
/* set ap_scan=1 && scan_ssid=1 under hidden SSID mode */
PSMgmtObject pMgmt = &pDevice->sMgmtObj;
PWLAN_IE_SSID pItemSSID;
printk("wpa_set_scan-->desired [ssid=%s,ssid_len=%d]\n",
param->u.scan_req.ssid,param->u.scan_req.ssid_len);
// Set the SSID
memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
pItemSSID->byElementID = WLAN_EID_SSID;
memcpy(pItemSSID->abySSID, param->u.scan_req.ssid, param->u.scan_req.ssid_len);
pItemSSID->len = param->u.scan_req.ssid_len;
spin_lock_irq(&pDevice->lock);
BSSvClearBSSList((void *) pDevice, pDevice->bLinkPass);
bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN,
pMgmt->abyDesireSSID);
spin_unlock_irq(&pDevice->lock);
return ret;
}
/*
* Description:
* get bssid
*
* Parameters:
* In:
* pDevice -
* param -
* Out:
*
* Return Value:
*
*/
static int wpa_get_bssid(PSDevice pDevice, struct viawget_wpa_param *param)
{
PSMgmtObject pMgmt = &pDevice->sMgmtObj;
int ret = 0;
memcpy(param->u.wpa_associate.bssid, pMgmt->abyCurrBSSID, 6);
return ret;
}
/*
* Description:
* get bssid
*
* Parameters:
* In:
* pDevice -
* param -
* Out:
*
* Return Value:
*
*/
static int wpa_get_ssid(PSDevice pDevice, struct viawget_wpa_param *param)
{
PSMgmtObject pMgmt = &pDevice->sMgmtObj;
PWLAN_IE_SSID pItemSSID;
int ret = 0;
pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
memcpy(param->u.wpa_associate.ssid, pItemSSID->abySSID, pItemSSID->len);
param->u.wpa_associate.ssid_len = pItemSSID->len;
return ret;
}
/*
* Description:
* get scan results
*
* Parameters:
* In:
* pDevice -
* param -
* Out:
*
* Return Value:
*
*/
static int wpa_get_scan(PSDevice pDevice, struct viawget_wpa_param *param)
{
struct viawget_scan_result *scan_buf;
PSMgmtObject pMgmt = &pDevice->sMgmtObj;
PWLAN_IE_SSID pItemSSID;
PKnownBSS pBSS;
PBYTE pBuf;
int ret = 0;
u16 count = 0;
u16 ii;
u16 jj;
long ldBm;
/* bubble-sort the BSS list by RSSI strength */
PBYTE ptempBSS;
ptempBSS = kmalloc(sizeof(KnownBSS), GFP_ATOMIC);
if (ptempBSS == NULL) {
printk("bubble sort kmalloc memory fail@@@\n");
ret = -ENOMEM;
return ret;
}
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
for (jj = 0; jj < MAX_BSS_NUM - ii - 1; jj++) {
if ((pMgmt->sBSSList[jj].bActive != TRUE)
|| ((pMgmt->sBSSList[jj].uRSSI > pMgmt->sBSSList[jj + 1].uRSSI)
&& (pMgmt->sBSSList[jj + 1].bActive != FALSE))) {
memcpy(ptempBSS, &pMgmt->sBSSList[jj], sizeof(KnownBSS));
memcpy(&pMgmt->sBSSList[jj], &pMgmt->sBSSList[jj + 1],
sizeof(KnownBSS));
memcpy(&pMgmt->sBSSList[jj + 1], ptempBSS, sizeof(KnownBSS));
}
}
}
kfree(ptempBSS);
count = 0;
pBSS = &(pMgmt->sBSSList[0]);
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
pBSS = &(pMgmt->sBSSList[ii]);
if (!pBSS->bActive)
continue;
count++;
}
pBuf = kcalloc(count, sizeof(struct viawget_scan_result), GFP_ATOMIC);
if (pBuf == NULL) {
ret = -ENOMEM;
return ret;
}
scan_buf = (struct viawget_scan_result *)pBuf;
pBSS = &(pMgmt->sBSSList[0]);
for (ii = 0, jj = 0; ii < MAX_BSS_NUM; ii++) {
pBSS = &(pMgmt->sBSSList[ii]);
if (pBSS->bActive) {
if (jj >= count)
break;
memcpy(scan_buf->bssid, pBSS->abyBSSID, WLAN_BSSID_LEN);
pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID;
memcpy(scan_buf->ssid, pItemSSID->abySSID, pItemSSID->len);
scan_buf->ssid_len = pItemSSID->len;
scan_buf->freq = frequency_list[pBSS->uChannel-1];
scan_buf->caps = pBSS->wCapInfo; // DavidWang for sharemode
RFvRSSITodBm(pDevice, (BYTE)(pBSS->uRSSI), &ldBm);
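/*
 * Linear mapping of signal level to a 0-100 quality figure:
 * -50dBm or better -> 100, -90dBm or worse -> 0; e.g. ldBm = -70
 * gives (40 - 20) * 100 / 40 = 50.
 */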
if (-ldBm < 50)
scan_buf->qual = 100;
else if (-ldBm > 90)
scan_buf->qual = 0;
else
scan_buf->qual = (40 - (-ldBm - 50)) * 100 / 40;
scan_buf->noise = 0;
scan_buf->level = ldBm;
if (pBSS->wWPALen != 0) {
scan_buf->wpa_ie_len = pBSS->wWPALen;
memcpy(scan_buf->wpa_ie, pBSS->byWPAIE, pBSS->wWPALen);
}
if (pBSS->wRSNLen != 0) {
scan_buf->rsn_ie_len = pBSS->wRSNLen;
memcpy(scan_buf->rsn_ie, pBSS->byRSNIE, pBSS->wRSNLen);
}
scan_buf = (struct viawget_scan_result *)((PBYTE)scan_buf + sizeof(struct viawget_scan_result));
jj++;
}
}
if (jj < count)
count = jj;
if (copy_to_user(param->u.scan_results.buf, pBuf, sizeof(struct viawget_scan_result) * count))
ret = -EFAULT;
param->u.scan_results.scan_count = count;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " param->u.scan_results.scan_count = %d\n", count);
kfree(pBuf);
return ret;
}
/*
* Description:
* set associate with AP
*
* Parameters:
* In:
* pDevice -
* param -
* Out:
*
* Return Value:
*
*/
static int wpa_set_associate(PSDevice pDevice, struct viawget_wpa_param *param)
{
PSMgmtObject pMgmt = &pDevice->sMgmtObj;
PWLAN_IE_SSID pItemSSID;
BYTE abyNullAddr[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
BYTE abyWPAIE[64];
int ret = 0;
BOOL bwepEnabled = FALSE;
// set key type & algorithm
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pairwise_suite = %d\n", param->u.wpa_associate.pairwise_suite);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "group_suite = %d\n", param->u.wpa_associate.group_suite);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "key_mgmt_suite = %d\n", param->u.wpa_associate.key_mgmt_suite);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "auth_alg = %d\n", param->u.wpa_associate.auth_alg);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "mode = %d\n", param->u.wpa_associate.mode);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Roaming dBm = %d\n", param->u.wpa_associate.roam_dbm); // Davidwang
if (param->u.wpa_associate.wpa_ie) {
if (param->u.wpa_associate.wpa_ie_len > sizeof(abyWPAIE))
return -EINVAL;
if (copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie,
param->u.wpa_associate.wpa_ie_len))
return -EFAULT;
}
if (param->u.wpa_associate.mode == 1)
pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
else
pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
// set bssid
if (memcmp(param->u.wpa_associate.bssid, &abyNullAddr[0], 6) != 0)
memcpy(pMgmt->abyDesireBSSID, param->u.wpa_associate.bssid, 6);
// set ssid
memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
pItemSSID->byElementID = WLAN_EID_SSID;
pItemSSID->len = param->u.wpa_associate.ssid_len;
memcpy(pItemSSID->abySSID, param->u.wpa_associate.ssid, pItemSSID->len);
if (param->u.wpa_associate.wpa_ie_len == 0) {
if (param->u.wpa_associate.auth_alg & AUTH_ALG_SHARED_KEY)
pMgmt->eAuthenMode = WMAC_AUTH_SHAREKEY;
else
pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
} else if (abyWPAIE[0] == RSN_INFO_ELEM) {
if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_PSK)
pMgmt->eAuthenMode = WMAC_AUTH_WPA2PSK;
else
pMgmt->eAuthenMode = WMAC_AUTH_WPA2;
} else {
if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_WPA_NONE)
pMgmt->eAuthenMode = WMAC_AUTH_WPANONE;
else if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_PSK)
pMgmt->eAuthenMode = WMAC_AUTH_WPAPSK;
else
pMgmt->eAuthenMode = WMAC_AUTH_WPA;
}
switch (param->u.wpa_associate.pairwise_suite) {
case CIPHER_CCMP:
pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
break;
case CIPHER_TKIP:
pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
break;
case CIPHER_WEP40:
case CIPHER_WEP104:
pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
bwepEnabled = TRUE;
break;
case CIPHER_NONE:
if (param->u.wpa_associate.group_suite == CIPHER_CCMP)
pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
else
pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
break;
default:
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
}
pMgmt->Roam_dbm = param->u.wpa_associate.roam_dbm;
if (pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) { /* wep-sharekey */
pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
pMgmt->bShareKeyAlgorithm = TRUE;
} else if (pMgmt->eAuthenMode == WMAC_AUTH_OPEN) {
if (bwepEnabled == TRUE) { /* open-wep */
pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
} else {
/* open only */
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
}
}
/* save the old encryption status */
pDevice->eOldEncryptionStatus = pDevice->eEncryptionStatus;
if (pDevice->eEncryptionStatus != Ndis802_11EncryptionDisabled)
pDevice->bEncryptionEnable = TRUE;
else
pDevice->bEncryptionEnable = FALSE;
if ((pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) ||
((pMgmt->eAuthenMode == WMAC_AUTH_OPEN) && (bwepEnabled == TRUE))) {
/* open-wep and sharekey-wep need no initial key table setup */
} else {
KeyvInitTable(pDevice, &pDevice->sKey);
}
spin_lock_irq(&pDevice->lock);
pDevice->bLinkPass = FALSE;
ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_SLOW);
memset(pMgmt->abyCurrBSSID, 0, 6);
pMgmt->eCurrState = WMAC_STATE_IDLE;
netif_stop_queue(pDevice->dev);
/* search if ap_scan=2, which is an associate request in hidden SSID mode */
{
PKnownBSS pCurr = NULL;
pCurr = BSSpSearchBSSList(pDevice,
pMgmt->abyDesireBSSID,
pMgmt->abyDesireSSID,
pDevice->eConfigPHYMode
);
if (pCurr == NULL) {
printk("wpa_set_associate: hidden mode site survey before associate\n");
bScheduleCommand((void *)pDevice,
WLAN_CMD_BSSID_SCAN,
pMgmt->abyDesireSSID);
}
}
/****************************************************************/
bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, NULL);
spin_unlock_irq(&pDevice->lock);
return ret;
}
/*
* Description:
* wpa_ioctl main function supported for wpa supplicant
*
* Parameters:
* In:
* pDevice -
* iw_point -
* Out:
*
* Return Value:
*
*/
int wpa_ioctl(PSDevice pDevice, struct iw_point *p)
{
struct viawget_wpa_param *param;
int ret = 0;
int wpa_ioctl = 0;
if (p->length < sizeof(struct viawget_wpa_param) ||
p->length > VIAWGET_WPA_MAX_BUF_SIZE || !p->pointer)
return -EINVAL;
param = kmalloc((int)p->length, GFP_KERNEL);
if (param == NULL)
return -ENOMEM;
if (copy_from_user(param, p->pointer, p->length)) {
ret = -EFAULT;
goto out;
}
switch (param->cmd) {
case VIAWGET_SET_WPA:
ret = wpa_set_wpa(pDevice, param);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_WPA \n");
break;
case VIAWGET_SET_KEY:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_KEY \n");
spin_lock_irq(&pDevice->lock);
ret = wpa_set_keys(pDevice, param, FALSE);
spin_unlock_irq(&pDevice->lock);
break;
case VIAWGET_SET_SCAN:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_SCAN \n");
ret = wpa_set_scan(pDevice, param);
break;
case VIAWGET_GET_SCAN:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_SCAN\n");
ret = wpa_get_scan(pDevice, param);
wpa_ioctl = 1;
break;
case VIAWGET_GET_SSID:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_SSID \n");
ret = wpa_get_ssid(pDevice, param);
wpa_ioctl = 1;
break;
case VIAWGET_GET_BSSID:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_BSSID \n");
ret = wpa_get_bssid(pDevice, param);
wpa_ioctl = 1;
break;
case VIAWGET_SET_ASSOCIATE:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_ASSOCIATE \n");
ret = wpa_set_associate(pDevice, param);
break;
case VIAWGET_SET_DISASSOCIATE:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DISASSOCIATE \n");
ret = wpa_set_disassociate(pDevice, param);
break;
case VIAWGET_SET_DROP_UNENCRYPT:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DROP_UNENCRYPT \n");
break;
case VIAWGET_SET_DEAUTHENTICATE:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DEAUTHENTICATE \n");
break;
default:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ioctl: unknown cmd=%d\n",
param->cmd);
kfree(param);
return -EOPNOTSUPP;
}
if ((ret == 0) && wpa_ioctl) {
if (copy_to_user(p->pointer, param, p->length)) {
ret = -EFAULT;
goto out;
}
}
out:
kfree(param);
return ret;
}
| gpl-2.0 |
sgstreet/linux-socfpga | arch/arm/mach-ixp4xx/vulcan-pci.c | 4936 | 1769 | /*
* arch/arm/mach-ixp4xx/vulcan-pci.c
*
* Vulcan board-level PCI initialization
*
* Copyright (C) 2010 Marc Zyngier <maz@misterjones.org>
*
* based on ixdp425-pci.c:
* Copyright (C) 2002 Intel Corporation.
* Copyright (C) 2003-2004 MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <asm/mach/pci.h>
#include <asm/mach-types.h>
/* PCI controller GPIO to IRQ pin mappings */
#define INTA 2
#define INTB 3
void __init vulcan_pci_preinit(void)
{
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
/*
* Cardbus bridge wants way more than the SoC can actually offer,
* and leaves the whole PCI bus in a mess. Artificially limit it
* to 8MB per region. Of course indirect mode doesn't have this
* limitation...
*/
pci_cardbus_mem_size = SZ_8M;
pr_info("Vulcan PCI: limiting CardBus memory size to %dMB\n",
(int)(pci_cardbus_mem_size >> 20));
#endif
irq_set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW);
ixp4xx_pci_preinit();
}
static int __init vulcan_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
if (slot == 1)
return IXP4XX_GPIO_IRQ(INTA);
if (slot == 2)
return IXP4XX_GPIO_IRQ(INTB);
return -1;
}
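/*
 * Slot-to-IRQ mapping example: PCI slot 1 interrupts via GPIO INTA and
 * slot 2 via GPIO INTB, so vulcan_map_irq(dev, 1, 1) == IXP4XX_GPIO_IRQ(2);
 * any other slot gets no interrupt (-1).
 */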
struct hw_pci vulcan_pci __initdata = {
.nr_controllers = 1,
.ops = &ixp4xx_ops,
.preinit = vulcan_pci_preinit,
.setup = ixp4xx_setup,
.map_irq = vulcan_map_irq,
};
int __init vulcan_pci_init(void)
{
if (machine_is_arcom_vulcan())
pci_common_init(&vulcan_pci);
return 0;
}
subsys_initcall(vulcan_pci_init);
| gpl-2.0 |
martyborya/N3-CM-Unified | drivers/staging/vt6656/wpactl.c | 4936 | 23925 | /*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*
* File: wpactl.c
*
* Purpose: handle wpa supplicant ioctl input/out functions
*
* Author: Lyndon Chen
*
* Date: July 28, 2006
*
* Functions:
*
* Revision History:
*
*/
#include "wpactl.h"
#include "key.h"
#include "mac.h"
#include "device.h"
#include "wmgr.h"
#include "iocmd.h"
#include "iowpa.h"
#include "control.h"
#include "rndis.h"
#include "rf.h"
/*--------------------- Static Definitions -------------------------*/
#define VIAWGET_WPA_MAX_BUF_SIZE 1024
static const int frequency_list[] = {
2412, 2417, 2422, 2427, 2432, 2437, 2442,
2447, 2452, 2457, 2462, 2467, 2472, 2484
};
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
static int msglevel = MSG_LEVEL_INFO;
/*--------------------- Static Functions --------------------------*/
/*--------------------- Export Variables --------------------------*/
static void wpadev_setup(struct net_device *dev)
{
dev->type = ARPHRD_IEEE80211;
dev->hard_header_len = ETH_HLEN;
dev->mtu = 2048;
dev->addr_len = ETH_ALEN;
dev->tx_queue_len = 1000;
memset(dev->broadcast, 0xFF, ETH_ALEN);
dev->flags = IFF_BROADCAST | IFF_MULTICAST;
}
/*
* Description:
* register netdev for wpa supplicant daemon
*
* Parameters:
* In:
* pDevice -
* enable -
* Out:
*
* Return Value:
*
*/
static int wpa_init_wpadev(PSDevice pDevice)
{
PSDevice wpadev_priv;
struct net_device *dev = pDevice->dev;
int ret = 0;
/* the private area must hold the whole device struct, not just a pointer */
pDevice->wpadev = alloc_netdev(sizeof(*wpadev_priv), "vntwpa", wpadev_setup);
if (pDevice->wpadev == NULL)
return -ENOMEM;
wpadev_priv = netdev_priv(pDevice->wpadev);
*wpadev_priv = *pDevice;
memcpy(pDevice->wpadev->dev_addr, dev->dev_addr, ETH_ALEN);
pDevice->wpadev->base_addr = dev->base_addr;
pDevice->wpadev->irq = dev->irq;
pDevice->wpadev->mem_start = dev->mem_start;
pDevice->wpadev->mem_end = dev->mem_end;
ret = register_netdev(pDevice->wpadev);
if (ret) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: register_netdev(WPA) failed!\n",
dev->name);
free_netdev(pDevice->wpadev);
return -1;
}
if (pDevice->skb == NULL) {
pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
if (pDevice->skb == NULL)
return -ENOMEM;
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Registered netdev %s for WPA management\n",
dev->name, pDevice->wpadev->name);
return 0;
}
/*
* Description:
* unregister net_device (wpadev)
*
* Parameters:
* In:
* pDevice -
* Out:
*
* Return Value:
*
*/
static int wpa_release_wpadev(PSDevice pDevice)
{
if (pDevice->skb) {
dev_kfree_skb(pDevice->skb);
pDevice->skb = NULL;
}
if (pDevice->wpadev) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n",
pDevice->dev->name, pDevice->wpadev->name);
unregister_netdev(pDevice->wpadev);
free_netdev(pDevice->wpadev);
pDevice->wpadev = NULL;
}
return 0;
}
/*
* Description:
* Set enable/disable dev for wpa supplicant daemon
*
* Parameters:
* In:
* pDevice -
* val -
* Out:
*
* Return Value:
*
*/
int wpa_set_wpadev(PSDevice pDevice, int val)
{
if (val)
return wpa_init_wpadev(pDevice);
return wpa_release_wpadev(pDevice);
}
/*
* Description:
* Set WPA algorithm & keys
*
* Parameters:
* In:
* pDevice -
* param -
* Out:
*
* Return Value:
*
*/
int wpa_set_keys(PSDevice pDevice, void *ctx, BOOL fcpfkernel)
{
struct viawget_wpa_param *param = ctx;
PSMgmtObject pMgmt = &pDevice->sMgmtObj;
DWORD dwKeyIndex = 0;
BYTE abyKey[MAX_KEY_LEN];
BYTE abySeq[MAX_KEY_LEN];
QWORD KeyRSC;
BYTE byKeyDecMode = KEY_CTL_WEP;
int ret = 0;
int uu;
int ii;
if (param->u.wpa_key.alg_name > WPA_ALG_CCMP)
return -EINVAL;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "param->u.wpa_key.alg_name = %d \n",
param->u.wpa_key.alg_name);
if (param->u.wpa_key.alg_name == WPA_ALG_NONE) {
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
pDevice->bEncryptionEnable = FALSE;
pDevice->byKeyIndex = 0;
pDevice->bTransmitKey = FALSE;
for (uu = 0; uu < MAX_KEY_TABLE; uu++) {
MACvDisableKeyEntry(pDevice, uu);
}
return ret;
}
if (param->u.wpa_key.key && param->u.wpa_key.key_len > sizeof(abyKey))
return -EINVAL;
spin_unlock_irq(&pDevice->lock);
if (param->u.wpa_key.key && fcpfkernel) {
memcpy(&abyKey[0], param->u.wpa_key.key, param->u.wpa_key.key_len);
} else {
if (param->u.wpa_key.key &&
copy_from_user(&abyKey[0], param->u.wpa_key.key,
param->u.wpa_key.key_len)) {
spin_lock_irq(&pDevice->lock);
return -EINVAL;
}
}
spin_lock_irq(&pDevice->lock);
dwKeyIndex = (DWORD)(param->u.wpa_key.key_index);
if (param->u.wpa_key.alg_name == WPA_ALG_WEP) {
if (dwKeyIndex > 3) {
return -EINVAL;
} else {
if (param->u.wpa_key.set_tx) {
pDevice->byKeyIndex = (BYTE)dwKeyIndex;
pDevice->bTransmitKey = TRUE;
dwKeyIndex |= (1 << 31);
}
KeybSetDefaultKey( pDevice,
&(pDevice->sKey),
dwKeyIndex & ~(BIT30 | USE_KEYRSC),
param->u.wpa_key.key_len,
NULL,
abyKey,
KEY_CTL_WEP
);
}
pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
pDevice->bEncryptionEnable = TRUE;
return ret;
}
if (param->u.wpa_key.seq && param->u.wpa_key.seq_len > sizeof(abySeq))
return -EINVAL;
spin_unlock_irq(&pDevice->lock);
if (param->u.wpa_key.seq && fcpfkernel) {
memcpy(&abySeq[0], param->u.wpa_key.seq, param->u.wpa_key.seq_len);
} else {
if (param->u.wpa_key.seq &&
copy_from_user(&abySeq[0], param->u.wpa_key.seq,
param->u.wpa_key.seq_len)) {
spin_lock_irq(&pDevice->lock);
return -EINVAL;
}
}
spin_lock_irq(&pDevice->lock);
if (param->u.wpa_key.seq_len > 0) {
memset(&KeyRSC, 0, sizeof(KeyRSC)); /* KeyRSC is OR-ed into below; start from zero */
for (ii = 0 ; ii < param->u.wpa_key.seq_len ; ii++) {
if (ii < 4)
LODWORD(KeyRSC) |= (abySeq[ii] << (ii * 8));
else
HIDWORD(KeyRSC) |= (abySeq[ii] << ((ii-4) * 8));
}
dwKeyIndex |= 1 << 29;
}
if (param->u.wpa_key.key_index >= MAX_GROUP_KEY) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return dwKeyIndex > 3\n");
return -EINVAL;
}
if (param->u.wpa_key.alg_name == WPA_ALG_TKIP) {
pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
}
if (param->u.wpa_key.alg_name == WPA_ALG_CCMP) {
pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
}
if (param->u.wpa_key.set_tx)
dwKeyIndex |= (1 << 31);
if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled)
byKeyDecMode = KEY_CTL_CCMP;
else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled)
byKeyDecMode = KEY_CTL_TKIP;
else
byKeyDecMode = KEY_CTL_WEP;
// Fix HCT test that set 256 bits KEY and Ndis802_11Encryption3Enabled
if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
if (param->u.wpa_key.key_len == MAX_KEY_LEN)
byKeyDecMode = KEY_CTL_TKIP;
else if (param->u.wpa_key.key_len == WLAN_WEP40_KEYLEN)
byKeyDecMode = KEY_CTL_WEP;
else if (param->u.wpa_key.key_len == WLAN_WEP104_KEYLEN)
byKeyDecMode = KEY_CTL_WEP;
} else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
if (param->u.wpa_key.key_len == WLAN_WEP40_KEYLEN)
byKeyDecMode = KEY_CTL_WEP;
else if (param->u.wpa_key.key_len == WLAN_WEP104_KEYLEN)
byKeyDecMode = KEY_CTL_WEP;
}
// Check TKIP key length
if ((byKeyDecMode == KEY_CTL_TKIP) &&
(param->u.wpa_key.key_len != MAX_KEY_LEN)) {
// TKIP Key must be 256 bits
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return- TKIP Key must be 256 bits!\n");
return -EINVAL;
}
// Check AES key length
if ((byKeyDecMode == KEY_CTL_CCMP) &&
(param->u.wpa_key.key_len != AES_KEY_LEN)) {
// AES Key must be 128 bits
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return - AES Key must be 128 bits\n");
return -EINVAL;
}
if (is_broadcast_ether_addr(&param->addr[0]) || (param->addr == NULL)) {
/* if broadcast, set the key as every key entry's group key */
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Group Key Assign.\n");
if ((KeybSetAllGroupKey(pDevice, &(pDevice->sKey), dwKeyIndex,
param->u.wpa_key.key_len,
(PQWORD) &(KeyRSC),
(PBYTE)abyKey,
byKeyDecMode
) == TRUE) &&
(KeybSetDefaultKey(pDevice,
&(pDevice->sKey),
dwKeyIndex,
param->u.wpa_key.key_len,
(PQWORD) &(KeyRSC),
(PBYTE)abyKey,
byKeyDecMode
) == TRUE) ) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "GROUP Key Assign.\n");
} else {
return -EINVAL;
}
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key Assign.\n");
// BSSID not 0xffffffffffff
// Pairwise Key can't be WEP
if (byKeyDecMode == KEY_CTL_WEP) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key can't be WEP\n");
return -EINVAL;
}
dwKeyIndex |= (1 << 30); // set pairwise key
if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) {
//DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA - WMAC_CONFIG_IBSS_STA\n"));
return -EINVAL;
}
if (KeybSetKey(pDevice, &(pDevice->sKey), &param->addr[0],
dwKeyIndex, param->u.wpa_key.key_len,
(PQWORD) &(KeyRSC), (PBYTE)abyKey, byKeyDecMode
) == TRUE) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key Set\n");
} else {
// Key Table Full
if (!compare_ether_addr(&param->addr[0], pDevice->abyBSSID)) {
//DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA -Key Table Full.2\n"));
return -EINVAL;
} else {
// Save Key and configure just before associate/reassociate to BSSID
// we do not implement now
return -EINVAL;
}
}
} // BSSID not 0xffffffffffff
if ((ret == 0) && ((param->u.wpa_key.set_tx) != 0)) {
pDevice->byKeyIndex = (BYTE)param->u.wpa_key.key_index;
pDevice->bTransmitKey = TRUE;
}
pDevice->bEncryptionEnable = TRUE;
return ret;
}
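/*
 * Illustrative sketch, not part of the original driver: wpa_set_keys()
 * above builds the 64-bit key RSC from a little-endian byte sequence,
 * one DWORD half at a time via LODWORD()/HIDWORD().  The same assembly
 * over a plain u64 looks like this (helper name is hypothetical):
 */
static inline u64 example_seq_to_rsc(const u8 *seq, unsigned int len)
{
u64 rsc = 0;
unsigned int i;
for (i = 0; i < len && i < 8; i++)
rsc |= (u64)seq[i] << (i * 8); /* byte 0 is least significant */
return rsc;
}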
/*
* Description:
* enable wpa auth & mode
*
* Parameters:
* In:
* pDevice -
* param -
* Out:
*
* Return Value:
*
*/
static int wpa_set_wpa(PSDevice pDevice, struct viawget_wpa_param *param)
{
PSMgmtObject pMgmt = &pDevice->sMgmtObj;
int ret = 0;
pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
pMgmt->bShareKeyAlgorithm = FALSE;
return ret;
}
/*
* Description:
* set disassociate
*
* Parameters:
* In:
* pDevice -
* param -
* Out:
*
* Return Value:
*
*/
static int wpa_set_disassociate(PSDevice pDevice, struct viawget_wpa_param *param)
{
PSMgmtObject pMgmt = &pDevice->sMgmtObj;
int ret = 0;
spin_lock_irq(&pDevice->lock);
if (pDevice->bLinkPass) {
if (!memcmp(param->addr, pMgmt->abyCurrBSSID, 6))
bScheduleCommand((void *)pDevice, WLAN_CMD_DISASSOCIATE, NULL);
}
spin_unlock_irq(&pDevice->lock);
return ret;
}
/*
* Description:
* enable scan process
*
* Parameters:
* In:
* pDevice -
* param -
* Out:
*
* Return Value:
*
*/
static int wpa_set_scan(PSDevice pDevice, struct viawget_wpa_param *param)
{
int ret = 0;
/* set ap_scan=1 && scan_ssid=1 under hidden ssid mode */
PSMgmtObject pMgmt = &pDevice->sMgmtObj;
PWLAN_IE_SSID pItemSSID;
printk("wpa_set_scan-->desired [ssid=%s, ssid_len=%d]\n",
param->u.scan_req.ssid, param->u.scan_req.ssid_len);
// Set the SSID
memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
pItemSSID->byElementID = WLAN_EID_SSID;
memcpy(pItemSSID->abySSID, param->u.scan_req.ssid, param->u.scan_req.ssid_len);
pItemSSID->len = param->u.scan_req.ssid_len;
spin_lock_irq(&pDevice->lock);
BSSvClearBSSList((void *) pDevice, pDevice->bLinkPass);
bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN,
pMgmt->abyDesireSSID);
spin_unlock_irq(&pDevice->lock);
return ret;
}
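/*
 * Usage sketch (illustrative; layout assumed from the fields used above):
 * abyDesireSSID holds an 802.11 SSID information element, i.e. a one-byte
 * element ID, a one-byte length, then the SSID octets.  Helper name is
 * hypothetical:
 */
static inline void example_fill_ssid_ie(PWLAN_IE_SSID ie,
const BYTE *ssid, BYTE ssid_len)
{
ie->byElementID = WLAN_EID_SSID; /* element ID 0: SSID */
ie->len = ssid_len;
memcpy(ie->abySSID, ssid, ssid_len); /* caller guarantees buffer space */
}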
/*
* Description:
* get bssid
*
* Parameters:
* In:
* pDevice -
* param -
* Out:
*
* Return Value:
*
*/
static int wpa_get_bssid(PSDevice pDevice, struct viawget_wpa_param *param)
{
PSMgmtObject pMgmt = &pDevice->sMgmtObj;
int ret = 0;
memcpy(param->u.wpa_associate.bssid, pMgmt->abyCurrBSSID, 6);
return ret;
}
/*
* Description:
* get ssid
*
* Parameters:
* In:
* pDevice -
* param -
* Out:
*
* Return Value:
*
*/
static int wpa_get_ssid(PSDevice pDevice, struct viawget_wpa_param *param)
{
PSMgmtObject pMgmt = &pDevice->sMgmtObj;
PWLAN_IE_SSID pItemSSID;
int ret = 0;
pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;
memcpy(param->u.wpa_associate.ssid, pItemSSID->abySSID, pItemSSID->len);
param->u.wpa_associate.ssid_len = pItemSSID->len;
return ret;
}
/*
* Description:
* get scan results
*
* Parameters:
* In:
* pDevice -
* param -
* Out:
*
* Return Value:
*
*/
static int wpa_get_scan(PSDevice pDevice, struct viawget_wpa_param *param)
{
struct viawget_scan_result *scan_buf;
PSMgmtObject pMgmt = &pDevice->sMgmtObj;
PWLAN_IE_SSID pItemSSID;
PKnownBSS pBSS;
PBYTE pBuf;
int ret = 0;
u16 count = 0;
u16 ii;
u16 jj;
long ldBm; //James //add
/* mike: bubble-sort the BSS list so stronger RSSI comes first */
PBYTE ptempBSS;
ptempBSS = kmalloc(sizeof(KnownBSS), GFP_ATOMIC);
if (ptempBSS == NULL) {
printk("wpa_get_scan: kmalloc for BSS sort buffer failed\n");
ret = -ENOMEM;
return ret;
}
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
for (jj = 0; jj < MAX_BSS_NUM - ii - 1; jj++) {
if ((pMgmt->sBSSList[jj].bActive != TRUE)
|| ((pMgmt->sBSSList[jj].uRSSI > pMgmt->sBSSList[jj + 1].uRSSI)
&& (pMgmt->sBSSList[jj + 1].bActive != FALSE))) {
memcpy(ptempBSS, &pMgmt->sBSSList[jj], sizeof(KnownBSS));
memcpy(&pMgmt->sBSSList[jj], &pMgmt->sBSSList[jj + 1],
sizeof(KnownBSS));
memcpy(&pMgmt->sBSSList[jj + 1], ptempBSS, sizeof(KnownBSS));
}
}
}
kfree(ptempBSS);
count = 0;
pBSS = &(pMgmt->sBSSList[0]);
for (ii = 0; ii < MAX_BSS_NUM; ii++) {
pBSS = &(pMgmt->sBSSList[ii]);
if (!pBSS->bActive)
continue;
count++;
}
pBuf = kcalloc(count, sizeof(struct viawget_scan_result), GFP_ATOMIC);
if (pBuf == NULL) {
ret = -ENOMEM;
return ret;
}
scan_buf = (struct viawget_scan_result *)pBuf;
pBSS = &(pMgmt->sBSSList[0]);
for (ii = 0, jj = 0; ii < MAX_BSS_NUM; ii++) {
pBSS = &(pMgmt->sBSSList[ii]);
if (pBSS->bActive) {
if (jj >= count)
break;
memcpy(scan_buf->bssid, pBSS->abyBSSID, WLAN_BSSID_LEN);
pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID;
memcpy(scan_buf->ssid, pItemSSID->abySSID, pItemSSID->len);
scan_buf->ssid_len = pItemSSID->len;
scan_buf->freq = frequency_list[pBSS->uChannel-1];
scan_buf->caps = pBSS->wCapInfo; // DavidWang for sharemode
RFvRSSITodBm(pDevice, (BYTE)(pBSS->uRSSI), &ldBm);
if (-ldBm < 50)
scan_buf->qual = 100;
else if (-ldBm > 90)
scan_buf->qual = 0;
else
scan_buf->qual = (40 - (-ldBm - 50)) * 100 / 40;
//James
//scan_buf->caps = pBSS->wCapInfo;
//scan_buf->qual =
scan_buf->noise = 0;
scan_buf->level = ldBm;
//scan_buf->maxrate =
if (pBSS->wWPALen != 0) {
scan_buf->wpa_ie_len = pBSS->wWPALen;
memcpy(scan_buf->wpa_ie, pBSS->byWPAIE, pBSS->wWPALen);
}
if (pBSS->wRSNLen != 0) {
scan_buf->rsn_ie_len = pBSS->wRSNLen;
memcpy(scan_buf->rsn_ie, pBSS->byRSNIE, pBSS->wRSNLen);
}
scan_buf = (struct viawget_scan_result *)((PBYTE)scan_buf + sizeof(struct viawget_scan_result));
jj++;
}
}
if (jj < count)
count = jj;
if (copy_to_user(param->u.scan_results.buf, pBuf, sizeof(struct viawget_scan_result) * count))
ret = -EFAULT;
param->u.scan_results.scan_count = count;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " param->u.scan_results.scan_count = %d\n", count);
kfree(pBuf);
return ret;
}
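/*
 * Illustrative only: the qual computation above maps signal strength
 * linearly from the [-90 dBm, -50 dBm] window onto [0, 100]; e.g.
 * ldBm = -70 gives (40 - (70 - 50)) * 100 / 40 = 50.  Helper name is
 * hypothetical:
 */
static inline int example_dbm_to_qual(long dbm)
{
if (-dbm < 50)
return 100; /* stronger than -50 dBm: full quality */
if (-dbm > 90)
return 0; /* weaker than -90 dBm: no quality */
return (40 - (-dbm - 50)) * 100 / 40;
}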
/*
* Description:
* set associate with AP
*
* Parameters:
* In:
* pDevice -
* param -
* Out:
*
* Return Value:
*
*/
static int wpa_set_associate(PSDevice pDevice, struct viawget_wpa_param *param)
{
PSMgmtObject pMgmt = &pDevice->sMgmtObj;
PWLAN_IE_SSID pItemSSID;
BYTE abyNullAddr[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
BYTE abyWPAIE[64];
int ret = 0;
BOOL bwepEnabled = FALSE;
// set key type & algorithm
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pairwise_suite = %d\n", param->u.wpa_associate.pairwise_suite);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "group_suite = %d\n", param->u.wpa_associate.group_suite);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "key_mgmt_suite = %d\n", param->u.wpa_associate.key_mgmt_suite);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "auth_alg = %d\n", param->u.wpa_associate.auth_alg);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "mode = %d\n", param->u.wpa_associate.mode);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Roaming dBm = %d\n", param->u.wpa_associate.roam_dbm); // Davidwang
if (param->u.wpa_associate.wpa_ie) {
if (param->u.wpa_associate.wpa_ie_len > sizeof(abyWPAIE))
return -EINVAL;
if (copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie,
param->u.wpa_associate.wpa_ie_len))
return -EFAULT;
}
if (param->u.wpa_associate.mode == 1)
pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
else
pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;
// set bssid
if (memcmp(param->u.wpa_associate.bssid, &abyNullAddr[0], 6) != 0)
memcpy(pMgmt->abyDesireBSSID, param->u.wpa_associate.bssid, 6);
// set ssid
memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
pItemSSID->byElementID = WLAN_EID_SSID;
pItemSSID->len = param->u.wpa_associate.ssid_len;
memcpy(pItemSSID->abySSID, param->u.wpa_associate.ssid, pItemSSID->len);
if (param->u.wpa_associate.wpa_ie_len == 0) {
if (param->u.wpa_associate.auth_alg & AUTH_ALG_SHARED_KEY)
pMgmt->eAuthenMode = WMAC_AUTH_SHAREKEY;
else
pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
} else if (abyWPAIE[0] == RSN_INFO_ELEM) {
if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_PSK)
pMgmt->eAuthenMode = WMAC_AUTH_WPA2PSK;
else
pMgmt->eAuthenMode = WMAC_AUTH_WPA2;
} else {
if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_WPA_NONE)
pMgmt->eAuthenMode = WMAC_AUTH_WPANONE;
else if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_PSK)
pMgmt->eAuthenMode = WMAC_AUTH_WPAPSK;
else
pMgmt->eAuthenMode = WMAC_AUTH_WPA;
}
switch (param->u.wpa_associate.pairwise_suite) {
case CIPHER_CCMP:
pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
break;
case CIPHER_TKIP:
pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
break;
case CIPHER_WEP40:
case CIPHER_WEP104:
pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
bwepEnabled = TRUE;
break;
case CIPHER_NONE:
if (param->u.wpa_associate.group_suite == CIPHER_CCMP)
pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
else
pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
break;
default:
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
}
pMgmt->Roam_dbm = param->u.wpa_associate.roam_dbm;
if (pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) { // @wep-sharekey
pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
pMgmt->bShareKeyAlgorithm = TRUE;
} else if (pMgmt->eAuthenMode == WMAC_AUTH_OPEN) {
if (bwepEnabled == TRUE) { /* @open-wep */
pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
} else {
// @only open
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
}
}
// mike save old encryption status
pDevice->eOldEncryptionStatus = pDevice->eEncryptionStatus;
if (pDevice->eEncryptionStatus != Ndis802_11EncryptionDisabled)
pDevice->bEncryptionEnable = TRUE;
else
pDevice->bEncryptionEnable = FALSE;
if ((pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) ||
((pMgmt->eAuthenMode == WMAC_AUTH_OPEN) && (bwepEnabled == TRUE))) {
// mike re-comment:open-wep && sharekey-wep needn't do initial key!!
} else {
KeyvInitTable(pDevice, &pDevice->sKey);
}
spin_lock_irq(&pDevice->lock);
pDevice->bLinkPass = FALSE;
ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_SLOW);
memset(pMgmt->abyCurrBSSID, 0, 6);
pMgmt->eCurrState = WMAC_STATE_IDLE;
netif_stop_queue(pDevice->dev);
/******* search if ap_scan=2, which is associating request in hidden ssid mode ****/
{
PKnownBSS pCurr = NULL;
pCurr = BSSpSearchBSSList(pDevice,
pMgmt->abyDesireBSSID,
pMgmt->abyDesireSSID,
pDevice->eConfigPHYMode
);
if (pCurr == NULL) {
printk("wpa_set_associate: hidden-SSID mode, site survey before associate\n");
bScheduleCommand((void *)pDevice,
WLAN_CMD_BSSID_SCAN,
pMgmt->abyDesireSSID);
}
}
/****************************************************************/
bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, NULL);
spin_unlock_irq(&pDevice->lock);
return ret;
}
/*
* Description:
* wpa_ioctl main function supported for wpa supplicant
*
* Parameters:
* In:
* pDevice -
* iw_point -
* Out:
*
* Return Value:
*
*/
int wpa_ioctl(PSDevice pDevice, struct iw_point *p)
{
struct viawget_wpa_param *param;
int ret = 0;
int wpa_ioctl = 0;
if (p->length < sizeof(struct viawget_wpa_param) ||
p->length > VIAWGET_WPA_MAX_BUF_SIZE || !p->pointer)
return -EINVAL;
param = kmalloc((int)p->length, GFP_KERNEL);
if (param == NULL)
return -ENOMEM;
if (copy_from_user(param, p->pointer, p->length)) {
ret = -EFAULT;
goto out;
}
switch (param->cmd) {
case VIAWGET_SET_WPA:
ret = wpa_set_wpa(pDevice, param);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_WPA \n");
break;
case VIAWGET_SET_KEY:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_KEY \n");
spin_lock_irq(&pDevice->lock);
ret = wpa_set_keys(pDevice, param, FALSE);
spin_unlock_irq(&pDevice->lock);
break;
case VIAWGET_SET_SCAN:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_SCAN \n");
ret = wpa_set_scan(pDevice, param);
break;
case VIAWGET_GET_SCAN:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_SCAN\n");
ret = wpa_get_scan(pDevice, param);
wpa_ioctl = 1;
break;
case VIAWGET_GET_SSID:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_SSID \n");
ret = wpa_get_ssid(pDevice, param);
wpa_ioctl = 1;
break;
case VIAWGET_GET_BSSID:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_BSSID \n");
ret = wpa_get_bssid(pDevice, param);
wpa_ioctl = 1;
break;
case VIAWGET_SET_ASSOCIATE:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_ASSOCIATE \n");
ret = wpa_set_associate(pDevice, param);
break;
case VIAWGET_SET_DISASSOCIATE:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DISASSOCIATE \n");
ret = wpa_set_disassociate(pDevice, param);
break;
case VIAWGET_SET_DROP_UNENCRYPT:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DROP_UNENCRYPT \n");
break;
case VIAWGET_SET_DEAUTHENTICATE:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DEAUTHENTICATE \n");
break;
default:
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ioctl: unknown cmd=%d\n",
param->cmd);
kfree(param);
return -EOPNOTSUPP;
}
if ((ret == 0) && wpa_ioctl) {
if (copy_to_user(p->pointer, param, p->length)) {
ret = -EFAULT;
goto out;
}
}
out:
kfree(param);
return ret;
}
| gpl-2.0 |
arpitjain9819/FIRE-AND-FLAMES | arch/powerpc/kernel/syscalls.c | 8520 | 3966 | /*
* Implementation of various system calls for Linux/PowerPC
*
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Derived from "arch/i386/kernel/sys_i386.c"
* Adapted from the i386 version by Gary Thomas
* Modified by Cort Dougan (cort@cs.nmt.edu)
* and Paul Mackerras (paulus@cs.anu.edu.au).
*
* This file contains various random system calls that
* have a non-standard calling sequence on the Linux/PPC
* platform.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/sys.h>
#include <linux/ipc.h>
#include <linux/utsname.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/personality.h>
#include <asm/uaccess.h>
#include <asm/syscalls.h>
#include <asm/time.h>
#include <asm/unistd.h>
static inline unsigned long do_mmap2(unsigned long addr, size_t len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long off, int shift)
{
unsigned long ret = -EINVAL;
if (!arch_validate_prot(prot))
goto out;
if (shift) {
if (off & ((1 << shift) - 1))
goto out;
off >>= shift;
}
ret = sys_mmap_pgoff(addr, len, prot, flags, fd, off);
out:
return ret;
}
unsigned long sys_mmap2(unsigned long addr, size_t len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
{
return do_mmap2(addr, len, prot, flags, fd, pgoff, PAGE_SHIFT-12);
}
unsigned long sys_mmap(unsigned long addr, size_t len,
unsigned long prot, unsigned long flags,
unsigned long fd, off_t offset)
{
return do_mmap2(addr, len, prot, flags, fd, offset, PAGE_SHIFT);
}
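/*
 * Worked example (illustrative): sys_mmap2() receives its offset in
 * 4096-byte units, sys_mmap() in bytes.  With 64K pages (PAGE_SHIFT ==
 * 16) do_mmap2() is entered with shift == 4 from sys_mmap2() and
 * shift == 16 from sys_mmap(); any offset with a low `shift' bit set is
 * rejected before conversion to PAGE_SIZE units, e.g. for shift == 4:
 *
 * off = 0x5000 -> 0x5000 & 0xf == 0, so pgoff = 0x5000 >> 4 = 0x500
 * off = 0x5008 -> 0x5008 & 0xf != 0, so the mmap fails with -EINVAL
 */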
#ifdef CONFIG_PPC32
/*
* Due to some executables calling the wrong select we sometimes
* get wrong args. This determines how the args are being passed
* (a single ptr to them all args passed) then calls
* sys_select() with the appropriate args. -- Cort
*/
int
ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp)
{
if ((unsigned long)n >= 4096)
{
unsigned long __user *buffer = (unsigned long __user *)n;
if (!access_ok(VERIFY_READ, buffer, 5*sizeof(unsigned long))
|| __get_user(n, buffer)
|| __get_user(inp, ((fd_set __user * __user *)(buffer+1)))
|| __get_user(outp, ((fd_set __user * __user *)(buffer+2)))
|| __get_user(exp, ((fd_set __user * __user *)(buffer+3)))
|| __get_user(tvp, ((struct timeval __user * __user *)(buffer+4))))
return -EFAULT;
}
return sys_select(n, inp, outp, exp, tvp);
}
#endif
#ifdef CONFIG_PPC64
long ppc64_personality(unsigned long personality)
{
long ret;
if (personality(current->personality) == PER_LINUX32
&& personality == PER_LINUX)
personality = PER_LINUX32;
ret = sys_personality(personality);
if (ret == PER_LINUX32)
ret = PER_LINUX;
return ret;
}
#endif
long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
u32 len_high, u32 len_low)
{
return sys_fadvise64(fd, (u64)offset_high << 32 | offset_low,
(u64)len_high << 32 | len_low, advice);
}
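/*
 * Illustrative only: the 32-bit register halves are recombined into a
 * 64-bit value exactly as above, high word first; e.g. offset_high = 0x1,
 * offset_low = 0x2000 yields ((u64)0x1 << 32) | 0x2000 == 0x100002000.
 * Helper name is hypothetical:
 */
static inline u64 example_join_u32(u32 high, u32 low)
{
return (u64)high << 32 | low;
}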
void do_show_syscall(unsigned long r3, unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7, unsigned long r8,
struct pt_regs *regs)
{
printk("syscall %ld(%lx, %lx, %lx, %lx, %lx, %lx) regs=%p current=%p"
" cpu=%d\n", regs->gpr[0], r3, r4, r5, r6, r7, r8, regs,
current, smp_processor_id());
}
void do_show_syscall_exit(unsigned long r3)
{
printk(" -> %lx, current=%p cpu=%d\n", r3, current, smp_processor_id());
}
| gpl-2.0 |
asis92/kernel-lp-lg-d802 | arch/mips/sibyte/common/cfe.c | 8776 | 8443 | /*
* Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>
#include <linux/pm.h>
#include <linux/smp.h>
#include <asm/bootinfo.h>
#include <asm/reboot.h>
#include <asm/sibyte/board.h>
#include <asm/smp-ops.h>
#include <asm/fw/cfe/cfe_api.h>
#include <asm/fw/cfe/cfe_error.h>
/* Max ram addressable in 32-bit segments */
#ifdef CONFIG_64BIT
#define MAX_RAM_SIZE (~0ULL)
#else
#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_64BIT_PHYS_ADDR
#define MAX_RAM_SIZE (~0ULL)
#else
#define MAX_RAM_SIZE (0xffffffffULL)
#endif
#else
#define MAX_RAM_SIZE (0x1fffffffULL)
#endif
#endif
#define SIBYTE_MAX_MEM_REGIONS 8
phys_t board_mem_region_addrs[SIBYTE_MAX_MEM_REGIONS];
phys_t board_mem_region_sizes[SIBYTE_MAX_MEM_REGIONS];
unsigned int board_mem_region_count;
int cfe_cons_handle;
#ifdef CONFIG_BLK_DEV_INITRD
extern unsigned long initrd_start, initrd_end;
#endif
static void __noreturn cfe_linux_exit(void *arg)
{
int warm = *(int *)arg;
if (smp_processor_id()) {
static int reboot_smp;
/* Don't repeat the process from another CPU */
if (!reboot_smp) {
/* Get CPU 0 to do the cfe_exit */
reboot_smp = 1;
smp_call_function(cfe_linux_exit, arg, 0);
}
} else {
printk("Passing control back to CFE...\n");
cfe_exit(warm, 0);
printk("cfe_exit returned??\n");
}
while (1);
}
static void __noreturn cfe_linux_restart(char *command)
{
static const int zero;
cfe_linux_exit((void *)&zero);
}
static void __noreturn cfe_linux_halt(void)
{
static const int one = 1;
cfe_linux_exit((void *)&one);
}
static __init void prom_meminit(void)
{
u64 addr, size, type; /* regardless of 64BIT_PHYS_ADDR */
int mem_flags = 0;
unsigned int idx;
int rd_flag;
#ifdef CONFIG_BLK_DEV_INITRD
unsigned long initrd_pstart;
unsigned long initrd_pend;
initrd_pstart = CPHYSADDR(initrd_start);
initrd_pend = CPHYSADDR(initrd_end);
if (initrd_start &&
((initrd_pstart > MAX_RAM_SIZE)
|| (initrd_pend > MAX_RAM_SIZE))) {
panic("initrd out of addressable memory");
}
#endif /* INITRD */
for (idx = 0; cfe_enummem(idx, mem_flags, &addr, &size, &type) != CFE_ERR_NOMORE;
idx++) {
rd_flag = 0;
if (type == CFE_MI_AVAILABLE) {
/*
* See if this block contains (any portion of) the
* ramdisk
*/
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start) {
if ((initrd_pstart > addr) &&
(initrd_pstart < (addr + size))) {
add_memory_region(addr,
initrd_pstart - addr,
BOOT_MEM_RAM);
rd_flag = 1;
}
if ((initrd_pend > addr) &&
(initrd_pend < (addr + size))) {
add_memory_region(initrd_pend,
(addr + size) - initrd_pend,
BOOT_MEM_RAM);
rd_flag = 1;
}
}
#endif
if (!rd_flag) {
if (addr > MAX_RAM_SIZE)
continue;
if (addr+size > MAX_RAM_SIZE)
size = MAX_RAM_SIZE - addr + 1; /* clamp so the region ends at MAX_RAM_SIZE */
/*
* memcpy/__copy_user prefetch, which
* will cause a bus error for
* KSEG/KUSEG addrs not backed by RAM.
* Hence, reserve some padding for the
* prefetch distance.
*/
if (size > 512)
size -= 512;
add_memory_region(addr, size, BOOT_MEM_RAM);
}
board_mem_region_addrs[board_mem_region_count] = addr;
board_mem_region_sizes[board_mem_region_count] = size;
board_mem_region_count++;
if (board_mem_region_count ==
SIBYTE_MAX_MEM_REGIONS) {
/*
* Too many regions. Need to configure more
*/
while(1);
}
}
}
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start) {
add_memory_region(initrd_pstart, initrd_pend - initrd_pstart,
BOOT_MEM_RESERVED);
}
#endif
}
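/*
 * Worked example (illustrative) for the clamping above: on a 32-bit
 * non-HIGHMEM kernel MAX_RAM_SIZE is 0x1fffffff, so a CFE region with
 * addr = 0x10000000 and size = 0x20000000 would end at 0x2fffffff and is
 * trimmed to size = 0x1fffffff - 0x10000000 + 1 = 0x10000000, making the
 * region end exactly at MAX_RAM_SIZE.
 */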
#ifdef CONFIG_BLK_DEV_INITRD
static int __init initrd_setup(char *str)
{
char rdarg[64];
int idx;
char *tmp, *endptr;
unsigned long initrd_size;
/* Make a copy of the initrd argument so we can smash it up here */
for (idx = 0; idx < sizeof(rdarg)-1; idx++) {
if (!str[idx] || (str[idx] == ' ')) break;
rdarg[idx] = str[idx];
}
rdarg[idx] = 0;
str = rdarg;
/*
*Initrd location comes in the form "<hex size of ramdisk in bytes>@<location in memory>"
* e.g. initrd=3abfd@80010000. This is set up by the loader.
*/
for (tmp = str; *tmp != '@'; tmp++) {
if (!*tmp) {
goto fail;
}
}
*tmp = 0;
tmp++;
if (!*tmp) {
goto fail;
}
initrd_size = simple_strtoul(str, &endptr, 16);
if (*endptr) {
*(tmp-1) = '@';
goto fail;
}
*(tmp-1) = '@';
initrd_start = simple_strtoul(tmp, &endptr, 16);
if (*endptr) {
goto fail;
}
initrd_end = initrd_start + initrd_size;
printk("Found initrd of %lx@%lx\n", initrd_size, initrd_start);
return 1;
fail:
printk("Bad initrd argument. Disabling initrd\n");
initrd_start = 0;
initrd_end = 0;
return 1;
}
#endif
extern struct plat_smp_ops sb_smp_ops;
extern struct plat_smp_ops bcm1480_smp_ops;
/*
* prom_init is called just after the cpu type is determined, from setup_arch()
*/
void __init prom_init(void)
{
uint64_t cfe_ept, cfe_handle;
unsigned int cfe_eptseal;
int argc = fw_arg0;
char **envp = (char **) fw_arg2;
int *prom_vec = (int *) fw_arg3;
_machine_restart = cfe_linux_restart;
_machine_halt = cfe_linux_halt;
pm_power_off = cfe_linux_halt;
/*
* Check if a loader was used; if NOT, the 4 arguments are
* what CFE gives us (handle, 0, EPT and EPTSEAL)
*/
if (argc < 0) {
cfe_handle = (uint64_t)(long)argc;
cfe_ept = (long)envp;
cfe_eptseal = (uint32_t)(unsigned long)prom_vec;
} else {
if ((int32_t)(long)prom_vec < 0) {
/*
* Old loader; all it gives us is the handle,
* so use the "known" entrypoint and assume
* the seal.
*/
cfe_handle = (uint64_t)(long)prom_vec;
cfe_ept = (uint64_t)((int32_t)0x9fc00500);
cfe_eptseal = CFE_EPTSEAL;
} else {
/*
* Newer loaders bundle the handle/ept/eptseal
* Note: prom_vec is in the loader's useg
* which is still alive in the TLB.
*/
cfe_handle = (uint64_t)((int32_t *)prom_vec)[0];
cfe_ept = (uint64_t)((int32_t *)prom_vec)[2];
cfe_eptseal = (unsigned int)((uint32_t *)prom_vec)[3];
}
}
if (cfe_eptseal != CFE_EPTSEAL) {
/* too early for panic to do any good */
printk("CFE's entrypoint seal doesn't match. Spinning.\n");
while (1) ;
}
cfe_init(cfe_handle, cfe_ept);
/*
* Get the handle for (at least) prom_putchar, possibly for
* boot console
*/
cfe_cons_handle = cfe_getstdhandle(CFE_STDHANDLE_CONSOLE);
if (cfe_getenv("LINUX_CMDLINE", arcs_cmdline, COMMAND_LINE_SIZE) < 0) {
if (argc >= 0) {
/* The loader should have set the command line */
/* too early for panic to do any good */
printk("LINUX_CMDLINE not defined in cfe.\n");
while (1) ;
}
}
#ifdef CONFIG_BLK_DEV_INITRD
{
char *ptr;
/* Need to find out early whether we've got an initrd. So scan
the list looking now */
for (ptr = arcs_cmdline; *ptr; ptr++) {
while (*ptr == ' ') {
ptr++;
}
if (!strncmp(ptr, "initrd=", 7)) {
initrd_setup(ptr+7);
break;
} else {
while (*ptr && (*ptr != ' ')) {
ptr++;
}
}
}
}
#endif /* CONFIG_BLK_DEV_INITRD */
/* Not sure this is needed, but it's the safe way. */
arcs_cmdline[COMMAND_LINE_SIZE-1] = 0;
prom_meminit();
#if defined(CONFIG_SIBYTE_BCM112X) || defined(CONFIG_SIBYTE_SB1250)
register_smp_ops(&sb_smp_ops);
#endif
#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
register_smp_ops(&bcm1480_smp_ops);
#endif
}
void __init prom_free_prom_memory(void)
{
/* Not sure what I'm supposed to do here. Nothing, I think */
}
void prom_putchar(char c)
{
int ret;
while ((ret = cfe_write(cfe_cons_handle, &c, 1)) == 0)
;
}
| gpl-2.0 |
OMAP4-AOSP/android_kernel_samsung_tuna | arch/mips/pci/fixup-mpc30x.c | 9032 | 1506 | /*
* fixup-mpc30x.c, The Victor MP-C303/304 specific PCI fixups.
*
* Copyright (C) 2002,2004 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/pci.h>
#include <asm/vr41xx/mpc30x.h>
static const int internal_func_irqs[] __initdata = {
VRC4173_CASCADE_IRQ,
VRC4173_AC97_IRQ,
VRC4173_USB_IRQ,
};
static const int irq_tab_mpc30x[] __initdata = {
[12] = VRC4173_PCMCIA1_IRQ,
[13] = VRC4173_PCMCIA2_IRQ,
[29] = MQ200_IRQ,
};
int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
if (slot == 30)
return internal_func_irqs[PCI_FUNC(dev->devfn)];
return irq_tab_mpc30x[slot];
}
/* Do platform specific device initialization at pci_enable_device() time */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
return 0;
}
| gpl-2.0 |
Buckmarble/Lunar_kernel_sense_m7 | arch/score/kernel/irq.c | 12104 | 2986 | /*
* arch/score/kernel/irq.c
*
* Score Processor version.
*
* Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
* Chen Liqin <liqin.chen@sunplusct.com>
* Lennox Wu <lennox.wu@sunplusct.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <asm/io.h>
/* the interrupt controller is hardcoded at this address */
#define SCORE_PIC ((u32 __iomem __force *)0x95F50000)
#define INT_PNDL 0
#define INT_PNDH 1
#define INT_PRIORITY_M 2
#define INT_PRIORITY_SG0 4
#define INT_PRIORITY_SG1 5
#define INT_PRIORITY_SG2 6
#define INT_PRIORITY_SG3 7
#define INT_MASKL 8
#define INT_MASKH 9
/*
* handles all normal device IRQs
*/
asmlinkage void do_IRQ(int irq)
{
irq_enter();
generic_handle_irq(irq);
irq_exit();
}
static void score_mask(struct irq_data *d)
{
unsigned int irq_source = 63 - d->irq;
if (irq_source < 32)
__raw_writel((__raw_readl(SCORE_PIC + INT_MASKL) | \
(1 << irq_source)), SCORE_PIC + INT_MASKL);
else
__raw_writel((__raw_readl(SCORE_PIC + INT_MASKH) | \
(1 << (irq_source - 32))), SCORE_PIC + INT_MASKH);
}
static void score_unmask(struct irq_data *d)
{
unsigned int irq_source = 63 - d->irq;
if (irq_source < 32)
__raw_writel((__raw_readl(SCORE_PIC + INT_MASKL) & \
~(1 << irq_source)), SCORE_PIC + INT_MASKL);
else
__raw_writel((__raw_readl(SCORE_PIC + INT_MASKH) & \
~(1 << (irq_source - 32))), SCORE_PIC + INT_MASKH);
}
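/*
 * Illustrative only: the PIC numbers its sources in reverse of the Linux
 * irq number (irq_source = 63 - irq); sources 0..31 are masked via
 * INT_MASKL and 32..63 via INT_MASKH, one bit per source.  Helper name
 * is hypothetical:
 */
static inline unsigned int example_mask_bit(unsigned int irq)
{
unsigned int src = 63 - irq;
return src < 32 ? src : src - 32; /* bit index within MASKL or MASKH */
}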
struct irq_chip score_irq_chip = {
.name = "Score7-level",
.irq_mask = score_mask,
.irq_mask_ack = score_mask,
.irq_unmask = score_unmask,
};
/*
* initialise the interrupt system
*/
void __init init_IRQ(void)
{
int index;
unsigned long target_addr;
for (index = 0; index < NR_IRQS; ++index)
irq_set_chip_and_handler(index, &score_irq_chip,
handle_level_irq);
for (target_addr = IRQ_VECTOR_BASE_ADDR;
target_addr <= IRQ_VECTOR_END_ADDR;
target_addr += IRQ_VECTOR_SIZE)
memcpy((void *)target_addr, \
interrupt_exception_vector, IRQ_VECTOR_SIZE);
__raw_writel(0xffffffff, SCORE_PIC + INT_MASKL);
__raw_writel(0xffffffff, SCORE_PIC + INT_MASKH);
__asm__ __volatile__(
"mtcr %0, cr3\n\t"
: : "r" (EXCEPTION_VECTOR_BASE_ADDR | \
VECTOR_ADDRESS_OFFSET_MODE16));
}
| gpl-2.0 |
jianpingye/linux | drivers/media/firewire/firedtv-rc.c | 12872 | 4075 | /*
* FireDTV driver (formerly known as FireSAT)
*
* Copyright (C) 2004 Andreas Monitzer <andy@monitzer.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*/
#include <linux/bitops.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include "firedtv.h"
/* fixed table with older keycodes, geared towards MythTV */
static const u16 oldtable[] = {
/* code from device: 0x4501...0x451f */
KEY_ESC,
KEY_F9,
KEY_1,
KEY_2,
KEY_3,
KEY_4,
KEY_5,
KEY_6,
KEY_7,
KEY_8,
KEY_9,
KEY_I,
KEY_0,
KEY_ENTER,
KEY_RED,
KEY_UP,
KEY_GREEN,
KEY_F10,
KEY_SPACE,
KEY_F11,
KEY_YELLOW,
KEY_DOWN,
KEY_BLUE,
KEY_Z,
KEY_P,
KEY_PAGEDOWN,
KEY_LEFT,
KEY_W,
KEY_RIGHT,
KEY_P,
KEY_M,
/* code from device: 0x4540...0x4542 */
KEY_R,
KEY_V,
KEY_C,
};
/* user-modifiable table for a remote as sold in 2008 */
static const u16 keytable[] = {
/* code from device: 0x0300...0x031f */
[0x00] = KEY_POWER,
[0x01] = KEY_SLEEP,
[0x02] = KEY_STOP,
[0x03] = KEY_OK,
[0x04] = KEY_RIGHT,
[0x05] = KEY_1,
[0x06] = KEY_2,
[0x07] = KEY_3,
[0x08] = KEY_LEFT,
[0x09] = KEY_4,
[0x0a] = KEY_5,
[0x0b] = KEY_6,
[0x0c] = KEY_UP,
[0x0d] = KEY_7,
[0x0e] = KEY_8,
[0x0f] = KEY_9,
[0x10] = KEY_DOWN,
[0x11] = KEY_TITLE, /* "OSD" - fixme */
[0x12] = KEY_0,
[0x13] = KEY_F20, /* "16:9" - fixme */
[0x14] = KEY_SCREEN, /* "FULL" - fixme */
[0x15] = KEY_MUTE,
[0x16] = KEY_SUBTITLE,
[0x17] = KEY_RECORD,
[0x18] = KEY_TEXT,
[0x19] = KEY_AUDIO,
[0x1a] = KEY_RED,
[0x1b] = KEY_PREVIOUS,
[0x1c] = KEY_REWIND,
[0x1d] = KEY_PLAYPAUSE,
[0x1e] = KEY_NEXT,
[0x1f] = KEY_VOLUMEUP,
/* code from device: 0x0340...0x0354 */
[0x20] = KEY_CHANNELUP,
[0x21] = KEY_F21, /* "4:3" - fixme */
[0x22] = KEY_TV,
[0x23] = KEY_DVD,
[0x24] = KEY_VCR,
[0x25] = KEY_AUX,
[0x26] = KEY_GREEN,
[0x27] = KEY_YELLOW,
[0x28] = KEY_BLUE,
[0x29] = KEY_CHANNEL, /* "CH.LIST" */
[0x2a] = KEY_VENDOR, /* "CI" - fixme */
[0x2b] = KEY_VOLUMEDOWN,
[0x2c] = KEY_CHANNELDOWN,
[0x2d] = KEY_LAST,
[0x2e] = KEY_INFO,
[0x2f] = KEY_FORWARD,
[0x30] = KEY_LIST,
[0x31] = KEY_FAVORITES,
[0x32] = KEY_MENU,
[0x33] = KEY_EPG,
[0x34] = KEY_EXIT,
};
int fdtv_register_rc(struct firedtv *fdtv, struct device *dev)
{
struct input_dev *idev;
int i, err;
idev = input_allocate_device();
if (!idev)
return -ENOMEM;
fdtv->remote_ctrl_dev = idev;
idev->name = "FireDTV remote control";
idev->dev.parent = dev;
idev->evbit[0] = BIT_MASK(EV_KEY);
idev->keycode = kmemdup(keytable, sizeof(keytable), GFP_KERNEL);
if (!idev->keycode) {
err = -ENOMEM;
goto fail;
}
idev->keycodesize = sizeof(keytable[0]);
idev->keycodemax = ARRAY_SIZE(keytable);
for (i = 0; i < ARRAY_SIZE(keytable); i++)
set_bit(keytable[i], idev->keybit);
err = input_register_device(idev);
if (err)
goto fail_free_keymap;
return 0;
fail_free_keymap:
kfree(idev->keycode);
fail:
input_free_device(idev);
return err;
}
void fdtv_unregister_rc(struct firedtv *fdtv)
{
cancel_work_sync(&fdtv->remote_ctrl_work);
kfree(fdtv->remote_ctrl_dev->keycode);
input_unregister_device(fdtv->remote_ctrl_dev);
}
void fdtv_handle_rc(struct firedtv *fdtv, unsigned int code)
{
struct input_dev *idev = fdtv->remote_ctrl_dev;
u16 *keycode = idev->keycode;
if (code >= 0x0300 && code <= 0x031f)
code = keycode[code - 0x0300];
else if (code >= 0x0340 && code <= 0x0354)
code = keycode[code - 0x0320];
else if (code >= 0x4501 && code <= 0x451f)
code = oldtable[code - 0x4501];
else if (code >= 0x4540 && code <= 0x4542)
code = oldtable[code - 0x4521];
else {
printk(KERN_DEBUG "firedtv: invalid key code 0x%04x "
"from remote control\n", code);
return;
}
input_report_key(idev, code, 1);
input_sync(idev);
input_report_key(idev, code, 0);
input_sync(idev);
}
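/*
 * Worked example (illustrative): a code of 0x0344 from the device falls
 * in the 0x0340...0x0354 range, so it is translated through
 * keycode[0x0344 - 0x0320] == keycode[0x24] == KEY_VCR and then reported
 * as a key press immediately followed by a release.
 */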
| gpl-2.0 |
DJSteve/G800F-LL_Kernel | drivers/scsi/raid_class.c | 13896 | 7840 | /*
* raid_class.c - implementation of a simple raid visualisation class
*
* Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com>
*
* This file is licensed under GPLv2
*
* This class is designed to allow raid attributes to be visualised and
* manipulated in a form independent of the underlying raid. Ultimately this
* should work for both hardware and software raids.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/raid_class.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#define RAID_NUM_ATTRS 3
struct raid_internal {
struct raid_template r;
struct raid_function_template *f;
/* The actual attributes */
struct device_attribute private_attrs[RAID_NUM_ATTRS];
/* The array of null terminated pointers to attributes
* needed by scsi_sysfs.c */
struct device_attribute *attrs[RAID_NUM_ATTRS + 1];
};
struct raid_component {
struct list_head node;
struct device dev;
int num;
};
#define to_raid_internal(tmpl) container_of(tmpl, struct raid_internal, r)
#define tc_to_raid_internal(tcont) ({ \
struct raid_template *r = \
container_of(tcont, struct raid_template, raid_attrs); \
to_raid_internal(r); \
})
#define ac_to_raid_internal(acont) ({ \
struct transport_container *tc = \
container_of(acont, struct transport_container, ac); \
tc_to_raid_internal(tc); \
})
#define device_to_raid_internal(dev) ({ \
struct attribute_container *ac = \
attribute_container_classdev_to_container(dev); \
ac_to_raid_internal(ac); \
})
static int raid_match(struct attribute_container *cont, struct device *dev)
{
/* We have to look for every subsystem that could house
* emulated RAID devices, so start with SCSI */
struct raid_internal *i = ac_to_raid_internal(cont);
#if defined(CONFIG_SCSI) || defined(CONFIG_SCSI_MODULE)
if (scsi_is_sdev_device(dev)) {
struct scsi_device *sdev = to_scsi_device(dev);
if (i->f->cookie != sdev->host->hostt)
return 0;
return i->f->is_raid(dev);
}
#endif
/* FIXME: look at other subsystems too */
return 0;
}
static int raid_setup(struct transport_container *tc, struct device *dev,
struct device *cdev)
{
struct raid_data *rd;
BUG_ON(dev_get_drvdata(cdev));
rd = kzalloc(sizeof(*rd), GFP_KERNEL);
if (!rd)
return -ENOMEM;
INIT_LIST_HEAD(&rd->component_list);
dev_set_drvdata(cdev, rd);
return 0;
}
static int raid_remove(struct transport_container *tc, struct device *dev,
struct device *cdev)
{
struct raid_data *rd = dev_get_drvdata(cdev);
struct raid_component *rc, *next;
dev_printk(KERN_ERR, dev, "RAID REMOVE\n");
dev_set_drvdata(cdev, NULL);
list_for_each_entry_safe(rc, next, &rd->component_list, node) {
list_del(&rc->node);
dev_printk(KERN_ERR, rc->dev.parent, "RAID COMPONENT REMOVE\n");
device_unregister(&rc->dev);
}
dev_printk(KERN_ERR, dev, "RAID REMOVE DONE\n");
kfree(rd);
return 0;
}
static DECLARE_TRANSPORT_CLASS(raid_class,
"raid_devices",
raid_setup,
raid_remove,
NULL);
static const struct {
enum raid_state value;
char *name;
} raid_states[] = {
{ RAID_STATE_UNKNOWN, "unknown" },
{ RAID_STATE_ACTIVE, "active" },
{ RAID_STATE_DEGRADED, "degraded" },
{ RAID_STATE_RESYNCING, "resyncing" },
{ RAID_STATE_OFFLINE, "offline" },
};
static const char *raid_state_name(enum raid_state state)
{
int i;
char *name = NULL;
for (i = 0; i < ARRAY_SIZE(raid_states); i++) {
if (raid_states[i].value == state) {
name = raid_states[i].name;
break;
}
}
return name;
}
static struct {
enum raid_level value;
char *name;
} raid_levels[] = {
{ RAID_LEVEL_UNKNOWN, "unknown" },
{ RAID_LEVEL_LINEAR, "linear" },
{ RAID_LEVEL_0, "raid0" },
{ RAID_LEVEL_1, "raid1" },
{ RAID_LEVEL_10, "raid10" },
{ RAID_LEVEL_1E, "raid1e" },
{ RAID_LEVEL_3, "raid3" },
{ RAID_LEVEL_4, "raid4" },
{ RAID_LEVEL_5, "raid5" },
{ RAID_LEVEL_50, "raid50" },
{ RAID_LEVEL_6, "raid6" },
};
static const char *raid_level_name(enum raid_level level)
{
int i;
char *name = NULL;
for (i = 0; i < ARRAY_SIZE(raid_levels); i++) {
if (raid_levels[i].value == level) {
name = raid_levels[i].name;
break;
}
}
return name;
}
#define raid_attr_show_internal(attr, fmt, var, code) \
static ssize_t raid_show_##attr(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
struct raid_data *rd = dev_get_drvdata(dev); \
code \
return snprintf(buf, 20, #fmt "\n", var); \
}
#define raid_attr_ro_states(attr, states, code) \
raid_attr_show_internal(attr, %s, name, \
const char *name; \
code \
name = raid_##states##_name(rd->attr); \
) \
static DEVICE_ATTR(attr, S_IRUGO, raid_show_##attr, NULL)
#define raid_attr_ro_internal(attr, code) \
raid_attr_show_internal(attr, %d, rd->attr, code) \
static DEVICE_ATTR(attr, S_IRUGO, raid_show_##attr, NULL)
#define ATTR_CODE(attr) \
struct raid_internal *i = device_to_raid_internal(dev); \
if (i->f->get_##attr) \
i->f->get_##attr(dev->parent);
#define raid_attr_ro(attr) raid_attr_ro_internal(attr, )
#define raid_attr_ro_fn(attr) raid_attr_ro_internal(attr, ATTR_CODE(attr))
#define raid_attr_ro_state(attr) raid_attr_ro_states(attr, attr, )
#define raid_attr_ro_state_fn(attr) raid_attr_ro_states(attr, attr, ATTR_CODE(attr))
raid_attr_ro_state(level);
raid_attr_ro_fn(resync);
raid_attr_ro_state_fn(state);
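/*
 * For reference (illustrative, hand-expanded): raid_attr_ro_state(level)
 * above generates approximately
 *
 * static ssize_t raid_show_level(struct device *dev,
 * struct device_attribute *attr, char *buf)
 * {
 * struct raid_data *rd = dev_get_drvdata(dev);
 * const char *name;
 * name = raid_level_name(rd->level);
 * return snprintf(buf, 20, "%s\n", name);
 * }
 * static DEVICE_ATTR(level, S_IRUGO, raid_show_level, NULL);
 */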
static void raid_component_release(struct device *dev)
{
struct raid_component *rc =
container_of(dev, struct raid_component, dev);
dev_printk(KERN_ERR, rc->dev.parent, "COMPONENT RELEASE\n");
put_device(rc->dev.parent);
kfree(rc);
}
int raid_component_add(struct raid_template *r,struct device *raid_dev,
struct device *component_dev)
{
struct device *cdev =
attribute_container_find_class_device(&r->raid_attrs.ac,
raid_dev);
struct raid_component *rc;
struct raid_data *rd = dev_get_drvdata(cdev);
int err;
rc = kzalloc(sizeof(*rc), GFP_KERNEL);
if (!rc)
return -ENOMEM;
INIT_LIST_HEAD(&rc->node);
device_initialize(&rc->dev);
rc->dev.release = raid_component_release;
rc->dev.parent = get_device(component_dev);
rc->num = rd->component_count++;
dev_set_name(&rc->dev, "component-%d", rc->num);
list_add_tail(&rc->node, &rd->component_list);
rc->dev.class = &raid_class.class;
err = device_add(&rc->dev);
if (err)
goto err_out;
return 0;
err_out:
list_del(&rc->node);
rd->component_count--;
put_device(component_dev);
kfree(rc);
return err;
}
EXPORT_SYMBOL(raid_component_add);
struct raid_template *
raid_class_attach(struct raid_function_template *ft)
{
struct raid_internal *i = kzalloc(sizeof(struct raid_internal),
GFP_KERNEL);
int count = 0;
if (unlikely(!i))
return NULL;
i->f = ft;
i->r.raid_attrs.ac.class = &raid_class.class;
i->r.raid_attrs.ac.match = raid_match;
i->r.raid_attrs.ac.attrs = &i->attrs[0];
attribute_container_register(&i->r.raid_attrs.ac);
i->attrs[count++] = &dev_attr_level;
i->attrs[count++] = &dev_attr_resync;
i->attrs[count++] = &dev_attr_state;
i->attrs[count] = NULL;
BUG_ON(count > RAID_NUM_ATTRS);
return &i->r;
}
EXPORT_SYMBOL(raid_class_attach);
void
raid_class_release(struct raid_template *r)
{
struct raid_internal *i = to_raid_internal(r);
BUG_ON(attribute_container_unregister(&i->r.raid_attrs.ac));
kfree(i);
}
EXPORT_SYMBOL(raid_class_release);
static __init int raid_init(void)
{
return transport_class_register(&raid_class);
}
static __exit void raid_exit(void)
{
transport_class_unregister(&raid_class);
}
MODULE_AUTHOR("James Bottomley");
MODULE_DESCRIPTION("RAID device class");
MODULE_LICENSE("GPL");
module_init(raid_init);
module_exit(raid_exit);
| gpl-2.0 |
plexinc/plex-home-theater-public | lib/timidity/libarc/url_inflate.c | 73 | 3437 | /*
TiMidity++ -- MIDI to WAVE converter and player
Copyright (C) 1999-2002 Masanao Izumo <mo@goice.co.jp>
Copyright (C) 1995 Tuukka Toivonen <tt@cgs.fi>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif /* HAVE_CONFIG_H */
#include <stdio.h>
#include <stdlib.h>
#include "timidity.h"
#include "url.h"
#include "mblock.h"
#include "zip.h"
typedef struct _URL_inflate
{
char common[sizeof(struct _URL)];
InflateHandler decoder;
URL instream;
long compsize;
long pos;
int autoclose;
} URL_inflate;
static long url_inflate_read_func(char *buf, long size, void *v);
static long url_inflate_read(URL url, void *buff, long n);
static long url_inflate_tell(URL url);
static void url_inflate_close(URL url);
URL url_inflate_open(URL instream, long compsize, int autoclose)
{
URL_inflate *url;
url = (URL_inflate *)alloc_url(sizeof(URL_inflate));
if(url == NULL)
{
if(autoclose)
url_close(instream);
url_errno = errno;
return NULL;
}
/* common members */
URLm(url, type) = URL_inflate_t;
URLm(url, url_read) = url_inflate_read;
URLm(url, url_gets) = NULL;
URLm(url, url_fgetc) = NULL;
URLm(url, url_seek) = NULL;
URLm(url, url_tell) = url_inflate_tell;
URLm(url, url_close) = url_inflate_close;
/* private members */
url->decoder = NULL;
url->instream = instream;
url->pos = 0;
url->compsize = compsize;
url->autoclose = autoclose;
errno = 0;
url->decoder = open_inflate_handler(url_inflate_read_func, url);
if(url->decoder == NULL)
{
if(autoclose)
url_close(instream);
url_inflate_close((URL)url);
url_errno = errno;
return NULL;
}
return (URL)url;
}
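/*
 * Usage sketch (illustrative; consume() is a hypothetical caller): wrap
 * an already-open URL whose next `compsize' bytes are a deflate stream,
 * then read decompressed data through the returned handle.  With
 * autoclose == 1, closing the wrapper also closes instream:
 *
 * URL u = url_inflate_open(instream, compsize, 1);
 * if(u != NULL)
 * {
 * char buf[256];
 * long n;
 * while((n = url_read(u, buf, sizeof(buf))) > 0)
 * consume(buf, n);
 * url_close(u);
 * }
 */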
static long url_inflate_read_func(char *buf, long size, void *v)
{
URL_inflate *urlp = (URL_inflate *)v;
long n;
if(urlp->compsize == -1) /* size is unknown */
return url_read(urlp->instream, buf, size);
if(urlp->compsize == 0)
return 0;
n = size;
if(n > urlp->compsize)
n = urlp->compsize;
n = url_read(urlp->instream, buf, n);
if(n == -1)
return -1;
urlp->compsize -= n;
return n;
}
static long url_inflate_read(URL url, void *buff, long n)
{
URL_inflate *urlp = (URL_inflate *)url;
n = zip_inflate(urlp->decoder, (char *)buff, n);
if(n <= 0)
return n;
urlp->pos += n;
return n;
}
static long url_inflate_tell(URL url)
{
return ((URL_inflate *)url)->pos;
}
static void url_inflate_close(URL url)
{
int save_errno = errno;
URL_inflate *urlp = (URL_inflate *)url;
if(urlp->decoder)
close_inflate_handler(urlp->decoder);
if(urlp->autoclose)
url_close(urlp->instream);
free(url);
errno = save_errno;
}
| gpl-2.0 |
shivamk11/phantom_taoshan | drivers/media/video/msm/csi/msm_csid.c | 73 | 18945 | /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <mach/board.h>
#include <mach/camera.h>
#include <media/msm_isp.h>
#include "msm_csid.h"
#include "msm_csid_hwreg.h"
#include "msm.h"
#include "msm_cam_server.h"
#define V4L2_IDENT_CSID 50002
#define CSID_VERSION_V2 0x02000011
#define CSID_VERSION_V3 0x30000000
#define DBG_CSID 0
#define TRUE 1
#define FALSE 0
static int msm_csid_cid_lut(
struct msm_camera_csid_lut_params *csid_lut_params,
void __iomem *csidbase)
{
int rc = 0, i = 0;
uint32_t val = 0;
if (!csid_lut_params) {
pr_err("%s:%d csid_lut_params NULL\n", __func__, __LINE__);
return -EINVAL;
}
for (i = 0; i < csid_lut_params->num_cid && i < 16; i++) {
CDBG("%s lut params num_cid = %d, cid = %d, dt = %x, df = %d\n",
__func__,
csid_lut_params->num_cid,
csid_lut_params->vc_cfg[i].cid,
csid_lut_params->vc_cfg[i].dt,
csid_lut_params->vc_cfg[i].decode_format);
if (csid_lut_params->vc_cfg[i].dt < 0x12 ||
csid_lut_params->vc_cfg[i].dt > 0x37) {
CDBG("%s: unsupported data type 0x%x\n",
__func__, csid_lut_params->vc_cfg[i].dt);
return rc;
}
val = msm_camera_io_r(csidbase + CSID_CID_LUT_VC_0_ADDR +
(csid_lut_params->vc_cfg[i].cid >> 2) * 4)
& ~(0xFF << ((csid_lut_params->vc_cfg[i].cid % 4) * 8));
val |= (csid_lut_params->vc_cfg[i].dt <<
((csid_lut_params->vc_cfg[i].cid % 4) * 8));
msm_camera_io_w(val, csidbase + CSID_CID_LUT_VC_0_ADDR +
(csid_lut_params->vc_cfg[i].cid >> 2) * 4);
val = (csid_lut_params->vc_cfg[i].decode_format << 4) | 0x3;
msm_camera_io_w(val, csidbase + CSID_CID_n_CFG_ADDR +
(csid_lut_params->vc_cfg[i].cid * 4));
}
return rc;
}
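/*
 * Worked example (illustrative) of the LUT packing above: sixteen CIDs
 * are packed four per 32-bit CSID_CID_LUT_VC register, eight bits of
 * data type each.  For cid = 6, dt = 0x2B (RAW10):
 *
 * register: CSID_CID_LUT_VC_0_ADDR + (6 >> 2) * 4 (assumed to be VC_1)
 * byte lane: (6 % 4) * 8 = 16, so the dt lands in bits 23:16
 */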
#if DBG_CSID
static void msm_csid_set_debug_reg(void __iomem *csidbase,
struct msm_camera_csid_params *csid_params)
{
uint32_t val = 0;
val = ((1 << csid_params->lane_cnt) - 1) << 20;
msm_camera_io_w(0x7f010800 | val, csidbase + CSID_IRQ_MASK_ADDR);
msm_camera_io_w(0x7f010800 | val, csidbase + CSID_IRQ_CLEAR_CMD_ADDR);
}
#else
static void msm_csid_set_debug_reg(void __iomem *csidbase,
struct msm_camera_csid_params *csid_params) {}
#endif
static int msm_csid_config(struct csid_device *csid_dev,
struct msm_camera_csid_params *csid_params)
{
int rc = 0;
uint32_t val = 0;
void __iomem *csidbase;
csidbase = csid_dev->base;
if (!csidbase || !csid_params) {
pr_err("%s:%d csidbase %p, csid params %p\n", __func__,
__LINE__, csidbase, csid_params);
return -EINVAL;
}
CDBG("%s csid_params, lane_cnt = %d, lane_assign = %x, phy sel = %d\n",
__func__,
csid_params->lane_cnt,
csid_params->lane_assign,
csid_params->phy_sel);
val = csid_params->lane_cnt - 1;
val |= csid_params->lane_assign << CSID_DL_INPUT_SEL_SHIFT;
if (csid_dev->hw_version < 0x30000000) {
val |= (0xF << 10);
msm_camera_io_w(val, csidbase + CSID_CORE_CTRL_0_ADDR);
} else {
msm_camera_io_w(val, csidbase + CSID_CORE_CTRL_0_ADDR);
val = csid_params->phy_sel << CSID_PHY_SEL_SHIFT;
val |= 0xF;
msm_camera_io_w(val, csidbase + CSID_CORE_CTRL_1_ADDR);
}
rc = msm_csid_cid_lut(&csid_params->lut_params, csidbase);
if (rc < 0)
return rc;
msm_csid_set_debug_reg(csidbase, csid_params);
return rc;
}
static irqreturn_t msm_csid_irq(int irq_num, void *data)
{
uint32_t irq;
struct csid_device *csid_dev = data;
if (!csid_dev || !csid_dev->base) {
pr_err("%s:%d csid_dev NULL\n", __func__, __LINE__);
return IRQ_HANDLED;
}
irq = msm_camera_io_r(csid_dev->base + CSID_IRQ_STATUS_ADDR);
CDBG("%s CSID%d_IRQ_STATUS_ADDR = 0x%x\n",
__func__, csid_dev->pdev->id, irq);
if (irq & (0x1 << CSID_RST_DONE_IRQ_BITSHIFT))
complete(&csid_dev->reset_complete);
msm_camera_io_w(irq, csid_dev->base + CSID_IRQ_CLEAR_CMD_ADDR);
return IRQ_HANDLED;
}
int msm_csid_irq_routine(struct v4l2_subdev *sd, u32 status, bool *handled)
{
struct csid_device *csid_dev = v4l2_get_subdevdata(sd);
irqreturn_t ret;
CDBG("%s E\n", __func__);
ret = msm_csid_irq(csid_dev->irq->start, csid_dev);
*handled = TRUE;
return 0;
}
static void msm_csid_reset(struct csid_device *csid_dev)
{
msm_camera_io_w(CSID_RST_STB_ALL, csid_dev->base + CSID_RST_CMD_ADDR);
wait_for_completion_interruptible(&csid_dev->reset_complete);
return;
}
static int msm_csid_subdev_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *chip)
{
BUG_ON(!chip);
chip->ident = V4L2_IDENT_CSID;
chip->revision = 0;
return 0;
}
static struct msm_cam_clk_info csid_8960_clk_info[] = {
{"csi_src_clk", 177780000},
{"csi_clk", -1},
{"csi_phy_clk", -1},
{"csi_pclk", -1},
};
static struct msm_cam_clk_info csid0_8974_clk_info[] = {
{"csi0_ahb_clk", -1},
{"csi0_src_clk", 200000000},
{"csi0_clk", -1},
{"csi0_phy_clk", -1},
{"csi0_pix_clk", -1},
{"csi0_rdi_clk", -1},
};
static struct msm_cam_clk_info csid1_8974_clk_info[] = {
{"csi1_ahb_clk", -1},
{"csi1_src_clk", 200000000},
{"csi1_clk", -1},
{"csi1_phy_clk", -1},
{"csi1_pix_clk", -1},
{"csi1_rdi_clk", -1},
};
static struct msm_cam_clk_info csid2_8974_clk_info[] = {
{"csi2_ahb_clk", -1},
{"csi2_src_clk", 200000000},
{"csi2_clk", -1},
{"csi2_phy_clk", -1},
{"csi2_pix_clk", -1},
{"csi2_rdi_clk", -1},
};
static struct msm_cam_clk_info csid3_8974_clk_info[] = {
{"csi3_ahb_clk", -1},
{"csi3_src_clk", 200000000},
{"csi3_clk", -1},
{"csi3_phy_clk", -1},
{"csi3_pix_clk", -1},
{"csi3_rdi_clk", -1},
};
static struct msm_cam_clk_setting csid_8974_clk_info[] = {
{&csid0_8974_clk_info[0], ARRAY_SIZE(csid0_8974_clk_info)},
{&csid1_8974_clk_info[0], ARRAY_SIZE(csid1_8974_clk_info)},
{&csid2_8974_clk_info[0], ARRAY_SIZE(csid2_8974_clk_info)},
{&csid3_8974_clk_info[0], ARRAY_SIZE(csid3_8974_clk_info)},
};
static struct camera_vreg_t csid_8960_vreg_info[] = {
{"mipi_csi_vdd", REG_LDO, 1200000, 1200000, 20000},
};
static struct camera_vreg_t csid_8974_vreg_info[] = {
{"mipi_csi_vdd", REG_LDO, 1800000, 1800000, 12000},
};
static int msm_csid_init(struct csid_device *csid_dev, uint32_t *csid_version)
{
int rc = 0;
uint8_t core_id = 0;
if (!csid_version) {
pr_err("%s:%d csid_version NULL\n", __func__, __LINE__);
rc = -EINVAL;
return rc;
}
if (csid_dev->csid_state == CSID_POWER_UP) {
pr_err("%s: csid invalid state %d\n", __func__,
csid_dev->csid_state);
rc = -EINVAL;
return rc;
}
csid_dev->base = ioremap(csid_dev->mem->start,
resource_size(csid_dev->mem));
if (!csid_dev->base) {
pr_err("%s csid_dev->base NULL\n", __func__);
rc = -ENOMEM;
return rc;
}
if (CSID_VERSION <= CSID_VERSION_V2) {
rc = msm_camera_config_vreg(&csid_dev->pdev->dev,
csid_8960_vreg_info, ARRAY_SIZE(csid_8960_vreg_info),
NULL, 0, &csid_dev->csi_vdd, 1);
if (rc < 0) {
pr_err("%s: regulator on failed\n", __func__);
goto vreg_config_failed;
}
rc = msm_camera_enable_vreg(&csid_dev->pdev->dev,
csid_8960_vreg_info, ARRAY_SIZE(csid_8960_vreg_info),
NULL, 0, &csid_dev->csi_vdd, 1);
if (rc < 0) {
pr_err("%s: regulator enable failed\n", __func__);
goto vreg_enable_failed;
}
rc = msm_cam_clk_enable(&csid_dev->pdev->dev,
csid_8960_clk_info, csid_dev->csid_clk,
ARRAY_SIZE(csid_8960_clk_info), 1);
if (rc < 0) {
pr_err("%s: clock enable failed\n", __func__);
goto clk_enable_failed;
}
} else if (CSID_VERSION == CSID_VERSION_V3) {
rc = msm_camera_config_vreg(&csid_dev->pdev->dev,
csid_8974_vreg_info, ARRAY_SIZE(csid_8974_vreg_info),
NULL, 0, &csid_dev->csi_vdd, 1);
if (rc < 0) {
pr_err("%s: regulator on failed\n", __func__);
goto vreg_config_failed;
}
rc = msm_camera_enable_vreg(&csid_dev->pdev->dev,
csid_8974_vreg_info, ARRAY_SIZE(csid_8974_vreg_info),
NULL, 0, &csid_dev->csi_vdd, 1);
if (rc < 0) {
pr_err("%s: regulator enable failed\n", __func__);
goto vreg_enable_failed;
}
rc = msm_cam_clk_enable(&csid_dev->pdev->dev,
csid_8974_clk_info[0].clk_info, csid_dev->csid0_clk,
csid_8974_clk_info[0].num_clk_info, 1);
if (rc < 0) {
pr_err("%s: clock enable failed\n", __func__);
goto csid0_clk_enable_failed;
}
core_id = csid_dev->pdev->id;
if (core_id) {
rc = msm_cam_clk_enable(&csid_dev->pdev->dev,
csid_8974_clk_info[core_id].clk_info,
csid_dev->csid_clk,
csid_8974_clk_info[core_id].num_clk_info, 1);
if (rc < 0) {
pr_err("%s: clock enable failed\n",
__func__);
goto clk_enable_failed;
}
}
}
csid_dev->hw_version =
msm_camera_io_r(csid_dev->base + CSID_HW_VERSION_ADDR);
*csid_version = csid_dev->hw_version;
init_completion(&csid_dev->reset_complete);
enable_irq(csid_dev->irq->start);
msm_csid_reset(csid_dev);
csid_dev->csid_state = CSID_POWER_UP;
return rc;
clk_enable_failed:
if (CSID_VERSION == CSID_VERSION_V3) {
msm_cam_clk_enable(&csid_dev->pdev->dev,
csid_8974_clk_info[0].clk_info, csid_dev->csid0_clk,
csid_8974_clk_info[0].num_clk_info, 0);
}
csid0_clk_enable_failed:
if (CSID_VERSION <= CSID_VERSION_V2) {
msm_camera_enable_vreg(&csid_dev->pdev->dev,
csid_8960_vreg_info, ARRAY_SIZE(csid_8960_vreg_info),
NULL, 0, &csid_dev->csi_vdd, 0);
} else if (CSID_VERSION == CSID_VERSION_V3) {
msm_camera_enable_vreg(&csid_dev->pdev->dev,
csid_8974_vreg_info, ARRAY_SIZE(csid_8974_vreg_info),
NULL, 0, &csid_dev->csi_vdd, 0);
}
vreg_enable_failed:
if (CSID_VERSION <= CSID_VERSION_V2) {
msm_camera_config_vreg(&csid_dev->pdev->dev,
csid_8960_vreg_info, ARRAY_SIZE(csid_8960_vreg_info),
NULL, 0, &csid_dev->csi_vdd, 0);
} else if (CSID_VERSION == CSID_VERSION_V3) {
msm_camera_config_vreg(&csid_dev->pdev->dev,
csid_8974_vreg_info, ARRAY_SIZE(csid_8974_vreg_info),
NULL, 0, &csid_dev->csi_vdd, 0);
}
vreg_config_failed:
iounmap(csid_dev->base);
csid_dev->base = NULL;
return rc;
}
static int msm_csid_release(struct csid_device *csid_dev)
{
uint32_t irq;
uint8_t core_id = 0;
if (csid_dev->csid_state != CSID_POWER_UP) {
pr_err("%s: csid invalid state %d\n", __func__,
csid_dev->csid_state);
return -EINVAL;
}
irq = msm_camera_io_r(csid_dev->base + CSID_IRQ_STATUS_ADDR);
msm_camera_io_w(irq, csid_dev->base + CSID_IRQ_CLEAR_CMD_ADDR);
msm_camera_io_w(0, csid_dev->base + CSID_IRQ_MASK_ADDR);
disable_irq(csid_dev->irq->start);
if (csid_dev->hw_version <= CSID_VERSION_V2) {
msm_cam_clk_enable(&csid_dev->pdev->dev, csid_8960_clk_info,
csid_dev->csid_clk, ARRAY_SIZE(csid_8960_clk_info), 0);
msm_camera_enable_vreg(&csid_dev->pdev->dev,
csid_8960_vreg_info, ARRAY_SIZE(csid_8960_vreg_info),
NULL, 0, &csid_dev->csi_vdd, 0);
msm_camera_config_vreg(&csid_dev->pdev->dev,
csid_8960_vreg_info, ARRAY_SIZE(csid_8960_vreg_info),
NULL, 0, &csid_dev->csi_vdd, 0);
} else if (csid_dev->hw_version == CSID_VERSION_V3) {
core_id = csid_dev->pdev->id;
if (core_id)
msm_cam_clk_enable(&csid_dev->pdev->dev,
csid_8974_clk_info[core_id].clk_info,
csid_dev->csid_clk,
csid_8974_clk_info[core_id].num_clk_info, 0);
msm_cam_clk_enable(&csid_dev->pdev->dev,
csid_8974_clk_info[0].clk_info, csid_dev->csid0_clk,
csid_8974_clk_info[0].num_clk_info, 0);
msm_camera_enable_vreg(&csid_dev->pdev->dev,
csid_8974_vreg_info, ARRAY_SIZE(csid_8974_vreg_info),
NULL, 0, &csid_dev->csi_vdd, 0);
msm_camera_config_vreg(&csid_dev->pdev->dev,
csid_8974_vreg_info, ARRAY_SIZE(csid_8974_vreg_info),
NULL, 0, &csid_dev->csi_vdd, 0);
}
iounmap(csid_dev->base);
csid_dev->base = NULL;
csid_dev->csid_state = CSID_POWER_DOWN;
return 0;
}
static long msm_csid_cmd(struct csid_device *csid_dev, void *arg)
{
int rc = 0;
struct csid_cfg_data cdata;
if (!csid_dev) {
pr_err("%s:%d csid_dev NULL\n", __func__, __LINE__);
return -EINVAL;
}
if (copy_from_user(&cdata,
(void *)arg,
sizeof(struct csid_cfg_data))) {
pr_err("%s: %d failed\n", __func__, __LINE__);
return -EFAULT;
}
CDBG("%s cfgtype = %d\n", __func__, cdata.cfgtype);
switch (cdata.cfgtype) {
case CSID_INIT:
rc = msm_csid_init(csid_dev, &cdata.cfg.csid_version);
if (copy_to_user((void *)arg,
&cdata,
sizeof(struct csid_cfg_data))) {
pr_err("%s: %d failed\n", __func__, __LINE__);
rc = -EFAULT;
}
break;
case CSID_CFG: {
struct msm_camera_csid_params csid_params;
struct msm_camera_csid_vc_cfg *vc_cfg = NULL;
if (copy_from_user(&csid_params,
(void *)cdata.cfg.csid_params,
sizeof(struct msm_camera_csid_params))) {
pr_err("%s: %d failed\n", __func__, __LINE__);
rc = -EFAULT;
break;
}
vc_cfg = kzalloc(csid_params.lut_params.num_cid *
sizeof(struct msm_camera_csid_vc_cfg),
GFP_KERNEL);
if (!vc_cfg) {
pr_err("%s: %d failed\n", __func__, __LINE__);
rc = -ENOMEM;
break;
}
if (copy_from_user(vc_cfg,
(void *)csid_params.lut_params.vc_cfg,
(csid_params.lut_params.num_cid *
sizeof(struct msm_camera_csid_vc_cfg)))) {
pr_err("%s: %d failed\n", __func__, __LINE__);
kfree(vc_cfg);
rc = -EFAULT;
break;
}
csid_params.lut_params.vc_cfg = vc_cfg;
rc = msm_csid_config(csid_dev, &csid_params);
kfree(vc_cfg);
break;
}
default:
pr_err("%s: %d failed\n", __func__, __LINE__);
rc = -ENOIOCTLCMD;
break;
}
return rc;
}
static long msm_csid_subdev_ioctl(struct v4l2_subdev *sd,
unsigned int cmd, void *arg)
{
int rc = -ENOIOCTLCMD;
struct csid_device *csid_dev = v4l2_get_subdevdata(sd);
mutex_lock(&csid_dev->mutex);
switch (cmd) {
case VIDIOC_MSM_CSID_CFG:
rc = msm_csid_cmd(csid_dev, arg);
break;
case VIDIOC_MSM_CSID_RELEASE:
rc = msm_csid_release(csid_dev);
break;
default:
pr_err("%s: command not found\n", __func__);
}
mutex_unlock(&csid_dev->mutex);
return rc;
}
static const struct v4l2_subdev_internal_ops msm_csid_internal_ops;
static struct v4l2_subdev_core_ops msm_csid_subdev_core_ops = {
.g_chip_ident = &msm_csid_subdev_g_chip_ident,
.ioctl = &msm_csid_subdev_ioctl,
.interrupt_service_routine = msm_csid_irq_routine,
};
static const struct v4l2_subdev_ops msm_csid_subdev_ops = {
.core = &msm_csid_subdev_core_ops,
};
static int __devinit csid_probe(struct platform_device *pdev)
{
struct csid_device *new_csid_dev;
struct msm_cam_subdev_info sd_info;
struct intr_table_entry irq_req;
int rc = 0;
CDBG("%s:%d called\n", __func__, __LINE__);
new_csid_dev = kzalloc(sizeof(struct csid_device), GFP_KERNEL);
if (!new_csid_dev) {
pr_err("%s: no enough memory\n", __func__);
return -ENOMEM;
}
v4l2_subdev_init(&new_csid_dev->subdev, &msm_csid_subdev_ops);
new_csid_dev->subdev.internal_ops = &msm_csid_internal_ops;
new_csid_dev->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
snprintf(new_csid_dev->subdev.name,
ARRAY_SIZE(new_csid_dev->subdev.name), "msm_csid");
v4l2_set_subdevdata(&new_csid_dev->subdev, new_csid_dev);
platform_set_drvdata(pdev, &new_csid_dev->subdev);
mutex_init(&new_csid_dev->mutex);
if (pdev->dev.of_node)
of_property_read_u32((&pdev->dev)->of_node,
"cell-index", &pdev->id);
CDBG("%s device id %d\n", __func__, pdev->id);
new_csid_dev->mem = platform_get_resource_byname(pdev,
IORESOURCE_MEM, "csid");
if (!new_csid_dev->mem) {
pr_err("%s: no mem resource?\n", __func__);
rc = -ENODEV;
goto csid_no_resource;
}
new_csid_dev->irq = platform_get_resource_byname(pdev,
IORESOURCE_IRQ, "csid");
if (!new_csid_dev->irq) {
pr_err("%s: no irq resource?\n", __func__);
rc = -ENODEV;
goto csid_no_resource;
}
new_csid_dev->io = request_mem_region(new_csid_dev->mem->start,
resource_size(new_csid_dev->mem), pdev->name);
if (!new_csid_dev->io) {
pr_err("%s: no valid mem region\n", __func__);
rc = -EBUSY;
goto csid_no_resource;
}
new_csid_dev->pdev = pdev;
sd_info.sdev_type = CSID_DEV;
sd_info.sd_index = pdev->id;
sd_info.irq_num = new_csid_dev->irq->start;
msm_cam_register_subdev_node(&new_csid_dev->subdev, &sd_info);
media_entity_init(&new_csid_dev->subdev.entity, 0, NULL, 0);
new_csid_dev->subdev.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
new_csid_dev->subdev.entity.group_id = CSID_DEV;
new_csid_dev->subdev.entity.name = pdev->name;
new_csid_dev->subdev.entity.revision =
new_csid_dev->subdev.devnode->num;
/* Request for this device irq from the camera server. If the
* IRQ Router is present on this target, the interrupt will be
* handled by the camera server and the interrupt service
* routine called. If the request_irq call returns ENXIO, then
* the IRQ Router hardware is not present on this target. We
* have to request for the irq ourselves and register the
* appropriate interrupt handler. */
irq_req.cam_hw_idx = MSM_CAM_HW_CSI0 + pdev->id;
irq_req.dev_name = "csid";
irq_req.irq_idx = CAMERA_SS_IRQ_2 + pdev->id;
irq_req.irq_num = new_csid_dev->irq->start;
irq_req.is_composite = 0;
irq_req.irq_trigger_type = IRQF_TRIGGER_RISING;
irq_req.num_hwcore = 1;
irq_req.subdev_list[0] = &new_csid_dev->subdev;
irq_req.data = (void *)new_csid_dev;
rc = msm_cam_server_request_irq(&irq_req);
if (rc == -ENXIO) {
/* IRQ Router hardware is not present on this hardware.
* Request for the IRQ and register the interrupt handler. */
rc = request_irq(new_csid_dev->irq->start, msm_csid_irq,
IRQF_TRIGGER_RISING, "csid", new_csid_dev);
if (rc < 0) {
release_mem_region(new_csid_dev->mem->start,
resource_size(new_csid_dev->mem));
pr_err("%s: irq request fail\n", __func__);
rc = -EBUSY;
goto csid_no_resource;
}
disable_irq(new_csid_dev->irq->start);
} else if (rc < 0) {
release_mem_region(new_csid_dev->mem->start,
resource_size(new_csid_dev->mem));
pr_err("%s Error registering irq ", __func__);
goto csid_no_resource;
}
new_csid_dev->csid_state = CSID_POWER_DOWN;
return 0;
csid_no_resource:
mutex_destroy(&new_csid_dev->mutex);
kfree(new_csid_dev);
return rc;
}
static const struct of_device_id msm_csid_dt_match[] = {
{.compatible = "qcom,csid"},
{}
};
MODULE_DEVICE_TABLE(of, msm_csid_dt_match);
static struct platform_driver csid_driver = {
.probe = csid_probe,
.driver = {
.name = MSM_CSID_DRV_NAME,
.owner = THIS_MODULE,
.of_match_table = msm_csid_dt_match,
},
};
static int __init msm_csid_init_module(void)
{
return platform_driver_register(&csid_driver);
}
static void __exit msm_csid_exit_module(void)
{
platform_driver_unregister(&csid_driver);
}
module_init(msm_csid_init_module);
module_exit(msm_csid_exit_module);
MODULE_DESCRIPTION("MSM CSID driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
adafruit/adafruit-raspberrypi-linux | arch/mips/mm/highmem.c | 329 | 2799 | #include <linux/compiler.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
static pte_t *kmap_pte;
unsigned long highstart_pfn, highend_pfn;
void *kmap(struct page *page)
{
void *addr;
might_sleep();
if (!PageHighMem(page))
return page_address(page);
addr = kmap_high(page);
flush_tlb_one((unsigned long)addr);
return addr;
}
EXPORT_SYMBOL(kmap);
void kunmap(struct page *page)
{
BUG_ON(in_interrupt());
if (!PageHighMem(page))
return;
kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
/*
* kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
* no global lock is needed and because the kmap code must perform a global TLB
* invalidation when the kmap pool wraps.
*
 * However, while holding an atomic kmap it is not legal to sleep, so atomic
* kmaps are appropriate for short, tight code paths only.
*/
void *kmap_atomic(struct page *page)
{
unsigned long vaddr;
int idx, type;
preempt_disable();
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
local_flush_tlb_one((unsigned long)vaddr);
return (void*) vaddr;
}
EXPORT_SYMBOL(kmap_atomic);
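/*
 * Usage sketch (illustrative only, not part of the original file; page,
 * offset, src and len are hypothetical caller state):
 *
 *	void *vaddr = kmap_atomic(page);
 *	memcpy(vaddr + offset, src, len);	(no sleeping in between)
 *	kunmap_atomic(vaddr);
 */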
void __kunmap_atomic(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
int type __maybe_unused;
	if (vaddr < FIXADDR_START) { /* FIXME */
pagefault_enable();
preempt_enable();
return;
}
type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
{
int idx = type + KM_TYPE_NR * smp_processor_id();
BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
/*
		 * force other mappings to Oops if they try to access
		 * this pte without first remapping it
*/
pte_clear(&init_mm, vaddr, kmap_pte-idx);
local_flush_tlb_one(vaddr);
}
#endif
kmap_atomic_idx_pop();
pagefault_enable();
preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
/*
* This is the same as kmap_atomic() but can map memory that doesn't
* have a struct page associated with it.
*/
void *kmap_atomic_pfn(unsigned long pfn)
{
unsigned long vaddr;
int idx, type;
preempt_disable();
pagefault_disable();
type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
flush_tlb_one(vaddr);
return (void*) vaddr;
}
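/*
 * Illustrative use (an assumption, not taken from this file): memory known
 * only by physical address can be mapped through its page frame number:
 *
 *	void *v = kmap_atomic_pfn(phys_addr >> PAGE_SHIFT);
 *	...access the mapping...
 *	__kunmap_atomic(v);
 */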
void __init kmap_init(void)
{
unsigned long kmap_vstart;
/* cache the first kmap pte */
kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}
| gpl-2.0 |
LuckJava/KVMGT-kernel | sound/oss/sb_ess.c | 329 | 52659 | #undef FKS_LOGGING
#undef FKS_TEST
/*
* tabs should be 4 spaces, in vi(m): set tabstop=4
*
 * TODO: make speed calculations consistent!!
* cleanup!
* ????: Did I break MIDI support?
*
* History:
*
* Rolf Fokkens (Dec 20 1998): ES188x recording level support on a per
* fokkensr@vertis.nl input basis.
* (Dec 24 1998): Recognition of ES1788, ES1887, ES1888,
* ES1868, ES1869 and ES1878. Could be used for
* specific handling in the future. All except
* ES1887 and ES1888 and ES688 are handled like
* ES1688.
* (Dec 27 1998): RECLEV for all (?) ES1688+ chips. ES188x now
* have the "Dec 20" support + RECLEV
* (Jan 2 1999): Preparation for Full Duplex. This means
* Audio 2 is now used for playback when dma16
* is specified. The next step would be to use
* Audio 1 and Audio 2 at the same time.
* (Jan 9 1999): Put all ESS stuff into sb_ess.[ch], this
* includes both the ESS stuff that has been in
* sb_*[ch] before I touched it and the ESS support
* I added later
* (Jan 23 1999): Full Duplex seems to work. I wrote a small
* test proggy which works OK. Haven't found
* any applications to test it though. So why did
* I bother to create it anyway?? :) Just for
* fun.
* (May 2 1999): I tried to be too smart by "introducing"
* ess_calc_best_speed (). The idea was that two
* dividers could be used to setup a samplerate,
* ess_calc_best_speed () would choose the best.
* This works for playback, but results in
* recording problems for high samplerates. I
* fixed this by removing ess_calc_best_speed ()
* and just doing what the documentation says.
* Andy Sloane (Jun 4 1999): Stole some code from ALSA to fix the playback
* andy@guildsoftware.com speed on ES1869, ES1879, ES1887, and ES1888.
* 1879's were previously ignored by this driver;
* added (untested) support for those.
* Cvetan Ivanov (Oct 27 1999): Fixed ess_dsp_init to call ess_set_dma_hw for
* zezo@inet.bg _ALL_ ESS models, not only ES1887
*
 * This file contains ESS chip specifics. It's based on the existing ESS
* handling as it resided in sb_common.c, sb_mixer.c and sb_audio.c. This
* file adds features like:
* - Chip Identification (as shown in /proc/sound)
* - RECLEV support for ES1688 and later
* - 6 bits playback level support chips later than ES1688
* - Recording level support on a per-device basis for ES1887
* - Full-Duplex for ES1887
*
* Full duplex is enabled by specifying dma16. While the normal dma must
* be one of 0, 1 or 3, dma16 can be one of 0, 1, 3 or 5. DMA 5 is a 16 bit
 * DMA channel, while the others are 8 bit.
*
 * ESS detection isn't foolproof (yet). If it fails, an additional module
 * parameter esstype can be specified to be one of the following:
* -1, 0, 688, 1688, 1868, 1869, 1788, 1887, 1888
* -1 means: mimic 2.0 behaviour,
* 0 means: auto detect.
* others: explicitly specify chip
 * -1 is default, because auto detect still doesn't work.
*/
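/*
 * For example (illustrative invocation, not from the original text; adjust
 * the resources to match your card):
 *
 *	modprobe sb io=0x220 irq=5 dma=1 dma16=5 esstype=1887
 *
 * would request full duplex on an ES1887, with DMA 1 driving Audio 1 and
 * the 16 bit DMA 5 driving Audio 2.
 */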
/*
* About the documentation
*
 * I don't know if the chips all are OK, but the documentation is buggy. Because
 * I don't have all the chips myself, there's a lot I cannot verify. I'll try to
 * keep track of my latest insights about this here. If you have additional info,
* please enlighten me (fokkensr@vertis.nl)!
*
* I had the impression that ES1688 also has 6 bit master volume control. The
* documentation about ES1888 (rev C, october '95) claims that ES1888 has
* the following features ES1688 doesn't have:
* - 6 bit master volume
* - Full Duplex
* So ES1688 apparently doesn't have 6 bit master volume control, but the
* ES1688 does have RECLEV control. Makes me wonder: does ES688 have it too?
* Without RECLEV ES688 won't be much fun I guess.
*
* From the ES1888 (rev C, october '95) documentation I got the impression
* that registers 0x68 to 0x6e don't exist which means: no recording volume
* controls. To my surprise the ES888 documentation (1/14/96) claims that
* ES888 does have these record mixer registers, but that ES1888 doesn't have
* 0x69 and 0x6b. So the rest should be there.
*
 * I'm trying to get ES1887 Full Duplex. Audio 2 is playback only, while Audio 1
 * is both record and playback. I think I should use Audio 2 for all playback.
*
* The documentation is an adventure: it's close but not fully accurate. I
* found out that after a reset some registers are *NOT* reset, though the
 * docs say they would be. Interesting ones are 0x7f, 0x7d and 0x7a. They are
* related to the Audio 2 channel. I also was surprised about the consequences
* of writing 0x00 to 0x7f (which should be done by reset): The ES1887 moves
* into ES1888 mode. This means that it claims IRQ 11, which happens to be my
* ISDN adapter. Needless to say it no longer worked. I now understand why
* after rebooting 0x7f already was 0x05, the value of my choice: the BIOS
* did it.
*
* Oh, and this is another trap: in ES1887 docs mixer register 0x70 is
* described as if it's exactly the same as register 0xa1. This is *NOT* true.
* The description of 0x70 in ES1869 docs is accurate however.
* Well, the assumption about ES1869 was wrong: register 0x70 is very much
* like register 0xa1, except that bit 7 is always 1, whatever you want
* it to be.
*
 * When using audio 2, mixer register 0x72 seems to be meaningless. Only 0xa2
* has effect.
*
* Software reset not being able to reset all registers is great! Especially
* the fact that register 0x78 isn't reset is great when you wanna change back
* to single dma operation (simplex): audio 2 is still operational, and uses
* the same dma as audio 1: your ess changes into a funny echo machine.
*
* Received the news that ES1688 is detected as a ES1788. Did some thinking:
* the ES1887 detection scheme suggests in step 2 to try if bit 3 of register
 * 0x64 can be changed. This is inaccurate. First I inverted the check: "if it
 * can be modified, it's a 1688", which led to a correct detection
* of my ES1887. It resulted however in bad detection of 1688 (reported by mail)
* and 1868 (if no PnP detection first): they result in a 1788 being detected.
* I don't have docs on 1688, but I do have docs on 1868: The documentation is
* probably inaccurate in the fact that I should check bit 2, not bit 3. This
* is what I do now.
*/
/*
* About recognition of ESS chips
*
* The distinction of ES688, ES1688, ES1788, ES1887 and ES1888 is described in
* a (preliminary ??) datasheet on ES1887. Its aim is to identify ES1887, but
* during detection the text claims that "this chip may be ..." when a step
* fails. This scheme is used to distinct between the above chips.
* It appears however that some PnP chips like ES1868 are recognized as ES1788
* by the ES1887 detection scheme. These PnP chips can be detected in another
 * way however: ES1868, ES1869 and ES1878 can be recognized (foolproof I think)
* by repeatedly reading mixer register 0x40. This is done by ess_identify in
* sb_common.c.
* This results in the following detection steps:
 * - distinguish between ES688 and ES1688+ (as always done in this driver)
* if ES688 we're ready
* - try to detect ES1868, ES1869 or ES1878
* if successful we're ready
* - try to detect ES1888, ES1887 or ES1788
* if successful we're ready
* - Dunno. Must be 1688. Will do in general
*
* About RECLEV support:
*
* The existing ES1688 support didn't take care of the ES1688+ recording
* levels very well. Whenever a device was selected (recmask) for recording
* its recording level was loud, and it couldn't be changed. The fact that
 * internal register 0xb4 could take care of RECLEV didn't mean anything until
 * its value was restored every time the chip was reset; the reset cleared the
 * value of 0xb4 too. I guess that's what 4front also had (have?) trouble with.
*
* About ES1887 support:
*
* The ES1887 has separate registers to control the recording levels, for all
* inputs. The ES1887 specific software makes these levels the same as their
* corresponding playback levels, unless recmask says they aren't recorded. In
* the latter case the recording volumes are 0.
* Now recording levels of inputs can be controlled, by changing the playback
* levels. Furthermore several devices can be recorded together (which is not
* possible with the ES1688).
* Besides the separate recording level control for each input, the common
* recording level can also be controlled by RECLEV as described above.
*
* Not only ES1887 have this recording mixer. I know the following from the
* documentation:
* ES688 no
* ES1688 no
* ES1868 no
* ES1869 yes
* ES1878 no
* ES1879 yes
* ES1888 no/yes Contradicting documentation; most recent: yes
* ES1946 yes This is a PCI chip; not handled by this driver
*/
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include "sound_config.h"
#include "sb_mixer.h"
#include "sb.h"
#include "sb_ess.h"
#define ESSTYPE_LIKE20 -1 /* Mimic 2.0 behaviour */
#define ESSTYPE_DETECT 0 /* Mimic 2.0 behaviour */
#define SUBMDL_ES1788 0x10 /* Subtype ES1788 for specific handling */
#define SUBMDL_ES1868 0x11 /* Subtype ES1868 for specific handling */
#define SUBMDL_ES1869 0x12 /* Subtype ES1869 for specific handling */
#define SUBMDL_ES1878 0x13 /* Subtype ES1878 for specific handling */
#define SUBMDL_ES1879 0x16 /* ES1879 was initially forgotten */
#define SUBMDL_ES1887 0x14 /* Subtype ES1887 for specific handling */
#define SUBMDL_ES1888 0x15 /* Subtype ES1888 for specific handling */
#define SB_CAP_ES18XX_RATE 0x100
#define ES1688_CLOCK1 795444 /* 128 - div */
#define ES1688_CLOCK2 397722 /* 256 - div */
#define ES18XX_CLOCK1 793800 /* 128 - div */
#define ES18XX_CLOCK2 768000 /* 256 - div */
#ifdef FKS_LOGGING
static void ess_show_mixerregs (sb_devc *devc);
#endif
static int ess_read (sb_devc * devc, unsigned char reg);
static int ess_write (sb_devc * devc, unsigned char reg, unsigned char data);
static void ess_chgmixer
(sb_devc * devc, unsigned int reg, unsigned int mask, unsigned int val);
/****************************************************************************
* *
* ESS audio *
* *
****************************************************************************/
struct ess_command {short cmd; short data;};
/*
* Commands for initializing Audio 1 for input (record)
*/
static struct ess_command ess_i08m[] = /* input 8 bit mono */
{ {0xb7, 0x51}, {0xb7, 0xd0}, {-1, 0} };
static struct ess_command ess_i16m[] = /* input 16 bit mono */
{ {0xb7, 0x71}, {0xb7, 0xf4}, {-1, 0} };
static struct ess_command ess_i08s[] = /* input 8 bit stereo */
{ {0xb7, 0x51}, {0xb7, 0x98}, {-1, 0} };
static struct ess_command ess_i16s[] = /* input 16 bit stereo */
{ {0xb7, 0x71}, {0xb7, 0xbc}, {-1, 0} };
static struct ess_command *ess_inp_cmds[] =
{ ess_i08m, ess_i16m, ess_i08s, ess_i16s };
/*
* Commands for initializing Audio 1 for output (playback)
*/
static struct ess_command ess_o08m[] = /* output 8 bit mono */
{ {0xb6, 0x80}, {0xb7, 0x51}, {0xb7, 0xd0}, {-1, 0} };
static struct ess_command ess_o16m[] = /* output 16 bit mono */
{ {0xb6, 0x00}, {0xb7, 0x71}, {0xb7, 0xf4}, {-1, 0} };
static struct ess_command ess_o08s[] = /* output 8 bit stereo */
{ {0xb6, 0x80}, {0xb7, 0x51}, {0xb7, 0x98}, {-1, 0} };
static struct ess_command ess_o16s[] = /* output 16 bit stereo */
{ {0xb6, 0x00}, {0xb7, 0x71}, {0xb7, 0xbc}, {-1, 0} };
static struct ess_command *ess_out_cmds[] =
{ ess_o08m, ess_o16m, ess_o08s, ess_o16s };
static void ess_exec_commands
(sb_devc *devc, struct ess_command *cmdtab[])
{
struct ess_command *cmd;
cmd = cmdtab [ ((devc->channels != 1) << 1) + (devc->bits != AFMT_U8) ];
while (cmd->cmd != -1) {
ess_write (devc, cmd->cmd, cmd->data);
cmd++;
}
}
static void ess_change
(sb_devc *devc, unsigned int reg, unsigned int mask, unsigned int val)
{
int value;
value = ess_read (devc, reg);
value = (value & ~mask) | (val & mask);
ess_write (devc, reg, value);
}
static void ess_set_output_parms
(int dev, unsigned long buf, int nr_bytes, int intrflag)
{
sb_devc *devc = audio_devs[dev]->devc;
if (devc->duplex) {
devc->trg_buf_16 = buf;
devc->trg_bytes_16 = nr_bytes;
devc->trg_intrflag_16 = intrflag;
devc->irq_mode_16 = IMODE_OUTPUT;
} else {
devc->trg_buf = buf;
devc->trg_bytes = nr_bytes;
devc->trg_intrflag = intrflag;
devc->irq_mode = IMODE_OUTPUT;
}
}
static void ess_set_input_parms
(int dev, unsigned long buf, int count, int intrflag)
{
sb_devc *devc = audio_devs[dev]->devc;
devc->trg_buf = buf;
devc->trg_bytes = count;
devc->trg_intrflag = intrflag;
devc->irq_mode = IMODE_INPUT;
}
static int ess_calc_div (int clock, int revert, int *speedp, int *diffp)
{
int divider;
int speed, diff;
int retval;
speed = *speedp;
divider = (clock + speed / 2) / speed;
retval = revert - divider;
if (retval > revert - 1) {
retval = revert - 1;
divider = revert - retval;
}
/* This line is suggested. Must be wrong I think
*speedp = (clock + divider / 2) / divider;
So I chose the next one */
*speedp = clock / divider;
diff = speed - *speedp;
	if (diff < 0) diff = -diff;
*diffp = diff;
return retval;
}
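/*
 * Worked example (added for clarity): for clock = 795444, revert = 128 and
 * a requested speed of 22050, the divider is (795444 + 11025) / 22050 = 36,
 * the returned register value 128 - 36 = 92, the realised speed
 * 795444 / 36 = 22095 and the reported diff 45.
 */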
static int ess_calc_best_speed
(int clock1, int rev1, int clock2, int rev2, int *divp, int *speedp)
{
int speed1 = *speedp, speed2 = *speedp;
int div1, div2;
int diff1, diff2;
int retval;
div1 = ess_calc_div (clock1, rev1, &speed1, &diff1);
div2 = ess_calc_div (clock2, rev2, &speed2, &diff2);
if (diff1 < diff2) {
*divp = div1;
*speedp = speed1;
retval = 1;
} else {
/* *divp = div2; */
*divp = 0x80 | div2;
*speedp = speed2;
retval = 2;
}
return retval;
}
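/*
 * Example (added for clarity): asking for 44100 with the ES18XX clocks
 * gives div1 = (793800 + 22050) / 44100 = 18 (exactly 44100, diff 0) and
 * div2 = (768000 + 22050) / 44100 = 17 (45176, diff 1076), so clock 1 wins
 * and *divp becomes 128 - 18 = 110, without the 0x80 flag.
 */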
/*
* Depending on the audiochannel ESS devices can
* have different clock settings. These are made consistent for duplex
* however.
 * Callers of ess_speed only suggest an audionum: input suggests 1,
 * output suggests 2. The suggestion only holds true when doing
 * duplex, however.
*/
static void ess_common_speed (sb_devc *devc, int *speedp, int *divp)
{
int diff = 0, div;
if (devc->duplex) {
/*
* The 0x80 is important for the first audio channel
*/
if (devc->submodel == SUBMDL_ES1888) {
div = 0x80 | ess_calc_div (795500, 256, speedp, &diff);
} else {
div = 0x80 | ess_calc_div (795500, 128, speedp, &diff);
}
} else if(devc->caps & SB_CAP_ES18XX_RATE) {
if (devc->submodel == SUBMDL_ES1888) {
ess_calc_best_speed(397700, 128, 795500, 256,
&div, speedp);
} else {
ess_calc_best_speed(ES18XX_CLOCK1, 128, ES18XX_CLOCK2, 256,
&div, speedp);
}
} else {
if (*speedp > 22000) {
div = 0x80 | ess_calc_div (ES1688_CLOCK1, 256, speedp, &diff);
} else {
div = 0x00 | ess_calc_div (ES1688_CLOCK2, 128, speedp, &diff);
}
}
*divp = div;
}
static void ess_speed (sb_devc *devc, int audionum)
{
int speed;
int div, div2;
ess_common_speed (devc, &(devc->speed), &div);
#ifdef FKS_REG_LOGGING
printk (KERN_INFO "FKS: ess_speed (%d) b speed = %d, div=%x\n", audionum, devc->speed, div);
#endif
/* Set filter roll-off to 90% of speed/2 */
speed = (devc->speed * 9) / 20;
div2 = 256 - 7160000 / (speed * 82);
if (!devc->duplex) audionum = 1;
if (audionum == 1) {
/* Change behaviour of register A1 *
sb_chg_mixer(devc, 0x71, 0x20, 0x20)
* For ES1869 only??? */
ess_write (devc, 0xa1, div);
ess_write (devc, 0xa2, div2);
} else {
ess_setmixer (devc, 0x70, div);
/*
* FKS: fascinating: 0x72 doesn't seem to work.
*/
ess_write (devc, 0xa2, div2);
ess_setmixer (devc, 0x72, div2);
}
}
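/*
 * Worked example of the roll-off setup above (added for illustration): at
 * 22050 Hz, speed = 22050 * 9 / 20 = 9922 and
 * div2 = 256 - 7160000 / (9922 * 82) = 256 - 8 = 248.
 */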
static int ess_audio_prepare_for_input(int dev, int bsize, int bcount)
{
sb_devc *devc = audio_devs[dev]->devc;
ess_speed(devc, 1);
sb_dsp_command(devc, DSP_CMD_SPKOFF);
ess_write (devc, 0xb8, 0x0e); /* Auto init DMA mode */
ess_change (devc, 0xa8, 0x03, 3 - devc->channels); /* Mono/stereo */
ess_write (devc, 0xb9, 2); /* Demand mode (4 bytes/DMA request) */
ess_exec_commands (devc, ess_inp_cmds);
ess_change (devc, 0xb1, 0xf0, 0x50);
ess_change (devc, 0xb2, 0xf0, 0x50);
devc->trigger_bits = 0;
return 0;
}
static int ess_audio_prepare_for_output_audio1 (int dev, int bsize, int bcount)
{
sb_devc *devc = audio_devs[dev]->devc;
sb_dsp_reset(devc);
ess_speed(devc, 1);
ess_write (devc, 0xb8, 4); /* Auto init DMA mode */
ess_change (devc, 0xa8, 0x03, 3 - devc->channels); /* Mono/stereo */
ess_write (devc, 0xb9, 2); /* Demand mode (4 bytes/request) */
ess_exec_commands (devc, ess_out_cmds);
ess_change (devc, 0xb1, 0xf0, 0x50); /* Enable DMA */
ess_change (devc, 0xb2, 0xf0, 0x50); /* Enable IRQ */
sb_dsp_command(devc, DSP_CMD_SPKON); /* There be sound! */
devc->trigger_bits = 0;
return 0;
}
static int ess_audio_prepare_for_output_audio2 (int dev, int bsize, int bcount)
{
sb_devc *devc = audio_devs[dev]->devc;
unsigned char bits;
/* FKS: qqq
sb_dsp_reset(devc);
*/
/*
* Auto-Initialize:
* DMA mode + demand mode (8 bytes/request, yes I want it all!)
* But leave 16-bit DMA bit untouched!
*/
ess_chgmixer (devc, 0x78, 0xd0, 0xd0);
ess_speed(devc, 2);
/* bits 4:3 on ES1887 represent recording source. Keep them! */
bits = ess_getmixer (devc, 0x7a) & 0x18;
/* Set stereo/mono */
if (devc->channels != 1) bits |= 0x02;
/* Init DACs; UNSIGNED mode for 8 bit; SIGNED mode for 16 bit */
if (devc->bits != AFMT_U8) bits |= 0x05; /* 16 bit */
/* Enable DMA, IRQ will be shared (hopefully)*/
bits |= 0x60;
ess_setmixer (devc, 0x7a, bits);
ess_mixer_reload (devc, SOUND_MIXER_PCM); /* There be sound! */
devc->trigger_bits = 0;
return 0;
}
static int ess_audio_prepare_for_output(int dev, int bsize, int bcount)
{
sb_devc *devc = audio_devs[dev]->devc;
#ifdef FKS_REG_LOGGING
printk(KERN_INFO "ess_audio_prepare_for_output: dma_out=%d,dma_in=%d\n"
, audio_devs[dev]->dmap_out->dma, audio_devs[dev]->dmap_in->dma);
#endif
if (devc->duplex) {
return ess_audio_prepare_for_output_audio2 (dev, bsize, bcount);
} else {
return ess_audio_prepare_for_output_audio1 (dev, bsize, bcount);
}
}
static void ess_audio_halt_xfer(int dev)
{
unsigned long flags;
sb_devc *devc = audio_devs[dev]->devc;
spin_lock_irqsave(&devc->lock, flags);
sb_dsp_reset(devc);
spin_unlock_irqrestore(&devc->lock, flags);
/*
* Audio 2 may still be operational! Creates awful sounds!
*/
if (devc->duplex) ess_chgmixer(devc, 0x78, 0x03, 0x00);
}
static void ess_audio_start_input
(int dev, unsigned long buf, int nr_bytes, int intrflag)
{
int count = nr_bytes;
sb_devc *devc = audio_devs[dev]->devc;
short c = -nr_bytes;
/*
* Start a DMA input to the buffer pointed by dmaqtail
*/
if (audio_devs[dev]->dmap_in->dma > 3) count >>= 1;
count--;
devc->irq_mode = IMODE_INPUT;
ess_write (devc, 0xa4, (unsigned char) ((unsigned short) c & 0xff));
ess_write (devc, 0xa5, (unsigned char) (((unsigned short) c >> 8) & 0xff));
ess_change (devc, 0xb8, 0x0f, 0x0f); /* Go */
devc->intr_active = 1;
}
static void ess_audio_output_block_audio1
(int dev, unsigned long buf, int nr_bytes, int intrflag)
{
int count = nr_bytes;
sb_devc *devc = audio_devs[dev]->devc;
short c = -nr_bytes;
if (audio_devs[dev]->dmap_out->dma > 3)
count >>= 1;
count--;
devc->irq_mode = IMODE_OUTPUT;
ess_write (devc, 0xa4, (unsigned char) ((unsigned short) c & 0xff));
ess_write (devc, 0xa5, (unsigned char) (((unsigned short) c >> 8) & 0xff));
ess_change (devc, 0xb8, 0x05, 0x05); /* Go */
devc->intr_active = 1;
}
static void ess_audio_output_block_audio2
(int dev, unsigned long buf, int nr_bytes, int intrflag)
{
int count = nr_bytes;
sb_devc *devc = audio_devs[dev]->devc;
short c = -nr_bytes;
if (audio_devs[dev]->dmap_out->dma > 3) count >>= 1;
count--;
ess_setmixer (devc, 0x74, (unsigned char) ((unsigned short) c & 0xff));
ess_setmixer (devc, 0x76, (unsigned char) (((unsigned short) c >> 8) & 0xff));
ess_chgmixer (devc, 0x78, 0x03, 0x03); /* Go */
devc->irq_mode_16 = IMODE_OUTPUT;
devc->intr_active_16 = 1;
}
static void ess_audio_output_block
(int dev, unsigned long buf, int nr_bytes, int intrflag)
{
sb_devc *devc = audio_devs[dev]->devc;
if (devc->duplex) {
ess_audio_output_block_audio2 (dev, buf, nr_bytes, intrflag);
} else {
ess_audio_output_block_audio1 (dev, buf, nr_bytes, intrflag);
}
}
/*
* FKS: the if-statements for both bits and bits_16 are quite alike.
* Combine this...
*/
static void ess_audio_trigger(int dev, int bits)
{
sb_devc *devc = audio_devs[dev]->devc;
int bits_16 = bits & devc->irq_mode_16;
bits &= devc->irq_mode;
if (!bits && !bits_16) {
/* FKS oh oh.... wrong?? for dma 16? */
sb_dsp_command(devc, 0xd0); /* Halt DMA */
}
if (bits) {
switch (devc->irq_mode)
{
case IMODE_INPUT:
ess_audio_start_input(dev, devc->trg_buf, devc->trg_bytes,
devc->trg_intrflag);
break;
case IMODE_OUTPUT:
ess_audio_output_block(dev, devc->trg_buf, devc->trg_bytes,
devc->trg_intrflag);
break;
}
}
if (bits_16) {
switch (devc->irq_mode_16) {
case IMODE_INPUT:
ess_audio_start_input(dev, devc->trg_buf_16, devc->trg_bytes_16,
devc->trg_intrflag_16);
break;
case IMODE_OUTPUT:
ess_audio_output_block(dev, devc->trg_buf_16, devc->trg_bytes_16,
devc->trg_intrflag_16);
break;
}
}
devc->trigger_bits = bits | bits_16;
}
static int ess_audio_set_speed(int dev, int speed)
{
sb_devc *devc = audio_devs[dev]->devc;
int minspeed, maxspeed, dummydiv;
if (speed > 0) {
minspeed = (devc->duplex ? 6215 : 5000 );
maxspeed = (devc->duplex ? 44100 : 48000);
if (speed < minspeed) speed = minspeed;
if (speed > maxspeed) speed = maxspeed;
ess_common_speed (devc, &speed, &dummydiv);
devc->speed = speed;
}
return devc->speed;
}
/*
* FKS: This is a one-on-one copy of sb1_audio_set_bits
*/
static unsigned int ess_audio_set_bits(int dev, unsigned int bits)
{
sb_devc *devc = audio_devs[dev]->devc;
if (bits != 0) {
if (bits == AFMT_U8 || bits == AFMT_S16_LE) {
devc->bits = bits;
} else {
devc->bits = AFMT_U8;
}
}
return devc->bits;
}
/*
* FKS: This is a one-on-one copy of sbpro_audio_set_channels
* (*) Modified it!!
*/
static short ess_audio_set_channels(int dev, short channels)
{
sb_devc *devc = audio_devs[dev]->devc;
if (channels == 1 || channels == 2) devc->channels = channels;
return devc->channels;
}
static struct audio_driver ess_audio_driver = /* ESS ES688/1688 */
{
.owner = THIS_MODULE,
.open = sb_audio_open,
.close = sb_audio_close,
.output_block = ess_set_output_parms,
.start_input = ess_set_input_parms,
.prepare_for_input = ess_audio_prepare_for_input,
.prepare_for_output = ess_audio_prepare_for_output,
.halt_io = ess_audio_halt_xfer,
.trigger = ess_audio_trigger,
.set_speed = ess_audio_set_speed,
.set_bits = ess_audio_set_bits,
.set_channels = ess_audio_set_channels
};
/*
* ess_audio_init must be called from sb_audio_init
*/
struct audio_driver *ess_audio_init
(sb_devc *devc, int *audio_flags, int *format_mask)
{
*audio_flags = DMA_AUTOMODE;
*format_mask |= AFMT_S16_LE;
if (devc->duplex) {
int tmp_dma;
/*
* sb_audio_init thinks dma8 is for playback and
* dma16 is for record. Not now! So swap them.
*/
tmp_dma = devc->dma16;
devc->dma16 = devc->dma8;
devc->dma8 = tmp_dma;
*audio_flags |= DMA_DUPLEX;
}
return &ess_audio_driver;
}
/****************************************************************************
* *
* ESS common *
* *
****************************************************************************/
static void ess_handle_channel
(char *channel, int dev, int intr_active, unsigned char flag, int irq_mode)
{
if (!intr_active || !flag) return;
#ifdef FKS_REG_LOGGING
printk(KERN_INFO "FKS: ess_handle_channel %s irq_mode=%d\n", channel, irq_mode);
#endif
switch (irq_mode) {
case IMODE_OUTPUT:
DMAbuf_outputintr (dev, 1);
break;
case IMODE_INPUT:
DMAbuf_inputintr (dev);
break;
case IMODE_INIT:
break;
default:;
/* printk(KERN_WARNING "ESS: Unexpected interrupt\n"); */
}
}
/*
* FKS: TODO!!! Finish this!
*
* I think midi stuff uses uart401, without interrupts.
* So IMODE_MIDI isn't a value for devc->irq_mode.
*/
void ess_intr (sb_devc *devc)
{
int status;
unsigned char src;
if (devc->submodel == SUBMDL_ES1887) {
src = ess_getmixer (devc, 0x7f) >> 4;
} else {
src = 0xff;
}
#ifdef FKS_REG_LOGGING
printk(KERN_INFO "FKS: sbintr src=%x\n",(int)src);
#endif
ess_handle_channel
( "Audio 1"
, devc->dev, devc->intr_active , src & 0x01, devc->irq_mode );
ess_handle_channel
( "Audio 2"
, devc->dev, devc->intr_active_16, src & 0x02, devc->irq_mode_16);
/*
* Acknowledge interrupts
*/
if (devc->submodel == SUBMDL_ES1887 && (src & 0x02)) {
ess_chgmixer (devc, 0x7a, 0x80, 0x00);
}
if (src & 0x01) {
status = inb(DSP_DATA_AVAIL);
}
}
static void ess_extended (sb_devc * devc)
{
/* Enable extended mode */
sb_dsp_command(devc, 0xc6);
}
static int ess_write (sb_devc * devc, unsigned char reg, unsigned char data)
{
#ifdef FKS_REG_LOGGING
printk(KERN_INFO "FKS: write reg %x: %x\n", reg, data);
#endif
/* Write a byte to an extended mode register of ES1688 */
if (!sb_dsp_command(devc, reg))
return 0;
return sb_dsp_command(devc, data);
}
static int ess_read (sb_devc * devc, unsigned char reg)
{
/* Read a byte from an extended mode register of ES1688 */
/* Read register command */
if (!sb_dsp_command(devc, 0xc0)) return -1;
if (!sb_dsp_command(devc, reg )) return -1;
return sb_dsp_get_byte(devc);
}
int ess_dsp_reset(sb_devc * devc)
{
int loopc;
#ifdef FKS_REG_LOGGING
printk(KERN_INFO "FKS: ess_dsp_reset 1\n");
ess_show_mixerregs (devc);
#endif
DEB(printk("Entered ess_dsp_reset()\n"));
outb(3, DSP_RESET); /* Reset FIFO too */
udelay(10);
outb(0, DSP_RESET);
udelay(30);
for (loopc = 0; loopc < 1000 && !(inb(DSP_DATA_AVAIL) & 0x80); loopc++);
if (inb(DSP_READ) != 0xAA) {
DDB(printk("sb: No response to RESET\n"));
return 0; /* Sorry */
}
ess_extended (devc);
DEB(printk("sb_dsp_reset() OK\n"));
#ifdef FKS_LOGGING
printk(KERN_INFO "FKS: dsp_reset 2\n");
ess_show_mixerregs (devc);
#endif
return 1;
}
static int ess_irq_bits (int irq)
{
switch (irq) {
case 2:
case 9:
return 0;
case 5:
return 1;
case 7:
return 2;
case 10:
return 3;
default:
printk(KERN_ERR "ESS1688: Invalid IRQ %d\n", irq);
return -1;
}
}
/*
* Set IRQ configuration register for all ESS models
*/
static int ess_common_set_irq_hw (sb_devc * devc)
{
int irq_bits;
if ((irq_bits = ess_irq_bits (devc->irq)) == -1) return 0;
if (!ess_write (devc, 0xb1, 0x50 | (irq_bits << 2))) {
printk(KERN_ERR "ES1688: Failed to write to IRQ config register\n");
return 0;
}
return 1;
}
/*
 * I want to use the modern ES1887 mixer irq handling. Funnily enough,
 * my BIOS wants the same. But suppose someone's BIOS
 * doesn't do this!
* This is independent of duplex. If there's a 1887 this will
* prevent it from going into 1888 mode.
*/
static void ess_es1887_set_irq_hw (sb_devc * devc)
{
int irq_bits;
if ((irq_bits = ess_irq_bits (devc->irq)) == -1) return;
ess_chgmixer (devc, 0x7f, 0x0f, 0x01 | ((irq_bits + 1) << 1));
}
static int ess_set_irq_hw (sb_devc * devc)
{
if (devc->submodel == SUBMDL_ES1887) ess_es1887_set_irq_hw (devc);
return ess_common_set_irq_hw (devc);
}
#ifdef FKS_TEST
/*
* FKS_test:
* for ES1887: 00, 18, non wr bits: 0001 1000
* for ES1868: 00, b8, non wr bits: 1011 1000
* for ES1888: 00, f8, non wr bits: 1111 1000
* for ES1688: 00, f8, non wr bits: 1111 1000
* + ES968
*/
static void FKS_test (sb_devc * devc)
{
int val1, val2;
val1 = ess_getmixer (devc, 0x64);
ess_setmixer (devc, 0x64, ~val1);
val2 = ess_getmixer (devc, 0x64) ^ ~val1;
ess_setmixer (devc, 0x64, val1);
val1 ^= ess_getmixer (devc, 0x64);
printk (KERN_INFO "FKS: FKS_test %02x, %02x\n", (val1 & 0x0ff), (val2 & 0x0ff));
}
#endif
static unsigned int ess_identify (sb_devc * devc)
{
unsigned int val;
unsigned long flags;
spin_lock_irqsave(&devc->lock, flags);
outb(((unsigned char) (0x40 & 0xff)), MIXER_ADDR);
udelay(20);
val = inb(MIXER_DATA) << 8;
udelay(20);
val |= inb(MIXER_DATA);
udelay(20);
spin_unlock_irqrestore(&devc->lock, flags);
return val;
}
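/*
 * Illustration (added; the values are examples): two consecutive reads of
 * mixer register 0x40 form a 16 bit id, e.g. an ES1868 answers 0x18 then
 * 0x68, giving 0x1868. Chips without this scheme return the same byte
 * twice; the default branch in ess_init only warns when the bytes differ.
 */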
/*
* ESS technology describes a detection scheme in their docs. It involves
* fiddling with the bits in certain mixer registers. ess_probe is supposed
* to help.
*
* FKS: tracing shows ess_probe writes wrong value to 0x64. Bit 3 reads 1, but
* should be written 0 only. Check this.
*/
static int ess_probe (sb_devc * devc, int reg, int xorval)
{
int val1, val2, val3;
val1 = ess_getmixer (devc, reg);
val2 = val1 ^ xorval;
ess_setmixer (devc, reg, val2);
val3 = ess_getmixer (devc, reg);
ess_setmixer (devc, reg, val1);
return (val2 == val3);
}
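/*
 * Example (added for clarity): ess_probe(devc, 0x64, 1 << 2) flips bit 2
 * of mixer register 0x64, reads it back and restores the old value; it
 * returns 1 only if the flipped value stuck, i.e. if the bit is writable
 * on this particular chip.
 */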
int ess_init(sb_devc * devc, struct address_info *hw_config)
{
unsigned char cfg;
int ess_major = 0, ess_minor = 0;
int i;
static char name[100], modelname[10];
/*
* Try to detect ESS chips.
*/
sb_dsp_command(devc, 0xe7); /* Return identification */
for (i = 1000; i; i--) {
if (inb(DSP_DATA_AVAIL) & 0x80) {
if (ess_major == 0) {
ess_major = inb(DSP_READ);
} else {
ess_minor = inb(DSP_READ);
break;
}
}
}
if (ess_major == 0) return 0;
if (ess_major == 0x48 && (ess_minor & 0xf0) == 0x80) {
sprintf(name, "ESS ES488 AudioDrive (rev %d)",
ess_minor & 0x0f);
hw_config->name = name;
devc->model = MDL_SBPRO;
return 1;
}
/*
 * This is the detection heuristic of ESS technology, though somewhat
* changed to actually make it work.
* This results in the following detection steps:
 * - distinguish between ES688 and ES1688+ (as always done in this driver)
* if ES688 we're ready
* - try to detect ES1868, ES1869 or ES1878 (ess_identify)
* if successful we're ready
* - try to detect ES1888, ES1887 or ES1788 (aim: detect ES1887)
* if successful we're ready
* - Dunno. Must be 1688. Will do in general
*
* This is the most BETA part of the software: Will the detection
* always work?
*/
devc->model = MDL_ESS;
devc->submodel = ess_minor & 0x0f;
if (ess_major == 0x68 && (ess_minor & 0xf0) == 0x80) {
char *chip = NULL;
int submodel = -1;
switch (devc->sbmo.esstype) {
case ESSTYPE_DETECT:
case ESSTYPE_LIKE20:
break;
case 688:
submodel = 0x00;
break;
case 1688:
submodel = 0x08;
break;
case 1868:
submodel = SUBMDL_ES1868;
break;
case 1869:
submodel = SUBMDL_ES1869;
break;
case 1788:
submodel = SUBMDL_ES1788;
break;
case 1878:
submodel = SUBMDL_ES1878;
break;
case 1879:
submodel = SUBMDL_ES1879;
break;
case 1887:
submodel = SUBMDL_ES1887;
break;
case 1888:
submodel = SUBMDL_ES1888;
break;
default:
printk (KERN_ERR "Invalid esstype=%d specified\n", devc->sbmo.esstype);
return 0;
}
if (submodel != -1) {
devc->submodel = submodel;
sprintf (modelname, "ES%d", devc->sbmo.esstype);
chip = modelname;
}
if (chip == NULL && (ess_minor & 0x0f) < 8) {
chip = "ES688";
}
#ifdef FKS_TEST
FKS_test (devc);
#endif
/*
	 * If nothing has been detected yet and we want 2.0 behaviour...
* Then let's assume it's ES1688.
*/
if (chip == NULL && devc->sbmo.esstype == ESSTYPE_LIKE20) {
chip = "ES1688";
}
if (chip == NULL) {
int type;
type = ess_identify (devc);
switch (type) {
case 0x1868:
chip = "ES1868";
devc->submodel = SUBMDL_ES1868;
break;
case 0x1869:
chip = "ES1869";
devc->submodel = SUBMDL_ES1869;
break;
case 0x1878:
chip = "ES1878";
devc->submodel = SUBMDL_ES1878;
break;
case 0x1879:
chip = "ES1879";
devc->submodel = SUBMDL_ES1879;
break;
default:
if ((type & 0x00ff) != ((type >> 8) & 0x00ff)) {
printk ("ess_init: Unrecognized %04x\n", type);
}
}
}
#if 0
/*
* this one failed:
* the probing of bit 4 is another thought: from ES1788 and up, all
* chips seem to have hardware volume control. Bit 4 is readonly to
* check if a hardware volume interrupt has fired.
 * Because ES688/ES1688 don't have this feature, bit 4 might be writeable
* for these chips.
*/
if (chip == NULL && !ess_probe(devc, 0x64, (1 << 4))) {
#endif
/*
* the probing of bit 2 is my idea. The ES1887 docs want me to probe
* bit 3. This results in ES1688 being detected as ES1788.
* Bit 2 is for "Enable HWV IRQE", but as ES(1)688 chips don't have
* HardWare Volume, I think they don't have this IRQE.
*/
if (chip == NULL && ess_probe(devc, 0x64, (1 << 2))) {
if (ess_probe (devc, 0x70, 0x7f)) {
if (ess_probe (devc, 0x64, (1 << 5))) {
chip = "ES1887";
devc->submodel = SUBMDL_ES1887;
} else {
chip = "ES1888";
devc->submodel = SUBMDL_ES1888;
}
} else {
chip = "ES1788";
devc->submodel = SUBMDL_ES1788;
}
}
if (chip == NULL) {
chip = "ES1688";
}
printk ( KERN_INFO "ESS chip %s %s%s\n"
, chip
, ( devc->sbmo.esstype == ESSTYPE_DETECT || devc->sbmo.esstype == ESSTYPE_LIKE20
? "detected"
: "specified"
)
, ( devc->sbmo.esstype == ESSTYPE_LIKE20
? " (kernel 2.0 compatible)"
: ""
)
);
sprintf(name,"ESS %s AudioDrive (rev %d)", chip, ess_minor & 0x0f);
} else {
strcpy(name, "Jazz16");
}
/* AAS: info stolen from ALSA: these boards have different clocks */
switch(devc->submodel) {
/* APPARENTLY NOT 1869 AND 1887
case SUBMDL_ES1869:
case SUBMDL_ES1887:
*/
case SUBMDL_ES1888:
devc->caps |= SB_CAP_ES18XX_RATE;
break;
}
hw_config->name = name;
/* FKS: sb_dsp_reset to enable extended mode???? */
sb_dsp_reset(devc); /* Turn on extended mode */
/*
* Enable joystick and OPL3
*/
cfg = ess_getmixer (devc, 0x40);
ess_setmixer (devc, 0x40, cfg | 0x03);
if (devc->submodel >= 8) { /* ES1688 */
devc->caps |= SB_NO_MIDI; /* ES1688 uses MPU401 MIDI mode */
}
sb_dsp_reset (devc);
/*
* This is important! If it's not done, the IRQ probe in sb_dsp_init
* may fail.
*/
return ess_set_irq_hw (devc);
}
static int ess_set_dma_hw(sb_devc * devc)
{
unsigned char cfg, dma_bits = 0, dma16_bits;
int dma;
#ifdef FKS_LOGGING
printk(KERN_INFO "ess_set_dma_hw: dma8=%d,dma16=%d,dup=%d\n"
, devc->dma8, devc->dma16, devc->duplex);
#endif
/*
* FKS: It seems as if this duplex flag isn't set yet. Check it.
*/
dma = devc->dma8;
if (dma > 3 || dma < 0 || dma == 2) {
dma_bits = 0;
printk(KERN_ERR "ESS1688: Invalid DMA8 %d\n", dma);
return 0;
} else {
/* Extended mode DMA enable */
cfg = 0x50;
if (dma == 3) {
dma_bits = 3;
} else {
dma_bits = dma + 1;
}
}
if (!ess_write (devc, 0xb2, cfg | (dma_bits << 2))) {
printk(KERN_ERR "ESS1688: Failed to write to DMA config register\n");
return 0;
}
if (devc->duplex) {
dma = devc->dma16;
dma16_bits = 0;
if (dma >= 0) {
switch (dma) {
case 0:
dma_bits = 0x04;
break;
case 1:
dma_bits = 0x05;
break;
case 3:
dma_bits = 0x06;
break;
case 5:
dma_bits = 0x07;
dma16_bits = 0x20;
break;
default:
printk(KERN_ERR "ESS1887: Invalid DMA16 %d\n", dma);
return 0;
}
ess_chgmixer (devc, 0x78, 0x20, dma16_bits);
ess_chgmixer (devc, 0x7d, 0x07, dma_bits);
}
}
return 1;
}
/*
* This one is called from sb_dsp_init.
*
* Return values:
* 0: Failed
* 1: Succeeded or doesn't apply (not SUBMDL_ES1887)
*/
int ess_dsp_init (sb_devc *devc, struct address_info *hw_config)
{
/*
* Caller also checks this, but anyway
*/
if (devc->model != MDL_ESS) {
printk (KERN_INFO "ess_dsp_init for non ESS chip\n");
return 1;
}
/*
* This for ES1887 to run Full Duplex. Actually ES1888
* is allowed to do so too. I have no idea yet if this
* will work for ES1888 however.
*
* For SB16 having both dma8 and dma16 means enable
* Full Duplex. Let's try this for ES1887 too
*
*/
if (devc->submodel == SUBMDL_ES1887) {
if (hw_config->dma2 != -1) {
devc->dma16 = hw_config->dma2;
}
/*
	 * devc->duplex initialization is put here, because
* ess_set_dma_hw needs it.
*/
if (devc->dma8 != devc->dma16 && devc->dma16 != -1) {
devc->duplex = 1;
}
}
if (!ess_set_dma_hw (devc)) {
free_irq(devc->irq, devc);
return 0;
}
return 1;
}
/****************************************************************************
* *
* ESS mixer *
* *
****************************************************************************/
#define ES688_RECORDING_DEVICES \
( SOUND_MASK_LINE | SOUND_MASK_MIC | SOUND_MASK_CD )
#define ES688_MIXER_DEVICES \
( SOUND_MASK_SYNTH | SOUND_MASK_PCM | SOUND_MASK_LINE \
| SOUND_MASK_MIC | SOUND_MASK_CD | SOUND_MASK_VOLUME \
| SOUND_MASK_LINE2 | SOUND_MASK_SPEAKER )
#define ES1688_RECORDING_DEVICES \
( ES688_RECORDING_DEVICES )
#define ES1688_MIXER_DEVICES \
( ES688_MIXER_DEVICES | SOUND_MASK_RECLEV )
#define ES1887_RECORDING_DEVICES \
( ES1688_RECORDING_DEVICES | SOUND_MASK_LINE2 | SOUND_MASK_SYNTH)
#define ES1887_MIXER_DEVICES \
( ES1688_MIXER_DEVICES )
/*
* Mixer registers of ES1887
*
* These registers specifically take care of recording levels. To make the
* mapping from playback devices to recording devices every recording
* devices = playback device + ES_REC_MIXER_RECDIFF
*/
#define ES_REC_MIXER_RECBASE (SOUND_MIXER_LINE3 + 1)
#define ES_REC_MIXER_RECDIFF (ES_REC_MIXER_RECBASE - SOUND_MIXER_SYNTH)
#define ES_REC_MIXER_RECSYNTH (SOUND_MIXER_SYNTH + ES_REC_MIXER_RECDIFF)
#define ES_REC_MIXER_RECPCM (SOUND_MIXER_PCM + ES_REC_MIXER_RECDIFF)
#define ES_REC_MIXER_RECSPEAKER (SOUND_MIXER_SPEAKER + ES_REC_MIXER_RECDIFF)
#define ES_REC_MIXER_RECLINE (SOUND_MIXER_LINE + ES_REC_MIXER_RECDIFF)
#define ES_REC_MIXER_RECMIC (SOUND_MIXER_MIC + ES_REC_MIXER_RECDIFF)
#define ES_REC_MIXER_RECCD (SOUND_MIXER_CD + ES_REC_MIXER_RECDIFF)
#define ES_REC_MIXER_RECIMIX (SOUND_MIXER_IMIX + ES_REC_MIXER_RECDIFF)
#define ES_REC_MIXER_RECALTPCM (SOUND_MIXER_ALTPCM + ES_REC_MIXER_RECDIFF)
#define ES_REC_MIXER_RECRECLEV (SOUND_MIXER_RECLEV + ES_REC_MIXER_RECDIFF)
#define ES_REC_MIXER_RECIGAIN (SOUND_MIXER_IGAIN + ES_REC_MIXER_RECDIFF)
#define ES_REC_MIXER_RECOGAIN (SOUND_MIXER_OGAIN + ES_REC_MIXER_RECDIFF)
#define ES_REC_MIXER_RECLINE1 (SOUND_MIXER_LINE1 + ES_REC_MIXER_RECDIFF)
#define ES_REC_MIXER_RECLINE2 (SOUND_MIXER_LINE2 + ES_REC_MIXER_RECDIFF)
#define ES_REC_MIXER_RECLINE3 (SOUND_MIXER_LINE3 + ES_REC_MIXER_RECDIFF)
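/*
 * Worked example (assuming the usual OSS numbering, SOUND_MIXER_SYNTH = 3
 * and SOUND_MIXER_LINE3 = 16): ES_REC_MIXER_RECBASE is 17 and
 * ES_REC_MIXER_RECDIFF is 14, so the recording twin of SOUND_MIXER_MIC (7)
 * is ES_REC_MIXER_RECMIC = 21.
 */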
static mixer_tab es688_mix = {
MIX_ENT(SOUND_MIXER_VOLUME, 0x32, 7, 4, 0x32, 3, 4),
MIX_ENT(SOUND_MIXER_BASS, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_TREBLE, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_SYNTH, 0x36, 7, 4, 0x36, 3, 4),
MIX_ENT(SOUND_MIXER_PCM, 0x14, 7, 4, 0x14, 3, 4),
MIX_ENT(SOUND_MIXER_SPEAKER, 0x3c, 2, 3, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_LINE, 0x3e, 7, 4, 0x3e, 3, 4),
MIX_ENT(SOUND_MIXER_MIC, 0x1a, 7, 4, 0x1a, 3, 4),
MIX_ENT(SOUND_MIXER_CD, 0x38, 7, 4, 0x38, 3, 4),
MIX_ENT(SOUND_MIXER_IMIX, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_ALTPCM, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_RECLEV, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_IGAIN, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_OGAIN, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_LINE1, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_LINE2, 0x3a, 7, 4, 0x3a, 3, 4),
MIX_ENT(SOUND_MIXER_LINE3, 0x00, 0, 0, 0x00, 0, 0)
};
/*
* The ES1688 specifics... hopefully correct...
* - 6 bit master volume
* I was wrong, ES1888 docs say ES1688 didn't have it.
* - RECLEV control
* These may apply to ES688 too. I have no idea.
*/
static mixer_tab es1688_mix = {
MIX_ENT(SOUND_MIXER_VOLUME, 0x32, 7, 4, 0x32, 3, 4),
MIX_ENT(SOUND_MIXER_BASS, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_TREBLE, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_SYNTH, 0x36, 7, 4, 0x36, 3, 4),
MIX_ENT(SOUND_MIXER_PCM, 0x14, 7, 4, 0x14, 3, 4),
MIX_ENT(SOUND_MIXER_SPEAKER, 0x3c, 2, 3, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_LINE, 0x3e, 7, 4, 0x3e, 3, 4),
MIX_ENT(SOUND_MIXER_MIC, 0x1a, 7, 4, 0x1a, 3, 4),
MIX_ENT(SOUND_MIXER_CD, 0x38, 7, 4, 0x38, 3, 4),
MIX_ENT(SOUND_MIXER_IMIX, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_ALTPCM, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_RECLEV, 0xb4, 7, 4, 0xb4, 3, 4),
MIX_ENT(SOUND_MIXER_IGAIN, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_OGAIN, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_LINE1, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_LINE2, 0x3a, 7, 4, 0x3a, 3, 4),
MIX_ENT(SOUND_MIXER_LINE3, 0x00, 0, 0, 0x00, 0, 0)
};
static mixer_tab es1688later_mix = {
MIX_ENT(SOUND_MIXER_VOLUME, 0x60, 5, 6, 0x62, 5, 6),
MIX_ENT(SOUND_MIXER_BASS, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_TREBLE, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_SYNTH, 0x36, 7, 4, 0x36, 3, 4),
MIX_ENT(SOUND_MIXER_PCM, 0x14, 7, 4, 0x14, 3, 4),
MIX_ENT(SOUND_MIXER_SPEAKER, 0x3c, 2, 3, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_LINE, 0x3e, 7, 4, 0x3e, 3, 4),
MIX_ENT(SOUND_MIXER_MIC, 0x1a, 7, 4, 0x1a, 3, 4),
MIX_ENT(SOUND_MIXER_CD, 0x38, 7, 4, 0x38, 3, 4),
MIX_ENT(SOUND_MIXER_IMIX, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_ALTPCM, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_RECLEV, 0xb4, 7, 4, 0xb4, 3, 4),
MIX_ENT(SOUND_MIXER_IGAIN, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_OGAIN, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_LINE1, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_LINE2, 0x3a, 7, 4, 0x3a, 3, 4),
MIX_ENT(SOUND_MIXER_LINE3, 0x00, 0, 0, 0x00, 0, 0)
};
/*
* This one is for all ESS chips with a record mixer.
 * It's not used (yet), however.
*/
static mixer_tab es_rec_mix = {
MIX_ENT(SOUND_MIXER_VOLUME, 0x60, 5, 6, 0x62, 5, 6),
MIX_ENT(SOUND_MIXER_BASS, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_TREBLE, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_SYNTH, 0x36, 7, 4, 0x36, 3, 4),
MIX_ENT(SOUND_MIXER_PCM, 0x14, 7, 4, 0x14, 3, 4),
MIX_ENT(SOUND_MIXER_SPEAKER, 0x3c, 2, 3, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_LINE, 0x3e, 7, 4, 0x3e, 3, 4),
MIX_ENT(SOUND_MIXER_MIC, 0x1a, 7, 4, 0x1a, 3, 4),
MIX_ENT(SOUND_MIXER_CD, 0x38, 7, 4, 0x38, 3, 4),
MIX_ENT(SOUND_MIXER_IMIX, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_ALTPCM, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_RECLEV, 0xb4, 7, 4, 0xb4, 3, 4),
MIX_ENT(SOUND_MIXER_IGAIN, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_OGAIN, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_LINE1, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_LINE2, 0x3a, 7, 4, 0x3a, 3, 4),
MIX_ENT(SOUND_MIXER_LINE3, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(ES_REC_MIXER_RECSYNTH, 0x6b, 7, 4, 0x6b, 3, 4),
MIX_ENT(ES_REC_MIXER_RECPCM, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(ES_REC_MIXER_RECSPEAKER, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(ES_REC_MIXER_RECLINE, 0x6e, 7, 4, 0x6e, 3, 4),
MIX_ENT(ES_REC_MIXER_RECMIC, 0x68, 7, 4, 0x68, 3, 4),
MIX_ENT(ES_REC_MIXER_RECCD, 0x6a, 7, 4, 0x6a, 3, 4),
MIX_ENT(ES_REC_MIXER_RECIMIX, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(ES_REC_MIXER_RECALTPCM, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(ES_REC_MIXER_RECRECLEV, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(ES_REC_MIXER_RECIGAIN, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(ES_REC_MIXER_RECOGAIN, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(ES_REC_MIXER_RECLINE1, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(ES_REC_MIXER_RECLINE2, 0x6c, 7, 4, 0x6c, 3, 4),
MIX_ENT(ES_REC_MIXER_RECLINE3, 0x00, 0, 0, 0x00, 0, 0)
};
/*
 * This one is for the ES1887.  It differs from es_rec_mix only in using
 * 0x7c for the PCM playback level, because the ES1887 uses Audio 2 for
 * playback.
 */
static mixer_tab es1887_mix = {
MIX_ENT(SOUND_MIXER_VOLUME, 0x60, 5, 6, 0x62, 5, 6),
MIX_ENT(SOUND_MIXER_BASS, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_TREBLE, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_SYNTH, 0x36, 7, 4, 0x36, 3, 4),
MIX_ENT(SOUND_MIXER_PCM, 0x7c, 7, 4, 0x7c, 3, 4),
MIX_ENT(SOUND_MIXER_SPEAKER, 0x3c, 2, 3, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_LINE, 0x3e, 7, 4, 0x3e, 3, 4),
MIX_ENT(SOUND_MIXER_MIC, 0x1a, 7, 4, 0x1a, 3, 4),
MIX_ENT(SOUND_MIXER_CD, 0x38, 7, 4, 0x38, 3, 4),
MIX_ENT(SOUND_MIXER_IMIX, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_ALTPCM, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_RECLEV, 0xb4, 7, 4, 0xb4, 3, 4),
MIX_ENT(SOUND_MIXER_IGAIN, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_OGAIN, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_LINE1, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(SOUND_MIXER_LINE2, 0x3a, 7, 4, 0x3a, 3, 4),
MIX_ENT(SOUND_MIXER_LINE3, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(ES_REC_MIXER_RECSYNTH, 0x6b, 7, 4, 0x6b, 3, 4),
MIX_ENT(ES_REC_MIXER_RECPCM, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(ES_REC_MIXER_RECSPEAKER, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(ES_REC_MIXER_RECLINE, 0x6e, 7, 4, 0x6e, 3, 4),
MIX_ENT(ES_REC_MIXER_RECMIC, 0x68, 7, 4, 0x68, 3, 4),
MIX_ENT(ES_REC_MIXER_RECCD, 0x6a, 7, 4, 0x6a, 3, 4),
MIX_ENT(ES_REC_MIXER_RECIMIX, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(ES_REC_MIXER_RECALTPCM, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(ES_REC_MIXER_RECRECLEV, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(ES_REC_MIXER_RECIGAIN, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(ES_REC_MIXER_RECOGAIN, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(ES_REC_MIXER_RECLINE1, 0x00, 0, 0, 0x00, 0, 0),
MIX_ENT(ES_REC_MIXER_RECLINE2, 0x6c, 7, 4, 0x6c, 3, 4),
MIX_ENT(ES_REC_MIXER_RECLINE3, 0x00, 0, 0, 0x00, 0, 0)
};
static int ess_has_rec_mixer (int submodel)
{
switch (submodel) {
case SUBMDL_ES1887:
return 1;
default:
return 0;
}
}
#ifdef FKS_LOGGING
static int ess_mixer_mon_regs[] = {
	0x70, 0x71, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7d, 0x7f,
	0xa1, 0xa2, 0xa4, 0xa5, 0xa8, 0xa9,
	0xb1, 0xb2, 0xb4, 0xb5, 0xb6, 0xb7, 0xb9,
	0x00
};
static void ess_show_mixerregs (sb_devc *devc)
{
int *mp = ess_mixer_mon_regs;
return;		/* dump disabled; drop this return to re-enable it */
while (*mp != 0) {
printk (KERN_INFO "res (%x)=%x\n", *mp, (int)(ess_getmixer (devc, *mp)));
mp++;
}
}
#endif
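/*
 * Editor's note: ports 0xa0 and up are ESS extended registers and are
 * accessed through ess_write()/ess_read(); the classic SB mixer registers
 * below that are reached by writing the index to MIXER_ADDR and the data
 * to MIXER_DATA, with short settling delays in between.
 */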
void ess_setmixer (sb_devc * devc, unsigned int port, unsigned int value)
{
unsigned long flags;
#ifdef FKS_LOGGING
printk(KERN_INFO "FKS: write mixer %x: %x\n", port, value);
#endif
spin_lock_irqsave(&devc->lock, flags);
if (port >= 0xa0) {
ess_write (devc, port, value);
} else {
outb(((unsigned char) (port & 0xff)), MIXER_ADDR);
udelay(20);
outb(((unsigned char) (value & 0xff)), MIXER_DATA);
udelay(20);
}
spin_unlock_irqrestore(&devc->lock, flags);
}
unsigned int ess_getmixer (sb_devc * devc, unsigned int port)
{
unsigned int val;
unsigned long flags;
spin_lock_irqsave(&devc->lock, flags);
if (port >= 0xa0) {
val = ess_read (devc, port);
} else {
outb(((unsigned char) (port & 0xff)), MIXER_ADDR);
udelay(20);
val = inb(MIXER_DATA);
udelay(20);
}
spin_unlock_irqrestore(&devc->lock, flags);
return val;
}
static void ess_chgmixer(sb_devc *devc, unsigned int reg, unsigned int mask,
			 unsigned int val)
{
int value;
value = ess_getmixer (devc, reg);
value = (value & ~mask) | (val & mask);
ess_setmixer (devc, reg, value);
}
/*
* ess_mixer_init must be called from sb_mixer_init
*/
void ess_mixer_init (sb_devc * devc)
{
devc->mixer_caps = SOUND_CAP_EXCL_INPUT;
/*
* Take care of ES1887 specifics...
*/
switch (devc->submodel) {
case SUBMDL_ES1887:
devc->supported_devices = ES1887_MIXER_DEVICES;
devc->supported_rec_devices = ES1887_RECORDING_DEVICES;
#ifdef FKS_LOGGING
printk (KERN_INFO "FKS: ess_mixer_init dup = %d\n", devc->duplex);
#endif
if (devc->duplex) {
devc->iomap = &es1887_mix;
devc->iomap_sz = ARRAY_SIZE(es1887_mix);
} else {
devc->iomap = &es_rec_mix;
devc->iomap_sz = ARRAY_SIZE(es_rec_mix);
}
break;
default:
if (devc->submodel < 8) {
devc->supported_devices = ES688_MIXER_DEVICES;
devc->supported_rec_devices = ES688_RECORDING_DEVICES;
devc->iomap = &es688_mix;
devc->iomap_sz = ARRAY_SIZE(es688_mix);
} else {
/*
 * The ES1688 has a 4-bit master volume;
 * later chips appear to have 6 bits.
 */
devc->supported_devices = ES1688_MIXER_DEVICES;
devc->supported_rec_devices = ES1688_RECORDING_DEVICES;
if (devc->submodel < 0x10) {
devc->iomap = &es1688_mix;
devc->iomap_sz = ARRAY_SIZE(es1688_mix);
} else {
devc->iomap = &es1688later_mix;
devc->iomap_sz = ARRAY_SIZE(es1688later_mix);
}
}
}
}
/*
 * Changing a playback level on an ESS chip with a record mixer means the
 * recording level of any currently recorded input (devc->recmask) has to
 * be updated too.
 */
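/*
 * Editor's note: in the iomap tables above the recording-level controls
 * sit ES_REC_MIXER_RECDIFF entries beyond their playback counterparts,
 * hence the dev + ES_REC_MIXER_RECDIFF indexing below.
 */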
int ess_mixer_set(sb_devc *devc, int dev, int left, int right)
{
if (ess_has_rec_mixer (devc->submodel) && (devc->recmask & (1 << dev))) {
sb_common_mixer_set (devc, dev + ES_REC_MIXER_RECDIFF, left, right);
}
return sb_common_mixer_set (devc, dev, left, right);
}
/*
 * A sb_dsp_reset also resets extended register 0xb4 (RECLEV), so RECLEV
 * has to be restored afterwards.  That is what ess_mixer_reload is for.
 */
void ess_mixer_reload (sb_devc *devc, int dev)
{
int left, right, value;
value = devc->levels[dev];
left = value & 0x000000ff;
right = (value & 0x0000ff00) >> 8;
sb_common_mixer_set(devc, dev, left, right);
}
static int es_rec_set_recmask(sb_devc * devc, int mask)
{
int i, i_mask, cur_mask, diff_mask;
int value, left, right;
#ifdef FKS_LOGGING
printk (KERN_INFO "FKS: es_rec_set_recmask mask = %x\n", mask);
#endif
/*
* Changing the recmask on an ESS chip with recording mixer means:
* (1) Find the differences
* (2) For "turned-on" inputs: make the recording level the playback level
* (3) For "turned-off" inputs: make the recording level zero
*/
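/*
 * Editor's example (hypothetical masks): with cur_mask = MIC|CD and
 * mask = MIC|LINE, diff_mask = CD|LINE; CD is then muted (step 3),
 * LINE gets its stored playback level (step 2), and MIC is untouched.
 */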
cur_mask = devc->recmask;
diff_mask = (cur_mask ^ mask);
for (i = 0; i < 32; i++) {
i_mask = (1 << i);
if (diff_mask & i_mask) { /* Difference? (1) */
if (mask & i_mask) { /* Turn it on (2) */
value = devc->levels[i];
left = value & 0x000000ff;
right = (value & 0x0000ff00) >> 8;
} else { /* Turn it off (3) */
left = 0;
right = 0;
}
sb_common_mixer_set(devc, i + ES_REC_MIXER_RECDIFF, left, right);
}
}
return mask;
}
int ess_set_recmask(sb_devc * devc, int *mask)
{
/* This applies to ESS chips with record mixers only! */
if (ess_has_rec_mixer (devc->submodel)) {
*mask = es_rec_set_recmask (devc, *mask);
return 1; /* Applied */
} else {
return 0; /* Not applied */
}
}
/*
* ess_mixer_reset must be called from sb_mixer_reset
*/
int ess_mixer_reset (sb_devc * devc)
{
/*
* Separate actions for ESS chips with a record mixer:
*/
if (ess_has_rec_mixer (devc->submodel)) {
switch (devc->submodel) {
case SUBMDL_ES1887:
/*
* Separate actions for ES1887:
* Change registers 7a and 1c to make the record mixer the
* actual recording source.
*/
ess_chgmixer(devc, 0x7a, 0x18, 0x08);
ess_chgmixer(devc, 0x1c, 0x07, 0x07);
break;
}
/*
* Call set_recmask for proper initialization
*/
devc->recmask = devc->supported_rec_devices;
es_rec_set_recmask(devc, 0);
devc->recmask = 0;
return 1; /* We took care of recmask. */
} else {
return 0; /* We didn't; the caller must do it */
}
}
/****************************************************************************
* *
* ESS midi *
* *
****************************************************************************/
/*
 * FKS: the IRQ may be shared; how that case should be handled is an
 * open question.
 */
int ess_midi_init(sb_devc * devc, struct address_info *hw_config)
{
unsigned char cfg, tmp;
cfg = ess_getmixer (devc, 0x40) & 0x03;
if (devc->submodel < 8) {
ess_setmixer (devc, 0x40, cfg | 0x03); /* Enable OPL3 & joystick */
return 0; /* ES688 doesn't support MPU401 mode */
}
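/*
 * Editor's note, inferred from this function: mixer register 0x40 packs
 * the MPU-401 setup -- bits 0-1 enable OPL3/joystick, bits 3-4 select the
 * MPU I/O address (0x3x0 with x = 0..3) and bits 5-7 select the MPU IRQ.
 */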
tmp = (hw_config->io_base & 0x0f0) >> 4;
if (tmp > 3) {
ess_setmixer (devc, 0x40, cfg);
return 0;
}
cfg |= tmp << 3;
tmp = 1; /* MPU enabled without interrupts */
/* May be shared: if so the value is -ve */
switch (abs(hw_config->irq)) {
case 9:
tmp = 0x4;
break;
case 5:
tmp = 0x5;
break;
case 7:
tmp = 0x6;
break;
case 10:
tmp = 0x7;
break;
default:
return 0;
}
cfg |= tmp << 5;
ess_setmixer (devc, 0x40, cfg | 0x03);
return 1;
}
| gpl-2.0 |
PatrikKT/KofilaKernel | kernel/events/ring_buffer.c | 329 | 7103 | /*
* Performance events ring-buffer code:
*
* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
* Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
*
* For licensing details see kernel-base/COPYING
*/
#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include "internal.h"
static bool perf_output_space(struct ring_buffer *rb, unsigned long tail,
unsigned long offset, unsigned long head)
{
unsigned long mask;
if (!rb->writable)
return true;
mask = perf_data_size(rb) - 1;
offset = (offset - tail) & mask;
head = (head - tail) & mask;
if ((int)(head - offset) < 0)
return false;
return true;
}
static void perf_output_wakeup(struct perf_output_handle *handle)
{
atomic_set(&handle->rb->poll, POLL_IN);
handle->event->pending_wakeup = 1;
irq_work_queue(&handle->event->pending);
}
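/*
 * Editor's note: get/put_handle bracket every write.  The per-buffer nest
 * count lets an interrupt/NMI writer that preempts another writer on the
 * same CPU complete without publishing; only the outermost writer stores
 * data_head for userspace, and it re-checks rb->head afterwards in case a
 * nested writer advanced it in the meantime.
 */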
static void perf_output_get_handle(struct perf_output_handle *handle)
{
struct ring_buffer *rb = handle->rb;
preempt_disable();
local_inc(&rb->nest);
handle->wakeup = local_read(&rb->wakeup);
}
static void perf_output_put_handle(struct perf_output_handle *handle)
{
struct ring_buffer *rb = handle->rb;
unsigned long head;
again:
head = local_read(&rb->head);
if (!local_dec_and_test(&rb->nest))
goto out;
rb->user_page->data_head = head;
if (unlikely(head != local_read(&rb->head))) {
local_inc(&rb->nest);
goto again;
}
if (handle->wakeup != local_read(&rb->wakeup))
perf_output_wakeup(handle);
out:
preempt_enable();
}
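/*
 * Editor's note: perf_output_begin() reserves 'size' bytes by advancing
 * rb->head in a cmpxchg loop.  If the space check fails the event is
 * counted in rb->lost and a PERF_RECORD_LOST record is prepended to the
 * next reservation that does succeed.
 */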
int perf_output_begin(struct perf_output_handle *handle,
struct perf_event *event, unsigned int size)
{
struct ring_buffer *rb;
unsigned long tail, offset, head;
int have_lost;
struct perf_sample_data sample_data;
struct {
struct perf_event_header header;
u64 id;
u64 lost;
} lost_event;
rcu_read_lock();
if (event->parent)
event = event->parent;
rb = rcu_dereference(event->rb);
if (!rb)
goto out;
handle->rb = rb;
handle->event = event;
if (!rb->nr_pages)
goto out;
have_lost = local_read(&rb->lost);
if (have_lost) {
lost_event.header.size = sizeof(lost_event);
perf_event_header__init_id(&lost_event.header, &sample_data,
event);
size += lost_event.header.size;
}
perf_output_get_handle(handle);
do {
tail = ACCESS_ONCE(rb->user_page->data_tail);
smp_rmb();
offset = head = local_read(&rb->head);
head += size;
if (unlikely(!perf_output_space(rb, tail, offset, head)))
goto fail;
} while (local_cmpxchg(&rb->head, offset, head) != offset);
if (head - local_read(&rb->wakeup) > rb->watermark)
local_add(rb->watermark, &rb->wakeup);
handle->page = offset >> (PAGE_SHIFT + page_order(rb));
handle->page &= rb->nr_pages - 1;
handle->size = offset & ((PAGE_SIZE << page_order(rb)) - 1);
handle->addr = rb->data_pages[handle->page];
handle->addr += handle->size;
handle->size = (PAGE_SIZE << page_order(rb)) - handle->size;
if (have_lost) {
lost_event.header.type = PERF_RECORD_LOST;
lost_event.header.misc = 0;
lost_event.id = event->id;
lost_event.lost = local_xchg(&rb->lost, 0);
perf_output_put(handle, lost_event);
perf_event__output_id_sample(event, handle, &sample_data);
}
return 0;
fail:
local_inc(&rb->lost);
perf_output_put_handle(handle);
out:
rcu_read_unlock();
return -ENOSPC;
}
void perf_output_copy(struct perf_output_handle *handle,
const void *buf, unsigned int len)
{
__output_copy(handle, buf, len);
}
void perf_output_end(struct perf_output_handle *handle)
{
perf_output_put_handle(handle);
rcu_read_unlock();
}
static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
long max_size = perf_data_size(rb);
if (watermark)
rb->watermark = min(max_size, watermark);
if (!rb->watermark)
rb->watermark = max_size / 2;
if (flags & RING_BUFFER_WRITABLE)
rb->writable = 1;
atomic_set(&rb->refcount, 1);
INIT_LIST_HEAD(&rb->event_list);
spin_lock_init(&rb->event_lock);
}
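/*
 * Editor's note: two rb_alloc()/rb_free() implementations follow.  The
 * default one allocates the user page and every data page individually
 * (order-0, node-local); the CONFIG_PERF_USE_VMALLOC variant maps one
 * contiguous vmalloc area instead and treats it as a single higher-order
 * "page" via rb->page_order.
 */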
#ifndef CONFIG_PERF_USE_VMALLOC
struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
if (pgoff > rb->nr_pages)
return NULL;
if (pgoff == 0)
return virt_to_page(rb->user_page);
return virt_to_page(rb->data_pages[pgoff - 1]);
}
static void *perf_mmap_alloc_page(int cpu)
{
struct page *page;
int node;
node = (cpu == -1) ? cpu : cpu_to_node(cpu);
page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
if (!page)
return NULL;
return page_address(page);
}
struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
struct ring_buffer *rb;
unsigned long size;
int i;
size = sizeof(struct ring_buffer);
size += nr_pages * sizeof(void *);
rb = kzalloc(size, GFP_KERNEL);
if (!rb)
goto fail;
rb->user_page = perf_mmap_alloc_page(cpu);
if (!rb->user_page)
goto fail_user_page;
for (i = 0; i < nr_pages; i++) {
rb->data_pages[i] = perf_mmap_alloc_page(cpu);
if (!rb->data_pages[i])
goto fail_data_pages;
}
rb->nr_pages = nr_pages;
ring_buffer_init(rb, watermark, flags);
return rb;
fail_data_pages:
for (i--; i >= 0; i--)
free_page((unsigned long)rb->data_pages[i]);
free_page((unsigned long)rb->user_page);
fail_user_page:
kfree(rb);
fail:
return NULL;
}
static void perf_mmap_free_page(unsigned long addr)
{
struct page *page = virt_to_page((void *)addr);
page->mapping = NULL;
__free_page(page);
}
void rb_free(struct ring_buffer *rb)
{
int i;
perf_mmap_free_page((unsigned long)rb->user_page);
for (i = 0; i < rb->nr_pages; i++)
perf_mmap_free_page((unsigned long)rb->data_pages[i]);
kfree(rb);
}
#else
struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
if (pgoff > (1UL << page_order(rb)))
return NULL;
return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}
static void perf_mmap_unmark_page(void *addr)
{
struct page *page = vmalloc_to_page(addr);
page->mapping = NULL;
}
static void rb_free_work(struct work_struct *work)
{
struct ring_buffer *rb;
void *base;
int i, nr;
rb = container_of(work, struct ring_buffer, work);
nr = 1 << page_order(rb);
base = rb->user_page;
for (i = 0; i < nr + 1; i++)
perf_mmap_unmark_page(base + (i * PAGE_SIZE));
vfree(base);
kfree(rb);
}
void rb_free(struct ring_buffer *rb)
{
schedule_work(&rb->work);
}
struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
struct ring_buffer *rb;
unsigned long size;
void *all_buf;
size = sizeof(struct ring_buffer);
size += sizeof(void *);
rb = kzalloc(size, GFP_KERNEL);
if (!rb)
goto fail;
INIT_WORK(&rb->work, rb_free_work);
all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
if (!all_buf)
goto fail_all_buf;
rb->user_page = all_buf;
rb->data_pages[0] = all_buf + PAGE_SIZE;
rb->page_order = ilog2(nr_pages);
rb->nr_pages = 1;
ring_buffer_init(rb, watermark, flags);
return rb;
fail_all_buf:
kfree(rb);
fail:
return NULL;
}
#endif
| gpl-2.0 |
wenfengliaoshuzhai/linux | drivers/clk/ti/clk-3xxx.c | 329 | 14694 | /*
* OMAP3 Clock init
*
* Copyright (C) 2013 Texas Instruments, Inc
* Tero Kristo (t-kristo@ti.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/clk-provider.h>
#include <linux/clk/ti.h>
static struct ti_dt_clk omap3xxx_clks[] = {
DT_CLK(NULL, "apb_pclk", "dummy_apb_pclk"),
DT_CLK(NULL, "omap_32k_fck", "omap_32k_fck"),
DT_CLK(NULL, "virt_12m_ck", "virt_12m_ck"),
DT_CLK(NULL, "virt_13m_ck", "virt_13m_ck"),
DT_CLK(NULL, "virt_19200000_ck", "virt_19200000_ck"),
DT_CLK(NULL, "virt_26000000_ck", "virt_26000000_ck"),
DT_CLK(NULL, "virt_38_4m_ck", "virt_38_4m_ck"),
DT_CLK(NULL, "osc_sys_ck", "osc_sys_ck"),
DT_CLK("twl", "fck", "osc_sys_ck"),
DT_CLK(NULL, "sys_ck", "sys_ck"),
DT_CLK(NULL, "omap_96m_alwon_fck", "omap_96m_alwon_fck"),
DT_CLK("etb", "emu_core_alwon_ck", "emu_core_alwon_ck"),
DT_CLK(NULL, "sys_altclk", "sys_altclk"),
DT_CLK(NULL, "sys_clkout1", "sys_clkout1"),
DT_CLK(NULL, "dpll1_ck", "dpll1_ck"),
DT_CLK(NULL, "dpll1_x2_ck", "dpll1_x2_ck"),
DT_CLK(NULL, "dpll1_x2m2_ck", "dpll1_x2m2_ck"),
DT_CLK(NULL, "dpll3_ck", "dpll3_ck"),
DT_CLK(NULL, "core_ck", "core_ck"),
DT_CLK(NULL, "dpll3_x2_ck", "dpll3_x2_ck"),
DT_CLK(NULL, "dpll3_m2_ck", "dpll3_m2_ck"),
DT_CLK(NULL, "dpll3_m2x2_ck", "dpll3_m2x2_ck"),
DT_CLK(NULL, "dpll3_m3_ck", "dpll3_m3_ck"),
DT_CLK(NULL, "dpll3_m3x2_ck", "dpll3_m3x2_ck"),
DT_CLK(NULL, "dpll4_ck", "dpll4_ck"),
DT_CLK(NULL, "dpll4_x2_ck", "dpll4_x2_ck"),
DT_CLK(NULL, "omap_96m_fck", "omap_96m_fck"),
DT_CLK(NULL, "cm_96m_fck", "cm_96m_fck"),
DT_CLK(NULL, "omap_54m_fck", "omap_54m_fck"),
DT_CLK(NULL, "omap_48m_fck", "omap_48m_fck"),
DT_CLK(NULL, "omap_12m_fck", "omap_12m_fck"),
DT_CLK(NULL, "dpll4_m2_ck", "dpll4_m2_ck"),
DT_CLK(NULL, "dpll4_m2x2_ck", "dpll4_m2x2_ck"),
DT_CLK(NULL, "dpll4_m3_ck", "dpll4_m3_ck"),
DT_CLK(NULL, "dpll4_m3x2_ck", "dpll4_m3x2_ck"),
DT_CLK(NULL, "dpll4_m4_ck", "dpll4_m4_ck"),
DT_CLK(NULL, "dpll4_m4x2_ck", "dpll4_m4x2_ck"),
DT_CLK(NULL, "dpll4_m5_ck", "dpll4_m5_ck"),
DT_CLK(NULL, "dpll4_m5x2_ck", "dpll4_m5x2_ck"),
DT_CLK(NULL, "dpll4_m6_ck", "dpll4_m6_ck"),
DT_CLK(NULL, "dpll4_m6x2_ck", "dpll4_m6x2_ck"),
DT_CLK("etb", "emu_per_alwon_ck", "emu_per_alwon_ck"),
DT_CLK(NULL, "clkout2_src_ck", "clkout2_src_ck"),
DT_CLK(NULL, "sys_clkout2", "sys_clkout2"),
DT_CLK(NULL, "corex2_fck", "corex2_fck"),
DT_CLK(NULL, "dpll1_fck", "dpll1_fck"),
DT_CLK(NULL, "mpu_ck", "mpu_ck"),
DT_CLK(NULL, "arm_fck", "arm_fck"),
DT_CLK("etb", "emu_mpu_alwon_ck", "emu_mpu_alwon_ck"),
DT_CLK(NULL, "l3_ick", "l3_ick"),
DT_CLK(NULL, "l4_ick", "l4_ick"),
DT_CLK(NULL, "rm_ick", "rm_ick"),
DT_CLK(NULL, "gpt10_fck", "gpt10_fck"),
DT_CLK(NULL, "gpt11_fck", "gpt11_fck"),
DT_CLK(NULL, "core_96m_fck", "core_96m_fck"),
DT_CLK(NULL, "mmchs2_fck", "mmchs2_fck"),
DT_CLK(NULL, "mmchs1_fck", "mmchs1_fck"),
DT_CLK(NULL, "i2c3_fck", "i2c3_fck"),
DT_CLK(NULL, "i2c2_fck", "i2c2_fck"),
DT_CLK(NULL, "i2c1_fck", "i2c1_fck"),
DT_CLK(NULL, "core_48m_fck", "core_48m_fck"),
DT_CLK(NULL, "mcspi4_fck", "mcspi4_fck"),
DT_CLK(NULL, "mcspi3_fck", "mcspi3_fck"),
DT_CLK(NULL, "mcspi2_fck", "mcspi2_fck"),
DT_CLK(NULL, "mcspi1_fck", "mcspi1_fck"),
DT_CLK(NULL, "uart2_fck", "uart2_fck"),
DT_CLK(NULL, "uart1_fck", "uart1_fck"),
DT_CLK(NULL, "core_12m_fck", "core_12m_fck"),
DT_CLK("omap_hdq.0", "fck", "hdq_fck"),
DT_CLK(NULL, "hdq_fck", "hdq_fck"),
DT_CLK(NULL, "core_l3_ick", "core_l3_ick"),
DT_CLK(NULL, "sdrc_ick", "sdrc_ick"),
DT_CLK(NULL, "gpmc_fck", "gpmc_fck"),
DT_CLK(NULL, "core_l4_ick", "core_l4_ick"),
DT_CLK("omap_hsmmc.1", "ick", "mmchs2_ick"),
DT_CLK("omap_hsmmc.0", "ick", "mmchs1_ick"),
DT_CLK(NULL, "mmchs2_ick", "mmchs2_ick"),
DT_CLK(NULL, "mmchs1_ick", "mmchs1_ick"),
DT_CLK("omap_hdq.0", "ick", "hdq_ick"),
DT_CLK(NULL, "hdq_ick", "hdq_ick"),
DT_CLK("omap2_mcspi.4", "ick", "mcspi4_ick"),
DT_CLK("omap2_mcspi.3", "ick", "mcspi3_ick"),
DT_CLK("omap2_mcspi.2", "ick", "mcspi2_ick"),
DT_CLK("omap2_mcspi.1", "ick", "mcspi1_ick"),
DT_CLK(NULL, "mcspi4_ick", "mcspi4_ick"),
DT_CLK(NULL, "mcspi3_ick", "mcspi3_ick"),
DT_CLK(NULL, "mcspi2_ick", "mcspi2_ick"),
DT_CLK(NULL, "mcspi1_ick", "mcspi1_ick"),
DT_CLK("omap_i2c.3", "ick", "i2c3_ick"),
DT_CLK("omap_i2c.2", "ick", "i2c2_ick"),
DT_CLK("omap_i2c.1", "ick", "i2c1_ick"),
DT_CLK(NULL, "i2c3_ick", "i2c3_ick"),
DT_CLK(NULL, "i2c2_ick", "i2c2_ick"),
DT_CLK(NULL, "i2c1_ick", "i2c1_ick"),
DT_CLK(NULL, "uart2_ick", "uart2_ick"),
DT_CLK(NULL, "uart1_ick", "uart1_ick"),
DT_CLK(NULL, "gpt11_ick", "gpt11_ick"),
DT_CLK(NULL, "gpt10_ick", "gpt10_ick"),
DT_CLK(NULL, "omapctrl_ick", "omapctrl_ick"),
DT_CLK(NULL, "dss_tv_fck", "dss_tv_fck"),
DT_CLK(NULL, "dss_96m_fck", "dss_96m_fck"),
DT_CLK(NULL, "dss2_alwon_fck", "dss2_alwon_fck"),
DT_CLK(NULL, "init_60m_fclk", "dummy_ck"),
DT_CLK(NULL, "gpt1_fck", "gpt1_fck"),
DT_CLK(NULL, "aes2_ick", "aes2_ick"),
DT_CLK(NULL, "wkup_32k_fck", "wkup_32k_fck"),
DT_CLK(NULL, "gpio1_dbck", "gpio1_dbck"),
DT_CLK(NULL, "sha12_ick", "sha12_ick"),
DT_CLK(NULL, "wdt2_fck", "wdt2_fck"),
DT_CLK("omap_wdt", "ick", "wdt2_ick"),
DT_CLK(NULL, "wdt2_ick", "wdt2_ick"),
DT_CLK(NULL, "wdt1_ick", "wdt1_ick"),
DT_CLK(NULL, "gpio1_ick", "gpio1_ick"),
DT_CLK(NULL, "omap_32ksync_ick", "omap_32ksync_ick"),
DT_CLK(NULL, "gpt12_ick", "gpt12_ick"),
DT_CLK(NULL, "gpt1_ick", "gpt1_ick"),
DT_CLK(NULL, "per_96m_fck", "per_96m_fck"),
DT_CLK(NULL, "per_48m_fck", "per_48m_fck"),
DT_CLK(NULL, "uart3_fck", "uart3_fck"),
DT_CLK(NULL, "gpt2_fck", "gpt2_fck"),
DT_CLK(NULL, "gpt3_fck", "gpt3_fck"),
DT_CLK(NULL, "gpt4_fck", "gpt4_fck"),
DT_CLK(NULL, "gpt5_fck", "gpt5_fck"),
DT_CLK(NULL, "gpt6_fck", "gpt6_fck"),
DT_CLK(NULL, "gpt7_fck", "gpt7_fck"),
DT_CLK(NULL, "gpt8_fck", "gpt8_fck"),
DT_CLK(NULL, "gpt9_fck", "gpt9_fck"),
DT_CLK(NULL, "per_32k_alwon_fck", "per_32k_alwon_fck"),
DT_CLK(NULL, "gpio6_dbck", "gpio6_dbck"),
DT_CLK(NULL, "gpio5_dbck", "gpio5_dbck"),
DT_CLK(NULL, "gpio4_dbck", "gpio4_dbck"),
DT_CLK(NULL, "gpio3_dbck", "gpio3_dbck"),
DT_CLK(NULL, "gpio2_dbck", "gpio2_dbck"),
DT_CLK(NULL, "wdt3_fck", "wdt3_fck"),
DT_CLK(NULL, "per_l4_ick", "per_l4_ick"),
DT_CLK(NULL, "gpio6_ick", "gpio6_ick"),
DT_CLK(NULL, "gpio5_ick", "gpio5_ick"),
DT_CLK(NULL, "gpio4_ick", "gpio4_ick"),
DT_CLK(NULL, "gpio3_ick", "gpio3_ick"),
DT_CLK(NULL, "gpio2_ick", "gpio2_ick"),
DT_CLK(NULL, "wdt3_ick", "wdt3_ick"),
DT_CLK(NULL, "uart3_ick", "uart3_ick"),
DT_CLK(NULL, "uart4_ick", "uart4_ick"),
DT_CLK(NULL, "gpt9_ick", "gpt9_ick"),
DT_CLK(NULL, "gpt8_ick", "gpt8_ick"),
DT_CLK(NULL, "gpt7_ick", "gpt7_ick"),
DT_CLK(NULL, "gpt6_ick", "gpt6_ick"),
DT_CLK(NULL, "gpt5_ick", "gpt5_ick"),
DT_CLK(NULL, "gpt4_ick", "gpt4_ick"),
DT_CLK(NULL, "gpt3_ick", "gpt3_ick"),
DT_CLK(NULL, "gpt2_ick", "gpt2_ick"),
DT_CLK(NULL, "mcbsp_clks", "mcbsp_clks"),
DT_CLK(NULL, "mcbsp1_ick", "mcbsp1_ick"),
DT_CLK(NULL, "mcbsp2_ick", "mcbsp2_ick"),
DT_CLK(NULL, "mcbsp3_ick", "mcbsp3_ick"),
DT_CLK(NULL, "mcbsp4_ick", "mcbsp4_ick"),
DT_CLK(NULL, "mcbsp5_ick", "mcbsp5_ick"),
DT_CLK(NULL, "mcbsp1_fck", "mcbsp1_fck"),
DT_CLK(NULL, "mcbsp2_fck", "mcbsp2_fck"),
DT_CLK(NULL, "mcbsp3_fck", "mcbsp3_fck"),
DT_CLK(NULL, "mcbsp4_fck", "mcbsp4_fck"),
DT_CLK(NULL, "mcbsp5_fck", "mcbsp5_fck"),
DT_CLK("etb", "emu_src_ck", "emu_src_ck"),
DT_CLK(NULL, "emu_src_ck", "emu_src_ck"),
DT_CLK(NULL, "pclk_fck", "pclk_fck"),
DT_CLK(NULL, "pclkx2_fck", "pclkx2_fck"),
DT_CLK(NULL, "atclk_fck", "atclk_fck"),
DT_CLK(NULL, "traceclk_src_fck", "traceclk_src_fck"),
DT_CLK(NULL, "traceclk_fck", "traceclk_fck"),
DT_CLK(NULL, "secure_32k_fck", "secure_32k_fck"),
DT_CLK(NULL, "gpt12_fck", "gpt12_fck"),
DT_CLK(NULL, "wdt1_fck", "wdt1_fck"),
DT_CLK(NULL, "timer_32k_ck", "omap_32k_fck"),
DT_CLK(NULL, "timer_sys_ck", "sys_ck"),
DT_CLK(NULL, "cpufreq_ck", "dpll1_ck"),
{ .node_name = NULL },
};
static struct ti_dt_clk omap34xx_omap36xx_clks[] = {
DT_CLK(NULL, "aes1_ick", "aes1_ick"),
DT_CLK("omap_rng", "ick", "rng_ick"),
DT_CLK("omap3-rom-rng", "ick", "rng_ick"),
DT_CLK(NULL, "sha11_ick", "sha11_ick"),
DT_CLK(NULL, "des1_ick", "des1_ick"),
DT_CLK(NULL, "cam_mclk", "cam_mclk"),
DT_CLK(NULL, "cam_ick", "cam_ick"),
DT_CLK(NULL, "csi2_96m_fck", "csi2_96m_fck"),
DT_CLK(NULL, "security_l3_ick", "security_l3_ick"),
DT_CLK(NULL, "pka_ick", "pka_ick"),
DT_CLK(NULL, "icr_ick", "icr_ick"),
DT_CLK("omap-aes", "ick", "aes2_ick"),
DT_CLK("omap-sham", "ick", "sha12_ick"),
DT_CLK(NULL, "des2_ick", "des2_ick"),
DT_CLK(NULL, "mspro_ick", "mspro_ick"),
DT_CLK(NULL, "mailboxes_ick", "mailboxes_ick"),
DT_CLK(NULL, "ssi_l4_ick", "ssi_l4_ick"),
DT_CLK(NULL, "sr1_fck", "sr1_fck"),
DT_CLK(NULL, "sr2_fck", "sr2_fck"),
DT_CLK(NULL, "sr_l4_ick", "sr_l4_ick"),
DT_CLK(NULL, "security_l4_ick2", "security_l4_ick2"),
DT_CLK(NULL, "wkup_l4_ick", "wkup_l4_ick"),
DT_CLK(NULL, "dpll2_fck", "dpll2_fck"),
DT_CLK(NULL, "iva2_ck", "iva2_ck"),
DT_CLK(NULL, "modem_fck", "modem_fck"),
DT_CLK(NULL, "sad2d_ick", "sad2d_ick"),
DT_CLK(NULL, "mad2d_ick", "mad2d_ick"),
DT_CLK(NULL, "mspro_fck", "mspro_fck"),
DT_CLK(NULL, "dpll2_ck", "dpll2_ck"),
DT_CLK(NULL, "dpll2_m2_ck", "dpll2_m2_ck"),
{ .node_name = NULL },
};
static struct ti_dt_clk omap36xx_omap3430es2plus_clks[] = {
DT_CLK(NULL, "ssi_ssr_fck", "ssi_ssr_fck_3430es2"),
DT_CLK(NULL, "ssi_sst_fck", "ssi_sst_fck_3430es2"),
DT_CLK("musb-omap2430", "ick", "hsotgusb_ick_3430es2"),
DT_CLK(NULL, "hsotgusb_ick", "hsotgusb_ick_3430es2"),
DT_CLK(NULL, "ssi_ick", "ssi_ick_3430es2"),
DT_CLK(NULL, "usim_fck", "usim_fck"),
DT_CLK(NULL, "usim_ick", "usim_ick"),
{ .node_name = NULL },
};
static struct ti_dt_clk omap3430es1_clks[] = {
DT_CLK(NULL, "gfx_l3_ck", "gfx_l3_ck"),
DT_CLK(NULL, "gfx_l3_fck", "gfx_l3_fck"),
DT_CLK(NULL, "gfx_l3_ick", "gfx_l3_ick"),
DT_CLK(NULL, "gfx_cg1_ck", "gfx_cg1_ck"),
DT_CLK(NULL, "gfx_cg2_ck", "gfx_cg2_ck"),
DT_CLK(NULL, "d2d_26m_fck", "d2d_26m_fck"),
DT_CLK(NULL, "fshostusb_fck", "fshostusb_fck"),
DT_CLK(NULL, "ssi_ssr_fck", "ssi_ssr_fck_3430es1"),
DT_CLK(NULL, "ssi_sst_fck", "ssi_sst_fck_3430es1"),
DT_CLK("musb-omap2430", "ick", "hsotgusb_ick_3430es1"),
DT_CLK(NULL, "hsotgusb_ick", "hsotgusb_ick_3430es1"),
DT_CLK(NULL, "fac_ick", "fac_ick"),
DT_CLK(NULL, "ssi_ick", "ssi_ick_3430es1"),
DT_CLK(NULL, "usb_l4_ick", "usb_l4_ick"),
DT_CLK(NULL, "dss1_alwon_fck", "dss1_alwon_fck_3430es1"),
DT_CLK("omapdss_dss", "ick", "dss_ick_3430es1"),
DT_CLK(NULL, "dss_ick", "dss_ick_3430es1"),
{ .node_name = NULL },
};
static struct ti_dt_clk omap36xx_am35xx_omap3430es2plus_clks[] = {
DT_CLK(NULL, "virt_16_8m_ck", "virt_16_8m_ck"),
DT_CLK(NULL, "dpll5_ck", "dpll5_ck"),
DT_CLK(NULL, "dpll5_m2_ck", "dpll5_m2_ck"),
DT_CLK(NULL, "sgx_fck", "sgx_fck"),
DT_CLK(NULL, "sgx_ick", "sgx_ick"),
DT_CLK(NULL, "cpefuse_fck", "cpefuse_fck"),
DT_CLK(NULL, "ts_fck", "ts_fck"),
DT_CLK(NULL, "usbtll_fck", "usbtll_fck"),
DT_CLK(NULL, "usbtll_ick", "usbtll_ick"),
DT_CLK("omap_hsmmc.2", "ick", "mmchs3_ick"),
DT_CLK(NULL, "mmchs3_ick", "mmchs3_ick"),
DT_CLK(NULL, "mmchs3_fck", "mmchs3_fck"),
DT_CLK(NULL, "dss1_alwon_fck", "dss1_alwon_fck_3430es2"),
DT_CLK("omapdss_dss", "ick", "dss_ick_3430es2"),
DT_CLK(NULL, "dss_ick", "dss_ick_3430es2"),
DT_CLK(NULL, "usbhost_120m_fck", "usbhost_120m_fck"),
DT_CLK(NULL, "usbhost_48m_fck", "usbhost_48m_fck"),
DT_CLK(NULL, "usbhost_ick", "usbhost_ick"),
{ .node_name = NULL },
};
static struct ti_dt_clk am35xx_clks[] = {
DT_CLK(NULL, "ipss_ick", "ipss_ick"),
DT_CLK(NULL, "rmii_ck", "rmii_ck"),
DT_CLK(NULL, "pclk_ck", "pclk_ck"),
DT_CLK(NULL, "emac_ick", "emac_ick"),
DT_CLK(NULL, "emac_fck", "emac_fck"),
DT_CLK("davinci_emac.0", NULL, "emac_ick"),
DT_CLK("davinci_mdio.0", NULL, "emac_fck"),
DT_CLK("vpfe-capture", "master", "vpfe_ick"),
DT_CLK("vpfe-capture", "slave", "vpfe_fck"),
DT_CLK(NULL, "hsotgusb_ick", "hsotgusb_ick_am35xx"),
DT_CLK(NULL, "hsotgusb_fck", "hsotgusb_fck_am35xx"),
DT_CLK(NULL, "hecc_ck", "hecc_ck"),
DT_CLK(NULL, "uart4_ick", "uart4_ick_am35xx"),
DT_CLK(NULL, "uart4_fck", "uart4_fck_am35xx"),
{ .node_name = NULL },
};
static struct ti_dt_clk omap36xx_clks[] = {
DT_CLK(NULL, "omap_192m_alwon_fck", "omap_192m_alwon_fck"),
DT_CLK(NULL, "uart4_fck", "uart4_fck"),
{ .node_name = NULL },
};
static const char *enable_init_clks[] = {
"sdrc_ick",
"gpmc_fck",
"omapctrl_ick",
};
enum {
OMAP3_SOC_AM35XX,
OMAP3_SOC_OMAP3430_ES1,
OMAP3_SOC_OMAP3430_ES2_PLUS,
OMAP3_SOC_OMAP3630,
};
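/*
 * Editor's note: the variants share most of their clock data, so init
 * registers the common omap3xxx table first and then layers the
 * variant-specific tables on top, selected by soc_type.
 */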
static int __init omap3xxx_dt_clk_init(int soc_type)
{
if (soc_type == OMAP3_SOC_AM35XX || soc_type == OMAP3_SOC_OMAP3630 ||
soc_type == OMAP3_SOC_OMAP3430_ES1 ||
soc_type == OMAP3_SOC_OMAP3430_ES2_PLUS)
ti_dt_clocks_register(omap3xxx_clks);
if (soc_type == OMAP3_SOC_AM35XX)
ti_dt_clocks_register(am35xx_clks);
if (soc_type == OMAP3_SOC_OMAP3630 || soc_type == OMAP3_SOC_AM35XX ||
soc_type == OMAP3_SOC_OMAP3430_ES2_PLUS)
ti_dt_clocks_register(omap36xx_am35xx_omap3430es2plus_clks);
if (soc_type == OMAP3_SOC_OMAP3430_ES1)
ti_dt_clocks_register(omap3430es1_clks);
if (soc_type == OMAP3_SOC_OMAP3430_ES2_PLUS ||
soc_type == OMAP3_SOC_OMAP3630)
ti_dt_clocks_register(omap36xx_omap3430es2plus_clks);
if (soc_type == OMAP3_SOC_OMAP3430_ES1 ||
soc_type == OMAP3_SOC_OMAP3430_ES2_PLUS ||
soc_type == OMAP3_SOC_OMAP3630)
ti_dt_clocks_register(omap34xx_omap36xx_clks);
if (soc_type == OMAP3_SOC_OMAP3630)
ti_dt_clocks_register(omap36xx_clks);
omap2_clk_disable_autoidle_all();
omap2_clk_enable_init_clocks(enable_init_clks,
ARRAY_SIZE(enable_init_clks));
pr_info("Clocking rate (Crystal/Core/MPU): %ld.%01ld/%ld/%ld MHz\n",
(clk_get_rate(clk_get_sys(NULL, "osc_sys_ck")) / 1000000),
(clk_get_rate(clk_get_sys(NULL, "osc_sys_ck")) / 100000) % 10,
(clk_get_rate(clk_get_sys(NULL, "core_ck")) / 1000000),
(clk_get_rate(clk_get_sys(NULL, "arm_fck")) / 1000000));
if (soc_type != OMAP3_SOC_OMAP3430_ES1)
omap3_clk_lock_dpll5();
return 0;
}
int __init omap3430_dt_clk_init(void)
{
return omap3xxx_dt_clk_init(OMAP3_SOC_OMAP3430_ES2_PLUS);
}
int __init omap3630_dt_clk_init(void)
{
return omap3xxx_dt_clk_init(OMAP3_SOC_OMAP3630);
}
int __init am35xx_dt_clk_init(void)
{
return omap3xxx_dt_clk_init(OMAP3_SOC_AM35XX);
}
| gpl-2.0 |
jrior001/android_kernel_htc_msm8960 | sound/soc/codecs/wm2200.c | 585 | 73020 | /*
* wm2200.c -- WM2200 ALSA SoC Audio driver
*
* Copyright 2012 Wolfson Microelectronics plc
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/gcd.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/fixed.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/jack.h>
#include <sound/initval.h>
#include <sound/tlv.h>
#include <sound/wm2200.h>
#include "wm2200.h"
/* The code assumes DCVDD is generated internally */
#define WM2200_NUM_CORE_SUPPLIES 2
static const char *wm2200_core_supply_names[WM2200_NUM_CORE_SUPPLIES] = {
"DBVDD",
"LDOVDD",
};
struct wm2200_fll {
int fref;
int fout;
int src;
struct completion lock;
};
/* codec private data */
struct wm2200_priv {
struct regmap *regmap;
struct device *dev;
struct snd_soc_codec *codec;
struct wm2200_pdata pdata;
struct regulator_bulk_data core_supplies[WM2200_NUM_CORE_SUPPLIES];
struct completion fll_lock;
int fll_fout;
int fll_fref;
int fll_src;
int rev;
int sysclk;
};
static struct reg_default wm2200_reg_defaults[] = {
{ 0x000B, 0x0000 }, /* R11 - Tone Generator 1 */
{ 0x0102, 0x0000 }, /* R258 - Clocking 3 */
{ 0x0103, 0x0011 }, /* R259 - Clocking 4 */
{ 0x0111, 0x0000 }, /* R273 - FLL Control 1 */
{ 0x0112, 0x0000 }, /* R274 - FLL Control 2 */
{ 0x0113, 0x0000 }, /* R275 - FLL Control 3 */
{ 0x0114, 0x0000 }, /* R276 - FLL Control 4 */
{ 0x0116, 0x0177 }, /* R278 - FLL Control 6 */
{ 0x0117, 0x0004 }, /* R279 - FLL Control 7 */
{ 0x0119, 0x0000 }, /* R281 - FLL EFS 1 */
{ 0x011A, 0x0002 }, /* R282 - FLL EFS 2 */
{ 0x0200, 0x0000 }, /* R512 - Mic Charge Pump 1 */
{ 0x0201, 0x03FF }, /* R513 - Mic Charge Pump 2 */
{ 0x0202, 0x9BDE }, /* R514 - DM Charge Pump 1 */
{ 0x020C, 0x0000 }, /* R524 - Mic Bias Ctrl 1 */
{ 0x020D, 0x0000 }, /* R525 - Mic Bias Ctrl 2 */
{ 0x020F, 0x0000 }, /* R527 - Ear Piece Ctrl 1 */
{ 0x0210, 0x0000 }, /* R528 - Ear Piece Ctrl 2 */
{ 0x0301, 0x0000 }, /* R769 - Input Enables */
{ 0x0302, 0x2240 }, /* R770 - IN1L Control */
{ 0x0303, 0x0040 }, /* R771 - IN1R Control */
{ 0x0304, 0x2240 }, /* R772 - IN2L Control */
{ 0x0305, 0x0040 }, /* R773 - IN2R Control */
{ 0x0306, 0x2240 }, /* R774 - IN3L Control */
{ 0x0307, 0x0040 }, /* R775 - IN3R Control */
{ 0x030A, 0x0000 }, /* R778 - RXANC_SRC */
{ 0x030B, 0x0022 }, /* R779 - Input Volume Ramp */
{ 0x030C, 0x0180 }, /* R780 - ADC Digital Volume 1L */
{ 0x030D, 0x0180 }, /* R781 - ADC Digital Volume 1R */
{ 0x030E, 0x0180 }, /* R782 - ADC Digital Volume 2L */
{ 0x030F, 0x0180 }, /* R783 - ADC Digital Volume 2R */
{ 0x0310, 0x0180 }, /* R784 - ADC Digital Volume 3L */
{ 0x0311, 0x0180 }, /* R785 - ADC Digital Volume 3R */
{ 0x0400, 0x0000 }, /* R1024 - Output Enables */
{ 0x0401, 0x0000 }, /* R1025 - DAC Volume Limit 1L */
{ 0x0402, 0x0000 }, /* R1026 - DAC Volume Limit 1R */
{ 0x0403, 0x0000 }, /* R1027 - DAC Volume Limit 2L */
{ 0x0404, 0x0000 }, /* R1028 - DAC Volume Limit 2R */
{ 0x0409, 0x0000 }, /* R1033 - DAC AEC Control 1 */
{ 0x040A, 0x0022 }, /* R1034 - Output Volume Ramp */
{ 0x040B, 0x0180 }, /* R1035 - DAC Digital Volume 1L */
{ 0x040C, 0x0180 }, /* R1036 - DAC Digital Volume 1R */
{ 0x040D, 0x0180 }, /* R1037 - DAC Digital Volume 2L */
{ 0x040E, 0x0180 }, /* R1038 - DAC Digital Volume 2R */
{ 0x0417, 0x0069 }, /* R1047 - PDM 1 */
{ 0x0418, 0x0000 }, /* R1048 - PDM 2 */
{ 0x0500, 0x0000 }, /* R1280 - Audio IF 1_1 */
{ 0x0501, 0x0008 }, /* R1281 - Audio IF 1_2 */
{ 0x0502, 0x0000 }, /* R1282 - Audio IF 1_3 */
{ 0x0503, 0x0000 }, /* R1283 - Audio IF 1_4 */
{ 0x0504, 0x0000 }, /* R1284 - Audio IF 1_5 */
{ 0x0505, 0x0001 }, /* R1285 - Audio IF 1_6 */
{ 0x0506, 0x0001 }, /* R1286 - Audio IF 1_7 */
{ 0x0507, 0x0000 }, /* R1287 - Audio IF 1_8 */
{ 0x0508, 0x0000 }, /* R1288 - Audio IF 1_9 */
{ 0x0509, 0x0000 }, /* R1289 - Audio IF 1_10 */
{ 0x050A, 0x0000 }, /* R1290 - Audio IF 1_11 */
{ 0x050B, 0x0000 }, /* R1291 - Audio IF 1_12 */
{ 0x050C, 0x0000 }, /* R1292 - Audio IF 1_13 */
{ 0x050D, 0x0000 }, /* R1293 - Audio IF 1_14 */
{ 0x050E, 0x0000 }, /* R1294 - Audio IF 1_15 */
{ 0x050F, 0x0000 }, /* R1295 - Audio IF 1_16 */
{ 0x0510, 0x0000 }, /* R1296 - Audio IF 1_17 */
{ 0x0511, 0x0000 }, /* R1297 - Audio IF 1_18 */
{ 0x0512, 0x0000 }, /* R1298 - Audio IF 1_19 */
{ 0x0513, 0x0000 }, /* R1299 - Audio IF 1_20 */
{ 0x0514, 0x0000 }, /* R1300 - Audio IF 1_21 */
{ 0x0515, 0x0001 }, /* R1301 - Audio IF 1_22 */
{ 0x0600, 0x0000 }, /* R1536 - OUT1LMIX Input 1 Source */
{ 0x0601, 0x0080 }, /* R1537 - OUT1LMIX Input 1 Volume */
{ 0x0602, 0x0000 }, /* R1538 - OUT1LMIX Input 2 Source */
{ 0x0603, 0x0080 }, /* R1539 - OUT1LMIX Input 2 Volume */
{ 0x0604, 0x0000 }, /* R1540 - OUT1LMIX Input 3 Source */
{ 0x0605, 0x0080 }, /* R1541 - OUT1LMIX Input 3 Volume */
{ 0x0606, 0x0000 }, /* R1542 - OUT1LMIX Input 4 Source */
{ 0x0607, 0x0080 }, /* R1543 - OUT1LMIX Input 4 Volume */
{ 0x0608, 0x0000 }, /* R1544 - OUT1RMIX Input 1 Source */
{ 0x0609, 0x0080 }, /* R1545 - OUT1RMIX Input 1 Volume */
{ 0x060A, 0x0000 }, /* R1546 - OUT1RMIX Input 2 Source */
{ 0x060B, 0x0080 }, /* R1547 - OUT1RMIX Input 2 Volume */
{ 0x060C, 0x0000 }, /* R1548 - OUT1RMIX Input 3 Source */
{ 0x060D, 0x0080 }, /* R1549 - OUT1RMIX Input 3 Volume */
{ 0x060E, 0x0000 }, /* R1550 - OUT1RMIX Input 4 Source */
{ 0x060F, 0x0080 }, /* R1551 - OUT1RMIX Input 4 Volume */
{ 0x0610, 0x0000 }, /* R1552 - OUT2LMIX Input 1 Source */
{ 0x0611, 0x0080 }, /* R1553 - OUT2LMIX Input 1 Volume */
{ 0x0612, 0x0000 }, /* R1554 - OUT2LMIX Input 2 Source */
{ 0x0613, 0x0080 }, /* R1555 - OUT2LMIX Input 2 Volume */
{ 0x0614, 0x0000 }, /* R1556 - OUT2LMIX Input 3 Source */
{ 0x0615, 0x0080 }, /* R1557 - OUT2LMIX Input 3 Volume */
{ 0x0616, 0x0000 }, /* R1558 - OUT2LMIX Input 4 Source */
{ 0x0617, 0x0080 }, /* R1559 - OUT2LMIX Input 4 Volume */
{ 0x0618, 0x0000 }, /* R1560 - OUT2RMIX Input 1 Source */
{ 0x0619, 0x0080 }, /* R1561 - OUT2RMIX Input 1 Volume */
{ 0x061A, 0x0000 }, /* R1562 - OUT2RMIX Input 2 Source */
{ 0x061B, 0x0080 }, /* R1563 - OUT2RMIX Input 2 Volume */
{ 0x061C, 0x0000 }, /* R1564 - OUT2RMIX Input 3 Source */
{ 0x061D, 0x0080 }, /* R1565 - OUT2RMIX Input 3 Volume */
{ 0x061E, 0x0000 }, /* R1566 - OUT2RMIX Input 4 Source */
{ 0x061F, 0x0080 }, /* R1567 - OUT2RMIX Input 4 Volume */
{ 0x0620, 0x0000 }, /* R1568 - AIF1TX1MIX Input 1 Source */
{ 0x0621, 0x0080 }, /* R1569 - AIF1TX1MIX Input 1 Volume */
{ 0x0622, 0x0000 }, /* R1570 - AIF1TX1MIX Input 2 Source */
{ 0x0623, 0x0080 }, /* R1571 - AIF1TX1MIX Input 2 Volume */
{ 0x0624, 0x0000 }, /* R1572 - AIF1TX1MIX Input 3 Source */
{ 0x0625, 0x0080 }, /* R1573 - AIF1TX1MIX Input 3 Volume */
{ 0x0626, 0x0000 }, /* R1574 - AIF1TX1MIX Input 4 Source */
{ 0x0627, 0x0080 }, /* R1575 - AIF1TX1MIX Input 4 Volume */
{ 0x0628, 0x0000 }, /* R1576 - AIF1TX2MIX Input 1 Source */
{ 0x0629, 0x0080 }, /* R1577 - AIF1TX2MIX Input 1 Volume */
{ 0x062A, 0x0000 }, /* R1578 - AIF1TX2MIX Input 2 Source */
{ 0x062B, 0x0080 }, /* R1579 - AIF1TX2MIX Input 2 Volume */
{ 0x062C, 0x0000 }, /* R1580 - AIF1TX2MIX Input 3 Source */
{ 0x062D, 0x0080 }, /* R1581 - AIF1TX2MIX Input 3 Volume */
{ 0x062E, 0x0000 }, /* R1582 - AIF1TX2MIX Input 4 Source */
{ 0x062F, 0x0080 }, /* R1583 - AIF1TX2MIX Input 4 Volume */
{ 0x0630, 0x0000 }, /* R1584 - AIF1TX3MIX Input 1 Source */
{ 0x0631, 0x0080 }, /* R1585 - AIF1TX3MIX Input 1 Volume */
{ 0x0632, 0x0000 }, /* R1586 - AIF1TX3MIX Input 2 Source */
{ 0x0633, 0x0080 }, /* R1587 - AIF1TX3MIX Input 2 Volume */
{ 0x0634, 0x0000 }, /* R1588 - AIF1TX3MIX Input 3 Source */
{ 0x0635, 0x0080 }, /* R1589 - AIF1TX3MIX Input 3 Volume */
{ 0x0636, 0x0000 }, /* R1590 - AIF1TX3MIX Input 4 Source */
{ 0x0637, 0x0080 }, /* R1591 - AIF1TX3MIX Input 4 Volume */
{ 0x0638, 0x0000 }, /* R1592 - AIF1TX4MIX Input 1 Source */
{ 0x0639, 0x0080 }, /* R1593 - AIF1TX4MIX Input 1 Volume */
{ 0x063A, 0x0000 }, /* R1594 - AIF1TX4MIX Input 2 Source */
{ 0x063B, 0x0080 }, /* R1595 - AIF1TX4MIX Input 2 Volume */
{ 0x063C, 0x0000 }, /* R1596 - AIF1TX4MIX Input 3 Source */
{ 0x063D, 0x0080 }, /* R1597 - AIF1TX4MIX Input 3 Volume */
{ 0x063E, 0x0000 }, /* R1598 - AIF1TX4MIX Input 4 Source */
{ 0x063F, 0x0080 }, /* R1599 - AIF1TX4MIX Input 4 Volume */
{ 0x0640, 0x0000 }, /* R1600 - AIF1TX5MIX Input 1 Source */
{ 0x0641, 0x0080 }, /* R1601 - AIF1TX5MIX Input 1 Volume */
{ 0x0642, 0x0000 }, /* R1602 - AIF1TX5MIX Input 2 Source */
{ 0x0643, 0x0080 }, /* R1603 - AIF1TX5MIX Input 2 Volume */
{ 0x0644, 0x0000 }, /* R1604 - AIF1TX5MIX Input 3 Source */
{ 0x0645, 0x0080 }, /* R1605 - AIF1TX5MIX Input 3 Volume */
{ 0x0646, 0x0000 }, /* R1606 - AIF1TX5MIX Input 4 Source */
{ 0x0647, 0x0080 }, /* R1607 - AIF1TX5MIX Input 4 Volume */
{ 0x0648, 0x0000 }, /* R1608 - AIF1TX6MIX Input 1 Source */
{ 0x0649, 0x0080 }, /* R1609 - AIF1TX6MIX Input 1 Volume */
{ 0x064A, 0x0000 }, /* R1610 - AIF1TX6MIX Input 2 Source */
{ 0x064B, 0x0080 }, /* R1611 - AIF1TX6MIX Input 2 Volume */
{ 0x064C, 0x0000 }, /* R1612 - AIF1TX6MIX Input 3 Source */
{ 0x064D, 0x0080 }, /* R1613 - AIF1TX6MIX Input 3 Volume */
{ 0x064E, 0x0000 }, /* R1614 - AIF1TX6MIX Input 4 Source */
{ 0x064F, 0x0080 }, /* R1615 - AIF1TX6MIX Input 4 Volume */
{ 0x0650, 0x0000 }, /* R1616 - EQLMIX Input 1 Source */
{ 0x0651, 0x0080 }, /* R1617 - EQLMIX Input 1 Volume */
{ 0x0652, 0x0000 }, /* R1618 - EQLMIX Input 2 Source */
{ 0x0653, 0x0080 }, /* R1619 - EQLMIX Input 2 Volume */
{ 0x0654, 0x0000 }, /* R1620 - EQLMIX Input 3 Source */
{ 0x0655, 0x0080 }, /* R1621 - EQLMIX Input 3 Volume */
{ 0x0656, 0x0000 }, /* R1622 - EQLMIX Input 4 Source */
{ 0x0657, 0x0080 }, /* R1623 - EQLMIX Input 4 Volume */
{ 0x0658, 0x0000 }, /* R1624 - EQRMIX Input 1 Source */
{ 0x0659, 0x0080 }, /* R1625 - EQRMIX Input 1 Volume */
{ 0x065A, 0x0000 }, /* R1626 - EQRMIX Input 2 Source */
{ 0x065B, 0x0080 }, /* R1627 - EQRMIX Input 2 Volume */
{ 0x065C, 0x0000 }, /* R1628 - EQRMIX Input 3 Source */
{ 0x065D, 0x0080 }, /* R1629 - EQRMIX Input 3 Volume */
{ 0x065E, 0x0000 }, /* R1630 - EQRMIX Input 4 Source */
{ 0x065F, 0x0080 }, /* R1631 - EQRMIX Input 4 Volume */
{ 0x0660, 0x0000 }, /* R1632 - LHPF1MIX Input 1 Source */
{ 0x0661, 0x0080 }, /* R1633 - LHPF1MIX Input 1 Volume */
{ 0x0662, 0x0000 }, /* R1634 - LHPF1MIX Input 2 Source */
{ 0x0663, 0x0080 }, /* R1635 - LHPF1MIX Input 2 Volume */
{ 0x0664, 0x0000 }, /* R1636 - LHPF1MIX Input 3 Source */
{ 0x0665, 0x0080 }, /* R1637 - LHPF1MIX Input 3 Volume */
{ 0x0666, 0x0000 }, /* R1638 - LHPF1MIX Input 4 Source */
{ 0x0667, 0x0080 }, /* R1639 - LHPF1MIX Input 4 Volume */
{ 0x0668, 0x0000 }, /* R1640 - LHPF2MIX Input 1 Source */
{ 0x0669, 0x0080 }, /* R1641 - LHPF2MIX Input 1 Volume */
{ 0x066A, 0x0000 }, /* R1642 - LHPF2MIX Input 2 Source */
{ 0x066B, 0x0080 }, /* R1643 - LHPF2MIX Input 2 Volume */
{ 0x066C, 0x0000 }, /* R1644 - LHPF2MIX Input 3 Source */
{ 0x066D, 0x0080 }, /* R1645 - LHPF2MIX Input 3 Volume */
{ 0x066E, 0x0000 }, /* R1646 - LHPF2MIX Input 4 Source */
{ 0x066F, 0x0080 }, /* R1647 - LHPF2MIX Input 4 Volume */
{ 0x0670, 0x0000 }, /* R1648 - DSP1LMIX Input 1 Source */
{ 0x0671, 0x0080 }, /* R1649 - DSP1LMIX Input 1 Volume */
{ 0x0672, 0x0000 }, /* R1650 - DSP1LMIX Input 2 Source */
{ 0x0673, 0x0080 }, /* R1651 - DSP1LMIX Input 2 Volume */
{ 0x0674, 0x0000 }, /* R1652 - DSP1LMIX Input 3 Source */
{ 0x0675, 0x0080 }, /* R1653 - DSP1LMIX Input 3 Volume */
{ 0x0676, 0x0000 }, /* R1654 - DSP1LMIX Input 4 Source */
{ 0x0677, 0x0080 }, /* R1655 - DSP1LMIX Input 4 Volume */
{ 0x0678, 0x0000 }, /* R1656 - DSP1RMIX Input 1 Source */
{ 0x0679, 0x0080 }, /* R1657 - DSP1RMIX Input 1 Volume */
{ 0x067A, 0x0000 }, /* R1658 - DSP1RMIX Input 2 Source */
{ 0x067B, 0x0080 }, /* R1659 - DSP1RMIX Input 2 Volume */
{ 0x067C, 0x0000 }, /* R1660 - DSP1RMIX Input 3 Source */
{ 0x067D, 0x0080 }, /* R1661 - DSP1RMIX Input 3 Volume */
{ 0x067E, 0x0000 }, /* R1662 - DSP1RMIX Input 4 Source */
{ 0x067F, 0x0080 }, /* R1663 - DSP1RMIX Input 4 Volume */
{ 0x0680, 0x0000 }, /* R1664 - DSP1AUX1MIX Input 1 Source */
{ 0x0681, 0x0000 }, /* R1665 - DSP1AUX2MIX Input 1 Source */
{ 0x0682, 0x0000 }, /* R1666 - DSP1AUX3MIX Input 1 Source */
{ 0x0683, 0x0000 }, /* R1667 - DSP1AUX4MIX Input 1 Source */
{ 0x0684, 0x0000 }, /* R1668 - DSP1AUX5MIX Input 1 Source */
{ 0x0685, 0x0000 }, /* R1669 - DSP1AUX6MIX Input 1 Source */
{ 0x0686, 0x0000 }, /* R1670 - DSP2LMIX Input 1 Source */
{ 0x0687, 0x0080 }, /* R1671 - DSP2LMIX Input 1 Volume */
{ 0x0688, 0x0000 }, /* R1672 - DSP2LMIX Input 2 Source */
{ 0x0689, 0x0080 }, /* R1673 - DSP2LMIX Input 2 Volume */
{ 0x068A, 0x0000 }, /* R1674 - DSP2LMIX Input 3 Source */
{ 0x068B, 0x0080 }, /* R1675 - DSP2LMIX Input 3 Volume */
{ 0x068C, 0x0000 }, /* R1676 - DSP2LMIX Input 4 Source */
{ 0x068D, 0x0080 }, /* R1677 - DSP2LMIX Input 4 Volume */
{ 0x068E, 0x0000 }, /* R1678 - DSP2RMIX Input 1 Source */
{ 0x068F, 0x0080 }, /* R1679 - DSP2RMIX Input 1 Volume */
{ 0x0690, 0x0000 }, /* R1680 - DSP2RMIX Input 2 Source */
{ 0x0691, 0x0080 }, /* R1681 - DSP2RMIX Input 2 Volume */
{ 0x0692, 0x0000 }, /* R1682 - DSP2RMIX Input 3 Source */
{ 0x0693, 0x0080 }, /* R1683 - DSP2RMIX Input 3 Volume */
{ 0x0694, 0x0000 }, /* R1684 - DSP2RMIX Input 4 Source */
{ 0x0695, 0x0080 }, /* R1685 - DSP2RMIX Input 4 Volume */
{ 0x0696, 0x0000 }, /* R1686 - DSP2AUX1MIX Input 1 Source */
{ 0x0697, 0x0000 }, /* R1687 - DSP2AUX2MIX Input 1 Source */
{ 0x0698, 0x0000 }, /* R1688 - DSP2AUX3MIX Input 1 Source */
{ 0x0699, 0x0000 }, /* R1689 - DSP2AUX4MIX Input 1 Source */
{ 0x069A, 0x0000 }, /* R1690 - DSP2AUX5MIX Input 1 Source */
{ 0x069B, 0x0000 }, /* R1691 - DSP2AUX6MIX Input 1 Source */
{ 0x0700, 0xA101 }, /* R1792 - GPIO CTRL 1 */
{ 0x0701, 0xA101 }, /* R1793 - GPIO CTRL 2 */
{ 0x0702, 0xA101 }, /* R1794 - GPIO CTRL 3 */
{ 0x0703, 0xA101 }, /* R1795 - GPIO CTRL 4 */
{ 0x0709, 0x0000 }, /* R1801 - Misc Pad Ctrl 1 */
{ 0x0801, 0x00FF }, /* R2049 - Interrupt Status 1 Mask */
{ 0x0804, 0xFFFF }, /* R2052 - Interrupt Status 2 Mask */
{ 0x0808, 0x0000 }, /* R2056 - Interrupt Control */
{ 0x0900, 0x0000 }, /* R2304 - EQL_1 */
{ 0x0901, 0x0000 }, /* R2305 - EQL_2 */
{ 0x0902, 0x0000 }, /* R2306 - EQL_3 */
{ 0x0903, 0x0000 }, /* R2307 - EQL_4 */
{ 0x0904, 0x0000 }, /* R2308 - EQL_5 */
{ 0x0905, 0x0000 }, /* R2309 - EQL_6 */
{ 0x0906, 0x0000 }, /* R2310 - EQL_7 */
{ 0x0907, 0x0000 }, /* R2311 - EQL_8 */
{ 0x0908, 0x0000 }, /* R2312 - EQL_9 */
{ 0x0909, 0x0000 }, /* R2313 - EQL_10 */
{ 0x090A, 0x0000 }, /* R2314 - EQL_11 */
{ 0x090B, 0x0000 }, /* R2315 - EQL_12 */
{ 0x090C, 0x0000 }, /* R2316 - EQL_13 */
{ 0x090D, 0x0000 }, /* R2317 - EQL_14 */
{ 0x090E, 0x0000 }, /* R2318 - EQL_15 */
{ 0x090F, 0x0000 }, /* R2319 - EQL_16 */
{ 0x0910, 0x0000 }, /* R2320 - EQL_17 */
{ 0x0911, 0x0000 }, /* R2321 - EQL_18 */
{ 0x0912, 0x0000 }, /* R2322 - EQL_19 */
{ 0x0913, 0x0000 }, /* R2323 - EQL_20 */
{ 0x0916, 0x0000 }, /* R2326 - EQR_1 */
{ 0x0917, 0x0000 }, /* R2327 - EQR_2 */
{ 0x0918, 0x0000 }, /* R2328 - EQR_3 */
{ 0x0919, 0x0000 }, /* R2329 - EQR_4 */
{ 0x091A, 0x0000 }, /* R2330 - EQR_5 */
{ 0x091B, 0x0000 }, /* R2331 - EQR_6 */
{ 0x091C, 0x0000 }, /* R2332 - EQR_7 */
{ 0x091D, 0x0000 }, /* R2333 - EQR_8 */
{ 0x091E, 0x0000 }, /* R2334 - EQR_9 */
{ 0x091F, 0x0000 }, /* R2335 - EQR_10 */
{ 0x0920, 0x0000 }, /* R2336 - EQR_11 */
{ 0x0921, 0x0000 }, /* R2337 - EQR_12 */
{ 0x0922, 0x0000 }, /* R2338 - EQR_13 */
{ 0x0923, 0x0000 }, /* R2339 - EQR_14 */
{ 0x0924, 0x0000 }, /* R2340 - EQR_15 */
{ 0x0925, 0x0000 }, /* R2341 - EQR_16 */
{ 0x0926, 0x0000 }, /* R2342 - EQR_17 */
{ 0x0927, 0x0000 }, /* R2343 - EQR_18 */
{ 0x0928, 0x0000 }, /* R2344 - EQR_19 */
{ 0x0929, 0x0000 }, /* R2345 - EQR_20 */
{ 0x093E, 0x0000 }, /* R2366 - HPLPF1_1 */
{ 0x093F, 0x0000 }, /* R2367 - HPLPF1_2 */
{ 0x0942, 0x0000 }, /* R2370 - HPLPF2_1 */
{ 0x0943, 0x0000 }, /* R2371 - HPLPF2_2 */
{ 0x0A00, 0x0000 }, /* R2560 - DSP1 Control 1 */
{ 0x0A02, 0x0000 }, /* R2562 - DSP1 Control 2 */
{ 0x0A03, 0x0000 }, /* R2563 - DSP1 Control 3 */
{ 0x0A04, 0x0000 }, /* R2564 - DSP1 Control 4 */
{ 0x0A06, 0x0000 }, /* R2566 - DSP1 Control 5 */
{ 0x0A07, 0x0000 }, /* R2567 - DSP1 Control 6 */
{ 0x0A08, 0x0000 }, /* R2568 - DSP1 Control 7 */
{ 0x0A09, 0x0000 }, /* R2569 - DSP1 Control 8 */
{ 0x0A0A, 0x0000 }, /* R2570 - DSP1 Control 9 */
{ 0x0A0B, 0x0000 }, /* R2571 - DSP1 Control 10 */
{ 0x0A0C, 0x0000 }, /* R2572 - DSP1 Control 11 */
{ 0x0A0D, 0x0000 }, /* R2573 - DSP1 Control 12 */
{ 0x0A0F, 0x0000 }, /* R2575 - DSP1 Control 13 */
{ 0x0A10, 0x0000 }, /* R2576 - DSP1 Control 14 */
{ 0x0A11, 0x0000 }, /* R2577 - DSP1 Control 15 */
{ 0x0A12, 0x0000 }, /* R2578 - DSP1 Control 16 */
{ 0x0A13, 0x0000 }, /* R2579 - DSP1 Control 17 */
{ 0x0A14, 0x0000 }, /* R2580 - DSP1 Control 18 */
{ 0x0A16, 0x0000 }, /* R2582 - DSP1 Control 19 */
{ 0x0A17, 0x0000 }, /* R2583 - DSP1 Control 20 */
{ 0x0A18, 0x0000 }, /* R2584 - DSP1 Control 21 */
{ 0x0A1A, 0x1800 }, /* R2586 - DSP1 Control 22 */
{ 0x0A1B, 0x1000 }, /* R2587 - DSP1 Control 23 */
{ 0x0A1C, 0x0400 }, /* R2588 - DSP1 Control 24 */
{ 0x0A1E, 0x0000 }, /* R2590 - DSP1 Control 25 */
{ 0x0A20, 0x0000 }, /* R2592 - DSP1 Control 26 */
{ 0x0A21, 0x0000 }, /* R2593 - DSP1 Control 27 */
{ 0x0A22, 0x0000 }, /* R2594 - DSP1 Control 28 */
{ 0x0A23, 0x0000 }, /* R2595 - DSP1 Control 29 */
{ 0x0A24, 0x0000 }, /* R2596 - DSP1 Control 30 */
{ 0x0A26, 0x0000 }, /* R2598 - DSP1 Control 31 */
{ 0x0B00, 0x0000 }, /* R2816 - DSP2 Control 1 */
{ 0x0B02, 0x0000 }, /* R2818 - DSP2 Control 2 */
{ 0x0B03, 0x0000 }, /* R2819 - DSP2 Control 3 */
{ 0x0B04, 0x0000 }, /* R2820 - DSP2 Control 4 */
{ 0x0B06, 0x0000 }, /* R2822 - DSP2 Control 5 */
{ 0x0B07, 0x0000 }, /* R2823 - DSP2 Control 6 */
{ 0x0B08, 0x0000 }, /* R2824 - DSP2 Control 7 */
{ 0x0B09, 0x0000 }, /* R2825 - DSP2 Control 8 */
{ 0x0B0A, 0x0000 }, /* R2826 - DSP2 Control 9 */
{ 0x0B0B, 0x0000 }, /* R2827 - DSP2 Control 10 */
{ 0x0B0C, 0x0000 }, /* R2828 - DSP2 Control 11 */
{ 0x0B0D, 0x0000 }, /* R2829 - DSP2 Control 12 */
{ 0x0B0F, 0x0000 }, /* R2831 - DSP2 Control 13 */
{ 0x0B10, 0x0000 }, /* R2832 - DSP2 Control 14 */
{ 0x0B11, 0x0000 }, /* R2833 - DSP2 Control 15 */
{ 0x0B12, 0x0000 }, /* R2834 - DSP2 Control 16 */
{ 0x0B13, 0x0000 }, /* R2835 - DSP2 Control 17 */
{ 0x0B14, 0x0000 }, /* R2836 - DSP2 Control 18 */
{ 0x0B16, 0x0000 }, /* R2838 - DSP2 Control 19 */
{ 0x0B17, 0x0000 }, /* R2839 - DSP2 Control 20 */
{ 0x0B18, 0x0000 }, /* R2840 - DSP2 Control 21 */
{ 0x0B1A, 0x0800 }, /* R2842 - DSP2 Control 22 */
{ 0x0B1B, 0x1000 }, /* R2843 - DSP2 Control 23 */
{ 0x0B1C, 0x0400 }, /* R2844 - DSP2 Control 24 */
{ 0x0B1E, 0x0000 }, /* R2846 - DSP2 Control 25 */
{ 0x0B20, 0x0000 }, /* R2848 - DSP2 Control 26 */
{ 0x0B21, 0x0000 }, /* R2849 - DSP2 Control 27 */
{ 0x0B22, 0x0000 }, /* R2850 - DSP2 Control 28 */
{ 0x0B23, 0x0000 }, /* R2851 - DSP2 Control 29 */
{ 0x0B24, 0x0000 }, /* R2852 - DSP2 Control 30 */
{ 0x0B26, 0x0000 }, /* R2854 - DSP2 Control 31 */
};
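/*
 * Editor's note: regmap consults these two callbacks when building its
 * register cache -- volatile registers (reset, revision, interrupt
 * status) are always read from the device, while everything else can be
 * served from cache and restored after power-down.
 */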
static bool wm2200_volatile_register(struct device *dev, unsigned int reg)
{
switch (reg) {
case WM2200_SOFTWARE_RESET:
case WM2200_DEVICE_REVISION:
case WM2200_ADPS1_IRQ0:
case WM2200_ADPS1_IRQ1:
case WM2200_INTERRUPT_STATUS_1:
case WM2200_INTERRUPT_STATUS_2:
case WM2200_INTERRUPT_RAW_STATUS_2:
return true;
default:
return false;
}
}
static bool wm2200_readable_register(struct device *dev, unsigned int reg)
{
switch (reg) {
case WM2200_SOFTWARE_RESET:
case WM2200_DEVICE_REVISION:
case WM2200_TONE_GENERATOR_1:
case WM2200_CLOCKING_3:
case WM2200_CLOCKING_4:
case WM2200_FLL_CONTROL_1:
case WM2200_FLL_CONTROL_2:
case WM2200_FLL_CONTROL_3:
case WM2200_FLL_CONTROL_4:
case WM2200_FLL_CONTROL_6:
case WM2200_FLL_CONTROL_7:
case WM2200_FLL_EFS_1:
case WM2200_FLL_EFS_2:
case WM2200_MIC_CHARGE_PUMP_1:
case WM2200_MIC_CHARGE_PUMP_2:
case WM2200_DM_CHARGE_PUMP_1:
case WM2200_MIC_BIAS_CTRL_1:
case WM2200_MIC_BIAS_CTRL_2:
case WM2200_EAR_PIECE_CTRL_1:
case WM2200_EAR_PIECE_CTRL_2:
case WM2200_INPUT_ENABLES:
case WM2200_IN1L_CONTROL:
case WM2200_IN1R_CONTROL:
case WM2200_IN2L_CONTROL:
case WM2200_IN2R_CONTROL:
case WM2200_IN3L_CONTROL:
case WM2200_IN3R_CONTROL:
case WM2200_RXANC_SRC:
case WM2200_INPUT_VOLUME_RAMP:
case WM2200_ADC_DIGITAL_VOLUME_1L:
case WM2200_ADC_DIGITAL_VOLUME_1R:
case WM2200_ADC_DIGITAL_VOLUME_2L:
case WM2200_ADC_DIGITAL_VOLUME_2R:
case WM2200_ADC_DIGITAL_VOLUME_3L:
case WM2200_ADC_DIGITAL_VOLUME_3R:
case WM2200_OUTPUT_ENABLES:
case WM2200_DAC_VOLUME_LIMIT_1L:
case WM2200_DAC_VOLUME_LIMIT_1R:
case WM2200_DAC_VOLUME_LIMIT_2L:
case WM2200_DAC_VOLUME_LIMIT_2R:
case WM2200_DAC_AEC_CONTROL_1:
case WM2200_OUTPUT_VOLUME_RAMP:
case WM2200_DAC_DIGITAL_VOLUME_1L:
case WM2200_DAC_DIGITAL_VOLUME_1R:
case WM2200_DAC_DIGITAL_VOLUME_2L:
case WM2200_DAC_DIGITAL_VOLUME_2R:
case WM2200_PDM_1:
case WM2200_PDM_2:
case WM2200_AUDIO_IF_1_1:
case WM2200_AUDIO_IF_1_2:
case WM2200_AUDIO_IF_1_3:
case WM2200_AUDIO_IF_1_4:
case WM2200_AUDIO_IF_1_5:
case WM2200_AUDIO_IF_1_6:
case WM2200_AUDIO_IF_1_7:
case WM2200_AUDIO_IF_1_8:
case WM2200_AUDIO_IF_1_9:
case WM2200_AUDIO_IF_1_10:
case WM2200_AUDIO_IF_1_11:
case WM2200_AUDIO_IF_1_12:
case WM2200_AUDIO_IF_1_13:
case WM2200_AUDIO_IF_1_14:
case WM2200_AUDIO_IF_1_15:
case WM2200_AUDIO_IF_1_16:
case WM2200_AUDIO_IF_1_17:
case WM2200_AUDIO_IF_1_18:
case WM2200_AUDIO_IF_1_19:
case WM2200_AUDIO_IF_1_20:
case WM2200_AUDIO_IF_1_21:
case WM2200_AUDIO_IF_1_22:
case WM2200_OUT1LMIX_INPUT_1_SOURCE:
case WM2200_OUT1LMIX_INPUT_1_VOLUME:
case WM2200_OUT1LMIX_INPUT_2_SOURCE:
case WM2200_OUT1LMIX_INPUT_2_VOLUME:
case WM2200_OUT1LMIX_INPUT_3_SOURCE:
case WM2200_OUT1LMIX_INPUT_3_VOLUME:
case WM2200_OUT1LMIX_INPUT_4_SOURCE:
case WM2200_OUT1LMIX_INPUT_4_VOLUME:
case WM2200_OUT1RMIX_INPUT_1_SOURCE:
case WM2200_OUT1RMIX_INPUT_1_VOLUME:
case WM2200_OUT1RMIX_INPUT_2_SOURCE:
case WM2200_OUT1RMIX_INPUT_2_VOLUME:
case WM2200_OUT1RMIX_INPUT_3_SOURCE:
case WM2200_OUT1RMIX_INPUT_3_VOLUME:
case WM2200_OUT1RMIX_INPUT_4_SOURCE:
case WM2200_OUT1RMIX_INPUT_4_VOLUME:
case WM2200_OUT2LMIX_INPUT_1_SOURCE:
case WM2200_OUT2LMIX_INPUT_1_VOLUME:
case WM2200_OUT2LMIX_INPUT_2_SOURCE:
case WM2200_OUT2LMIX_INPUT_2_VOLUME:
case WM2200_OUT2LMIX_INPUT_3_SOURCE:
case WM2200_OUT2LMIX_INPUT_3_VOLUME:
case WM2200_OUT2LMIX_INPUT_4_SOURCE:
case WM2200_OUT2LMIX_INPUT_4_VOLUME:
case WM2200_OUT2RMIX_INPUT_1_SOURCE:
case WM2200_OUT2RMIX_INPUT_1_VOLUME:
case WM2200_OUT2RMIX_INPUT_2_SOURCE:
case WM2200_OUT2RMIX_INPUT_2_VOLUME:
case WM2200_OUT2RMIX_INPUT_3_SOURCE:
case WM2200_OUT2RMIX_INPUT_3_VOLUME:
case WM2200_OUT2RMIX_INPUT_4_SOURCE:
case WM2200_OUT2RMIX_INPUT_4_VOLUME:
case WM2200_AIF1TX1MIX_INPUT_1_SOURCE:
case WM2200_AIF1TX1MIX_INPUT_1_VOLUME:
case WM2200_AIF1TX1MIX_INPUT_2_SOURCE:
case WM2200_AIF1TX1MIX_INPUT_2_VOLUME:
case WM2200_AIF1TX1MIX_INPUT_3_SOURCE:
case WM2200_AIF1TX1MIX_INPUT_3_VOLUME:
case WM2200_AIF1TX1MIX_INPUT_4_SOURCE:
case WM2200_AIF1TX1MIX_INPUT_4_VOLUME:
case WM2200_AIF1TX2MIX_INPUT_1_SOURCE:
case WM2200_AIF1TX2MIX_INPUT_1_VOLUME:
case WM2200_AIF1TX2MIX_INPUT_2_SOURCE:
case WM2200_AIF1TX2MIX_INPUT_2_VOLUME:
case WM2200_AIF1TX2MIX_INPUT_3_SOURCE:
case WM2200_AIF1TX2MIX_INPUT_3_VOLUME:
case WM2200_AIF1TX2MIX_INPUT_4_SOURCE:
case WM2200_AIF1TX2MIX_INPUT_4_VOLUME:
case WM2200_AIF1TX3MIX_INPUT_1_SOURCE:
case WM2200_AIF1TX3MIX_INPUT_1_VOLUME:
case WM2200_AIF1TX3MIX_INPUT_2_SOURCE:
case WM2200_AIF1TX3MIX_INPUT_2_VOLUME:
case WM2200_AIF1TX3MIX_INPUT_3_SOURCE:
case WM2200_AIF1TX3MIX_INPUT_3_VOLUME:
case WM2200_AIF1TX3MIX_INPUT_4_SOURCE:
case WM2200_AIF1TX3MIX_INPUT_4_VOLUME:
case WM2200_AIF1TX4MIX_INPUT_1_SOURCE:
case WM2200_AIF1TX4MIX_INPUT_1_VOLUME:
case WM2200_AIF1TX4MIX_INPUT_2_SOURCE:
case WM2200_AIF1TX4MIX_INPUT_2_VOLUME:
case WM2200_AIF1TX4MIX_INPUT_3_SOURCE:
case WM2200_AIF1TX4MIX_INPUT_3_VOLUME:
case WM2200_AIF1TX4MIX_INPUT_4_SOURCE:
case WM2200_AIF1TX4MIX_INPUT_4_VOLUME:
case WM2200_AIF1TX5MIX_INPUT_1_SOURCE:
case WM2200_AIF1TX5MIX_INPUT_1_VOLUME:
case WM2200_AIF1TX5MIX_INPUT_2_SOURCE:
case WM2200_AIF1TX5MIX_INPUT_2_VOLUME:
case WM2200_AIF1TX5MIX_INPUT_3_SOURCE:
case WM2200_AIF1TX5MIX_INPUT_3_VOLUME:
case WM2200_AIF1TX5MIX_INPUT_4_SOURCE:
case WM2200_AIF1TX5MIX_INPUT_4_VOLUME:
case WM2200_AIF1TX6MIX_INPUT_1_SOURCE:
case WM2200_AIF1TX6MIX_INPUT_1_VOLUME:
case WM2200_AIF1TX6MIX_INPUT_2_SOURCE:
case WM2200_AIF1TX6MIX_INPUT_2_VOLUME:
case WM2200_AIF1TX6MIX_INPUT_3_SOURCE:
case WM2200_AIF1TX6MIX_INPUT_3_VOLUME:
case WM2200_AIF1TX6MIX_INPUT_4_SOURCE:
case WM2200_AIF1TX6MIX_INPUT_4_VOLUME:
case WM2200_EQLMIX_INPUT_1_SOURCE:
case WM2200_EQLMIX_INPUT_1_VOLUME:
case WM2200_EQLMIX_INPUT_2_SOURCE:
case WM2200_EQLMIX_INPUT_2_VOLUME:
case WM2200_EQLMIX_INPUT_3_SOURCE:
case WM2200_EQLMIX_INPUT_3_VOLUME:
case WM2200_EQLMIX_INPUT_4_SOURCE:
case WM2200_EQLMIX_INPUT_4_VOLUME:
case WM2200_EQRMIX_INPUT_1_SOURCE:
case WM2200_EQRMIX_INPUT_1_VOLUME:
case WM2200_EQRMIX_INPUT_2_SOURCE:
case WM2200_EQRMIX_INPUT_2_VOLUME:
case WM2200_EQRMIX_INPUT_3_SOURCE:
case WM2200_EQRMIX_INPUT_3_VOLUME:
case WM2200_EQRMIX_INPUT_4_SOURCE:
case WM2200_EQRMIX_INPUT_4_VOLUME:
case WM2200_LHPF1MIX_INPUT_1_SOURCE:
case WM2200_LHPF1MIX_INPUT_1_VOLUME:
case WM2200_LHPF1MIX_INPUT_2_SOURCE:
case WM2200_LHPF1MIX_INPUT_2_VOLUME:
case WM2200_LHPF1MIX_INPUT_3_SOURCE:
case WM2200_LHPF1MIX_INPUT_3_VOLUME:
case WM2200_LHPF1MIX_INPUT_4_SOURCE:
case WM2200_LHPF1MIX_INPUT_4_VOLUME:
case WM2200_LHPF2MIX_INPUT_1_SOURCE:
case WM2200_LHPF2MIX_INPUT_1_VOLUME:
case WM2200_LHPF2MIX_INPUT_2_SOURCE:
case WM2200_LHPF2MIX_INPUT_2_VOLUME:
case WM2200_LHPF2MIX_INPUT_3_SOURCE:
case WM2200_LHPF2MIX_INPUT_3_VOLUME:
case WM2200_LHPF2MIX_INPUT_4_SOURCE:
case WM2200_LHPF2MIX_INPUT_4_VOLUME:
case WM2200_DSP1LMIX_INPUT_1_SOURCE:
case WM2200_DSP1LMIX_INPUT_1_VOLUME:
case WM2200_DSP1LMIX_INPUT_2_SOURCE:
case WM2200_DSP1LMIX_INPUT_2_VOLUME:
case WM2200_DSP1LMIX_INPUT_3_SOURCE:
case WM2200_DSP1LMIX_INPUT_3_VOLUME:
case WM2200_DSP1LMIX_INPUT_4_SOURCE:
case WM2200_DSP1LMIX_INPUT_4_VOLUME:
case WM2200_DSP1RMIX_INPUT_1_SOURCE:
case WM2200_DSP1RMIX_INPUT_1_VOLUME:
case WM2200_DSP1RMIX_INPUT_2_SOURCE:
case WM2200_DSP1RMIX_INPUT_2_VOLUME:
case WM2200_DSP1RMIX_INPUT_3_SOURCE:
case WM2200_DSP1RMIX_INPUT_3_VOLUME:
case WM2200_DSP1RMIX_INPUT_4_SOURCE:
case WM2200_DSP1RMIX_INPUT_4_VOLUME:
case WM2200_DSP1AUX1MIX_INPUT_1_SOURCE:
case WM2200_DSP1AUX2MIX_INPUT_1_SOURCE:
case WM2200_DSP1AUX3MIX_INPUT_1_SOURCE:
case WM2200_DSP1AUX4MIX_INPUT_1_SOURCE:
case WM2200_DSP1AUX5MIX_INPUT_1_SOURCE:
case WM2200_DSP1AUX6MIX_INPUT_1_SOURCE:
case WM2200_DSP2LMIX_INPUT_1_SOURCE:
case WM2200_DSP2LMIX_INPUT_1_VOLUME:
case WM2200_DSP2LMIX_INPUT_2_SOURCE:
case WM2200_DSP2LMIX_INPUT_2_VOLUME:
case WM2200_DSP2LMIX_INPUT_3_SOURCE:
case WM2200_DSP2LMIX_INPUT_3_VOLUME:
case WM2200_DSP2LMIX_INPUT_4_SOURCE:
case WM2200_DSP2LMIX_INPUT_4_VOLUME:
case WM2200_DSP2RMIX_INPUT_1_SOURCE:
case WM2200_DSP2RMIX_INPUT_1_VOLUME:
case WM2200_DSP2RMIX_INPUT_2_SOURCE:
case WM2200_DSP2RMIX_INPUT_2_VOLUME:
case WM2200_DSP2RMIX_INPUT_3_SOURCE:
case WM2200_DSP2RMIX_INPUT_3_VOLUME:
case WM2200_DSP2RMIX_INPUT_4_SOURCE:
case WM2200_DSP2RMIX_INPUT_4_VOLUME:
case WM2200_DSP2AUX1MIX_INPUT_1_SOURCE:
case WM2200_DSP2AUX2MIX_INPUT_1_SOURCE:
case WM2200_DSP2AUX3MIX_INPUT_1_SOURCE:
case WM2200_DSP2AUX4MIX_INPUT_1_SOURCE:
case WM2200_DSP2AUX5MIX_INPUT_1_SOURCE:
case WM2200_DSP2AUX6MIX_INPUT_1_SOURCE:
case WM2200_GPIO_CTRL_1:
case WM2200_GPIO_CTRL_2:
case WM2200_GPIO_CTRL_3:
case WM2200_GPIO_CTRL_4:
case WM2200_ADPS1_IRQ0:
case WM2200_ADPS1_IRQ1:
case WM2200_MISC_PAD_CTRL_1:
case WM2200_INTERRUPT_STATUS_1:
case WM2200_INTERRUPT_STATUS_1_MASK:
case WM2200_INTERRUPT_STATUS_2:
case WM2200_INTERRUPT_RAW_STATUS_2:
case WM2200_INTERRUPT_STATUS_2_MASK:
case WM2200_INTERRUPT_CONTROL:
case WM2200_EQL_1:
case WM2200_EQL_2:
case WM2200_EQL_3:
case WM2200_EQL_4:
case WM2200_EQL_5:
case WM2200_EQL_6:
case WM2200_EQL_7:
case WM2200_EQL_8:
case WM2200_EQL_9:
case WM2200_EQL_10:
case WM2200_EQL_11:
case WM2200_EQL_12:
case WM2200_EQL_13:
case WM2200_EQL_14:
case WM2200_EQL_15:
case WM2200_EQL_16:
case WM2200_EQL_17:
case WM2200_EQL_18:
case WM2200_EQL_19:
case WM2200_EQL_20:
case WM2200_EQR_1:
case WM2200_EQR_2:
case WM2200_EQR_3:
case WM2200_EQR_4:
case WM2200_EQR_5:
case WM2200_EQR_6:
case WM2200_EQR_7:
case WM2200_EQR_8:
case WM2200_EQR_9:
case WM2200_EQR_10:
case WM2200_EQR_11:
case WM2200_EQR_12:
case WM2200_EQR_13:
case WM2200_EQR_14:
case WM2200_EQR_15:
case WM2200_EQR_16:
case WM2200_EQR_17:
case WM2200_EQR_18:
case WM2200_EQR_19:
case WM2200_EQR_20:
case WM2200_HPLPF1_1:
case WM2200_HPLPF1_2:
case WM2200_HPLPF2_1:
case WM2200_HPLPF2_2:
case WM2200_DSP1_CONTROL_1:
case WM2200_DSP1_CONTROL_2:
case WM2200_DSP1_CONTROL_3:
case WM2200_DSP1_CONTROL_4:
case WM2200_DSP1_CONTROL_5:
case WM2200_DSP1_CONTROL_6:
case WM2200_DSP1_CONTROL_7:
case WM2200_DSP1_CONTROL_8:
case WM2200_DSP1_CONTROL_9:
case WM2200_DSP1_CONTROL_10:
case WM2200_DSP1_CONTROL_11:
case WM2200_DSP1_CONTROL_12:
case WM2200_DSP1_CONTROL_13:
case WM2200_DSP1_CONTROL_14:
case WM2200_DSP1_CONTROL_15:
case WM2200_DSP1_CONTROL_16:
case WM2200_DSP1_CONTROL_17:
case WM2200_DSP1_CONTROL_18:
case WM2200_DSP1_CONTROL_19:
case WM2200_DSP1_CONTROL_20:
case WM2200_DSP1_CONTROL_21:
case WM2200_DSP1_CONTROL_22:
case WM2200_DSP1_CONTROL_23:
case WM2200_DSP1_CONTROL_24:
case WM2200_DSP1_CONTROL_25:
case WM2200_DSP1_CONTROL_26:
case WM2200_DSP1_CONTROL_27:
case WM2200_DSP1_CONTROL_28:
case WM2200_DSP1_CONTROL_29:
case WM2200_DSP1_CONTROL_30:
case WM2200_DSP1_CONTROL_31:
case WM2200_DSP2_CONTROL_1:
case WM2200_DSP2_CONTROL_2:
case WM2200_DSP2_CONTROL_3:
case WM2200_DSP2_CONTROL_4:
case WM2200_DSP2_CONTROL_5:
case WM2200_DSP2_CONTROL_6:
case WM2200_DSP2_CONTROL_7:
case WM2200_DSP2_CONTROL_8:
case WM2200_DSP2_CONTROL_9:
case WM2200_DSP2_CONTROL_10:
case WM2200_DSP2_CONTROL_11:
case WM2200_DSP2_CONTROL_12:
case WM2200_DSP2_CONTROL_13:
case WM2200_DSP2_CONTROL_14:
case WM2200_DSP2_CONTROL_15:
case WM2200_DSP2_CONTROL_16:
case WM2200_DSP2_CONTROL_17:
case WM2200_DSP2_CONTROL_18:
case WM2200_DSP2_CONTROL_19:
case WM2200_DSP2_CONTROL_20:
case WM2200_DSP2_CONTROL_21:
case WM2200_DSP2_CONTROL_22:
case WM2200_DSP2_CONTROL_23:
case WM2200_DSP2_CONTROL_24:
case WM2200_DSP2_CONTROL_25:
case WM2200_DSP2_CONTROL_26:
case WM2200_DSP2_CONTROL_27:
case WM2200_DSP2_CONTROL_28:
case WM2200_DSP2_CONTROL_29:
case WM2200_DSP2_CONTROL_30:
case WM2200_DSP2_CONTROL_31:
return true;
default:
return false;
}
}
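/*
* Register write sequence applied to revision A parts at probe time to
* update power-on register defaults (see the rev 0 case in
* wm2200_i2c_probe() below, which applies it via regmap_register_patch).
*/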
static const struct reg_default wm2200_reva_patch[] = {
{ 0x07, 0x0003 },
{ 0x102, 0x0200 },
{ 0x203, 0x0084 },
{ 0x201, 0x83FF },
{ 0x20C, 0x0062 },
{ 0x20D, 0x0062 },
{ 0x207, 0x2002 },
{ 0x208, 0x20C0 },
{ 0x21D, 0x01C0 },
{ 0x50A, 0x0001 },
{ 0x50B, 0x0002 },
{ 0x50C, 0x0003 },
{ 0x50D, 0x0004 },
{ 0x50E, 0x0005 },
{ 0x510, 0x0001 },
{ 0x511, 0x0002 },
{ 0x512, 0x0003 },
{ 0x513, 0x0004 },
{ 0x514, 0x0005 },
{ 0x515, 0x0000 },
{ 0x201, 0x8084 },
{ 0x202, 0xBBDE },
{ 0x203, 0x00EC },
{ 0x500, 0x8000 },
{ 0x507, 0x1820 },
{ 0x508, 0x1820 },
{ 0x505, 0x0300 },
{ 0x506, 0x0300 },
{ 0x302, 0x2280 },
{ 0x303, 0x0080 },
{ 0x304, 0x2280 },
{ 0x305, 0x0080 },
{ 0x306, 0x2280 },
{ 0x307, 0x0080 },
{ 0x401, 0x0080 },
{ 0x402, 0x0080 },
{ 0x417, 0x3069 },
{ 0x900, 0x6318 },
{ 0x901, 0x6300 },
{ 0x902, 0x0FC8 },
{ 0x903, 0x03FE },
{ 0x904, 0x00E0 },
{ 0x905, 0x1EC4 },
{ 0x906, 0xF136 },
{ 0x907, 0x0409 },
{ 0x908, 0x04CC },
{ 0x909, 0x1C9B },
{ 0x90A, 0xF337 },
{ 0x90B, 0x040B },
{ 0x90C, 0x0CBB },
{ 0x90D, 0x16F8 },
{ 0x90E, 0xF7D9 },
{ 0x90F, 0x040A },
{ 0x910, 0x1F14 },
{ 0x911, 0x058C },
{ 0x912, 0x0563 },
{ 0x913, 0x4000 },
{ 0x916, 0x6318 },
{ 0x917, 0x6300 },
{ 0x918, 0x0FC8 },
{ 0x919, 0x03FE },
{ 0x91A, 0x00E0 },
{ 0x91B, 0x1EC4 },
{ 0x91C, 0xF136 },
{ 0x91D, 0x0409 },
{ 0x91E, 0x04CC },
{ 0x91F, 0x1C9B },
{ 0x920, 0xF337 },
{ 0x921, 0x040B },
{ 0x922, 0x0CBB },
{ 0x923, 0x16F8 },
{ 0x924, 0xF7D9 },
{ 0x925, 0x040A },
{ 0x926, 0x1F14 },
{ 0x927, 0x058C },
{ 0x928, 0x0563 },
{ 0x929, 0x4000 },
{ 0x709, 0x2000 },
{ 0x207, 0x200E },
{ 0x208, 0x20D4 },
{ 0x20A, 0x0080 },
{ 0x07, 0x0000 },
};
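/*
* Reset the device, pulsing the reset GPIO if platform data provides
* one and otherwise falling back to the software reset register.
*/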
static int wm2200_reset(struct wm2200_priv *wm2200)
{
if (wm2200->pdata.reset) {
gpio_set_value_cansleep(wm2200->pdata.reset, 0);
gpio_set_value_cansleep(wm2200->pdata.reset, 1);
return 0;
} else {
return regmap_write(wm2200->regmap, WM2200_SOFTWARE_RESET,
0x2200);
}
}
static DECLARE_TLV_DB_SCALE(in_tlv, -6300, 100, 0);
static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
static DECLARE_TLV_DB_SCALE(out_tlv, -6400, 100, 0);
static const char *wm2200_mixer_texts[] = {
"None",
"Tone Generator",
"AEC loopback",
"IN1L",
"IN1R",
"IN2L",
"IN2R",
"IN3L",
"IN3R",
"AIF1RX1",
"AIF1RX2",
"AIF1RX3",
"AIF1RX4",
"AIF1RX5",
"AIF1RX6",
"EQL",
"EQR",
"LHPF1",
"LHPF2",
"LHPF3",
"LHPF4",
"DSP1.1",
"DSP1.2",
"DSP1.3",
"DSP1.4",
"DSP1.5",
"DSP1.6",
"DSP2.1",
"DSP2.2",
"DSP2.3",
"DSP2.4",
"DSP2.5",
"DSP2.6",
};
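/*
* Register source values for each entry in wm2200_mixer_texts; the two
* tables must stay in 1:1 correspondence.
*/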
static int wm2200_mixer_values[] = {
0x00,
0x04, /* Tone */
0x08, /* AEC */
0x10, /* Input */
0x11,
0x12,
0x13,
0x14,
0x15,
0x20, /* AIF */
0x21,
0x22,
0x23,
0x24,
0x25,
0x50, /* EQL */
0x51, /* EQR */
0x60, /* LHPF1 */
0x61, /* LHPF2 */
0x68, /* DSP1 */
0x69,
0x6a,
0x6b,
0x6c,
0x6d,
0x70, /* DSP2 */
0x71,
0x72,
0x73,
0x74,
0x75,
};
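/*
* Each mixer input occupies a register pair: the source select at
* base + 2 * (input - 1) and its volume in the register immediately
* above, hence the base + 1/3/5/7 volume offsets below.
*/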
#define WM2200_MIXER_CONTROLS(name, base) \
SOC_SINGLE_TLV(name " Input 1 Volume", base + 1 , \
WM2200_MIXER_VOL_SHIFT, 80, 0, mixer_tlv), \
SOC_SINGLE_TLV(name " Input 2 Volume", base + 3 , \
WM2200_MIXER_VOL_SHIFT, 80, 0, mixer_tlv), \
SOC_SINGLE_TLV(name " Input 3 Volume", base + 5 , \
WM2200_MIXER_VOL_SHIFT, 80, 0, mixer_tlv), \
SOC_SINGLE_TLV(name " Input 4 Volume", base + 7 , \
WM2200_MIXER_VOL_SHIFT, 80, 0, mixer_tlv)
#define WM2200_MUX_ENUM_DECL(name, reg) \
SOC_VALUE_ENUM_SINGLE_DECL(name, reg, 0, 0xff, \
wm2200_mixer_texts, wm2200_mixer_values)
#define WM2200_MUX_CTL_DECL(name) \
const struct snd_kcontrol_new name##_mux = \
SOC_DAPM_VALUE_ENUM("Route", name##_enum)
#define WM2200_MIXER_ENUMS(name, base_reg) \
static WM2200_MUX_ENUM_DECL(name##_in1_enum, base_reg); \
static WM2200_MUX_ENUM_DECL(name##_in2_enum, base_reg + 2); \
static WM2200_MUX_ENUM_DECL(name##_in3_enum, base_reg + 4); \
static WM2200_MUX_ENUM_DECL(name##_in4_enum, base_reg + 6); \
static WM2200_MUX_CTL_DECL(name##_in1); \
static WM2200_MUX_CTL_DECL(name##_in2); \
static WM2200_MUX_CTL_DECL(name##_in3); \
static WM2200_MUX_CTL_DECL(name##_in4)
static const struct snd_kcontrol_new wm2200_snd_controls[] = {
SOC_SINGLE("IN1 High Performance Switch", WM2200_IN1L_CONTROL,
WM2200_IN1_OSR_SHIFT, 1, 0),
SOC_SINGLE("IN2 High Performance Switch", WM2200_IN2L_CONTROL,
WM2200_IN2_OSR_SHIFT, 1, 0),
SOC_SINGLE("IN3 High Performance Switch", WM2200_IN3L_CONTROL,
WM2200_IN3_OSR_SHIFT, 1, 0),
SOC_DOUBLE_R_TLV("IN1 Volume", WM2200_IN1L_CONTROL, WM2200_IN1R_CONTROL,
WM2200_IN1L_PGA_VOL_SHIFT, 0x5f, 0, in_tlv),
SOC_DOUBLE_R_TLV("IN2 Volume", WM2200_IN2L_CONTROL, WM2200_IN2R_CONTROL,
WM2200_IN2L_PGA_VOL_SHIFT, 0x5f, 0, in_tlv),
SOC_DOUBLE_R_TLV("IN3 Volume", WM2200_IN3L_CONTROL, WM2200_IN3R_CONTROL,
WM2200_IN3L_PGA_VOL_SHIFT, 0x5f, 0, in_tlv),
SOC_DOUBLE_R("IN1 Digital Switch", WM2200_ADC_DIGITAL_VOLUME_1L,
WM2200_ADC_DIGITAL_VOLUME_1R, WM2200_IN1L_MUTE_SHIFT, 1, 1),
SOC_DOUBLE_R("IN2 Digital Switch", WM2200_ADC_DIGITAL_VOLUME_1L,
WM2200_ADC_DIGITAL_VOLUME_2R, WM2200_IN2L_MUTE_SHIFT, 1, 1),
SOC_DOUBLE_R("IN3 Digital Switch", WM2200_ADC_DIGITAL_VOLUME_1L,
WM2200_ADC_DIGITAL_VOLUME_3R, WM2200_IN3L_MUTE_SHIFT, 1, 1),
SOC_DOUBLE_R_TLV("IN1 Digital Volume", WM2200_ADC_DIGITAL_VOLUME_1L,
WM2200_ADC_DIGITAL_VOLUME_1R, WM2200_IN1L_DIG_VOL_SHIFT,
0xbf, 0, digital_tlv),
SOC_DOUBLE_R_TLV("IN2 Digital Volume", WM2200_ADC_DIGITAL_VOLUME_2L,
WM2200_ADC_DIGITAL_VOLUME_2R, WM2200_IN2L_DIG_VOL_SHIFT,
0xbf, 0, digital_tlv),
SOC_DOUBLE_R_TLV("IN3 Digital Volume", WM2200_ADC_DIGITAL_VOLUME_3L,
WM2200_ADC_DIGITAL_VOLUME_3R, WM2200_IN3L_DIG_VOL_SHIFT,
0xbf, 0, digital_tlv),
SOC_SINGLE("OUT1 High Performance Switch", WM2200_DAC_DIGITAL_VOLUME_1L,
WM2200_OUT1_OSR_SHIFT, 1, 0),
SOC_SINGLE("OUT2 High Performance Switch", WM2200_DAC_DIGITAL_VOLUME_2L,
WM2200_OUT2_OSR_SHIFT, 1, 0),
SOC_DOUBLE_R("OUT1 Digital Switch", WM2200_DAC_DIGITAL_VOLUME_1L,
WM2200_DAC_DIGITAL_VOLUME_1R, WM2200_OUT1L_MUTE_SHIFT, 1, 1),
SOC_DOUBLE_R_TLV("OUT1 Digital Volume", WM2200_DAC_DIGITAL_VOLUME_1L,
WM2200_DAC_DIGITAL_VOLUME_1R, WM2200_OUT1L_VOL_SHIFT, 0x9f, 0,
digital_tlv),
SOC_DOUBLE_R_TLV("OUT1 Volume", WM2200_DAC_VOLUME_LIMIT_1L,
WM2200_DAC_VOLUME_LIMIT_1R, WM2200_OUT1L_PGA_VOL_SHIFT,
0x46, 0, out_tlv),
SOC_DOUBLE_R("OUT2 Digital Switch", WM2200_DAC_DIGITAL_VOLUME_2L,
WM2200_DAC_DIGITAL_VOLUME_2R, WM2200_OUT2L_MUTE_SHIFT, 1, 1),
SOC_DOUBLE_R_TLV("OUT2 Digital Volume", WM2200_DAC_DIGITAL_VOLUME_2L,
WM2200_DAC_DIGITAL_VOLUME_2R, WM2200_OUT2L_VOL_SHIFT, 0x9f, 0,
digital_tlv),
SOC_DOUBLE("OUT2 Switch", WM2200_PDM_1, WM2200_SPK1L_MUTE_SHIFT,
WM2200_SPK1R_MUTE_SHIFT, 1, 0),
};
WM2200_MIXER_ENUMS(OUT1L, WM2200_OUT1LMIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(OUT1R, WM2200_OUT1RMIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(OUT2L, WM2200_OUT2LMIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(OUT2R, WM2200_OUT2RMIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(AIF1TX1, WM2200_AIF1TX1MIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(AIF1TX2, WM2200_AIF1TX2MIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(AIF1TX3, WM2200_AIF1TX3MIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(AIF1TX4, WM2200_AIF1TX4MIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(AIF1TX5, WM2200_AIF1TX5MIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(AIF1TX6, WM2200_AIF1TX6MIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(EQL, WM2200_EQLMIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(EQR, WM2200_EQRMIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(DSP1L, WM2200_DSP1LMIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(DSP1R, WM2200_DSP1RMIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(DSP2L, WM2200_DSP2LMIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(DSP2R, WM2200_DSP2RMIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(LHPF1, WM2200_LHPF1MIX_INPUT_1_SOURCE);
WM2200_MIXER_ENUMS(LHPF2, WM2200_LHPF2MIX_INPUT_1_SOURCE);
#define WM2200_MUX(name, ctrl) \
SND_SOC_DAPM_VALUE_MUX(name, SND_SOC_NOPM, 0, 0, ctrl)
#define WM2200_MIXER_WIDGETS(name, name_str) \
WM2200_MUX(name_str " Input 1", &name##_in1_mux), \
WM2200_MUX(name_str " Input 2", &name##_in2_mux), \
WM2200_MUX(name_str " Input 3", &name##_in3_mux), \
WM2200_MUX(name_str " Input 4", &name##_in4_mux), \
SND_SOC_DAPM_MIXER(name_str " Mixer", SND_SOC_NOPM, 0, 0, NULL, 0)
#define WM2200_MIXER_INPUT_ROUTES(name) \
{ name, "Tone Generator", "Tone Generator" }, \
{ name, "IN1L", "IN1L PGA" }, \
{ name, "IN1R", "IN1R PGA" }, \
{ name, "IN2L", "IN2L PGA" }, \
{ name, "IN2R", "IN2R PGA" }, \
{ name, "IN3L", "IN3L PGA" }, \
{ name, "IN3R", "IN3R PGA" }, \
{ name, "DSP1.1", "DSP1" }, \
{ name, "DSP1.2", "DSP1" }, \
{ name, "DSP1.3", "DSP1" }, \
{ name, "DSP1.4", "DSP1" }, \
{ name, "DSP1.5", "DSP1" }, \
{ name, "DSP1.6", "DSP1" }, \
{ name, "DSP2.1", "DSP2" }, \
{ name, "DSP2.2", "DSP2" }, \
{ name, "DSP2.3", "DSP2" }, \
{ name, "DSP2.4", "DSP2" }, \
{ name, "DSP2.5", "DSP2" }, \
{ name, "DSP2.6", "DSP2" }, \
{ name, "AIF1RX1", "AIF1RX1" }, \
{ name, "AIF1RX2", "AIF1RX2" }, \
{ name, "AIF1RX3", "AIF1RX3" }, \
{ name, "AIF1RX4", "AIF1RX4" }, \
{ name, "AIF1RX5", "AIF1RX5" }, \
{ name, "AIF1RX6", "AIF1RX6" }, \
{ name, "EQL", "EQL" }, \
{ name, "EQR", "EQR" }, \
{ name, "LHPF1", "LHPF1" }, \
{ name, "LHPF2", "LHPF2" }
#define WM2200_MIXER_ROUTES(widget, name) \
{ widget, NULL, name " Mixer" }, \
{ name " Mixer", NULL, name " Input 1" }, \
{ name " Mixer", NULL, name " Input 2" }, \
{ name " Mixer", NULL, name " Input 3" }, \
{ name " Mixer", NULL, name " Input 4" }, \
WM2200_MIXER_INPUT_ROUTES(name " Input 1"), \
WM2200_MIXER_INPUT_ROUTES(name " Input 2"), \
WM2200_MIXER_INPUT_ROUTES(name " Input 3"), \
WM2200_MIXER_INPUT_ROUTES(name " Input 4")
static const struct snd_soc_dapm_widget wm2200_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("SYSCLK", WM2200_CLOCKING_3, WM2200_SYSCLK_ENA_SHIFT, 0,
NULL, 0),
SND_SOC_DAPM_SUPPLY("CP1", WM2200_DM_CHARGE_PUMP_1, WM2200_CPDM_ENA_SHIFT, 0,
NULL, 0),
SND_SOC_DAPM_SUPPLY("CP2", WM2200_MIC_CHARGE_PUMP_1, WM2200_CPMIC_ENA_SHIFT, 0,
NULL, 0),
SND_SOC_DAPM_SUPPLY("MICBIAS1", WM2200_MIC_BIAS_CTRL_1, WM2200_MICB1_ENA_SHIFT,
0, NULL, 0),
SND_SOC_DAPM_SUPPLY("MICBIAS2", WM2200_MIC_BIAS_CTRL_2, WM2200_MICB2_ENA_SHIFT,
0, NULL, 0),
SND_SOC_DAPM_REGULATOR_SUPPLY("CPVDD", 20),
SND_SOC_DAPM_REGULATOR_SUPPLY("AVDD", 20),
SND_SOC_DAPM_INPUT("IN1L"),
SND_SOC_DAPM_INPUT("IN1R"),
SND_SOC_DAPM_INPUT("IN2L"),
SND_SOC_DAPM_INPUT("IN2R"),
SND_SOC_DAPM_INPUT("IN3L"),
SND_SOC_DAPM_INPUT("IN3R"),
SND_SOC_DAPM_SIGGEN("TONE"),
SND_SOC_DAPM_PGA("Tone Generator", WM2200_TONE_GENERATOR_1,
WM2200_TONE_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA("IN1L PGA", WM2200_INPUT_ENABLES, WM2200_IN1L_ENA_SHIFT, 0,
NULL, 0),
SND_SOC_DAPM_PGA("IN1R PGA", WM2200_INPUT_ENABLES, WM2200_IN1R_ENA_SHIFT, 0,
NULL, 0),
SND_SOC_DAPM_PGA("IN2L PGA", WM2200_INPUT_ENABLES, WM2200_IN2L_ENA_SHIFT, 0,
NULL, 0),
SND_SOC_DAPM_PGA("IN2R PGA", WM2200_INPUT_ENABLES, WM2200_IN2R_ENA_SHIFT, 0,
NULL, 0),
SND_SOC_DAPM_PGA("IN3L PGA", WM2200_INPUT_ENABLES, WM2200_IN3L_ENA_SHIFT, 0,
NULL, 0),
SND_SOC_DAPM_PGA("IN3R PGA", WM2200_INPUT_ENABLES, WM2200_IN3R_ENA_SHIFT, 0,
NULL, 0),
SND_SOC_DAPM_AIF_IN("AIF1RX1", "Playback", 0,
WM2200_AUDIO_IF_1_22, WM2200_AIF1RX1_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF1RX2", "Playback", 1,
WM2200_AUDIO_IF_1_22, WM2200_AIF1RX2_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF1RX3", "Playback", 2,
WM2200_AUDIO_IF_1_22, WM2200_AIF1RX3_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF1RX4", "Playback", 3,
WM2200_AUDIO_IF_1_22, WM2200_AIF1RX4_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF1RX5", "Playback", 4,
WM2200_AUDIO_IF_1_22, WM2200_AIF1RX5_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_IN("AIF1RX6", "Playback", 5,
WM2200_AUDIO_IF_1_22, WM2200_AIF1RX6_ENA_SHIFT, 0),
SND_SOC_DAPM_PGA("EQL", WM2200_EQL_1, WM2200_EQL_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA("EQR", WM2200_EQR_1, WM2200_EQR_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA("LHPF1", WM2200_HPLPF1_1, WM2200_LHPF1_ENA_SHIFT, 0,
NULL, 0),
SND_SOC_DAPM_PGA("LHPF2", WM2200_HPLPF2_1, WM2200_LHPF2_ENA_SHIFT, 0,
NULL, 0),
SND_SOC_DAPM_PGA_E("DSP1", SND_SOC_NOPM, 0, 0, NULL, 0, NULL, 0),
SND_SOC_DAPM_PGA_E("DSP2", SND_SOC_NOPM, 1, 0, NULL, 0, NULL, 0),
SND_SOC_DAPM_AIF_OUT("AIF1TX1", "Capture", 0,
WM2200_AUDIO_IF_1_22, WM2200_AIF1TX1_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF1TX2", "Capture", 1,
WM2200_AUDIO_IF_1_22, WM2200_AIF1TX2_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF1TX3", "Capture", 2,
WM2200_AUDIO_IF_1_22, WM2200_AIF1TX3_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF1TX4", "Capture", 3,
WM2200_AUDIO_IF_1_22, WM2200_AIF1TX4_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF1TX5", "Capture", 4,
WM2200_AUDIO_IF_1_22, WM2200_AIF1TX5_ENA_SHIFT, 0),
SND_SOC_DAPM_AIF_OUT("AIF1TX6", "Capture", 5,
WM2200_AUDIO_IF_1_22, WM2200_AIF1TX6_ENA_SHIFT, 0),
SND_SOC_DAPM_PGA_S("OUT1L", 0, WM2200_OUTPUT_ENABLES,
WM2200_OUT1L_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("OUT1R", 0, WM2200_OUTPUT_ENABLES,
WM2200_OUT1R_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("EPD_LP", 1, WM2200_EAR_PIECE_CTRL_1,
WM2200_EPD_LP_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("EPD_OUTP_LP", 1, WM2200_EAR_PIECE_CTRL_1,
WM2200_EPD_OUTP_LP_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("EPD_RMV_SHRT_LP", 1, WM2200_EAR_PIECE_CTRL_1,
WM2200_EPD_RMV_SHRT_LP_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("EPD_LN", 1, WM2200_EAR_PIECE_CTRL_1,
WM2200_EPD_LN_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("EPD_OUTP_LN", 1, WM2200_EAR_PIECE_CTRL_1,
WM2200_EPD_OUTP_LN_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("EPD_RMV_SHRT_LN", 1, WM2200_EAR_PIECE_CTRL_1,
WM2200_EPD_RMV_SHRT_LN_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("EPD_RP", 1, WM2200_EAR_PIECE_CTRL_2,
WM2200_EPD_RP_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("EPD_OUTP_RP", 1, WM2200_EAR_PIECE_CTRL_2,
WM2200_EPD_OUTP_RP_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("EPD_RMV_SHRT_RP", 1, WM2200_EAR_PIECE_CTRL_2,
WM2200_EPD_RMV_SHRT_RP_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("EPD_RN", 1, WM2200_EAR_PIECE_CTRL_2,
WM2200_EPD_RN_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("EPD_OUTP_RN", 1, WM2200_EAR_PIECE_CTRL_2,
WM2200_EPD_OUTP_RN_ENA_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("EPD_RMV_SHRT_RN", 1, WM2200_EAR_PIECE_CTRL_2,
WM2200_EPD_RMV_SHRT_RN_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_PGA("OUT2L", WM2200_OUTPUT_ENABLES, WM2200_OUT2L_ENA_SHIFT,
0, NULL, 0),
SND_SOC_DAPM_PGA("OUT2R", WM2200_OUTPUT_ENABLES, WM2200_OUT2R_ENA_SHIFT,
0, NULL, 0),
SND_SOC_DAPM_OUTPUT("EPOUTLN"),
SND_SOC_DAPM_OUTPUT("EPOUTLP"),
SND_SOC_DAPM_OUTPUT("EPOUTRN"),
SND_SOC_DAPM_OUTPUT("EPOUTRP"),
SND_SOC_DAPM_OUTPUT("SPK"),
WM2200_MIXER_WIDGETS(EQL, "EQL"),
WM2200_MIXER_WIDGETS(EQR, "EQR"),
WM2200_MIXER_WIDGETS(LHPF1, "LHPF1"),
WM2200_MIXER_WIDGETS(LHPF2, "LHPF2"),
WM2200_MIXER_WIDGETS(DSP1L, "DSP1L"),
WM2200_MIXER_WIDGETS(DSP1R, "DSP1R"),
WM2200_MIXER_WIDGETS(DSP2L, "DSP2L"),
WM2200_MIXER_WIDGETS(DSP2R, "DSP2R"),
WM2200_MIXER_WIDGETS(AIF1TX1, "AIF1TX1"),
WM2200_MIXER_WIDGETS(AIF1TX2, "AIF1TX2"),
WM2200_MIXER_WIDGETS(AIF1TX3, "AIF1TX3"),
WM2200_MIXER_WIDGETS(AIF1TX4, "AIF1TX4"),
WM2200_MIXER_WIDGETS(AIF1TX5, "AIF1TX5"),
WM2200_MIXER_WIDGETS(AIF1TX6, "AIF1TX6"),
WM2200_MIXER_WIDGETS(OUT1L, "OUT1L"),
WM2200_MIXER_WIDGETS(OUT1R, "OUT1R"),
WM2200_MIXER_WIDGETS(OUT2L, "OUT2L"),
WM2200_MIXER_WIDGETS(OUT2R, "OUT2R"),
};
static const struct snd_soc_dapm_route wm2200_dapm_routes[] = {
/* Everything needs SYSCLK but only hook up things on the edge
* of the chip */
{ "IN1L", NULL, "SYSCLK" },
{ "IN1R", NULL, "SYSCLK" },
{ "IN2L", NULL, "SYSCLK" },
{ "IN2R", NULL, "SYSCLK" },
{ "IN3L", NULL, "SYSCLK" },
{ "IN3R", NULL, "SYSCLK" },
{ "OUT1L", NULL, "SYSCLK" },
{ "OUT1R", NULL, "SYSCLK" },
{ "OUT2L", NULL, "SYSCLK" },
{ "OUT2R", NULL, "SYSCLK" },
{ "AIF1RX1", NULL, "SYSCLK" },
{ "AIF1RX2", NULL, "SYSCLK" },
{ "AIF1RX3", NULL, "SYSCLK" },
{ "AIF1RX4", NULL, "SYSCLK" },
{ "AIF1RX5", NULL, "SYSCLK" },
{ "AIF1RX6", NULL, "SYSCLK" },
{ "AIF1TX1", NULL, "SYSCLK" },
{ "AIF1TX2", NULL, "SYSCLK" },
{ "AIF1TX3", NULL, "SYSCLK" },
{ "AIF1TX4", NULL, "SYSCLK" },
{ "AIF1TX5", NULL, "SYSCLK" },
{ "AIF1TX6", NULL, "SYSCLK" },
{ "IN1L", NULL, "AVDD" },
{ "IN1R", NULL, "AVDD" },
{ "IN2L", NULL, "AVDD" },
{ "IN2R", NULL, "AVDD" },
{ "IN3L", NULL, "AVDD" },
{ "IN3R", NULL, "AVDD" },
{ "OUT1L", NULL, "AVDD" },
{ "OUT1R", NULL, "AVDD" },
{ "IN1L PGA", NULL, "IN1L" },
{ "IN1R PGA", NULL, "IN1R" },
{ "IN2L PGA", NULL, "IN2L" },
{ "IN2R PGA", NULL, "IN2R" },
{ "IN3L PGA", NULL, "IN3L" },
{ "IN3R PGA", NULL, "IN3R" },
{ "Tone Generator", NULL, "TONE" },
{ "CP2", NULL, "CPVDD" },
{ "MICBIAS1", NULL, "CP2" },
{ "MICBIAS2", NULL, "CP2" },
{ "CP1", NULL, "CPVDD" },
{ "EPD_LN", NULL, "CP1" },
{ "EPD_LP", NULL, "CP1" },
{ "EPD_RN", NULL, "CP1" },
{ "EPD_RP", NULL, "CP1" },
{ "EPD_LP", NULL, "OUT1L" },
{ "EPD_OUTP_LP", NULL, "EPD_LP" },
{ "EPD_RMV_SHRT_LP", NULL, "EPD_OUTP_LP" },
{ "EPOUTLP", NULL, "EPD_RMV_SHRT_LP" },
{ "EPD_LN", NULL, "OUT1L" },
{ "EPD_OUTP_LN", NULL, "EPD_LN" },
{ "EPD_RMV_SHRT_LN", NULL, "EPD_OUTP_LN" },
{ "EPOUTLN", NULL, "EPD_RMV_SHRT_LN" },
{ "EPD_RP", NULL, "OUT1R" },
{ "EPD_OUTP_RP", NULL, "EPD_RP" },
{ "EPD_RMV_SHRT_RP", NULL, "EPD_OUTP_RP" },
{ "EPOUTRP", NULL, "EPD_RMV_SHRT_RP" },
{ "EPD_RN", NULL, "OUT1R" },
{ "EPD_OUTP_RN", NULL, "EPD_RN" },
{ "EPD_RMV_SHRT_RN", NULL, "EPD_OUTP_RN" },
{ "EPOUTRN", NULL, "EPD_RMV_SHRT_RN" },
{ "SPK", NULL, "OUT2L" },
{ "SPK", NULL, "OUT2R" },
WM2200_MIXER_ROUTES("DSP1", "DSP1L"),
WM2200_MIXER_ROUTES("DSP1", "DSP1R"),
WM2200_MIXER_ROUTES("DSP2", "DSP2L"),
WM2200_MIXER_ROUTES("DSP2", "DSP2R"),
WM2200_MIXER_ROUTES("OUT1L", "OUT1L"),
WM2200_MIXER_ROUTES("OUT1R", "OUT1R"),
WM2200_MIXER_ROUTES("OUT2L", "OUT2L"),
WM2200_MIXER_ROUTES("OUT2R", "OUT2R"),
WM2200_MIXER_ROUTES("AIF1TX1", "AIF1TX1"),
WM2200_MIXER_ROUTES("AIF1TX2", "AIF1TX2"),
WM2200_MIXER_ROUTES("AIF1TX3", "AIF1TX3"),
WM2200_MIXER_ROUTES("AIF1TX4", "AIF1TX4"),
WM2200_MIXER_ROUTES("AIF1TX5", "AIF1TX5"),
WM2200_MIXER_ROUTES("AIF1TX6", "AIF1TX6"),
WM2200_MIXER_ROUTES("EQL", "EQL"),
WM2200_MIXER_ROUTES("EQR", "EQR"),
WM2200_MIXER_ROUTES("LHPF1", "LHPF1"),
WM2200_MIXER_ROUTES("LHPF2", "LHPF2"),
};
static int wm2200_probe(struct snd_soc_codec *codec)
{
struct wm2200_priv *wm2200 = dev_get_drvdata(codec->dev);
int ret;
wm2200->codec = codec;
codec->control_data = wm2200->regmap;
codec->dapm.bias_level = SND_SOC_BIAS_OFF;
ret = snd_soc_codec_set_cache_io(codec, 16, 16, SND_SOC_REGMAP);
if (ret != 0) {
dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
return ret;
}
return ret;
}
static int wm2200_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
struct snd_soc_codec *codec = dai->codec;
int lrclk, bclk, fmt_val;
lrclk = 0;
bclk = 0;
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_DSP_A:
fmt_val = 0;
break;
case SND_SOC_DAIFMT_DSP_B:
fmt_val = 1;
break;
case SND_SOC_DAIFMT_I2S:
fmt_val = 2;
break;
case SND_SOC_DAIFMT_LEFT_J:
fmt_val = 3;
break;
default:
dev_err(codec->dev, "Unsupported DAI format %d\n",
fmt & SND_SOC_DAIFMT_FORMAT_MASK);
return -EINVAL;
}
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBS_CFS:
break;
case SND_SOC_DAIFMT_CBS_CFM:
lrclk |= WM2200_AIF1TX_LRCLK_MSTR;
break;
case SND_SOC_DAIFMT_CBM_CFS:
bclk |= WM2200_AIF1_BCLK_MSTR;
break;
case SND_SOC_DAIFMT_CBM_CFM:
lrclk |= WM2200_AIF1TX_LRCLK_MSTR;
bclk |= WM2200_AIF1_BCLK_MSTR;
break;
default:
dev_err(codec->dev, "Unsupported master mode %d\n",
fmt & SND_SOC_DAIFMT_MASTER_MASK);
return -EINVAL;
}
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
break;
case SND_SOC_DAIFMT_IB_IF:
bclk |= WM2200_AIF1_BCLK_INV;
lrclk |= WM2200_AIF1TX_LRCLK_INV;
break;
case SND_SOC_DAIFMT_IB_NF:
bclk |= WM2200_AIF1_BCLK_INV;
break;
case SND_SOC_DAIFMT_NB_IF:
lrclk |= WM2200_AIF1TX_LRCLK_INV;
break;
default:
return -EINVAL;
}
snd_soc_update_bits(codec, WM2200_AUDIO_IF_1_1, WM2200_AIF1_BCLK_MSTR |
WM2200_AIF1_BCLK_INV, bclk);
snd_soc_update_bits(codec, WM2200_AUDIO_IF_1_2,
WM2200_AIF1TX_LRCLK_MSTR | WM2200_AIF1TX_LRCLK_INV,
lrclk);
snd_soc_update_bits(codec, WM2200_AUDIO_IF_1_3,
WM2200_AIF1TX_LRCLK_MSTR | WM2200_AIF1TX_LRCLK_INV,
lrclk);
snd_soc_update_bits(codec, WM2200_AUDIO_IF_1_5,
WM2200_AIF1_FMT_MASK << 1, fmt_val << 1);
return 0;
}
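/*
* Sample rates indexed by the SAMPLE_RATE_1 register code: three
* families based on 12kHz, 11.025kHz and 4kHz, each scaled by powers
* of two. A zero entry marks an unsupported code.
*/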
static int wm2200_sr_code[] = {
0,
12000,
24000,
48000,
96000,
192000,
384000,
768000,
0,
11025,
22050,
44100,
88200,
176400,
352800,
705600,
4000,
8000,
16000,
32000,
64000,
128000,
256000,
512000,
};
#define WM2200_NUM_BCLK_RATES 12
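/*
* Available BCLK rates, indexed by the AIF1_BCLK_DIV field value: one
* table for SYSCLKs in the 48kHz family and one for the 44.1kHz (CD)
* family, selected in wm2200_hw_params() by testing SYSCLK % 4000.
*/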
static int wm2200_bclk_rates_dat[WM2200_NUM_BCLK_RATES] = {
6144000,
3072000,
2048000,
1536000,
768000,
512000,
384000,
256000,
192000,
128000,
96000,
64000,
};
static int wm2200_bclk_rates_cd[WM2200_NUM_BCLK_RATES] = {
5644800,
3763200,
2882400,
1881600,
1411200,
705600,
470400,
352800,
176400,
117600,
88200,
58800,
};
static int wm2200_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
struct wm2200_priv *wm2200 = snd_soc_codec_get_drvdata(codec);
int i, bclk, lrclk, wl, fl, sr_code;
int *bclk_rates;
/* Data sizes if not using TDM */
wl = snd_pcm_format_width(params_format(params));
if (wl < 0)
return wl;
fl = snd_soc_params_to_frame_size(params);
if (fl < 0)
return fl;
dev_dbg(codec->dev, "Word length %d bits, frame length %d bits\n",
wl, fl);
/* Target BCLK rate */
bclk = snd_soc_params_to_bclk(params);
if (bclk < 0)
return bclk;
if (!wm2200->sysclk) {
dev_err(codec->dev, "SYSCLK has no rate set\n");
return -EINVAL;
}
for (i = 0; i < ARRAY_SIZE(wm2200_sr_code); i++)
if (wm2200_sr_code[i] == params_rate(params))
break;
if (i == ARRAY_SIZE(wm2200_sr_code)) {
dev_err(codec->dev, "Unsupported sample rate: %dHz\n",
params_rate(params));
return -EINVAL;
}
sr_code = i;
dev_dbg(codec->dev, "Target BCLK is %dHz, using %dHz SYSCLK\n",
bclk, wm2200->sysclk);
if (wm2200->sysclk % 4000)
bclk_rates = wm2200_bclk_rates_cd;
else
bclk_rates = wm2200_bclk_rates_dat;
for (i = 0; i < WM2200_NUM_BCLK_RATES; i++)
if (bclk_rates[i] >= bclk && (bclk_rates[i] % bclk == 0))
break;
if (i == WM2200_NUM_BCLK_RATES) {
dev_err(codec->dev,
"No valid BCLK for %dHz found from %dHz SYSCLK\n",
bclk, wm2200->sysclk);
return -EINVAL;
}
bclk = i;
dev_dbg(codec->dev, "Setting %dHz BCLK\n", bclk_rates[bclk]);
snd_soc_update_bits(codec, WM2200_AUDIO_IF_1_1,
WM2200_AIF1_BCLK_DIV_MASK, bclk);
lrclk = bclk_rates[bclk] / params_rate(params);
dev_dbg(codec->dev, "Setting %dHz LRCLK\n", bclk_rates[bclk] / lrclk);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ||
dai->symmetric_rates)
snd_soc_update_bits(codec, WM2200_AUDIO_IF_1_7,
WM2200_AIF1RX_BCPF_MASK, lrclk);
else
snd_soc_update_bits(codec, WM2200_AUDIO_IF_1_6,
WM2200_AIF1TX_BCPF_MASK, lrclk);
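/* Program the same width into the word length and slot length fields */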
i = (wl << WM2200_AIF1TX_WL_SHIFT) | wl;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
snd_soc_update_bits(codec, WM2200_AUDIO_IF_1_9,
WM2200_AIF1RX_WL_MASK |
WM2200_AIF1RX_SLOT_LEN_MASK, i);
else
snd_soc_update_bits(codec, WM2200_AUDIO_IF_1_8,
WM2200_AIF1TX_WL_MASK |
WM2200_AIF1TX_SLOT_LEN_MASK, i);
snd_soc_update_bits(codec, WM2200_CLOCKING_4,
WM2200_SAMPLE_RATE_1_MASK, sr_code);
return 0;
}
static const struct snd_soc_dai_ops wm2200_dai_ops = {
.set_fmt = wm2200_set_fmt,
.hw_params = wm2200_hw_params,
};
static int wm2200_set_sysclk(struct snd_soc_codec *codec, int clk_id,
int source, unsigned int freq, int dir)
{
struct wm2200_priv *wm2200 = snd_soc_codec_get_drvdata(codec);
int fval;
switch (clk_id) {
case WM2200_CLK_SYSCLK:
break;
default:
dev_err(codec->dev, "Unknown clock %d\n", clk_id);
return -EINVAL;
}
switch (source) {
case WM2200_CLKSRC_MCLK1:
case WM2200_CLKSRC_MCLK2:
case WM2200_CLKSRC_FLL:
case WM2200_CLKSRC_BCLK1:
break;
default:
dev_err(codec->dev, "Invalid source %d\n", source);
return -EINVAL;
}
switch (freq) {
case 22579200:
case 24576000:
fval = 2;
break;
default:
dev_err(codec->dev, "Invalid clock rate: %d\n", freq);
return -EINVAL;
}
/* TODO: Check if MCLKs are in use and enable/disable pulls to
* match.
*/
snd_soc_update_bits(codec, WM2200_CLOCKING_3, WM2200_SYSCLK_FREQ_MASK |
WM2200_SYSCLK_SRC_MASK,
fval << WM2200_SYSCLK_FREQ_SHIFT | source);
wm2200->sysclk = freq;
return 0;
}
struct _fll_div {
u16 fll_fratio;
u16 fll_outdiv;
u16 fll_refclk_div;
u16 n;
u16 theta;
u16 lambda;
};
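/*
* FLL_FRATIO look-up table: for each range of (divided) reference
* frequencies, the FLL_FRATIO field value and the effective reference
* multiplication ratio applied before the N.K multiplier.
*/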
static struct {
unsigned int min;
unsigned int max;
u16 fll_fratio;
int ratio;
} fll_fratios[] = {
{ 0, 64000, 4, 16 },
{ 64000, 128000, 3, 8 },
{ 128000, 256000, 2, 4 },
{ 256000, 1000000, 1, 2 },
{ 1000000, 13500000, 0, 1 },
};
static int fll_factors(struct _fll_div *fll_div, unsigned int Fref,
unsigned int Fout)
{
unsigned int target;
unsigned int div;
unsigned int fratio, gcd_fll;
int i;
/* Fref must be <=13.5MHz */
div = 1;
fll_div->fll_refclk_div = 0;
while ((Fref / div) > 13500000) {
div *= 2;
fll_div->fll_refclk_div++;
if (div > 8) {
pr_err("Can't scale %dMHz input down to <=13.5MHz\n",
Fref);
return -EINVAL;
}
}
pr_debug("FLL Fref=%u Fout=%u\n", Fref, Fout);
/* Apply the division for our remaining calculations */
Fref /= div;
/* Fvco should be 90-100MHz; don't check the upper bound */
div = 2;
while (Fout * div < 90000000) {
div++;
if (div > 64) {
pr_err("Unable to find FLL_OUTDIV for Fout=%uHz\n",
Fout);
return -EINVAL;
}
}
target = Fout * div;
fll_div->fll_outdiv = div - 1;
pr_debug("FLL Fvco=%dHz\n", target);
/* Find an appropriate FLL_FRATIO and factor it out of the target */
for (i = 0; i < ARRAY_SIZE(fll_fratios); i++) {
if (fll_fratios[i].min <= Fref && Fref <= fll_fratios[i].max) {
fll_div->fll_fratio = fll_fratios[i].fll_fratio;
fratio = fll_fratios[i].ratio;
break;
}
}
if (i == ARRAY_SIZE(fll_fratios)) {
pr_err("Unable to find FLL_FRATIO for Fref=%uHz\n", Fref);
return -EINVAL;
}
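/*
* Fvco = Fref * FLL_FRATIO ratio * (N + THETA / LAMBDA): split the
* target into an integer part N and a fractional part THETA/LAMBDA,
* reducing the fraction by the GCD so it fits the register fields.
*/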
fll_div->n = target / (fratio * Fref);
if (target % Fref == 0) {
fll_div->theta = 0;
fll_div->lambda = 0;
} else {
gcd_fll = gcd(target, fratio * Fref);
fll_div->theta = (target - (fll_div->n * fratio * Fref))
/ gcd_fll;
fll_div->lambda = (fratio * Fref) / gcd_fll;
}
pr_debug("FLL N=%x THETA=%x LAMBDA=%x\n",
fll_div->n, fll_div->theta, fll_div->lambda);
pr_debug("FLL_FRATIO=%x(%d) FLL_OUTDIV=%x FLL_REFCLK_DIV=%x\n",
fll_div->fll_fratio, fratio, fll_div->fll_outdiv,
fll_div->fll_refclk_div);
return 0;
}
static int wm2200_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
unsigned int Fref, unsigned int Fout)
{
struct i2c_client *i2c = to_i2c_client(codec->dev);
struct wm2200_priv *wm2200 = snd_soc_codec_get_drvdata(codec);
struct _fll_div factors;
int ret, i, timeout;
if (!Fout) {
dev_dbg(codec->dev, "FLL disabled");
if (wm2200->fll_fout)
pm_runtime_put(codec->dev);
wm2200->fll_fout = 0;
snd_soc_update_bits(codec, WM2200_FLL_CONTROL_1,
WM2200_FLL_ENA, 0);
return 0;
}
switch (source) {
case WM2200_FLL_SRC_MCLK1:
case WM2200_FLL_SRC_MCLK2:
case WM2200_FLL_SRC_BCLK:
break;
default:
dev_err(codec->dev, "Invalid FLL source %d\n", source);
return -EINVAL;
}
ret = fll_factors(&factors, Fref, Fout);
if (ret < 0)
return ret;
/* Disable the FLL while we reconfigure */
snd_soc_update_bits(codec, WM2200_FLL_CONTROL_1, WM2200_FLL_ENA, 0);
snd_soc_update_bits(codec, WM2200_FLL_CONTROL_2,
WM2200_FLL_OUTDIV_MASK | WM2200_FLL_FRATIO_MASK,
(factors.fll_outdiv << WM2200_FLL_OUTDIV_SHIFT) |
factors.fll_fratio);
if (factors.theta) {
snd_soc_update_bits(codec, WM2200_FLL_CONTROL_3,
WM2200_FLL_FRACN_ENA,
WM2200_FLL_FRACN_ENA);
snd_soc_update_bits(codec, WM2200_FLL_EFS_2,
WM2200_FLL_EFS_ENA,
WM2200_FLL_EFS_ENA);
} else {
snd_soc_update_bits(codec, WM2200_FLL_CONTROL_3,
WM2200_FLL_FRACN_ENA, 0);
snd_soc_update_bits(codec, WM2200_FLL_EFS_2,
WM2200_FLL_EFS_ENA, 0);
}
snd_soc_update_bits(codec, WM2200_FLL_CONTROL_4, WM2200_FLL_THETA_MASK,
factors.theta);
snd_soc_update_bits(codec, WM2200_FLL_CONTROL_6, WM2200_FLL_N_MASK,
factors.n);
snd_soc_update_bits(codec, WM2200_FLL_CONTROL_7,
WM2200_FLL_CLK_REF_DIV_MASK |
WM2200_FLL_CLK_REF_SRC_MASK,
(factors.fll_refclk_div
<< WM2200_FLL_CLK_REF_DIV_SHIFT) | source);
snd_soc_update_bits(codec, WM2200_FLL_EFS_1,
WM2200_FLL_LAMBDA_MASK, factors.lambda);
/* Clear any pending completions */
try_wait_for_completion(&wm2200->fll_lock);
pm_runtime_get_sync(codec->dev);
snd_soc_update_bits(codec, WM2200_FLL_CONTROL_1,
WM2200_FLL_ENA, WM2200_FLL_ENA);
if (i2c->irq)
timeout = 2;
else
timeout = 50;
snd_soc_update_bits(codec, WM2200_CLOCKING_3, WM2200_SYSCLK_ENA,
WM2200_SYSCLK_ENA);
/* Poll for the lock; will use the interrupt to exit quickly */
for (i = 0; i < timeout; i++) {
if (i2c->irq) {
ret = wait_for_completion_timeout(&wm2200->fll_lock,
msecs_to_jiffies(25));
if (ret > 0)
break;
} else {
msleep(1);
}
ret = snd_soc_read(codec,
WM2200_INTERRUPT_RAW_STATUS_2);
if (ret < 0) {
dev_err(codec->dev,
"Failed to read FLL status: %d\n",
ret);
continue;
}
if (ret & WM2200_FLL_LOCK_STS)
break;
}
if (i == timeout) {
dev_err(codec->dev, "FLL lock timed out\n");
pm_runtime_put(codec->dev);
return -ETIMEDOUT;
}
wm2200->fll_src = source;
wm2200->fll_fref = Fref;
wm2200->fll_fout = Fout;
dev_dbg(codec->dev, "FLL running %dHz->%dHz\n", Fref, Fout);
return 0;
}
static int wm2200_dai_probe(struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
unsigned int val = 0;
int ret;
ret = snd_soc_read(codec, WM2200_GPIO_CTRL_1);
if (ret >= 0) {
if ((ret & WM2200_GP1_FN_MASK) != 0) {
dai->symmetric_rates = true;
val = WM2200_AIF1TX_LRCLK_SRC;
}
} else {
dev_err(codec->dev, "Failed to read GPIO 1 config: %d\n", ret);
}
snd_soc_update_bits(codec, WM2200_AUDIO_IF_1_2,
WM2200_AIF1TX_LRCLK_SRC, val);
return 0;
}
#define WM2200_RATES SNDRV_PCM_RATE_8000_48000
#define WM2200_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
static struct snd_soc_dai_driver wm2200_dai = {
.name = "wm2200",
.probe = wm2200_dai_probe,
.playback = {
.stream_name = "Playback",
.channels_min = 2,
.channels_max = 2,
.rates = WM2200_RATES,
.formats = WM2200_FORMATS,
},
.capture = {
.stream_name = "Capture",
.channels_min = 2,
.channels_max = 2,
.rates = WM2200_RATES,
.formats = WM2200_FORMATS,
},
.ops = &wm2200_dai_ops,
};
static struct snd_soc_codec_driver soc_codec_wm2200 = {
.probe = wm2200_probe,
.idle_bias_off = true,
.ignore_pmdown_time = true,
.set_sysclk = wm2200_set_sysclk,
.set_pll = wm2200_set_fll,
.controls = wm2200_snd_controls,
.num_controls = ARRAY_SIZE(wm2200_snd_controls),
.dapm_widgets = wm2200_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(wm2200_dapm_widgets),
.dapm_routes = wm2200_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(wm2200_dapm_routes),
};
static irqreturn_t wm2200_irq(int irq, void *data)
{
struct wm2200_priv *wm2200 = data;
unsigned int val, mask;
int ret;
ret = regmap_read(wm2200->regmap, WM2200_INTERRUPT_STATUS_2, &val);
if (ret != 0) {
dev_err(wm2200->dev, "Failed to read IRQ status: %d\n", ret);
return IRQ_NONE;
}
ret = regmap_read(wm2200->regmap, WM2200_INTERRUPT_STATUS_2_MASK,
&mask);
if (ret != 0) {
dev_warn(wm2200->dev, "Failed to read IRQ mask: %d\n", ret);
mask = 0;
}
val &= ~mask;
if (val & WM2200_FLL_LOCK_EINT) {
dev_dbg(wm2200->dev, "FLL locked\n");
complete(&wm2200->fll_lock);
}
if (val) {
regmap_write(wm2200->regmap, WM2200_INTERRUPT_STATUS_2, val);
return IRQ_HANDLED;
} else {
return IRQ_NONE;
}
}
static const struct regmap_config wm2200_regmap = {
.reg_bits = 16,
.val_bits = 16,
.max_register = WM2200_MAX_REGISTER,
.reg_defaults = wm2200_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(wm2200_reg_defaults),
.volatile_reg = wm2200_volatile_register,
.readable_reg = wm2200_readable_register,
.cache_type = REGCACHE_RBTREE,
};
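/*
* Digital volume registers sharing a volume update (VU) bit; probe
* sets OUT_VU in each so that volume writes take immediate effect.
*/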
static const unsigned int wm2200_dig_vu[] = {
WM2200_DAC_DIGITAL_VOLUME_1L,
WM2200_DAC_DIGITAL_VOLUME_1R,
WM2200_DAC_DIGITAL_VOLUME_2L,
WM2200_DAC_DIGITAL_VOLUME_2R,
WM2200_ADC_DIGITAL_VOLUME_1L,
WM2200_ADC_DIGITAL_VOLUME_1R,
WM2200_ADC_DIGITAL_VOLUME_2L,
WM2200_ADC_DIGITAL_VOLUME_2R,
WM2200_ADC_DIGITAL_VOLUME_3L,
WM2200_ADC_DIGITAL_VOLUME_3R,
};
static const unsigned int wm2200_mic_ctrl_reg[] = {
WM2200_IN1L_CONTROL,
WM2200_IN2L_CONTROL,
WM2200_IN3L_CONTROL,
};
static __devinit int wm2200_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct wm2200_pdata *pdata = dev_get_platdata(&i2c->dev);
struct wm2200_priv *wm2200;
unsigned int reg;
int ret, i;
wm2200 = devm_kzalloc(&i2c->dev, sizeof(struct wm2200_priv),
GFP_KERNEL);
if (wm2200 == NULL)
return -ENOMEM;
wm2200->dev = &i2c->dev;
init_completion(&wm2200->fll_lock);
wm2200->regmap = regmap_init_i2c(i2c, &wm2200_regmap);
if (IS_ERR(wm2200->regmap)) {
ret = PTR_ERR(wm2200->regmap);
dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
ret);
goto err;
}
if (pdata)
wm2200->pdata = *pdata;
i2c_set_clientdata(i2c, wm2200);
for (i = 0; i < ARRAY_SIZE(wm2200->core_supplies); i++)
wm2200->core_supplies[i].supply = wm2200_core_supply_names[i];
ret = regulator_bulk_get(&i2c->dev, ARRAY_SIZE(wm2200->core_supplies),
wm2200->core_supplies);
if (ret != 0) {
dev_err(&i2c->dev, "Failed to request core supplies: %d\n",
ret);
goto err_regmap;
}
ret = regulator_bulk_enable(ARRAY_SIZE(wm2200->core_supplies),
wm2200->core_supplies);
if (ret != 0) {
dev_err(&i2c->dev, "Failed to enable core supplies: %d\n",
ret);
goto err_core;
}
if (wm2200->pdata.ldo_ena) {
ret = gpio_request_one(wm2200->pdata.ldo_ena,
GPIOF_OUT_INIT_HIGH, "WM2200 LDOENA");
if (ret < 0) {
dev_err(&i2c->dev, "Failed to request LDOENA %d: %d\n",
wm2200->pdata.ldo_ena, ret);
goto err_enable;
}
msleep(2);
}
if (wm2200->pdata.reset) {
ret = gpio_request_one(wm2200->pdata.reset,
GPIOF_OUT_INIT_HIGH, "WM2200 /RESET");
if (ret < 0) {
dev_err(&i2c->dev, "Failed to request /RESET %d: %d\n",
wm2200->pdata.reset, ret);
goto err_ldo;
}
}
ret = regmap_read(wm2200->regmap, WM2200_SOFTWARE_RESET, &reg);
if (ret < 0) {
dev_err(&i2c->dev, "Failed to read ID register: %d\n", ret);
goto err_reset;
}
switch (reg) {
case 0x2200:
break;
default:
dev_err(&i2c->dev, "Device is not a WM2200, ID is %x\n", reg);
ret = -EINVAL;
goto err_reset;
}
ret = regmap_read(wm2200->regmap, WM2200_DEVICE_REVISION, &reg);
if (ret < 0) {
dev_err(&i2c->dev, "Failed to read revision register\n");
goto err_reset;
}
wm2200->rev = reg & WM2200_DEVICE_REVISION_MASK;
dev_info(&i2c->dev, "revision %c\n", wm2200->rev + 'A');
switch (wm2200->rev) {
case 0:
ret = regmap_register_patch(wm2200->regmap, wm2200_reva_patch,
ARRAY_SIZE(wm2200_reva_patch));
if (ret != 0) {
dev_err(&i2c->dev, "Failed to register patch: %d\n",
ret);
}
break;
default:
break;
}
ret = wm2200_reset(wm2200);
if (ret < 0) {
dev_err(&i2c->dev, "Failed to issue reset\n");
goto err_reset;
}
for (i = 0; i < ARRAY_SIZE(wm2200->pdata.gpio_defaults); i++) {
if (!wm2200->pdata.gpio_defaults[i])
continue;
regmap_write(wm2200->regmap, WM2200_GPIO_CTRL_1 + i,
wm2200->pdata.gpio_defaults[i]);
}
for (i = 0; i < ARRAY_SIZE(wm2200_dig_vu); i++)
regmap_update_bits(wm2200->regmap, wm2200_dig_vu[i],
WM2200_OUT_VU, WM2200_OUT_VU);
/* Assign slots 1-6 to channels 1-6 for both TX and RX */
for (i = 0; i < 6; i++) {
regmap_write(wm2200->regmap, WM2200_AUDIO_IF_1_10 + i, i);
regmap_write(wm2200->regmap, WM2200_AUDIO_IF_1_16 + i, i);
}
for (i = 0; i < ARRAY_SIZE(wm2200->pdata.in_mode); i++) {
regmap_update_bits(wm2200->regmap, wm2200_mic_ctrl_reg[i],
WM2200_IN1_MODE_MASK |
WM2200_IN1_DMIC_SUP_MASK,
(wm2200->pdata.in_mode[i] <<
WM2200_IN1_MODE_SHIFT) |
(wm2200->pdata.dmic_sup[i] <<
WM2200_IN1_DMIC_SUP_SHIFT));
}
if (i2c->irq) {
ret = request_threaded_irq(i2c->irq, NULL, wm2200_irq,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
"wm2200", wm2200);
if (ret == 0)
regmap_update_bits(wm2200->regmap,
WM2200_INTERRUPT_STATUS_2_MASK,
WM2200_FLL_LOCK_EINT, 0);
else
dev_err(&i2c->dev, "Failed to request IRQ %d: %d\n",
i2c->irq, ret);
}
pm_runtime_set_active(&i2c->dev);
pm_runtime_enable(&i2c->dev);
pm_request_idle(&i2c->dev);
ret = snd_soc_register_codec(&i2c->dev, &soc_codec_wm2200,
&wm2200_dai, 1);
if (ret != 0) {
dev_err(&i2c->dev, "Failed to register CODEC: %d\n", ret);
goto err_pm_runtime;
}
return 0;
err_pm_runtime:
pm_runtime_disable(&i2c->dev);
err_reset:
if (wm2200->pdata.reset) {
gpio_set_value_cansleep(wm2200->pdata.reset, 0);
gpio_free(wm2200->pdata.reset);
}
err_ldo:
if (wm2200->pdata.ldo_ena) {
gpio_set_value_cansleep(wm2200->pdata.ldo_ena, 0);
gpio_free(wm2200->pdata.ldo_ena);
}
err_enable:
regulator_bulk_disable(ARRAY_SIZE(wm2200->core_supplies),
wm2200->core_supplies);
err_core:
regulator_bulk_free(ARRAY_SIZE(wm2200->core_supplies),
wm2200->core_supplies);
err_regmap:
regmap_exit(wm2200->regmap);
err:
return ret;
}
static __devexit int wm2200_i2c_remove(struct i2c_client *i2c)
{
struct wm2200_priv *wm2200 = i2c_get_clientdata(i2c);
snd_soc_unregister_codec(&i2c->dev);
if (i2c->irq)
free_irq(i2c->irq, wm2200);
if (wm2200->pdata.reset) {
gpio_set_value_cansleep(wm2200->pdata.reset, 0);
gpio_free(wm2200->pdata.reset);
}
if (wm2200->pdata.ldo_ena) {
gpio_set_value_cansleep(wm2200->pdata.ldo_ena, 0);
gpio_free(wm2200->pdata.ldo_ena);
}
regulator_bulk_free(ARRAY_SIZE(wm2200->core_supplies),
wm2200->core_supplies);
regmap_exit(wm2200->regmap);
return 0;
}
#ifdef CONFIG_PM_RUNTIME
static int wm2200_runtime_suspend(struct device *dev)
{
struct wm2200_priv *wm2200 = dev_get_drvdata(dev);
regcache_cache_only(wm2200->regmap, true);
regcache_mark_dirty(wm2200->regmap);
if (wm2200->pdata.ldo_ena)
gpio_set_value_cansleep(wm2200->pdata.ldo_ena, 0);
regulator_bulk_disable(ARRAY_SIZE(wm2200->core_supplies),
wm2200->core_supplies);
return 0;
}
static int wm2200_runtime_resume(struct device *dev)
{
struct wm2200_priv *wm2200 = dev_get_drvdata(dev);
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(wm2200->core_supplies),
wm2200->core_supplies);
if (ret != 0) {
dev_err(dev, "Failed to enable supplies: %d\n",
ret);
return ret;
}
if (wm2200->pdata.ldo_ena) {
gpio_set_value_cansleep(wm2200->pdata.ldo_ena, 1);
msleep(2);
}
regcache_cache_only(wm2200->regmap, false);
regcache_sync(wm2200->regmap);
return 0;
}
#endif
static struct dev_pm_ops wm2200_pm = {
SET_RUNTIME_PM_OPS(wm2200_runtime_suspend, wm2200_runtime_resume,
NULL)
};
static const struct i2c_device_id wm2200_i2c_id[] = {
{ "wm2200", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, wm2200_i2c_id);
static struct i2c_driver wm2200_i2c_driver = {
.driver = {
.name = "wm2200",
.owner = THIS_MODULE,
.pm = &wm2200_pm,
},
.probe = wm2200_i2c_probe,
.remove = __devexit_p(wm2200_i2c_remove),
.id_table = wm2200_i2c_id,
};
static int __init wm2200_modinit(void)
{
return i2c_add_driver(&wm2200_i2c_driver);
}
module_init(wm2200_modinit);
static void __exit wm2200_exit(void)
{
i2c_del_driver(&wm2200_i2c_driver);
}
module_exit(wm2200_exit);
MODULE_DESCRIPTION("ASoC WM2200 driver");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Jackeagle/android_kernel_htc_dlxub1 | drivers/net/usb/gl620a.c | 841 | 6612 | /*
* GeneSys GL620USB-A based links
* Copyright (C) 2001 by Jiun-Jie Huang <huangjj@genesyslogic.com.tw>
* Copyright (C) 2001 by Stanislav Brabec <utx@penguin.cz>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
// #define DEBUG // error path messages, extra info
// #define VERBOSE // more; success messages
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/usb/usbnet.h>
#include <linux/gfp.h>
/*
* GeneSys GL620USB-A (www.genesyslogic.com.tw)
*
* ... should partially interop with the Win32 driver for this hardware.
* The GeneSys docs imply there's some NDIS issue motivating this framing.
*
* Some info from GeneSys:
* - GL620USB-A is full duplex; GL620USB is only half duplex for bulk.
* (Some cables, like the BAFO-100c, use the half duplex version.)
* - For the full duplex model, the low bit of the version code says
* which side is which ("left/right").
* - For the half duplex type, a control/interrupt handshake settles
* the transfer direction. (That's disabled here, partially coded.)
* A control URB would block until other side writes an interrupt.
*
* Original code from Jiun-Jie Huang <huangjj@genesyslogic.com.tw>
* and merged into "usbnet" by Stanislav Brabec <utx@penguin.cz>.
*/
// control msg write command
#define GENELINK_CONNECT_WRITE 0xF0
// interrupt pipe index
#define GENELINK_INTERRUPT_PIPE 0x03
// interrupt read buffer size
#define INTERRUPT_BUFSIZE 0x08
// interrupt pipe interval value
#define GENELINK_INTERRUPT_INTERVAL 0x10
// max transmit packet number per transmit
#define GL_MAX_TRANSMIT_PACKETS 32
// max packet length
#define GL_MAX_PACKET_LEN 1514
// max receive buffer size
#define GL_RCV_BUF_SIZE \
(((GL_MAX_PACKET_LEN + 4) * GL_MAX_TRANSMIT_PACKETS) + 4)
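// On the wire each transfer starts with a 32-bit little-endian packet
// count, followed by that many packets, each prefixed with its own
// 32-bit little-endian byte length (see genelink_rx_fixup below).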
struct gl_packet {
__le32 packet_length;
char packet_data [1];
};
struct gl_header {
__le32 packet_count;
struct gl_packet packets;
};
static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
struct gl_header *header;
struct gl_packet *packet;
struct sk_buff *gl_skb;
u32 size;
u32 count;
/* This check is no longer done by usbnet */
if (skb->len < dev->net->hard_header_len)
return 0;
header = (struct gl_header *) skb->data;
// get the packet count of the received skb
count = le32_to_cpu(header->packet_count);
if (count > GL_MAX_TRANSMIT_PACKETS) {
dbg("genelink: invalid received packet count %u", count);
return 0;
}
// set the current packet pointer to the first packet
packet = &header->packets;
// decrement the length for the packet count size 4 bytes
skb_pull(skb, 4);
while (count > 1) {
// get the packet length
size = le32_to_cpu(packet->packet_length);
// this may be a broken packet
if (size > GL_MAX_PACKET_LEN) {
dbg("genelink: invalid rx length %d", size);
return 0;
}
// allocate the skb for the individual packet
gl_skb = alloc_skb(size, GFP_ATOMIC);
if (gl_skb) {
// copy the packet data to the new skb
memcpy(skb_put(gl_skb, size),
packet->packet_data, size);
usbnet_skb_return(dev, gl_skb);
}
// advance to the next packet
packet = (struct gl_packet *)&packet->packet_data[size];
count--;
// shift the data pointer to the next gl_packet
skb_pull(skb, size + 4);
}
// skip the packet length field 4 bytes
skb_pull(skb, 4);
if (skb->len > GL_MAX_PACKET_LEN) {
dbg("genelink: invalid rx length %d", skb->len);
return 0;
}
return 1;
}
static struct sk_buff *
genelink_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
{
int padlen;
int length = skb->len;
int headroom = skb_headroom(skb);
int tailroom = skb_tailroom(skb);
__le32 *packet_count;
__le32 *packet_len;
// FIXME: magic numbers, bleech
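// The frame header is a 4 byte packet count plus one 4 byte length per
// packet (a single packet on transmit); a trailing pad byte is added
// when the framed length would be an exact multiple of 64 (the
// full-speed bulk maxpacket), avoiding a zero length packet boundary.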
padlen = ((skb->len + (4 + 4*1)) % 64) ? 0 : 1;
if ((!skb_cloned(skb))
&& ((headroom + tailroom) >= (padlen + (4 + 4*1)))) {
if ((headroom < (4 + 4*1)) || (tailroom < padlen)) {
skb->data = memmove(skb->head + (4 + 4*1),
skb->data, skb->len);
skb_set_tail_pointer(skb, skb->len);
}
} else {
struct sk_buff *skb2;
skb2 = skb_copy_expand(skb, (4 + 4*1) , padlen, flags);
dev_kfree_skb_any(skb);
skb = skb2;
if (!skb)
return NULL;
}
// attach the packet count to the header
packet_count = (__le32 *) skb_push(skb, (4 + 4*1));
packet_len = packet_count + 1;
*packet_count = cpu_to_le32(1);
*packet_len = cpu_to_le32(length);
// add padding byte
if ((skb->len % dev->maxpacket) == 0)
skb_put(skb, 1);
return skb;
}
static int genelink_bind(struct usbnet *dev, struct usb_interface *intf)
{
dev->hard_mtu = GL_RCV_BUF_SIZE;
dev->net->hard_header_len += 4;
dev->in = usb_rcvbulkpipe(dev->udev, dev->driver_info->in);
dev->out = usb_sndbulkpipe(dev->udev, dev->driver_info->out);
return 0;
}
static const struct driver_info genelink_info = {
.description = "Genesys GeneLink",
.flags = FLAG_POINTTOPOINT | FLAG_FRAMING_GL | FLAG_NO_SETINT,
.bind = genelink_bind,
.rx_fixup = genelink_rx_fixup,
.tx_fixup = genelink_tx_fixup,
.in = 1, .out = 2,
#ifdef GENELINK_ACK
.check_connect = genelink_check_connect,
#endif
};
static const struct usb_device_id products [] = {
{
USB_DEVICE(0x05e3, 0x0502), // GL620USB-A
.driver_info = (unsigned long) &genelink_info,
},
/* NOT: USB_DEVICE(0x05e3, 0x0501), // GL620USB
* that's half duplex, not currently supported
*/
{ }, // END
};
MODULE_DEVICE_TABLE(usb, products);
static struct usb_driver gl620a_driver = {
.name = "gl620a",
.id_table = products,
.probe = usbnet_probe,
.disconnect = usbnet_disconnect,
.suspend = usbnet_suspend,
.resume = usbnet_resume,
};
module_usb_driver(gl620a_driver);
MODULE_AUTHOR("Jiun-Jie Huang");
MODULE_DESCRIPTION("GL620-USB-A Host-to-Host Link cables");
MODULE_LICENSE("GPL");
| gpl-2.0 |
nekromant/linux | drivers/rtc/rtc-ds1347.c | 1353 | 4195 | /* rtc-ds1347.c
*
* Driver for Dallas Semiconductor DS1347 Low Current, SPI Compatible
* Real Time Clock
*
* Author : Raghavendra Chandra Ganiga <ravi23ganiga@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/spi/spi.h>
#include <linux/bcd.h>
/* Registers in ds1347 rtc */
#define DS1347_SECONDS_REG 0x01
#define DS1347_MINUTES_REG 0x03
#define DS1347_HOURS_REG 0x05
#define DS1347_DATE_REG 0x07
#define DS1347_MONTH_REG 0x09
#define DS1347_DAY_REG 0x0B
#define DS1347_YEAR_REG 0x0D
#define DS1347_CONTROL_REG 0x0F
#define DS1347_STATUS_REG 0x17
#define DS1347_CLOCK_BURST 0x3F
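/* Register addresses: bit 7 set selects a read, clear selects a write */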
static int ds1347_read_reg(struct device *dev, unsigned char address,
unsigned char *data)
{
struct spi_device *spi = to_spi_device(dev);
*data = address | 0x80;
return spi_write_then_read(spi, data, 1, data, 1);
}
static int ds1347_write_reg(struct device *dev, unsigned char address,
unsigned char data)
{
struct spi_device *spi = to_spi_device(dev);
unsigned char buf[2];
buf[0] = address & 0x7F;
buf[1] = data;
return spi_write_then_read(spi, buf, 2, NULL, 0);
}
static int ds1347_read_time(struct device *dev, struct rtc_time *dt)
{
struct spi_device *spi = to_spi_device(dev);
int err;
unsigned char buf[8];
buf[0] = DS1347_CLOCK_BURST | 0x80;
err = spi_write_then_read(spi, buf, 1, buf, 8);
if (err)
return err;
dt->tm_sec = bcd2bin(buf[0]);
dt->tm_min = bcd2bin(buf[1]);
dt->tm_hour = bcd2bin(buf[2] & 0x3F);
dt->tm_mday = bcd2bin(buf[3]);
dt->tm_mon = bcd2bin(buf[4]) - 1;
dt->tm_wday = bcd2bin(buf[5]) - 1;
dt->tm_year = bcd2bin(buf[6]) + 100;
return rtc_valid_tm(dt);
}
static int ds1347_set_time(struct device *dev, struct rtc_time *dt)
{
struct spi_device *spi = to_spi_device(dev);
unsigned char buf[9];
buf[0] = DS1347_CLOCK_BURST & 0x7F;
buf[1] = bin2bcd(dt->tm_sec);
buf[2] = bin2bcd(dt->tm_min);
buf[3] = (bin2bcd(dt->tm_hour) & 0x3F);
buf[4] = bin2bcd(dt->tm_mday);
buf[5] = bin2bcd(dt->tm_mon + 1);
buf[6] = bin2bcd(dt->tm_wday + 1);
/* Linux counts years from 1900, so tm_year here is around 100;
the RTC stores only the two-digit year 00-99 */
dt->tm_year = dt->tm_year % 100;
buf[7] = bin2bcd(dt->tm_year);
buf[8] = bin2bcd(0x00);
/* write the rtc settings */
return spi_write_then_read(spi, buf, 9, NULL, 0);
}
static const struct rtc_class_ops ds1347_rtc_ops = {
.read_time = ds1347_read_time,
.set_time = ds1347_set_time,
};
static int ds1347_probe(struct spi_device *spi)
{
struct rtc_device *rtc;
unsigned char data;
int res;
/* spi setup with ds1347 in mode 3 and bits per word as 8 */
spi->mode = SPI_MODE_3;
spi->bits_per_word = 8;
spi_setup(spi);
/* RTC Settings */
res = ds1347_read_reg(&spi->dev, DS1347_SECONDS_REG, &data);
if (res)
return res;
/* Disable the write protect of rtc */
ds1347_read_reg(&spi->dev, DS1347_CONTROL_REG, &data);
data = data & ~(1<<7);
ds1347_write_reg(&spi->dev, DS1347_CONTROL_REG, data);
/* Enable the oscillator, clear the oscillator stop flag and the
glitch filter to reduce current consumption */
ds1347_read_reg(&spi->dev, DS1347_STATUS_REG, &data);
data = data & 0x1B;
ds1347_write_reg(&spi->dev, DS1347_STATUS_REG, data);
/* display the settings */
ds1347_read_reg(&spi->dev, DS1347_CONTROL_REG, &data);
dev_info(&spi->dev, "DS1347 RTC CTRL Reg = 0x%02x\n", data);
ds1347_read_reg(&spi->dev, DS1347_STATUS_REG, &data);
dev_info(&spi->dev, "DS1347 RTC Status Reg = 0x%02x\n", data);
rtc = devm_rtc_device_register(&spi->dev, "ds1347",
&ds1347_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
spi_set_drvdata(spi, rtc);
return 0;
}
static struct spi_driver ds1347_driver = {
.driver = {
.name = "ds1347",
.owner = THIS_MODULE,
},
.probe = ds1347_probe,
};
module_spi_driver(ds1347_driver);
MODULE_DESCRIPTION("DS1347 SPI RTC DRIVER");
MODULE_AUTHOR("Raghavendra C Ganiga <ravi23ganiga@gmail.com>");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
glewarne/SM-G928-CLT | drivers/usb/serial/mos7840.c | 1609 | 75206 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Clean ups from Moschip version and a few ioctl implementations by:
* Paul B Schroeder <pschroeder "at" uplogix "dot" com>
*
* Originally based on drivers/usb/serial/io_edgeport.c which is:
* Copyright (C) 2000 Inside Out Networks, All rights reserved.
* Copyright (C) 2001-2002 Greg Kroah-Hartman <greg@kroah.com>
*
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/serial.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
#define DRIVER_DESC "Moschip 7840/7820 USB Serial Driver"
/*
* 16C50 UART register defines
*/
#define LCR_BITS_5 0x00 /* 5 bits/char */
#define LCR_BITS_6 0x01 /* 6 bits/char */
#define LCR_BITS_7 0x02 /* 7 bits/char */
#define LCR_BITS_8 0x03 /* 8 bits/char */
#define LCR_BITS_MASK 0x03 /* Mask for bits/char field */
#define LCR_STOP_1 0x00 /* 1 stop bit */
#define LCR_STOP_1_5 0x04 /* 1.5 stop bits (if 5 bits/char) */
#define LCR_STOP_2 0x04 /* 2 stop bits (if 6-8 bits/char) */
#define LCR_STOP_MASK 0x04 /* Mask for stop bits field */
#define LCR_PAR_NONE 0x00 /* No parity */
#define LCR_PAR_ODD 0x08 /* Odd parity */
#define LCR_PAR_EVEN 0x18 /* Even parity */
#define LCR_PAR_MARK 0x28 /* Force parity bit to 1 */
#define LCR_PAR_SPACE 0x38 /* Force parity bit to 0 */
#define LCR_PAR_MASK 0x38 /* Mask for parity field */
#define LCR_SET_BREAK 0x40 /* Set Break condition */
#define LCR_DL_ENABLE 0x80 /* Enable access to divisor latch */
#define MCR_DTR 0x01 /* Assert DTR */
#define MCR_RTS 0x02 /* Assert RTS */
#define MCR_OUT1 0x04 /* Loopback only: Sets state of RI */
#define MCR_MASTER_IE 0x08 /* Enable interrupt outputs */
#define MCR_LOOPBACK 0x10 /* Set internal (digital) loopback mode */
#define MCR_XON_ANY 0x20 /* Enable any char to exit XOFF mode */
#define MOS7840_MSR_CTS 0x10 /* Current state of CTS */
#define MOS7840_MSR_DSR 0x20 /* Current state of DSR */
#define MOS7840_MSR_RI 0x40 /* Current state of RI */
#define MOS7840_MSR_CD 0x80 /* Current state of CD */
/*
* Defines used for sending commands to port
*/
#define MOS_WDR_TIMEOUT 5000 /* default urb timeout */
#define MOS_PORT1 0x0200
#define MOS_PORT2 0x0300
#define MOS_VENREG 0x0000
#define MOS_MAX_PORT 0x02
#define MOS_WRITE 0x0E
#define MOS_READ 0x0D
/* Requests */
#define MCS_RD_RTYPE 0xC0
#define MCS_WR_RTYPE 0x40
#define MCS_RDREQ 0x0D
#define MCS_WRREQ 0x0E
#define MCS_CTRL_TIMEOUT 500
#define VENDOR_READ_LENGTH (0x01)
#define MAX_NAME_LEN 64
#define ZLP_REG1 0x3A /* Zero_Flag_Reg1 58 */
#define ZLP_REG5 0x3E /* Zero_Flag_Reg5 62 */
/* For higher baud Rates use TIOCEXBAUD */
#define TIOCEXBAUD 0x5462
/* vendor id and device id defines */
/* The native mos7840/7820 component */
#define USB_VENDOR_ID_MOSCHIP 0x9710
#define MOSCHIP_DEVICE_ID_7840 0x7840
#define MOSCHIP_DEVICE_ID_7820 0x7820
#define MOSCHIP_DEVICE_ID_7810 0x7810
/* The native component can have its vendor/device IDs overridden
* in vendor-specific implementations. Such devices can be handled
* by making a change here, in id_table.
*/
#define USB_VENDOR_ID_BANDB 0x0856
#define BANDB_DEVICE_ID_USO9ML2_2 0xAC22
#define BANDB_DEVICE_ID_USO9ML2_2P 0xBC00
#define BANDB_DEVICE_ID_USO9ML2_4 0xAC24
#define BANDB_DEVICE_ID_USO9ML2_4P 0xBC01
#define BANDB_DEVICE_ID_US9ML2_2 0xAC29
#define BANDB_DEVICE_ID_US9ML2_4 0xAC30
#define BANDB_DEVICE_ID_USPTL4_2 0xAC31
#define BANDB_DEVICE_ID_USPTL4_4 0xAC32
#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42
#define BANDB_DEVICE_ID_USOPTL4_2P 0xBC02
#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44
#define BANDB_DEVICE_ID_USOPTL4_4P 0xBC03
#define BANDB_DEVICE_ID_USOPTL2_4 0xAC24
/* This driver also supports
* ATEN UC2324 device using Moschip MCS7840
* ATEN UC2322 device using Moschip MCS7820
*/
#define USB_VENDOR_ID_ATENINTL 0x0557
#define ATENINTL_DEVICE_ID_UC2324 0x2011
#define ATENINTL_DEVICE_ID_UC2322 0x7820
/* Interrupt Routine Defines */
#define SERIAL_IIR_RLS 0x06
#define SERIAL_IIR_MS 0x00
/*
* Emulation of the bit mask on the LINE STATUS REGISTER.
*/
#define SERIAL_LSR_DR 0x0001
#define SERIAL_LSR_OE 0x0002
#define SERIAL_LSR_PE 0x0004
#define SERIAL_LSR_FE 0x0008
#define SERIAL_LSR_BI 0x0010
#define MOS_MSR_DELTA_CTS 0x10
#define MOS_MSR_DELTA_DSR 0x20
#define MOS_MSR_DELTA_RI 0x40
#define MOS_MSR_DELTA_CD 0x80
/* Serial Port register Address */
#define INTERRUPT_ENABLE_REGISTER ((__u16)(0x01))
#define FIFO_CONTROL_REGISTER ((__u16)(0x02))
#define LINE_CONTROL_REGISTER ((__u16)(0x03))
#define MODEM_CONTROL_REGISTER ((__u16)(0x04))
#define LINE_STATUS_REGISTER ((__u16)(0x05))
#define MODEM_STATUS_REGISTER ((__u16)(0x06))
#define SCRATCH_PAD_REGISTER ((__u16)(0x07))
#define DIVISOR_LATCH_LSB ((__u16)(0x00))
#define DIVISOR_LATCH_MSB ((__u16)(0x01))
#define CLK_MULTI_REGISTER ((__u16)(0x02))
#define CLK_START_VALUE_REGISTER ((__u16)(0x03))
#define GPIO_REGISTER ((__u16)(0x07))
#define SERIAL_LCR_DLAB ((__u16)(0x0080))
/*
* URB POOL related defines
*/
#define NUM_URBS 16 /* URB Count */
#define URB_TRANSFER_BUFFER_SIZE 32 /* URB Size */
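/* With the values above, at most NUM_URBS * URB_TRANSFER_BUFFER_SIZE
 * = 16 * 32 = 512 bytes of write data can be in flight per port. */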
/* LED on/off milliseconds */
#define LED_ON_MS 500
#define LED_OFF_MS 500
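/* Used by the activity-LED timers below: the LED is switched on for
 * LED_ON_MS, then off, and the LED-busy flag is cleared after roughly
 * LED_OFF_MS more (see mos7840_led_activity() and its callbacks). */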
enum mos7840_flag {
MOS7840_FLAG_CTRL_BUSY,
MOS7840_FLAG_LED_BUSY,
};
static const struct usb_device_id id_table[] = {
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7810)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
{} /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
/* This structure holds all of the local port information */
struct moschip_port {
int port_num; /* Actual port number in the device (1, 2, etc.) */
struct urb *write_urb; /* write URB for this port */
struct urb *read_urb; /* read URB for this port */
__u8 shadowLCR; /* last LCR value received */
__u8 shadowMCR; /* last MCR value received */
char open;
char open_ports;
wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */
struct usb_serial_port *port; /* loop back to the owner of this object */
/* Offsets */
__u8 SpRegOffset;
__u8 ControlRegOffset;
__u8 DcrRegOffset;
/* for processing control URBS in interrupt context */
struct urb *control_urb;
struct usb_ctrlrequest *dr;
char *ctrl_buf;
int MsrLsr;
spinlock_t pool_lock;
struct urb *write_urb_pool[NUM_URBS];
char busy[NUM_URBS];
bool read_urb_busy;
/* For device(s) with LED indicator */
bool has_led;
struct timer_list led_timer1; /* Timer for LED on */
struct timer_list led_timer2; /* Timer for LED off */
struct urb *led_urb;
struct usb_ctrlrequest *led_dr;
unsigned long flags;
};
/*
 * mos7840_set_reg_sync
 *	Synchronously writes @val to the device control register @reg
 *	using usb_control_msg() on the default control-out pipe.
 */
static int mos7840_set_reg_sync(struct usb_serial_port *port, __u16 reg,
__u16 val)
{
struct usb_device *dev = port->serial->dev;
val = val & 0x00ff;
dev_dbg(&port->dev, "mos7840_set_reg_sync offset is %x, value %x\n", reg, val);
return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), MCS_WRREQ,
MCS_WR_RTYPE, val, reg, NULL, 0,
MOS_WDR_TIMEOUT);
}
/*
 * mos7840_get_reg_sync
 *	Synchronously reads the device control register @reg into @val
 *	using usb_control_msg() on the default control-in pipe.
 */
static int mos7840_get_reg_sync(struct usb_serial_port *port, __u16 reg,
__u16 *val)
{
struct usb_device *dev = port->serial->dev;
int ret = 0;
u8 *buf;
buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
MCS_RD_RTYPE, 0, reg, buf, VENDOR_READ_LENGTH,
MOS_WDR_TIMEOUT);
*val = buf[0];
dev_dbg(&port->dev, "%s offset is %x, return val %x\n", __func__, reg, *val);
kfree(buf);
return ret;
}
/*
 * mos7840_set_uart_reg
 *	Synchronously writes @val to the UART register @reg of this port,
 *	encoding the port's application number in the high byte of wValue.
 */
static int mos7840_set_uart_reg(struct usb_serial_port *port, __u16 reg,
__u16 val)
{
struct usb_device *dev = port->serial->dev;
val = val & 0x00ff;
/* For the UART control registers, the application (port) number
   must be OR'ed into the high byte of the value */
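/*
 * Worked example (from the logic below): on a 4-port device the
 * first port (port->number - minor == 0) gets application number 1,
 * so writing 0x03 to a register sends wValue 0x0103.
 */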
if (port->serial->num_ports == 4) {
val |= (((__u16) port->number -
(__u16) (port->serial->minor)) + 1) << 8;
} else {
if (((__u16) port->number - (__u16) (port->serial->minor)) == 0) {
val |= (((__u16) port->number -
(__u16) (port->serial->minor)) + 1) << 8;
} else {
val |= (((__u16) port->number -
(__u16) (port->serial->minor)) + 2) << 8;
}
}
dev_dbg(&port->dev, "%s application number is %x\n", __func__, val);
return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), MCS_WRREQ,
MCS_WR_RTYPE, val, reg, NULL, 0,
MOS_WDR_TIMEOUT);
}
/*
 * mos7840_get_uart_reg
 *	Synchronously reads the UART register @reg of this port into @val
 *	using usb_control_msg() on the default control-in pipe.
 */
static int mos7840_get_uart_reg(struct usb_serial_port *port, __u16 reg,
__u16 *val)
{
struct usb_device *dev = port->serial->dev;
int ret = 0;
__u16 Wval;
u8 *buf;
buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
if (!buf)
return -ENOMEM;
/* Wval is same as application number */
if (port->serial->num_ports == 4) {
Wval =
(((__u16) port->number - (__u16) (port->serial->minor)) +
1) << 8;
} else {
if (((__u16) port->number - (__u16) (port->serial->minor)) == 0) {
Wval = (((__u16) port->number -
(__u16) (port->serial->minor)) + 1) << 8;
} else {
Wval = (((__u16) port->number -
(__u16) (port->serial->minor)) + 2) << 8;
}
}
dev_dbg(&port->dev, "%s application number is %x\n", __func__, Wval);
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
MCS_RD_RTYPE, Wval, reg, buf, VENDOR_READ_LENGTH,
MOS_WDR_TIMEOUT);
*val = buf[0];
kfree(buf);
return ret;
}
static void mos7840_dump_serial_port(struct usb_serial_port *port,
struct moschip_port *mos7840_port)
{
dev_dbg(&port->dev, "SpRegOffset is %2x\n", mos7840_port->SpRegOffset);
dev_dbg(&port->dev, "ControlRegOffset is %2x\n", mos7840_port->ControlRegOffset);
dev_dbg(&port->dev, "DCRRegOffset is %2x\n", mos7840_port->DcrRegOffset);
}
/************************************************************************/
/************************************************************************/
/* I N T E R F A C E F U N C T I O N S */
/* I N T E R F A C E F U N C T I O N S */
/************************************************************************/
/************************************************************************/
static inline void mos7840_set_port_private(struct usb_serial_port *port,
struct moschip_port *data)
{
usb_set_serial_port_data(port, (void *)data);
}
static inline struct moschip_port *mos7840_get_port_private(struct
usb_serial_port
*port)
{
return (struct moschip_port *)usb_get_serial_port_data(port);
}
static void mos7840_handle_new_msr(struct moschip_port *port, __u8 new_msr)
{
struct moschip_port *mos7840_port;
struct async_icount *icount;
mos7840_port = port;
if (new_msr &
(MOS_MSR_DELTA_CTS | MOS_MSR_DELTA_DSR | MOS_MSR_DELTA_RI |
MOS_MSR_DELTA_CD)) {
icount = &mos7840_port->port->icount;
/* update input line counters */
if (new_msr & MOS_MSR_DELTA_CTS)
icount->cts++;
if (new_msr & MOS_MSR_DELTA_DSR)
icount->dsr++;
if (new_msr & MOS_MSR_DELTA_CD)
icount->dcd++;
if (new_msr & MOS_MSR_DELTA_RI)
icount->rng++;
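/* wake any process sleeping in TIOCMIWAIT on a modem-status change */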
wake_up_interruptible(&port->port->port.delta_msr_wait);
}
}
static void mos7840_handle_new_lsr(struct moschip_port *port, __u8 new_lsr)
{
struct async_icount *icount;
if (new_lsr & SERIAL_LSR_BI) {
/*
 * Parity and framing errors only count if they occur
 * exclusive of a break being received.
 */
new_lsr &= (__u8) (SERIAL_LSR_OE | SERIAL_LSR_BI);
}
/* update input line counters */
icount = &port->port->icount;
if (new_lsr & SERIAL_LSR_BI)
icount->brk++;
if (new_lsr & SERIAL_LSR_OE)
icount->overrun++;
if (new_lsr & SERIAL_LSR_PE)
icount->parity++;
if (new_lsr & SERIAL_LSR_FE)
icount->frame++;
}
/************************************************************************/
/************************************************************************/
/* U S B C A L L B A C K F U N C T I O N S */
/* U S B C A L L B A C K F U N C T I O N S */
/************************************************************************/
/************************************************************************/
static void mos7840_control_callback(struct urb *urb)
{
unsigned char *data;
struct moschip_port *mos7840_port;
struct device *dev = &urb->dev->dev;
__u8 regval = 0x0;
int status = urb->status;
mos7840_port = urb->context;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(dev, "%s - urb shutting down with status: %d\n", __func__, status);
goto out;
default:
dev_dbg(dev, "%s - nonzero urb status received: %d\n", __func__, status);
goto out;
}
dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length);
dev_dbg(dev, "%s mos7840_port->MsrLsr is %d port %d\n", __func__,
mos7840_port->MsrLsr, mos7840_port->port_num);
data = urb->transfer_buffer;
regval = (__u8) data[0];
dev_dbg(dev, "%s data is %x\n", __func__, regval);
if (mos7840_port->MsrLsr == 0)
mos7840_handle_new_msr(mos7840_port, regval);
else if (mos7840_port->MsrLsr == 1)
mos7840_handle_new_lsr(mos7840_port, regval);
out:
clear_bit_unlock(MOS7840_FLAG_CTRL_BUSY, &mos7840_port->flags);
}
static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
__u16 *val)
{
struct usb_device *dev = mcs->port->serial->dev;
struct usb_ctrlrequest *dr = mcs->dr;
unsigned char *buffer = mcs->ctrl_buf;
int ret;
if (test_and_set_bit_lock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags))
return -EBUSY;
dr->bRequestType = MCS_RD_RTYPE;
dr->bRequest = MCS_RDREQ;
dr->wValue = cpu_to_le16(Wval); /* 0 */
dr->wIndex = cpu_to_le16(reg);
dr->wLength = cpu_to_le16(2);
usb_fill_control_urb(mcs->control_urb, dev, usb_rcvctrlpipe(dev, 0),
(unsigned char *)dr, buffer, 2,
mos7840_control_callback, mcs);
mcs->control_urb->transfer_buffer_length = 2;
ret = usb_submit_urb(mcs->control_urb, GFP_ATOMIC);
if (ret)
clear_bit_unlock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags);
return ret;
}
static void mos7840_set_led_callback(struct urb *urb)
{
switch (urb->status) {
case 0:
/* Success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* This urb is terminated, clean up */
dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d",
__func__, urb->status);
break;
default:
dev_dbg(&urb->dev->dev, "%s - nonzero urb status received: %d",
__func__, urb->status);
}
}
static void mos7840_set_led_async(struct moschip_port *mcs, __u16 wval,
__u16 reg)
{
struct usb_device *dev = mcs->port->serial->dev;
struct usb_ctrlrequest *dr = mcs->led_dr;
dr->bRequestType = MCS_WR_RTYPE;
dr->bRequest = MCS_WRREQ;
dr->wValue = cpu_to_le16(wval);
dr->wIndex = cpu_to_le16(reg);
dr->wLength = cpu_to_le16(0);
usb_fill_control_urb(mcs->led_urb, dev, usb_sndctrlpipe(dev, 0),
(unsigned char *)dr, NULL, 0, mos7840_set_led_callback, NULL);
usb_submit_urb(mcs->led_urb, GFP_ATOMIC);
}
static void mos7840_set_led_sync(struct usb_serial_port *port, __u16 reg,
__u16 val)
{
struct usb_device *dev = port->serial->dev;
usb_control_msg(dev, usb_sndctrlpipe(dev, 0), MCS_WRREQ, MCS_WR_RTYPE,
val, reg, NULL, 0, MOS_WDR_TIMEOUT);
}
static void mos7840_led_off(unsigned long arg)
{
struct moschip_port *mcs = (struct moschip_port *) arg;
/* Turn off LED */
mos7840_set_led_async(mcs, 0x0300, MODEM_CONTROL_REGISTER);
mod_timer(&mcs->led_timer2,
jiffies + msecs_to_jiffies(LED_OFF_MS));
}
static void mos7840_led_flag_off(unsigned long arg)
{
struct moschip_port *mcs = (struct moschip_port *) arg;
clear_bit_unlock(MOS7840_FLAG_LED_BUSY, &mcs->flags);
}
static void mos7840_led_activity(struct usb_serial_port *port)
{
struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
if (test_and_set_bit_lock(MOS7840_FLAG_LED_BUSY, &mos7840_port->flags))
return;
mos7840_set_led_async(mos7840_port, 0x0301, MODEM_CONTROL_REGISTER);
mod_timer(&mos7840_port->led_timer1,
jiffies + msecs_to_jiffies(LED_ON_MS));
}
/*****************************************************************************
* mos7840_interrupt_callback
* this is the callback function for when we have received data on the
* interrupt endpoint.
*****************************************************************************/
static void mos7840_interrupt_callback(struct urb *urb)
{
int result;
int length;
struct moschip_port *mos7840_port;
struct usb_serial *serial;
__u16 Data;
unsigned char *data;
__u8 sp[5], st;
int i, rv = 0;
__u16 wval, wreg = 0;
int status = urb->status;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n",
__func__, status);
return;
default:
dev_dbg(&urb->dev->dev, "%s - nonzero urb status received: %d\n",
__func__, status);
goto exit;
}
length = urb->actual_length;
data = urb->transfer_buffer;
serial = urb->context;
/* The Moschip device sends 5 bytes:
* Byte 1 IIR Port 1 (port.number is 0)
* Byte 2 IIR Port 2 (port.number is 1)
* Byte 3 IIR Port 3 (port.number is 2)
* Byte 4 IIR Port 4 (port.number is 3)
* Byte 5 FIFO status for both */
if (length > 5) {
dev_dbg(&urb->dev->dev, "%s", "Wrong data !!!\n");
return;
}
sp[0] = (__u8) data[0];
sp[1] = (__u8) data[1];
sp[2] = (__u8) data[2];
sp[3] = (__u8) data[3];
st = (__u8) data[4];
for (i = 0; i < serial->num_ports; i++) {
mos7840_port = mos7840_get_port_private(serial->port[i]);
wval =
(((__u16) serial->port[i]->number -
(__u16) (serial->minor)) + 1) << 8;
if (mos7840_port->open) {
if (sp[i] & 0x01) {
dev_dbg(&urb->dev->dev, "SP%d No Interrupt !!!\n", i);
} else {
switch (sp[i] & 0x0f) {
case SERIAL_IIR_RLS:
dev_dbg(&urb->dev->dev, "Serial Port %d: Receiver status error or \n", i);
dev_dbg(&urb->dev->dev, "address bit detected in 9-bit mode\n");
mos7840_port->MsrLsr = 1;
wreg = LINE_STATUS_REGISTER;
break;
case SERIAL_IIR_MS:
dev_dbg(&urb->dev->dev, "Serial Port %d: Modem status change\n", i);
mos7840_port->MsrLsr = 0;
wreg = MODEM_STATUS_REGISTER;
break;
}
rv = mos7840_get_reg(mos7840_port, wval, wreg, &Data);
}
}
}
if (!(rv < 0))
/* the completion handler for the control urb will resubmit */
return;
exit:
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result) {
dev_err(&urb->dev->dev,
"%s - Error %d submitting interrupt urb\n",
__func__, result);
}
}
static int mos7840_port_paranoia_check(struct usb_serial_port *port,
const char *function)
{
if (!port) {
pr_debug("%s - port == NULL\n", function);
return -1;
}
if (!port->serial) {
pr_debug("%s - port->serial == NULL\n", function);
return -1;
}
return 0;
}
/* Inline functions to check the sanity of a pointer that is passed to us */
static int mos7840_serial_paranoia_check(struct usb_serial *serial,
const char *function)
{
if (!serial) {
pr_debug("%s - serial == NULL\n", function);
return -1;
}
if (!serial->type) {
pr_debug("%s - serial->type == NULL!\n", function);
return -1;
}
return 0;
}
static struct usb_serial *mos7840_get_usb_serial(struct usb_serial_port *port,
const char *function)
{
/* if no port was specified, or it fails a paranoia check */
if (!port ||
mos7840_port_paranoia_check(port, function) ||
mos7840_serial_paranoia_check(port->serial, function)) {
/* then say that we don't have a valid usb_serial thing,
* which will end up generating -ENODEV return values */
return NULL;
}
return port->serial;
}
/*****************************************************************************
* mos7840_bulk_in_callback
* this is the callback function for when we have received data on the
* bulk in endpoint.
*****************************************************************************/
static void mos7840_bulk_in_callback(struct urb *urb)
{
int retval;
unsigned char *data;
struct usb_serial *serial;
struct usb_serial_port *port;
struct moschip_port *mos7840_port;
int status = urb->status;
mos7840_port = urb->context;
if (!mos7840_port)
return;
if (status) {
dev_dbg(&urb->dev->dev, "nonzero read bulk status received: %d\n", status);
mos7840_port->read_urb_busy = false;
return;
}
port = mos7840_port->port;
if (mos7840_port_paranoia_check(port, __func__)) {
mos7840_port->read_urb_busy = false;
return;
}
serial = mos7840_get_usb_serial(port, __func__);
if (!serial) {
mos7840_port->read_urb_busy = false;
return;
}
data = urb->transfer_buffer;
usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);
if (urb->actual_length) {
struct tty_port *tport = &mos7840_port->port->port;
tty_insert_flip_string(tport, data, urb->actual_length);
tty_flip_buffer_push(tport);
port->icount.rx += urb->actual_length;
dev_dbg(&port->dev, "icount.rx is %d:\n", port->icount.rx);
}
if (!mos7840_port->read_urb) {
dev_dbg(&port->dev, "%s", "URB KILLED !!!\n");
mos7840_port->read_urb_busy = false;
return;
}
if (mos7840_port->has_led)
mos7840_led_activity(port);
mos7840_port->read_urb_busy = true;
retval = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC);
if (retval) {
dev_dbg(&port->dev, "usb_submit_urb(read bulk) failed, retval = %d\n", retval);
mos7840_port->read_urb_busy = false;
}
}
/*****************************************************************************
* mos7840_bulk_out_data_callback
* this is the callback function for when we have finished sending
* serial data on the bulk out endpoint.
*****************************************************************************/
static void mos7840_bulk_out_data_callback(struct urb *urb)
{
struct moschip_port *mos7840_port;
struct usb_serial_port *port;
int status = urb->status;
int i;
mos7840_port = urb->context;
port = mos7840_port->port;
spin_lock(&mos7840_port->pool_lock);
for (i = 0; i < NUM_URBS; i++) {
if (urb == mos7840_port->write_urb_pool[i]) {
mos7840_port->busy[i] = 0;
break;
}
}
spin_unlock(&mos7840_port->pool_lock);
if (status) {
dev_dbg(&port->dev, "nonzero write bulk status received:%d\n", status);
return;
}
if (mos7840_port_paranoia_check(port, __func__))
return;
if (mos7840_port->open)
tty_port_tty_wakeup(&port->port);
}
/************************************************************************/
/* D R I V E R T T Y I N T E R F A C E F U N C T I O N S */
/************************************************************************/
/*****************************************************************************
* mos7840_open
* this function is called by the tty driver when a port is opened
* If successful, we return 0
* Otherwise we return a negative error number.
*****************************************************************************/
static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
{
int response;
int j;
struct usb_serial *serial;
struct urb *urb;
__u16 Data;
int status;
struct moschip_port *mos7840_port;
struct moschip_port *port0;
if (mos7840_port_paranoia_check(port, __func__))
return -ENODEV;
serial = port->serial;
if (mos7840_serial_paranoia_check(serial, __func__))
return -ENODEV;
mos7840_port = mos7840_get_port_private(port);
port0 = mos7840_get_port_private(serial->port[0]);
if (mos7840_port == NULL || port0 == NULL)
return -ENODEV;
usb_clear_halt(serial->dev, port->write_urb->pipe);
usb_clear_halt(serial->dev, port->read_urb->pipe);
port0->open_ports++;
/* Initialising the write urb pool */
for (j = 0; j < NUM_URBS; ++j) {
urb = usb_alloc_urb(0, GFP_KERNEL);
mos7840_port->write_urb_pool[j] = urb;
if (urb == NULL) {
dev_err(&port->dev, "No more urbs???\n");
continue;
}
urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
GFP_KERNEL);
if (!urb->transfer_buffer) {
usb_free_urb(urb);
mos7840_port->write_urb_pool[j] = NULL;
dev_err(&port->dev,
"%s-out of memory for urb buffers.\n",
__func__);
continue;
}
}
/*****************************************************************************
* Initialize MCS7840 -- Write Init values to corresponding Registers
*
* Register Index
* 1 : IER
* 2 : FCR
* 3 : LCR
* 4 : MCR
*
* 0x08 : SP1/2 Control Reg
*****************************************************************************/
/* NEED to check the following Block */
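/* Toggling bit 7 of the SP register below appears to pulse a port
 * reset; kept as in the vendor code. */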
Data = 0x0;
status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data);
if (status < 0) {
dev_dbg(&port->dev, "Reading Spreg failed\n");
goto err;
}
Data |= 0x80;
status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
if (status < 0) {
dev_dbg(&port->dev, "writing Spreg failed\n");
goto err;
}
Data &= ~0x80;
status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
if (status < 0) {
dev_dbg(&port->dev, "writing Spreg failed\n");
goto err;
}
/* End of block to be checked */
Data = 0x0;
status = mos7840_get_reg_sync(port, mos7840_port->ControlRegOffset,
&Data);
if (status < 0) {
dev_dbg(&port->dev, "Reading Controlreg failed\n");
goto err;
}
Data |= 0x08; /* Driver done bit */
Data |= 0x20; /* rx_disable */
status = mos7840_set_reg_sync(port,
mos7840_port->ControlRegOffset, Data);
if (status < 0) {
dev_dbg(&port->dev, "writing Controlreg failed\n");
goto err;
}
/* do register settings here */
/* Set all regs to the device default values. */
/***********************************
* First Disable all interrupts.
***********************************/
Data = 0x00;
status = mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
if (status < 0) {
dev_dbg(&port->dev, "disabling interrupts failed\n");
goto err;
}
/* Set FIFO_CONTROL_REGISTER to the default value */
Data = 0x00;
status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing FIFO_CONTROL_REGISTER failed\n");
goto err;
}
Data = 0xcf;
status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing FIFO_CONTROL_REGISTER failed\n");
goto err;
}
Data = 0x03;
status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
mos7840_port->shadowLCR = Data;
Data = 0x0b;
status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
mos7840_port->shadowMCR = Data;
Data = 0x00;
status = mos7840_get_uart_reg(port, LINE_CONTROL_REGISTER, &Data);
mos7840_port->shadowLCR = Data;
Data |= SERIAL_LCR_DLAB; /* data latch enable in LCR 0x80 */
status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
Data = 0x0c;
status = mos7840_set_uart_reg(port, DIVISOR_LATCH_LSB, Data);
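/* DLL=0x0c, DLM=0x00: divisor 12, i.e. 115200 / 12 = 9600 baud with
 * the default clock select, matching the fallback baud rate picked
 * in mos7840_change_port_settings() */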
Data = 0x0;
status = mos7840_set_uart_reg(port, DIVISOR_LATCH_MSB, Data);
Data = 0x00;
status = mos7840_get_uart_reg(port, LINE_CONTROL_REGISTER, &Data);
Data = Data & ~SERIAL_LCR_DLAB;
status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
mos7840_port->shadowLCR = Data;
/* clearing Bulkin and Bulkout Fifo */
Data = 0x0;
status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data);
Data = Data | 0x0c;
status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
Data = Data & ~0x0c;
status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
/* Finally enable all interrupts */
Data = 0x0c;
status = mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
/* clearing rx_disable */
Data = 0x0;
status = mos7840_get_reg_sync(port, mos7840_port->ControlRegOffset,
&Data);
Data = Data & ~0x20;
status = mos7840_set_reg_sync(port, mos7840_port->ControlRegOffset,
Data);
/* rx_negate */
Data = 0x0;
status = mos7840_get_reg_sync(port, mos7840_port->ControlRegOffset,
&Data);
Data = Data | 0x10;
status = mos7840_set_reg_sync(port, mos7840_port->ControlRegOffset,
Data);
/* Check to see if we've set up our endpoint info yet *
* (can't set it up in mos7840_startup as the structures *
* were not set up at that time.) */
if (port0->open_ports == 1) {
if (serial->port[0]->interrupt_in_buffer == NULL) {
/* set up interrupt urb */
usb_fill_int_urb(serial->port[0]->interrupt_in_urb,
serial->dev,
usb_rcvintpipe(serial->dev,
serial->port[0]->interrupt_in_endpointAddress),
serial->port[0]->interrupt_in_buffer,
serial->port[0]->interrupt_in_urb->
transfer_buffer_length,
mos7840_interrupt_callback,
serial,
serial->port[0]->interrupt_in_urb->interval);
/* start interrupt read for mos7840 *
* will continue as long as mos7840 is connected */
response =
usb_submit_urb(serial->port[0]->interrupt_in_urb,
GFP_KERNEL);
if (response) {
dev_err(&port->dev, "%s - Error %d submitting "
"interrupt urb\n", __func__, response);
}
}
}
dev_dbg(&port->dev, "port number is %d\n", port->number);
dev_dbg(&port->dev, "serial number is %d\n", port->serial->minor);
dev_dbg(&port->dev, "Bulkin endpoint is %d\n", port->bulk_in_endpointAddress);
dev_dbg(&port->dev, "BulkOut endpoint is %d\n", port->bulk_out_endpointAddress);
dev_dbg(&port->dev, "Interrupt endpoint is %d\n", port->interrupt_in_endpointAddress);
dev_dbg(&port->dev, "port's number in the device is %d\n", mos7840_port->port_num);
mos7840_port->read_urb = port->read_urb;
/* set up our bulk in urb */
if ((serial->num_ports == 2)
&& ((((__u16)port->number -
(__u16)(port->serial->minor)) % 2) != 0)) {
usb_fill_bulk_urb(mos7840_port->read_urb,
serial->dev,
usb_rcvbulkpipe(serial->dev,
(port->bulk_in_endpointAddress) + 2),
port->bulk_in_buffer,
mos7840_port->read_urb->transfer_buffer_length,
mos7840_bulk_in_callback, mos7840_port);
} else {
usb_fill_bulk_urb(mos7840_port->read_urb,
serial->dev,
usb_rcvbulkpipe(serial->dev,
port->bulk_in_endpointAddress),
port->bulk_in_buffer,
mos7840_port->read_urb->transfer_buffer_length,
mos7840_bulk_in_callback, mos7840_port);
}
dev_dbg(&port->dev, "%s: bulkin endpoint is %d\n", __func__, port->bulk_in_endpointAddress);
mos7840_port->read_urb_busy = true;
response = usb_submit_urb(mos7840_port->read_urb, GFP_KERNEL);
if (response) {
dev_err(&port->dev, "%s - Error %d submitting control urb\n",
__func__, response);
mos7840_port->read_urb_busy = false;
}
/* initialize our wait queues */
init_waitqueue_head(&mos7840_port->wait_chase);
/* initialize our port settings */
/* Must set to enable ints! */
mos7840_port->shadowMCR = MCR_MASTER_IE;
/* send a open port command */
mos7840_port->open = 1;
/* mos7840_change_port_settings(mos7840_port,old_termios); */
return 0;
err:
for (j = 0; j < NUM_URBS; ++j) {
urb = mos7840_port->write_urb_pool[j];
if (!urb)
continue;
kfree(urb->transfer_buffer);
usb_free_urb(urb);
}
return status;
}
/*****************************************************************************
* mos7840_chars_in_buffer
* this function is called by the tty driver when it wants to know how many
* bytes of data we currently have outstanding in the port (data that has
* been written, but hasn't made it out the port yet)
* If successful, we return the number of bytes left to be written in the
* system,
* Otherwise we return zero.
*****************************************************************************/
static int mos7840_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
int i;
int chars = 0;
unsigned long flags;
struct moschip_port *mos7840_port;
if (mos7840_port_paranoia_check(port, __func__))
return 0;
mos7840_port = mos7840_get_port_private(port);
if (mos7840_port == NULL)
return 0;
spin_lock_irqsave(&mos7840_port->pool_lock, flags);
for (i = 0; i < NUM_URBS; ++i) {
if (mos7840_port->busy[i]) {
struct urb *urb = mos7840_port->write_urb_pool[i];
chars += urb->transfer_buffer_length;
}
}
spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);
dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars);
return chars;
}
/*****************************************************************************
* mos7840_close
* this function is called by the tty driver when a port is closed
*****************************************************************************/
static void mos7840_close(struct usb_serial_port *port)
{
struct usb_serial *serial;
struct moschip_port *mos7840_port;
struct moschip_port *port0;
int j;
__u16 Data;
if (mos7840_port_paranoia_check(port, __func__))
return;
serial = mos7840_get_usb_serial(port, __func__);
if (!serial)
return;
mos7840_port = mos7840_get_port_private(port);
port0 = mos7840_get_port_private(serial->port[0]);
if (mos7840_port == NULL || port0 == NULL)
return;
for (j = 0; j < NUM_URBS; ++j)
usb_kill_urb(mos7840_port->write_urb_pool[j]);
/* Freeing Write URBs */
for (j = 0; j < NUM_URBS; ++j) {
if (mos7840_port->write_urb_pool[j]) {
/* kfree(NULL) is a no-op, no need to check the buffer */
kfree(mos7840_port->write_urb_pool[j]->transfer_buffer);
usb_free_urb(mos7840_port->write_urb_pool[j]);
}
}
usb_kill_urb(mos7840_port->write_urb);
usb_kill_urb(mos7840_port->read_urb);
mos7840_port->read_urb_busy = false;
port0->open_ports--;
dev_dbg(&port->dev, "%s in close%d:in port%d\n", __func__, port0->open_ports, port->number);
if (port0->open_ports == 0) {
if (serial->port[0]->interrupt_in_urb) {
dev_dbg(&port->dev, "Shutdown interrupt_in_urb\n");
usb_kill_urb(serial->port[0]->interrupt_in_urb);
}
}
if (mos7840_port->write_urb) {
/* if this urb had a transfer buffer already (old tx) free it */
kfree(mos7840_port->write_urb->transfer_buffer);
usb_free_urb(mos7840_port->write_urb);
}
Data = 0x0;
mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
Data = 0x00;
mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
mos7840_port->open = 0;
}
/************************************************************************
*
* mos7840_block_until_chase_response
*
* This function will block the close until one of the following:
* 1. Response to our Chase comes from mos7840
* 2. A timeout of 10 seconds without activity has expired
* (1K of mos7840 data @ 2400 baud ==> 4 sec to empty)
*
************************************************************************/
static void mos7840_block_until_chase_response(struct tty_struct *tty,
struct moschip_port *mos7840_port)
{
int timeout = msecs_to_jiffies(1000);
int wait = 10;
int count;
while (1) {
count = mos7840_chars_in_buffer(tty);
/* Check for Buffer status */
if (count <= 0)
return;
/* Block the thread for a while */
interruptible_sleep_on_timeout(&mos7840_port->wait_chase,
timeout);
/* No activity.. count down section */
wait--;
if (wait == 0) {
dev_dbg(&mos7840_port->port->dev, "%s - TIMEOUT\n", __func__);
return;
} else {
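/* FIXME: wait is reset on every pass, so the 10-count
   timeout above can never actually expire while data remains */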
/* Reset timeout value back to seconds */
wait = 10;
}
}
}
/*****************************************************************************
* mos7840_break
* this function sends a break to the port
*****************************************************************************/
static void mos7840_break(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
unsigned char data;
struct usb_serial *serial;
struct moschip_port *mos7840_port;
if (mos7840_port_paranoia_check(port, __func__))
return;
serial = mos7840_get_usb_serial(port, __func__);
if (!serial)
return;
mos7840_port = mos7840_get_port_private(port);
if (mos7840_port == NULL)
return;
/* flush and block until tx is empty */
mos7840_block_until_chase_response(tty, mos7840_port);
if (break_state == -1)
data = mos7840_port->shadowLCR | LCR_SET_BREAK;
else
data = mos7840_port->shadowLCR & ~LCR_SET_BREAK;
/* FIXME: no locking on shadowLCR anywhere in driver */
mos7840_port->shadowLCR = data;
dev_dbg(&port->dev, "%s mos7840_port->shadowLCR is %x\n", __func__, mos7840_port->shadowLCR);
mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER,
mos7840_port->shadowLCR);
}
/*****************************************************************************
* mos7840_write_room
* this function is called by the tty driver when it wants to know how many
* bytes of data we can accept for a specific port.
* If successful, we return the amount of room that we have for this port
* Otherwise we return a negative error number.
*****************************************************************************/
static int mos7840_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
int i;
int room = 0;
unsigned long flags;
struct moschip_port *mos7840_port;
if (mos7840_port_paranoia_check(port, __func__))
return -1;
mos7840_port = mos7840_get_port_private(port);
if (mos7840_port == NULL)
return -1;
spin_lock_irqsave(&mos7840_port->pool_lock, flags);
for (i = 0; i < NUM_URBS; ++i) {
if (!mos7840_port->busy[i])
room += URB_TRANSFER_BUFFER_SIZE;
}
spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);
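/* Report one URB's worth (less one byte) below the true free space,
 * apparently to keep a buffer in reserve for the next write. */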
room = (room == 0) ? 0 : room - URB_TRANSFER_BUFFER_SIZE + 1;
dev_dbg(&mos7840_port->port->dev, "%s - returns %d\n", __func__, room);
return room;
}
/*****************************************************************************
* mos7840_write
* this function is called by the tty driver when data should be written to
* the port.
* If successful, we return the number of bytes written, otherwise we
* return a negative error number.
*****************************************************************************/
static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *data, int count)
{
int status;
int i;
int bytes_sent = 0;
int transfer_size;
unsigned long flags;
struct moschip_port *mos7840_port;
struct usb_serial *serial;
struct urb *urb;
/* __u16 Data; */
const unsigned char *current_position = data;
unsigned char *data1;
#ifdef NOTMOS7840
Data = 0x00;
status = mos7840_get_uart_reg(port, LINE_CONTROL_REGISTER, &Data);
mos7840_port->shadowLCR = Data;
dev_dbg(&port->dev, "%s: LINE_CONTROL_REGISTER is %x\n", __func__, Data);
dev_dbg(&port->dev, "%s: mos7840_port->shadowLCR is %x\n", __func__, mos7840_port->shadowLCR);
/* Data = 0x03; */
/* status = mos7840_set_uart_reg(port,LINE_CONTROL_REGISTER,Data); */
/* mos7840_port->shadowLCR=Data;//Need to add later */
Data |= SERIAL_LCR_DLAB; /* data latch enable in LCR 0x80 */
status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
/* Data = 0x0c; */
/* status = mos7840_set_uart_reg(port,DIVISOR_LATCH_LSB,Data); */
Data = 0x00;
status = mos7840_get_uart_reg(port, DIVISOR_LATCH_LSB, &Data);
dev_dbg(&port->dev, "%s: DLL value is %x\n", __func__, Data);
Data = 0x0;
status = mos7840_get_uart_reg(port, DIVISOR_LATCH_MSB, &Data);
dev_dbg(&port->dev, "%s: DLM value is %x\n", __func__, Data);
Data = Data & ~SERIAL_LCR_DLAB;
dev_dbg(&port->dev, "%s: mos7840_port->shadowLCR is %x\n", __func__, mos7840_port->shadowLCR);
status = mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
#endif
if (mos7840_port_paranoia_check(port, __func__))
return -1;
serial = port->serial;
if (mos7840_serial_paranoia_check(serial, __func__))
return -1;
mos7840_port = mos7840_get_port_private(port);
if (mos7840_port == NULL)
return -1;
/* try to find a free urb in the list */
urb = NULL;
spin_lock_irqsave(&mos7840_port->pool_lock, flags);
for (i = 0; i < NUM_URBS; ++i) {
if (!mos7840_port->busy[i]) {
mos7840_port->busy[i] = 1;
urb = mos7840_port->write_urb_pool[i];
dev_dbg(&port->dev, "URB:%d\n", i);
break;
}
}
spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);
if (urb == NULL) {
dev_dbg(&port->dev, "%s - no more free urbs\n", __func__);
goto exit;
}
if (urb->transfer_buffer == NULL) {
urb->transfer_buffer =
kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL);
if (urb->transfer_buffer == NULL) {
dev_err_console(port, "%s no more kernel memory...\n",
__func__);
goto exit;
}
}
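/* Each URB carries at most URB_TRANSFER_BUFFER_SIZE (32) bytes; we
 * report back how much we took and the tty layer resubmits the rest. */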
transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE);
memcpy(urb->transfer_buffer, current_position, transfer_size);
/* fill urb with data and submit */
if ((serial->num_ports == 2)
&& ((((__u16)port->number -
(__u16)(port->serial->minor)) % 2) != 0)) {
usb_fill_bulk_urb(urb,
serial->dev,
usb_sndbulkpipe(serial->dev,
(port->bulk_out_endpointAddress) + 2),
urb->transfer_buffer,
transfer_size,
mos7840_bulk_out_data_callback, mos7840_port);
} else {
usb_fill_bulk_urb(urb,
serial->dev,
usb_sndbulkpipe(serial->dev,
port->bulk_out_endpointAddress),
urb->transfer_buffer,
transfer_size,
mos7840_bulk_out_data_callback, mos7840_port);
}
data1 = urb->transfer_buffer;
dev_dbg(&port->dev, "bulkout endpoint is %d\n", port->bulk_out_endpointAddress);
if (mos7840_port->has_led)
mos7840_led_activity(port);
/* send it down the pipe */
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status) {
mos7840_port->busy[i] = 0;
dev_err_console(port, "%s - usb_submit_urb(write bulk) failed "
"with status = %d\n", __func__, status);
bytes_sent = status;
goto exit;
}
bytes_sent = transfer_size;
port->icount.tx += transfer_size;
dev_dbg(&port->dev, "icount.tx is %d:\n", port->icount.tx);
exit:
return bytes_sent;
}
/*****************************************************************************
* mos7840_throttle
* this function is called by the tty driver when it wants to stop the data
* being read from the port.
*****************************************************************************/
static void mos7840_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct moschip_port *mos7840_port;
int status;
if (mos7840_port_paranoia_check(port, __func__))
return;
mos7840_port = mos7840_get_port_private(port);
if (mos7840_port == NULL)
return;
if (!mos7840_port->open) {
dev_dbg(&port->dev, "%s", "port not opened\n");
return;
}
/* if we are implementing XON/XOFF, send the stop character */
if (I_IXOFF(tty)) {
unsigned char stop_char = STOP_CHAR(tty);
status = mos7840_write(tty, port, &stop_char, 1);
if (status <= 0)
return;
}
/* if we are implementing RTS/CTS, toggle that line */
if (tty->termios.c_cflag & CRTSCTS) {
mos7840_port->shadowMCR &= ~MCR_RTS;
status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER,
mos7840_port->shadowMCR);
if (status < 0)
return;
}
}
/*****************************************************************************
* mos7840_unthrottle
* this function is called by the tty driver when it wants to resume
* the data being read from the port (called after mos7840_throttle is
* called)
*****************************************************************************/
static void mos7840_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
int status;
struct moschip_port *mos7840_port = mos7840_get_port_private(port);
if (mos7840_port_paranoia_check(port, __func__))
return;
if (mos7840_port == NULL)
return;
if (!mos7840_port->open) {
dev_dbg(&port->dev, "%s - port not opened\n", __func__);
return;
}
/* if we are implementing XON/XOFF, send the start character */
if (I_IXOFF(tty)) {
unsigned char start_char = START_CHAR(tty);
status = mos7840_write(tty, port, &start_char, 1);
if (status <= 0)
return;
}
/* if we are implementing RTS/CTS, toggle that line */
if (tty->termios.c_cflag & CRTSCTS) {
mos7840_port->shadowMCR |= MCR_RTS;
status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER,
mos7840_port->shadowMCR);
if (status < 0)
return;
}
}
static int mos7840_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct moschip_port *mos7840_port;
unsigned int result;
__u16 msr;
__u16 mcr;
int status;
mos7840_port = mos7840_get_port_private(port);
if (mos7840_port == NULL)
return -ENODEV;
status = mos7840_get_uart_reg(port, MODEM_STATUS_REGISTER, &msr);
if (status != 1)
return -EIO;
status = mos7840_get_uart_reg(port, MODEM_CONTROL_REGISTER, &mcr);
if (status != 1)
return -EIO;
result = ((mcr & MCR_DTR) ? TIOCM_DTR : 0)
| ((mcr & MCR_RTS) ? TIOCM_RTS : 0)
| ((mcr & MCR_LOOPBACK) ? TIOCM_LOOP : 0)
| ((msr & MOS7840_MSR_CTS) ? TIOCM_CTS : 0)
| ((msr & MOS7840_MSR_CD) ? TIOCM_CAR : 0)
| ((msr & MOS7840_MSR_RI) ? TIOCM_RI : 0)
| ((msr & MOS7840_MSR_DSR) ? TIOCM_DSR : 0);
dev_dbg(&port->dev, "%s - 0x%04X\n", __func__, result);
return result;
}
static int mos7840_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct moschip_port *mos7840_port;
unsigned int mcr;
int status;
mos7840_port = mos7840_get_port_private(port);
if (mos7840_port == NULL)
return -ENODEV;
/* FIXME: What locks the port registers ? */
mcr = mos7840_port->shadowMCR;
if (clear & TIOCM_RTS)
mcr &= ~MCR_RTS;
if (clear & TIOCM_DTR)
mcr &= ~MCR_DTR;
if (clear & TIOCM_LOOP)
mcr &= ~MCR_LOOPBACK;
if (set & TIOCM_RTS)
mcr |= MCR_RTS;
if (set & TIOCM_DTR)
mcr |= MCR_DTR;
if (set & TIOCM_LOOP)
mcr |= MCR_LOOPBACK;
mos7840_port->shadowMCR = mcr;
status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, mcr);
if (status < 0) {
dev_dbg(&port->dev, "setting MODEM_CONTROL_REGISTER Failed\n");
return status;
}
return 0;
}
/*****************************************************************************
* mos7840_calc_baud_rate_divisor
* this function calculates the proper baud rate divisor for the specified
* baud rate.
*****************************************************************************/
static int mos7840_calc_baud_rate_divisor(struct usb_serial_port *port,
int baudRate, int *divisor,
__u16 *clk_sel_val)
{
dev_dbg(&port->dev, "%s - %d\n", __func__, baudRate);
if (baudRate <= 115200) {
*divisor = 115200 / baudRate;
*clk_sel_val = 0x0;
} else if ((baudRate > 115200) && (baudRate <= 230400)) {
*divisor = 230400 / baudRate;
*clk_sel_val = 0x10;
} else if ((baudRate > 230400) && (baudRate <= 403200)) {
*divisor = 403200 / baudRate;
*clk_sel_val = 0x20;
} else if ((baudRate > 403200) && (baudRate <= 460800)) {
*divisor = 460800 / baudRate;
*clk_sel_val = 0x30;
} else if ((baudRate > 460800) && (baudRate <= 806400)) {
*divisor = 806400 / baudRate;
*clk_sel_val = 0x40;
} else if ((baudRate > 806400) && (baudRate <= 921600)) {
*divisor = 921600 / baudRate;
*clk_sel_val = 0x50;
} else if ((baudRate > 921600) && (baudRate <= 1572864)) {
*divisor = 1572864 / baudRate;
*clk_sel_val = 0x60;
} else if ((baudRate > 1572864) && (baudRate <= 3145728)) {
*divisor = 3145728 / baudRate;
*clk_sel_val = 0x70;
}
return 0;
#ifdef NOTMCS7840
for (i = 0; i < ARRAY_SIZE(mos7840_divisor_table); i++) {
if (mos7840_divisor_table[i].BaudRate == baudrate) {
*divisor = mos7840_divisor_table[i].Divisor;
return 0;
}
}
/* After trying for all the standard baud rates *
* Try calculating the divisor for this baud rate */
if (baudrate > 75 && baudrate < 230400) {
/* get the divisor */
custom = (__u16) (230400L / baudrate);
/* Check for round off */
round1 = (__u16) (2304000L / baudrate);
round = (__u16) (round1 - (custom * 10));
if (round > 4)
custom++;
*divisor = custom;
dev_dbg(&port->dev, " Baud %d = %d\n", baudrate, custom);
return 0;
}
dev_dbg(&port->dev, "%s", " Baud calculation Failed...\n");
return -1;
#endif
}
/*****************************************************************************
* mos7840_send_cmd_write_baud_rate
* this function sends the proper command to change the baud rate of the
* specified port.
*****************************************************************************/
static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port,
int baudRate)
{
int divisor = 0;
int status;
__u16 Data;
unsigned char number;
__u16 clk_sel_val;
struct usb_serial_port *port;
if (mos7840_port == NULL)
return -1;
port = mos7840_port->port;
if (mos7840_port_paranoia_check(port, __func__))
return -1;
if (mos7840_serial_paranoia_check(port->serial, __func__))
return -1;
number = mos7840_port->port->number - mos7840_port->port->serial->minor;
dev_dbg(&port->dev, "%s - port = %d, baud = %d\n", __func__,
mos7840_port->port->number, baudRate);
/* reset clk_uart_sel in spregOffset */
if (baudRate > 115200) {
#ifdef HW_flow_control
/* NOTE: need to check the other register to modify */
/* setting h/w flow control bit to 1 */
Data = 0x2b;
mos7840_port->shadowMCR = Data;
status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER,
Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing spreg failed in set_serial_baud\n");
return -1;
}
#endif
} else {
#ifdef HW_flow_control
/* setting h/w flow control bit to 0 */
Data = 0xb;
mos7840_port->shadowMCR = Data;
status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER,
Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing spreg failed in set_serial_baud\n");
return -1;
}
#endif
}
if (1) { /* baudRate <= 115200 */
clk_sel_val = 0x0;
Data = 0x0;
status = mos7840_calc_baud_rate_divisor(port, baudRate, &divisor,
&clk_sel_val);
status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset,
&Data);
if (status < 0) {
dev_dbg(&port->dev, "reading spreg failed in set_serial_baud\n");
return -1;
}
Data = (Data & 0x8f) | clk_sel_val;
status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset,
Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing spreg failed in set_serial_baud\n");
return -1;
}
/* Calculate the Divisor */
if (status) {
dev_err(&port->dev, "%s - bad baud rate\n", __func__);
return status;
}
/* Enable access to divisor latch */
Data = mos7840_port->shadowLCR | SERIAL_LCR_DLAB;
mos7840_port->shadowLCR = Data;
mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
/* Write the divisor */
Data = (unsigned char)(divisor & 0xff);
dev_dbg(&port->dev, "set_serial_baud Value to write DLL is %x\n", Data);
mos7840_set_uart_reg(port, DIVISOR_LATCH_LSB, Data);
Data = (unsigned char)((divisor & 0xff00) >> 8);
dev_dbg(&port->dev, "set_serial_baud Value to write DLM is %x\n", Data);
mos7840_set_uart_reg(port, DIVISOR_LATCH_MSB, Data);
/* Disable access to divisor latch */
Data = mos7840_port->shadowLCR & ~SERIAL_LCR_DLAB;
mos7840_port->shadowLCR = Data;
mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
}
return status;
}
/*****************************************************************************
* mos7840_change_port_settings
* This routine is called to set the UART on the device to match
* the specified new settings.
*****************************************************************************/
static void mos7840_change_port_settings(struct tty_struct *tty,
struct moschip_port *mos7840_port, struct ktermios *old_termios)
{
int baud;
unsigned cflag;
unsigned iflag;
__u8 lData;
__u8 lParity;
__u8 lStop;
int status;
__u16 Data;
struct usb_serial_port *port;
struct usb_serial *serial;
if (mos7840_port == NULL)
return;
port = mos7840_port->port;
if (mos7840_port_paranoia_check(port, __func__))
return;
if (mos7840_serial_paranoia_check(port->serial, __func__))
return;
serial = port->serial;
if (!mos7840_port->open) {
dev_dbg(&port->dev, "%s - port not opened\n", __func__);
return;
}
lData = LCR_BITS_8;
lStop = LCR_STOP_1;
lParity = LCR_PAR_NONE;
cflag = tty->termios.c_cflag;
iflag = tty->termios.c_iflag;
/* Change the number of bits */
switch (cflag & CSIZE) {
case CS5:
lData = LCR_BITS_5;
break;
case CS6:
lData = LCR_BITS_6;
break;
case CS7:
lData = LCR_BITS_7;
break;
default:
case CS8:
lData = LCR_BITS_8;
break;
}
/* Change the Parity bit */
if (cflag & PARENB) {
if (cflag & PARODD) {
lParity = LCR_PAR_ODD;
dev_dbg(&port->dev, "%s - parity = odd\n", __func__);
} else {
lParity = LCR_PAR_EVEN;
dev_dbg(&port->dev, "%s - parity = even\n", __func__);
}
} else {
dev_dbg(&port->dev, "%s - parity = none\n", __func__);
}
if (cflag & CMSPAR)
lParity = lParity | 0x20;
/* Change the Stop bit */
if (cflag & CSTOPB) {
lStop = LCR_STOP_2;
dev_dbg(&port->dev, "%s - stop bits = 2\n", __func__);
} else {
lStop = LCR_STOP_1;
dev_dbg(&port->dev, "%s - stop bits = 1\n", __func__);
}
/* Update the LCR with the correct value */
mos7840_port->shadowLCR &=
~(LCR_BITS_MASK | LCR_STOP_MASK | LCR_PAR_MASK);
mos7840_port->shadowLCR |= (lData | lParity | lStop);
dev_dbg(&port->dev, "%s - mos7840_port->shadowLCR is %x\n", __func__,
mos7840_port->shadowLCR);
/* Disable Interrupts */
Data = 0x00;
mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
Data = 0x00;
mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
Data = 0xcf;
mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
/* Send the updated LCR value to the mos7840 */
Data = mos7840_port->shadowLCR;
mos7840_set_uart_reg(port, LINE_CONTROL_REGISTER, Data);
Data = 0x00b;
mos7840_port->shadowMCR = Data;
mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
Data = 0x00b;
mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
/* set up the MCR register and send it to the mos7840 */
mos7840_port->shadowMCR = MCR_MASTER_IE;
if (cflag & CBAUD)
mos7840_port->shadowMCR |= (MCR_DTR | MCR_RTS);
if (cflag & CRTSCTS)
mos7840_port->shadowMCR |= (MCR_XON_ANY);
else
mos7840_port->shadowMCR &= ~(MCR_XON_ANY);
Data = mos7840_port->shadowMCR;
mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
/* Determine divisor based on baud rate */
baud = tty_get_baud_rate(tty);
if (!baud) {
/* pick a default, any default... */
dev_dbg(&port->dev, "%s", "Picked default baud...\n");
baud = 9600;
}
dev_dbg(&port->dev, "%s - baud rate = %d\n", __func__, baud);
status = mos7840_send_cmd_write_baud_rate(mos7840_port, baud);
/* Enable Interrupts */
Data = 0x0c;
mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
if (mos7840_port->read_urb_busy == false) {
mos7840_port->read_urb_busy = true;
status = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC);
if (status) {
dev_dbg(&port->dev, "usb_submit_urb(read bulk) failed, status = %d\n",
status);
mos7840_port->read_urb_busy = false;
}
}
dev_dbg(&port->dev, "%s - mos7840_port->shadowLCR is End %x\n", __func__,
mos7840_port->shadowLCR);
}
/*****************************************************************************
* mos7840_set_termios
* this function is called by the tty driver when it wants to change
* the termios structure
*****************************************************************************/
static void mos7840_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
struct ktermios *old_termios)
{
int status;
unsigned int cflag;
struct usb_serial *serial;
struct moschip_port *mos7840_port;
if (mos7840_port_paranoia_check(port, __func__))
return;
serial = port->serial;
if (mos7840_serial_paranoia_check(serial, __func__))
return;
mos7840_port = mos7840_get_port_private(port);
if (mos7840_port == NULL)
return;
if (!mos7840_port->open) {
dev_dbg(&port->dev, "%s - port not opened\n", __func__);
return;
}
dev_dbg(&port->dev, "%s", "setting termios - \n");
cflag = tty->termios.c_cflag;
dev_dbg(&port->dev, "%s - clfag %08x iflag %08x\n", __func__,
tty->termios.c_cflag, RELEVANT_IFLAG(tty->termios.c_iflag));
dev_dbg(&port->dev, "%s - old clfag %08x old iflag %08x\n", __func__,
old_termios->c_cflag, RELEVANT_IFLAG(old_termios->c_iflag));
dev_dbg(&port->dev, "%s - port %d\n", __func__, port->number);
/* change the port settings to the new ones specified */
mos7840_change_port_settings(tty, mos7840_port, old_termios);
if (!mos7840_port->read_urb) {
dev_dbg(&port->dev, "%s", "URB KILLED !!!!!\n");
return;
}
if (mos7840_port->read_urb_busy == false) {
mos7840_port->read_urb_busy = true;
status = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC);
if (status) {
dev_dbg(&port->dev, "usb_submit_urb(read bulk) failed, status = %d\n",
status);
mos7840_port->read_urb_busy = false;
}
}
}
/*****************************************************************************
* mos7840_get_lsr_info - get line status register info
*
* Purpose: Let user call ioctl() to get info when the UART physically
* is emptied. On bus types like RS485, the transmitter must
* release the bus after transmitting. This must be done when
* the transmit shift register is empty, not be done when the
* transmit holding register is empty. This functionality
* allows an RS485 driver to be written in user space.
*****************************************************************************/
static int mos7840_get_lsr_info(struct tty_struct *tty,
unsigned int __user *value)
{
int count;
unsigned int result = 0;
count = mos7840_chars_in_buffer(tty);
if (count == 0)
result = TIOCSER_TEMT;
if (copy_to_user(value, &result, sizeof(int)))
return -EFAULT;
return 0;
}
/*****************************************************************************
* mos7840_get_serial_info
* function to get information about serial port
*****************************************************************************/
static int mos7840_get_serial_info(struct moschip_port *mos7840_port,
struct serial_struct __user *retinfo)
{
struct serial_struct tmp;
if (mos7840_port == NULL)
return -1;
if (!retinfo)
return -EFAULT;
memset(&tmp, 0, sizeof(tmp));
tmp.type = PORT_16550A;
tmp.line = mos7840_port->port->serial->minor;
tmp.port = mos7840_port->port->number;
tmp.irq = 0;
tmp.flags = ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ;
tmp.xmit_fifo_size = NUM_URBS * URB_TRANSFER_BUFFER_SIZE;
tmp.baud_base = 9600;
tmp.close_delay = 5 * HZ;
tmp.closing_wait = 30 * HZ;
if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
return -EFAULT;
return 0;
}
/*****************************************************************************
* SerialIoctl
* this function handles any ioctl calls to the driver
*****************************************************************************/
static int mos7840_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct usb_serial_port *port = tty->driver_data;
void __user *argp = (void __user *)arg;
struct moschip_port *mos7840_port;
if (mos7840_port_paranoia_check(port, __func__))
return -1;
mos7840_port = mos7840_get_port_private(port);
if (mos7840_port == NULL)
return -1;
dev_dbg(&port->dev, "%s - cmd = 0x%x\n", __func__, cmd);
switch (cmd) {
/* return number of bytes available */
case TIOCSERGETLSR:
dev_dbg(&port->dev, "%s TIOCSERGETLSR\n", __func__);
return mos7840_get_lsr_info(tty, argp);
case TIOCGSERIAL:
dev_dbg(&port->dev, "%s TIOCGSERIAL\n", __func__);
return mos7840_get_serial_info(mos7840_port, argp);
case TIOCSSERIAL:
dev_dbg(&port->dev, "%s TIOCSSERIAL\n", __func__);
break;
default:
break;
}
return -ENOIOCTLCMD;
}
static int mos7810_check(struct usb_serial *serial)
{
int i, pass_count = 0;
u8 *buf;
__u16 data = 0, mcr_data = 0;
__u16 test_pattern = 0x55AA;
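/* 0x55AA = 0101 0101 1010 1010b: drives alternating 0/1 levels out
 * of the test pin over the 16 iterations below. */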
int res;
buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
if (!buf)
return 0; /* failed to identify 7810 */
/* Store MCR setting */
res = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
MCS_RDREQ, MCS_RD_RTYPE, 0x0300, MODEM_CONTROL_REGISTER,
buf, VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
if (res == VENDOR_READ_LENGTH)
mcr_data = *buf;
for (i = 0; i < 16; i++) {
/* Send the 1-bit test pattern out to MCS7810 test pin */
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
MCS_WRREQ, MCS_WR_RTYPE,
(0x0300 | (((test_pattern >> i) & 0x0001) << 1)),
MODEM_CONTROL_REGISTER, NULL, 0, MOS_WDR_TIMEOUT);
/* Read the test pattern back */
res = usb_control_msg(serial->dev,
usb_rcvctrlpipe(serial->dev, 0), MCS_RDREQ,
MCS_RD_RTYPE, 0, GPIO_REGISTER, buf,
VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
if (res == VENDOR_READ_LENGTH)
data = *buf;
/* If this is a MCS7810 device, both test patterns must match */
if (((test_pattern >> i) ^ (~data >> 1)) & 0x0001)
break;
pass_count++;
}
/* Restore MCR setting */
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), MCS_WRREQ,
MCS_WR_RTYPE, 0x0300 | mcr_data, MODEM_CONTROL_REGISTER, NULL,
0, MOS_WDR_TIMEOUT);
kfree(buf);
if (pass_count == 16)
return 1;
return 0;
}
static int mos7840_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
u8 *buf;
int device_type;
if (product == MOSCHIP_DEVICE_ID_7810 ||
product == MOSCHIP_DEVICE_ID_7820) {
device_type = product;
goto out;
}
buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
if (!buf)
return -ENOMEM;
usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, buf,
VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
/* For a MCS7840 device GPIO0 must be set to 1 */
if (buf[0] & 0x01)
device_type = MOSCHIP_DEVICE_ID_7840;
else if (mos7810_check(serial))
device_type = MOSCHIP_DEVICE_ID_7810;
else
device_type = MOSCHIP_DEVICE_ID_7820;
kfree(buf);
out:
usb_set_serial_data(serial, (void *)(unsigned long)device_type);
return 0;
}
static int mos7840_calc_num_ports(struct usb_serial *serial)
{
int device_type = (unsigned long)usb_get_serial_data(serial);
int mos7840_num_ports;
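/* The port count is encoded in bits 4-7 of the product id:
 * 0x7840 -> 4 ports, 0x7820 -> 2, 0x7810 -> 1. */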
mos7840_num_ports = (device_type >> 4) & 0x000F;
return mos7840_num_ports;
}
static int mos7840_port_probe(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
int device_type = (unsigned long)usb_get_serial_data(serial);
struct moschip_port *mos7840_port;
int status;
int pnum;
__u16 Data;
/* we set up the pointers to the endpoints in the mos7840_open *
* function, as the structures aren't created yet. */
pnum = port->number - serial->minor;
dev_dbg(&port->dev, "mos7840_startup: configuring port %d\n", pnum);
mos7840_port = kzalloc(sizeof(struct moschip_port), GFP_KERNEL);
if (mos7840_port == NULL) {
dev_err(&port->dev, "%s - Out of memory\n", __func__);
return -ENOMEM;
}
/* Initialize all port interrupt endpoints to the port 0 interrupt
* endpoint. Our device has only one interrupt endpoint
* common to all ports */
mos7840_port->port = port;
mos7840_set_port_private(port, mos7840_port);
spin_lock_init(&mos7840_port->pool_lock);
/* minor is not initialised until later by
* usb-serial.c:get_free_serial() and cannot therefore be used
* to index device instances */
mos7840_port->port_num = pnum + 1;
dev_dbg(&port->dev, "port->number = %d\n", port->number);
dev_dbg(&port->dev, "port->serial->minor = %d\n", port->serial->minor);
dev_dbg(&port->dev, "mos7840_port->port_num = %d\n", mos7840_port->port_num);
dev_dbg(&port->dev, "serial->minor = %d\n", serial->minor);
if (mos7840_port->port_num == 1) {
mos7840_port->SpRegOffset = 0x0;
mos7840_port->ControlRegOffset = 0x1;
mos7840_port->DcrRegOffset = 0x4;
} else if ((mos7840_port->port_num == 2) && (serial->num_ports == 4)) {
mos7840_port->SpRegOffset = 0x8;
mos7840_port->ControlRegOffset = 0x9;
mos7840_port->DcrRegOffset = 0x16;
} else if ((mos7840_port->port_num == 2) && (serial->num_ports == 2)) {
mos7840_port->SpRegOffset = 0xa;
mos7840_port->ControlRegOffset = 0xb;
mos7840_port->DcrRegOffset = 0x19;
} else if ((mos7840_port->port_num == 3) && (serial->num_ports == 4)) {
mos7840_port->SpRegOffset = 0xa;
mos7840_port->ControlRegOffset = 0xb;
mos7840_port->DcrRegOffset = 0x19;
} else if ((mos7840_port->port_num == 4) && (serial->num_ports == 4)) {
mos7840_port->SpRegOffset = 0xc;
mos7840_port->ControlRegOffset = 0xd;
mos7840_port->DcrRegOffset = 0x1c;
}
mos7840_dump_serial_port(port, mos7840_port);
mos7840_set_port_private(port, mos7840_port);
/* enable rx_disable bit in control register */
status = mos7840_get_reg_sync(port,
mos7840_port->ControlRegOffset, &Data);
if (status < 0) {
dev_dbg(&port->dev, "Reading ControlReg failed status-0x%x\n", status);
goto out;
} else
dev_dbg(&port->dev, "ControlReg Reading success val is %x, status%d\n", Data, status);
Data |= 0x08; /* setting driver done bit */
Data |= 0x04; /* set sp1_bit so CTS changes are reflected in the
modem status register */
/* Data |= 0x20; //rx_disable bit */
status = mos7840_set_reg_sync(port,
mos7840_port->ControlRegOffset, Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing ControlReg failed(rx_disable) status-0x%x\n", status);
goto out;
} else
dev_dbg(&port->dev, "ControlReg Writing success(rx_disable) status%d\n", status);
/* Write default values to the DCRs (i.e. 0x01 in DCR0, 0x05 in DCR1
and 0x24 in DCR2) */
Data = 0x01;
status = mos7840_set_reg_sync(port,
(__u16) (mos7840_port->DcrRegOffset + 0), Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing DCR0 failed status-0x%x\n", status);
goto out;
} else
dev_dbg(&port->dev, "DCR0 Writing success status%d\n", status);
Data = 0x05;
status = mos7840_set_reg_sync(port,
(__u16) (mos7840_port->DcrRegOffset + 1), Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing DCR1 failed status-0x%x\n", status);
goto out;
} else
dev_dbg(&port->dev, "DCR1 Writing success status%d\n", status);
Data = 0x24;
status = mos7840_set_reg_sync(port,
(__u16) (mos7840_port->DcrRegOffset + 2), Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing DCR2 failed status-0x%x\n", status);
goto out;
} else
dev_dbg(&port->dev, "DCR2 Writing success status%d\n", status);
/* write 0x0 to the clock start register and 0x20 to the clock multiplier register */
Data = 0x0;
status = mos7840_set_reg_sync(port, CLK_START_VALUE_REGISTER, Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing CLK_START_VALUE_REGISTER failed status-0x%x\n", status);
goto out;
} else
dev_dbg(&port->dev, "CLK_START_VALUE_REGISTER Writing success status%d\n", status);
Data = 0x20;
status = mos7840_set_reg_sync(port, CLK_MULTI_REGISTER, Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing CLK_MULTI_REGISTER failed status-0x%x\n", status);
goto error;
} else
dev_dbg(&port->dev, "CLK_MULTI_REGISTER Writing success status%d\n", status);
/* write value 0x0 to scratchpad register */
Data = 0x00;
status = mos7840_set_uart_reg(port, SCRATCH_PAD_REGISTER, Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing SCRATCH_PAD_REGISTER failed status-0x%x\n", status);
goto out;
} else
dev_dbg(&port->dev, "SCRATCH_PAD_REGISTER Writing success status%d\n", status);
/* Zero Length flag register */
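/*
* On a 2-port device the second port's ZLP register sits one slot
* further along (ZLP_REG1 + port_num) than in the 4-port layout
* (ZLP_REG1 + port_num - 1), hence the two indexing branches below.
*/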
if ((mos7840_port->port_num != 1) && (serial->num_ports == 2)) {
Data = 0xff;
status = mos7840_set_reg_sync(port,
(__u16) (ZLP_REG1 +
((__u16)mos7840_port->port_num)), Data);
dev_dbg(&port->dev, "ZLIP offset %x\n",
(__u16)(ZLP_REG1 + ((__u16) mos7840_port->port_num)));
if (status < 0) {
dev_dbg(&port->dev, "Writing ZLP_REG%d failed status-0x%x\n", pnum + 2, status);
goto out;
} else
dev_dbg(&port->dev, "ZLP_REG%d Writing success status%d\n", pnum + 2, status);
} else {
Data = 0xff;
status = mos7840_set_reg_sync(port,
(__u16) (ZLP_REG1 +
((__u16)mos7840_port->port_num) - 0x1), Data);
dev_dbg(&port->dev, "ZLIP offset %x\n",
(__u16)(ZLP_REG1 + ((__u16) mos7840_port->port_num) - 0x1));
if (status < 0) {
dev_dbg(&port->dev, "Writing ZLP_REG%d failed status-0x%x\n", pnum + 1, status);
goto out;
} else
dev_dbg(&port->dev, "ZLP_REG%d Writing success status%d\n", pnum + 1, status);
}
mos7840_port->control_urb = usb_alloc_urb(0, GFP_KERNEL);
mos7840_port->ctrl_buf = kmalloc(16, GFP_KERNEL);
mos7840_port->dr = kmalloc(sizeof(struct usb_ctrlrequest),
GFP_KERNEL);
if (!mos7840_port->control_urb || !mos7840_port->ctrl_buf ||
!mos7840_port->dr) {
status = -ENOMEM;
goto error;
}
mos7840_port->has_led = false;
/* Initialize LED timers */
if (device_type == MOSCHIP_DEVICE_ID_7810) {
mos7840_port->has_led = true;
mos7840_port->led_urb = usb_alloc_urb(0, GFP_KERNEL);
mos7840_port->led_dr = kmalloc(sizeof(*mos7840_port->led_dr),
GFP_KERNEL);
if (!mos7840_port->led_urb || !mos7840_port->led_dr) {
status = -ENOMEM;
goto error;
}
init_timer(&mos7840_port->led_timer1);
mos7840_port->led_timer1.function = mos7840_led_off;
mos7840_port->led_timer1.expires =
jiffies + msecs_to_jiffies(LED_ON_MS);
mos7840_port->led_timer1.data = (unsigned long)mos7840_port;
init_timer(&mos7840_port->led_timer2);
mos7840_port->led_timer2.function = mos7840_led_flag_off;
mos7840_port->led_timer2.expires =
jiffies + msecs_to_jiffies(LED_OFF_MS);
mos7840_port->led_timer2.data = (unsigned long)mos7840_port;
/* Turn off LED */
mos7840_set_led_sync(port, MODEM_CONTROL_REGISTER, 0x0300);
}
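/*
* Note: most register-programming failures above jump to 'out' and are
* treated as non-fatal (the probe still returns 0); only allocation
* failures and the CLK_MULTI/ZLP_REG5 writes unwind through 'error'.
*/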
out:
if (pnum == serial->num_ports - 1) {
/* Zero Length flag enable */
Data = 0x0f;
status = mos7840_set_reg_sync(serial->port[0], ZLP_REG5, Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing ZLP_REG5 failed status-0x%x\n", status);
goto error;
} else
dev_dbg(&port->dev, "ZLP_REG5 Writing success status%d\n", status);
/* setting configuration feature to one */
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
0x03, 0x00, 0x01, 0x00, NULL, 0x00,
MOS_WDR_TIMEOUT);
}
return 0;
error:
kfree(mos7840_port->led_dr);
usb_free_urb(mos7840_port->led_urb);
kfree(mos7840_port->dr);
kfree(mos7840_port->ctrl_buf);
usb_free_urb(mos7840_port->control_urb);
kfree(mos7840_port);
return status;
}
static int mos7840_port_remove(struct usb_serial_port *port)
{
struct moschip_port *mos7840_port;
mos7840_port = mos7840_get_port_private(port);
if (mos7840_port->has_led) {
/* Turn off LED */
mos7840_set_led_sync(port, MODEM_CONTROL_REGISTER, 0x0300);
del_timer_sync(&mos7840_port->led_timer1);
del_timer_sync(&mos7840_port->led_timer2);
usb_kill_urb(mos7840_port->led_urb);
usb_free_urb(mos7840_port->led_urb);
kfree(mos7840_port->led_dr);
}
usb_kill_urb(mos7840_port->control_urb);
usb_free_urb(mos7840_port->control_urb);
kfree(mos7840_port->ctrl_buf);
kfree(mos7840_port->dr);
kfree(mos7840_port);
return 0;
}
static struct usb_serial_driver moschip7840_4port_device = {
.driver = {
.owner = THIS_MODULE,
.name = "mos7840",
},
.description = DRIVER_DESC,
.id_table = id_table,
.num_ports = 4,
.open = mos7840_open,
.close = mos7840_close,
.write = mos7840_write,
.write_room = mos7840_write_room,
.chars_in_buffer = mos7840_chars_in_buffer,
.throttle = mos7840_throttle,
.unthrottle = mos7840_unthrottle,
.calc_num_ports = mos7840_calc_num_ports,
.probe = mos7840_probe,
.ioctl = mos7840_ioctl,
.set_termios = mos7840_set_termios,
.break_ctl = mos7840_break,
.tiocmget = mos7840_tiocmget,
.tiocmset = mos7840_tiocmset,
.tiocmiwait = usb_serial_generic_tiocmiwait,
.get_icount = usb_serial_generic_get_icount,
.port_probe = mos7840_port_probe,
.port_remove = mos7840_port_remove,
.read_bulk_callback = mos7840_bulk_in_callback,
.read_int_callback = mos7840_interrupt_callback,
};
static struct usb_serial_driver * const serial_drivers[] = {
&moschip7840_4port_device, NULL
};
module_usb_serial_driver(serial_drivers, id_table);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| gpl-2.0 |
theboleslaw/hammerhead_kernel | drivers/media/platform/msm/camera_v1/mercury/msm_mercury_dev.c | 1609 | 6733 | /* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
#include <media/msm_mercury.h>
#include <mach/board.h>
#include "msm_mercury_sync.h"
#include "msm_mercury_common.h"
#include "msm.h"
#define MSM_MERCURY_NAME "mercury"
static int msm_mercury_open(struct inode *inode, struct file *filp)
{
int rc;
struct msm_mercury_device *pmercury_dev = container_of(inode->i_cdev,
struct msm_mercury_device, cdev);
filp->private_data = pmercury_dev;
MCR_DBG("\n---(%d)%s()\n", __LINE__, __func__);
rc = __msm_mercury_open(pmercury_dev);
MCR_DBG("%s:%d] %s open_count = %d\n", __func__, __LINE__,
filp->f_path.dentry->d_name.name, pmercury_dev->open_count);
return rc;
}
static int msm_mercury_release(struct inode *inode, struct file *filp)
{
int rc;
struct msm_mercury_device *pmercury_dev = filp->private_data;
MCR_DBG("\n---(%d)%s()\n", __LINE__, __func__);
rc = __msm_mercury_release(pmercury_dev);
MCR_DBG("%s:%d] %s open_count = %d\n", __func__, __LINE__,
filp->f_path.dentry->d_name.name, pmercury_dev->open_count);
return rc;
}
static long msm_mercury_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
int rc;
struct msm_mercury_device *pmercury_dev = filp->private_data;
rc = __msm_mercury_ioctl(pmercury_dev, cmd, arg);
return rc;
}
static const struct file_operations msm_mercury_fops = {
.owner = THIS_MODULE,
.open = msm_mercury_open,
.release = msm_mercury_release,
.unlocked_ioctl = msm_mercury_ioctl,
};
static struct class *msm_mercury_class;
static dev_t msm_mercury_devno;
static struct msm_mercury_device *msm_mercury_device_p;
int msm_mercury_subdev_init(struct v4l2_subdev *mercury_sd)
{
int rc;
struct msm_mercury_device *pgmn_dev =
(struct msm_mercury_device *)mercury_sd->host_priv;
MCR_DBG("%s:%d: mercury_sd=0x%x pgmn_dev=0x%x\n",
__func__, __LINE__, (uint32_t)mercury_sd, (uint32_t)pgmn_dev);
rc = __msm_mercury_open(pgmn_dev);
MCR_DBG("%s:%d: rc=%d\n",
__func__, __LINE__, rc);
return rc;
}
static long msm_mercury_subdev_ioctl(struct v4l2_subdev *sd,
unsigned int cmd, void *arg)
{
long rc;
struct msm_mercury_device *pgmn_dev =
(struct msm_mercury_device *)sd->host_priv;
MCR_DBG("%s: cmd=%d\n", __func__, cmd);
MCR_DBG("%s: pgmn_dev 0x%x", __func__, (uint32_t)pgmn_dev);
MCR_DBG("%s: Calling __msm_mercury_ioctl\n", __func__);
rc = __msm_mercury_ioctl(pgmn_dev, cmd, (unsigned long)arg);
pr_debug("%s: X\n", __func__);
return rc;
}
void msm_mercury_subdev_release(struct v4l2_subdev *mercury_sd)
{
int rc;
struct msm_mercury_device *pgmn_dev =
(struct msm_mercury_device *)mercury_sd->host_priv;
MCR_DBG("%s:pgmn_dev=0x%x", __func__, (uint32_t)pgmn_dev);
rc = __msm_mercury_release(pgmn_dev);
MCR_DBG("%s:rc=%d", __func__, rc);
}
static const struct v4l2_subdev_core_ops msm_mercury_subdev_core_ops = {
.ioctl = msm_mercury_subdev_ioctl,
};
static const struct v4l2_subdev_ops msm_mercury_subdev_ops = {
.core = &msm_mercury_subdev_core_ops,
};
static int msm_mercury_init(struct platform_device *pdev)
{
int rc = -1;
struct device *dev;
MCR_DBG("%s:\n", __func__);
msm_mercury_device_p = __msm_mercury_init(pdev);
if (msm_mercury_device_p == NULL) {
MCR_PR_ERR("%s: initialization failed\n", __func__);
goto fail;
}
v4l2_subdev_init(&msm_mercury_device_p->subdev,
&msm_mercury_subdev_ops);
v4l2_set_subdev_hostdata(&msm_mercury_device_p->subdev,
msm_mercury_device_p);
pr_debug("%s: msm_mercury_device_p 0x%x", __func__,
(uint32_t)msm_mercury_device_p);
MCR_DBG("%s:mercury: platform_set_drvdata\n", __func__);
platform_set_drvdata(pdev, &msm_mercury_device_p->subdev);
rc = alloc_chrdev_region(&msm_mercury_devno, 0, 1, MSM_MERCURY_NAME);
if (rc < 0) {
MCR_PR_ERR("%s: failed to allocate chrdev\n", __func__);
goto fail_1;
}
if (!msm_mercury_class) {
msm_mercury_class = class_create(THIS_MODULE, MSM_MERCURY_NAME);
if (IS_ERR(msm_mercury_class)) {
rc = PTR_ERR(msm_mercury_class);
MCR_PR_ERR("%s: create device class failed\n",
__func__);
goto fail_2;
}
}
dev = device_create(msm_mercury_class, NULL,
MKDEV(MAJOR(msm_mercury_devno), MINOR(msm_mercury_devno)), NULL,
"%s%d", MSM_MERCURY_NAME, 0);
if (IS_ERR(dev)) {
MCR_PR_ERR("%s: error creating device\n", __func__);
rc = -ENODEV;
goto fail_3;
}
cdev_init(&msm_mercury_device_p->cdev, &msm_mercury_fops);
msm_mercury_device_p->cdev.owner = THIS_MODULE;
msm_mercury_device_p->cdev.ops =
(const struct file_operations *) &msm_mercury_fops;
rc = cdev_add(&msm_mercury_device_p->cdev, msm_mercury_devno, 1);
if (rc < 0) {
MCR_PR_ERR("%s: error adding cdev\n", __func__);
rc = -ENODEV;
goto fail_4;
}
MCR_DBG("%s %s: success\n", __func__, MSM_MERCURY_NAME);
return rc;
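/* Error unwinding below runs in reverse order of setup: device node,
* class, chrdev region, then the core mercury state. */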
fail_4:
device_destroy(msm_mercury_class, msm_mercury_devno);
fail_3:
class_destroy(msm_mercury_class);
fail_2:
unregister_chrdev_region(msm_mercury_devno, 1);
fail_1:
__msm_mercury_exit(msm_mercury_device_p);
fail:
return rc;
}
static void msm_mercury_exit(void)
{
cdev_del(&msm_mercury_device_p->cdev);
device_destroy(msm_mercury_class, msm_mercury_devno);
class_destroy(msm_mercury_class);
unregister_chrdev_region(msm_mercury_devno, 1);
__msm_mercury_exit(msm_mercury_device_p);
}
static int __msm_mercury_probe(struct platform_device *pdev)
{
return msm_mercury_init(pdev);
}
static int __msm_mercury_remove(struct platform_device *pdev)
{
msm_mercury_exit();
return 0;
}
static struct platform_driver msm_mercury_driver = {
.probe = __msm_mercury_probe,
.remove = __msm_mercury_remove,
.driver = {
.name = MSM_MERCURY_DRV_NAME,
.owner = THIS_MODULE,
},
};
static int __init msm_mercury_driver_init(void)
{
int rc;
rc = platform_driver_register(&msm_mercury_driver);
return rc;
}
static void __exit msm_mercury_driver_exit(void)
{
platform_driver_unregister(&msm_mercury_driver);
}
MODULE_DESCRIPTION("msm mercury jpeg driver");
module_init(msm_mercury_driver_init);
module_exit(msm_mercury_driver_exit);
| gpl-2.0 |
spacecaker/android_kernel_acer_swing_cm10.1_MDP4 | net/ipv4/netfilter/nf_nat_standalone.c | 3145 | 8411 | /* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/ip.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <net/ip.h>
#include <net/checksum.h>
#include <linux/spinlock.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_rule.h>
#include <net/netfilter/nf_nat_protocol.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#ifdef CONFIG_XFRM
static void nat_decode_session(struct sk_buff *skb, struct flowi *fl)
{
struct flowi4 *fl4 = &fl->u.ip4;
const struct nf_conn *ct;
const struct nf_conntrack_tuple *t;
enum ip_conntrack_info ctinfo;
enum ip_conntrack_dir dir;
unsigned long statusbit;
ct = nf_ct_get(skb, &ctinfo);
if (ct == NULL)
return;
dir = CTINFO2DIR(ctinfo);
t = &ct->tuplehash[dir].tuple;
if (dir == IP_CT_DIR_ORIGINAL)
statusbit = IPS_DST_NAT;
else
statusbit = IPS_SRC_NAT;
if (ct->status & statusbit) {
fl4->daddr = t->dst.u3.ip;
if (t->dst.protonum == IPPROTO_TCP ||
t->dst.protonum == IPPROTO_UDP ||
t->dst.protonum == IPPROTO_UDPLITE ||
t->dst.protonum == IPPROTO_DCCP ||
t->dst.protonum == IPPROTO_SCTP)
fl4->fl4_dport = t->dst.u.tcp.port;
}
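/*
* IPS_NAT_MASK is exactly IPS_SRC_NAT | IPS_DST_NAT, so XOR-ing with
* it toggles the status bit to the opposite manip type before the
* source-address half of the flow is decoded below.
*/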
statusbit ^= IPS_NAT_MASK;
if (ct->status & statusbit) {
fl4->saddr = t->src.u3.ip;
if (t->dst.protonum == IPPROTO_TCP ||
t->dst.protonum == IPPROTO_UDP ||
t->dst.protonum == IPPROTO_UDPLITE ||
t->dst.protonum == IPPROTO_DCCP ||
t->dst.protonum == IPPROTO_SCTP)
fl4->fl4_sport = t->src.u.tcp.port;
}
}
#endif
static unsigned int
nf_nat_fn(unsigned int hooknum,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
struct nf_conn_nat *nat;
/* maniptype == SRC for postrouting. */
enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);
/* We never see fragments: conntrack defrags on pre-routing
and local-out, and nf_nat_out protects post-routing. */
NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb)));
ct = nf_ct_get(skb, &ctinfo);
/* Can't track? It's not due to stress, or conntrack would
have dropped it. Hence it's the user's responsibility to
packet filter it out, or implement conntrack/NAT for that
protocol. 8) --RR */
if (!ct)
return NF_ACCEPT;
/* Don't try to NAT if this packet is not conntracked */
if (nf_ct_is_untracked(ct))
return NF_ACCEPT;
nat = nfct_nat(ct);
if (!nat) {
/* NAT module was loaded late. */
if (nf_ct_is_confirmed(ct))
return NF_ACCEPT;
nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
if (nat == NULL) {
pr_debug("failed to add NAT extension\n");
return NF_ACCEPT;
}
}
switch (ctinfo) {
case IP_CT_RELATED:
case IP_CT_RELATED_REPLY:
if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
if (!nf_nat_icmp_reply_translation(ct, ctinfo,
hooknum, skb))
return NF_DROP;
else
return NF_ACCEPT;
}
/* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */
case IP_CT_NEW:
/* Seen it before? This can happen for loopback, retrans,
or local packets.. */
if (!nf_nat_initialized(ct, maniptype)) {
unsigned int ret;
ret = nf_nat_rule_find(skb, hooknum, in, out, ct);
if (ret != NF_ACCEPT)
return ret;
} else
pr_debug("Already setup manip %s for ct %p\n",
maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
ct);
break;
default:
/* ESTABLISHED */
NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
ctinfo == IP_CT_ESTABLISHED_REPLY);
}
return nf_nat_packet(ct, ctinfo, hooknum, skb);
}
static unsigned int
nf_nat_in(unsigned int hooknum,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
unsigned int ret;
__be32 daddr = ip_hdr(skb)->daddr;
ret = nf_nat_fn(hooknum, skb, in, out, okfn);
if (ret != NF_DROP && ret != NF_STOLEN &&
daddr != ip_hdr(skb)->daddr)
skb_dst_drop(skb);
return ret;
}
static unsigned int
nf_nat_out(unsigned int hooknum,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
#ifdef CONFIG_XFRM
const struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
#endif
unsigned int ret;
/* root is playing with raw sockets. */
if (skb->len < sizeof(struct iphdr) ||
ip_hdrlen(skb) < sizeof(struct iphdr))
return NF_ACCEPT;
ret = nf_nat_fn(hooknum, skb, in, out, okfn);
#ifdef CONFIG_XFRM
if (ret != NF_DROP && ret != NF_STOLEN &&
(ct = nf_ct_get(skb, &ctinfo)) != NULL) {
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
if ((ct->tuplehash[dir].tuple.src.u3.ip !=
ct->tuplehash[!dir].tuple.dst.u3.ip) ||
(ct->tuplehash[dir].tuple.src.u.all !=
ct->tuplehash[!dir].tuple.dst.u.all)
)
return ip_xfrm_me_harder(skb) == 0 ? ret : NF_DROP;
}
#endif
return ret;
}
static unsigned int
nf_nat_local_fn(unsigned int hooknum,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
const struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
unsigned int ret;
/* root is playing with raw sockets. */
if (skb->len < sizeof(struct iphdr) ||
ip_hdrlen(skb) < sizeof(struct iphdr))
return NF_ACCEPT;
ret = nf_nat_fn(hooknum, skb, in, out, okfn);
if (ret != NF_DROP && ret != NF_STOLEN &&
(ct = nf_ct_get(skb, &ctinfo)) != NULL) {
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
if (ct->tuplehash[dir].tuple.dst.u3.ip !=
ct->tuplehash[!dir].tuple.src.u3.ip) {
if (ip_route_me_harder(skb, RTN_UNSPEC))
ret = NF_DROP;
}
#ifdef CONFIG_XFRM
else if (ct->tuplehash[dir].tuple.dst.u.all !=
ct->tuplehash[!dir].tuple.src.u.all)
if (ip_xfrm_me_harder(skb))
ret = NF_DROP;
#endif
}
return ret;
}
/* We must be after connection tracking and before packet filtering. */
static struct nf_hook_ops nf_nat_ops[] __read_mostly = {
/* Before packet filtering, change destination */
{
.hook = nf_nat_in,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP_PRI_NAT_DST,
},
/* After packet filtering, change source */
{
.hook = nf_nat_out,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP_PRI_NAT_SRC,
},
/* Before packet filtering, change destination */
{
.hook = nf_nat_local_fn,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_NAT_DST,
},
/* After packet filtering, change source */
{
.hook = nf_nat_fn,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_NAT_SRC,
},
};
static int __init nf_nat_standalone_init(void)
{
int ret = 0;
need_ipv4_conntrack();
#ifdef CONFIG_XFRM
BUG_ON(ip_nat_decode_session != NULL);
RCU_INIT_POINTER(ip_nat_decode_session, nat_decode_session);
#endif
ret = nf_nat_rule_init();
if (ret < 0) {
pr_err("nf_nat_init: can't setup rules.\n");
goto cleanup_decode_session;
}
ret = nf_register_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops));
if (ret < 0) {
pr_err("nf_nat_init: can't register hooks.\n");
goto cleanup_rule_init;
}
return ret;
cleanup_rule_init:
nf_nat_rule_cleanup();
cleanup_decode_session:
#ifdef CONFIG_XFRM
RCU_INIT_POINTER(ip_nat_decode_session, NULL);
synchronize_net();
#endif
return ret;
}
static void __exit nf_nat_standalone_fini(void)
{
nf_unregister_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops));
nf_nat_rule_cleanup();
#ifdef CONFIG_XFRM
RCU_INIT_POINTER(ip_nat_decode_session, NULL);
synchronize_net();
#endif
/* Conntrack caches are unregistered in nf_conntrack_cleanup */
}
module_init(nf_nat_standalone_init);
module_exit(nf_nat_standalone_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS("ip_nat");
| gpl-2.0 |
bilalliberty/android_kernel_htc_msm8930 | lib/mpi/mpi-inline.c | 4937 | 1165 | /* mpi-inline.c
* Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc.
*
* This file is part of GnuPG.
*
* GnuPG is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* GnuPG is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
/* put the inline functions as real functions into the lib */
#define G10_MPI_INLINE_DECL
#include "mpi-internal.h"
/* always include the header because it is only
* included by mpi-internal if __GCC__ is defined but we
* need it here in all cases and the above definition
* of the macro allows us to do so
*/
#include "mpi-inline.h"
| gpl-2.0 |
vakkov/android_kernel_samsung_tuna | drivers/gpu/drm/nouveau/nouveau_ioc32.c | 9289 | 2223 | /**
* \file nouveau_ioc32.c
*
* 32-bit ioctl compatibility routines for the nouveau DRM.
*
* \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich
*
*
* Copyright (C) Paul Mackerras 2005
* Copyright (C) Egbert Eich 2003,2004
* Copyright (C) Dave Airlie 2005
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
/**
* Called whenever a 32-bit process running under a 64-bit kernel
* performs an ioctl on /dev/dri/card<n>.
*
* \param filp file pointer.
* \param cmd command.
* \param arg user argument.
* \return zero on success or negative number on failure.
*/
long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
unsigned int nr = DRM_IOCTL_NR(cmd);
drm_ioctl_compat_t *fn = NULL;
int ret;
if (nr < DRM_COMMAND_BASE)
return drm_compat_ioctl(filp, cmd, arg);
#if 0
if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE];
#endif
if (fn != NULL)
ret = (*fn)(filp, cmd, arg);
else
ret = drm_ioctl(filp, cmd, arg);
return ret;
}
| gpl-2.0 |
nardholio/android_kernel_google_msm | drivers/oprofile/oprofile_files.c | 9545 | 4812 | /**
* @file oprofile_files.c
*
* @remark Copyright 2002 OProfile authors
* @remark Read the file COPYING
*
* @author John Levon <levon@movementarian.org>
*/
#include <linux/fs.h>
#include <linux/oprofile.h>
#include <linux/jiffies.h>
#include "event_buffer.h"
#include "oprofile_stats.h"
#include "oprof.h"
#define BUFFER_SIZE_DEFAULT 131072
#define CPU_BUFFER_SIZE_DEFAULT 8192
#define BUFFER_WATERSHED_DEFAULT 32768 /* FIXME: tune */
#define TIME_SLICE_DEFAULT 1
unsigned long oprofile_buffer_size;
unsigned long oprofile_cpu_buffer_size;
unsigned long oprofile_buffer_watershed;
unsigned long oprofile_time_slice;
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
static ssize_t timeout_read(struct file *file, char __user *buf,
size_t count, loff_t *offset)
{
return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
buf, count, offset);
}
static ssize_t timeout_write(struct file *file, char const __user *buf,
size_t count, loff_t *offset)
{
unsigned long val;
int retval;
if (*offset)
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
if (retval <= 0)
return retval;
retval = oprofile_set_timeout(val);
if (retval)
return retval;
return count;
}
static const struct file_operations timeout_fops = {
.read = timeout_read,
.write = timeout_write,
.llseek = default_llseek,
};
#endif
static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
{
return oprofilefs_ulong_to_user(oprofile_backtrace_depth, buf, count,
offset);
}
static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
{
unsigned long val;
int retval;
if (*offset)
return -EINVAL;
if (!oprofile_ops.backtrace)
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
if (retval <= 0)
return retval;
retval = oprofile_set_ulong(&oprofile_backtrace_depth, val);
if (retval)
return retval;
return count;
}
static const struct file_operations depth_fops = {
.read = depth_read,
.write = depth_write,
.llseek = default_llseek,
};
static ssize_t pointer_size_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
{
return oprofilefs_ulong_to_user(sizeof(void *), buf, count, offset);
}
static const struct file_operations pointer_size_fops = {
.read = pointer_size_read,
.llseek = default_llseek,
};
static ssize_t cpu_type_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
{
return oprofilefs_str_to_user(oprofile_ops.cpu_type, buf, count, offset);
}
static const struct file_operations cpu_type_fops = {
.read = cpu_type_read,
.llseek = default_llseek,
};
static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
{
return oprofilefs_ulong_to_user(oprofile_started, buf, count, offset);
}
static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
{
unsigned long val;
int retval;
if (*offset)
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
if (retval <= 0)
return retval;
retval = 0;
if (val)
retval = oprofile_start();
else
oprofile_stop();
if (retval)
return retval;
return count;
}
static const struct file_operations enable_fops = {
.read = enable_read,
.write = enable_write,
.llseek = default_llseek,
};
static ssize_t dump_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
{
wake_up_buffer_waiter();
return count;
}
static const struct file_operations dump_fops = {
.write = dump_write,
.llseek = noop_llseek,
};
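/*
* Populate the oprofilefs root (conventionally mounted at
* /dev/oprofile) with the control and statistics files defined above.
* Defaults are reset on every (re)mount so stale settings from a
* previous session are not carried over.
*/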
void oprofile_create_files(struct super_block *sb, struct dentry *root)
{
/* reinitialize default values */
oprofile_buffer_size = BUFFER_SIZE_DEFAULT;
oprofile_cpu_buffer_size = CPU_BUFFER_SIZE_DEFAULT;
oprofile_buffer_watershed = BUFFER_WATERSHED_DEFAULT;
oprofile_time_slice = msecs_to_jiffies(TIME_SLICE_DEFAULT);
oprofilefs_create_file(sb, root, "enable", &enable_fops);
oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
oprofilefs_create_ulong(sb, root, "buffer_size", &oprofile_buffer_size);
oprofilefs_create_ulong(sb, root, "buffer_watershed", &oprofile_buffer_watershed);
oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &oprofile_cpu_buffer_size);
oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops);
oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops);
oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops);
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
oprofilefs_create_file(sb, root, "time_slice", &timeout_fops);
#endif
oprofile_create_stats_files(sb, root);
if (oprofile_ops.create_files)
oprofile_ops.create_files(sb, root);
}
| gpl-2.0 |
GalaxyTab101/samsung-kernel-galaxytab101 | arch/powerpc/platforms/embedded6xx/ls_uart.c | 14409 | 3500 | /*
* AVR power-management chip interface for the Buffalo Linkstation /
* Kurobox Platform.
*
* Author: 2006 (c) G. Liakhovetski
* g.liakhovetski@gmx.de
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of
* any kind, whether express or implied.
*/
#include <linux/workqueue.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/serial_reg.h>
#include <linux/serial_8250.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/termbits.h>
#include "mpc10x.h"
static void __iomem *avr_addr;
static unsigned long avr_clock;
static struct work_struct wd_work;
static void wd_stop(struct work_struct *unused)
{
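/*
* Disarm sequence for the AVR watchdog. Each command character is
* sent as a run of four (cf. avr_uart_send() below), so the string is
* made up of four-character groups.
*/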
const char string[] = "AAAAFFFFJJJJ>>>>VVVV>>>>ZZZZVVVVKKKK";
int i = 0, rescue = 8;
int len = strlen(string);
while (rescue--) {
int j;
char lsr = in_8(avr_addr + UART_LSR);
if (lsr & (UART_LSR_THRE | UART_LSR_TEMT)) {
for (j = 0; j < 16 && i < len; j++, i++)
out_8(avr_addr + UART_TX, string[i]);
if (i == len) {
/* Read "OK" back: 4ms for the last "KKKK"
plus a couple bytes back */
msleep(7);
printk("linkstation: disarming the AVR watchdog: ");
while (in_8(avr_addr + UART_LSR) & UART_LSR_DR)
printk("%c", in_8(avr_addr + UART_RX));
break;
}
}
msleep(17);
}
printk("\n");
}
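/*
* Standard 16550 baud-rate divisor for 9600 baud:
* divisor = clock / (16 * 9600); adding half the denominator
* (8 * 9600) before dividing rounds to nearest instead of truncating.
*/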
#define AVR_QUOT(clock) (((clock) + 8 * 9600) / (16 * 9600))
void avr_uart_configure(void)
{
unsigned char cval = UART_LCR_WLEN8;
unsigned int quot = AVR_QUOT(avr_clock);
if (!avr_addr || !avr_clock)
return;
out_8(avr_addr + UART_LCR, cval); /* initialise UART */
out_8(avr_addr + UART_MCR, 0);
out_8(avr_addr + UART_IER, 0);
cval |= UART_LCR_STOP | UART_LCR_PARITY | UART_LCR_EPAR;
out_8(avr_addr + UART_LCR, cval); /* Set character format */
out_8(avr_addr + UART_LCR, cval | UART_LCR_DLAB); /* set DLAB */
out_8(avr_addr + UART_DLL, quot & 0xff); /* LS of divisor */
out_8(avr_addr + UART_DLM, quot >> 8); /* MS of divisor */
out_8(avr_addr + UART_LCR, cval); /* reset DLAB */
out_8(avr_addr + UART_FCR, UART_FCR_ENABLE_FIFO); /* enable FIFO */
}
void avr_uart_send(const char c)
{
if (!avr_addr || !avr_clock)
return;
out_8(avr_addr + UART_TX, c);
out_8(avr_addr + UART_TX, c);
out_8(avr_addr + UART_TX, c);
out_8(avr_addr + UART_TX, c);
}
static void __init ls_uart_init(void)
{
local_irq_disable();
#ifndef CONFIG_SERIAL_8250
out_8(avr_addr + UART_FCR, UART_FCR_ENABLE_FIFO); /* enable FIFO */
out_8(avr_addr + UART_FCR, UART_FCR_ENABLE_FIFO |
UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); /* clear FIFOs */
out_8(avr_addr + UART_FCR, 0);
out_8(avr_addr + UART_IER, 0);
/* Clear up interrupts */
(void) in_8(avr_addr + UART_LSR);
(void) in_8(avr_addr + UART_RX);
(void) in_8(avr_addr + UART_IIR);
(void) in_8(avr_addr + UART_MSR);
#endif
avr_uart_configure();
local_irq_enable();
}
static int __init ls_uarts_init(void)
{
struct device_node *avr;
phys_addr_t phys_addr;
int len;
avr = of_find_node_by_path("/soc10x/serial@80004500");
if (!avr)
return -EINVAL;
avr_clock = *(u32*)of_get_property(avr, "clock-frequency", &len);
phys_addr = ((u32*)of_get_property(avr, "reg", &len))[0];
if (!avr_clock || !phys_addr)
return -EINVAL;
avr_addr = ioremap(phys_addr, 32);
if (!avr_addr)
return -EFAULT;
ls_uart_init();
INIT_WORK(&wd_work, wd_stop);
schedule_work(&wd_work);
return 0;
}
machine_late_initcall(linkstation, ls_uarts_init);
| gpl-2.0 |
cphelps76/elite_kernel_tuna | drivers/video/riva/nv_driver.c | 14665 | 9688 | /* $XConsortium: nv_driver.c /main/3 1996/10/28 05:13:37 kaleb $ */
/*
* Copyright 1996-1997 David J. McKay
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* DAVID J. MCKAY BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* GPL licensing note -- nVidia is allowing a liberal interpretation of
* the documentation restriction above, to merely say that this nVidia's
* copyright and disclaimer should be included with all code derived
* from this source. -- Jeff Garzik <jgarzik@pobox.com>, 01/Nov/99
*/
/* Hacked together from mga driver and 3.3.4 NVIDIA driver by Jarno Paananen
<jpaana@s2.org> */
/* $XFree86: xc/programs/Xserver/hw/xfree86/drivers/nv/nv_setup.c,v 1.18 2002/08/05 20:47:06 mvojkovi Exp $ */
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include "nv_type.h"
#include "rivafb.h"
#include "nvreg.h"
#define PFX "rivafb: "
static inline unsigned char MISCin(struct riva_par *par)
{
return (VGA_RD08(par->riva.PVIO, 0x3cc));
}
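/*
* Crude load detection: drive a test level out of the requested head's
* DAC and sample the sense bit (bit 28 of PRAMDAC+0x608) to see
* whether a monitor is attached; the clobbered registers are restored
* before returning.
*/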
static Bool
riva_is_connected(struct riva_par *par, Bool second)
{
volatile U032 __iomem *PRAMDAC = par->riva.PRAMDAC0;
U032 reg52C, reg608;
Bool present;
if(second) PRAMDAC += 0x800;
reg52C = NV_RD32(PRAMDAC, 0x052C);
reg608 = NV_RD32(PRAMDAC, 0x0608);
NV_WR32(PRAMDAC, 0x0608, reg608 & ~0x00010000);
NV_WR32(PRAMDAC, 0x052C, reg52C & 0x0000FEEE);
mdelay(1);
NV_WR32(PRAMDAC, 0x052C, NV_RD32(PRAMDAC, 0x052C) | 1);
NV_WR32(par->riva.PRAMDAC0, 0x0610, 0x94050140);
NV_WR32(par->riva.PRAMDAC0, 0x0608, 0x00001000);
mdelay(1);
present = (NV_RD32(PRAMDAC, 0x0608) & (1 << 28)) ? TRUE : FALSE;
NV_WR32(par->riva.PRAMDAC0, 0x0608,
NV_RD32(par->riva.PRAMDAC0, 0x0608) & 0x0000EFFF);
NV_WR32(PRAMDAC, 0x052C, reg52C);
NV_WR32(PRAMDAC, 0x0608, reg608);
return present;
}
static void
riva_override_CRTC(struct riva_par *par)
{
printk(KERN_INFO PFX
"Detected CRTC controller %i being used\n",
par->SecondCRTC ? 1 : 0);
if(par->forceCRTC != -1) {
printk(KERN_INFO PFX
"Forcing usage of CRTC %i\n", par->forceCRTC);
par->SecondCRTC = par->forceCRTC;
}
}
static void
riva_is_second(struct riva_par *par)
{
if (par->FlatPanel == 1) {
switch(par->Chipset & 0xffff) {
case 0x0174:
case 0x0175:
case 0x0176:
case 0x0177:
case 0x0179:
case 0x017C:
case 0x017D:
case 0x0186:
case 0x0187:
/* this might not be a good default for the chips below */
case 0x0286:
case 0x028C:
case 0x0316:
case 0x0317:
case 0x031A:
case 0x031B:
case 0x031C:
case 0x031D:
case 0x031E:
case 0x031F:
case 0x0324:
case 0x0325:
case 0x0328:
case 0x0329:
case 0x032C:
case 0x032D:
par->SecondCRTC = TRUE;
break;
default:
par->SecondCRTC = FALSE;
break;
}
} else {
if(riva_is_connected(par, 0)) {
if (NV_RD32(par->riva.PRAMDAC0, 0x0000052C) & 0x100)
par->SecondCRTC = TRUE;
else
par->SecondCRTC = FALSE;
} else
if (riva_is_connected(par, 1)) {
if(NV_RD32(par->riva.PRAMDAC0, 0x0000252C) & 0x100)
par->SecondCRTC = TRUE;
else
par->SecondCRTC = FALSE;
} else /* default */
par->SecondCRTC = FALSE;
}
riva_override_CRTC(par);
}
unsigned long riva_get_memlen(struct riva_par *par)
{
RIVA_HW_INST *chip = &par->riva;
unsigned long memlen = 0;
unsigned int chipset = par->Chipset;
struct pci_dev* dev;
u32 amt;
switch (chip->Architecture) {
case NV_ARCH_03:
if (NV_RD32(chip->PFB, 0x00000000) & 0x00000020) {
if (((NV_RD32(chip->PMC, 0x00000000) & 0xF0) == 0x20)
&& ((NV_RD32(chip->PMC, 0x00000000)&0x0F)>=0x02)) {
/*
* SDRAM 128 ZX.
*/
switch (NV_RD32(chip->PFB,0x00000000) & 0x03) {
case 2:
memlen = 1024 * 4;
break;
case 1:
memlen = 1024 * 2;
break;
default:
memlen = 1024 * 8;
break;
}
} else {
memlen = 1024 * 8;
}
} else {
/*
* SGRAM 128.
*/
switch (NV_RD32(chip->PFB, 0x00000000) & 0x00000003) {
case 0:
memlen = 1024 * 8;
break;
case 2:
memlen = 1024 * 4;
break;
default:
memlen = 1024 * 2;
break;
}
}
break;
case NV_ARCH_04:
if (NV_RD32(chip->PFB, 0x00000000) & 0x00000100) {
memlen = ((NV_RD32(chip->PFB, 0x00000000)>>12)&0x0F) *
1024 * 2 + 1024 * 2;
} else {
switch (NV_RD32(chip->PFB, 0x00000000) & 0x00000003) {
case 0:
memlen = 1024 * 32;
break;
case 1:
memlen = 1024 * 4;
break;
case 2:
memlen = 1024 * 8;
break;
case 3:
default:
memlen = 1024 * 16;
break;
}
}
break;
case NV_ARCH_10:
case NV_ARCH_20:
case NV_ARCH_30:
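/*
* Integrated (nForce-class) GPUs have no dedicated VRAM; the size
* of the carved-out system memory is read from the host bridge at
* bus 0, devfn 1 instead of from the PFB registers.
*/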
if(chipset == NV_CHIP_IGEFORCE2) {
dev = pci_get_bus_and_slot(0, 1);
pci_read_config_dword(dev, 0x7C, &amt);
pci_dev_put(dev);
memlen = (((amt >> 6) & 31) + 1) * 1024;
} else if (chipset == NV_CHIP_0x01F0) {
dev = pci_get_bus_and_slot(0, 1);
pci_read_config_dword(dev, 0x84, &amt);
pci_dev_put(dev);
memlen = (((amt >> 4) & 127) + 1) * 1024;
} else {
switch ((NV_RD32(chip->PFB, 0x0000020C) >> 20) &
0x000000FF){
case 0x02:
memlen = 1024 * 2;
break;
case 0x04:
memlen = 1024 * 4;
break;
case 0x08:
memlen = 1024 * 8;
break;
case 0x10:
memlen = 1024 * 16;
break;
case 0x20:
memlen = 1024 * 32;
break;
case 0x40:
memlen = 1024 * 64;
break;
case 0x80:
memlen = 1024 * 128;
break;
default:
memlen = 1024 * 16;
break;
}
}
break;
}
return memlen;
}
unsigned long riva_get_maxdclk(struct riva_par *par)
{
RIVA_HW_INST *chip = &par->riva;
unsigned long dclk = 0;
switch (chip->Architecture) {
case NV_ARCH_03:
if (NV_RD32(chip->PFB, 0x00000000) & 0x00000020) {
if (((NV_RD32(chip->PMC, 0x00000000) & 0xF0) == 0x20)
&& ((NV_RD32(chip->PMC,0x00000000)&0x0F) >= 0x02)) {
/*
* SDRAM 128 ZX.
*/
dclk = 800000;
} else {
dclk = 1000000;
}
} else {
/*
* SGRAM 128.
*/
dclk = 1000000;
}
break;
case NV_ARCH_04:
case NV_ARCH_10:
case NV_ARCH_20:
case NV_ARCH_30:
switch ((NV_RD32(chip->PFB, 0x00000000) >> 3) & 0x00000003) {
case 3:
dclk = 800000;
break;
default:
dclk = 1000000;
break;
}
break;
}
return dclk;
}
void
riva_common_setup(struct riva_par *par)
{
par->riva.EnableIRQ = 0;
par->riva.PRAMDAC0 =
(volatile U032 __iomem *)(par->ctrl_base + 0x00680000);
par->riva.PFB =
(volatile U032 __iomem *)(par->ctrl_base + 0x00100000);
par->riva.PFIFO =
(volatile U032 __iomem *)(par->ctrl_base + 0x00002000);
par->riva.PGRAPH =
(volatile U032 __iomem *)(par->ctrl_base + 0x00400000);
par->riva.PEXTDEV =
(volatile U032 __iomem *)(par->ctrl_base + 0x00101000);
par->riva.PTIMER =
(volatile U032 __iomem *)(par->ctrl_base + 0x00009000);
par->riva.PMC =
(volatile U032 __iomem *)(par->ctrl_base + 0x00000000);
par->riva.FIFO =
(volatile U032 __iomem *)(par->ctrl_base + 0x00800000);
par->riva.PCIO0 = par->ctrl_base + 0x00601000;
par->riva.PDIO0 = par->ctrl_base + 0x00681000;
par->riva.PVIO = par->ctrl_base + 0x000C0000;
par->riva.IO = (MISCin(par) & 0x01) ? 0x3D0 : 0x3B0;
if (par->FlatPanel == -1) {
switch (par->Chipset & 0xffff) {
case 0x0112: /* known laptop chips */
case 0x0174:
case 0x0175:
case 0x0176:
case 0x0177:
case 0x0179:
case 0x017C:
case 0x017D:
case 0x0186:
case 0x0187:
case 0x0286:
case 0x028C:
case 0x0316:
case 0x0317:
case 0x031A:
case 0x031B:
case 0x031C:
case 0x031D:
case 0x031E:
case 0x031F:
case 0x0324:
case 0x0325:
case 0x0328:
case 0x0329:
case 0x032C:
case 0x032D:
printk(KERN_INFO PFX
"On a laptop. Assuming Digital Flat Panel\n");
par->FlatPanel = 1;
break;
default:
break;
}
}
switch (par->Chipset & 0x0ff0) {
case 0x0110:
if (par->Chipset == NV_CHIP_GEFORCE2_GO)
par->SecondCRTC = TRUE;
#if defined(__powerpc__)
if (par->FlatPanel == 1)
par->SecondCRTC = TRUE;
#endif
riva_override_CRTC(par);
break;
case 0x0170:
case 0x0180:
case 0x01F0:
case 0x0250:
case 0x0280:
case 0x0300:
case 0x0310:
case 0x0320:
case 0x0330:
case 0x0340:
riva_is_second(par);
break;
default:
break;
}
if (par->SecondCRTC) {
par->riva.PCIO = par->riva.PCIO0 + 0x2000;
par->riva.PCRTC = par->riva.PCRTC0 + 0x800;
par->riva.PRAMDAC = par->riva.PRAMDAC0 + 0x800;
par->riva.PDIO = par->riva.PDIO0 + 0x2000;
} else {
par->riva.PCIO = par->riva.PCIO0;
par->riva.PCRTC = par->riva.PCRTC0;
par->riva.PRAMDAC = par->riva.PRAMDAC0;
par->riva.PDIO = par->riva.PDIO0;
}
if (par->FlatPanel == -1) {
/* Fix me, need x86 DDC code */
par->FlatPanel = 0;
}
par->riva.flatPanel = (par->FlatPanel > 0) ? TRUE : FALSE;
RivaGetConfig(&par->riva, par->Chipset);
}
| gpl-2.0 |