Fields (each record lists them in this order):
repo_name: string
path: string
copies: string
size: string
content: string
license: string
repo_name: aatjitra/sgs3jb
path: arch/powerpc/kernel/smp-tbsync.c
copies: 3369
size: 3174
content:
/*
 * Smp timebase synchronization for ppc.
 *
 * Copyright (C) 2003 Samuel Rydh (samuel@ibrium.se)
 *
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/unistd.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/atomic.h>
#include <asm/smp.h>
#include <asm/time.h>

#define NUM_ITER	300

enum {
	kExit = 0, kSetAndTest, kTest
};

static struct {
	volatile u64	tb;
	volatile u64	mark;
	volatile int	cmd;
	volatile int	handshake;
	int		filler[2];

	volatile int	ack;
	int		filler2[7];

	volatile int	race_result;
} *tbsync;

static volatile int running;

static void __devinit enter_contest(u64 mark, long add)
{
	while (get_tb() < mark)
		tbsync->race_result = add;
}

void __devinit smp_generic_take_timebase(void)
{
	int cmd;
	u64 tb;
	unsigned long flags;

	local_irq_save(flags);
	while (!running)
		barrier();
	rmb();

	for (;;) {
		tbsync->ack = 1;
		while (!tbsync->handshake)
			barrier();
		rmb();

		cmd = tbsync->cmd;
		tb = tbsync->tb;
		mb();
		tbsync->ack = 0;
		if (cmd == kExit)
			break;

		while (tbsync->handshake)
			barrier();
		if (cmd == kSetAndTest)
			set_tb(tb >> 32, tb & 0xfffffffful);
		enter_contest(tbsync->mark, -1);
	}
	local_irq_restore(flags);
}

static int __devinit start_contest(int cmd, long offset, int num)
{
	int i, score = 0;
	u64 tb;
	u64 mark;

	tbsync->cmd = cmd;

	local_irq_disable();
	for (i = -3; i < num; ) {
		tb = get_tb() + 400;
		tbsync->tb = tb + offset;
		tbsync->mark = mark = tb + 400;
		wmb();

		tbsync->handshake = 1;
		while (tbsync->ack)
			barrier();

		while (get_tb() <= tb)
			barrier();
		tbsync->handshake = 0;
		enter_contest(mark, 1);

		while (!tbsync->ack)
			barrier();

		if (i++ > 0)
			score += tbsync->race_result;
	}
	local_irq_enable();
	return score;
}

void __devinit smp_generic_give_timebase(void)
{
	int i, score, score2, old, min = 0, max = 5000, offset = 1000;

	pr_debug("Software timebase sync\n");

	/* if this fails then this kernel won't work anyway... */
	tbsync = kzalloc(sizeof(*tbsync), GFP_KERNEL);
	mb();
	running = 1;

	while (!tbsync->ack)
		barrier();

	pr_debug("Got ack\n");

	/* binary search */
	for (old = -1; old != offset; offset = (min + max) / 2) {
		score = start_contest(kSetAndTest, offset, NUM_ITER);

		pr_debug("score %d, offset %d\n", score, offset);

		if (score > 0)
			max = offset;
		else
			min = offset;
		old = offset;
	}
	score = start_contest(kSetAndTest, min, NUM_ITER);
	score2 = start_contest(kSetAndTest, max, NUM_ITER);

	pr_debug("Min %d (score %d), Max %d (score %d)\n",
		 min, score, max, score2);
	score = abs(score);
	score2 = abs(score2);
	offset = (score < score2) ? min : max;

	/* guard against inaccurate mttb */
	for (i = 0; i < 10; i++) {
		start_contest(kSetAndTest, offset, NUM_ITER/10);

		if ((score2 = start_contest(kTest, offset, NUM_ITER)) < 0)
			score2 = -score2;

		if (score2 <= score || score2 < 20)
			break;
	}
	pr_debug("Final offset: %d (%d/%d)\n", offset, score2, NUM_ITER);

	/* exiting */
	tbsync->cmd = kExit;
	wmb();
	tbsync->handshake = 1;
	while (tbsync->ack)
		barrier();
	tbsync->handshake = 0;
	kfree(tbsync);
	tbsync = NULL;
	running = 0;
}
license: gpl-2.0
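The offset search in smp_generic_give_timebase() above is easiest to see in isolation: each start_contest() run returns a score whose sign says whether the proposed offset puts the secondary CPU's timebase ahead of (positive) or behind (negative) the primary's, and the loop narrows [min, max] until the midpoint stops moving. Below is a minimal userspace sketch of just that convergence; the hypothetical contest() and true_offset stand in for the kernel's cache-line race and the real hardware skew, and are not part of the file above.

#include <stdio.h>

static int true_offset = 1234;	/* hypothetical skew the search is hunting */

/* stand-in for start_contest(): sign tells which side the offset errs on */
static int contest(int offset)
{
	return offset - true_offset;
}

int main(void)
{
	int min = 0, max = 5000, offset = 1000, old;

	/* same loop shape as the kernel's binary search */
	for (old = -1; old != offset; offset = (min + max) / 2) {
		if (contest(offset) > 0)
			max = offset;	/* score > 0: proposed offset too large */
		else
			min = offset;	/* score <= 0: proposed offset too small */
		old = offset;
	}
	printf("converged near offset %d (min=%d max=%d)\n", offset, min, max);
	return 0;
}

With true_offset = 1234 this settles on offset 1234 with max = 1235, mirroring how the kernel then re-scores both ends of the bracket and keeps whichever lands closer to zero.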
repo_name: MiCode/mi2_kernel
path: drivers/scsi/mpt2sas/mpt2sas_scsih.c
copies: 3625
size: 244475
content:
/*
 * Scsi Host Layer for MPT (Message Passing Technology) based controllers
 *
 * This code is based on drivers/scsi/mpt2sas/mpt2_scsih.c
 * Copyright (C) 2007-2010  LSI Corporation
 *  (mailto:DL-MPTFusionLinux@lsi.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
 * USA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/aer.h>
#include <linux/raid_class.h>
#include <linux/slab.h>

#include "mpt2sas_base.h"

MODULE_AUTHOR(MPT2SAS_AUTHOR);
MODULE_DESCRIPTION(MPT2SAS_DESCRIPTION);
MODULE_LICENSE("GPL");
MODULE_VERSION(MPT2SAS_DRIVER_VERSION);

#define RAID_CHANNEL 1

/* forward prototypes */
static void _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
    struct _sas_node *sas_expander);
static void _firmware_event_work(struct work_struct *work);

static u8 _scsih_check_for_pending_tm(struct MPT2SAS_ADAPTER *ioc, u16 smid);

static void _scsih_scan_start(struct Scsi_Host *shost);
static int _scsih_scan_finished(struct Scsi_Host *shost, unsigned long time);

/* global parameters */
LIST_HEAD(mpt2sas_ioc_list);

/* local parameters */
static u8 scsi_io_cb_idx = -1;
static u8 tm_cb_idx = -1;
static u8 ctl_cb_idx = -1;
static u8 base_cb_idx = -1;
static u8 port_enable_cb_idx = -1;
static u8 transport_cb_idx = -1;
static u8 scsih_cb_idx = -1;
static u8 config_cb_idx = -1;
static int mpt_ids;

static u8 tm_tr_cb_idx = -1;
static u8 tm_tr_volume_cb_idx = -1;
static u8 tm_sas_control_cb_idx = -1;

/* command line options */
static u32 logging_level;
MODULE_PARM_DESC(logging_level, " bits for enabling additional logging info "
    "(default=0)");

static ushort max_sectors = 0xFFFF;
module_param(max_sectors, ushort, 0);
MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");

/* scsi-mid layer global parameter is max_report_luns, which is 511 */
#define MPT2SAS_MAX_LUN (16895)
static int max_lun = MPT2SAS_MAX_LUN;
module_param(max_lun, int, 0);
MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");

/* diag_buffer_enable is bitwise
 * bit 0 set = TRACE
 * bit 1 set = SNAPSHOT
 * bit 2 set = EXTENDED
 *
 * Either bit can be set, or both
 */
static int diag_buffer_enable = -1;
module_param(diag_buffer_enable, int, 0);
MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers "
    "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");

/**
 * struct sense_info - common structure for obtaining sense keys
 * @skey: sense key
 * @asc: additional sense code
 * @ascq: additional sense code qualifier
 */
struct sense_info {
	u8 skey;
	u8 asc;
	u8 ascq;
};

#define MPT2SAS_TURN_ON_FAULT_LED (0xFFFC)
#define MPT2SAS_PORT_ENABLE_COMPLETE (0xFFFD)
#define MPT2SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)

/**
 * struct fw_event_work - firmware event struct
 * @list: link list framework
 * @work: work object (ioc->fault_reset_work_q)
 * @cancel_pending_work: flag set during reset handling
 * @ioc: per adapter object
 * @device_handle: device handle
 * @VF_ID: virtual function id
 * @VP_ID: virtual port id
 * @ignore: flag meaning this event has been marked to ignore
 * @event: firmware event MPI2_EVENT_XXX defined in mpt2_ioc.h
 * @event_data: reply event data payload follows
 *
 * This object stored on ioc->fw_event_list.
 */
struct fw_event_work {
	struct list_head	list;
	u8			cancel_pending_work;
	struct delayed_work	delayed_work;
	struct MPT2SAS_ADAPTER *ioc;
	u16			device_handle;
	u8			VF_ID;
	u8			VP_ID;
	u8			ignore;
	u16			event;
	void			*event_data;
};

/* raid transport support */
static struct raid_template *mpt2sas_raid_template;

/**
 * struct _scsi_io_transfer - scsi io transfer
 * @handle: sas device handle (assigned by firmware)
 * @is_raid: flag set for hidden raid components
 * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE,
 * @data_length: data transfer length
 * @data_dma: dma pointer to data
 * @sense: sense data
 * @lun: lun number
 * @cdb_length: cdb length
 * @cdb: cdb contents
 * @timeout: timeout for this command
 * @VF_ID: virtual function id
 * @VP_ID: virtual port id
 * @valid_reply: flag set for reply message
 * @sense_length: sense length
 * @ioc_status: ioc status
 * @scsi_state: scsi state
 * @scsi_status: scsi status
 * @log_info: log information
 * @transfer_length: data length transfer when there is a reply message
 *
 * Used for sending internal scsi commands to devices within this module.
 * Refer to _scsi_send_scsi_io().
 */
struct _scsi_io_transfer {
	u16	handle;
	u8	is_raid;
	enum dma_data_direction dir;
	u32	data_length;
	dma_addr_t data_dma;
	u8	sense[SCSI_SENSE_BUFFERSIZE];
	u32	lun;
	u8	cdb_length;
	u8	cdb[32];
	u8	timeout;
	u8	VF_ID;
	u8	VP_ID;
	u8	valid_reply;
	/* the following bits are only valid when 'valid_reply = 1' */
	u32	sense_length;
	u16	ioc_status;
	u8	scsi_state;
	u8	scsi_status;
	u32	log_info;
	u32	transfer_length;
};

/*
 * The pci device ids are defined in mpi/mpi2_cnfg.h.
 */
static struct pci_device_id scsih_pci_table[] = {
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Falcon ~ 2008 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Liberator ~ 2108 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Meteor ~ 2116 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Thunderbolt ~ 2208 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Mustang ~ 2308 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
		PCI_ANY_ID, PCI_ANY_ID },
	/* SSS6200 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
		PCI_ANY_ID, PCI_ANY_ID },
	{0}	/* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, scsih_pci_table);

/**
 * _scsih_set_debug_level - global setting of ioc->logging_level.
 *
 * Note: The logging levels are defined in mpt2sas_debug.h.
 */
static int
_scsih_set_debug_level(const char *val, struct kernel_param *kp)
{
	int ret = param_set_int(val, kp);
	struct MPT2SAS_ADAPTER *ioc;

	if (ret)
		return ret;

	printk(KERN_INFO "setting logging_level(0x%08x)\n", logging_level);
	list_for_each_entry(ioc, &mpt2sas_ioc_list, list)
		ioc->logging_level = logging_level;
	return 0;
}
module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
    &logging_level, 0644);

/**
 * _scsih_srch_boot_sas_address - search based on sas_address
 * @sas_address: sas address
 * @boot_device: boot device object from bios page 2
 *
 * Returns 1 when there's a match, 0 means no match.
 */
static inline int
_scsih_srch_boot_sas_address(u64 sas_address,
    Mpi2BootDeviceSasWwid_t *boot_device)
{
	return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0;
}

/**
 * _scsih_srch_boot_device_name - search based on device name
 * @device_name: device name specified in IDENTIFY frame
 * @boot_device: boot device object from bios page 2
 *
 * Returns 1 when there's a match, 0 means no match.
 */
static inline int
_scsih_srch_boot_device_name(u64 device_name,
    Mpi2BootDeviceDeviceName_t *boot_device)
{
	return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
}

/**
 * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
 * @enclosure_logical_id: enclosure logical id
 * @slot_number: slot number
 * @boot_device: boot device object from bios page 2
 *
 * Returns 1 when there's a match, 0 means no match.
 */
static inline int
_scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
    Mpi2BootDeviceEnclosureSlot_t *boot_device)
{
	return (enclosure_logical_id == le64_to_cpu(boot_device->
	    EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
	    SlotNumber)) ? 1 : 0;
}

/**
 * _scsih_is_boot_device - search for matching boot device.
 * @sas_address: sas address
 * @device_name: device name specified in IDENTIFY frame
 * @enclosure_logical_id: enclosure logical id
 * @slot: slot number
 * @form: specifies boot device form
 * @boot_device: boot device object from bios page 2
 *
 * Returns 1 when there's a match, 0 means no match.
 */
static int
_scsih_is_boot_device(u64 sas_address, u64 device_name,
    u64 enclosure_logical_id, u16 slot, u8 form,
    Mpi2BiosPage2BootDevice_t *boot_device)
{
	int rc = 0;

	switch (form) {
	case MPI2_BIOSPAGE2_FORM_SAS_WWID:
		if (!sas_address)
			break;
		rc = _scsih_srch_boot_sas_address(
		    sas_address, &boot_device->SasWwid);
		break;
	case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
		if (!enclosure_logical_id)
			break;
		rc = _scsih_srch_boot_encl_slot(
		    enclosure_logical_id,
		    slot, &boot_device->EnclosureSlot);
		break;
	case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
		if (!device_name)
			break;
		rc = _scsih_srch_boot_device_name(
		    device_name, &boot_device->DeviceName);
		break;
	case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
		break;
	}

	return rc;
}

/**
 * _scsih_get_sas_address - set the sas_address for given device handle
 * @ioc: per adapter object
 * @handle: device handle
 * @sas_address: sas address
 *
 * Returns 0 success, non-zero when failure
 */
static int
_scsih_get_sas_address(struct MPT2SAS_ADAPTER *ioc, u16 handle,
    u64 *sas_address)
{
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u32 ioc_status;

	*sas_address = 0;

	if (handle <= ioc->sas_hba.num_phys) {
		*sas_address = ioc->sas_hba.sas_address;
		return 0;
	}

	if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name,
		    __FILE__, __LINE__, __func__);
		return -ENXIO;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
		*sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
		return 0;
	}

	/* we hit this because the given parent handle doesn't exist */
	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return -ENXIO;

	/* else error case */
	printk(MPT2SAS_ERR_FMT "handle(0x%04x), ioc_status(0x%04x), "
	    "failure at %s:%d/%s()!\n", ioc->name, handle, ioc_status,
	    __FILE__, __LINE__, __func__);
	return -EIO;
}

/**
 * _scsih_determine_boot_device - determine boot device.
 * @ioc: per adapter object
 * @device: either sas_device or raid_device object
 * @is_raid: [flag] 1 = raid object, 0 = sas object
 *
 * Determines whether this device should be the first reported device to
 * scsi-ml or sas transport, this purpose is for persistent boot device.
 * There are primary, alternate, and current entries in bios page 2. The order
 * priority is primary, alternate, then current.  This routine saves
 * the corresponding device object and is_raid flag in the ioc object.
 * The saved data to be used later in _scsih_probe_boot_devices().
 */
static void
_scsih_determine_boot_device(struct MPT2SAS_ADAPTER *ioc,
    void *device, u8 is_raid)
{
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	u64 sas_address;
	u64 device_name;
	u64 enclosure_logical_id;
	u16 slot;

	/* only process this function when driver loads */
	if (!ioc->is_driver_loading)
		return;

	/* no Bios, return immediately */
	if (!ioc->bios_pg3.BiosVersion)
		return;

	if (!is_raid) {
		sas_device = device;
		sas_address = sas_device->sas_address;
		device_name = sas_device->device_name;
		enclosure_logical_id = sas_device->enclosure_logical_id;
		slot = sas_device->slot;
	} else {
		raid_device = device;
		sas_address = raid_device->wwid;
		device_name = 0;
		enclosure_logical_id = 0;
		slot = 0;
	}

	if (!ioc->req_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedBootDevice)) {
			dinitprintk(ioc, printk(MPT2SAS_INFO_FMT
			    "%s: req_boot_device(0x%016llx)\n",
			    ioc->name, __func__,
			    (unsigned long long)sas_address));
			ioc->req_boot_device.device = device;
			ioc->req_boot_device.is_raid = is_raid;
		}
	}

	if (!ioc->req_alt_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqAltBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedAltBootDevice)) {
			dinitprintk(ioc, printk(MPT2SAS_INFO_FMT
			    "%s: req_alt_boot_device(0x%016llx)\n",
			    ioc->name, __func__,
			    (unsigned long long)sas_address));
			ioc->req_alt_boot_device.device = device;
			ioc->req_alt_boot_device.is_raid = is_raid;
		}
	}

	if (!ioc->current_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.CurrentBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.CurrentBootDevice)) {
			dinitprintk(ioc, printk(MPT2SAS_INFO_FMT
			    "%s: current_boot_device(0x%016llx)\n",
			    ioc->name, __func__,
			    (unsigned long long)sas_address));
			ioc->current_boot_device.device = device;
			ioc->current_boot_device.is_raid = is_raid;
		}
	}
}

/**
 * mpt2sas_scsih_sas_device_find_by_sas_address - sas device search
 * @ioc: per adapter object
 * @sas_address: sas address
 * Context: Calling function should acquire ioc->sas_device_lock
 *
 * This searches for sas_device based on sas_address, then return sas_device
 * object.
 */
struct _sas_device *
mpt2sas_scsih_sas_device_find_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
    u64 sas_address)
{
	struct _sas_device *sas_device;

	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
		if (sas_device->sas_address == sas_address)
			return sas_device;

	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
		if (sas_device->sas_address == sas_address)
			return sas_device;

	return NULL;
}

/**
 * _scsih_sas_device_find_by_handle - sas device search
 * @ioc: per adapter object
 * @handle: sas device handle (assigned by firmware)
 * Context: Calling function should acquire ioc->sas_device_lock
 *
 * This searches for sas_device based on handle, then return sas_device
 * object.
 */
static struct _sas_device *
_scsih_sas_device_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_device *sas_device;

	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
		if (sas_device->handle == handle)
			return sas_device;

	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
		if (sas_device->handle == handle)
			return sas_device;

	return NULL;
}

/**
 * _scsih_sas_device_remove - remove sas_device from list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * Removing object and freeing associated memory from the ioc->sas_device_list.
 */
static void
_scsih_sas_device_remove(struct MPT2SAS_ADAPTER *ioc,
    struct _sas_device *sas_device)
{
	unsigned long flags;

	if (!sas_device)
		return;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	if (mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
	    sas_device->sas_address)) {
		list_del(&sas_device->list);
		kfree(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}

/**
 * _scsih_sas_device_add - insert sas_device to the list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * Adding new object to the ioc->sas_device_list.
 */
static void
_scsih_sas_device_add(struct MPT2SAS_ADAPTER *ioc,
    struct _sas_device *sas_device)
{
	unsigned long flags;

	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: handle"
	    "(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
	    sas_device->handle, (unsigned long long)sas_device->sas_address));

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	list_add_tail(&sas_device->list, &ioc->sas_device_list);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
	    sas_device->sas_address_parent)) {
		_scsih_sas_device_remove(ioc, sas_device);
	} else if (!sas_device->starget) {
		/* When async scanning is enabled, it's not possible to remove
		 * devices while scanning is turned on due to an oops in
		 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
		 */
		if (!ioc->is_driver_loading)
			mpt2sas_transport_port_remove(ioc,
			    sas_device->sas_address,
			    sas_device->sas_address_parent);
		_scsih_sas_device_remove(ioc, sas_device);
	}
}

/**
 * _scsih_sas_device_init_add - insert sas_device to the list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * Adding new object at driver load time to the ioc->sas_device_init_list.
 */
static void
_scsih_sas_device_init_add(struct MPT2SAS_ADAPTER *ioc,
    struct _sas_device *sas_device)
{
	unsigned long flags;

	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: handle"
	    "(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
	    sas_device->handle, (unsigned long long)sas_device->sas_address));

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	_scsih_determine_boot_device(ioc, sas_device, 0);
}

/**
 * _scsih_raid_device_find_by_id - raid device search
 * @ioc: per adapter object
 * @id: sas device target id
 * @channel: sas device channel
 * Context: Calling function should acquire ioc->raid_device_lock
 *
 * This searches for raid_device based on target id, then return raid_device
 * object.
 */
static struct _raid_device *
_scsih_raid_device_find_by_id(struct MPT2SAS_ADAPTER *ioc, int id, int channel)
{
	struct _raid_device *raid_device, *r;

	r = NULL;
	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
		if (raid_device->id == id && raid_device->channel == channel) {
			r = raid_device;
			goto out;
		}
	}
 out:
	return r;
}

/**
 * _scsih_raid_device_find_by_handle - raid device search
 * @ioc: per adapter object
 * @handle: sas device handle (assigned by firmware)
 * Context: Calling function should acquire ioc->raid_device_lock
 *
 * This searches for raid_device based on handle, then return raid_device
 * object.
 */
static struct _raid_device *
_scsih_raid_device_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
	struct _raid_device *raid_device, *r;

	r = NULL;
	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
		if (raid_device->handle != handle)
			continue;
		r = raid_device;
		goto out;
	}
 out:
	return r;
}

/**
 * _scsih_raid_device_find_by_wwid - raid device search
 * @ioc: per adapter object
 * @wwid: world wide identifier for the volume
 * Context: Calling function should acquire ioc->raid_device_lock
 *
 * This searches for raid_device based on wwid, then return raid_device
 * object.
 */
static struct _raid_device *
_scsih_raid_device_find_by_wwid(struct MPT2SAS_ADAPTER *ioc, u64 wwid)
{
	struct _raid_device *raid_device, *r;

	r = NULL;
	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
		if (raid_device->wwid != wwid)
			continue;
		r = raid_device;
		goto out;
	}
 out:
	return r;
}

/**
 * _scsih_raid_device_add - add raid_device object
 * @ioc: per adapter object
 * @raid_device: raid_device object
 *
 * This is added to the raid_device_list linked list.
 */
static void
_scsih_raid_device_add(struct MPT2SAS_ADAPTER *ioc,
    struct _raid_device *raid_device)
{
	unsigned long flags;

	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: handle"
	    "(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
	    raid_device->handle, (unsigned long long)raid_device->wwid));

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	list_add_tail(&raid_device->list, &ioc->raid_device_list);
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}

/**
 * _scsih_raid_device_remove - delete raid_device object
 * @ioc: per adapter object
 * @raid_device: raid_device object
 *
 * This is removed from the raid_device_list linked list.
 */
static void
_scsih_raid_device_remove(struct MPT2SAS_ADAPTER *ioc,
    struct _raid_device *raid_device)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	list_del(&raid_device->list);
	memset(raid_device, 0, sizeof(struct _raid_device));
	kfree(raid_device);
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}

/**
 * mpt2sas_scsih_expander_find_by_handle - expander device search
 * @ioc: per adapter object
 * @handle: expander handle (assigned by firmware)
 * Context: Calling function should acquire ioc->sas_device_lock
 *
 * This searches for expander device based on handle, then returns the
 * sas_node object.
 */
struct _sas_node *
mpt2sas_scsih_expander_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_node *sas_expander, *r;

	r = NULL;
	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
		if (sas_expander->handle != handle)
			continue;
		r = sas_expander;
		goto out;
	}
 out:
	return r;
}

/**
 * mpt2sas_scsih_expander_find_by_sas_address - expander device search
 * @ioc: per adapter object
 * @sas_address: sas address
 * Context: Calling function should acquire ioc->sas_node_lock.
 *
 * This searches for expander device based on sas_address, then returns the
 * sas_node object.
 */
struct _sas_node *
mpt2sas_scsih_expander_find_by_sas_address(struct MPT2SAS_ADAPTER *ioc,
    u64 sas_address)
{
	struct _sas_node *sas_expander, *r;

	r = NULL;
	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
		if (sas_expander->sas_address != sas_address)
			continue;
		r = sas_expander;
		goto out;
	}
 out:
	return r;
}

/**
 * _scsih_expander_node_add - insert expander device to the list.
 * @ioc: per adapter object
 * @sas_expander: the sas_node object
 * Context: This function will acquire ioc->sas_node_lock.
 *
 * Adding new object to the ioc->sas_expander_list.
 *
 * Return nothing.
 */
static void
_scsih_expander_node_add(struct MPT2SAS_ADAPTER *ioc,
    struct _sas_node *sas_expander)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
}

/**
 * _scsih_is_end_device - determines if device is an end device
 * @device_info: bitfield providing information about the device.
 * Context: none
 *
 * Returns 1 if end device.
 */
static int
_scsih_is_end_device(u32 device_info)
{
	if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
	    ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
	    (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
	    (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
		return 1;
	else
		return 0;
}

/**
 * _scsih_scsi_lookup_get - returns scmd entry
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Returns the smid stored scmd pointer.
 */
static struct scsi_cmnd *
_scsih_scsi_lookup_get(struct MPT2SAS_ADAPTER *ioc, u16 smid)
{
	return ioc->scsi_lookup[smid - 1].scmd;
}

/**
 * _scsih_scsi_lookup_get_clear - returns scmd entry
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Returns the smid stored scmd pointer, then clears the stored entry.
 */
static inline struct scsi_cmnd *
_scsih_scsi_lookup_get_clear(struct MPT2SAS_ADAPTER *ioc, u16 smid)
{
	unsigned long flags;
	struct scsi_cmnd *scmd;

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	scmd = ioc->scsi_lookup[smid - 1].scmd;
	ioc->scsi_lookup[smid - 1].scmd = NULL;
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	return scmd;
}

/**
 * _scsih_scsi_lookup_find_by_scmd - scmd lookup
 * @ioc: per adapter object
 * @scmd: pointer to scsi command object
 * Context: This function will acquire ioc->scsi_lookup_lock.
 *
 * This will search for a scmd pointer in the scsi_lookup array,
 * returning the relevant smid.  A returned value of zero means invalid.
 */
static u16
_scsih_scsi_lookup_find_by_scmd(struct MPT2SAS_ADAPTER *ioc, struct
    scsi_cmnd *scmd)
{
	u16 smid;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	smid = 0;
	for (i = 0; i < ioc->scsiio_depth; i++) {
		if (ioc->scsi_lookup[i].scmd == scmd) {
			smid = ioc->scsi_lookup[i].smid;
			goto out;
		}
	}
 out:
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
	return smid;
}

/**
 * _scsih_scsi_lookup_find_by_target - search for matching channel:id
 * @ioc: per adapter object
 * @id: target id
 * @channel: channel
 * Context: This function will acquire ioc->scsi_lookup_lock.
 *
 * This will search for a matching channel:id in the scsi_lookup array,
 * returning 1 if found.
 */
static u8
_scsih_scsi_lookup_find_by_target(struct MPT2SAS_ADAPTER *ioc, int id,
    int channel)
{
	u8 found;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	found = 0;
	for (i = 0; i < ioc->scsiio_depth; i++) {
		if (ioc->scsi_lookup[i].scmd &&
		    (ioc->scsi_lookup[i].scmd->device->id == id &&
		    ioc->scsi_lookup[i].scmd->device->channel == channel)) {
			found = 1;
			goto out;
		}
	}
 out:
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
	return found;
}

/**
 * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
 * @ioc: per adapter object
 * @id: target id
 * @lun: lun number
 * @channel: channel
 * Context: This function will acquire ioc->scsi_lookup_lock.
 *
 * This will search for a matching channel:id:lun in the scsi_lookup array,
 * returning 1 if found.
 */
static u8
_scsih_scsi_lookup_find_by_lun(struct MPT2SAS_ADAPTER *ioc, int id,
    unsigned int lun, int channel)
{
	u8 found;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	found = 0;
	for (i = 0; i < ioc->scsiio_depth; i++) {
		if (ioc->scsi_lookup[i].scmd &&
		    (ioc->scsi_lookup[i].scmd->device->id == id &&
		    ioc->scsi_lookup[i].scmd->device->channel == channel &&
		    ioc->scsi_lookup[i].scmd->device->lun == lun)) {
			found = 1;
			goto out;
		}
	}
 out:
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
	return found;
}

/**
 * _scsih_get_chain_buffer_tracker - obtain chain tracker
 * @ioc: per adapter object
 * @smid: smid associated to an IO request
 *
 * Returns chain tracker(from ioc->free_chain_list)
 */
static struct chain_tracker *
_scsih_get_chain_buffer_tracker(struct MPT2SAS_ADAPTER *ioc, u16 smid)
{
	struct chain_tracker *chain_req;
	unsigned long flags;

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	if (list_empty(&ioc->free_chain_list)) {
		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
		dfailprintk(ioc, printk(MPT2SAS_WARN_FMT "chain buffers not "
		    "available\n", ioc->name));
		return NULL;
	}
	chain_req = list_entry(ioc->free_chain_list.next,
	    struct chain_tracker, tracker_list);
	list_del_init(&chain_req->tracker_list);
	list_add_tail(&chain_req->tracker_list,
	    &ioc->scsi_lookup[smid - 1].chain_list);
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
	return chain_req;
}

/**
 * _scsih_build_scatter_gather - main sg creation routine
 * @ioc: per adapter object
 * @scmd: scsi command
 * @smid: system request message index
 * Context: none.
 *
 * The main routine that builds scatter gather table from a given
 * scsi request sent via the .queuecommand main handler.
 *
 * Returns 0 success, anything else error
 */
static int
_scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc,
    struct scsi_cmnd *scmd, u16 smid)
{
	Mpi2SCSIIORequest_t *mpi_request;
	dma_addr_t chain_dma;
	struct scatterlist *sg_scmd;
	void *sg_local, *chain;
	u32 chain_offset;
	u32 chain_length;
	u32 chain_flags;
	int sges_left;
	u32 sges_in_segment;
	u32 sgl_flags;
	u32 sgl_flags_last_element;
	u32 sgl_flags_end_buffer;
	struct chain_tracker *chain_req;

	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);

	/* init scatter gather flags */
	sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT;
	if (scmd->sc_data_direction == DMA_TO_DEVICE)
		sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
	sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT)
	    << MPI2_SGE_FLAGS_SHIFT;
	sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT |
	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST)
	    << MPI2_SGE_FLAGS_SHIFT;
	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;

	sg_scmd = scsi_sglist(scmd);
	sges_left = scsi_dma_map(scmd);
	if (sges_left < 0) {
		sdev_printk(KERN_ERR, scmd->device, "pci_map_sg"
		    " failed: request for %d bytes!\n", scsi_bufflen(scmd));
		return -ENOMEM;
	}

	sg_local = &mpi_request->SGL;
	sges_in_segment = ioc->max_sges_in_main_message;
	if (sges_left <= sges_in_segment)
		goto fill_in_last_segment;

	mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) +
	    (sges_in_segment * ioc->sge_size))/4;

	/* fill in main message segment when there is a chain following */
	while (sges_in_segment) {
		if (sges_in_segment == 1)
			ioc->base_add_sg_single(sg_local,
			    sgl_flags_last_element | sg_dma_len(sg_scmd),
			    sg_dma_address(sg_scmd));
		else
			ioc->base_add_sg_single(sg_local, sgl_flags |
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += ioc->sge_size;
		sges_left--;
		sges_in_segment--;
	}

	/* initializing the chain flags and pointers */
	chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
	chain_req = _scsih_get_chain_buffer_tracker(ioc, smid);
	if (!chain_req)
		return -1;
	chain = chain_req->chain_buffer;
	chain_dma = chain_req->chain_buffer_dma;
	do {
		sges_in_segment = (sges_left <=
		    ioc->max_sges_in_chain_message) ? sges_left :
		    ioc->max_sges_in_chain_message;
		chain_offset = (sges_left == sges_in_segment) ?
		    0 : (sges_in_segment * ioc->sge_size)/4;
		chain_length = sges_in_segment * ioc->sge_size;
		if (chain_offset) {
			chain_offset = chain_offset <<
			    MPI2_SGE_CHAIN_OFFSET_SHIFT;
			chain_length += ioc->sge_size;
		}
		ioc->base_add_sg_single(sg_local, chain_flags | chain_offset |
		    chain_length, chain_dma);
		sg_local = chain;
		if (!chain_offset)
			goto fill_in_last_segment;

		/* fill in chain segments */
		while (sges_in_segment) {
			if (sges_in_segment == 1)
				ioc->base_add_sg_single(sg_local,
				    sgl_flags_last_element |
				    sg_dma_len(sg_scmd),
				    sg_dma_address(sg_scmd));
			else
				ioc->base_add_sg_single(sg_local, sgl_flags |
				    sg_dma_len(sg_scmd),
				    sg_dma_address(sg_scmd));
			sg_scmd = sg_next(sg_scmd);
			sg_local += ioc->sge_size;
			sges_left--;
			sges_in_segment--;
		}

		chain_req = _scsih_get_chain_buffer_tracker(ioc, smid);
		if (!chain_req)
			return -1;
		chain = chain_req->chain_buffer;
		chain_dma = chain_req->chain_buffer_dma;
	} while (1);

 fill_in_last_segment:

	/* fill the last segment */
	while (sges_left) {
		if (sges_left == 1)
			ioc->base_add_sg_single(sg_local,
			    sgl_flags_end_buffer | sg_dma_len(sg_scmd),
			    sg_dma_address(sg_scmd));
		else
			ioc->base_add_sg_single(sg_local, sgl_flags |
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += ioc->sge_size;
		sges_left--;
	}

	return 0;
}

/**
 * _scsih_adjust_queue_depth - setting device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 *
 * Returns nothing
 */
static void
_scsih_adjust_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct Scsi_Host *shost = sdev->host;
	int max_depth;
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT2SAS_DEVICE *sas_device_priv_data;
	struct MPT2SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	unsigned long flags;

	max_depth = shost->can_queue;

	/* limit max device queue for SATA to 32 */
	sas_device_priv_data = sdev->hostdata;
	if (!sas_device_priv_data)
		goto not_sata;
	sas_target_priv_data = sas_device_priv_data->sas_target;
	if (!sas_target_priv_data)
		goto not_sata;
	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
		goto not_sata;
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
	    sas_device_priv_data->sas_target->sas_address);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device && sas_device->device_info &
	    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
		max_depth = MPT2SAS_SATA_QUEUE_DEPTH;

 not_sata:

	if (!sdev->tagged_supported)
		max_depth = 1;
	if (qdepth > max_depth)
		qdepth = max_depth;
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
}

/**
 * _scsih_change_queue_depth - setting device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP
 * (see include/scsi/scsi_host.h for definition)
 *
 * Returns queue depth.
 */
static int
_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
{
	if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP)
		_scsih_adjust_queue_depth(sdev, qdepth);
	else if (reason == SCSI_QDEPTH_QFULL)
		scsi_track_queue_full(sdev, qdepth);
	else
		return -EOPNOTSUPP;

	if (sdev->inquiry_len > 7)
		sdev_printk(KERN_INFO, sdev, "qdepth(%d), tagged(%d), "
		    "simple(%d), ordered(%d), scsi_level(%d), cmd_que(%d)\n",
		    sdev->queue_depth, sdev->tagged_supported,
		    sdev->simple_tags, sdev->ordered_tags, sdev->scsi_level,
		    (sdev->inquiry[7] & 2) >> 1);

	return sdev->queue_depth;
}

/**
 * _scsih_change_queue_type - changing device queue tag type
 * @sdev: scsi device struct
 * @tag_type: requested tag type
 *
 * Returns queue tag type.
 */
static int
_scsih_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	if (sdev->tagged_supported) {
		scsi_set_tag_type(sdev, tag_type);
		if (tag_type)
			scsi_activate_tcq(sdev, sdev->queue_depth);
		else
			scsi_deactivate_tcq(sdev, sdev->queue_depth);
	} else
		tag_type = 0;

	return tag_type;
}

/**
 * _scsih_target_alloc - target add routine
 * @starget: scsi target struct
 *
 * Returns 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
_scsih_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT2SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	unsigned long flags;
	struct sas_rphy *rphy;

	sas_target_priv_data = kzalloc(sizeof(struct MPT2SAS_TARGET),
	    GFP_KERNEL);
	if (!sas_target_priv_data)
		return -ENOMEM;

	starget->hostdata = sas_target_priv_data;
	sas_target_priv_data->starget = starget;
	sas_target_priv_data->handle = MPT2SAS_INVALID_DEVICE_HANDLE;

	/* RAID volumes */
	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
		    starget->channel);
		if (raid_device) {
			sas_target_priv_data->handle = raid_device->handle;
			sas_target_priv_data->sas_address = raid_device->wwid;
			sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
			sas_target_priv_data->raid_device = raid_device;
			raid_device->starget = starget;
		}
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		return 0;
	}

	/* sas/sata devices */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	rphy = dev_to_rphy(starget->dev.parent);
	sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
	    rphy->identify.sas_address);

	if (sas_device) {
		sas_target_priv_data->handle = sas_device->handle;
		sas_target_priv_data->sas_address = sas_device->sas_address;
		sas_device->starget = starget;
		sas_device->id = starget->id;
		sas_device->channel = starget->channel;
		if (test_bit(sas_device->handle, ioc->pd_handles))
			sas_target_priv_data->flags |=
			    MPT_TARGET_FLAGS_RAID_COMPONENT;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return 0;
}

/**
 * _scsih_target_destroy - target destroy routine
 * @starget: scsi target struct
 *
 * Returns nothing.
 */
static void
_scsih_target_destroy(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT2SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	unsigned long flags;
	struct sas_rphy *rphy;

	sas_target_priv_data = starget->hostdata;
	if (!sas_target_priv_data)
		return;

	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
		    starget->channel);
		if (raid_device) {
			raid_device->starget = NULL;
			raid_device->sdev = NULL;
		}
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		goto out;
	}

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	rphy = dev_to_rphy(starget->dev.parent);
	sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
	    rphy->identify.sas_address);
	if (sas_device && (sas_device->starget == starget) &&
	    (sas_device->id == starget->id) &&
	    (sas_device->channel == starget->channel))
		sas_device->starget = NULL;

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

 out:
	kfree(sas_target_priv_data);
	starget->hostdata = NULL;
}

/**
 * _scsih_slave_alloc - device add routine
 * @sdev: scsi device struct
 *
 * Returns 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
_scsih_slave_alloc(struct scsi_device *sdev)
{
	struct Scsi_Host *shost;
	struct MPT2SAS_ADAPTER *ioc;
	struct MPT2SAS_TARGET *sas_target_priv_data;
	struct MPT2SAS_DEVICE *sas_device_priv_data;
	struct scsi_target *starget;
	struct _raid_device *raid_device;
	unsigned long flags;

	sas_device_priv_data = kzalloc(sizeof(struct MPT2SAS_DEVICE),
	    GFP_KERNEL);
	if (!sas_device_priv_data)
		return -ENOMEM;

	sas_device_priv_data->lun = sdev->lun;
	sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;

	starget = scsi_target(sdev);
	sas_target_priv_data = starget->hostdata;
	sas_target_priv_data->num_luns++;
	sas_device_priv_data->sas_target = sas_target_priv_data;
	sdev->hostdata = sas_device_priv_data;
	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
		sdev->no_uld_attach = 1;

	shost = dev_to_shost(&starget->dev);
	ioc = shost_priv(shost);
	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc,
		    starget->id, starget->channel);
		if (raid_device)
			raid_device->sdev = sdev; /* raid is single lun */
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	}

	return 0;
}

/**
 * _scsih_slave_destroy - device destroy routine
 * @sdev: scsi device struct
 *
 * Returns nothing.
 */
static void
_scsih_slave_destroy(struct scsi_device *sdev)
{
	struct MPT2SAS_TARGET *sas_target_priv_data;
	struct scsi_target *starget;
	struct Scsi_Host *shost;
	struct MPT2SAS_ADAPTER *ioc;
	struct _sas_device *sas_device;
	unsigned long flags;

	if (!sdev->hostdata)
		return;

	starget = scsi_target(sdev);
	sas_target_priv_data = starget->hostdata;
	sas_target_priv_data->num_luns--;

	shost = dev_to_shost(&starget->dev);
	ioc = shost_priv(shost);

	if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
		    sas_target_priv_data->sas_address);
		if (sas_device && !sas_target_priv_data->num_luns)
			sas_device->starget = NULL;
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}

	kfree(sdev->hostdata);
	sdev->hostdata = NULL;
}

/**
 * _scsih_display_sata_capabilities - sata capabilities
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 * @sdev: scsi device struct
 */
static void
_scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc,
    struct _sas_device *sas_device, struct scsi_device *sdev)
{
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	u32 ioc_status;
	u16 flags;
	u32 device_info;

	if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, sas_device->handle))) {
		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		return;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		return;
	}

	flags = le16_to_cpu(sas_device_pg0.Flags);
	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);

	sdev_printk(KERN_INFO, sdev,
	    "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
	    "sw_preserve(%s)\n",
	    (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
	    "n",
	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
}

/**
 * _scsih_is_raid - return boolean indicating device is raid volume
 * @dev: the device struct object
 */
static int
_scsih_is_raid(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);

	if (ioc->is_warpdrive)
		return 0;
	return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
}

/**
 * _scsih_get_resync - get raid volume resync percent complete
 * @dev: the device struct object
 */
static void
_scsih_get_resync(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
	struct _raid_device *raid_device;
	unsigned long flags;
	Mpi2RaidVolPage0_t vol_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u32 volume_status_flags;
	u8 percent_complete = 0;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
	    sdev->channel);
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);

	if (!raid_device || ioc->is_warpdrive)
		goto out;

	if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
	    MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle,
	    sizeof(Mpi2RaidVolPage0_t))) {
		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		goto out;
	}

	volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
	if (volume_status_flags & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS)
		percent_complete = raid_device->percent_complete;
 out:
	raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
}

/**
 * _scsih_get_state - get raid volume level
 * @dev: the device struct object
 */
static void
_scsih_get_state(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
	struct _raid_device *raid_device;
	unsigned long flags;
	Mpi2RaidVolPage0_t vol_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u32 volstate;
	enum raid_state state = RAID_STATE_UNKNOWN;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
	    sdev->channel);
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);

	if (!raid_device)
		goto out;

	if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
	    MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle,
	    sizeof(Mpi2RaidVolPage0_t))) {
		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		goto out;
	}

	volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
	if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
		state = RAID_STATE_RESYNCING;
		goto out;
	}

	switch (vol_pg0.VolumeState) {
	case MPI2_RAID_VOL_STATE_OPTIMAL:
	case MPI2_RAID_VOL_STATE_ONLINE:
		state = RAID_STATE_ACTIVE;
		break;
	case MPI2_RAID_VOL_STATE_DEGRADED:
		state = RAID_STATE_DEGRADED;
		break;
	case MPI2_RAID_VOL_STATE_FAILED:
	case MPI2_RAID_VOL_STATE_MISSING:
		state = RAID_STATE_OFFLINE;
		break;
	}
 out:
	raid_set_state(mpt2sas_raid_template, dev, state);
}

/**
 * _scsih_set_level - set raid level
 * @sdev: scsi device struct
 * @raid_device: raid_device object
 */
static void
_scsih_set_level(struct scsi_device *sdev, struct _raid_device *raid_device)
{
	enum raid_level level = RAID_LEVEL_UNKNOWN;

	switch (raid_device->volume_type) {
	case MPI2_RAID_VOL_TYPE_RAID0:
		level = RAID_LEVEL_0;
		break;
	case MPI2_RAID_VOL_TYPE_RAID10:
		level = RAID_LEVEL_10;
		break;
	case MPI2_RAID_VOL_TYPE_RAID1E:
		level = RAID_LEVEL_1E;
		break;
	case MPI2_RAID_VOL_TYPE_RAID1:
		level = RAID_LEVEL_1;
		break;
	}

	raid_set_level(mpt2sas_raid_template, &sdev->sdev_gendev, level);
}

/**
 * _scsih_get_volume_capabilities - volume capabilities
 * @ioc: per adapter object
 * @raid_device: the raid_device object
 *
 * Returns 0 for success, else 1
 */
static int
_scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc,
    struct _raid_device *raid_device)
{
	Mpi2RaidVolPage0_t *vol_pg0;
	Mpi2RaidPhysDiskPage0_t pd_pg0;
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u16 sz;
	u8 num_pds;

	if ((mpt2sas_config_get_number_pds(ioc, raid_device->handle,
	    &num_pds)) || !num_pds) {
		dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
		    "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
		    __func__));
		return 1;
	}

	raid_device->num_pds = num_pds;
	sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
	    sizeof(Mpi2RaidVol0PhysDisk_t));
	vol_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!vol_pg0) {
		dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
		    "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
		    __func__));
		return 1;
	}

	if ((mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
	    MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
		dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
		    "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
		    __func__));
		kfree(vol_pg0);
		return 1;
	}

	raid_device->volume_type = vol_pg0->VolumeType;

	/* figure out what the underlying devices are by
	 * obtaining the device_info bits for the 1st device
	 */
	if (!(mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
	    vol_pg0->PhysDisk[0].PhysDiskNum))) {
		if (!(mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
		    le16_to_cpu(pd_pg0.DevHandle)))) {
			raid_device->device_info =
			    le32_to_cpu(sas_device_pg0.DeviceInfo);
		}
	}

	kfree(vol_pg0);
	return 0;
}

/**
 * _scsih_disable_ddio - Disable direct I/O for all the volumes
 * @ioc: per adapter object
 */
static void
_scsih_disable_ddio(struct MPT2SAS_ADAPTER *ioc)
{
	Mpi2RaidVolPage1_t vol_pg1;
	Mpi2ConfigReply_t mpi_reply;
	struct _raid_device *raid_device;
	u16 handle;
	u16 ioc_status;

	handle = 0xFFFF;
	while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
	    &vol_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
			break;
		handle = le16_to_cpu(vol_pg1.DevHandle);
		raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
		if (raid_device)
			raid_device->direct_io_enabled = 0;
	}
	return;
}

/**
 * _scsih_get_num_volumes - Get number of volumes in the ioc
 * @ioc: per adapter object
 */
static u8
_scsih_get_num_volumes(struct MPT2SAS_ADAPTER *ioc)
{
	Mpi2RaidVolPage1_t vol_pg1;
	Mpi2ConfigReply_t mpi_reply;
	u16 handle;
	u8 vol_cnt = 0;
	u16 ioc_status;

	handle = 0xFFFF;
	while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
	    &vol_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
			break;
		vol_cnt++;
		handle = le16_to_cpu(vol_pg1.DevHandle);
	}
	return vol_cnt;
}

/**
 * _scsih_init_warpdrive_properties - Set properties for warpdrive direct I/O.
 * @ioc: per adapter object
 * @raid_device: the raid_device object
 */
static void
_scsih_init_warpdrive_properties(struct MPT2SAS_ADAPTER *ioc,
    struct _raid_device *raid_device)
{
	Mpi2RaidVolPage0_t *vol_pg0;
	Mpi2RaidPhysDiskPage0_t pd_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u16 sz;
	u8 num_pds, count;
	unsigned long stripe_sz, block_sz;
	u8 stripe_exp, block_exp;
	u64 dev_max_lba;

	if (!ioc->is_warpdrive)
		return;

	if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS) {
		printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
		    "globally as drives are exposed\n", ioc->name);
		return;
	}
	if (_scsih_get_num_volumes(ioc) > 1) {
		_scsih_disable_ddio(ioc);
		printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
		    "globally as number of drives > 1\n", ioc->name);
		return;
	}
	if ((mpt2sas_config_get_number_pds(ioc, raid_device->handle,
	    &num_pds)) || !num_pds) {
		printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
		    "Failure in computing number of drives\n", ioc->name);
		return;
	}

	sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
	    sizeof(Mpi2RaidVol0PhysDisk_t));
	vol_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!vol_pg0) {
		printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
		    "Memory allocation failure for RVPG0\n", ioc->name);
		return;
	}

	if ((mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
	    MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
		printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
		    "Failure in retrieving RVPG0\n", ioc->name);
		kfree(vol_pg0);
		return;
	}

	/*
	 * WARPDRIVE: If number of physical disks in a volume exceeds the max
	 * pds assumed for WARPDRIVE, disable direct I/O
	 */
	if (num_pds > MPT_MAX_WARPDRIVE_PDS) {
		printk(MPT2SAS_WARN_FMT "WarpDrive : Direct IO is disabled "
		    "for the drive with handle(0x%04x): num_mem=%d, "
		    "max_mem_allowed=%d\n", ioc->name, raid_device->handle,
		    num_pds, MPT_MAX_WARPDRIVE_PDS);
		kfree(vol_pg0);
		return;
	}
	for (count = 0; count < num_pds; count++) {
		if (mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
		    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
		    vol_pg0->PhysDisk[count].PhysDiskNum) ||
		    pd_pg0.DevHandle == MPT2SAS_INVALID_DEVICE_HANDLE) {
			printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is "
			    "disabled for the drive with handle(0x%04x) member"
			    "handle retrieval failed for member number=%d\n",
			    ioc->name, raid_device->handle,
			    vol_pg0->PhysDisk[count].PhysDiskNum);
			goto out_error;
		}
		/* Disable direct I/O if member drive lba exceeds 4 bytes */
		dev_max_lba = le64_to_cpu(pd_pg0.DeviceMaxLBA);
		if (dev_max_lba >> 32) {
			printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is "
			    "disabled for the drive with handle(0x%04x) member"
			    "handle (0x%04x) unsupported max lba 0x%016llx\n",
			    ioc->name, raid_device->handle,
			    le16_to_cpu(pd_pg0.DevHandle),
			    (unsigned long long)dev_max_lba);
			goto out_error;
		}

		raid_device->pd_handle[count] = le16_to_cpu(pd_pg0.DevHandle);
	}

	/*
	 * Assumption for WD: Direct I/O is not supported if the volume is
	 * not RAID0
	 */
	if (raid_device->volume_type != MPI2_RAID_VOL_TYPE_RAID0) {
		printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
		    "for the drive with handle(0x%04x): type=%d, "
		    "s_sz=%uK, blk_size=%u\n", ioc->name,
		    raid_device->handle, raid_device->volume_type,
		    (le32_to_cpu(vol_pg0->StripeSize) *
		    le16_to_cpu(vol_pg0->BlockSize)) / 1024,
		    le16_to_cpu(vol_pg0->BlockSize));
		goto out_error;
	}

	stripe_sz = le32_to_cpu(vol_pg0->StripeSize);
	stripe_exp = find_first_bit(&stripe_sz, 32);
	if (stripe_exp == 32) {
		printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
		    "for the drive with handle(0x%04x) invalid stripe sz %uK\n",
		    ioc->name, raid_device->handle,
		    (le32_to_cpu(vol_pg0->StripeSize) *
		    le16_to_cpu(vol_pg0->BlockSize)) / 1024);
		goto out_error;
	}
	raid_device->stripe_exponent = stripe_exp;
	block_sz = le16_to_cpu(vol_pg0->BlockSize);
	block_exp = find_first_bit(&block_sz, 16);
	if (block_exp == 16) {
		printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
		    "for the drive with handle(0x%04x) invalid block sz %u\n",
		    ioc->name, raid_device->handle,
		    le16_to_cpu(vol_pg0->BlockSize));
		goto out_error;
	}
	raid_device->block_exponent = block_exp;
	raid_device->direct_io_enabled = 1;

	printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is Enabled for the drive"
	    " with handle(0x%04x)\n", ioc->name, raid_device->handle);
	/*
	 * WARPDRIVE: Though the following fields are not used for direct IO,
	 * stored for future purpose:
	 */
	raid_device->max_lba = le64_to_cpu(vol_pg0->MaxLBA);
	raid_device->stripe_sz = le32_to_cpu(vol_pg0->StripeSize);
	raid_device->block_sz = le16_to_cpu(vol_pg0->BlockSize);

	kfree(vol_pg0);
	return;

out_error:
	raid_device->direct_io_enabled = 0;
	for (count = 0; count < num_pds; count++)
		raid_device->pd_handle[count] = 0;
	kfree(vol_pg0);
	return;
}

/**
 * _scsih_enable_tlr - setting TLR flags
 * @ioc: per adapter object
 * @sdev: scsi device struct
 *
 * Enabling Transaction Layer Retries for tape devices when
 * vpd page 0x90 is present
 *
 */
static void
_scsih_enable_tlr(struct MPT2SAS_ADAPTER *ioc, struct scsi_device *sdev)
{
	/* only for TAPE */
	if (sdev->type != TYPE_TAPE)
		return;

	if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
		return;
	sas_enable_tlr(sdev);
	sdev_printk(KERN_INFO, sdev, "TLR %s\n",
	    sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
	return;
}

/**
 * _scsih_slave_configure - device configure routine.
 * @sdev: scsi device struct
 *
 * Returns 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
_scsih_slave_configure(struct scsi_device *sdev)
{
    struct Scsi_Host *shost = sdev->host;
    struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
    struct MPT2SAS_DEVICE *sas_device_priv_data;
    struct MPT2SAS_TARGET *sas_target_priv_data;
    struct _sas_device *sas_device;
    struct _raid_device *raid_device;
    unsigned long flags;
    int qdepth;
    u8 ssp_target = 0;
    char *ds = "";
    char *r_level = "";

    qdepth = 1;
    sas_device_priv_data = sdev->hostdata;
    sas_device_priv_data->configured_lun = 1;
    sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
    sas_target_priv_data = sas_device_priv_data->sas_target;

    /* raid volume handling */
    if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {

        spin_lock_irqsave(&ioc->raid_device_lock, flags);
        raid_device = _scsih_raid_device_find_by_handle(ioc,
            sas_target_priv_data->handle);
        spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
        if (!raid_device) {
            dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
                "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
                __LINE__, __func__));
            return 1;
        }

        if (_scsih_get_volume_capabilities(ioc, raid_device)) {
            dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
                "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
                __LINE__, __func__));
            return 1;
        }
        /*
         * WARPDRIVE: Initialize the required data for Direct IO
         */
        _scsih_init_warpdrive_properties(ioc, raid_device);

        /* RAID Queue Depth Support
         * IS volume = underlying qdepth of drive type, either
         *    MPT2SAS_SAS_QUEUE_DEPTH or MPT2SAS_SATA_QUEUE_DEPTH
         * IM/IME/R10 = 128 (MPT2SAS_RAID_QUEUE_DEPTH)
         */
        if (raid_device->device_info &
            MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
            qdepth = MPT2SAS_SAS_QUEUE_DEPTH;
            ds = "SSP";
        } else {
            qdepth = MPT2SAS_SATA_QUEUE_DEPTH;
            if (raid_device->device_info &
                MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
                ds = "SATA";
            else
                ds = "STP";
        }

        switch (raid_device->volume_type) {
        case MPI2_RAID_VOL_TYPE_RAID0:
            r_level = "RAID0";
            break;
        case MPI2_RAID_VOL_TYPE_RAID1E:
            qdepth = MPT2SAS_RAID_QUEUE_DEPTH;
            if (ioc->manu_pg10.OEMIdentifier &&
                (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
                MFG10_GF0_R10_DISPLAY) &&
                !(raid_device->num_pds % 2))
                r_level = "RAID10";
            else
                r_level = "RAID1E";
            break;
        case MPI2_RAID_VOL_TYPE_RAID1:
            qdepth = MPT2SAS_RAID_QUEUE_DEPTH;
            r_level = "RAID1";
            break;
        case MPI2_RAID_VOL_TYPE_RAID10:
            qdepth = MPT2SAS_RAID_QUEUE_DEPTH;
            r_level = "RAID10";
            break;
        case MPI2_RAID_VOL_TYPE_UNKNOWN:
        default:
            qdepth = MPT2SAS_RAID_QUEUE_DEPTH;
            r_level = "RAIDX";
            break;
        }

        if (!ioc->hide_ir_msg)
            sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), "
                "wwid(0x%016llx), pd_count(%d), type(%s)\n",
                r_level, raid_device->handle,
                (unsigned long long)raid_device->wwid,
                raid_device->num_pds, ds);
        _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
        /* raid transport support */
        if (!ioc->is_warpdrive)
            _scsih_set_level(sdev, raid_device);
        return 0;
    }

    /* non-raid handling */
    spin_lock_irqsave(&ioc->sas_device_lock, flags);
    sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
        sas_device_priv_data->sas_target->sas_address);
    spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
    if (sas_device) {
        if (sas_target_priv_data->flags &
            MPT_TARGET_FLAGS_RAID_COMPONENT) {
            if (mpt2sas_config_get_volume_handle(ioc,
                sas_device->handle, &sas_device->volume_handle)) {
                dfailprintk(ioc, printk(MPT2SAS_WARN_FMT
                    "failure at %s:%d/%s()!\n", ioc->name,
                    __FILE__, __LINE__, __func__));
                return 1;
            }
            if (sas_device->volume_handle &&
                mpt2sas_config_get_volume_wwid(ioc,
                sas_device->volume_handle,
                &sas_device->volume_wwid)) {
                dfailprintk(ioc, printk(MPT2SAS_WARN_FMT "failure at 
%s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__)); return 1; } } if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) { qdepth = MPT2SAS_SAS_QUEUE_DEPTH; ssp_target = 1; ds = "SSP"; } else { qdepth = MPT2SAS_SATA_QUEUE_DEPTH; if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) ds = "STP"; else if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) ds = "SATA"; } sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n", ds, sas_device->handle, (unsigned long long)sas_device->sas_address, sas_device->phy, (unsigned long long)sas_device->device_name); sdev_printk(KERN_INFO, sdev, "%s: " "enclosure_logical_id(0x%016llx), slot(%d)\n", ds, (unsigned long long) sas_device->enclosure_logical_id, sas_device->slot); if (!ssp_target) _scsih_display_sata_capabilities(ioc, sas_device, sdev); } else { dfailprintk(ioc, printk(MPT2SAS_WARN_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__)); return 1; } _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); if (ssp_target) { sas_read_port_mode_page(sdev); _scsih_enable_tlr(ioc, sdev); } return 0; } /** * _scsih_bios_param - fetch head, sector, cylinder info for a disk * @sdev: scsi device struct * @bdev: pointer to block device context * @capacity: device size (in 512 byte sectors) * @params: three element array to place output: * params[0] number of heads (max 255) * params[1] number of sectors (max 63) * params[2] number of cylinders * * Return nothing. */ static int _scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int params[]) { int heads; int sectors; sector_t cylinders; ulong dummy; heads = 64; sectors = 32; dummy = heads * sectors; cylinders = capacity; sector_div(cylinders, dummy); /* * Handle extended translation size for logical drives * > 1Gb */ if ((ulong)capacity >= 0x200000) { heads = 255; sectors = 63; dummy = heads * sectors; cylinders = capacity; sector_div(cylinders, dummy); } /* return result */ params[0] = heads; params[1] = sectors; params[2] = cylinders; return 0; } /** * _scsih_response_code - translation of device response code * @ioc: per adapter object * @response_code: response code returned by the device * * Return nothing. */ static void _scsih_response_code(struct MPT2SAS_ADAPTER *ioc, u8 response_code) { char *desc; switch (response_code) { case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE: desc = "task management request completed"; break; case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME: desc = "invalid frame"; break; case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED: desc = "task management request not supported"; break; case MPI2_SCSITASKMGMT_RSP_TM_FAILED: desc = "task management request failed"; break; case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED: desc = "task management request succeeded"; break; case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN: desc = "invalid lun"; break; case 0xA: desc = "overlapped tag attempted"; break; case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC: desc = "task queued, however not sent to target"; break; default: desc = "unknown"; break; } printk(MPT2SAS_WARN_FMT "response_code(0x%01x): %s\n", ioc->name, response_code, desc); } /** * _scsih_tm_done - tm completion routine * @ioc: per adapter object * @smid: system request message index * @msix_index: MSIX table index supplied by the OS * @reply: reply message frame(lower 32bit addr) * Context: none. * * The callback handler when using scsih_issue_tm. 
 *
 * Return 1 meaning mf should be freed from _base_interrupt
 *        0 means the mf is freed from this function.
 */
static u8
_scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
{
    MPI2DefaultReply_t *mpi_reply;

    if (ioc->tm_cmds.status == MPT2_CMD_NOT_USED)
        return 1;
    if (ioc->tm_cmds.smid != smid)
        return 1;
    mpt2sas_base_flush_reply_queues(ioc);
    ioc->tm_cmds.status |= MPT2_CMD_COMPLETE;
    mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
    if (mpi_reply) {
        memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
        ioc->tm_cmds.status |= MPT2_CMD_REPLY_VALID;
    }
    ioc->tm_cmds.status &= ~MPT2_CMD_PENDING;
    complete(&ioc->tm_cmds.done);
    return 1;
}

/**
 * mpt2sas_scsih_set_tm_flag - set per target tm_busy
 * @ioc: per adapter object
 * @handle: device handle
 *
 * During a task management request, we need to freeze the device queue.
 */
void
mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
    struct MPT2SAS_DEVICE *sas_device_priv_data;
    struct scsi_device *sdev;
    u8 skip = 0;

    shost_for_each_device(sdev, ioc->shost) {
        if (skip)
            continue;
        sas_device_priv_data = sdev->hostdata;
        if (!sas_device_priv_data)
            continue;
        if (sas_device_priv_data->sas_target->handle == handle) {
            sas_device_priv_data->sas_target->tm_busy = 1;
            skip = 1;
            ioc->ignore_loginfos = 1;
        }
    }
}

/**
 * mpt2sas_scsih_clear_tm_flag - clear per target tm_busy
 * @ioc: per adapter object
 * @handle: device handle
 *
 * Once the task management request has completed, we need to unfreeze
 * the device queue.
 */
void
mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
    struct MPT2SAS_DEVICE *sas_device_priv_data;
    struct scsi_device *sdev;
    u8 skip = 0;

    shost_for_each_device(sdev, ioc->shost) {
        if (skip)
            continue;
        sas_device_priv_data = sdev->hostdata;
        if (!sas_device_priv_data)
            continue;
        if (sas_device_priv_data->sas_target->handle == handle) {
            sas_device_priv_data->sas_target->tm_busy = 0;
            skip = 1;
            ioc->ignore_loginfos = 0;
        }
    }
}

/**
 * mpt2sas_scsih_issue_tm - main routine for sending tm requests
 * @ioc: per adapter struct
 * @handle: device handle
 * @channel: the channel assigned by the OS
 * @id: the id assigned by the OS
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
 * @smid_task: smid assigned to the task
 * @timeout: timeout in seconds
 * @serial_number: the serial_number from scmd
 * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF
 * Context: user
 *
 * A generic API for sending task management requests to firmware.
 *
 * The callback index is set inside ioc->tm_cb_idx.
 *
 * Return SUCCESS or FAILED.
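 *
 * A minimal usage sketch (this mirrors the call made from the task
 * abort handler later in this file, once the smid of the offending
 * command has been looked up):
 *
 *	r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
 *	    scmd->device->id, scmd->device->lun,
 *	    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
 *	    scmd->serial_number, TM_MUTEX_ON);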
 */
int
mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
    uint id, uint lun, u8 type, u16 smid_task, ulong timeout,
    unsigned long serial_number, enum mutex_type m_type)
{
    Mpi2SCSITaskManagementRequest_t *mpi_request;
    Mpi2SCSITaskManagementReply_t *mpi_reply;
    u16 smid = 0;
    u32 ioc_state;
    unsigned long timeleft;
    struct scsiio_tracker *scsi_lookup = NULL;
    int rc;

    if (m_type == TM_MUTEX_ON)
        mutex_lock(&ioc->tm_cmds.mutex);
    if (ioc->tm_cmds.status != MPT2_CMD_NOT_USED) {
        printk(MPT2SAS_INFO_FMT "%s: tm_cmd busy!!!\n",
            ioc->name, __func__);
        rc = FAILED;
        goto err_out;
    }

    if (ioc->shost_recovery || ioc->remove_host ||
        ioc->pci_error_recovery) {
        printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
            ioc->name, __func__);
        rc = FAILED;
        goto err_out;
    }

    ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
    if (ioc_state & MPI2_DOORBELL_USED) {
        dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "unexpected doorbell "
            "active!\n", ioc->name));
        rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
            FORCE_BIG_HAMMER);
        rc = (!rc) ? SUCCESS : FAILED;
        goto err_out;
    }

    if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
        mpt2sas_base_fault_info(ioc, ioc_state &
            MPI2_DOORBELL_DATA_MASK);
        rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
            FORCE_BIG_HAMMER);
        rc = (!rc) ? SUCCESS : FAILED;
        goto err_out;
    }

    smid = mpt2sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
    if (!smid) {
        printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
            ioc->name, __func__);
        rc = FAILED;
        goto err_out;
    }

    if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
        scsi_lookup = &ioc->scsi_lookup[smid_task - 1];

    dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "sending tm: handle(0x%04x),"
        " task_type(0x%02x), smid(%d)\n", ioc->name, handle, type,
        smid_task));
    ioc->tm_cmds.status = MPT2_CMD_PENDING;
    mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
    ioc->tm_cmds.smid = smid;
    memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
    memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
    mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
    mpi_request->DevHandle = cpu_to_le16(handle);
    mpi_request->TaskType = type;
    mpi_request->TaskMID = cpu_to_le16(smid_task);
    int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
    mpt2sas_scsih_set_tm_flag(ioc, handle);
    init_completion(&ioc->tm_cmds.done);
    mpt2sas_base_put_smid_hi_priority(ioc, smid);
    timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
    if (!(ioc->tm_cmds.status & MPT2_CMD_COMPLETE)) {
        printk(MPT2SAS_ERR_FMT "%s: timeout\n",
            ioc->name, __func__);
        _debug_dump_mf(mpi_request,
            sizeof(Mpi2SCSITaskManagementRequest_t)/4);
        if (!(ioc->tm_cmds.status & MPT2_CMD_RESET)) {
            rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
                FORCE_BIG_HAMMER);
            rc = (!rc) ? 
SUCCESS : FAILED; ioc->tm_cmds.status = MPT2_CMD_NOT_USED; mpt2sas_scsih_clear_tm_flag(ioc, handle); goto err_out; } } if (ioc->tm_cmds.status & MPT2_CMD_REPLY_VALID) { mpi_reply = ioc->tm_cmds.reply; dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "complete tm: " "ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n", ioc->name, le16_to_cpu(mpi_reply->IOCStatus), le32_to_cpu(mpi_reply->IOCLogInfo), le32_to_cpu(mpi_reply->TerminationCount))); if (ioc->logging_level & MPT_DEBUG_TM) { _scsih_response_code(ioc, mpi_reply->ResponseCode); if (mpi_reply->IOCStatus) _debug_dump_mf(mpi_request, sizeof(Mpi2SCSITaskManagementRequest_t)/4); } } switch (type) { case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK: rc = SUCCESS; if (scsi_lookup->scmd == NULL) break; rc = FAILED; break; case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET: if (_scsih_scsi_lookup_find_by_target(ioc, id, channel)) rc = FAILED; else rc = SUCCESS; break; case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: if (_scsih_scsi_lookup_find_by_lun(ioc, id, lun, channel)) rc = FAILED; else rc = SUCCESS; break; case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK: rc = SUCCESS; break; default: rc = FAILED; break; } mpt2sas_scsih_clear_tm_flag(ioc, handle); ioc->tm_cmds.status = MPT2_CMD_NOT_USED; if (m_type == TM_MUTEX_ON) mutex_unlock(&ioc->tm_cmds.mutex); return rc; err_out: if (m_type == TM_MUTEX_ON) mutex_unlock(&ioc->tm_cmds.mutex); return rc; } /** * _scsih_tm_display_info - displays info about the device * @ioc: per adapter struct * @scmd: pointer to scsi command object * * Called by task management callback handlers. */ static void _scsih_tm_display_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd) { struct scsi_target *starget = scmd->device->sdev_target; struct MPT2SAS_TARGET *priv_target = starget->hostdata; struct _sas_device *sas_device = NULL; unsigned long flags; char *device_str = NULL; if (!priv_target) return; if (ioc->hide_ir_msg) device_str = "WarpDrive"; else device_str = "volume"; scsi_print_command(scmd); if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) { starget_printk(KERN_INFO, starget, "%s handle(0x%04x), " "%s wwid(0x%016llx)\n", device_str, priv_target->handle, device_str, (unsigned long long)priv_target->sas_address); } else { spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, priv_target->sas_address); if (sas_device) { if (priv_target->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) { starget_printk(KERN_INFO, starget, "volume handle(0x%04x), " "volume wwid(0x%016llx)\n", sas_device->volume_handle, (unsigned long long)sas_device->volume_wwid); } starget_printk(KERN_INFO, starget, "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n", sas_device->handle, (unsigned long long)sas_device->sas_address, sas_device->phy); starget_printk(KERN_INFO, starget, "enclosure_logical_id(0x%016llx), slot(%d)\n", (unsigned long long)sas_device->enclosure_logical_id, sas_device->slot); } spin_unlock_irqrestore(&ioc->sas_device_lock, flags); } } /** * _scsih_abort - eh threads main abort routine * @scmd: pointer to scsi command object * * Returns SUCCESS if command aborted else FAILED */ static int _scsih_abort(struct scsi_cmnd *scmd) { struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); struct MPT2SAS_DEVICE *sas_device_priv_data; u16 smid; u16 handle; int r; sdev_printk(KERN_INFO, scmd->device, "attempting task abort! 
" "scmd(%p)\n", scmd); _scsih_tm_display_info(ioc, scmd); sas_device_priv_data = scmd->device->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { sdev_printk(KERN_INFO, scmd->device, "device been deleted! " "scmd(%p)\n", scmd); scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); r = SUCCESS; goto out; } /* search for the command */ smid = _scsih_scsi_lookup_find_by_scmd(ioc, scmd); if (!smid) { scmd->result = DID_RESET << 16; r = SUCCESS; goto out; } /* for hidden raid components and volumes this is not supported */ if (sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_RAID_COMPONENT || sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) { scmd->result = DID_RESET << 16; r = FAILED; goto out; } mpt2sas_halt_firmware(ioc); handle = sas_device_priv_data->sas_target->handle; r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel, scmd->device->id, scmd->device->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, scmd->serial_number, TM_MUTEX_ON); out: sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n", ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); return r; } /** * _scsih_dev_reset - eh threads main device reset routine * @scmd: pointer to scsi command object * * Returns SUCCESS if command aborted else FAILED */ static int _scsih_dev_reset(struct scsi_cmnd *scmd) { struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); struct MPT2SAS_DEVICE *sas_device_priv_data; struct _sas_device *sas_device; unsigned long flags; u16 handle; int r; struct scsi_target *starget = scmd->device->sdev_target; starget_printk(KERN_INFO, starget, "attempting device reset! " "scmd(%p)\n", scmd); _scsih_tm_display_info(ioc, scmd); sas_device_priv_data = scmd->device->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { starget_printk(KERN_INFO, starget, "device been deleted! " "scmd(%p)\n", scmd); scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); r = SUCCESS; goto out; } /* for hidden raid components obtain the volume_handle */ handle = 0; if (sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) { spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = _scsih_sas_device_find_by_handle(ioc, sas_device_priv_data->sas_target->handle); if (sas_device) handle = sas_device->volume_handle; spin_unlock_irqrestore(&ioc->sas_device_lock, flags); } else handle = sas_device_priv_data->sas_target->handle; if (!handle) { scmd->result = DID_RESET << 16; r = FAILED; goto out; } r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel, scmd->device->id, scmd->device->lun, MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, 0, TM_MUTEX_ON); out: sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n", ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); return r; } /** * _scsih_target_reset - eh threads main target reset routine * @scmd: pointer to scsi command object * * Returns SUCCESS if command aborted else FAILED */ static int _scsih_target_reset(struct scsi_cmnd *scmd) { struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); struct MPT2SAS_DEVICE *sas_device_priv_data; struct _sas_device *sas_device; unsigned long flags; u16 handle; int r; struct scsi_target *starget = scmd->device->sdev_target; starget_printk(KERN_INFO, starget, "attempting target reset! 
" "scmd(%p)\n", scmd); _scsih_tm_display_info(ioc, scmd); sas_device_priv_data = scmd->device->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { starget_printk(KERN_INFO, starget, "target been deleted! " "scmd(%p)\n", scmd); scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); r = SUCCESS; goto out; } /* for hidden raid components obtain the volume_handle */ handle = 0; if (sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) { spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = _scsih_sas_device_find_by_handle(ioc, sas_device_priv_data->sas_target->handle); if (sas_device) handle = sas_device->volume_handle; spin_unlock_irqrestore(&ioc->sas_device_lock, flags); } else handle = sas_device_priv_data->sas_target->handle; if (!handle) { scmd->result = DID_RESET << 16; r = FAILED; goto out; } r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel, scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30, 0, TM_MUTEX_ON); out: starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n", ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); return r; } /** * _scsih_host_reset - eh threads main host reset routine * @scmd: pointer to scsi command object * * Returns SUCCESS if command aborted else FAILED */ static int _scsih_host_reset(struct scsi_cmnd *scmd) { struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); int r, retval; printk(MPT2SAS_INFO_FMT "attempting host reset! scmd(%p)\n", ioc->name, scmd); scsi_print_command(scmd); retval = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, FORCE_BIG_HAMMER); r = (retval < 0) ? FAILED : SUCCESS; printk(MPT2SAS_INFO_FMT "host reset: %s scmd(%p)\n", ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); return r; } /** * _scsih_fw_event_add - insert and queue up fw_event * @ioc: per adapter object * @fw_event: object describing the event * Context: This function will acquire ioc->fw_event_lock. * * This adds the firmware event object into link list, then queues it up to * be processed from user context. * * Return nothing. */ static void _scsih_fw_event_add(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work *fw_event) { unsigned long flags; if (ioc->firmware_event_thread == NULL) return; spin_lock_irqsave(&ioc->fw_event_lock, flags); list_add_tail(&fw_event->list, &ioc->fw_event_list); INIT_DELAYED_WORK(&fw_event->delayed_work, _firmware_event_work); queue_delayed_work(ioc->firmware_event_thread, &fw_event->delayed_work, 0); spin_unlock_irqrestore(&ioc->fw_event_lock, flags); } /** * _scsih_fw_event_free - delete fw_event * @ioc: per adapter object * @fw_event: object describing the event * Context: This function will acquire ioc->fw_event_lock. * * This removes firmware event object from link list, frees associated memory. * * Return nothing. */ static void _scsih_fw_event_free(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work *fw_event) { unsigned long flags; spin_lock_irqsave(&ioc->fw_event_lock, flags); list_del(&fw_event->list); kfree(fw_event->event_data); kfree(fw_event); spin_unlock_irqrestore(&ioc->fw_event_lock, flags); } /** * _scsih_error_recovery_delete_devices - remove devices not responding * @ioc: per adapter object * * Return nothing. 
 */
static void
_scsih_error_recovery_delete_devices(struct MPT2SAS_ADAPTER *ioc)
{
    struct fw_event_work *fw_event;

    if (ioc->is_driver_loading)
        return;
    fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
    if (!fw_event)
        return;
    fw_event->event = MPT2SAS_REMOVE_UNRESPONDING_DEVICES;
    fw_event->ioc = ioc;
    _scsih_fw_event_add(ioc, fw_event);
}

/**
 * mpt2sas_port_enable_complete - port enable completed (fake event)
 * @ioc: per adapter object
 *
 * Return nothing.
 */
void
mpt2sas_port_enable_complete(struct MPT2SAS_ADAPTER *ioc)
{
    struct fw_event_work *fw_event;

    fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
    if (!fw_event)
        return;
    fw_event->event = MPT2SAS_PORT_ENABLE_COMPLETE;
    fw_event->ioc = ioc;
    _scsih_fw_event_add(ioc, fw_event);
}

/**
 * _scsih_fw_event_cleanup_queue - cleanup event queue
 * @ioc: per adapter object
 *
 * Walk the firmware event queue, either killing timers, or waiting
 * for outstanding events to complete
 *
 * Return nothing.
 */
static void
_scsih_fw_event_cleanup_queue(struct MPT2SAS_ADAPTER *ioc)
{
    struct fw_event_work *fw_event, *next;

    if (list_empty(&ioc->fw_event_list) ||
        !ioc->firmware_event_thread || in_interrupt())
        return;

    list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
        if (cancel_delayed_work(&fw_event->delayed_work)) {
            _scsih_fw_event_free(ioc, fw_event);
            continue;
        }
        fw_event->cancel_pending_work = 1;
    }
}

/**
 * _scsih_ublock_io_all_device - unblock every device
 * @ioc: per adapter object
 *
 * change the device state from block to running
 */
static void
_scsih_ublock_io_all_device(struct MPT2SAS_ADAPTER *ioc)
{
    struct MPT2SAS_DEVICE *sas_device_priv_data;
    struct scsi_device *sdev;

    shost_for_each_device(sdev, ioc->shost) {
        sas_device_priv_data = sdev->hostdata;
        if (!sas_device_priv_data)
            continue;
        if (!sas_device_priv_data->block)
            continue;
        sas_device_priv_data->block = 0;
        dewtprintk(ioc, sdev_printk(KERN_INFO, sdev, "device_running, "
            "handle(0x%04x)\n",
            sas_device_priv_data->sas_target->handle));
        scsi_internal_device_unblock(sdev);
    }
}

/**
 * _scsih_ublock_io_device - set the device state to SDEV_RUNNING
 * @ioc: per adapter object
 * @handle: device handle
 *
 * During device pull we need to appropriately set the sdev state.
 */
static void
_scsih_ublock_io_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
    struct MPT2SAS_DEVICE *sas_device_priv_data;
    struct scsi_device *sdev;

    shost_for_each_device(sdev, ioc->shost) {
        sas_device_priv_data = sdev->hostdata;
        if (!sas_device_priv_data)
            continue;
        if (!sas_device_priv_data->block)
            continue;
        if (sas_device_priv_data->sas_target->handle == handle) {
            dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
                MPT2SAS_INFO_FMT "SDEV_RUNNING: "
                "handle(0x%04x)\n", ioc->name, handle));
            sas_device_priv_data->block = 0;
            scsi_internal_device_unblock(sdev);
        }
    }
}

/**
 * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
 * @ioc: per adapter object
 *
 * During device pull we need to appropriately set the sdev state.
 */
static void
_scsih_block_io_all_device(struct MPT2SAS_ADAPTER *ioc)
{
    struct MPT2SAS_DEVICE *sas_device_priv_data;
    struct scsi_device *sdev;

    shost_for_each_device(sdev, ioc->shost) {
        sas_device_priv_data = sdev->hostdata;
        if (!sas_device_priv_data)
            continue;
        if (sas_device_priv_data->block)
            continue;
        sas_device_priv_data->block = 1;
        dewtprintk(ioc, sdev_printk(KERN_INFO, sdev, "device_blocked, "
            "handle(0x%04x)\n",
            sas_device_priv_data->sas_target->handle));
        scsi_internal_device_block(sdev);
    }
}

/**
 * _scsih_block_io_device - set the device state to SDEV_BLOCK
 * @ioc: per adapter object
 * @handle: device handle
 *
 * During device pull we need to appropriately set the sdev state.
 */
static void
_scsih_block_io_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
    struct MPT2SAS_DEVICE *sas_device_priv_data;
    struct scsi_device *sdev;

    shost_for_each_device(sdev, ioc->shost) {
        sas_device_priv_data = sdev->hostdata;
        if (!sas_device_priv_data)
            continue;
        if (sas_device_priv_data->block)
            continue;
        if (sas_device_priv_data->sas_target->handle == handle) {
            dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
                MPT2SAS_INFO_FMT "SDEV_BLOCK: "
                "handle(0x%04x)\n", ioc->name, handle));
            sas_device_priv_data->block = 1;
            scsi_internal_device_block(sdev);
        }
    }
}

/**
 * _scsih_block_io_to_children_attached_to_ex
 * @ioc: per adapter object
 * @sas_expander: the sas_node object
 *
 * This routine sets the sdev state to SDEV_BLOCK for all devices
 * attached to this expander.  This function is called when the expander
 * is pulled.
 */
static void
_scsih_block_io_to_children_attached_to_ex(struct MPT2SAS_ADAPTER *ioc,
    struct _sas_node *sas_expander)
{
    struct _sas_port *mpt2sas_port;
    struct _sas_device *sas_device;
    struct _sas_node *expander_sibling;
    unsigned long flags;

    if (!sas_expander)
        return;

    list_for_each_entry(mpt2sas_port,
        &sas_expander->sas_port_list, port_list) {
        if (mpt2sas_port->remote_identify.device_type ==
            SAS_END_DEVICE) {
            spin_lock_irqsave(&ioc->sas_device_lock, flags);
            sas_device =
                mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
                mpt2sas_port->remote_identify.sas_address);
            spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
            if (!sas_device)
                continue;
            _scsih_block_io_device(ioc, sas_device->handle);
        }
    }

    list_for_each_entry(mpt2sas_port,
        &sas_expander->sas_port_list, port_list) {

        if (mpt2sas_port->remote_identify.device_type ==
            SAS_EDGE_EXPANDER_DEVICE ||
            mpt2sas_port->remote_identify.device_type ==
            SAS_FANOUT_EXPANDER_DEVICE) {
            spin_lock_irqsave(&ioc->sas_node_lock, flags);
            expander_sibling =
                mpt2sas_scsih_expander_find_by_sas_address(
                ioc, mpt2sas_port->remote_identify.sas_address);
            spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
            _scsih_block_io_to_children_attached_to_ex(ioc,
                expander_sibling);
        }
    }
}

/**
 * _scsih_block_io_to_children_attached_directly
 * @ioc: per adapter object
 * @event_data: topology change event data
 *
 * This routine sets the sdev state to SDEV_BLOCK for all devices
 * directly attached during device pull.
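 * Only PHYs whose reason code is
 * MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING are blocked.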
 */
static void
_scsih_block_io_to_children_attached_directly(struct MPT2SAS_ADAPTER *ioc,
    Mpi2EventDataSasTopologyChangeList_t *event_data)
{
    int i;
    u16 handle;
    u16 reason_code;
    u8 phy_number;

    for (i = 0; i < event_data->NumEntries; i++) {
        handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
        if (!handle)
            continue;
        phy_number = event_data->StartPhyNum + i;
        reason_code = event_data->PHY[i].PhyStatus &
            MPI2_EVENT_SAS_TOPO_RC_MASK;
        if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
            _scsih_block_io_device(ioc, handle);
    }
}

/**
 * _scsih_tm_tr_send - send task management request
 * @ioc: per adapter object
 * @handle: device handle
 * Context: interrupt time.
 *
 * This code is to initiate the device removal handshake protocol
 * with controller firmware.  This function will issue a target reset
 * using the high priority request queue.  It will send a sas iounit
 * control request (MPI2_SAS_OP_REMOVE_DEVICE) from the completion.
 *
 * This is designed to send multiple task management requests at the same
 * time to the fifo.  If the fifo is full, we will append the request,
 * and process it in a future completion.
 */
static void
_scsih_tm_tr_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
    Mpi2SCSITaskManagementRequest_t *mpi_request;
    u16 smid;
    struct _sas_device *sas_device;
    struct MPT2SAS_TARGET *sas_target_priv_data = NULL;
    u64 sas_address = 0;
    unsigned long flags;
    struct _tr_list *delayed_tr;
    u32 ioc_state;

    if (ioc->remove_host) {
        dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host has been "
            "removed: handle(0x%04x)\n", ioc->name, __func__, handle));
        return;
    } else if (ioc->pci_error_recovery) {
        dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host in pci "
            "error recovery: handle(0x%04x)\n", ioc->name, __func__,
            handle));
        return;
    }
    ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
    if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
        dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host is not "
            "operational: handle(0x%04x)\n", ioc->name, __func__,
            handle));
        return;
    }

    /* if PD, then return */
    if (test_bit(handle, ioc->pd_handles))
        return;

    spin_lock_irqsave(&ioc->sas_device_lock, flags);
    sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
    if (sas_device && sas_device->starget &&
        sas_device->starget->hostdata) {
        sas_target_priv_data = sas_device->starget->hostdata;
        sas_target_priv_data->deleted = 1;
        sas_address = sas_device->sas_address;
    }
    spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

    if (sas_target_priv_data) {
        dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "setting delete flag: "
            "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, handle,
            (unsigned long long)sas_address));
        _scsih_ublock_io_device(ioc, handle);
        sas_target_priv_data->handle = MPT2SAS_INVALID_DEVICE_HANDLE;
    }

    smid = mpt2sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
    if (!smid) {
        delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
        if (!delayed_tr)
            return;
        INIT_LIST_HEAD(&delayed_tr->list);
        delayed_tr->handle = handle;
        list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
        dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
            "DELAYED:tr:handle(0x%04x), (open)\n",
            ioc->name, handle));
        return;
    }

    dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "tr_send:handle(0x%04x), "
        "(open), smid(%d), cb(%d)\n", ioc->name, handle, smid,
        ioc->tm_tr_cb_idx));
    mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
    memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
    mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
    mpi_request->DevHandle = cpu_to_le16(handle);
    mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
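    /*
     * Step one of the device removal handshake: fire the target reset
     * on the high priority queue.  The completion routine
     * (_scsih_tm_tr_complete) sends the follow-up sas iounit control
     * request (MPI2_SAS_OP_REMOVE_DEVICE).
     */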
    mpt2sas_base_put_smid_hi_priority(ioc, smid);
}

/**
 * _scsih_sas_control_complete - completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt time.
 *
 * This is the sas iounit control completion routine.
 * This code is part of the code to initiate the device removal
 * handshake protocol with controller firmware.
 *
 * Return 1 meaning mf should be freed from _base_interrupt
 *        0 means the mf is freed from this function.
 */
static u8
_scsih_sas_control_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid,
    u8 msix_index, u32 reply)
{
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
    Mpi2SasIoUnitControlReply_t *mpi_reply =
        mpt2sas_base_get_reply_virt_addr(ioc, reply);
#endif
    dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
        "sc_complete:handle(0x%04x), (open) "
        "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
        ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
        le16_to_cpu(mpi_reply->IOCStatus),
        le32_to_cpu(mpi_reply->IOCLogInfo)));
    return 1;
}

/**
 * _scsih_tm_tr_volume_send - send target reset request for volumes
 * @ioc: per adapter object
 * @handle: device handle
 * Context: interrupt time.
 *
 * This is designed to send multiple task management requests at the same
 * time to the fifo.  If the fifo is full, we will append the request,
 * and process it in a future completion.
 */
static void
_scsih_tm_tr_volume_send(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
    Mpi2SCSITaskManagementRequest_t *mpi_request;
    u16 smid;
    struct _tr_list *delayed_tr;

    if (ioc->shost_recovery || ioc->remove_host ||
        ioc->pci_error_recovery) {
        dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host reset in "
            "progress!\n", ioc->name, __func__));
        return;
    }

    smid = mpt2sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
    if (!smid) {
        delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
        if (!delayed_tr)
            return;
        INIT_LIST_HEAD(&delayed_tr->list);
        delayed_tr->handle = handle;
        list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
        dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
            "DELAYED:tr:handle(0x%04x), (open)\n",
            ioc->name, handle));
        return;
    }

    dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "tr_send:handle(0x%04x), "
        "(open), smid(%d), cb(%d)\n", ioc->name, handle, smid,
        ioc->tm_tr_volume_cb_idx));
    mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
    memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
    mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
    mpi_request->DevHandle = cpu_to_le16(handle);
    mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
    mpt2sas_base_put_smid_hi_priority(ioc, smid);
}

/**
 * _scsih_tm_volume_tr_complete - target reset completion
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt time.
 *
 * Return 1 meaning mf should be freed from _base_interrupt
 *        0 means the mf is freed from this function.
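 *
 * On completion, any target reset queued on ioc->delayed_tr_volume_list
 * or ioc->delayed_tr_list is kicked off via _scsih_check_for_pending_tm().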
 */
static u8
_scsih_tm_volume_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid,
    u8 msix_index, u32 reply)
{
    u16 handle;
    Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
    Mpi2SCSITaskManagementReply_t *mpi_reply =
        mpt2sas_base_get_reply_virt_addr(ioc, reply);

    if (ioc->shost_recovery || ioc->remove_host ||
        ioc->pci_error_recovery) {
        dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host reset in "
            "progress!\n", ioc->name, __func__));
        return 1;
    }

    mpi_request_tm = mpt2sas_base_get_msg_frame(ioc, smid);
    handle = le16_to_cpu(mpi_request_tm->DevHandle);
    if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
        dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "spurious interrupt: "
            "handle(0x%04x:0x%04x), smid(%d)!!!\n", ioc->name, handle,
            le16_to_cpu(mpi_reply->DevHandle), smid));
        return 0;
    }

    dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
        "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
        "loginfo(0x%08x), completed(%d)\n", ioc->name,
        handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
        le32_to_cpu(mpi_reply->IOCLogInfo),
        le32_to_cpu(mpi_reply->TerminationCount)));

    return _scsih_check_for_pending_tm(ioc, smid);
}

/**
 * _scsih_tm_tr_complete - target reset completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt time.
 *
 * This is the target reset completion routine.
 * This code is part of the code to initiate the device removal
 * handshake protocol with controller firmware.
 * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
 *
 * Return 1 meaning mf should be freed from _base_interrupt
 *        0 means the mf is freed from this function.
 */
static u8
_scsih_tm_tr_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
    u32 reply)
{
    u16 handle;
    Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
    Mpi2SCSITaskManagementReply_t *mpi_reply =
        mpt2sas_base_get_reply_virt_addr(ioc, reply);
    Mpi2SasIoUnitControlRequest_t *mpi_request;
    u16 smid_sas_ctrl;
    u32 ioc_state;

    if (ioc->remove_host) {
        dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host has been "
            "removed\n", ioc->name, __func__));
        return 1;
    } else if (ioc->pci_error_recovery) {
        dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host in pci "
            "error recovery\n", ioc->name, __func__));
        return 1;
    }
    ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
    if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
        dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: host is not "
            "operational\n", ioc->name, __func__));
        return 1;
    }

    mpi_request_tm = mpt2sas_base_get_msg_frame(ioc, smid);
    handle = le16_to_cpu(mpi_request_tm->DevHandle);
    if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
        dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "spurious interrupt: "
            "handle(0x%04x:0x%04x), smid(%d)!!!\n", ioc->name, handle,
            le16_to_cpu(mpi_reply->DevHandle), smid));
        return 0;
    }

    dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
        "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
        "loginfo(0x%08x), completed(%d)\n", ioc->name,
        handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
        le32_to_cpu(mpi_reply->IOCLogInfo),
        le32_to_cpu(mpi_reply->TerminationCount)));

    smid_sas_ctrl = mpt2sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
    if (!smid_sas_ctrl) {
        printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
            ioc->name, __func__);
        return 1;
    }

    dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "sc_send:handle(0x%04x), "
        "(open), smid(%d), cb(%d)\n", ioc->name, handle, smid_sas_ctrl,
        ioc->tm_sas_control_cb_idx));
    mpi_request = mpt2sas_base_get_msg_frame(ioc, smid_sas_ctrl);
    memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
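    /*
     * Step two of the device removal handshake: ask the firmware to
     * remove the device handle that was just target reset.
     */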
    mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
    mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
    mpi_request->DevHandle = mpi_request_tm->DevHandle;
    mpt2sas_base_put_smid_default(ioc, smid_sas_ctrl);

    return _scsih_check_for_pending_tm(ioc, smid);
}

/**
 * _scsih_check_for_pending_tm - check for pending task management
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * This checks the delayed target reset lists and feeds the
 * next request.
 *
 * Return 1 meaning mf should be freed from _base_interrupt
 *        0 means the mf is freed from this function.
 */
static u8
_scsih_check_for_pending_tm(struct MPT2SAS_ADAPTER *ioc, u16 smid)
{
    struct _tr_list *delayed_tr;

    if (!list_empty(&ioc->delayed_tr_volume_list)) {
        delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
            struct _tr_list, list);
        mpt2sas_base_free_smid(ioc, smid);
        _scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
        list_del(&delayed_tr->list);
        kfree(delayed_tr);
        return 0;
    }

    if (!list_empty(&ioc->delayed_tr_list)) {
        delayed_tr = list_entry(ioc->delayed_tr_list.next,
            struct _tr_list, list);
        mpt2sas_base_free_smid(ioc, smid);
        _scsih_tm_tr_send(ioc, delayed_tr->handle);
        list_del(&delayed_tr->list);
        kfree(delayed_tr);
        return 0;
    }

    return 1;
}

/**
 * _scsih_check_topo_delete_events - sanity check on topo events
 * @ioc: per adapter object
 * @event_data: the event data payload
 *
 * This routine was added to better handle cable breakage.
 *
 * This handles the case where the driver receives multiple expander
 * add and delete events in a single shot.  When there is a delete event
 * the routine will void any pending add events waiting in the event queue.
 *
 * Return nothing.
 */
static void
_scsih_check_topo_delete_events(struct MPT2SAS_ADAPTER *ioc,
    Mpi2EventDataSasTopologyChangeList_t *event_data)
{
    struct fw_event_work *fw_event;
    Mpi2EventDataSasTopologyChangeList_t *local_event_data;
    u16 expander_handle;
    struct _sas_node *sas_expander;
    unsigned long flags;
    int i, reason_code;
    u16 handle;

    for (i = 0 ; i < event_data->NumEntries; i++) {
        handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
        if (!handle)
            continue;
        reason_code = event_data->PHY[i].PhyStatus &
            MPI2_EVENT_SAS_TOPO_RC_MASK;
        if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
            _scsih_tm_tr_send(ioc, handle);
    }

    expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
    if (expander_handle < ioc->sas_hba.num_phys) {
        _scsih_block_io_to_children_attached_directly(ioc, event_data);
        return;
    }

    if (event_data->ExpStatus ==
        MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING ||
        event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING) {
        spin_lock_irqsave(&ioc->sas_node_lock, flags);
        sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc,
            expander_handle);
        spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
        _scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
    } else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
        _scsih_block_io_to_children_attached_directly(ioc, event_data);

    if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
        return;

    /* mark ignore flag for pending events */
    spin_lock_irqsave(&ioc->fw_event_lock, flags);
    list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
        if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
            fw_event->ignore)
            continue;
        local_event_data = fw_event->event_data;
        if (local_event_data->ExpStatus ==
            MPI2_EVENT_SAS_TOPO_ES_ADDED ||
            local_event_data->ExpStatus ==
            MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
            if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
                expander_handle) {
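                /*
                 * Void the queued add/responding event for the
                 * expander that is now gone.
                 */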
                dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
                    "setting ignoring flag\n", ioc->name));
                fw_event->ignore = 1;
            }
        }
    }
    spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}

/**
 * _scsih_set_volume_delete_flag - setting volume delete flag
 * @ioc: per adapter object
 * @handle: device handle
 *
 * This marks the volume's target as deleted so that no further I/O is
 * queued to it.
 *
 * Return nothing.
 */
static void
_scsih_set_volume_delete_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
    struct _raid_device *raid_device;
    struct MPT2SAS_TARGET *sas_target_priv_data;
    unsigned long flags;

    spin_lock_irqsave(&ioc->raid_device_lock, flags);
    raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
    if (raid_device && raid_device->starget &&
        raid_device->starget->hostdata) {
        sas_target_priv_data =
            raid_device->starget->hostdata;
        sas_target_priv_data->deleted = 1;
        dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
            "setting delete flag: handle(0x%04x), "
            "wwid(0x%016llx)\n", ioc->name, handle,
            (unsigned long long) raid_device->wwid));
    }
    spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}

/**
 * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
 * @handle: input handle
 * @a: handle for volume a
 * @b: handle for volume b
 *
 * IR firmware only supports two raid volumes.  The purpose of this
 * routine is to set the volume handle in either a or b: the handle is
 * stored in a if a is unset, otherwise in b; zero and duplicate handles
 * are ignored.
 */
static void
_scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
{
    if (!handle || handle == *a || handle == *b)
        return;
    if (!*a)
        *a = handle;
    else if (!*b)
        *b = handle;
}

/**
 * _scsih_check_ir_config_unhide_events - check for UNHIDE events
 * @ioc: per adapter object
 * @event_data: the event data payload
 * Context: interrupt time.
 *
 * This routine will send a target reset to the volume, followed by target
 * resets to the PDs.  This is called when a PD has been removed, or a
 * volume has been deleted or removed.  When the target reset is sent
 * to the volume, the PD target resets need to be queued to start upon
 * completion of the volume target reset.
 *
 * Return nothing.
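 *
 * A sketch with hypothetical handles: if a single event deletes volume
 * 0x011a and unhides a PD belonging to volume 0x011b, then a = 0x011a,
 * b = 0x011b, both volumes receive a target reset, and the PD target
 * reset is parked on ioc->delayed_tr_list until the volume resets
 * complete.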
 */
static void
_scsih_check_ir_config_unhide_events(struct MPT2SAS_ADAPTER *ioc,
    Mpi2EventDataIrConfigChangeList_t *event_data)
{
    Mpi2EventIrConfigElement_t *element;
    int i;
    u16 handle, volume_handle, a, b;
    struct _tr_list *delayed_tr;

    a = 0;
    b = 0;

    if (ioc->is_warpdrive)
        return;

    /* Volume Resets for Deleted or Removed */
    element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
    for (i = 0; i < event_data->NumElements; i++, element++) {
        if (element->ReasonCode ==
            MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
            element->ReasonCode ==
            MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
            volume_handle = le16_to_cpu(element->VolDevHandle);
            _scsih_set_volume_delete_flag(ioc, volume_handle);
            _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
        }
    }

    /* Volume Resets for UNHIDE events */
    element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
    for (i = 0; i < event_data->NumElements; i++, element++) {
        if (le32_to_cpu(event_data->Flags) &
            MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
            continue;
        if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
            volume_handle = le16_to_cpu(element->VolDevHandle);
            _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
        }
    }

    if (a)
        _scsih_tm_tr_volume_send(ioc, a);
    if (b)
        _scsih_tm_tr_volume_send(ioc, b);

    /* PD target resets */
    element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
    for (i = 0; i < event_data->NumElements; i++, element++) {
        if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
            continue;
        handle = le16_to_cpu(element->PhysDiskDevHandle);
        volume_handle = le16_to_cpu(element->VolDevHandle);
        clear_bit(handle, ioc->pd_handles);
        if (!volume_handle)
            _scsih_tm_tr_send(ioc, handle);
        else if (volume_handle == a || volume_handle == b) {
            delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
            BUG_ON(!delayed_tr);
            INIT_LIST_HEAD(&delayed_tr->list);
            delayed_tr->handle = handle;
            list_add_tail(&delayed_tr->list,
                &ioc->delayed_tr_list);
            dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
                "DELAYED:tr:handle(0x%04x), (open)\n",
                ioc->name, handle));
        } else
            _scsih_tm_tr_send(ioc, handle);
    }
}

/**
 * _scsih_check_volume_delete_events - set delete flag for volumes
 * @ioc: per adapter object
 * @event_data: the event data payload
 * Context: interrupt time.
 *
 * This will handle the case when the cable connected to the entire volume
 * is pulled.  We will take care of setting the deleted flag so normal I/O
 * will not be sent.
 *
 * Return nothing.
 */
static void
_scsih_check_volume_delete_events(struct MPT2SAS_ADAPTER *ioc,
    Mpi2EventDataIrVolume_t *event_data)
{
    u32 state;

    if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
        return;
    state = le32_to_cpu(event_data->NewValue);
    if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
        MPI2_RAID_VOL_STATE_FAILED)
        _scsih_set_volume_delete_flag(ioc,
            le16_to_cpu(event_data->VolDevHandle));
}

/**
 * _scsih_flush_running_cmds - completing outstanding commands.
 * @ioc: per adapter object
 *
 * The flushing out of all pending scmd commands following host reset,
 * where all I/O is dropped to the floor.
 *
 * Return nothing.
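 *
 * Flushed commands are completed with DID_NO_CONNECT during pci error
 * recovery, and with DID_RESET otherwise.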
 */
static void
_scsih_flush_running_cmds(struct MPT2SAS_ADAPTER *ioc)
{
    struct scsi_cmnd *scmd;
    u16 smid;
    u16 count = 0;

    for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
        scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
        if (!scmd)
            continue;
        count++;
        mpt2sas_base_free_smid(ioc, smid);
        scsi_dma_unmap(scmd);
        if (ioc->pci_error_recovery)
            scmd->result = DID_NO_CONNECT << 16;
        else
            scmd->result = DID_RESET << 16;
        scmd->scsi_done(scmd);
    }
    dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "completing %d cmds\n",
        ioc->name, count));
}

/**
 * _scsih_setup_eedp - setup MPI request for EEDP transfer
 * @scmd: pointer to scsi command object
 * @mpi_request: pointer to the SCSI_IO request message frame
 *
 * Supporting protection types 1, 2 and 3.
 *
 * Returns nothing
 */
static void
_scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
{
    u16 eedp_flags;
    unsigned char prot_op = scsi_get_prot_op(scmd);
    unsigned char prot_type = scsi_get_prot_type(scmd);

    if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
        return;

    if (prot_op == SCSI_PROT_READ_STRIP)
        eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
    else if (prot_op == SCSI_PROT_WRITE_INSERT)
        eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
    else
        return;

    switch (prot_type) {
    case SCSI_PROT_DIF_TYPE1:
    case SCSI_PROT_DIF_TYPE2:

        /*
         * enable ref/guard checking
         * auto increment ref tag
         */
        eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
            MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
            MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
        mpi_request->CDB.EEDP32.PrimaryReferenceTag =
            cpu_to_be32(scsi_get_lba(scmd));
        break;

    case SCSI_PROT_DIF_TYPE3:

        /*
         * enable guard checking
         */
        eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
        break;
    }
    mpi_request->EEDPBlockSize = cpu_to_le32(scmd->device->sector_size);
    mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
}

/**
 * _scsih_eedp_error_handling - return sense code for EEDP errors
 * @scmd: pointer to scsi command object
 * @ioc_status: ioc status
 *
 * Returns nothing
 */
static void
_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
{
    u8 ascq;
    u8 sk;
    u8 host_byte;

    switch (ioc_status) {
    case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
        ascq = 0x01;
        break;
    case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
        ascq = 0x02;
        break;
    case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
        ascq = 0x03;
        break;
    default:
        ascq = 0x00;
        break;
    }

    if (scmd->sc_data_direction == DMA_TO_DEVICE) {
        sk = ILLEGAL_REQUEST;
        host_byte = DID_ABORT;
    } else {
        sk = ABORTED_COMMAND;
        host_byte = DID_OK;
    }

    scsi_build_sense_buffer(0, scmd->sense_buffer, sk, 0x10, ascq);
    scmd->result = DRIVER_SENSE << 24 | (host_byte << 16) |
        SAM_STAT_CHECK_CONDITION;
}

/**
 * _scsih_scsi_direct_io_get - returns direct io flag
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Returns the direct_io flag stored for the smid.
 */
static inline u8
_scsih_scsi_direct_io_get(struct MPT2SAS_ADAPTER *ioc, u16 smid)
{
    return ioc->scsi_lookup[smid - 1].direct_io;
}

/**
 * _scsih_scsi_direct_io_set - sets direct io flag
 * @ioc: per adapter object
 * @smid: system request message index
 * @direct_io: Zero or non-zero value to set in the direct_io flag
 *
 * Returns nothing.
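 *
 * The flag is set by _scsih_setup_direct_io() below once a volume
 * READ/WRITE has been remapped onto a member disk.  A worked example of
 * that remapping, with assumed values stripe_sz = 64 blocks
 * (stripe_exponent = 6) and num_pds = 2:
 *
 *	v_lba = 200, io_size = 8
 *	stripe_off  = 200 & 63     =  8	(8 + 8 <= 64, fits in one stripe)
 *	p_lba       = 200 >> 6     =  3	(stripe number)
 *	stripe_unit = 3 / 2        =  1	(stripe row on each disk)
 *	column      = 3 % 2        =  1	(I/O goes to pd_handle[1])
 *	p_lba       = (1 << 6) + 8 = 72	(LBA rewritten into the CDB)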
 */
static inline void
_scsih_scsi_direct_io_set(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 direct_io)
{
    ioc->scsi_lookup[smid - 1].direct_io = direct_io;
}

/**
 * _scsih_setup_direct_io - setup MPI request for WARPDRIVE Direct I/O
 * @ioc: per adapter object
 * @scmd: pointer to scsi command object
 * @raid_device: pointer to raid device data structure
 * @mpi_request: pointer to the SCSI_IO request message frame
 * @smid: system request message index
 *
 * Returns nothing
 */
static void
_scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
    struct _raid_device *raid_device, Mpi2SCSIIORequest_t *mpi_request,
    u16 smid)
{
    u32 v_lba, p_lba, stripe_off, stripe_unit, column, io_size;
    u32 stripe_sz, stripe_exp;
    u8 num_pds, *cdb_ptr, i;
    u8 cdb0 = scmd->cmnd[0];
    u64 v_llba;

    /*
     * Try Direct I/O to RAID member disks
     */
    if (cdb0 == READ_16 || cdb0 == READ_10 ||
        cdb0 == WRITE_16 || cdb0 == WRITE_10) {
        cdb_ptr = mpi_request->CDB.CDB32;

        if ((cdb0 < READ_16) || !(cdb_ptr[2] | cdb_ptr[3] | cdb_ptr[4]
            | cdb_ptr[5])) {
            io_size = scsi_bufflen(scmd) >>
                raid_device->block_exponent;
            i = (cdb0 < READ_16) ? 2 : 6;

            /* get virtual lba */
            v_lba = be32_to_cpu(*(__be32 *)(&cdb_ptr[i]));

            if (((u64)v_lba + (u64)io_size - 1) <=
                (u32)raid_device->max_lba) {
                stripe_sz = raid_device->stripe_sz;
                stripe_exp = raid_device->stripe_exponent;
                stripe_off = v_lba & (stripe_sz - 1);

                /* Check whether IO falls within a stripe */
                if ((stripe_off + io_size) <= stripe_sz) {
                    num_pds = raid_device->num_pds;
                    p_lba = v_lba >> stripe_exp;
                    stripe_unit = p_lba / num_pds;
                    column = p_lba % num_pds;
                    p_lba = (stripe_unit << stripe_exp) +
                        stripe_off;
                    mpi_request->DevHandle =
                        cpu_to_le16(raid_device->
                            pd_handle[column]);
                    (*(__be32 *)(&cdb_ptr[i])) =
                        cpu_to_be32(p_lba);
                    /*
                     * WD: mark this I/O as direct I/O
                     */
                    _scsih_scsi_direct_io_set(ioc, smid, 1);
                }
            }
        } else {
            io_size = scsi_bufflen(scmd) >>
                raid_device->block_exponent;
            /* get virtual lba */
            v_llba = be64_to_cpu(*(__be64 *)(&cdb_ptr[2]));

            if ((v_llba + (u64)io_size - 1) <=
                raid_device->max_lba) {
                stripe_sz = raid_device->stripe_sz;
                stripe_exp = raid_device->stripe_exponent;
                stripe_off = (u32) (v_llba & (stripe_sz - 1));

                /* Check whether IO falls within a stripe */
                if ((stripe_off + io_size) <= stripe_sz) {
                    num_pds = raid_device->num_pds;
                    p_lba = (u32)(v_llba >> stripe_exp);
                    stripe_unit = p_lba / num_pds;
                    column = p_lba % num_pds;
                    p_lba = (stripe_unit << stripe_exp) +
                        stripe_off;
                    mpi_request->DevHandle =
                        cpu_to_le16(raid_device->
                            pd_handle[column]);
                    (*(__be64 *)(&cdb_ptr[2])) =
                        cpu_to_be64((u64)p_lba);
                    /*
                     * WD: mark this I/O as direct I/O
                     */
                    _scsih_scsi_direct_io_set(ioc, smid, 1);
                }
            }
        }
    }
}

/**
 * _scsih_qcmd - main scsi request entry point
 * @scmd: pointer to scsi command object
 * @done: function pointer to be invoked on completion
 *
 * The callback index is set inside ioc->scsi_io_cb_idx.
 *
 * Returns 0 on success.
If there's a failure, return either: * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full */ static int _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) { struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); struct MPT2SAS_DEVICE *sas_device_priv_data; struct MPT2SAS_TARGET *sas_target_priv_data; struct _raid_device *raid_device; Mpi2SCSIIORequest_t *mpi_request; u32 mpi_control; u16 smid; scmd->scsi_done = done; sas_device_priv_data = scmd->device->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); return 0; } if (ioc->pci_error_recovery || ioc->remove_host) { scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); return 0; } sas_target_priv_data = sas_device_priv_data->sas_target; /* invalid device handle */ if (sas_target_priv_data->handle == MPT2SAS_INVALID_DEVICE_HANDLE) { scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); return 0; } /* host recovery or link resets sent via IOCTLs */ if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) return SCSI_MLQUEUE_HOST_BUSY; /* device busy with task management */ else if (sas_device_priv_data->block || sas_target_priv_data->tm_busy) return SCSI_MLQUEUE_DEVICE_BUSY; /* device has been deleted */ else if (sas_target_priv_data->deleted) { scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); return 0; } if (scmd->sc_data_direction == DMA_FROM_DEVICE) mpi_control = MPI2_SCSIIO_CONTROL_READ; else if (scmd->sc_data_direction == DMA_TO_DEVICE) mpi_control = MPI2_SCSIIO_CONTROL_WRITE; else mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER; /* set tags */ if (!(sas_device_priv_data->flags & MPT_DEVICE_FLAGS_INIT)) { if (scmd->device->tagged_supported) { if (scmd->device->ordered_tags) mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ; else mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; } else /* MPI Revision I (UNIT = 0xA) - removed MPI2_SCSIIO_CONTROL_UNTAGGED */ /* mpi_control |= MPI2_SCSIIO_CONTROL_UNTAGGED; */ mpi_control |= (0x500); } else mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; /* Make sure Device is not raid volume. * We do not expose raid functionality to upper layer for warpdrive. 
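	 * TLR is consequently only turned on for plain SAS end devices
	 * carrying standard-length CDBs.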
 */
    if (!ioc->is_warpdrive && !_scsih_is_raid(&scmd->device->sdev_gendev) &&
        sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
        mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;

    smid = mpt2sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
    if (!smid) {
        printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
            ioc->name, __func__);
        goto out;
    }
    mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
    memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t));
    _scsih_setup_eedp(scmd, mpi_request);
    if (scmd->cmd_len == 32)
        mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
    if (sas_device_priv_data->sas_target->flags &
        MPT_TARGET_FLAGS_RAID_COMPONENT)
        mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
    else
        mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
    mpi_request->DevHandle =
        cpu_to_le16(sas_device_priv_data->sas_target->handle);
    mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
    mpi_request->Control = cpu_to_le32(mpi_control);
    mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
    mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
    mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
    mpi_request->SenseBufferLowAddress =
        mpt2sas_base_get_sense_buffer_dma(ioc, smid);
    mpi_request->SGLOffset0 = offsetof(Mpi2SCSIIORequest_t, SGL) / 4;
    mpi_request->SGLFlags = cpu_to_le16(MPI2_SCSIIO_SGLFLAGS_TYPE_MPI +
        MPI2_SCSIIO_SGLFLAGS_SYSTEM_ADDR);
    mpi_request->VF_ID = 0; /* TODO */
    mpi_request->VP_ID = 0;
    int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
        mpi_request->LUN);
    memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);

    if (!mpi_request->DataLength) {
        mpt2sas_base_build_zero_len_sge(ioc, &mpi_request->SGL);
    } else {
        if (_scsih_build_scatter_gather(ioc, scmd, smid)) {
            mpt2sas_base_free_smid(ioc, smid);
            goto out;
        }
    }

    raid_device = sas_target_priv_data->raid_device;
    if (raid_device && raid_device->direct_io_enabled)
        _scsih_setup_direct_io(ioc, scmd, raid_device,
            mpi_request, smid);

    if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST))
        mpt2sas_base_put_smid_scsi_io(ioc, smid,
            le16_to_cpu(mpi_request->DevHandle));
    else
        mpt2sas_base_put_smid_default(ioc, smid);
    return 0;

 out:
    return SCSI_MLQUEUE_HOST_BUSY;
}

static DEF_SCSI_QCMD(_scsih_qcmd)

/**
 * _scsih_normalize_sense - normalize descriptor and fixed format sense data
 * @sense_buffer: sense data returned by target
 * @data: normalized skey/asc/ascq
 *
 * Return nothing.
 */
static void
_scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
{
    if ((sense_buffer[0] & 0x7F) >= 0x72) {
        /* descriptor format */
        data->skey = sense_buffer[1] & 0x0F;
        data->asc = sense_buffer[2];
        data->ascq = sense_buffer[3];
    } else {
        /* fixed format */
        data->skey = sense_buffer[2] & 0x0F;
        data->asc = sense_buffer[12];
        data->ascq = sense_buffer[13];
    }
}

#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
/**
 * _scsih_scsi_ioc_info - translate non-successful SCSI_IO request
 * @ioc: per adapter object
 * @scmd: pointer to scsi command object
 * @mpi_reply: reply mf payload returned from firmware
 * @smid: system request message index
 *
 * scsi_status - SCSI Status code returned from target device
 * scsi_state - state info associated with SCSI_IO determined by ioc
 * ioc_status - ioc supplied status info
 *
 * Return nothing.
*/ static void _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, Mpi2SCSIIOReply_t *mpi_reply, u16 smid) { u32 response_info; u8 *response_bytes; u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; u8 scsi_state = mpi_reply->SCSIState; u8 scsi_status = mpi_reply->SCSIStatus; char *desc_ioc_state = NULL; char *desc_scsi_status = NULL; char *desc_scsi_state = ioc->tmp_string; u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo); struct _sas_device *sas_device = NULL; unsigned long flags; struct scsi_target *starget = scmd->device->sdev_target; struct MPT2SAS_TARGET *priv_target = starget->hostdata; char *device_str = NULL; if (!priv_target) return; if (ioc->hide_ir_msg) device_str = "WarpDrive"; else device_str = "volume"; if (log_info == 0x31170000) return; switch (ioc_status) { case MPI2_IOCSTATUS_SUCCESS: desc_ioc_state = "success"; break; case MPI2_IOCSTATUS_INVALID_FUNCTION: desc_ioc_state = "invalid function"; break; case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: desc_ioc_state = "scsi recovered error"; break; case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE: desc_ioc_state = "scsi invalid dev handle"; break; case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE: desc_ioc_state = "scsi device not there"; break; case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN: desc_ioc_state = "scsi data overrun"; break; case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN: desc_ioc_state = "scsi data underrun"; break; case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR: desc_ioc_state = "scsi io data error"; break; case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: desc_ioc_state = "scsi protocol error"; break; case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: desc_ioc_state = "scsi task terminated"; break; case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: desc_ioc_state = "scsi residual mismatch"; break; case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED: desc_ioc_state = "scsi task mgmt failed"; break; case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: desc_ioc_state = "scsi ioc terminated"; break; case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: desc_ioc_state = "scsi ext terminated"; break; case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: desc_ioc_state = "eedp guard error"; break; case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: desc_ioc_state = "eedp ref tag error"; break; case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: desc_ioc_state = "eedp app tag error"; break; default: desc_ioc_state = "unknown"; break; } switch (scsi_status) { case MPI2_SCSI_STATUS_GOOD: desc_scsi_status = "good"; break; case MPI2_SCSI_STATUS_CHECK_CONDITION: desc_scsi_status = "check condition"; break; case MPI2_SCSI_STATUS_CONDITION_MET: desc_scsi_status = "condition met"; break; case MPI2_SCSI_STATUS_BUSY: desc_scsi_status = "busy"; break; case MPI2_SCSI_STATUS_INTERMEDIATE: desc_scsi_status = "intermediate"; break; case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET: desc_scsi_status = "intermediate condmet"; break; case MPI2_SCSI_STATUS_RESERVATION_CONFLICT: desc_scsi_status = "reservation conflict"; break; case MPI2_SCSI_STATUS_COMMAND_TERMINATED: desc_scsi_status = "command terminated"; break; case MPI2_SCSI_STATUS_TASK_SET_FULL: desc_scsi_status = "task set full"; break; case MPI2_SCSI_STATUS_ACA_ACTIVE: desc_scsi_status = "aca active"; break; case MPI2_SCSI_STATUS_TASK_ABORTED: desc_scsi_status = "task aborted"; break; default: desc_scsi_status = "unknown"; break; } desc_scsi_state[0] = '\0'; if (!scsi_state) desc_scsi_state = " "; if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) strcat(desc_scsi_state, "response info "); if (scsi_state & MPI2_SCSI_STATE_TERMINATED) strcat(desc_scsi_state, "state terminated "); 
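	/* scsi_state is a bitmask, so several descriptors can be strung
	 * together in ioc->tmp_string; e.g. TERMINATED plus AUTOSENSE_VALID
	 * renders as "state terminated autosense valid ".
	 */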
if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) strcat(desc_scsi_state, "no status "); if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED) strcat(desc_scsi_state, "autosense failed "); if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) strcat(desc_scsi_state, "autosense valid "); scsi_print_command(scmd); if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) { printk(MPT2SAS_WARN_FMT "\t%s wwid(0x%016llx)\n", ioc->name, device_str, (unsigned long long)priv_target->sas_address); } else { spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, priv_target->sas_address); if (sas_device) { printk(MPT2SAS_WARN_FMT "\tsas_address(0x%016llx), " "phy(%d)\n", ioc->name, sas_device->sas_address, sas_device->phy); printk(MPT2SAS_WARN_FMT "\tenclosure_logical_id(0x%016llx), slot(%d)\n", ioc->name, sas_device->enclosure_logical_id, sas_device->slot); } spin_unlock_irqrestore(&ioc->sas_device_lock, flags); } printk(MPT2SAS_WARN_FMT "\thandle(0x%04x), ioc_status(%s)(0x%04x), " "smid(%d)\n", ioc->name, le16_to_cpu(mpi_reply->DevHandle), desc_ioc_state, ioc_status, smid); printk(MPT2SAS_WARN_FMT "\trequest_len(%d), underflow(%d), " "resid(%d)\n", ioc->name, scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd)); printk(MPT2SAS_WARN_FMT "\ttag(%d), transfer_count(%d), " "sc->result(0x%08x)\n", ioc->name, le16_to_cpu(mpi_reply->TaskTag), le32_to_cpu(mpi_reply->TransferCount), scmd->result); printk(MPT2SAS_WARN_FMT "\tscsi_status(%s)(0x%02x), " "scsi_state(%s)(0x%02x)\n", ioc->name, desc_scsi_status, scsi_status, desc_scsi_state, scsi_state); if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) { struct sense_info data; _scsih_normalize_sense(scmd->sense_buffer, &data); printk(MPT2SAS_WARN_FMT "\t[sense_key,asc,ascq]: " "[0x%02x,0x%02x,0x%02x], count(%d)\n", ioc->name, data.skey, data.asc, data.ascq, le32_to_cpu(mpi_reply->SenseCount)); } if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) { response_info = le32_to_cpu(mpi_reply->ResponseInfo); response_bytes = (u8 *)&response_info; _scsih_response_code(ioc, response_bytes[0]); } } #endif /** * _scsih_turn_on_fault_led - illuminate Fault LED * @ioc: per adapter object * @handle: device handle * Context: process * * Return nothing. */ static void _scsih_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle) { Mpi2SepReply_t mpi_reply; Mpi2SepRequest_t mpi_request; memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t)); mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR; mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS; mpi_request.SlotStatus = cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT); mpi_request.DevHandle = cpu_to_le16(handle); mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS; if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply, &mpi_request)) != 0) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return; } if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) { dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "enclosure_processor: " "ioc_status (0x%04x), loginfo(0x%08x)\n", ioc->name, le16_to_cpu(mpi_reply.IOCStatus), le32_to_cpu(mpi_reply.IOCLogInfo))); return; } } /** * _scsih_send_event_to_turn_on_fault_led - fire delayed event * @ioc: per adapter object * @handle: device handle * Context: interrupt. * * Return nothing. 
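 * Note: the SEP request itself sleeps, so it is only queued here; the
 * fw_event worker later issues it via _scsih_turn_on_fault_led() above.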
*/ static void _scsih_send_event_to_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle) { struct fw_event_work *fw_event; fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC); if (!fw_event) return; fw_event->event = MPT2SAS_TURN_ON_FAULT_LED; fw_event->device_handle = handle; fw_event->ioc = ioc; _scsih_fw_event_add(ioc, fw_event); } /** * _scsih_smart_predicted_fault - process smart errors * @ioc: per adapter object * @handle: device handle * Context: interrupt. * * Return nothing. */ static void _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle) { struct scsi_target *starget; struct MPT2SAS_TARGET *sas_target_priv_data; Mpi2EventNotificationReply_t *event_reply; Mpi2EventDataSasDeviceStatusChange_t *event_data; struct _sas_device *sas_device; ssize_t sz; unsigned long flags; /* only handle non-raid devices */ spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = _scsih_sas_device_find_by_handle(ioc, handle); if (!sas_device) { spin_unlock_irqrestore(&ioc->sas_device_lock, flags); return; } starget = sas_device->starget; sas_target_priv_data = starget->hostdata; if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) || ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))) { spin_unlock_irqrestore(&ioc->sas_device_lock, flags); return; } starget_printk(KERN_WARNING, starget, "predicted fault\n"); spin_unlock_irqrestore(&ioc->sas_device_lock, flags); if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) _scsih_send_event_to_turn_on_fault_led(ioc, handle); /* insert into event log */ sz = offsetof(Mpi2EventNotificationReply_t, EventData) + sizeof(Mpi2EventDataSasDeviceStatusChange_t); event_reply = kzalloc(sz, GFP_ATOMIC); if (!event_reply) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return; } event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; event_reply->Event = cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE); event_reply->MsgLength = sz/4; event_reply->EventDataLength = cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4); event_data = (Mpi2EventDataSasDeviceStatusChange_t *) event_reply->EventData; event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA; event_data->ASC = 0x5D; event_data->DevHandle = cpu_to_le16(handle); event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address); mpt2sas_ctl_add_to_event_log(ioc, event_reply); kfree(event_reply); } /** * _scsih_io_done - scsi request callback * @ioc: per adapter object * @smid: system request message index * @msix_index: MSIX table index supplied by the OS * @reply: reply message frame(lower 32bit addr) * * Callback handler when using _scsih_qcmd. * * Return 1 meaning mf should be freed from _base_interrupt * 0 means the mf is freed from this function. 
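 * Note: the 0 return only occurs on the WARPDRIVE direct-I/O retry path
 * below, where the smid is reused to resend the I/O to the volume.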
*/ static u8 _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) { Mpi2SCSIIORequest_t *mpi_request; Mpi2SCSIIOReply_t *mpi_reply; struct scsi_cmnd *scmd; u16 ioc_status; u32 xfer_cnt; u8 scsi_state; u8 scsi_status; u32 log_info; struct MPT2SAS_DEVICE *sas_device_priv_data; u32 response_code = 0; unsigned long flags; mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); scmd = _scsih_scsi_lookup_get_clear(ioc, smid); if (scmd == NULL) return 1; mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); if (mpi_reply == NULL) { scmd->result = DID_OK << 16; goto out; } sas_device_priv_data = scmd->device->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target || sas_device_priv_data->sas_target->deleted) { scmd->result = DID_NO_CONNECT << 16; goto out; } ioc_status = le16_to_cpu(mpi_reply->IOCStatus); /* * WARPDRIVE: If direct_io is set then it is directIO, * the failed direct I/O should be redirected to volume */ if (_scsih_scsi_direct_io_get(ioc, smid) && ((ioc_status & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) { spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); ioc->scsi_lookup[smid - 1].scmd = scmd; spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); _scsih_scsi_direct_io_set(ioc, smid, 0); memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); mpi_request->DevHandle = cpu_to_le16(sas_device_priv_data->sas_target->handle); mpt2sas_base_put_smid_scsi_io(ioc, smid, sas_device_priv_data->sas_target->handle); return 0; } /* turning off TLR */ scsi_state = mpi_reply->SCSIState; if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) response_code = le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF; if (!sas_device_priv_data->tlr_snoop_check) { sas_device_priv_data->tlr_snoop_check++; /* Make sure Device is not raid volume. * We do not expose raid functionality to upper layer for warpdrive. 
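		 * TLR is disabled at most once per device (tlr_snoop_check)
		 * and only when the target rejected the frame with
		 * MPI2_SCSITASKMGMT_RSP_INVALID_FRAME.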
*/ if (!ioc->is_warpdrive && !_scsih_is_raid(&scmd->device->sdev_gendev) && sas_is_tlr_enabled(scmd->device) && response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) { sas_disable_tlr(scmd->device); sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n"); } } xfer_cnt = le32_to_cpu(mpi_reply->TransferCount); scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt); if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) log_info = le32_to_cpu(mpi_reply->IOCLogInfo); else log_info = 0; ioc_status &= MPI2_IOCSTATUS_MASK; scsi_status = mpi_reply->SCSIStatus; if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 && (scsi_status == MPI2_SCSI_STATUS_BUSY || scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT || scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) { ioc_status = MPI2_IOCSTATUS_SUCCESS; } if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) { struct sense_info data; const void *sense_data = mpt2sas_base_get_sense_buffer(ioc, smid); u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, le32_to_cpu(mpi_reply->SenseCount)); memcpy(scmd->sense_buffer, sense_data, sz); _scsih_normalize_sense(scmd->sense_buffer, &data); /* failure prediction threshold exceeded */ if (data.asc == 0x5D) _scsih_smart_predicted_fault(ioc, le16_to_cpu(mpi_reply->DevHandle)); } switch (ioc_status) { case MPI2_IOCSTATUS_BUSY: case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES: scmd->result = SAM_STAT_BUSY; break; case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE: scmd->result = DID_NO_CONNECT << 16; break; case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: if (sas_device_priv_data->block) { scmd->result = DID_TRANSPORT_DISRUPTED << 16; goto out; } scmd->result = DID_SOFT_ERROR << 16; break; case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: scmd->result = DID_RESET << 16; break; case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt)) scmd->result = DID_SOFT_ERROR << 16; else scmd->result = (DID_OK << 16) | scsi_status; break; case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN: scmd->result = (DID_OK << 16) | scsi_status; if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)) break; if (xfer_cnt < scmd->underflow) { if (scsi_status == SAM_STAT_BUSY) scmd->result = SAM_STAT_BUSY; else scmd->result = DID_SOFT_ERROR << 16; } else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED | MPI2_SCSI_STATE_NO_SCSI_STATUS)) scmd->result = DID_SOFT_ERROR << 16; else if (scsi_state & MPI2_SCSI_STATE_TERMINATED) scmd->result = DID_RESET << 16; else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) { mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID; mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION; scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; scmd->sense_buffer[0] = 0x70; scmd->sense_buffer[2] = ILLEGAL_REQUEST; scmd->sense_buffer[12] = 0x20; scmd->sense_buffer[13] = 0; } break; case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN: scsi_set_resid(scmd, 0); case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: case MPI2_IOCSTATUS_SUCCESS: scmd->result = (DID_OK << 16) | scsi_status; if (response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME || (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED | MPI2_SCSI_STATE_NO_SCSI_STATUS))) scmd->result = DID_SOFT_ERROR << 16; else if (scsi_state & MPI2_SCSI_STATE_TERMINATED) scmd->result = DID_RESET << 16; break; case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: _scsih_eedp_error_handling(scmd, ioc_status); break; case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: case MPI2_IOCSTATUS_INVALID_FUNCTION: case 
MPI2_IOCSTATUS_INVALID_SGL:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		scmd->result = DID_SOFT_ERROR << 16;
		break;
	}

#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
	if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
		_scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
#endif

 out:
	scsi_dma_unmap(scmd);
	scmd->scsi_done(scmd);
	return 1;
}

/**
 * _scsih_sas_host_refresh - refreshing sas host object contents
 * @ioc: per adapter object
 * Context: user
 *
 * During port enable, fw will send topology events for every device. It's
 * possible that the handles may change from the previous setting, so this
 * code keeps the handles updated when they change.
 *
 * Return nothing.
 */
static void
_scsih_sas_host_refresh(struct MPT2SAS_ADAPTER *ioc)
{
	u16 sz;
	u16 ioc_status;
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	u16 attached_handle;
	u8 link_rate;

	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT
	    "updating handles for sas_host(0x%016llx)\n",
	    ioc->name, (unsigned long long)ioc->sas_hba.sas_address));

	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
	    * sizeof(Mpi2SasIOUnit0PhyData_t));
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		return;
	}

	if ((mpt2sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz)) != 0)
		goto out;
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		goto out;
	for (i = 0; i < ioc->sas_hba.num_phys; i++) {
		link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
		if (i == 0)
			ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
			    PhyData[0].ControllerDevHandle);
		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
		attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
		    AttachedDevHandle);
		if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
			link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
		mpt2sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
		    attached_handle, i, link_rate);
	}
 out:
	kfree(sas_iounit_pg0);
}

/**
 * _scsih_sas_host_add - create sas host object
 * @ioc: per adapter object
 *
 * Creating host side data object, stored in ioc->sas_hba
 *
 * Return nothing.
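 * Note: reads SAS IO unit pages 0 and 1 and per-phy PHY page 0 to fill in
 * the phy table, the device missing delays, and the host SAS address.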
*/ static void _scsih_sas_host_add(struct MPT2SAS_ADAPTER *ioc) { int i; Mpi2ConfigReply_t mpi_reply; Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; Mpi2SasPhyPage0_t phy_pg0; Mpi2SasDevicePage0_t sas_device_pg0; Mpi2SasEnclosurePage0_t enclosure_pg0; u16 ioc_status; u16 sz; u16 device_missing_delay; mpt2sas_config_get_number_hba_phys(ioc, &ioc->sas_hba.num_phys); if (!ioc->sas_hba.num_phys) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return; } /* sas_iounit page 0 */ sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys * sizeof(Mpi2SasIOUnit0PhyData_t)); sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); if (!sas_iounit_pg0) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return; } if ((mpt2sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, sas_iounit_pg0, sz))) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); goto out; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); goto out; } /* sas_iounit page 1 */ sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys * sizeof(Mpi2SasIOUnit1PhyData_t)); sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); if (!sas_iounit_pg1) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); goto out; } if ((mpt2sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, sz))) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); goto out; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); goto out; } ioc->io_missing_delay = le16_to_cpu(sas_iounit_pg1->IODeviceMissingDelay); device_missing_delay = le16_to_cpu(sas_iounit_pg1->ReportDeviceMissingDelay); if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16) ioc->device_missing_delay = (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16; else ioc->device_missing_delay = device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev; ioc->sas_hba.phy = kcalloc(ioc->sas_hba.num_phys, sizeof(struct _sas_phy), GFP_KERNEL); if (!ioc->sas_hba.phy) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); goto out; } for (i = 0; i < ioc->sas_hba.num_phys ; i++) { if ((mpt2sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, i))) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); goto out; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); goto out; } if (i == 0) ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0-> PhyData[0].ControllerDevHandle); ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; ioc->sas_hba.phy[i].phy_id = i; mpt2sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i], phy_pg0, ioc->sas_hba.parent_dev); } if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, 
__LINE__, __func__); goto out; } ioc->sas_hba.enclosure_handle = le16_to_cpu(sas_device_pg0.EnclosureHandle); ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress); printk(MPT2SAS_INFO_FMT "host_add: handle(0x%04x), " "sas_addr(0x%016llx), phys(%d)\n", ioc->name, ioc->sas_hba.handle, (unsigned long long) ioc->sas_hba.sas_address, ioc->sas_hba.num_phys) ; if (ioc->sas_hba.enclosure_handle) { if (!(mpt2sas_config_get_enclosure_pg0(ioc, &mpi_reply, &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, ioc->sas_hba.enclosure_handle))) { ioc->sas_hba.enclosure_logical_id = le64_to_cpu(enclosure_pg0.EnclosureLogicalID); } } out: kfree(sas_iounit_pg1); kfree(sas_iounit_pg0); } /** * _scsih_expander_add - creating expander object * @ioc: per adapter object * @handle: expander handle * * Creating expander object, stored in ioc->sas_expander_list. * * Return 0 for success, else error. */ static int _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle) { struct _sas_node *sas_expander; Mpi2ConfigReply_t mpi_reply; Mpi2ExpanderPage0_t expander_pg0; Mpi2ExpanderPage1_t expander_pg1; Mpi2SasEnclosurePage0_t enclosure_pg0; u32 ioc_status; u16 parent_handle; u64 sas_address, sas_address_parent = 0; int i; unsigned long flags; struct _sas_port *mpt2sas_port = NULL; int rc = 0; if (!handle) return -1; if (ioc->shost_recovery || ioc->pci_error_recovery) return -1; if ((mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return -1; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return -1; } /* handle out of order topology events */ parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle); if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent) != 0) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return -1; } if (sas_address_parent != ioc->sas_hba.sas_address) { spin_lock_irqsave(&ioc->sas_node_lock, flags); sas_expander = mpt2sas_scsih_expander_find_by_sas_address(ioc, sas_address_parent); spin_unlock_irqrestore(&ioc->sas_node_lock, flags); if (!sas_expander) { rc = _scsih_expander_add(ioc, parent_handle); if (rc != 0) return rc; } } spin_lock_irqsave(&ioc->sas_node_lock, flags); sas_address = le64_to_cpu(expander_pg0.SASAddress); sas_expander = mpt2sas_scsih_expander_find_by_sas_address(ioc, sas_address); spin_unlock_irqrestore(&ioc->sas_node_lock, flags); if (sas_expander) return 0; sas_expander = kzalloc(sizeof(struct _sas_node), GFP_KERNEL); if (!sas_expander) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return -1; } sas_expander->handle = handle; sas_expander->num_phys = expander_pg0.NumPhys; sas_expander->sas_address_parent = sas_address_parent; sas_expander->sas_address = sas_address; printk(MPT2SAS_INFO_FMT "expander_add: handle(0x%04x)," " parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", ioc->name, handle, parent_handle, (unsigned long long) sas_expander->sas_address, sas_expander->num_phys); if (!sas_expander->num_phys) goto out_fail; sas_expander->phy = kcalloc(sas_expander->num_phys, sizeof(struct _sas_phy), GFP_KERNEL); if (!sas_expander->phy) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); rc = -1; goto out_fail; } 
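	/* The transport port must be registered before the per-phy objects:
	 * mpt2sas_port->rphy->dev becomes their parent device.  Any failure
	 * from here on unwinds through out_fail, which also removes the
	 * port.
	 */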
INIT_LIST_HEAD(&sas_expander->sas_port_list); mpt2sas_port = mpt2sas_transport_port_add(ioc, handle, sas_address_parent); if (!mpt2sas_port) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); rc = -1; goto out_fail; } sas_expander->parent_dev = &mpt2sas_port->rphy->dev; for (i = 0 ; i < sas_expander->num_phys ; i++) { if ((mpt2sas_config_get_expander_pg1(ioc, &mpi_reply, &expander_pg1, i, handle))) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); rc = -1; goto out_fail; } sas_expander->phy[i].handle = handle; sas_expander->phy[i].phy_id = i; if ((mpt2sas_transport_add_expander_phy(ioc, &sas_expander->phy[i], expander_pg1, sas_expander->parent_dev))) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); rc = -1; goto out_fail; } } if (sas_expander->enclosure_handle) { if (!(mpt2sas_config_get_enclosure_pg0(ioc, &mpi_reply, &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, sas_expander->enclosure_handle))) { sas_expander->enclosure_logical_id = le64_to_cpu(enclosure_pg0.EnclosureLogicalID); } } _scsih_expander_node_add(ioc, sas_expander); return 0; out_fail: if (mpt2sas_port) mpt2sas_transport_port_remove(ioc, sas_expander->sas_address, sas_address_parent); kfree(sas_expander); return rc; } /** * _scsih_done - scsih callback handler. * @ioc: per adapter object * @smid: system request message index * @msix_index: MSIX table index supplied by the OS * @reply: reply message frame(lower 32bit addr) * * Callback handler when sending internal generated message frames. * The callback index passed is `ioc->scsih_cb_idx` * * Return 1 meaning mf should be freed from _base_interrupt * 0 means the mf is freed from this function. */ static u8 _scsih_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) { MPI2DefaultReply_t *mpi_reply; mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); if (ioc->scsih_cmds.status == MPT2_CMD_NOT_USED) return 1; if (ioc->scsih_cmds.smid != smid) return 1; ioc->scsih_cmds.status |= MPT2_CMD_COMPLETE; if (mpi_reply) { memcpy(ioc->scsih_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); ioc->scsih_cmds.status |= MPT2_CMD_REPLY_VALID; } ioc->scsih_cmds.status &= ~MPT2_CMD_PENDING; complete(&ioc->scsih_cmds.done); return 1; } /** * mpt2sas_expander_remove - removing expander object * @ioc: per adapter object * @sas_address: expander sas_address * * Return nothing. 
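 * Note: no-op during host recovery; otherwise the expander is unlinked
 * from ioc->sas_expander_list under sas_node_lock before its children
 * are removed.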
 */
void
mpt2sas_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address)
{
	struct _sas_node *sas_expander;
	unsigned long flags;

	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	sas_expander = mpt2sas_scsih_expander_find_by_sas_address(ioc,
	    sas_address);
	if (!sas_expander) {
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		return;
	}
	list_del(&sas_expander->list);
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
	_scsih_expander_node_remove(ioc, sas_expander);
}

/**
 * _scsih_check_access_status - check access flags
 * @ioc: per adapter object
 * @sas_address: sas address
 * @handle: sas device handle
 * @access_status: errors returned during discovery of the device
 *
 * Return 0 for success, else failure
 */
static u8
_scsih_check_access_status(struct MPT2SAS_ADAPTER *ioc, u64 sas_address,
    u16 handle, u8 access_status)
{
	u8 rc = 1;
	char *desc = NULL;

	switch (access_status) {
	case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
	case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
		rc = 0;
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
		desc = "sata capability failed";
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
		desc = "sata affiliation conflict";
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
		desc = "route not addressable";
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
		desc = "smp error not addressable";
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
		desc = "device blocked";
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
		desc = "sata initialization failed";
		break;
	default:
		desc = "unknown";
		break;
	}

	if (!rc)
		return 0;

	printk(MPT2SAS_ERR_FMT "discovery errors(%s): sas_address(0x%016llx), "
	    "handle(0x%04x)\n", ioc->name, desc,
	    (unsigned long long)sas_address, handle);
	return rc;
}

static void
_scsih_check_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	struct _sas_device *sas_device;
	u32 ioc_status;
	unsigned long flags;
	u64 sas_address;
	struct scsi_target *starget;
	struct MPT2SAS_TARGET *sas_target_priv_data;
	u32 device_info;

	if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
		return;

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		return;

	/* check if this is end device */
	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
	if (!(_scsih_is_end_device(device_info)))
		return;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
	sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
	    sas_address);

	if (!sas_device) {
		printk(MPT2SAS_ERR_FMT "device is not present "
		    "handle(0x%04x), no sas_device!!!\n", ioc->name, handle);
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
		return;
	}

	if (unlikely(sas_device->handle != handle)) {
		starget = sas_device->starget;
		sas_target_priv_data = starget->hostdata;
		starget_printk(KERN_INFO, starget, "handle changed 
from(0x%04x)" " to (0x%04x)!!!\n", sas_device->handle, handle); sas_target_priv_data->handle = handle; sas_device->handle = handle; } spin_unlock_irqrestore(&ioc->sas_device_lock, flags); /* check if device is present */ if (!(le16_to_cpu(sas_device_pg0.Flags) & MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) { printk(MPT2SAS_ERR_FMT "device is not present " "handle(0x%04x), flags!!!\n", ioc->name, handle); return; } /* check if there were any issues with discovery */ if (_scsih_check_access_status(ioc, sas_address, handle, sas_device_pg0.AccessStatus)) return; _scsih_ublock_io_device(ioc, handle); } /** * _scsih_add_device - creating sas device object * @ioc: per adapter object * @handle: sas device handle * @phy_num: phy number end device attached to * @is_pd: is this hidden raid component * * Creating end device object, stored in ioc->sas_device_list. * * Returns 0 for success, non-zero for failure. */ static int _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd) { Mpi2ConfigReply_t mpi_reply; Mpi2SasDevicePage0_t sas_device_pg0; Mpi2SasEnclosurePage0_t enclosure_pg0; struct _sas_device *sas_device; u32 ioc_status; __le64 sas_address; u32 device_info; unsigned long flags; if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return -1; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return -1; } sas_address = le64_to_cpu(sas_device_pg0.SASAddress); /* check if device is present */ if (!(le16_to_cpu(sas_device_pg0.Flags) & MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); printk(MPT2SAS_ERR_FMT "Flags = 0x%04x\n", ioc->name, le16_to_cpu(sas_device_pg0.Flags)); return -1; } /* check if there were any issues with discovery */ if (_scsih_check_access_status(ioc, sas_address, handle, sas_device_pg0.AccessStatus)) return -1; /* check if this is end device */ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); if (!(_scsih_is_end_device(device_info))) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return -1; } spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, sas_address); spin_unlock_irqrestore(&ioc->sas_device_lock, flags); if (sas_device) return 0; sas_device = kzalloc(sizeof(struct _sas_device), GFP_KERNEL); if (!sas_device) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return -1; } sas_device->handle = handle; if (_scsih_get_sas_address(ioc, le16_to_cpu (sas_device_pg0.ParentDevHandle), &sas_device->sas_address_parent) != 0) printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); sas_device->enclosure_handle = le16_to_cpu(sas_device_pg0.EnclosureHandle); sas_device->slot = le16_to_cpu(sas_device_pg0.Slot); sas_device->device_info = device_info; sas_device->sas_address = sas_address; sas_device->phy = sas_device_pg0.PhyNum; /* get enclosure_logical_id */ if (sas_device->enclosure_handle && !(mpt2sas_config_get_enclosure_pg0( ioc, &mpi_reply, &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, sas_device->enclosure_handle))) sas_device->enclosure_logical_id = 
le64_to_cpu(enclosure_pg0.EnclosureLogicalID);

	/* get device name */
	sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);

	if (ioc->wait_for_discovery_to_complete)
		_scsih_sas_device_init_add(ioc, sas_device);
	else
		_scsih_sas_device_add(ioc, sas_device);

	return 0;
}

/**
 * _scsih_remove_device - removing sas device object
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 *
 * Return nothing.
 */
static void
_scsih_remove_device(struct MPT2SAS_ADAPTER *ioc,
    struct _sas_device *sas_device)
{
	struct _sas_device sas_device_backup;
	struct MPT2SAS_TARGET *sas_target_priv_data;

	if (!sas_device)
		return;

	memcpy(&sas_device_backup, sas_device, sizeof(struct _sas_device));
	_scsih_sas_device_remove(ioc, sas_device);

	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: "
	    "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
	    sas_device_backup.handle, (unsigned long long)
	    sas_device_backup.sas_address));

	if (sas_device_backup.starget && sas_device_backup.starget->hostdata) {
		sas_target_priv_data = sas_device_backup.starget->hostdata;
		sas_target_priv_data->deleted = 1;
		_scsih_ublock_io_device(ioc, sas_device_backup.handle);
		sas_target_priv_data->handle =
		    MPT2SAS_INVALID_DEVICE_HANDLE;
	}

	_scsih_ublock_io_device(ioc, sas_device_backup.handle);

	if (!ioc->hide_drives)
		mpt2sas_transport_port_remove(ioc,
		    sas_device_backup.sas_address,
		    sas_device_backup.sas_address_parent);

	printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), sas_addr"
	    "(0x%016llx)\n", ioc->name, sas_device_backup.handle,
	    (unsigned long long) sas_device_backup.sas_address);

	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit: "
	    "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
	    sas_device_backup.handle, (unsigned long long)
	    sas_device_backup.sas_address));
}

/**
 * mpt2sas_device_remove - removing device object
 * @ioc: per adapter object
 * @sas_address: device sas_address
 *
 * Return nothing.
 */
void
mpt2sas_device_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
	    sas_address);
	if (!sas_device) {
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	_scsih_remove_device(ioc, sas_device);
}

#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
/**
 * _scsih_sas_topology_change_event_debug - debug for topology event
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
*/ static void _scsih_sas_topology_change_event_debug(struct MPT2SAS_ADAPTER *ioc, Mpi2EventDataSasTopologyChangeList_t *event_data) { int i; u16 handle; u16 reason_code; u8 phy_number; char *status_str = NULL; u8 link_rate, prev_link_rate; switch (event_data->ExpStatus) { case MPI2_EVENT_SAS_TOPO_ES_ADDED: status_str = "add"; break; case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING: status_str = "remove"; break; case MPI2_EVENT_SAS_TOPO_ES_RESPONDING: case 0: status_str = "responding"; break; case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING: status_str = "remove delay"; break; default: status_str = "unknown status"; break; } printk(MPT2SAS_INFO_FMT "sas topology change: (%s)\n", ioc->name, status_str); printk(KERN_INFO "\thandle(0x%04x), enclosure_handle(0x%04x) " "start_phy(%02d), count(%d)\n", le16_to_cpu(event_data->ExpanderDevHandle), le16_to_cpu(event_data->EnclosureHandle), event_data->StartPhyNum, event_data->NumEntries); for (i = 0; i < event_data->NumEntries; i++) { handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); if (!handle) continue; phy_number = event_data->StartPhyNum + i; reason_code = event_data->PHY[i].PhyStatus & MPI2_EVENT_SAS_TOPO_RC_MASK; switch (reason_code) { case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: status_str = "target add"; break; case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: status_str = "target remove"; break; case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING: status_str = "delay target remove"; break; case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED: status_str = "link rate change"; break; case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE: status_str = "target responding"; break; default: status_str = "unknown"; break; } link_rate = event_data->PHY[i].LinkRate >> 4; prev_link_rate = event_data->PHY[i].LinkRate & 0xF; printk(KERN_INFO "\tphy(%02d), attached_handle(0x%04x): %s:" " link rate: new(0x%02x), old(0x%02x)\n", phy_number, handle, status_str, link_rate, prev_link_rate); } } #endif /** * _scsih_sas_topology_change_event - handle topology changes * @ioc: per adapter object * @fw_event: The fw_event_work object * Context: user. 
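 * Handles expander add/remove and walks each phy entry, adding, removing,
 * or refreshing the attached device according to its reason code.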
* */ static void _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work *fw_event) { int i; u16 parent_handle, handle; u16 reason_code; u8 phy_number, max_phys; struct _sas_node *sas_expander; struct _sas_device *sas_device; u64 sas_address; unsigned long flags; u8 link_rate, prev_link_rate; Mpi2EventDataSasTopologyChangeList_t *event_data = fw_event->event_data; #ifdef CONFIG_SCSI_MPT2SAS_LOGGING if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) _scsih_sas_topology_change_event_debug(ioc, event_data); #endif if (ioc->remove_host || ioc->pci_error_recovery) return; if (!ioc->sas_hba.num_phys) _scsih_sas_host_add(ioc); else _scsih_sas_host_refresh(ioc); if (fw_event->ignore) { dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "ignoring expander " "event\n", ioc->name)); return; } parent_handle = le16_to_cpu(event_data->ExpanderDevHandle); /* handle expander add */ if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED) if (_scsih_expander_add(ioc, parent_handle) != 0) return; spin_lock_irqsave(&ioc->sas_node_lock, flags); sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc, parent_handle); spin_unlock_irqrestore(&ioc->sas_node_lock, flags); if (sas_expander) { sas_address = sas_expander->sas_address; max_phys = sas_expander->num_phys; } else if (parent_handle < ioc->sas_hba.num_phys) { sas_address = ioc->sas_hba.sas_address; max_phys = ioc->sas_hba.num_phys; } else return; /* handle siblings events */ for (i = 0; i < event_data->NumEntries; i++) { if (fw_event->ignore) { dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "ignoring " "expander event\n", ioc->name)); return; } if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery) return; phy_number = event_data->StartPhyNum + i; if (phy_number >= max_phys) continue; reason_code = event_data->PHY[i].PhyStatus & MPI2_EVENT_SAS_TOPO_RC_MASK; if ((event_data->PHY[i].PhyStatus & MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code != MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) continue; handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); if (!handle) continue; link_rate = event_data->PHY[i].LinkRate >> 4; prev_link_rate = event_data->PHY[i].LinkRate & 0xF; switch (reason_code) { case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED: if (ioc->shost_recovery) break; if (link_rate == prev_link_rate) break; mpt2sas_transport_update_links(ioc, sas_address, handle, phy_number, link_rate); if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5) break; _scsih_check_device(ioc, handle); break; case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: if (ioc->shost_recovery) break; mpt2sas_transport_update_links(ioc, sas_address, handle, phy_number, link_rate); _scsih_add_device(ioc, handle, phy_number, 0); break; case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = _scsih_sas_device_find_by_handle(ioc, handle); if (!sas_device) { spin_unlock_irqrestore(&ioc->sas_device_lock, flags); break; } spin_unlock_irqrestore(&ioc->sas_device_lock, flags); _scsih_remove_device(ioc, sas_device); break; } } /* handle expander removal */ if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING && sas_expander) mpt2sas_expander_remove(ioc, sas_address); } #ifdef CONFIG_SCSI_MPT2SAS_LOGGING /** * _scsih_sas_device_status_change_event_debug - debug for device event * @event_data: event data payload * Context: user. * * Return nothing. 
*/ static void _scsih_sas_device_status_change_event_debug(struct MPT2SAS_ADAPTER *ioc, Mpi2EventDataSasDeviceStatusChange_t *event_data) { char *reason_str = NULL; switch (event_data->ReasonCode) { case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA: reason_str = "smart data"; break; case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED: reason_str = "unsupported device discovered"; break; case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET: reason_str = "internal device reset"; break; case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL: reason_str = "internal task abort"; break; case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL: reason_str = "internal task abort set"; break; case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL: reason_str = "internal clear task set"; break; case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL: reason_str = "internal query task"; break; case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE: reason_str = "sata init failure"; break; case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET: reason_str = "internal device reset complete"; break; case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL: reason_str = "internal task abort complete"; break; case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION: reason_str = "internal async notification"; break; case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY: reason_str = "expander reduced functionality"; break; case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY: reason_str = "expander reduced functionality complete"; break; default: reason_str = "unknown reason"; break; } printk(MPT2SAS_INFO_FMT "device status change: (%s)\n" "\thandle(0x%04x), sas address(0x%016llx), tag(%d)", ioc->name, reason_str, le16_to_cpu(event_data->DevHandle), (unsigned long long)le64_to_cpu(event_data->SASAddress), le16_to_cpu(event_data->TaskTag)); if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA) printk(MPT2SAS_INFO_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name, event_data->ASC, event_data->ASCQ); printk(KERN_INFO "\n"); } #endif /** * _scsih_sas_device_status_change_event - handle device status change * @ioc: per adapter object * @fw_event: The fw_event_work object * Context: user. * * Return nothing. */ static void _scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work *fw_event) { struct MPT2SAS_TARGET *target_priv_data; struct _sas_device *sas_device; u64 sas_address; unsigned long flags; Mpi2EventDataSasDeviceStatusChange_t *event_data = fw_event->event_data; #ifdef CONFIG_SCSI_MPT2SAS_LOGGING if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) _scsih_sas_device_status_change_event_debug(ioc, event_data); #endif /* In MPI Revision K (0xC), the internal device reset complete was * implemented, so avoid setting tm_busy flag for older firmware. 
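	 * The MPI revision is carried in the upper byte of
	 * facts.HeaderVersion, which the check below compares against 0xC.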
*/ if ((ioc->facts.HeaderVersion >> 8) < 0xC) return; if (event_data->ReasonCode != MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET && event_data->ReasonCode != MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET) return; spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_address = le64_to_cpu(event_data->SASAddress); sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, sas_address); spin_unlock_irqrestore(&ioc->sas_device_lock, flags); if (!sas_device || !sas_device->starget) return; target_priv_data = sas_device->starget->hostdata; if (!target_priv_data) return; if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET) target_priv_data->tm_busy = 1; else target_priv_data->tm_busy = 0; } #ifdef CONFIG_SCSI_MPT2SAS_LOGGING /** * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure event * @ioc: per adapter object * @event_data: event data payload * Context: user. * * Return nothing. */ static void _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT2SAS_ADAPTER *ioc, Mpi2EventDataSasEnclDevStatusChange_t *event_data) { char *reason_str = NULL; switch (event_data->ReasonCode) { case MPI2_EVENT_SAS_ENCL_RC_ADDED: reason_str = "enclosure add"; break; case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING: reason_str = "enclosure remove"; break; default: reason_str = "unknown reason"; break; } printk(MPT2SAS_INFO_FMT "enclosure status change: (%s)\n" "\thandle(0x%04x), enclosure logical id(0x%016llx)" " number slots(%d)\n", ioc->name, reason_str, le16_to_cpu(event_data->EnclosureHandle), (unsigned long long)le64_to_cpu(event_data->EnclosureLogicalID), le16_to_cpu(event_data->StartSlot)); } #endif /** * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events * @ioc: per adapter object * @fw_event: The fw_event_work object * Context: user. * * Return nothing. */ static void _scsih_sas_enclosure_dev_status_change_event(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work *fw_event) { #ifdef CONFIG_SCSI_MPT2SAS_LOGGING if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) _scsih_sas_enclosure_dev_status_change_event_debug(ioc, fw_event->event_data); #endif } /** * _scsih_sas_broadcast_primitive_event - handle broadcast events * @ioc: per adapter object * @fw_event: The fw_event_work object * Context: user. * * Return nothing. 
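 * Note: walks every outstanding smid, querying each I/O with QUERY_TASK
 * and aborting those the IOC no longer owns with ABORT_TASK; the scan is
 * restarted while further broadcasts are pending.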
*/ static void _scsih_sas_broadcast_primitive_event(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work *fw_event) { struct scsi_cmnd *scmd; struct scsi_device *sdev; u16 smid, handle; u32 lun; struct MPT2SAS_DEVICE *sas_device_priv_data; u32 termination_count; u32 query_count; Mpi2SCSITaskManagementReply_t *mpi_reply; Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data; u16 ioc_status; unsigned long flags; int r; u8 max_retries = 0; u8 task_abort_retries; mutex_lock(&ioc->tm_cmds.mutex); dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: phy number(%d), " "width(%d)\n", ioc->name, __func__, event_data->PhyNum, event_data->PortWidth)); _scsih_block_io_all_device(ioc); spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); mpi_reply = ioc->tm_cmds.reply; broadcast_aen_retry: /* sanity checks for retrying this loop */ if (max_retries++ == 5) { dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: giving up\n", ioc->name, __func__)); goto out; } else if (max_retries > 1) dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: %d retry\n", ioc->name, __func__, max_retries - 1)); termination_count = 0; query_count = 0; for (smid = 1; smid <= ioc->scsiio_depth; smid++) { if (ioc->shost_recovery) goto out; scmd = _scsih_scsi_lookup_get(ioc, smid); if (!scmd) continue; sdev = scmd->device; sas_device_priv_data = sdev->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target) continue; /* skip hidden raid components */ if (sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) continue; /* skip volumes */ if (sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) continue; handle = sas_device_priv_data->sas_target->handle; lun = sas_device_priv_data->lun; query_count++; if (ioc->shost_recovery) goto out; spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); r = mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun, MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, 0, TM_MUTEX_OFF); if (r == FAILED) { sdev_printk(KERN_WARNING, sdev, "mpt2sas_scsih_issue_tm: FAILED when sending " "QUERY_TASK: scmd(%p)\n", scmd); spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); goto broadcast_aen_retry; } ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { sdev_printk(KERN_WARNING, sdev, "query task: FAILED " "with IOCSTATUS(0x%04x), scmd(%p)\n", ioc_status, scmd); spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); goto broadcast_aen_retry; } /* see if IO is still owned by IOC and target */ if (mpi_reply->ResponseCode == MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED || mpi_reply->ResponseCode == MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) { spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); continue; } task_abort_retries = 0; tm_retry: if (task_abort_retries++ == 60) { dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: ABORT_TASK: giving up\n", ioc->name, __func__)); spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); goto broadcast_aen_retry; } if (ioc->shost_recovery) goto out_no_lock; r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id, sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, scmd->serial_number, TM_MUTEX_OFF); if (r == FAILED) { sdev_printk(KERN_WARNING, sdev, "mpt2sas_scsih_issue_tm: ABORT_TASK: FAILED : " "scmd(%p)\n", scmd); goto tm_retry; } if (task_abort_retries > 1) sdev_printk(KERN_WARNING, sdev, "mpt2sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):" " scmd(%p)\n", task_abort_retries - 1, scmd); termination_count += le32_to_cpu(mpi_reply->TerminationCount); spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); } 
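	/* More broadcast primitives may have been noted in
	 * broadcast_aen_pending while the lookup lock was dropped for the
	 * TM requests above, so rescan the outstanding I/Os rather than
	 * miss them.
	 */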
if (ioc->broadcast_aen_pending) {
		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: loop back due to"
		    " pending AEN\n", ioc->name, __func__));
		ioc->broadcast_aen_pending = 0;
		goto broadcast_aen_retry;
	}

 out:
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 out_no_lock:

	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
	    "%s - exit, query_count = %d termination_count = %d\n",
	    ioc->name, __func__, query_count, termination_count));

	ioc->broadcast_aen_busy = 0;
	if (!ioc->shost_recovery)
		_scsih_ublock_io_all_device(ioc);
	mutex_unlock(&ioc->tm_cmds.mutex);
}

/**
 * _scsih_sas_discovery_event - handle discovery events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Return nothing.
 */
static void
_scsih_sas_discovery_event(struct MPT2SAS_ADAPTER *ioc,
    struct fw_event_work *fw_event)
{
	Mpi2EventDataSasDiscovery_t *event_data = fw_event->event_data;

#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
		printk(MPT2SAS_INFO_FMT "discovery event: (%s)", ioc->name,
		    (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
		    "start" : "stop");
		if (event_data->DiscoveryStatus)
			printk("discovery_status(0x%08x)",
			    le32_to_cpu(event_data->DiscoveryStatus));
		printk("\n");
	}
#endif

	if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
	    !ioc->sas_hba.num_phys)
		_scsih_sas_host_add(ioc);
}

/**
 * _scsih_reprobe_lun - reprobing lun
 * @sdev: scsi device struct
 * @no_uld_attach: sdev->no_uld_attach flag setting
 *
 **/
static void
_scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
{
	int rc;

	sdev->no_uld_attach = no_uld_attach ? 1 : 0;
	sdev_printk(KERN_INFO, sdev, "%s raid component\n",
	    sdev->no_uld_attach ? "hiding" : "exposing");
	rc = scsi_device_reprobe(sdev);
}

/**
 * _scsih_reprobe_target - reprobing target
 * @starget: scsi target struct
 * @no_uld_attach: sdev->no_uld_attach flag setting
 *
 * Note: no_uld_attach flag determines whether the disk device is attached
 * to block layer. A value of `1` means to not attach.
 **/
static void
_scsih_reprobe_target(struct scsi_target *starget, int no_uld_attach)
{
	struct MPT2SAS_TARGET *sas_target_priv_data;

	if (starget == NULL)
		return;
	sas_target_priv_data = starget->hostdata;
	if (no_uld_attach)
		sas_target_priv_data->flags |= MPT_TARGET_FLAGS_RAID_COMPONENT;
	else
		sas_target_priv_data->flags &=
		    ~MPT_TARGET_FLAGS_RAID_COMPONENT;

	starget_for_each_device(starget, no_uld_attach ? (void *)1 : NULL,
	    _scsih_reprobe_lun);
}

/**
 * _scsih_sas_volume_add - add new volume
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 *
 * Return nothing.
*/ static void _scsih_sas_volume_add(struct MPT2SAS_ADAPTER *ioc, Mpi2EventIrConfigElement_t *element) { struct _raid_device *raid_device; unsigned long flags; u64 wwid; u16 handle = le16_to_cpu(element->VolDevHandle); int rc; mpt2sas_config_get_volume_wwid(ioc, handle, &wwid); if (!wwid) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return; } spin_lock_irqsave(&ioc->raid_device_lock, flags); raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid); spin_unlock_irqrestore(&ioc->raid_device_lock, flags); if (raid_device) return; raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL); if (!raid_device) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return; } raid_device->id = ioc->sas_id++; raid_device->channel = RAID_CHANNEL; raid_device->handle = handle; raid_device->wwid = wwid; _scsih_raid_device_add(ioc, raid_device); if (!ioc->wait_for_discovery_to_complete) { rc = scsi_add_device(ioc->shost, RAID_CHANNEL, raid_device->id, 0); if (rc) _scsih_raid_device_remove(ioc, raid_device); } else _scsih_determine_boot_device(ioc, raid_device, 1); } /** * _scsih_sas_volume_delete - delete volume * @ioc: per adapter object * @handle: volume device handle * Context: user. * * Return nothing. */ static void _scsih_sas_volume_delete(struct MPT2SAS_ADAPTER *ioc, u16 handle) { struct _raid_device *raid_device; unsigned long flags; struct MPT2SAS_TARGET *sas_target_priv_data; spin_lock_irqsave(&ioc->raid_device_lock, flags); raid_device = _scsih_raid_device_find_by_handle(ioc, handle); spin_unlock_irqrestore(&ioc->raid_device_lock, flags); if (!raid_device) return; if (raid_device->starget) { sas_target_priv_data = raid_device->starget->hostdata; sas_target_priv_data->deleted = 1; scsi_remove_target(&raid_device->starget->dev); } printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), wwid" "(0x%016llx)\n", ioc->name, raid_device->handle, (unsigned long long) raid_device->wwid); _scsih_raid_device_remove(ioc, raid_device); } /** * _scsih_sas_pd_expose - expose pd component to /dev/sdX * @ioc: per adapter object * @element: IR config element data * Context: user. * * Return nothing. */ static void _scsih_sas_pd_expose(struct MPT2SAS_ADAPTER *ioc, Mpi2EventIrConfigElement_t *element) { struct _sas_device *sas_device; unsigned long flags; u16 handle = le16_to_cpu(element->PhysDiskDevHandle); spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = _scsih_sas_device_find_by_handle(ioc, handle); spin_unlock_irqrestore(&ioc->sas_device_lock, flags); if (!sas_device) return; /* exposing raid component */ sas_device->volume_handle = 0; sas_device->volume_wwid = 0; clear_bit(handle, ioc->pd_handles); _scsih_reprobe_target(sas_device->starget, 0); } /** * _scsih_sas_pd_hide - hide pd component from /dev/sdX * @ioc: per adapter object * @element: IR config element data * Context: user. * * Return nothing. 
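 * Note: the volume handle and wwid are cached in the sas_device before
 * the target is reprobed with no_uld_attach set.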
 */
static void
_scsih_sas_pd_hide(struct MPT2SAS_ADAPTER *ioc,
    Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	unsigned long flags;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (!sas_device)
		return;

	/* hiding raid component */
	mpt2sas_config_get_volume_handle(ioc, handle,
	    &sas_device->volume_handle);
	mpt2sas_config_get_volume_wwid(ioc, sas_device->volume_handle,
	    &sas_device->volume_wwid);
	set_bit(handle, ioc->pd_handles);
	_scsih_reprobe_target(sas_device->starget, 1);
}

/**
 * _scsih_sas_pd_delete - delete pd component
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 *
 * Return nothing.
 */
static void
_scsih_sas_pd_delete(struct MPT2SAS_ADAPTER *ioc,
    Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	unsigned long flags;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (!sas_device)
		return;
	_scsih_remove_device(ioc, sas_device);
}

/**
 * _scsih_sas_pd_add - add pd component
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 *
 * Return nothing.
 */
static void
_scsih_sas_pd_add(struct MPT2SAS_ADAPTER *ioc,
    Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	unsigned long flags;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	u32 ioc_status;
	u64 sas_address;
	u16 parent_handle;

	set_bit(handle, ioc->pd_handles);

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device)
		return;

	if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		return;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		return;
	}

	parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
	if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
		mpt2sas_transport_update_links(ioc, sas_address, handle,
		    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);

	_scsih_add_device(ioc, handle, 0, 1);
}

#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
/**
 * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 *
 * Return nothing.
 */
static void
_scsih_sas_ir_config_change_event_debug(struct MPT2SAS_ADAPTER *ioc,
    Mpi2EventDataIrConfigChangeList_t *event_data)
{
	Mpi2EventIrConfigElement_t *element;
	u8 element_type;
	int i;
	char *reason_str = NULL, *element_str = NULL;

	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];

	printk(MPT2SAS_INFO_FMT "raid config change: (%s), elements(%d)\n",
	    ioc->name, (le32_to_cpu(event_data->Flags) &
	    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ?
"foreign" : "native", event_data->NumElements); for (i = 0; i < event_data->NumElements; i++, element++) { switch (element->ReasonCode) { case MPI2_EVENT_IR_CHANGE_RC_ADDED: reason_str = "add"; break; case MPI2_EVENT_IR_CHANGE_RC_REMOVED: reason_str = "remove"; break; case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE: reason_str = "no change"; break; case MPI2_EVENT_IR_CHANGE_RC_HIDE: reason_str = "hide"; break; case MPI2_EVENT_IR_CHANGE_RC_UNHIDE: reason_str = "unhide"; break; case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED: reason_str = "volume_created"; break; case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED: reason_str = "volume_deleted"; break; case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED: reason_str = "pd_created"; break; case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED: reason_str = "pd_deleted"; break; default: reason_str = "unknown reason"; break; } element_type = le16_to_cpu(element->ElementFlags) & MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK; switch (element_type) { case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT: element_str = "volume"; break; case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT: element_str = "phys disk"; break; case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT: element_str = "hot spare"; break; default: element_str = "unknown element"; break; } printk(KERN_INFO "\t(%s:%s), vol handle(0x%04x), " "pd handle(0x%04x), pd num(0x%02x)\n", element_str, reason_str, le16_to_cpu(element->VolDevHandle), le16_to_cpu(element->PhysDiskDevHandle), element->PhysDiskNum); } } #endif /** * _scsih_sas_ir_config_change_event - handle ir configuration change events * @ioc: per adapter object * @fw_event: The fw_event_work object * Context: user. * * Return nothing. */ static void _scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work *fw_event) { Mpi2EventIrConfigElement_t *element; int i; u8 foreign_config; Mpi2EventDataIrConfigChangeList_t *event_data = fw_event->event_data; #ifdef CONFIG_SCSI_MPT2SAS_LOGGING if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) && !ioc->hide_ir_msg) _scsih_sas_ir_config_change_event_debug(ioc, event_data); #endif if (ioc->shost_recovery) return; foreign_config = (le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0; element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; for (i = 0; i < event_data->NumElements; i++, element++) { switch (element->ReasonCode) { case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED: case MPI2_EVENT_IR_CHANGE_RC_ADDED: if (!foreign_config) _scsih_sas_volume_add(ioc, element); break; case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED: case MPI2_EVENT_IR_CHANGE_RC_REMOVED: if (!foreign_config) _scsih_sas_volume_delete(ioc, le16_to_cpu(element->VolDevHandle)); break; case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED: if (!ioc->is_warpdrive) _scsih_sas_pd_hide(ioc, element); break; case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED: if (!ioc->is_warpdrive) _scsih_sas_pd_expose(ioc, element); break; case MPI2_EVENT_IR_CHANGE_RC_HIDE: if (!ioc->is_warpdrive) _scsih_sas_pd_add(ioc, element); break; case MPI2_EVENT_IR_CHANGE_RC_UNHIDE: if (!ioc->is_warpdrive) _scsih_sas_pd_delete(ioc, element); break; } } } /** * _scsih_sas_ir_volume_event - IR volume event * @ioc: per adapter object * @fw_event: The fw_event_work object * Context: user. * * Return nothing. 
*/ static void _scsih_sas_ir_volume_event(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work *fw_event) { u64 wwid; unsigned long flags; struct _raid_device *raid_device; u16 handle; u32 state; int rc; Mpi2EventDataIrVolume_t *event_data = fw_event->event_data; if (ioc->shost_recovery) return; if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED) return; handle = le16_to_cpu(event_data->VolDevHandle); state = le32_to_cpu(event_data->NewValue); if (!ioc->hide_ir_msg) dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: handle(0x%04x), " "old(0x%08x), new(0x%08x)\n", ioc->name, __func__, handle, le32_to_cpu(event_data->PreviousValue), state)); switch (state) { case MPI2_RAID_VOL_STATE_MISSING: case MPI2_RAID_VOL_STATE_FAILED: _scsih_sas_volume_delete(ioc, handle); break; case MPI2_RAID_VOL_STATE_ONLINE: case MPI2_RAID_VOL_STATE_DEGRADED: case MPI2_RAID_VOL_STATE_OPTIMAL: spin_lock_irqsave(&ioc->raid_device_lock, flags); raid_device = _scsih_raid_device_find_by_handle(ioc, handle); spin_unlock_irqrestore(&ioc->raid_device_lock, flags); if (raid_device) break; mpt2sas_config_get_volume_wwid(ioc, handle, &wwid); if (!wwid) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); break; } raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL); if (!raid_device) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); break; } raid_device->id = ioc->sas_id++; raid_device->channel = RAID_CHANNEL; raid_device->handle = handle; raid_device->wwid = wwid; _scsih_raid_device_add(ioc, raid_device); rc = scsi_add_device(ioc->shost, RAID_CHANNEL, raid_device->id, 0); if (rc) _scsih_raid_device_remove(ioc, raid_device); break; case MPI2_RAID_VOL_STATE_INITIALIZING: default: break; } } /** * _scsih_sas_ir_physical_disk_event - PD event * @ioc: per adapter object * @fw_event: The fw_event_work object * Context: user. * * Return nothing. 
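 *
 * Note: only MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED is handled; the
 * active states set the handle in ioc->pd_handles and add the device if
 * it is not already known, while OFFLINE and the like are ignored.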
*/ static void _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work *fw_event) { u16 handle, parent_handle; u32 state; struct _sas_device *sas_device; unsigned long flags; Mpi2ConfigReply_t mpi_reply; Mpi2SasDevicePage0_t sas_device_pg0; u32 ioc_status; Mpi2EventDataIrPhysicalDisk_t *event_data = fw_event->event_data; u64 sas_address; if (ioc->shost_recovery) return; if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED) return; handle = le16_to_cpu(event_data->PhysDiskDevHandle); state = le32_to_cpu(event_data->NewValue); if (!ioc->hide_ir_msg) dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: handle(0x%04x), " "old(0x%08x), new(0x%08x)\n", ioc->name, __func__, handle, le32_to_cpu(event_data->PreviousValue), state)); switch (state) { case MPI2_RAID_PD_STATE_ONLINE: case MPI2_RAID_PD_STATE_DEGRADED: case MPI2_RAID_PD_STATE_REBUILDING: case MPI2_RAID_PD_STATE_OPTIMAL: case MPI2_RAID_PD_STATE_HOT_SPARE: if (!ioc->is_warpdrive) set_bit(handle, ioc->pd_handles); spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = _scsih_sas_device_find_by_handle(ioc, handle); spin_unlock_irqrestore(&ioc->sas_device_lock, flags); if (sas_device) return; if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return; } parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) mpt2sas_transport_update_links(ioc, sas_address, handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); _scsih_add_device(ioc, handle, 0, 1); break; case MPI2_RAID_PD_STATE_OFFLINE: case MPI2_RAID_PD_STATE_NOT_CONFIGURED: case MPI2_RAID_PD_STATE_NOT_COMPATIBLE: default: break; } } #ifdef CONFIG_SCSI_MPT2SAS_LOGGING /** * _scsih_sas_ir_operation_status_event_debug - debug for IR op event * @ioc: per adapter object * @event_data: event data payload * Context: user. * * Return nothing. */ static void _scsih_sas_ir_operation_status_event_debug(struct MPT2SAS_ADAPTER *ioc, Mpi2EventDataIrOperationStatus_t *event_data) { char *reason_str = NULL; switch (event_data->RAIDOperation) { case MPI2_EVENT_IR_RAIDOP_RESYNC: reason_str = "resync"; break; case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION: reason_str = "online capacity expansion"; break; case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK: reason_str = "consistency check"; break; case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT: reason_str = "background init"; break; case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT: reason_str = "make data consistent"; break; } if (!reason_str) return; printk(MPT2SAS_INFO_FMT "raid operational status: (%s)" "\thandle(0x%04x), percent complete(%d)\n", ioc->name, reason_str, le16_to_cpu(event_data->VolDevHandle), event_data->PercentComplete); } #endif /** * _scsih_sas_ir_operation_status_event - handle RAID operation events * @ioc: per adapter object * @fw_event: The fw_event_work object * Context: user. * * Return nothing. 
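 *
 * Note: only the RESYNC operation is tracked here; the percent complete
 * is cached in the raid_device for the raid transport class.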
*/ static void _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work *fw_event) { Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data; static struct _raid_device *raid_device; unsigned long flags; u16 handle; #ifdef CONFIG_SCSI_MPT2SAS_LOGGING if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) && !ioc->hide_ir_msg) _scsih_sas_ir_operation_status_event_debug(ioc, event_data); #endif /* code added for raid transport support */ if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) { handle = le16_to_cpu(event_data->VolDevHandle); spin_lock_irqsave(&ioc->raid_device_lock, flags); raid_device = _scsih_raid_device_find_by_handle(ioc, handle); spin_unlock_irqrestore(&ioc->raid_device_lock, flags); if (!raid_device) return; if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) raid_device->percent_complete = event_data->PercentComplete; } } /** * _scsih_prep_device_scan - initialize parameters prior to device scan * @ioc: per adapter object * * Set the deleted flag prior to device scan. If the device is found during * the scan, then we clear the deleted flag. */ static void _scsih_prep_device_scan(struct MPT2SAS_ADAPTER *ioc) { struct MPT2SAS_DEVICE *sas_device_priv_data; struct scsi_device *sdev; shost_for_each_device(sdev, ioc->shost) { sas_device_priv_data = sdev->hostdata; if (sas_device_priv_data && sas_device_priv_data->sas_target) sas_device_priv_data->sas_target->deleted = 1; } } /** * _scsih_mark_responding_sas_device - mark a sas_devices as responding * @ioc: per adapter object * @sas_address: sas address * @slot: enclosure slot id * @handle: device handle * * After host reset, find out whether devices are still responding. * Used in _scsi_remove_unresponsive_sas_devices. * * Return nothing. */ static void _scsih_mark_responding_sas_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address, u16 slot, u16 handle) { struct MPT2SAS_TARGET *sas_target_priv_data = NULL; struct scsi_target *starget; struct _sas_device *sas_device; unsigned long flags; spin_lock_irqsave(&ioc->sas_device_lock, flags); list_for_each_entry(sas_device, &ioc->sas_device_list, list) { if (sas_device->sas_address == sas_address && sas_device->slot == slot) { sas_device->responding = 1; starget = sas_device->starget; if (starget && starget->hostdata) { sas_target_priv_data = starget->hostdata; sas_target_priv_data->tm_busy = 0; sas_target_priv_data->deleted = 0; } else sas_target_priv_data = NULL; if (starget) starget_printk(KERN_INFO, starget, "handle(0x%04x), sas_addr(0x%016llx), " "enclosure logical id(0x%016llx), " "slot(%d)\n", handle, (unsigned long long)sas_device->sas_address, (unsigned long long) sas_device->enclosure_logical_id, sas_device->slot); if (sas_device->handle == handle) goto out; printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n", sas_device->handle); sas_device->handle = handle; if (sas_target_priv_data) sas_target_priv_data->handle = handle; goto out; } } out: spin_unlock_irqrestore(&ioc->sas_device_lock, flags); } /** * _scsih_search_responding_sas_devices - * @ioc: per adapter object * * After host reset, find out whether devices are still responding. * If not remove. * * Return nothing. 
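 *
 * Note: a device is matched on sas_address plus enclosure slot (see
 * _scsih_mark_responding_sas_device); a stale firmware handle is
 * refreshed in place rather than treated as a removal.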
*/ static void _scsih_search_responding_sas_devices(struct MPT2SAS_ADAPTER *ioc) { Mpi2SasDevicePage0_t sas_device_pg0; Mpi2ConfigReply_t mpi_reply; u16 ioc_status; __le64 sas_address; u16 handle; u32 device_info; u16 slot; printk(MPT2SAS_INFO_FMT "search for end-devices: start\n", ioc->name); if (list_empty(&ioc->sas_device_list)) goto out; handle = 0xFFFF; while (!(mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, handle))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) break; handle = le16_to_cpu(sas_device_pg0.DevHandle); device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); if (!(_scsih_is_end_device(device_info))) continue; sas_address = le64_to_cpu(sas_device_pg0.SASAddress); slot = le16_to_cpu(sas_device_pg0.Slot); _scsih_mark_responding_sas_device(ioc, sas_address, slot, handle); } out: printk(MPT2SAS_INFO_FMT "search for end-devices: complete\n", ioc->name); } /** * _scsih_mark_responding_raid_device - mark a raid_device as responding * @ioc: per adapter object * @wwid: world wide identifier for raid volume * @handle: device handle * * After host reset, find out whether devices are still responding. * Used in _scsi_remove_unresponsive_raid_devices. * * Return nothing. */ static void _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid, u16 handle) { struct MPT2SAS_TARGET *sas_target_priv_data; struct scsi_target *starget; struct _raid_device *raid_device; unsigned long flags; spin_lock_irqsave(&ioc->raid_device_lock, flags); list_for_each_entry(raid_device, &ioc->raid_device_list, list) { if (raid_device->wwid == wwid && raid_device->starget) { starget = raid_device->starget; if (starget && starget->hostdata) { sas_target_priv_data = starget->hostdata; sas_target_priv_data->deleted = 0; } else sas_target_priv_data = NULL; raid_device->responding = 1; spin_unlock_irqrestore(&ioc->raid_device_lock, flags); starget_printk(KERN_INFO, raid_device->starget, "handle(0x%04x), wwid(0x%016llx)\n", handle, (unsigned long long)raid_device->wwid); /* * WARPDRIVE: The handles of the PDs might have changed * across the host reset so re-initialize the * required data for Direct IO */ _scsih_init_warpdrive_properties(ioc, raid_device); if (raid_device->handle == handle) return; printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n", raid_device->handle); raid_device->handle = handle; if (sas_target_priv_data) sas_target_priv_data->handle = handle; return; } } spin_unlock_irqrestore(&ioc->raid_device_lock, flags); } /** * _scsih_search_responding_raid_devices - * @ioc: per adapter object * * After host reset, find out whether devices are still responding. * If not remove. * * Return nothing. 
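 *
 * Note: volumes in the OPTIMAL, ONLINE or DEGRADED state are marked
 * responding; for non-warpdrive controllers the ioc->pd_handles bitmap
 * is also rebuilt from RAID phys disk page 0.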
*/ static void _scsih_search_responding_raid_devices(struct MPT2SAS_ADAPTER *ioc) { Mpi2RaidVolPage1_t volume_pg1; Mpi2RaidVolPage0_t volume_pg0; Mpi2RaidPhysDiskPage0_t pd_pg0; Mpi2ConfigReply_t mpi_reply; u16 ioc_status; u16 handle; u8 phys_disk_num; if (!ioc->ir_firmware) return; printk(MPT2SAS_INFO_FMT "search for raid volumes: start\n", ioc->name); if (list_empty(&ioc->raid_device_list)) goto out; handle = 0xFFFF; while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply, &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) break; handle = le16_to_cpu(volume_pg1.DevHandle); if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, sizeof(Mpi2RaidVolPage0_t))) continue; if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL || volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE || volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) _scsih_mark_responding_raid_device(ioc, le64_to_cpu(volume_pg1.WWID), handle); } /* refresh the pd_handles */ if (!ioc->is_warpdrive) { phys_disk_num = 0xFF; memset(ioc->pd_handles, 0, ioc->pd_handles_sz); while (!(mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply, &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM, phys_disk_num))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) break; phys_disk_num = pd_pg0.PhysDiskNum; handle = le16_to_cpu(pd_pg0.DevHandle); set_bit(handle, ioc->pd_handles); } } out: printk(MPT2SAS_INFO_FMT "search for responding raid volumes: " "complete\n", ioc->name); } /** * _scsih_mark_responding_expander - mark a expander as responding * @ioc: per adapter object * @sas_address: sas address * @handle: * * After host reset, find out whether devices are still responding. * Used in _scsi_remove_unresponsive_expanders. * * Return nothing. */ static void _scsih_mark_responding_expander(struct MPT2SAS_ADAPTER *ioc, u64 sas_address, u16 handle) { struct _sas_node *sas_expander; unsigned long flags; int i; spin_lock_irqsave(&ioc->sas_node_lock, flags); list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { if (sas_expander->sas_address != sas_address) continue; sas_expander->responding = 1; if (sas_expander->handle == handle) goto out; printk(KERN_INFO "\texpander(0x%016llx): handle changed" " from(0x%04x) to (0x%04x)!!!\n", (unsigned long long)sas_expander->sas_address, sas_expander->handle, handle); sas_expander->handle = handle; for (i = 0 ; i < sas_expander->num_phys ; i++) sas_expander->phy[i].handle = handle; goto out; } out: spin_unlock_irqrestore(&ioc->sas_node_lock, flags); } /** * _scsih_search_responding_expanders - * @ioc: per adapter object * * After host reset, find out whether devices are still responding. * If not remove. * * Return nothing. 
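 *
 * Note: expanders are walked with the GET_NEXT_HNDL form of expander
 * page 0 until the firmware returns CONFIG_INVALID_PAGE; a changed
 * handle is propagated to every phy of the expander.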
*/ static void _scsih_search_responding_expanders(struct MPT2SAS_ADAPTER *ioc) { Mpi2ExpanderPage0_t expander_pg0; Mpi2ConfigReply_t mpi_reply; u16 ioc_status; u64 sas_address; u16 handle; printk(MPT2SAS_INFO_FMT "search for expanders: start\n", ioc->name); if (list_empty(&ioc->sas_expander_list)) goto out; handle = 0xFFFF; while (!(mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) break; handle = le16_to_cpu(expander_pg0.DevHandle); sas_address = le64_to_cpu(expander_pg0.SASAddress); printk(KERN_INFO "\texpander present: handle(0x%04x), " "sas_addr(0x%016llx)\n", handle, (unsigned long long)sas_address); _scsih_mark_responding_expander(ioc, sas_address, handle); } out: printk(MPT2SAS_INFO_FMT "search for expanders: complete\n", ioc->name); } /** * _scsih_remove_unresponding_sas_devices - removing unresponding devices * @ioc: per adapter object * * Return nothing. */ static void _scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc) { struct _sas_device *sas_device, *sas_device_next; struct _sas_node *sas_expander; struct _raid_device *raid_device, *raid_device_next; printk(MPT2SAS_INFO_FMT "removing unresponding devices: start\n", ioc->name); list_for_each_entry_safe(sas_device, sas_device_next, &ioc->sas_device_list, list) { if (sas_device->responding) { sas_device->responding = 0; continue; } if (sas_device->starget) starget_printk(KERN_INFO, sas_device->starget, "removing: handle(0x%04x), sas_addr(0x%016llx), " "enclosure logical id(0x%016llx), slot(%d)\n", sas_device->handle, (unsigned long long)sas_device->sas_address, (unsigned long long) sas_device->enclosure_logical_id, sas_device->slot); _scsih_remove_device(ioc, sas_device); } if (!ioc->ir_firmware) goto retry_expander_search; list_for_each_entry_safe(raid_device, raid_device_next, &ioc->raid_device_list, list) { if (raid_device->responding) { raid_device->responding = 0; continue; } if (raid_device->starget) { starget_printk(KERN_INFO, raid_device->starget, "removing: handle(0x%04x), wwid(0x%016llx)\n", raid_device->handle, (unsigned long long)raid_device->wwid); scsi_remove_target(&raid_device->starget->dev); } _scsih_raid_device_remove(ioc, raid_device); } retry_expander_search: sas_expander = NULL; list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { if (sas_expander->responding) { sas_expander->responding = 0; continue; } mpt2sas_expander_remove(ioc, sas_expander->sas_address); goto retry_expander_search; } printk(MPT2SAS_INFO_FMT "removing unresponding devices: complete\n", ioc->name); /* unblock devices */ _scsih_ublock_io_all_device(ioc); } static void _scsih_refresh_expander_links(struct MPT2SAS_ADAPTER *ioc, struct _sas_node *sas_expander, u16 handle) { Mpi2ExpanderPage1_t expander_pg1; Mpi2ConfigReply_t mpi_reply; int i; for (i = 0 ; i < sas_expander->num_phys ; i++) { if ((mpt2sas_config_get_expander_pg1(ioc, &mpi_reply, &expander_pg1, i, handle))) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return; } mpt2sas_transport_update_links(ioc, sas_expander->sas_address, le16_to_cpu(expander_pg1.AttachedDevHandle), i, expander_pg1.NegotiatedLinkRate >> 4); } } /** * _scsih_scan_for_devices_after_reset - scan for devices after host reset * @ioc: per adapter object * * Return nothing. 
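 *
 * Note: discovery order matters here: expanders are refreshed first,
 * then (with IR firmware) phys disks and volumes, and finally plain SAS
 * end devices, so parent links are in place before children are added.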
*/ static void _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc) { Mpi2ExpanderPage0_t expander_pg0; Mpi2SasDevicePage0_t sas_device_pg0; Mpi2RaidVolPage1_t volume_pg1; Mpi2RaidVolPage0_t volume_pg0; Mpi2RaidPhysDiskPage0_t pd_pg0; Mpi2EventIrConfigElement_t element; Mpi2ConfigReply_t mpi_reply; u8 phys_disk_num; u16 ioc_status; u16 handle, parent_handle; u64 sas_address; struct _sas_device *sas_device; struct _sas_node *expander_device; static struct _raid_device *raid_device; printk(MPT2SAS_INFO_FMT "scan devices: start\n", ioc->name); _scsih_sas_host_refresh(ioc); /* expanders */ handle = 0xFFFF; while (!(mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) break; handle = le16_to_cpu(expander_pg0.DevHandle); expander_device = mpt2sas_scsih_expander_find_by_sas_address( ioc, le64_to_cpu(expander_pg0.SASAddress)); if (expander_device) _scsih_refresh_expander_links(ioc, expander_device, handle); else _scsih_expander_add(ioc, handle); } if (!ioc->ir_firmware) goto skip_to_sas; /* phys disk */ phys_disk_num = 0xFF; while (!(mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply, &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM, phys_disk_num))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) break; phys_disk_num = pd_pg0.PhysDiskNum; handle = le16_to_cpu(pd_pg0.DevHandle); sas_device = _scsih_sas_device_find_by_handle(ioc, handle); if (sas_device) continue; if (mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle) != 0) continue; parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) { mpt2sas_transport_update_links(ioc, sas_address, handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); set_bit(handle, ioc->pd_handles); _scsih_add_device(ioc, handle, 0, 1); } } /* volumes */ handle = 0xFFFF; while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply, &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) break; handle = le16_to_cpu(volume_pg1.DevHandle); raid_device = _scsih_raid_device_find_by_wwid(ioc, le64_to_cpu(volume_pg1.WWID)); if (raid_device) continue; if (mpt2sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, sizeof(Mpi2RaidVolPage0_t))) continue; if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL || volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE || volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) { memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t)); element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED; element.VolDevHandle = volume_pg1.DevHandle; _scsih_sas_volume_add(ioc, &element); } } skip_to_sas: /* sas devices */ handle = 0xFFFF; while (!(mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, handle))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) break; handle = le16_to_cpu(sas_device_pg0.DevHandle); if (!(_scsih_is_end_device( le32_to_cpu(sas_device_pg0.DeviceInfo)))) continue; sas_device = 
mpt2sas_scsih_sas_device_find_by_sas_address(ioc, le64_to_cpu(sas_device_pg0.SASAddress)); if (sas_device) continue; parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) { mpt2sas_transport_update_links(ioc, sas_address, handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); _scsih_add_device(ioc, handle, 0, 0); } } printk(MPT2SAS_INFO_FMT "scan devices: complete\n", ioc->name); } /** * mpt2sas_scsih_reset_handler - reset callback handler (for scsih) * @ioc: per adapter object * @reset_phase: phase * * The handler for doing any required cleanup or initialization. * * The reset phase can be MPT2_IOC_PRE_RESET, MPT2_IOC_AFTER_RESET, * MPT2_IOC_DONE_RESET * * Return nothing. */ void mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase) { switch (reset_phase) { case MPT2_IOC_PRE_RESET: dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: " "MPT2_IOC_PRE_RESET\n", ioc->name, __func__)); break; case MPT2_IOC_AFTER_RESET: dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: " "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__)); if (ioc->scsih_cmds.status & MPT2_CMD_PENDING) { ioc->scsih_cmds.status |= MPT2_CMD_RESET; mpt2sas_base_free_smid(ioc, ioc->scsih_cmds.smid); complete(&ioc->scsih_cmds.done); } if (ioc->tm_cmds.status & MPT2_CMD_PENDING) { ioc->tm_cmds.status |= MPT2_CMD_RESET; mpt2sas_base_free_smid(ioc, ioc->tm_cmds.smid); complete(&ioc->tm_cmds.done); } _scsih_fw_event_cleanup_queue(ioc); _scsih_flush_running_cmds(ioc); break; case MPT2_IOC_DONE_RESET: dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: " "MPT2_IOC_DONE_RESET\n", ioc->name, __func__)); _scsih_sas_host_refresh(ioc); _scsih_prep_device_scan(ioc); _scsih_search_responding_sas_devices(ioc); _scsih_search_responding_raid_devices(ioc); _scsih_search_responding_expanders(ioc); if (!ioc->is_driver_loading) { _scsih_prep_device_scan(ioc); _scsih_search_responding_sas_devices(ioc); _scsih_search_responding_raid_devices(ioc); _scsih_search_responding_expanders(ioc); _scsih_error_recovery_delete_devices(ioc); } break; } } /** * _firmware_event_work - delayed task for processing firmware events * @ioc: per adapter object * @work: equal to the fw_event_work object * Context: user. * * Return nothing. 
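 *
 * Note: this runs from the per-adapter firmware_event_thread workqueue;
 * the fw_event is freed here whether it is processed or discarded
 * because the queue is being flushed.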
*/ static void _firmware_event_work(struct work_struct *work) { struct fw_event_work *fw_event = container_of(work, struct fw_event_work, delayed_work.work); struct MPT2SAS_ADAPTER *ioc = fw_event->ioc; /* the queue is being flushed so ignore this event */ if (ioc->remove_host || fw_event->cancel_pending_work || ioc->pci_error_recovery) { _scsih_fw_event_free(ioc, fw_event); return; } switch (fw_event->event) { case MPT2SAS_REMOVE_UNRESPONDING_DEVICES: while (scsi_host_in_recovery(ioc->shost)) ssleep(1); _scsih_remove_unresponding_sas_devices(ioc); _scsih_scan_for_devices_after_reset(ioc); break; case MPT2SAS_PORT_ENABLE_COMPLETE: ioc->start_scan = 0; dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "port enable: complete " "from worker thread\n", ioc->name)); break; case MPT2SAS_TURN_ON_FAULT_LED: _scsih_turn_on_fault_led(ioc, fw_event->device_handle); break; case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: _scsih_sas_topology_change_event(ioc, fw_event); break; case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: _scsih_sas_device_status_change_event(ioc, fw_event); break; case MPI2_EVENT_SAS_DISCOVERY: _scsih_sas_discovery_event(ioc, fw_event); break; case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: _scsih_sas_broadcast_primitive_event(ioc, fw_event); break; case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: _scsih_sas_enclosure_dev_status_change_event(ioc, fw_event); break; case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: _scsih_sas_ir_config_change_event(ioc, fw_event); break; case MPI2_EVENT_IR_VOLUME: _scsih_sas_ir_volume_event(ioc, fw_event); break; case MPI2_EVENT_IR_PHYSICAL_DISK: _scsih_sas_ir_physical_disk_event(ioc, fw_event); break; case MPI2_EVENT_IR_OPERATION_STATUS: _scsih_sas_ir_operation_status_event(ioc, fw_event); break; } _scsih_fw_event_free(ioc, fw_event); } /** * mpt2sas_scsih_event_callback - firmware event handler (called at ISR time) * @ioc: per adapter object * @msix_index: MSIX table index supplied by the OS * @reply: reply message frame(lower 32bit addr) * Context: interrupt. * * This function merely adds a new work task into ioc->firmware_event_thread. * The tasks are worked from _firmware_event_work in user context. * * Return 1 meaning mf should be freed from _base_interrupt * 0 means the mf is freed from this function. 
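 *
 * Note: broadcast primitives are throttled via ioc->broadcast_aen_busy
 * (further AENs only bump broadcast_aen_pending); for every queued event
 * the event data is copied out of the reply frame before queuing.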
*/ u8 mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply) { struct fw_event_work *fw_event; Mpi2EventNotificationReply_t *mpi_reply; u16 event; u16 sz; /* events turned off due to host reset or driver unloading */ if (ioc->remove_host || ioc->pci_error_recovery) return 1; mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); event = le16_to_cpu(mpi_reply->Event); switch (event) { /* handle these */ case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: { Mpi2EventDataSasBroadcastPrimitive_t *baen_data = (Mpi2EventDataSasBroadcastPrimitive_t *) mpi_reply->EventData; if (baen_data->Primitive != MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT) return 1; if (ioc->broadcast_aen_busy) { ioc->broadcast_aen_pending++; return 1; } else ioc->broadcast_aen_busy = 1; break; } case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: _scsih_check_topo_delete_events(ioc, (Mpi2EventDataSasTopologyChangeList_t *) mpi_reply->EventData); break; case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: _scsih_check_ir_config_unhide_events(ioc, (Mpi2EventDataIrConfigChangeList_t *) mpi_reply->EventData); break; case MPI2_EVENT_IR_VOLUME: _scsih_check_volume_delete_events(ioc, (Mpi2EventDataIrVolume_t *) mpi_reply->EventData); break; case MPI2_EVENT_LOG_ENTRY_ADDED: { Mpi2EventDataLogEntryAdded_t *log_entry; u32 *log_code; if (!ioc->is_warpdrive) break; log_entry = (Mpi2EventDataLogEntryAdded_t *) mpi_reply->EventData; log_code = (u32 *)log_entry->LogData; if (le16_to_cpu(log_entry->LogEntryQualifier) != MPT2_WARPDRIVE_LOGENTRY) break; switch (le32_to_cpu(*log_code)) { case MPT2_WARPDRIVE_LC_SSDT: printk(MPT2SAS_WARN_FMT "WarpDrive Warning: " "IO Throttling has occurred in the WarpDrive " "subsystem. Check WarpDrive documentation for " "additional details.\n", ioc->name); break; case MPT2_WARPDRIVE_LC_SSDLW: printk(MPT2SAS_WARN_FMT "WarpDrive Warning: " "Program/Erase Cycles for the WarpDrive subsystem " "in degraded range. Check WarpDrive documentation " "for additional details.\n", ioc->name); break; case MPT2_WARPDRIVE_LC_SSDLF: printk(MPT2SAS_ERR_FMT "WarpDrive Fatal Error: " "There are no Program/Erase Cycles for the " "WarpDrive subsystem. The storage device will be " "in read-only mode. Check WarpDrive documentation " "for additional details.\n", ioc->name); break; case MPT2_WARPDRIVE_LC_BRMF: printk(MPT2SAS_ERR_FMT "WarpDrive Fatal Error: " "The Backup Rail Monitor has failed on the " "WarpDrive subsystem. 
Check WarpDrive " "documentation for additional details.\n", ioc->name); break; } break; } case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: case MPI2_EVENT_IR_OPERATION_STATUS: case MPI2_EVENT_SAS_DISCOVERY: case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: case MPI2_EVENT_IR_PHYSICAL_DISK: break; default: /* ignore the rest */ return 1; } fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC); if (!fw_event) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return 1; } sz = le16_to_cpu(mpi_reply->EventDataLength) * 4; fw_event->event_data = kzalloc(sz, GFP_ATOMIC); if (!fw_event->event_data) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); kfree(fw_event); return 1; } memcpy(fw_event->event_data, mpi_reply->EventData, sz); fw_event->ioc = ioc; fw_event->VF_ID = mpi_reply->VF_ID; fw_event->VP_ID = mpi_reply->VP_ID; fw_event->event = event; _scsih_fw_event_add(ioc, fw_event); return 1; } /* shost template */ static struct scsi_host_template scsih_driver_template = { .module = THIS_MODULE, .name = "Fusion MPT SAS Host", .proc_name = MPT2SAS_DRIVER_NAME, .queuecommand = _scsih_qcmd, .target_alloc = _scsih_target_alloc, .slave_alloc = _scsih_slave_alloc, .slave_configure = _scsih_slave_configure, .target_destroy = _scsih_target_destroy, .slave_destroy = _scsih_slave_destroy, .scan_finished = _scsih_scan_finished, .scan_start = _scsih_scan_start, .change_queue_depth = _scsih_change_queue_depth, .change_queue_type = _scsih_change_queue_type, .eh_abort_handler = _scsih_abort, .eh_device_reset_handler = _scsih_dev_reset, .eh_target_reset_handler = _scsih_target_reset, .eh_host_reset_handler = _scsih_host_reset, .bios_param = _scsih_bios_param, .can_queue = 1, .this_id = -1, .sg_tablesize = MPT2SAS_SG_DEPTH, .max_sectors = 32767, .cmd_per_lun = 7, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = mpt2sas_host_attrs, .sdev_attrs = mpt2sas_dev_attrs, }; /** * _scsih_expander_node_remove - removing expander device from list. * @ioc: per adapter object * @sas_expander: the sas_device object * Context: Calling function should acquire ioc->sas_node_lock. * * Removing object and freeing associated memory from the * ioc->sas_expander_list. * * Return nothing. */ static void _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc, struct _sas_node *sas_expander) { struct _sas_port *mpt2sas_port, *next; /* remove sibling ports attached to this expander */ list_for_each_entry_safe(mpt2sas_port, next, &sas_expander->sas_port_list, port_list) { if (ioc->shost_recovery) return; if (mpt2sas_port->remote_identify.device_type == SAS_END_DEVICE) mpt2sas_device_remove(ioc, mpt2sas_port->remote_identify.sas_address); else if (mpt2sas_port->remote_identify.device_type == SAS_EDGE_EXPANDER_DEVICE || mpt2sas_port->remote_identify.device_type == SAS_FANOUT_EXPANDER_DEVICE) mpt2sas_expander_remove(ioc, mpt2sas_port->remote_identify.sas_address); } mpt2sas_transport_port_remove(ioc, sas_expander->sas_address, sas_expander->sas_address_parent); printk(MPT2SAS_INFO_FMT "expander_remove: handle" "(0x%04x), sas_addr(0x%016llx)\n", ioc->name, sas_expander->handle, (unsigned long long) sas_expander->sas_address); kfree(sas_expander->phy); kfree(sas_expander); } /** * _scsih_ir_shutdown - IR shutdown notification * @ioc: per adapter object * * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that * the host system is shutting down. * * Return nothing. 
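 *
 * Note: this issues MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED through
 * the scsih_cmds internal command and waits up to 10 seconds for the
 * reply before giving up.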
*/ static void _scsih_ir_shutdown(struct MPT2SAS_ADAPTER *ioc) { Mpi2RaidActionRequest_t *mpi_request; Mpi2RaidActionReply_t *mpi_reply; u16 smid; /* is IR firmware build loaded ? */ if (!ioc->ir_firmware) return; /* are there any volumes ? */ if (list_empty(&ioc->raid_device_list)) return; mutex_lock(&ioc->scsih_cmds.mutex); if (ioc->scsih_cmds.status != MPT2_CMD_NOT_USED) { printk(MPT2SAS_ERR_FMT "%s: scsih_cmd in use\n", ioc->name, __func__); goto out; } ioc->scsih_cmds.status = MPT2_CMD_PENDING; smid = mpt2sas_base_get_smid(ioc, ioc->scsih_cb_idx); if (!smid) { printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n", ioc->name, __func__); ioc->scsih_cmds.status = MPT2_CMD_NOT_USED; goto out; } mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); ioc->scsih_cmds.smid = smid; memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t)); mpi_request->Function = MPI2_FUNCTION_RAID_ACTION; mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED; if (!ioc->hide_ir_msg) printk(MPT2SAS_INFO_FMT "IR shutdown (sending)\n", ioc->name); init_completion(&ioc->scsih_cmds.done); mpt2sas_base_put_smid_default(ioc, smid); wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ); if (!(ioc->scsih_cmds.status & MPT2_CMD_COMPLETE)) { printk(MPT2SAS_ERR_FMT "%s: timeout\n", ioc->name, __func__); goto out; } if (ioc->scsih_cmds.status & MPT2_CMD_REPLY_VALID) { mpi_reply = ioc->scsih_cmds.reply; if (!ioc->hide_ir_msg) printk(MPT2SAS_INFO_FMT "IR shutdown (complete): " "ioc_status(0x%04x), loginfo(0x%08x)\n", ioc->name, le16_to_cpu(mpi_reply->IOCStatus), le32_to_cpu(mpi_reply->IOCLogInfo)); } out: ioc->scsih_cmds.status = MPT2_CMD_NOT_USED; mutex_unlock(&ioc->scsih_cmds.mutex); } /** * _scsih_shutdown - routine call during system shutdown * @pdev: PCI device struct * * Return nothing. */ static void _scsih_shutdown(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); struct workqueue_struct *wq; unsigned long flags; ioc->remove_host = 1; _scsih_fw_event_cleanup_queue(ioc); spin_lock_irqsave(&ioc->fw_event_lock, flags); wq = ioc->firmware_event_thread; ioc->firmware_event_thread = NULL; spin_unlock_irqrestore(&ioc->fw_event_lock, flags); if (wq) destroy_workqueue(wq); _scsih_ir_shutdown(ioc); mpt2sas_base_detach(ioc); } /** * _scsih_remove - detach and remove add host * @pdev: PCI device struct * * Routine called when unloading the driver. * Return nothing. 
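 *
 * Note: teardown order mirrors _scsih_shutdown: the firmware event queue
 * is flushed and its workqueue destroyed first, then volumes, sas ports
 * and phys are released before detaching from the base driver.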
*/ static void __devexit _scsih_remove(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); struct _sas_port *mpt2sas_port, *next_port; struct _raid_device *raid_device, *next; struct MPT2SAS_TARGET *sas_target_priv_data; struct workqueue_struct *wq; unsigned long flags; ioc->remove_host = 1; _scsih_fw_event_cleanup_queue(ioc); spin_lock_irqsave(&ioc->fw_event_lock, flags); wq = ioc->firmware_event_thread; ioc->firmware_event_thread = NULL; spin_unlock_irqrestore(&ioc->fw_event_lock, flags); if (wq) destroy_workqueue(wq); /* release all the volumes */ _scsih_ir_shutdown(ioc); list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list, list) { if (raid_device->starget) { sas_target_priv_data = raid_device->starget->hostdata; sas_target_priv_data->deleted = 1; scsi_remove_target(&raid_device->starget->dev); } printk(MPT2SAS_INFO_FMT "removing handle(0x%04x), wwid" "(0x%016llx)\n", ioc->name, raid_device->handle, (unsigned long long) raid_device->wwid); _scsih_raid_device_remove(ioc, raid_device); } /* free ports attached to the sas_host */ list_for_each_entry_safe(mpt2sas_port, next_port, &ioc->sas_hba.sas_port_list, port_list) { if (mpt2sas_port->remote_identify.device_type == SAS_END_DEVICE) mpt2sas_device_remove(ioc, mpt2sas_port->remote_identify.sas_address); else if (mpt2sas_port->remote_identify.device_type == SAS_EDGE_EXPANDER_DEVICE || mpt2sas_port->remote_identify.device_type == SAS_FANOUT_EXPANDER_DEVICE) mpt2sas_expander_remove(ioc, mpt2sas_port->remote_identify.sas_address); } /* free phys attached to the sas_host */ if (ioc->sas_hba.num_phys) { kfree(ioc->sas_hba.phy); ioc->sas_hba.phy = NULL; ioc->sas_hba.num_phys = 0; } sas_remove_host(shost); mpt2sas_base_detach(ioc); list_del(&ioc->list); scsi_remove_host(shost); scsi_host_put(shost); } /** * _scsih_probe_boot_devices - reports 1st device * @ioc: per adapter object * * If specified in bios page 2, this routine reports the 1st * device scsi-ml or sas transport for persistent boot device * purposes. 
Please refer to function _scsih_determine_boot_device() */ static void _scsih_probe_boot_devices(struct MPT2SAS_ADAPTER *ioc) { u8 is_raid; void *device; struct _sas_device *sas_device; struct _raid_device *raid_device; u16 handle; u64 sas_address_parent; u64 sas_address; unsigned long flags; int rc; /* no Bios, return immediately */ if (!ioc->bios_pg3.BiosVersion) return; device = NULL; is_raid = 0; if (ioc->req_boot_device.device) { device = ioc->req_boot_device.device; is_raid = ioc->req_boot_device.is_raid; } else if (ioc->req_alt_boot_device.device) { device = ioc->req_alt_boot_device.device; is_raid = ioc->req_alt_boot_device.is_raid; } else if (ioc->current_boot_device.device) { device = ioc->current_boot_device.device; is_raid = ioc->current_boot_device.is_raid; } if (!device) return; if (is_raid) { raid_device = device; rc = scsi_add_device(ioc->shost, RAID_CHANNEL, raid_device->id, 0); if (rc) _scsih_raid_device_remove(ioc, raid_device); } else { sas_device = device; handle = sas_device->handle; sas_address_parent = sas_device->sas_address_parent; sas_address = sas_device->sas_address; spin_lock_irqsave(&ioc->sas_device_lock, flags); list_move_tail(&sas_device->list, &ioc->sas_device_list); spin_unlock_irqrestore(&ioc->sas_device_lock, flags); if (ioc->hide_drives) return; if (!mpt2sas_transport_port_add(ioc, sas_device->handle, sas_device->sas_address_parent)) { _scsih_sas_device_remove(ioc, sas_device); } else if (!sas_device->starget) { if (!ioc->is_driver_loading) mpt2sas_transport_port_remove(ioc, sas_address, sas_address_parent); _scsih_sas_device_remove(ioc, sas_device); } } } /** * _scsih_probe_raid - reporting raid volumes to scsi-ml * @ioc: per adapter object * * Called during initial loading of the driver. */ static void _scsih_probe_raid(struct MPT2SAS_ADAPTER *ioc) { struct _raid_device *raid_device, *raid_next; int rc; list_for_each_entry_safe(raid_device, raid_next, &ioc->raid_device_list, list) { if (raid_device->starget) continue; rc = scsi_add_device(ioc->shost, RAID_CHANNEL, raid_device->id, 0); if (rc) _scsih_raid_device_remove(ioc, raid_device); } } /** * _scsih_probe_sas - reporting sas devices to sas transport * @ioc: per adapter object * * Called during initial loading of the driver. */ static void _scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc) { struct _sas_device *sas_device, *next; unsigned long flags; /* SAS Device List */ list_for_each_entry_safe(sas_device, next, &ioc->sas_device_init_list, list) { if (ioc->hide_drives) continue; if (!mpt2sas_transport_port_add(ioc, sas_device->handle, sas_device->sas_address_parent)) { list_del(&sas_device->list); kfree(sas_device); continue; } else if (!sas_device->starget) { if (!ioc->is_driver_loading) mpt2sas_transport_port_remove(ioc, sas_device->sas_address, sas_device->sas_address_parent); list_del(&sas_device->list); kfree(sas_device); continue; } spin_lock_irqsave(&ioc->sas_device_lock, flags); list_move_tail(&sas_device->list, &ioc->sas_device_list); spin_unlock_irqrestore(&ioc->sas_device_lock, flags); } } /** * _scsih_probe_devices - probing for devices * @ioc: per adapter object * * Called during initial loading of the driver. 
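 *
 * Note: with IR firmware the RAID-vs-SAS probe order follows the volume
 * mapping mode in IOC page 8 (low volume mapping probes RAID volumes
 * before SAS devices; otherwise the order is reversed).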
*/ static void _scsih_probe_devices(struct MPT2SAS_ADAPTER *ioc) { u16 volume_mapping_flags; if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR)) return; /* return when IOC doesn't support initiator mode */ _scsih_probe_boot_devices(ioc); if (ioc->ir_firmware) { volume_mapping_flags = le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) & MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE; if (volume_mapping_flags == MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) { _scsih_probe_raid(ioc); _scsih_probe_sas(ioc); } else { _scsih_probe_sas(ioc); _scsih_probe_raid(ioc); } } else _scsih_probe_sas(ioc); } /** * _scsih_scan_start - scsi lld callback for .scan_start * @shost: SCSI host pointer * * The shost has the ability to discover targets on its own instead * of scanning the entire bus. In our implemention, we will kick off * firmware discovery. */ static void _scsih_scan_start(struct Scsi_Host *shost) { struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); int rc; if (diag_buffer_enable != -1 && diag_buffer_enable != 0) mpt2sas_enable_diag_buffer(ioc, diag_buffer_enable); ioc->start_scan = 1; rc = mpt2sas_port_enable(ioc); if (rc != 0) printk(MPT2SAS_INFO_FMT "port enable: FAILED\n", ioc->name); } /** * _scsih_scan_finished - scsi lld callback for .scan_finished * @shost: SCSI host pointer * @time: elapsed time of the scan in jiffies * * This function will be called periodically until it returns 1 with the * scsi_host and the elapsed time of the scan in jiffies. In our implemention, * we wait for firmware discovery to complete, then return 1. */ static int _scsih_scan_finished(struct Scsi_Host *shost, unsigned long time) { struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); if (time >= (300 * HZ)) { ioc->base_cmds.status = MPT2_CMD_NOT_USED; printk(MPT2SAS_INFO_FMT "port enable: FAILED with timeout " "(timeout=300s)\n", ioc->name); ioc->is_driver_loading = 0; return 1; } if (ioc->start_scan) return 0; if (ioc->start_scan_failed) { printk(MPT2SAS_INFO_FMT "port enable: FAILED with " "(ioc_status=0x%08x)\n", ioc->name, ioc->start_scan_failed); ioc->is_driver_loading = 0; ioc->wait_for_discovery_to_complete = 0; ioc->remove_host = 1; return 1; } printk(MPT2SAS_INFO_FMT "port enable: SUCCESS\n", ioc->name); ioc->base_cmds.status = MPT2_CMD_NOT_USED; if (ioc->wait_for_discovery_to_complete) { ioc->wait_for_discovery_to_complete = 0; _scsih_probe_devices(ioc); } mpt2sas_base_start_watchdog(ioc); ioc->is_driver_loading = 0; return 1; } /** * _scsih_probe - attach and add scsi host * @pdev: PCI device struct * @id: pci device id * * Returns 0 success, anything else error. 
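 *
 * Note: SSS6200 (warpdrive) controllers come up with hide_ir_msg set and
 * may hide member drives depending on manufacturing page 10; non-warpdrive
 * controllers never hide drives.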
 */
static int
_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
    struct MPT2SAS_ADAPTER *ioc;
    struct Scsi_Host *shost;

    shost = scsi_host_alloc(&scsih_driver_template,
        sizeof(struct MPT2SAS_ADAPTER));
    if (!shost)
        return -ENODEV;

    /* init local params */
    ioc = shost_priv(shost);
    memset(ioc, 0, sizeof(struct MPT2SAS_ADAPTER));
    INIT_LIST_HEAD(&ioc->list);
    list_add_tail(&ioc->list, &mpt2sas_ioc_list);
    ioc->shost = shost;
    ioc->id = mpt_ids++;
    sprintf(ioc->name, "%s%d", MPT2SAS_DRIVER_NAME, ioc->id);
    ioc->pdev = pdev;
    if (id->device == MPI2_MFGPAGE_DEVID_SSS6200) {
        ioc->is_warpdrive = 1;
        ioc->hide_ir_msg = 1;
    } else
        ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
    ioc->scsi_io_cb_idx = scsi_io_cb_idx;
    ioc->tm_cb_idx = tm_cb_idx;
    ioc->ctl_cb_idx = ctl_cb_idx;
    ioc->base_cb_idx = base_cb_idx;
    ioc->port_enable_cb_idx = port_enable_cb_idx;
    ioc->transport_cb_idx = transport_cb_idx;
    ioc->scsih_cb_idx = scsih_cb_idx;
    ioc->config_cb_idx = config_cb_idx;
    ioc->tm_tr_cb_idx = tm_tr_cb_idx;
    ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
    ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
    ioc->logging_level = logging_level;
    ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
    /* misc semaphores and spin locks */
    mutex_init(&ioc->reset_in_progress_mutex);
    spin_lock_init(&ioc->ioc_reset_in_progress_lock);
    spin_lock_init(&ioc->scsi_lookup_lock);
    spin_lock_init(&ioc->sas_device_lock);
    spin_lock_init(&ioc->sas_node_lock);
    spin_lock_init(&ioc->fw_event_lock);
    spin_lock_init(&ioc->raid_device_lock);

    INIT_LIST_HEAD(&ioc->sas_device_list);
    INIT_LIST_HEAD(&ioc->sas_device_init_list);
    INIT_LIST_HEAD(&ioc->sas_expander_list);
    INIT_LIST_HEAD(&ioc->fw_event_list);
    INIT_LIST_HEAD(&ioc->raid_device_list);
    INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
    INIT_LIST_HEAD(&ioc->delayed_tr_list);
    INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);

    /* init shost parameters */
    shost->max_cmd_len = 32;
    shost->max_lun = max_lun;
    shost->transportt = mpt2sas_transport_template;
    shost->unique_id = ioc->id;

    if (max_sectors != 0xFFFF) {
        if (max_sectors < 64) {
            shost->max_sectors = 64;
            printk(MPT2SAS_WARN_FMT "Invalid value %d passed "
                "for max_sectors, range is 64 to 32767. Assigning "
                "value of 64.\n", ioc->name, max_sectors);
        } else if (max_sectors > 32767) {
            shost->max_sectors = 32767;
            printk(MPT2SAS_WARN_FMT "Invalid value %d passed "
                "for max_sectors, range is 64 to 32767. 
Assigning " "default value of 32767.\n", ioc->name, max_sectors); } else { shost->max_sectors = max_sectors & 0xFFFE; printk(MPT2SAS_INFO_FMT "The max_sectors value is " "set to %d\n", ioc->name, shost->max_sectors); } } if ((scsi_add_host(shost, &pdev->dev))) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); list_del(&ioc->list); goto out_add_shost_fail; } scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | SHOST_DIF_TYPE3_PROTECTION); scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); /* event thread */ snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), "fw_event%d", ioc->id); ioc->firmware_event_thread = create_singlethread_workqueue( ioc->firmware_event_name); if (!ioc->firmware_event_thread) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); goto out_thread_fail; } ioc->is_driver_loading = 1; if ((mpt2sas_base_attach(ioc))) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); goto out_attach_fail; } if (ioc->is_warpdrive) { if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS) ioc->hide_drives = 0; else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS) ioc->hide_drives = 1; else { if (_scsih_get_num_volumes(ioc)) ioc->hide_drives = 1; else ioc->hide_drives = 0; } } else ioc->hide_drives = 0; scsi_scan_host(shost); return 0; out_attach_fail: destroy_workqueue(ioc->firmware_event_thread); out_thread_fail: list_del(&ioc->list); scsi_remove_host(shost); scsi_host_put(shost); out_add_shost_fail: return -ENODEV; } #ifdef CONFIG_PM /** * _scsih_suspend - power management suspend main entry point * @pdev: PCI device struct * @state: PM state change to (usually PCI_D3) * * Returns 0 success, anything else error. */ static int _scsih_suspend(struct pci_dev *pdev, pm_message_t state) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); pci_power_t device_state; mpt2sas_base_stop_watchdog(ioc); scsi_block_requests(shost); device_state = pci_choose_state(pdev, state); printk(MPT2SAS_INFO_FMT "pdev=0x%p, slot=%s, entering " "operating state [D%d]\n", ioc->name, pdev, pci_name(pdev), device_state); mpt2sas_base_free_resources(ioc); pci_save_state(pdev); pci_disable_device(pdev); pci_set_power_state(pdev, device_state); return 0; } /** * _scsih_resume - power management resume main entry point * @pdev: PCI device struct * * Returns 0 success, anything else error. */ static int _scsih_resume(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); pci_power_t device_state = pdev->current_state; int r; printk(MPT2SAS_INFO_FMT "pdev=0x%p, slot=%s, previous " "operating state [D%d]\n", ioc->name, pdev, pci_name(pdev), device_state); pci_set_power_state(pdev, PCI_D0); pci_enable_wake(pdev, PCI_D0, 0); pci_restore_state(pdev); ioc->pdev = pdev; r = mpt2sas_base_map_resources(ioc); if (r) return r; mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, SOFT_RESET); scsi_unblock_requests(shost); mpt2sas_base_start_watchdog(ioc); return 0; } #endif /* CONFIG_PM */ /** * _scsih_pci_error_detected - Called when a PCI error is detected. * @pdev: PCI device struct * @state: PCI channel state * * Description: Called when a PCI error is detected. 
* * Return value: * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT */ static pci_ers_result_t _scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); printk(MPT2SAS_INFO_FMT "PCI error: detected callback, state(%d)!!\n", ioc->name, state); switch (state) { case pci_channel_io_normal: return PCI_ERS_RESULT_CAN_RECOVER; case pci_channel_io_frozen: /* Fatal error, prepare for slot reset */ ioc->pci_error_recovery = 1; scsi_block_requests(ioc->shost); mpt2sas_base_stop_watchdog(ioc); mpt2sas_base_free_resources(ioc); return PCI_ERS_RESULT_NEED_RESET; case pci_channel_io_perm_failure: /* Permanent error, prepare for device removal */ ioc->pci_error_recovery = 1; mpt2sas_base_stop_watchdog(ioc); _scsih_flush_running_cmds(ioc); return PCI_ERS_RESULT_DISCONNECT; } return PCI_ERS_RESULT_NEED_RESET; } /** * _scsih_pci_slot_reset - Called when PCI slot has been reset. * @pdev: PCI device struct * * Description: This routine is called by the pci error recovery * code after the PCI slot has been reset, just before we * should resume normal operations. */ static pci_ers_result_t _scsih_pci_slot_reset(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); int rc; printk(MPT2SAS_INFO_FMT "PCI error: slot reset callback!!\n", ioc->name); ioc->pci_error_recovery = 0; ioc->pdev = pdev; pci_restore_state(pdev); rc = mpt2sas_base_map_resources(ioc); if (rc) return PCI_ERS_RESULT_DISCONNECT; rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, FORCE_BIG_HAMMER); printk(MPT2SAS_WARN_FMT "hard reset: %s\n", ioc->name, (rc == 0) ? "success" : "failed"); if (!rc) return PCI_ERS_RESULT_RECOVERED; else return PCI_ERS_RESULT_DISCONNECT; } /** * _scsih_pci_resume() - resume normal ops after PCI reset * @pdev: pointer to PCI device * * Called when the error recovery driver tells us that its * OK to resume normal operation. Use completion to allow * halted scsi ops to resume. */ static void _scsih_pci_resume(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); printk(MPT2SAS_INFO_FMT "PCI error: resume callback!!\n", ioc->name); pci_cleanup_aer_uncorrect_error_status(pdev); mpt2sas_base_start_watchdog(ioc); scsi_unblock_requests(ioc->shost); } /** * _scsih_pci_mmio_enabled - Enable MMIO and dump debug registers * @pdev: pointer to PCI device */ static pci_ers_result_t _scsih_pci_mmio_enabled(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); printk(MPT2SAS_INFO_FMT "PCI error: mmio enabled callback!!\n", ioc->name); /* TODO - dump whatever for debugging purposes */ /* Request a slot reset. 
 */
    return PCI_ERS_RESULT_NEED_RESET;
}

static struct pci_error_handlers _scsih_err_handler = {
    .error_detected = _scsih_pci_error_detected,
    .mmio_enabled = _scsih_pci_mmio_enabled,
    .slot_reset = _scsih_pci_slot_reset,
    .resume = _scsih_pci_resume,
};

static struct pci_driver scsih_driver = {
    .name = MPT2SAS_DRIVER_NAME,
    .id_table = scsih_pci_table,
    .probe = _scsih_probe,
    .remove = __devexit_p(_scsih_remove),
    .shutdown = _scsih_shutdown,
    .err_handler = &_scsih_err_handler,
#ifdef CONFIG_PM
    .suspend = _scsih_suspend,
    .resume = _scsih_resume,
#endif
};

/* raid transport support */
static struct raid_function_template mpt2sas_raid_functions = {
    .cookie = &scsih_driver_template,
    .is_raid = _scsih_is_raid,
    .get_resync = _scsih_get_resync,
    .get_state = _scsih_get_state,
};

/**
 * _scsih_init - main entry point for this driver.
 *
 * Returns 0 success, anything else error.
 */
static int __init
_scsih_init(void)
{
    int error;

    mpt_ids = 0;
    printk(KERN_INFO "%s version %s loaded\n", MPT2SAS_DRIVER_NAME,
        MPT2SAS_DRIVER_VERSION);

    mpt2sas_transport_template =
        sas_attach_transport(&mpt2sas_transport_functions);
    if (!mpt2sas_transport_template)
        return -ENODEV;
    /* raid transport support */
    mpt2sas_raid_template = raid_class_attach(&mpt2sas_raid_functions);
    if (!mpt2sas_raid_template) {
        sas_release_transport(mpt2sas_transport_template);
        return -ENODEV;
    }

    mpt2sas_base_initialize_callback_handler();

    /* queuecommand callback handler */
    scsi_io_cb_idx = mpt2sas_base_register_callback_handler(_scsih_io_done);

    /* task management callback handler */
    tm_cb_idx = mpt2sas_base_register_callback_handler(_scsih_tm_done);

    /* base internal commands callback handler */
    base_cb_idx = mpt2sas_base_register_callback_handler(mpt2sas_base_done);
    port_enable_cb_idx = mpt2sas_base_register_callback_handler(
        mpt2sas_port_enable_done);

    /* transport internal commands callback handler */
    transport_cb_idx = mpt2sas_base_register_callback_handler(
        mpt2sas_transport_done);

    /* scsih internal commands callback handler */
    scsih_cb_idx = mpt2sas_base_register_callback_handler(_scsih_done);

    /* configuration page API internal commands callback handler */
    config_cb_idx = mpt2sas_base_register_callback_handler(
        mpt2sas_config_done);

    /* ctl module callback handler */
    ctl_cb_idx = mpt2sas_base_register_callback_handler(mpt2sas_ctl_done);

    tm_tr_cb_idx = mpt2sas_base_register_callback_handler(
        _scsih_tm_tr_complete);

    tm_tr_volume_cb_idx = mpt2sas_base_register_callback_handler(
        _scsih_tm_volume_tr_complete);

    tm_sas_control_cb_idx = mpt2sas_base_register_callback_handler(
        _scsih_sas_control_complete);

    mpt2sas_ctl_init();

    error = pci_register_driver(&scsih_driver);
    if (error) {
        /* raid transport support */
        raid_class_release(mpt2sas_raid_template);
        sas_release_transport(mpt2sas_transport_template);
    }

    return error;
}

/**
 * _scsih_exit - exit point for this driver (when it is a module).
 *
 * Return nothing.
*/ static void __exit _scsih_exit(void) { printk(KERN_INFO "mpt2sas version %s unloading\n", MPT2SAS_DRIVER_VERSION); pci_unregister_driver(&scsih_driver); mpt2sas_ctl_exit(); mpt2sas_base_release_callback_handler(scsi_io_cb_idx); mpt2sas_base_release_callback_handler(tm_cb_idx); mpt2sas_base_release_callback_handler(base_cb_idx); mpt2sas_base_release_callback_handler(port_enable_cb_idx); mpt2sas_base_release_callback_handler(transport_cb_idx); mpt2sas_base_release_callback_handler(scsih_cb_idx); mpt2sas_base_release_callback_handler(config_cb_idx); mpt2sas_base_release_callback_handler(ctl_cb_idx); mpt2sas_base_release_callback_handler(tm_tr_cb_idx); mpt2sas_base_release_callback_handler(tm_tr_volume_cb_idx); mpt2sas_base_release_callback_handler(tm_sas_control_cb_idx); /* raid transport support */ raid_class_release(mpt2sas_raid_template); sas_release_transport(mpt2sas_transport_template); } module_init(_scsih_init); module_exit(_scsih_exit);
gpl-2.0
steven676/ti-omap-encore-kernel3
drivers/uio/uio_pdrv.c
3625
2517
/* * drivers/uio/uio_pdrv.c * * Copyright (C) 2008 by Digi International Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ #include <linux/platform_device.h> #include <linux/uio_driver.h> #include <linux/stringify.h> #include <linux/slab.h> #define DRIVER_NAME "uio_pdrv" struct uio_platdata { struct uio_info *uioinfo; }; static int uio_pdrv_probe(struct platform_device *pdev) { struct uio_info *uioinfo = pdev->dev.platform_data; struct uio_platdata *pdata; struct uio_mem *uiomem; int ret = -ENODEV; int i; if (!uioinfo || !uioinfo->name || !uioinfo->version) { dev_dbg(&pdev->dev, "%s: err_uioinfo\n", __func__); goto err_uioinfo; } pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); if (!pdata) { ret = -ENOMEM; dev_dbg(&pdev->dev, "%s: err_alloc_pdata\n", __func__); goto err_alloc_pdata; } pdata->uioinfo = uioinfo; uiomem = &uioinfo->mem[0]; for (i = 0; i < pdev->num_resources; ++i) { struct resource *r = &pdev->resource[i]; if (r->flags != IORESOURCE_MEM) continue; if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) { dev_warn(&pdev->dev, "device has more than " __stringify(MAX_UIO_MAPS) " I/O memory resources.\n"); break; } uiomem->memtype = UIO_MEM_PHYS; uiomem->addr = r->start; uiomem->size = r->end - r->start + 1; ++uiomem; } while (uiomem < &uioinfo->mem[MAX_UIO_MAPS]) { uiomem->size = 0; ++uiomem; } pdata->uioinfo->priv = pdata; ret = uio_register_device(&pdev->dev, pdata->uioinfo); if (ret) { kfree(pdata); err_alloc_pdata: err_uioinfo: return ret; } platform_set_drvdata(pdev, pdata); return 0; } static int uio_pdrv_remove(struct platform_device *pdev) { struct uio_platdata *pdata = platform_get_drvdata(pdev); uio_unregister_device(pdata->uioinfo); kfree(pdata); return 0; } static struct platform_driver uio_pdrv = { .probe = uio_pdrv_probe, .remove = uio_pdrv_remove, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, }, }; static int __init uio_pdrv_init(void) { return platform_driver_register(&uio_pdrv); } static void __exit uio_pdrv_exit(void) { platform_driver_unregister(&uio_pdrv); } module_init(uio_pdrv_init); module_exit(uio_pdrv_exit); MODULE_AUTHOR("Uwe Kleine-Koenig"); MODULE_DESCRIPTION("Userspace I/O platform driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:" DRIVER_NAME);
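/*
 * Hedged usage sketch, not part of the driver above: board-support code
 * that uio_pdrv_probe() would bind against. The platform device name must
 * match DRIVER_NAME ("uio_pdrv"), the uio_info arrives via platform_data,
 * and each IORESOURCE_MEM resource becomes one UIO_MEM_PHYS mapping. The
 * demo_* names and the 0x40000000 address are made up for illustration.
 */
static struct uio_info demo_uio_info = {
	.name = "demo-uio",
	.version = "0.1",
};

static struct resource demo_uio_resources[] = {
	{
		.start = 0x40000000,	/* hypothetical register block */
		.end   = 0x40000fff,
		.flags = IORESOURCE_MEM,
	},
};

static struct platform_device demo_uio_device = {
	.name		= "uio_pdrv",
	.id		= -1,
	.resource	= demo_uio_resources,
	.num_resources	= ARRAY_SIZE(demo_uio_resources),
	.dev = {
		.platform_data = &demo_uio_info,
	},
};

/* A board init function would then simply call:
 *	platform_device_register(&demo_uio_device);
 */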
gpl-2.0
AOSPA-MARVEL/android_kernel_lge_msm7x27-3.0.x
arch/arm/mach-msm/board-mahimahi-rfkill.c
4649
2972
/* * Copyright (C) 2009 Google, Inc. * Copyright (C) 2009 HTC Corporation. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/delay.h> #include <linux/device.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/rfkill.h> #include <asm/gpio.h> #include <asm/mach-types.h> #include "board-mahimahi.h" static struct rfkill *bt_rfk; static const char bt_name[] = "bcm4329"; static int bluetooth_set_power(void *data, bool blocked) { if (!blocked) { gpio_direction_output(MAHIMAHI_GPIO_BT_RESET_N, 1); gpio_direction_output(MAHIMAHI_GPIO_BT_SHUTDOWN_N, 1); } else { gpio_direction_output(MAHIMAHI_GPIO_BT_SHUTDOWN_N, 0); gpio_direction_output(MAHIMAHI_GPIO_BT_RESET_N, 0); } return 0; } static struct rfkill_ops mahimahi_rfkill_ops = { .set_block = bluetooth_set_power, }; static int mahimahi_rfkill_probe(struct platform_device *pdev) { int rc = 0; bool default_state = true; /* off */ rc = gpio_request(MAHIMAHI_GPIO_BT_RESET_N, "bt_reset"); if (rc) goto err_gpio_reset; rc = gpio_request(MAHIMAHI_GPIO_BT_SHUTDOWN_N, "bt_shutdown"); if (rc) goto err_gpio_shutdown; bluetooth_set_power(NULL, default_state); bt_rfk = rfkill_alloc(bt_name, &pdev->dev, RFKILL_TYPE_BLUETOOTH, &mahimahi_rfkill_ops, NULL); if (!bt_rfk) { rc = -ENOMEM; goto err_rfkill_alloc; } rfkill_set_states(bt_rfk, default_state, false); /* userspace cannot take exclusive control */ rc = rfkill_register(bt_rfk); if (rc) goto err_rfkill_reg; return 0; err_rfkill_reg: rfkill_destroy(bt_rfk); err_rfkill_alloc: gpio_free(MAHIMAHI_GPIO_BT_SHUTDOWN_N); err_gpio_shutdown: gpio_free(MAHIMAHI_GPIO_BT_RESET_N); err_gpio_reset: return rc; } static int mahimahi_rfkill_remove(struct platform_device *dev) { rfkill_unregister(bt_rfk); rfkill_destroy(bt_rfk); gpio_free(MAHIMAHI_GPIO_BT_SHUTDOWN_N); gpio_free(MAHIMAHI_GPIO_BT_RESET_N); return 0; } static struct platform_driver mahimahi_rfkill_driver = { .probe = mahimahi_rfkill_probe, .remove = mahimahi_rfkill_remove, .driver = { .name = "mahimahi_rfkill", .owner = THIS_MODULE, }, }; static int __init mahimahi_rfkill_init(void) { if (!machine_is_mahimahi()) return 0; return platform_driver_register(&mahimahi_rfkill_driver); } static void __exit mahimahi_rfkill_exit(void) { platform_driver_unregister(&mahimahi_rfkill_driver); } module_init(mahimahi_rfkill_init); module_exit(mahimahi_rfkill_exit); MODULE_DESCRIPTION("mahimahi rfkill"); MODULE_AUTHOR("Nick Pelly <npelly@google.com>"); MODULE_LICENSE("GPL");
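/*
 * Hedged userspace sketch (not part of this board file) showing how the
 * switch registered above gets driven: writing a struct rfkill_event with
 * RFKILL_OP_CHANGE_ALL to /dev/rfkill soft-blocks every bluetooth rfkill,
 * which lands in bluetooth_set_power(..., true) and pulls both GPIOs low.
 * The field layout is from the rfkill uapi header as I recall it; verify
 * against <linux/rfkill.h> before relying on this.
 */
#include <fcntl.h>
#include <unistd.h>
#include <linux/rfkill.h>

static int demo_set_bt_block(int block)
{
	struct rfkill_event ev = {
		.type = RFKILL_TYPE_BLUETOOTH,
		.op   = RFKILL_OP_CHANGE_ALL,
		.soft = block ? 1 : 0,
	};
	int fd = open("/dev/rfkill", O_RDWR);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, &ev, sizeof(ev));
	close(fd);
	return n == (ssize_t)sizeof(ev) ? 0 : -1;
}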
gpl-2.0
FroX/Cutrekernel
arch/arm/mach-orion5x/tsx09-common.c
4905
3243
/* * QNAP TS-x09 Boards common functions * * Maintainers: Lennert Buytenhek <buytenh@marvell.com> * Byron Bradley <byron.bbradley@gmail.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/mv643xx_eth.h> #include <linux/timex.h> #include <linux/serial_reg.h> #include "tsx09-common.h" #include "common.h" /***************************************************************************** * QNAP TS-x09 specific power off method via UART1-attached PIC ****************************************************************************/ #define UART1_REG(x) (UART1_VIRT_BASE + ((UART_##x) << 2)) void qnap_tsx09_power_off(void) { /* 19200 baud divisor */ const unsigned divisor = ((orion5x_tclk + (8 * 19200)) / (16 * 19200)); pr_info("%s: triggering power-off...\n", __func__); /* hijack uart1 and reset into sane state (19200,8n1) */ writel(0x83, UART1_REG(LCR)); writel(divisor & 0xff, UART1_REG(DLL)); writel((divisor >> 8) & 0xff, UART1_REG(DLM)); writel(0x03, UART1_REG(LCR)); writel(0x00, UART1_REG(IER)); writel(0x00, UART1_REG(FCR)); writel(0x00, UART1_REG(MCR)); /* send the power-off command 'A' to PIC */ writel('A', UART1_REG(TX)); } /***************************************************************************** * Ethernet ****************************************************************************/ struct mv643xx_eth_platform_data qnap_tsx09_eth_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(8), }; static int __init qnap_tsx09_parse_hex_nibble(char n) { if (n >= '0' && n <= '9') return n - '0'; if (n >= 'A' && n <= 'F') return n - 'A' + 10; if (n >= 'a' && n <= 'f') return n - 'a' + 10; return -1; } static int __init qnap_tsx09_parse_hex_byte(const char *b) { int hi; int lo; hi = qnap_tsx09_parse_hex_nibble(b[0]); lo = qnap_tsx09_parse_hex_nibble(b[1]); if (hi < 0 || lo < 0) return -1; return (hi << 4) | lo; } static int __init qnap_tsx09_check_mac_addr(const char *addr_str) { u_int8_t addr[6]; int i; for (i = 0; i < 6; i++) { int byte; /* * Enforce "xx:xx:xx:xx:xx:xx\n" format. */ if (addr_str[(i * 3) + 2] != ((i < 5) ? ':' : '\n')) return -1; byte = qnap_tsx09_parse_hex_byte(addr_str + (i * 3)); if (byte < 0) return -1; addr[i] = byte; } printk(KERN_INFO "tsx09: found ethernet mac address "); for (i = 0; i < 6; i++) printk("%.2x%s", addr[i], (i < 5) ? ":" : ".\n"); memcpy(qnap_tsx09_eth_data.mac_addr, addr, 6); return 0; } /* * The 'NAS Config' flash partition has an ext2 filesystem which * contains a file that has the ethernet MAC address in plain text * (format "xx:xx:xx:xx:xx:xx\n"). */ void __init qnap_tsx09_find_mac_addr(u32 mem_base, u32 size) { unsigned long addr; for (addr = mem_base; addr < (mem_base + size); addr += 1024) { char *nor_page; int ret = 0; nor_page = ioremap(addr, 1024); if (nor_page != NULL) { ret = qnap_tsx09_check_mac_addr(nor_page); iounmap(nor_page); } if (ret == 0) break; } }
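/*
 * Standalone illustration (userspace, hypothetical demo_* names) of the
 * exact format qnap_tsx09_check_mac_addr() above enforces: byte i occupies
 * offsets i*3 and i*3+1, and the character at i*3+2 must be ':' between
 * bytes and '\n' after the last one.
 */
#include <ctype.h>
#include <stdio.h>

static int demo_check_mac(const char *s)
{
	int i;

	for (i = 0; i < 6; i++) {
		if (!isxdigit((unsigned char)s[i * 3]) ||
		    !isxdigit((unsigned char)s[i * 3 + 1]))
			return -1;
		if (s[i * 3 + 2] != ((i < 5) ? ':' : '\n'))
			return -1;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", demo_check_mac("00:08:9b:12:34:56\n"));	/* 0: valid */
	printf("%d\n", demo_check_mac("00:08:9b:12:34:56"));	/* -1: missing \n */
	return 0;
}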
gpl-2.0
snak3ater/kernel_msm
drivers/net/irda/ks959-sir.c
4905
28251
/***************************************************************************** * * Filename: ks959-sir.c * Version: 0.1.2 * Description: Irda KingSun KS-959 USB Dongle * Status: Experimental * Author: Alex Villacís Lasso <a_villacis@palosanto.com> * with help from Domen Puncer <domen@coderock.org> * * Based on stir4200, mcs7780, kingsun-sir drivers. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * *****************************************************************************/ /* * Following is my most current (2007-07-17) understanding of how the Kingsun * KS-959 dongle is supposed to work. This information was deduced by * reverse-engineering and examining the USB traffic captured with USBSnoopy * from the WinXP driver. Feel free to update here as more of the dongle is * known. * * My most sincere thanks must go to Domen Puncer <domen@coderock.org> for * invaluable help in cracking the obfuscation and padding required for this * dongle. * * General: This dongle exposes one interface with one interrupt IN endpoint. * However, the interrupt endpoint is NOT used at all for this dongle. Instead, * this dongle uses control transfers for everything, including sending and * receiving the IrDA frame data. Apparently the interrupt endpoint is just a * dummy to ensure the dongle has a valid interface to present to the PC. And I * thought the DonShine dongle was weird... In addition, this dongle uses * obfuscation (?!?!), applied at the USB level, to hide the traffic, both sent * and received, from the dongle. I call it obfuscation because the XOR keying * and padding required to produce USB traffic acceptable for the dongle cannot * be explained by any other technical requirement. * * Transmission: To transmit an IrDA frame, the driver must prepare a control * URB with the following as a setup packet: * bRequestType USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE * bRequest 0x09 * wValue <length of valid data before padding, little endian> * wIndex 0x0000 * wLength <length of padded data> * The payload packet must be manually wrapped and escaped (as in stir4200.c), * then padded and obfuscated before being sent. Both padding and obfuscation * are implemented in the procedure obfuscate_tx_buffer(). Suffice to say, the * designer/programmer of the dongle used his name as a source for the * obfuscation. WTF?! * Apparently the dongle cannot handle payloads larger than 256 bytes. The * driver has to perform fragmentation in order to send anything larger than * this limit. * * Reception: To receive data, the driver must poll the dongle regularly (like * kingsun-sir.c) with control URBs and the following as a setup packet: * bRequestType USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE * bRequest 0x01 * wValue 0x0200 * wIndex 0x0000 * wLength 0x0800 (size of available buffer) * If there is data to be read, it will be returned as the response payload. * This data is (apparently) not padded, but it is obfuscated.
To de-obfuscate * it, the driver must XOR every byte, in sequence, with a value that starts at * 1 and is incremented with each byte processed, and then with 0x55. The value * incremented with each byte processed overflows as an unsigned char. The * resulting bytes form a wrapped SIR frame that is unwrapped and unescaped * as in stir4200.c. The incremented value is NOT reset with each frame, but is * kept across the entire session with the dongle. Also, the dongle inserts an * extra garbage byte with value 0x95 (after decoding) every 0xff bytes, which * must be skipped. * * Speed change: To change the speed of the dongle, the driver prepares a * control URB with the following as a setup packet: * bRequestType USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE * bRequest 0x09 * wValue 0x0200 * wIndex 0x0001 * wLength 0x0008 (length of the payload) * The payload is an 8-byte record, apparently identical to the one used in * drivers/usb/serial/cypress_m8.c to change speed: * __u32 baudSpeed; * unsigned int dataBits : 2; // 0 - 5 bits 3 - 8 bits * unsigned int : 1; * unsigned int stopBits : 1; * unsigned int parityEnable : 1; * unsigned int parityType : 1; * unsigned int : 1; * unsigned int reset : 1; * unsigned char reserved[3]; // set to 0 * * For now only SIR speeds have been observed with this dongle. Therefore, * nothing is known on what changes (if any) must be done to frame wrapping / * unwrapping for higher than SIR speeds. This driver assumes no change is * necessary and announces support for all the way to 57600 bps. Although the * package announces support for up to 4MBps, tests with a Sony Ericsson K300 * phone show corruption when receiving large frames at 115200 bps, the highest * speed announced by the phone. However, transmission at 115200 bps is OK. Go * figure. Since I don't know whether the phone or the dongle is at fault, max * announced speed is 57600 bps until someone produces a device that can run * at higher speeds with this dongle.
*/ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/device.h> #include <linux/crc32.h> #include <asm/unaligned.h> #include <asm/byteorder.h> #include <asm/uaccess.h> #include <net/irda/irda.h> #include <net/irda/wrapper.h> #include <net/irda/crc.h> #define KS959_VENDOR_ID 0x07d0 #define KS959_PRODUCT_ID 0x4959 /* These are the currently known USB ids */ static struct usb_device_id dongles[] = { /* KingSun Co,Ltd IrDA/USB Bridge */ {USB_DEVICE(KS959_VENDOR_ID, KS959_PRODUCT_ID)}, {} }; MODULE_DEVICE_TABLE(usb, dongles); #define KINGSUN_MTT 0x07 #define KINGSUN_REQ_RECV 0x01 #define KINGSUN_REQ_SEND 0x09 #define KINGSUN_RCV_FIFO_SIZE 2048 /* Max length we can receive */ #define KINGSUN_SND_FIFO_SIZE 2048 /* Max packet we can send */ #define KINGSUN_SND_PACKET_SIZE 256 /* Max packet dongle can handle */ struct ks959_speedparams { __le32 baudrate; /* baud rate, little endian */ __u8 flags; __u8 reserved[3]; } __packed; #define KS_DATA_5_BITS 0x00 #define KS_DATA_6_BITS 0x01 #define KS_DATA_7_BITS 0x02 #define KS_DATA_8_BITS 0x03 #define KS_STOP_BITS_1 0x00 #define KS_STOP_BITS_2 0x08 #define KS_PAR_DISABLE 0x00 #define KS_PAR_EVEN 0x10 #define KS_PAR_ODD 0x30 #define KS_RESET 0x80 struct ks959_cb { struct usb_device *usbdev; /* init: probe_irda */ struct net_device *netdev; /* network layer */ struct irlap_cb *irlap; /* The link layer we are bound to */ struct qos_info qos; struct usb_ctrlrequest *tx_setuprequest; struct urb *tx_urb; __u8 *tx_buf_clear; unsigned int tx_buf_clear_used; unsigned int tx_buf_clear_sent; __u8 *tx_buf_xored; struct usb_ctrlrequest *rx_setuprequest; struct urb *rx_urb; __u8 *rx_buf; __u8 rx_variable_xormask; iobuff_t rx_unwrap_buff; struct timeval rx_time; struct usb_ctrlrequest *speed_setuprequest; struct urb *speed_urb; struct ks959_speedparams speedparams; unsigned int new_speed; spinlock_t lock; int receiving; }; /* Procedure to perform the obfuscation/padding expected by the dongle * * buf_cleartext (IN) Cleartext version of the IrDA frame to transmit * len_cleartext (IN) Length of the cleartext version of IrDA frame * buf_xoredtext (OUT) Obfuscated version of frame built by proc * len_maxbuf (OUT) Maximum space available at buf_xoredtext * * (return) length of obfuscated frame with padding * * If not enough space (as indicated by len_maxbuf vs. required padding), * zero is returned * * The value of lookup_string is actually a required portion of the algorithm. * Seems the designer of the dongle wanted to state who exactly is responsible * for implementing obfuscation.
Send your best (or other) wishes to him ]:-) */ static unsigned int obfuscate_tx_buffer(const __u8 * buf_cleartext, unsigned int len_cleartext, __u8 * buf_xoredtext, unsigned int len_maxbuf) { unsigned int len_xoredtext; /* Calculate required length with padding, check for necessary space */ len_xoredtext = ((len_cleartext + 7) & ~0x7) + 0x10; if (len_xoredtext <= len_maxbuf) { static const __u8 lookup_string[] = "wangshuofei19710"; __u8 xor_mask; /* Unlike the WinXP driver, we *do* clear out the padding */ memset(buf_xoredtext, 0, len_xoredtext); xor_mask = lookup_string[(len_cleartext & 0x0f) ^ 0x06] ^ 0x55; while (len_cleartext-- > 0) { *buf_xoredtext++ = *buf_cleartext++ ^ xor_mask; } } else { len_xoredtext = 0; } return len_xoredtext; } /* Callback transmission routine */ static void ks959_speed_irq(struct urb *urb) { /* unlink, shutdown, unplug, other nasties */ if (urb->status != 0) { err("ks959_speed_irq: urb asynchronously failed - %d", urb->status); } } /* Send a control request to change speed of the dongle */ static int ks959_change_speed(struct ks959_cb *kingsun, unsigned speed) { static unsigned int supported_speeds[] = { 2400, 9600, 19200, 38400, 57600, 115200, 576000, 1152000, 4000000, 0 }; int err; unsigned int i; if (kingsun->speed_setuprequest == NULL || kingsun->speed_urb == NULL) return -ENOMEM; /* Check that requested speed is among the supported ones */ for (i = 0; supported_speeds[i] && supported_speeds[i] != speed; i++) ; if (supported_speeds[i] == 0) return -EOPNOTSUPP; memset(&(kingsun->speedparams), 0, sizeof(struct ks959_speedparams)); kingsun->speedparams.baudrate = cpu_to_le32(speed); kingsun->speedparams.flags = KS_DATA_8_BITS; /* speed_setuprequest pre-filled in ks959_probe */ usb_fill_control_urb(kingsun->speed_urb, kingsun->usbdev, usb_sndctrlpipe(kingsun->usbdev, 0), (unsigned char *)kingsun->speed_setuprequest, &(kingsun->speedparams), sizeof(struct ks959_speedparams), ks959_speed_irq, kingsun); kingsun->speed_urb->status = 0; err = usb_submit_urb(kingsun->speed_urb, GFP_ATOMIC); return err; } /* Submit one fragment of an IrDA frame to the dongle */ static void ks959_send_irq(struct urb *urb); static int ks959_submit_tx_fragment(struct ks959_cb *kingsun) { unsigned int padlen; unsigned int wraplen; int ret; /* Check whether current plaintext can produce a padded buffer that fits within the range handled by the dongle */ wraplen = (KINGSUN_SND_PACKET_SIZE & ~0x7) - 0x10; if (wraplen > kingsun->tx_buf_clear_used) wraplen = kingsun->tx_buf_clear_used; /* Perform dongle obfuscation. Also remove the portion of the frame that was just obfuscated and will now be sent to the dongle. */ padlen = obfuscate_tx_buffer(kingsun->tx_buf_clear, wraplen, kingsun->tx_buf_xored, KINGSUN_SND_PACKET_SIZE); /* Calculate how much data can be transmitted in this urb */ kingsun->tx_setuprequest->wValue = cpu_to_le16(wraplen); kingsun->tx_setuprequest->wLength = cpu_to_le16(padlen); /* Rest of the fields were filled in ks959_probe */ usb_fill_control_urb(kingsun->tx_urb, kingsun->usbdev, usb_sndctrlpipe(kingsun->usbdev, 0), (unsigned char *)kingsun->tx_setuprequest, kingsun->tx_buf_xored, padlen, ks959_send_irq, kingsun); kingsun->tx_urb->status = 0; ret = usb_submit_urb(kingsun->tx_urb, GFP_ATOMIC); /* Remember how much data was sent, in order to update at callback */ kingsun->tx_buf_clear_sent = (ret == 0) ? 
wraplen : 0; return ret; } /* Callback transmission routine */ static void ks959_send_irq(struct urb *urb) { struct ks959_cb *kingsun = urb->context; struct net_device *netdev = kingsun->netdev; int ret = 0; /* in process of stopping, just drop data */ if (!netif_running(kingsun->netdev)) { err("ks959_send_irq: Network not running!"); return; } /* unlink, shutdown, unplug, other nasties */ if (urb->status != 0) { err("ks959_send_irq: urb asynchronously failed - %d", urb->status); return; } if (kingsun->tx_buf_clear_used > 0) { /* Update data remaining to be sent */ if (kingsun->tx_buf_clear_sent < kingsun->tx_buf_clear_used) { memmove(kingsun->tx_buf_clear, kingsun->tx_buf_clear + kingsun->tx_buf_clear_sent, kingsun->tx_buf_clear_used - kingsun->tx_buf_clear_sent); } kingsun->tx_buf_clear_used -= kingsun->tx_buf_clear_sent; kingsun->tx_buf_clear_sent = 0; if (kingsun->tx_buf_clear_used > 0) { /* There is more data to be sent */ if ((ret = ks959_submit_tx_fragment(kingsun)) != 0) { err("ks959_send_irq: failed tx_urb submit: %d", ret); switch (ret) { case -ENODEV: case -EPIPE: break; default: netdev->stats.tx_errors++; netif_start_queue(netdev); } } } else { /* All data sent, send next speed && wake network queue */ if (kingsun->new_speed != -1 && cpu_to_le32(kingsun->new_speed) != kingsun->speedparams.baudrate) ks959_change_speed(kingsun, kingsun->new_speed); netif_wake_queue(netdev); } } } /* * Called from net/core when new frame is available. */ static netdev_tx_t ks959_hard_xmit(struct sk_buff *skb, struct net_device *netdev) { struct ks959_cb *kingsun; unsigned int wraplen; int ret = 0; netif_stop_queue(netdev); /* the IRDA wrapping routines don't deal with non linear skb */ SKB_LINEAR_ASSERT(skb); kingsun = netdev_priv(netdev); spin_lock(&kingsun->lock); kingsun->new_speed = irda_get_next_speed(skb); /* Append data to the end of whatever data remains to be transmitted */ wraplen = async_wrap_skb(skb, kingsun->tx_buf_clear, KINGSUN_SND_FIFO_SIZE); kingsun->tx_buf_clear_used = wraplen; if ((ret = ks959_submit_tx_fragment(kingsun)) != 0) { err("ks959_hard_xmit: failed tx_urb submit: %d", ret); switch (ret) { case -ENODEV: case -EPIPE: break; default: netdev->stats.tx_errors++; netif_start_queue(netdev); } } else { netdev->stats.tx_packets++; netdev->stats.tx_bytes += skb->len; } dev_kfree_skb(skb); spin_unlock(&kingsun->lock); return NETDEV_TX_OK; } /* Receive callback function */ static void ks959_rcv_irq(struct urb *urb) { struct ks959_cb *kingsun = urb->context; int ret; /* in process of stopping, just drop data */ if (!netif_running(kingsun->netdev)) { kingsun->receiving = 0; return; } /* unlink, shutdown, unplug, other nasties */ if (urb->status != 0) { err("kingsun_rcv_irq: urb asynchronously failed - %d", urb->status); kingsun->receiving = 0; return; } if (urb->actual_length > 0) { __u8 *bytes = urb->transfer_buffer; unsigned int i; for (i = 0; i < urb->actual_length; i++) { /* De-obfuscation implemented here: variable portion of xormask is incremented, and then used with the encoded byte for the XOR. The result of the operation is used to unwrap the SIR frame. */ kingsun->rx_variable_xormask++; bytes[i] = bytes[i] ^ kingsun->rx_variable_xormask ^ 0x55u; /* rx_variable_xormask doubles as an index counter so we can skip the byte at 0xff (wrapped around to 0). 
*/ if (kingsun->rx_variable_xormask != 0) { async_unwrap_char(kingsun->netdev, &kingsun->netdev->stats, &kingsun->rx_unwrap_buff, bytes[i]); } } do_gettimeofday(&kingsun->rx_time); kingsun->receiving = (kingsun->rx_unwrap_buff.state != OUTSIDE_FRAME) ? 1 : 0; } /* This urb has already been filled in kingsun_net_open. Setup packet must be re-filled, but it is assumed that urb keeps the pointer to the initial setup packet, as well as the payload buffer. Setup packet is already pre-filled at ks959_probe. */ urb->status = 0; ret = usb_submit_urb(urb, GFP_ATOMIC); } /* * Function kingsun_net_open (dev) * * Network device is taken up. Usually this is done by "ifconfig irda0 up" */ static int ks959_net_open(struct net_device *netdev) { struct ks959_cb *kingsun = netdev_priv(netdev); int err = -ENOMEM; char hwname[16]; /* At this point, urbs are NULL, and skb is NULL (see kingsun_probe) */ kingsun->receiving = 0; /* Initialize for SIR to copy data directly into skb. */ kingsun->rx_unwrap_buff.in_frame = FALSE; kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME; kingsun->rx_unwrap_buff.truesize = IRDA_SKB_MAX_MTU; kingsun->rx_unwrap_buff.skb = dev_alloc_skb(IRDA_SKB_MAX_MTU); if (!kingsun->rx_unwrap_buff.skb) goto free_mem; skb_reserve(kingsun->rx_unwrap_buff.skb, 1); kingsun->rx_unwrap_buff.head = kingsun->rx_unwrap_buff.skb->data; do_gettimeofday(&kingsun->rx_time); kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!kingsun->rx_urb) goto free_mem; kingsun->tx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!kingsun->tx_urb) goto free_mem; kingsun->speed_urb = usb_alloc_urb(0, GFP_KERNEL); if (!kingsun->speed_urb) goto free_mem; /* Initialize speed for dongle */ kingsun->new_speed = 9600; err = ks959_change_speed(kingsun, 9600); if (err < 0) goto free_mem; /* * Now that everything should be initialized properly, * Open new IrLAP layer instance to take care of us... */ sprintf(hwname, "usb#%d", kingsun->usbdev->devnum); kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname); if (!kingsun->irlap) { err("ks959-sir: irlap_open failed"); goto free_mem; } /* Start reception. Setup request already pre-filled in ks959_probe */ usb_fill_control_urb(kingsun->rx_urb, kingsun->usbdev, usb_rcvctrlpipe(kingsun->usbdev, 0), (unsigned char *)kingsun->rx_setuprequest, kingsun->rx_buf, KINGSUN_RCV_FIFO_SIZE, ks959_rcv_irq, kingsun); kingsun->rx_urb->status = 0; err = usb_submit_urb(kingsun->rx_urb, GFP_KERNEL); if (err) { err("ks959-sir: first urb-submit failed: %d", err); goto close_irlap; } netif_start_queue(netdev); /* Situation at this point: - all work buffers allocated - urbs allocated and ready to fill - max rx packet known (in max_rx) - unwrap state machine initialized, in state outside of any frame - receive request in progress - IrLAP layer started, about to hand over packets to send */ return 0; close_irlap: irlap_close(kingsun->irlap); free_mem: usb_free_urb(kingsun->speed_urb); kingsun->speed_urb = NULL; usb_free_urb(kingsun->tx_urb); kingsun->tx_urb = NULL; usb_free_urb(kingsun->rx_urb); kingsun->rx_urb = NULL; if (kingsun->rx_unwrap_buff.skb) { kfree_skb(kingsun->rx_unwrap_buff.skb); kingsun->rx_unwrap_buff.skb = NULL; kingsun->rx_unwrap_buff.head = NULL; } return err; } /* * Function kingsun_net_close (kingsun) * * Network device is taken down. 
Usually this is done by * "ifconfig irda0 down" */ static int ks959_net_close(struct net_device *netdev) { struct ks959_cb *kingsun = netdev_priv(netdev); /* Stop transmit processing */ netif_stop_queue(netdev); /* Mop up receive && transmit urb's */ usb_kill_urb(kingsun->tx_urb); usb_free_urb(kingsun->tx_urb); kingsun->tx_urb = NULL; usb_kill_urb(kingsun->speed_urb); usb_free_urb(kingsun->speed_urb); kingsun->speed_urb = NULL; usb_kill_urb(kingsun->rx_urb); usb_free_urb(kingsun->rx_urb); kingsun->rx_urb = NULL; kfree_skb(kingsun->rx_unwrap_buff.skb); kingsun->rx_unwrap_buff.skb = NULL; kingsun->rx_unwrap_buff.head = NULL; kingsun->rx_unwrap_buff.in_frame = FALSE; kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME; kingsun->receiving = 0; /* Stop and remove instance of IrLAP */ if (kingsun->irlap) irlap_close(kingsun->irlap); kingsun->irlap = NULL; return 0; } /* * IOCTLs : Extra out-of-band network commands... */ static int ks959_net_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) { struct if_irda_req *irq = (struct if_irda_req *)rq; struct ks959_cb *kingsun = netdev_priv(netdev); int ret = 0; switch (cmd) { case SIOCSBANDWIDTH: /* Set bandwidth */ if (!capable(CAP_NET_ADMIN)) return -EPERM; /* Check if the device is still there */ if (netif_device_present(kingsun->netdev)) return ks959_change_speed(kingsun, irq->ifr_baudrate); break; case SIOCSMEDIABUSY: /* Set media busy */ if (!capable(CAP_NET_ADMIN)) return -EPERM; /* Check if the IrDA stack is still there */ if (netif_running(kingsun->netdev)) irda_device_set_media_busy(kingsun->netdev, TRUE); break; case SIOCGRECEIVING: /* Only approximately true */ irq->ifr_receiving = kingsun->receiving; break; default: ret = -EOPNOTSUPP; } return ret; } static const struct net_device_ops ks959_ops = { .ndo_start_xmit = ks959_hard_xmit, .ndo_open = ks959_net_open, .ndo_stop = ks959_net_close, .ndo_do_ioctl = ks959_net_ioctl, }; /* * This routine is called by the USB subsystem for each new device * in the system. We need to check if the device is ours, and in * this case start handling it. */ static int ks959_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *dev = interface_to_usbdev(intf); struct ks959_cb *kingsun = NULL; struct net_device *net = NULL; int ret = -ENOMEM; /* Allocate network device container. 
*/ net = alloc_irdadev(sizeof(*kingsun)); if (!net) goto err_out1; SET_NETDEV_DEV(net, &intf->dev); kingsun = netdev_priv(net); kingsun->netdev = net; kingsun->usbdev = dev; kingsun->irlap = NULL; kingsun->tx_setuprequest = NULL; kingsun->tx_urb = NULL; kingsun->tx_buf_clear = NULL; kingsun->tx_buf_xored = NULL; kingsun->tx_buf_clear_used = 0; kingsun->tx_buf_clear_sent = 0; kingsun->rx_setuprequest = NULL; kingsun->rx_urb = NULL; kingsun->rx_buf = NULL; kingsun->rx_variable_xormask = 0; kingsun->rx_unwrap_buff.in_frame = FALSE; kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME; kingsun->rx_unwrap_buff.skb = NULL; kingsun->receiving = 0; spin_lock_init(&kingsun->lock); kingsun->speed_setuprequest = NULL; kingsun->speed_urb = NULL; kingsun->speedparams.baudrate = 0; /* Allocate input buffer */ kingsun->rx_buf = kmalloc(KINGSUN_RCV_FIFO_SIZE, GFP_KERNEL); if (!kingsun->rx_buf) goto free_mem; /* Allocate input setup packet */ kingsun->rx_setuprequest = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL); if (!kingsun->rx_setuprequest) goto free_mem; kingsun->rx_setuprequest->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE; kingsun->rx_setuprequest->bRequest = KINGSUN_REQ_RECV; kingsun->rx_setuprequest->wValue = cpu_to_le16(0x0200); kingsun->rx_setuprequest->wIndex = 0; kingsun->rx_setuprequest->wLength = cpu_to_le16(KINGSUN_RCV_FIFO_SIZE); /* Allocate output buffer */ kingsun->tx_buf_clear = kmalloc(KINGSUN_SND_FIFO_SIZE, GFP_KERNEL); if (!kingsun->tx_buf_clear) goto free_mem; kingsun->tx_buf_xored = kmalloc(KINGSUN_SND_PACKET_SIZE, GFP_KERNEL); if (!kingsun->tx_buf_xored) goto free_mem; /* Allocate and initialize output setup packet */ kingsun->tx_setuprequest = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL); if (!kingsun->tx_setuprequest) goto free_mem; kingsun->tx_setuprequest->bRequestType = USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE; kingsun->tx_setuprequest->bRequest = KINGSUN_REQ_SEND; kingsun->tx_setuprequest->wValue = 0; kingsun->tx_setuprequest->wIndex = 0; kingsun->tx_setuprequest->wLength = 0; /* Allocate and initialize speed setup packet */ kingsun->speed_setuprequest = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL); if (!kingsun->speed_setuprequest) goto free_mem; kingsun->speed_setuprequest->bRequestType = USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE; kingsun->speed_setuprequest->bRequest = KINGSUN_REQ_SEND; kingsun->speed_setuprequest->wValue = cpu_to_le16(0x0200); kingsun->speed_setuprequest->wIndex = cpu_to_le16(0x0001); kingsun->speed_setuprequest->wLength = cpu_to_le16(sizeof(struct ks959_speedparams)); printk(KERN_INFO "KingSun KS-959 IRDA/USB found at address %d, " "Vendor: %x, Product: %x\n", dev->devnum, le16_to_cpu(dev->descriptor.idVendor), le16_to_cpu(dev->descriptor.idProduct)); /* Initialize QoS for this device */ irda_init_max_qos_capabilies(&kingsun->qos); /* Baud rates known to be supported. Please uncomment if devices (other than a Sony Ericsson K300 phone) can be shown to support higher speed with this dongle.
*/ kingsun->qos.baud_rate.bits = IR_2400 | IR_9600 | IR_19200 | IR_38400 | IR_57600; kingsun->qos.min_turn_time.bits &= KINGSUN_MTT; irda_qos_bits_to_value(&kingsun->qos); /* Override the network functions we need to use */ net->netdev_ops = &ks959_ops; ret = register_netdev(net); if (ret != 0) goto free_mem; dev_info(&net->dev, "IrDA: Registered KingSun KS-959 device %s\n", net->name); usb_set_intfdata(intf, kingsun); /* Situation at this point: - all work buffers allocated - setup requests pre-filled - urbs not allocated, set to NULL - max rx packet known (is KINGSUN_FIFO_SIZE) - unwrap state machine (partially) initialized, but skb == NULL */ return 0; free_mem: kfree(kingsun->speed_setuprequest); kfree(kingsun->tx_setuprequest); kfree(kingsun->tx_buf_xored); kfree(kingsun->tx_buf_clear); kfree(kingsun->rx_setuprequest); kfree(kingsun->rx_buf); free_netdev(net); err_out1: return ret; } /* * The current device is removed, the USB layer tell us to shut it down... */ static void ks959_disconnect(struct usb_interface *intf) { struct ks959_cb *kingsun = usb_get_intfdata(intf); if (!kingsun) return; unregister_netdev(kingsun->netdev); /* Mop up receive && transmit urb's */ if (kingsun->speed_urb != NULL) { usb_kill_urb(kingsun->speed_urb); usb_free_urb(kingsun->speed_urb); kingsun->speed_urb = NULL; } if (kingsun->tx_urb != NULL) { usb_kill_urb(kingsun->tx_urb); usb_free_urb(kingsun->tx_urb); kingsun->tx_urb = NULL; } if (kingsun->rx_urb != NULL) { usb_kill_urb(kingsun->rx_urb); usb_free_urb(kingsun->rx_urb); kingsun->rx_urb = NULL; } kfree(kingsun->speed_setuprequest); kfree(kingsun->tx_setuprequest); kfree(kingsun->tx_buf_xored); kfree(kingsun->tx_buf_clear); kfree(kingsun->rx_setuprequest); kfree(kingsun->rx_buf); free_netdev(kingsun->netdev); usb_set_intfdata(intf, NULL); } #ifdef CONFIG_PM /* USB suspend, so power off the transmitter/receiver */ static int ks959_suspend(struct usb_interface *intf, pm_message_t message) { struct ks959_cb *kingsun = usb_get_intfdata(intf); netif_device_detach(kingsun->netdev); if (kingsun->speed_urb != NULL) usb_kill_urb(kingsun->speed_urb); if (kingsun->tx_urb != NULL) usb_kill_urb(kingsun->tx_urb); if (kingsun->rx_urb != NULL) usb_kill_urb(kingsun->rx_urb); return 0; } /* Coming out of suspend, so reset hardware */ static int ks959_resume(struct usb_interface *intf) { struct ks959_cb *kingsun = usb_get_intfdata(intf); if (kingsun->rx_urb != NULL) { /* Setup request already filled in ks959_probe */ usb_submit_urb(kingsun->rx_urb, GFP_KERNEL); } netif_device_attach(kingsun->netdev); return 0; } #endif /* * USB device callbacks */ static struct usb_driver irda_driver = { .name = "ks959-sir", .probe = ks959_probe, .disconnect = ks959_disconnect, .id_table = dongles, #ifdef CONFIG_PM .suspend = ks959_suspend, .resume = ks959_resume, #endif }; module_usb_driver(irda_driver); MODULE_AUTHOR("Alex Villacís Lasso <a_villacis@palosanto.com>"); MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun KS-959"); MODULE_LICENSE("GPL");
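/*
 * Hedged standalone sketch (hypothetical demo_* names) of the receive
 * de-obfuscation described in the header comment and implemented in
 * ks959_rcv_irq() above: every byte is XORed with an incrementing 8-bit
 * counter and with 0x55, the counter persists across frames, and the byte
 * seen when the counter wraps to 0 is dongle-inserted garbage to drop.
 */
static unsigned char demo_xormask;	/* persists across calls, like rx_variable_xormask */

/* Decodes buf in place and returns how many bytes were kept. */
static unsigned int demo_deobfuscate(unsigned char *buf, unsigned int len)
{
	unsigned int i, kept = 0;

	for (i = 0; i < len; i++) {
		demo_xormask++;
		if (demo_xormask == 0)
			continue;	/* garbage byte inserted by the dongle */
		buf[kept++] = buf[i] ^ demo_xormask ^ 0x55;
	}
	return kept;
}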
gpl-2.0
GeyerA/kernel_hammerhead
drivers/media/video/gspca/finepix.c
4905
7867
/* * Fujifilm Finepix subdriver * * Copyright (C) 2008 Frank Zago * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "finepix" #include "gspca.h" MODULE_AUTHOR("Frank Zago <frank@zago.net>"); MODULE_DESCRIPTION("Fujifilm FinePix USB V4L2 driver"); MODULE_LICENSE("GPL"); /* Default timeout, in ms */ #define FPIX_TIMEOUT 250 /* Maximum transfer size to use. The Windows driver reads by chunks of * 0x2000 bytes, so do the same. Note: reading more seems to work * too. */ #define FPIX_MAX_TRANSFER 0x2000 /* Structure to hold all of our device specific stuff */ struct usb_fpix { struct gspca_dev gspca_dev; /* !! must be the first item */ struct work_struct work_struct; struct workqueue_struct *work_thread; }; /* Delay after which to claim the next frame. If the delay is too small, * the camera will return old frames. On the 4800Z, 20ms is bad, 25ms * will fail every 4 or 5 frames, but 30ms is perfect. On the A210, * 30ms is bad while 35ms is perfect. */ #define NEXT_FRAME_DELAY 35 /* These cameras only support 320x240. */ static const struct v4l2_pix_format fpix_mode[1] = { { 320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0} }; /* send a command to the webcam */ static int command(struct gspca_dev *gspca_dev, int order) /* 0: reset, 1: frame request */ { static u8 order_values[2][12] = { {0xc6, 0, 0, 0, 0, 0, 0, 0, 0x20, 0, 0, 0}, /* reset */ {0xd3, 0, 0, 0, 0, 0, 0, 0x01, 0, 0, 0, 0}, /* fr req */ }; memcpy(gspca_dev->usb_buf, order_values[order], 12); return usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), USB_REQ_GET_STATUS, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, 0, gspca_dev->usb_buf, 12, FPIX_TIMEOUT); } /* workqueue */ static void dostream(struct work_struct *work) { struct usb_fpix *dev = container_of(work, struct usb_fpix, work_struct); struct gspca_dev *gspca_dev = &dev->gspca_dev; struct urb *urb = gspca_dev->urb[0]; u8 *data = urb->transfer_buffer; int ret = 0; int len; /* synchronize with the main driver */ mutex_lock(&gspca_dev->usb_lock); mutex_unlock(&gspca_dev->usb_lock); PDEBUG(D_STREAM, "dostream started"); /* loop reading a frame */ again: while (gspca_dev->present && gspca_dev->streaming) { /* request a frame */ mutex_lock(&gspca_dev->usb_lock); ret = command(gspca_dev, 1); mutex_unlock(&gspca_dev->usb_lock); if (ret < 0) break; if (!gspca_dev->present || !gspca_dev->streaming) break; /* the frame comes in parts */ for (;;) { ret = usb_bulk_msg(gspca_dev->dev, urb->pipe, data, FPIX_MAX_TRANSFER, &len, FPIX_TIMEOUT); if (ret < 0) { /* Most of the time we get a timeout * error. Just restart.
*/ goto again; } if (!gspca_dev->present || !gspca_dev->streaming) goto out; if (len < FPIX_MAX_TRANSFER || (data[len - 2] == 0xff && data[len - 1] == 0xd9)) { /* If the result is less than what was asked * for, then it's the end of the * frame. Sometimes the jpeg is not complete, * but there's nothing we can do. We also end * here if the jpeg ends right at the end * of the frame. */ gspca_frame_add(gspca_dev, LAST_PACKET, data, len); break; } /* got a partial image */ gspca_frame_add(gspca_dev, gspca_dev->last_packet_type == LAST_PACKET ? FIRST_PACKET : INTER_PACKET, data, len); } /* We must wait before trying to read the next * frame. If we don't, or if the delay is too short, * the camera will disconnect. */ msleep(NEXT_FRAME_DELAY); } out: PDEBUG(D_STREAM, "dostream stopped"); } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct usb_fpix *dev = (struct usb_fpix *) gspca_dev; struct cam *cam = &gspca_dev->cam; cam->cam_mode = fpix_mode; cam->nmodes = 1; cam->bulk = 1; cam->bulk_size = FPIX_MAX_TRANSFER; INIT_WORK(&dev->work_struct, dostream); return 0; } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { return 0; } /* start the camera */ static int sd_start(struct gspca_dev *gspca_dev) { struct usb_fpix *dev = (struct usb_fpix *) gspca_dev; int ret, len; /* Init the device */ ret = command(gspca_dev, 0); if (ret < 0) { pr_err("init failed %d\n", ret); return ret; } /* Read the result of the command. Ignore the result, for it * varies with the device. */ ret = usb_bulk_msg(gspca_dev->dev, gspca_dev->urb[0]->pipe, gspca_dev->urb[0]->transfer_buffer, FPIX_MAX_TRANSFER, &len, FPIX_TIMEOUT); if (ret < 0) { pr_err("usb_bulk_msg failed %d\n", ret); return ret; } /* Request a frame, but don't read it */ ret = command(gspca_dev, 1); if (ret < 0) { pr_err("frame request failed %d\n", ret); return ret; } /* Again, reset bulk in endpoint */ usb_clear_halt(gspca_dev->dev, gspca_dev->urb[0]->pipe); /* Start the workqueue function to do the streaming */ dev->work_thread = create_singlethread_workqueue(MODULE_NAME); queue_work(dev->work_thread, &dev->work_struct); return 0; } /* called on streamoff with alt==0 and on disconnect */ /* the usb_lock is held at entry - restore on exit */ static void sd_stop0(struct gspca_dev *gspca_dev) { struct usb_fpix *dev = (struct usb_fpix *) gspca_dev; /* wait for the work queue to terminate */ mutex_unlock(&gspca_dev->usb_lock); destroy_workqueue(dev->work_thread); mutex_lock(&gspca_dev->usb_lock); dev->work_thread = NULL; } /* Table of supported USB devices */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x04cb, 0x0104)}, {USB_DEVICE(0x04cb, 0x0109)}, {USB_DEVICE(0x04cb, 0x010b)}, {USB_DEVICE(0x04cb, 0x010f)}, {USB_DEVICE(0x04cb, 0x0111)}, {USB_DEVICE(0x04cb, 0x0113)}, {USB_DEVICE(0x04cb, 0x0115)}, {USB_DEVICE(0x04cb, 0x0117)}, {USB_DEVICE(0x04cb, 0x0119)}, {USB_DEVICE(0x04cb, 0x011b)}, {USB_DEVICE(0x04cb, 0x011d)}, {USB_DEVICE(0x04cb, 0x0121)}, {USB_DEVICE(0x04cb, 0x0123)}, {USB_DEVICE(0x04cb, 0x0125)}, {USB_DEVICE(0x04cb, 0x0127)}, {USB_DEVICE(0x04cb, 0x0129)}, {USB_DEVICE(0x04cb, 0x012b)}, {USB_DEVICE(0x04cb, 0x012d)}, {USB_DEVICE(0x04cb, 0x012f)}, {USB_DEVICE(0x04cb, 0x0131)}, {USB_DEVICE(0x04cb, 0x013b)}, {USB_DEVICE(0x04cb, 0x013d)}, {USB_DEVICE(0x04cb, 0x013f)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME,
.config = sd_config, .init = sd_init, .start = sd_start, .stop0 = sd_stop0, }; /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct usb_fpix), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
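/*
 * Hedged sketch of the end-of-frame test used inside dostream() above: a
 * bulk chunk closes the current JPEG frame either because it is a short
 * read, or because it ends exactly on the JPEG end-of-image marker
 * 0xff 0xd9. demo_is_last_chunk() is a hypothetical helper, not driver code.
 */
static int demo_is_last_chunk(const unsigned char *data, int len, int max)
{
	if (len < max)
		return 1;	/* short read: the camera sent the whole frame */
	if (len >= 2 && data[len - 2] == 0xff && data[len - 1] == 0xd9)
		return 1;	/* full-sized read that still ends on EOI */
	return 0;
}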
gpl-2.0
mukelarvin-price/linux_imx
drivers/media/video/gspca/sq905.c
4905
12995
/* * SQ905 subdriver * * Copyright (C) 2008, 2009 Adam Baker and Theodore Kilgore * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * History and Acknowledgments * * The original Linux driver for SQ905 based cameras was written by * Marcell Lengyel and further developed by many other contributors * and is available from http://sourceforge.net/projects/sqcam/ * * This driver takes advantage of the reverse engineering work done for * that driver and for libgphoto2 but shares no code with them. * * This driver has used as a base the finepix driver and other gspca * based drivers and may still contain code fragments taken from those * drivers. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "sq905" #include <linux/workqueue.h> #include <linux/slab.h> #include "gspca.h" MODULE_AUTHOR("Adam Baker <linux@baker-net.org.uk>, " "Theodore Kilgore <kilgota@auburn.edu>"); MODULE_DESCRIPTION("GSPCA/SQ905 USB Camera Driver"); MODULE_LICENSE("GPL"); /* Default timeouts, in ms */ #define SQ905_CMD_TIMEOUT 500 #define SQ905_DATA_TIMEOUT 1000 /* Maximum transfer size to use. */ #define SQ905_MAX_TRANSFER 0x8000 #define FRAME_HEADER_LEN 64 /* The known modes, or registers. These go in the "value" slot. */ /* 00 is "none" obviously */ #define SQ905_BULK_READ 0x03 /* precedes any bulk read */ #define SQ905_COMMAND 0x06 /* precedes the command codes below */ #define SQ905_PING 0x07 /* when reading an "idling" command */ #define SQ905_READ_DONE 0xc0 /* ack bulk read completed */ /* Any non-zero value in the bottom 2 bits of the 2nd byte of * the ID appears to indicate the camera can do 640*480. If the * LSB of that byte is set the image is just upside down, otherwise * it is rotated 180 degrees. */ #define SQ905_HIRES_MASK 0x00000300 #define SQ905_ORIENTATION_MASK 0x00000100 /* Some command codes. These go in the "index" slot. */ #define SQ905_ID 0xf0 /* asks for model string */ #define SQ905_CONFIG 0x20 /* gets photo alloc. table, not used here */ #define SQ905_DATA 0x30 /* accesses photo data, not used here */ #define SQ905_CLEAR 0xa0 /* clear everything */ #define SQ905_CAPTURE_LOW 0x60 /* Starts capture at 160x120 */ #define SQ905_CAPTURE_MED 0x61 /* Starts capture at 320x240 */ #define SQ905_CAPTURE_HIGH 0x62 /* Starts capture at 640x480 (some cams only) */ /* note that the capture command also controls the output dimensions */ /* Structure to hold all of our device specific stuff */ struct sd { struct gspca_dev gspca_dev; /* !!
must be the first item */ /* * Driver stuff */ struct work_struct work_struct; struct workqueue_struct *work_thread; }; static struct v4l2_pix_format sq905_mode[] = { { 160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 160 * 120, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, { 320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, { 640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0} }; /* * Send a command to the camera. */ static int sq905_command(struct gspca_dev *gspca_dev, u16 index) { int ret; gspca_dev->usb_buf[0] = '\0'; ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), USB_REQ_SYNCH_FRAME, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, SQ905_COMMAND, index, gspca_dev->usb_buf, 1, SQ905_CMD_TIMEOUT); if (ret < 0) { pr_err("%s: usb_control_msg failed (%d)\n", __func__, ret); return ret; } ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), USB_REQ_SYNCH_FRAME, /* request */ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, SQ905_PING, 0, gspca_dev->usb_buf, 1, SQ905_CMD_TIMEOUT); if (ret < 0) { pr_err("%s: usb_control_msg failed 2 (%d)\n", __func__, ret); return ret; } return 0; } /* * Acknowledge the end of a frame - see warning on sq905_command. */ static int sq905_ack_frame(struct gspca_dev *gspca_dev) { int ret; gspca_dev->usb_buf[0] = '\0'; ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), USB_REQ_SYNCH_FRAME, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, SQ905_READ_DONE, 0, gspca_dev->usb_buf, 1, SQ905_CMD_TIMEOUT); if (ret < 0) { pr_err("%s: usb_control_msg failed (%d)\n", __func__, ret); return ret; } return 0; } /* * request and read a block of data - see warning on sq905_command. */ static int sq905_read_data(struct gspca_dev *gspca_dev, u8 *data, int size, int need_lock) { int ret; int act_len; gspca_dev->usb_buf[0] = '\0'; if (need_lock) mutex_lock(&gspca_dev->usb_lock); ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), USB_REQ_SYNCH_FRAME, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, SQ905_BULK_READ, size, gspca_dev->usb_buf, 1, SQ905_CMD_TIMEOUT); if (need_lock) mutex_unlock(&gspca_dev->usb_lock); if (ret < 0) { pr_err("%s: usb_control_msg failed (%d)\n", __func__, ret); return ret; } ret = usb_bulk_msg(gspca_dev->dev, usb_rcvbulkpipe(gspca_dev->dev, 0x81), data, size, &act_len, SQ905_DATA_TIMEOUT); /* successful, it returns 0, otherwise negative */ if (ret < 0 || act_len != size) { pr_err("bulk read fail (%d) len %d/%d\n", ret, act_len, size); return -EIO; } return 0; } /* This function is called as a workqueue function and runs whenever the camera * is streaming data. Because it is a workqueue function it is allowed to sleep * so we can use synchronous USB calls. To avoid possible collisions with other * threads attempting to use the camera's USB interface we take the gspca * usb_lock when performing USB operations. In practice the only thing we need * to protect against is the usb_set_interface call that gspca makes during * stream_off as the camera doesn't provide any controls that the user could try * to change. 
*/ static void sq905_dostream(struct work_struct *work) { struct sd *dev = container_of(work, struct sd, work_struct); struct gspca_dev *gspca_dev = &dev->gspca_dev; int bytes_left; /* bytes remaining in current frame. */ int data_len; /* size to use for the next read. */ int header_read; /* true if we have already read the frame header. */ int packet_type; int frame_sz; int ret; u8 *data; u8 *buffer; buffer = kmalloc(SQ905_MAX_TRANSFER, GFP_KERNEL | GFP_DMA); if (!buffer) { pr_err("Couldn't allocate USB buffer\n"); goto quit_stream; } frame_sz = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].sizeimage + FRAME_HEADER_LEN; while (gspca_dev->present && gspca_dev->streaming) { /* request some data and then read it until we have * a complete frame. */ bytes_left = frame_sz; header_read = 0; /* Note we do not check for gspca_dev->streaming here, as we must finish reading an entire frame, otherwise the next time we stream we start reading in the middle of a frame. */ while (bytes_left > 0 && gspca_dev->present) { data_len = bytes_left > SQ905_MAX_TRANSFER ? SQ905_MAX_TRANSFER : bytes_left; ret = sq905_read_data(gspca_dev, buffer, data_len, 1); if (ret < 0) goto quit_stream; PDEBUG(D_PACK, "Got %d bytes out of %d for frame", data_len, bytes_left); bytes_left -= data_len; data = buffer; if (!header_read) { packet_type = FIRST_PACKET; /* The first 64 bytes of each frame are * a header full of FF 00 bytes */ data += FRAME_HEADER_LEN; data_len -= FRAME_HEADER_LEN; header_read = 1; } else if (bytes_left == 0) { packet_type = LAST_PACKET; } else { packet_type = INTER_PACKET; } gspca_frame_add(gspca_dev, packet_type, data, data_len); /* If entire frame fits in one packet we still need to add a LAST_PACKET */ if (packet_type == FIRST_PACKET && bytes_left == 0) gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); } if (gspca_dev->present) { /* acknowledge the frame */ mutex_lock(&gspca_dev->usb_lock); ret = sq905_ack_frame(gspca_dev); mutex_unlock(&gspca_dev->usb_lock); if (ret < 0) goto quit_stream; } } quit_stream: if (gspca_dev->present) { mutex_lock(&gspca_dev->usb_lock); sq905_command(gspca_dev, SQ905_CLEAR); mutex_unlock(&gspca_dev->usb_lock); } kfree(buffer); } /* This function is called at probe time just before sd_init */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct cam *cam = &gspca_dev->cam; struct sd *dev = (struct sd *) gspca_dev; /* We don't use the buffer gspca allocates so make it small. */ cam->bulk = 1; cam->bulk_size = 64; INIT_WORK(&dev->work_struct, sq905_dostream); return 0; } /* called on streamoff with alt==0 and on disconnect */ /* the usb_lock is held at entry - restore on exit */ static void sd_stop0(struct gspca_dev *gspca_dev) { struct sd *dev = (struct sd *) gspca_dev; /* wait for the work queue to terminate */ mutex_unlock(&gspca_dev->usb_lock); /* This waits for sq905_dostream to finish */ destroy_workqueue(dev->work_thread); dev->work_thread = NULL; mutex_lock(&gspca_dev->usb_lock); } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { u32 ident; int ret; /* connect to the camera and read * the model ID and process that and put it away. */ ret = sq905_command(gspca_dev, SQ905_CLEAR); if (ret < 0) return ret; ret = sq905_command(gspca_dev, SQ905_ID); if (ret < 0) return ret; ret = sq905_read_data(gspca_dev, gspca_dev->usb_buf, 4, 0); if (ret < 0) return ret; /* usb_buf is allocated with kmalloc so is aligned. 
* Camera model number is the right way round if we assume this * reverse engineered ID is supposed to be big endian. */ ident = be32_to_cpup((__be32 *)gspca_dev->usb_buf); ret = sq905_command(gspca_dev, SQ905_CLEAR); if (ret < 0) return ret; PDEBUG(D_CONF, "SQ905 camera ID %08x detected", ident); gspca_dev->cam.cam_mode = sq905_mode; gspca_dev->cam.nmodes = ARRAY_SIZE(sq905_mode); if (!(ident & SQ905_HIRES_MASK)) gspca_dev->cam.nmodes--; if (ident & SQ905_ORIENTATION_MASK) gspca_dev->cam.input_flags = V4L2_IN_ST_VFLIP; else gspca_dev->cam.input_flags = V4L2_IN_ST_VFLIP | V4L2_IN_ST_HFLIP; return 0; } /* Set up for getting frames. */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *dev = (struct sd *) gspca_dev; int ret; /* "Open the shutter" and set size, to start capture */ switch (gspca_dev->curr_mode) { default: /* case 2: */ PDEBUG(D_STREAM, "Start streaming at high resolution"); ret = sq905_command(&dev->gspca_dev, SQ905_CAPTURE_HIGH); break; case 1: PDEBUG(D_STREAM, "Start streaming at medium resolution"); ret = sq905_command(&dev->gspca_dev, SQ905_CAPTURE_MED); break; case 0: PDEBUG(D_STREAM, "Start streaming at low resolution"); ret = sq905_command(&dev->gspca_dev, SQ905_CAPTURE_LOW); } if (ret < 0) { PDEBUG(D_ERR, "Start streaming command failed"); return ret; } /* Start the workqueue function to do the streaming */ dev->work_thread = create_singlethread_workqueue(MODULE_NAME); queue_work(dev->work_thread, &dev->work_struct); return 0; } /* Table of supported USB devices */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x2770, 0x9120)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .start = sd_start, .stop0 = sd_stop0, }; /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
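/*
 * Hedged sketch (hypothetical demo_* helpers) of the model-ID decoding done
 * in sd_init() above: bits 8-9 of the big-endian ident gate the 640x480
 * mode, and bit 8 set means the image is merely upside down (VFLIP) while
 * bit 8 clear means it is rotated 180 degrees (VFLIP|HFLIP).
 */
static int demo_nmodes(unsigned int ident)
{
	/* all three modes when a hires bit is set, else 160x120/320x240 only */
	return (ident & 0x00000300 /* SQ905_HIRES_MASK */) ? 3 : 2;
}

static int demo_needs_hflip(unsigned int ident)
{
	/* orientation bit clear: image is rotated 180 degrees, so HFLIP too */
	return (ident & 0x00000100 /* SQ905_ORIENTATION_MASK */) ? 0 : 1;
}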
gpl-2.0
12019/linux-2.6.34-ts471x
arch/ia64/kernel/acpi.c
42
25849
/* * acpi.c - Architecture-Specific Low-Level ACPI Support * * Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com> * Copyright (C) 2000, 2002-2003 Hewlett-Packard Co. * David Mosberger-Tang <davidm@hpl.hp.com> * Copyright (C) 2000 Intel Corp. * Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com> * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> * Copyright (C) 2001 Jenna Hall <jenna.s.hall@intel.com> * Copyright (C) 2001 Takayoshi Kochi <t-kochi@bq.jp.nec.com> * Copyright (C) 2002 Erich Focht <efocht@ess.nec.de> * Copyright (C) 2004 Ashok Raj <ashok.raj@intel.com> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/string.h> #include <linux/types.h> #include <linux/irq.h> #include <linux/acpi.h> #include <linux/efi.h> #include <linux/mmzone.h> #include <linux/nodemask.h> #include <linux/slab.h> #include <acpi/processor.h> #include <asm/io.h> #include <asm/iosapic.h> #include <asm/machvec.h> #include <asm/page.h> #include <asm/system.h> #include <asm/numa.h> #include <asm/sal.h> #include <asm/cyclone.h> #include <asm/xen/hypervisor.h> #define BAD_MADT_ENTRY(entry, end) ( \ (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ ((struct acpi_subtable_header *)entry)->length < sizeof(*entry)) #define PREFIX "ACPI: " u32 acpi_rsdt_forced; unsigned int acpi_cpei_override; unsigned int acpi_cpei_phys_cpuid; unsigned long acpi_wakeup_address = 0; #ifdef CONFIG_IA64_GENERIC static unsigned long __init acpi_find_rsdp(void) { unsigned long rsdp_phys = 0; if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) rsdp_phys = efi.acpi20; else if (efi.acpi != EFI_INVALID_TABLE_ADDR) printk(KERN_WARNING PREFIX "v1.0/r0.71 tables no longer supported\n"); return rsdp_phys; } const char __init * acpi_get_sysname(void) { unsigned long rsdp_phys; struct acpi_table_rsdp *rsdp; struct acpi_table_xsdt *xsdt; struct acpi_table_header *hdr; #ifdef CONFIG_DMAR u64 i, nentries; #endif rsdp_phys = acpi_find_rsdp(); if (!rsdp_phys) { printk(KERN_ERR "ACPI 2.0 RSDP not found, default to \"dig\"\n"); return "dig"; } rsdp = (struct acpi_table_rsdp *)__va(rsdp_phys); if (strncmp(rsdp->signature, ACPI_SIG_RSDP, sizeof(ACPI_SIG_RSDP) - 1)) { printk(KERN_ERR "ACPI 2.0 RSDP signature incorrect, default to \"dig\"\n"); return "dig"; } xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_physical_address); hdr = &xsdt->header; if (strncmp(hdr->signature, ACPI_SIG_XSDT, sizeof(ACPI_SIG_XSDT) - 1)) { printk(KERN_ERR "ACPI 2.0 XSDT signature incorrect, default to \"dig\"\n"); return "dig"; } if (!strcmp(hdr->oem_id, "HP")) { return "hpzx1"; } 
else if (!strcmp(hdr->oem_id, "SGI")) { if (!strcmp(hdr->oem_table_id + 4, "UV")) return "uv"; else return "sn2"; } else if (xen_pv_domain() && !strcmp(hdr->oem_id, "XEN")) { return "xen"; } #ifdef CONFIG_DMAR /* Look for Intel IOMMU */ nentries = (hdr->length - sizeof(*hdr)) / sizeof(xsdt->table_offset_entry[0]); for (i = 0; i < nentries; i++) { hdr = __va(xsdt->table_offset_entry[i]); if (strncmp(hdr->signature, ACPI_SIG_DMAR, sizeof(ACPI_SIG_DMAR) - 1) == 0) return "dig_vtd"; } #endif return "dig"; } #endif /* CONFIG_IA64_GENERIC */ #define ACPI_MAX_PLATFORM_INTERRUPTS 256 /* Array to record platform interrupt vectors for generic interrupt routing. */ int platform_intr_list[ACPI_MAX_PLATFORM_INTERRUPTS] = { [0 ... ACPI_MAX_PLATFORM_INTERRUPTS - 1] = -1 }; enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_IOSAPIC; /* * Interrupt routing API for device drivers. Provides interrupt vector for * a generic platform event. Currently only CPEI is implemented. */ int acpi_request_vector(u32 int_type) { int vector = -1; if (int_type < ACPI_MAX_PLATFORM_INTERRUPTS) { /* corrected platform error interrupt */ vector = platform_intr_list[int_type]; } else printk(KERN_ERR "acpi_request_vector(): invalid interrupt type\n"); return vector; } char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size) { return __va(phys_addr); } void __init __acpi_unmap_table(char *map, unsigned long size) { } /* -------------------------------------------------------------------------- Boot-time Table Parsing -------------------------------------------------------------------------- */ static int available_cpus __initdata; struct acpi_table_madt *acpi_madt __initdata; static u8 has_8259; static int __init acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header, const unsigned long end) { struct acpi_madt_local_apic_override *lapic; lapic = (struct acpi_madt_local_apic_override *)header; if (BAD_MADT_ENTRY(lapic, end)) return -EINVAL; if (lapic->address) { iounmap(ipi_base_addr); ipi_base_addr = ioremap(lapic->address, 0); } return 0; } static int __init acpi_parse_lsapic(struct acpi_subtable_header * header, const unsigned long end) { struct acpi_madt_local_sapic *lsapic; lsapic = (struct acpi_madt_local_sapic *)header; /*Skip BAD_MADT_ENTRY check, as lsapic size could vary */ if (lsapic->lapic_flags & ACPI_MADT_ENABLED) { #ifdef CONFIG_SMP smp_boot_data.cpu_phys_id[available_cpus] = (lsapic->id << 8) | lsapic->eid; #endif ++available_cpus; } total_cpus++; return 0; } static int __init acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end) { struct acpi_madt_local_apic_nmi *lacpi_nmi; lacpi_nmi = (struct acpi_madt_local_apic_nmi *)header; if (BAD_MADT_ENTRY(lacpi_nmi, end)) return -EINVAL; /* TBD: Support lapic_nmi entries */ return 0; } static int __init acpi_parse_iosapic(struct acpi_subtable_header * header, const unsigned long end) { struct acpi_madt_io_sapic *iosapic; iosapic = (struct acpi_madt_io_sapic *)header; if (BAD_MADT_ENTRY(iosapic, end)) return -EINVAL; return iosapic_init(iosapic->address, iosapic->global_irq_base); } static unsigned int __initdata acpi_madt_rev; static int __init acpi_parse_plat_int_src(struct acpi_subtable_header * header, const unsigned long end) { struct acpi_madt_interrupt_source *plintsrc; int vector; plintsrc = (struct acpi_madt_interrupt_source *)header; if (BAD_MADT_ENTRY(plintsrc, end)) return -EINVAL; /* * Get vector assignment for this interrupt, set attributes, * and program the IOSAPIC routing table. 
*/ vector = iosapic_register_platform_intr(plintsrc->type, plintsrc->global_irq, plintsrc->io_sapic_vector, plintsrc->eid, plintsrc->id, ((plintsrc->inti_flags & ACPI_MADT_POLARITY_MASK) == ACPI_MADT_POLARITY_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW, ((plintsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) == ACPI_MADT_TRIGGER_EDGE) ? IOSAPIC_EDGE : IOSAPIC_LEVEL); platform_intr_list[plintsrc->type] = vector; if (acpi_madt_rev > 1) { acpi_cpei_override = plintsrc->flags & ACPI_MADT_CPEI_OVERRIDE; } /* * Save the physical id, so we can check when it's being removed */ acpi_cpei_phys_cpuid = ((plintsrc->id << 8) | (plintsrc->eid)) & 0xffff; return 0; } #ifdef CONFIG_HOTPLUG_CPU unsigned int can_cpei_retarget(void) { extern int cpe_vector; extern unsigned int force_cpei_retarget; /* * Only if CPEI is supported and the override flag * is present, otherwise return that it's re-targetable * if we are in polling mode. */ if (cpe_vector > 0) { if (acpi_cpei_override || force_cpei_retarget) return 1; else return 0; } return 1; } unsigned int is_cpu_cpei_target(unsigned int cpu) { unsigned int logical_id; logical_id = cpu_logical_id(acpi_cpei_phys_cpuid); if (logical_id == cpu) return 1; else return 0; } void set_cpei_target_cpu(unsigned int cpu) { acpi_cpei_phys_cpuid = cpu_physical_id(cpu); } #endif unsigned int get_cpei_target_cpu(void) { return acpi_cpei_phys_cpuid; } static int __init acpi_parse_int_src_ovr(struct acpi_subtable_header * header, const unsigned long end) { struct acpi_madt_interrupt_override *p; p = (struct acpi_madt_interrupt_override *)header; if (BAD_MADT_ENTRY(p, end)) return -EINVAL; iosapic_override_isa_irq(p->source_irq, p->global_irq, ((p->inti_flags & ACPI_MADT_POLARITY_MASK) == ACPI_MADT_POLARITY_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW, ((p->inti_flags & ACPI_MADT_TRIGGER_MASK) == ACPI_MADT_TRIGGER_EDGE) ? IOSAPIC_EDGE : IOSAPIC_LEVEL); return 0; } static int __init acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end) { struct acpi_madt_nmi_source *nmi_src; nmi_src = (struct acpi_madt_nmi_source *)header; if (BAD_MADT_ENTRY(nmi_src, end)) return -EINVAL; /* TBD: Support nmi_src entries */ return 0; } static void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) { if (!strncmp(oem_id, "IBM", 3) && (!strncmp(oem_table_id, "SERMOW", 6))) { /* * Unfortunately ITC_DRIFT is not yet part of the * official SAL spec, so the ITC_DRIFT bit is not * set by the BIOS on this hardware. 
*/ sal_platform_features |= IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT; cyclone_setup(); } } static int __init acpi_parse_madt(struct acpi_table_header *table) { if (!table) return -EINVAL; acpi_madt = (struct acpi_table_madt *)table; acpi_madt_rev = acpi_madt->header.revision; /* remember the value for reference after free_initmem() */ #ifdef CONFIG_ITANIUM has_8259 = 1; /* Firmware on old Itanium systems is broken */ #else has_8259 = acpi_madt->flags & ACPI_MADT_PCAT_COMPAT; #endif iosapic_system_init(has_8259); /* Get base address of IPI Message Block */ if (acpi_madt->address) ipi_base_addr = ioremap(acpi_madt->address, 0); printk(KERN_INFO PREFIX "Local APIC address %p\n", ipi_base_addr); acpi_madt_oem_check(acpi_madt->header.oem_id, acpi_madt->header.oem_table_id); return 0; } #ifdef CONFIG_ACPI_NUMA #undef SLIT_DEBUG #define PXM_FLAG_LEN ((MAX_PXM_DOMAINS + 1)/32) static int __initdata srat_num_cpus; /* number of cpus */ static u32 __devinitdata pxm_flag[PXM_FLAG_LEN]; #define pxm_bit_set(bit) (set_bit(bit,(void *)pxm_flag)) #define pxm_bit_test(bit) (test_bit(bit,(void *)pxm_flag)) static struct acpi_table_slit __initdata *slit_table; cpumask_t early_cpu_possible_map = CPU_MASK_NONE; static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa) { int pxm; pxm = pa->proximity_domain_lo; if (ia64_platform_is("sn2")) pxm += pa->proximity_domain_hi[0] << 8; return pxm; } static int get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma) { int pxm; pxm = ma->proximity_domain; if (!ia64_platform_is("sn2")) pxm &= 0xff; return pxm; } /* * ACPI 2.0 SLIT (System Locality Information Table) * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf */ void __init acpi_numa_slit_init(struct acpi_table_slit *slit) { u32 len; len = sizeof(struct acpi_table_header) + 8 + slit->locality_count * slit->locality_count; if (slit->header.length != len) { printk(KERN_ERR "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n", len, slit->header.length); return; } slit_table = slit; } void __init acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) { int pxm; if (!(pa->flags & ACPI_SRAT_CPU_ENABLED)) return; pxm = get_processor_proximity_domain(pa); /* record this node in proximity bitmap */ pxm_bit_set(pxm); node_cpuid[srat_num_cpus].phys_id = (pa->apic_id << 8) | (pa->local_sapic_eid); /* nid should be overridden as logical node id later */ node_cpuid[srat_num_cpus].nid = pxm; cpu_set(srat_num_cpus, early_cpu_possible_map); srat_num_cpus++; } void __init acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) { unsigned long paddr, size; int pxm; struct node_memblk_s *p, *q, *pend; pxm = get_memory_proximity_domain(ma); /* fill node memory chunk structure */ paddr = ma->base_address; size = ma->length; /* Ignore disabled entries */ if (!(ma->flags & ACPI_SRAT_MEM_ENABLED)) return; /* record this node in proximity bitmap */ pxm_bit_set(pxm); /* Insertion sort based on base address */ pend = &node_memblk[num_node_memblks]; for (p = &node_memblk[0]; p < pend; p++) { if (paddr < p->start_paddr) break; } if (p < pend) { for (q = pend - 1; q >= p; q--) *(q + 1) = *q; } p->start_paddr = paddr; p->size = size; p->nid = pxm; num_node_memblks++; } void __init acpi_numa_arch_fixup(void) { int i, j, node_from, node_to; /* If there's no SRAT, fix the phys_id and mark node 0 online */ if (srat_num_cpus == 0) { node_set_online(0); node_cpuid[0].phys_id = hard_smp_processor_id(); return; } /* * MCD - This can probably be dropped now. 
No need for pxm ID to node ID * mapping with sparse node numbering iff MAX_PXM_DOMAINS <= MAX_NUMNODES. */ nodes_clear(node_online_map); for (i = 0; i < MAX_PXM_DOMAINS; i++) { if (pxm_bit_test(i)) { int nid = acpi_map_pxm_to_node(i); node_set_online(nid); } } /* set logical node id in memory chunk structure */ for (i = 0; i < num_node_memblks; i++) node_memblk[i].nid = pxm_to_node(node_memblk[i].nid); /* assign memory bank numbers for each chunk on each node */ for_each_online_node(i) { int bank; bank = 0; for (j = 0; j < num_node_memblks; j++) if (node_memblk[j].nid == i) node_memblk[j].bank = bank++; } /* set logical node id in cpu structure */ for_each_possible_early_cpu(i) node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid); printk(KERN_INFO "Number of logical nodes in system = %d\n", num_online_nodes()); printk(KERN_INFO "Number of memory chunks in system = %d\n", num_node_memblks); if (!slit_table) { for (i = 0; i < MAX_NUMNODES; i++) for (j = 0; j < MAX_NUMNODES; j++) node_distance(i, j) = i == j ? LOCAL_DISTANCE : REMOTE_DISTANCE; return; } memset(numa_slit, -1, sizeof(numa_slit)); for (i = 0; i < slit_table->locality_count; i++) { if (!pxm_bit_test(i)) continue; node_from = pxm_to_node(i); for (j = 0; j < slit_table->locality_count; j++) { if (!pxm_bit_test(j)) continue; node_to = pxm_to_node(j); node_distance(node_from, node_to) = slit_table->entry[i * slit_table->locality_count + j]; } } #ifdef SLIT_DEBUG printk("ACPI 2.0 SLIT locality table:\n"); for_each_online_node(i) { for_each_online_node(j) printk("%03d ", node_distance(i, j)); printk("\n"); } #endif } #endif /* CONFIG_ACPI_NUMA */ /* * success: return IRQ number (>=0) * failure: return < 0 */ int acpi_register_gsi(struct device *dev, u32 gsi, int triggering, int polarity) { if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM) return gsi; if (has_8259 && gsi < 16) return isa_irq_to_vector(gsi); return iosapic_register_intr(gsi, (polarity == ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW, (triggering == ACPI_EDGE_SENSITIVE) ? IOSAPIC_EDGE : IOSAPIC_LEVEL); } void acpi_unregister_gsi(u32 gsi) { if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM) return; if (has_8259 && gsi < 16) return; iosapic_unregister_intr(gsi); } static int __init acpi_parse_fadt(struct acpi_table_header *table) { struct acpi_table_header *fadt_header; struct acpi_table_fadt *fadt; if (!table) return -EINVAL; fadt_header = (struct acpi_table_header *)table; if (fadt_header->revision != 3) return -ENODEV; /* Only deal with ACPI 2.0 FADT */ fadt = (struct acpi_table_fadt *)fadt_header; acpi_register_gsi(NULL, fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW); return 0; } int __init early_acpi_boot_init(void) { int ret; /* * do a partial walk of MADT to determine how many CPUs * we have including offline CPUs */ if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) { printk(KERN_ERR PREFIX "Can't find MADT\n"); return 0; } ret = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC, acpi_parse_lsapic, NR_CPUS); if (ret < 1) printk(KERN_ERR PREFIX "Error parsing MADT - no LAPIC entries\n"); #ifdef CONFIG_SMP if (available_cpus == 0) { printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n"); printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id()); smp_boot_data.cpu_phys_id[available_cpus] = hard_smp_processor_id(); available_cpus = 1; /* We've got at least one of these, no? 
*/ } smp_boot_data.cpu_count = available_cpus; #endif /* Make boot-up look pretty */ printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, total_cpus); return 0; } int __init acpi_boot_init(void) { /* * MADT * ---- * Parse the Multiple APIC Description Table (MADT), if exists. * Note that this table provides platform SMP configuration * information -- the successor to MPS tables. */ if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) { printk(KERN_ERR PREFIX "Can't find MADT\n"); goto skip_madt; } /* Local APIC */ if (acpi_table_parse_madt (ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, acpi_parse_lapic_addr_ovr, 0) < 0) printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n"); if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0) < 0) printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n"); /* I/O APIC */ if (acpi_table_parse_madt (ACPI_MADT_TYPE_IO_SAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1) { if (!ia64_platform_is("sn2")) printk(KERN_ERR PREFIX "Error parsing MADT - no IOSAPIC entries\n"); } /* System-Level Interrupt Routing */ if (acpi_table_parse_madt (ACPI_MADT_TYPE_INTERRUPT_SOURCE, acpi_parse_plat_int_src, ACPI_MAX_PLATFORM_INTERRUPTS) < 0) printk(KERN_ERR PREFIX "Error parsing platform interrupt source entry\n"); if (acpi_table_parse_madt (ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr, 0) < 0) printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n"); if (acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src, 0) < 0) printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n"); skip_madt: /* * FADT says whether a legacy keyboard controller is present. * The FADT also contains an SCI_INT line, by which the system * gets interrupts such as power and sleep buttons. If it's not * on a Legacy interrupt, it needs to be setup. */ if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt)) printk(KERN_ERR PREFIX "Can't find FADT\n"); #ifdef CONFIG_ACPI_NUMA #ifdef CONFIG_SMP if (srat_num_cpus == 0) { int cpu, i = 1; for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++) if (smp_boot_data.cpu_phys_id[cpu] != hard_smp_processor_id()) node_cpuid[i++].phys_id = smp_boot_data.cpu_phys_id[cpu]; } #endif build_cpu_to_node_map(); #endif return 0; } int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) { int tmp; if (has_8259 && gsi < 16) *irq = isa_irq_to_vector(gsi); else { tmp = gsi_to_irq(gsi); if (tmp == -1) return -1; *irq = tmp; } return 0; } /* * ACPI based hotplug CPU support */ #ifdef CONFIG_ACPI_HOTPLUG_CPU static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) { #ifdef CONFIG_ACPI_NUMA int pxm_id; int nid; pxm_id = acpi_get_pxm(handle); /* * We don't have cpu-only-node hotadd. But if the system equips * SRAT table, pxm is already found and node is ready. * So, just pxm_to_nid(pxm) is OK. * This code here is for the system which doesn't have full SRAT * table for possible cpus. */ nid = acpi_map_pxm_to_node(pxm_id); node_cpuid[cpu].phys_id = physid; node_cpuid[cpu].nid = nid; #endif return (0); } int additional_cpus __initdata = -1; static __init int setup_additional_cpus(char *s) { if (s) additional_cpus = simple_strtol(s, NULL, 0); return 0; } early_param("additional_cpus", setup_additional_cpus); /* * cpu_possible_map should be static, it cannot change as CPUs * are onlined, or offlined. The reason is per-cpu data-structures * are allocated by some modules at init time, and dont expect to * do this dynamically on cpu arrival/departure. * cpu_present_map on the other hand can change dynamically. 
* In case when cpu_hotplug is not compiled, then we resort to current * behaviour, which is cpu_possible == cpu_present. * - Ashok Raj * * Three ways to find out the number of additional hotplug CPUs: * - If the BIOS specified disabled CPUs in ACPI/mptables use that. * - The user can overwrite it with additional_cpus=NUM * - Otherwise don't reserve additional CPUs. */ __init void prefill_possible_map(void) { int i; int possible, disabled_cpus; disabled_cpus = total_cpus - available_cpus; if (additional_cpus == -1) { if (disabled_cpus > 0) additional_cpus = disabled_cpus; else additional_cpus = 0; } possible = available_cpus + additional_cpus; if (possible > nr_cpu_ids) possible = nr_cpu_ids; printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n", possible, max((possible - available_cpus), 0)); for (i = 0; i < possible; i++) set_cpu_possible(i, true); } int acpi_map_lsapic(acpi_handle handle, int *pcpu) { struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; struct acpi_madt_local_sapic *lsapic; cpumask_t tmp_map; int cpu, physid; if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) return -EINVAL; if (!buffer.length || !buffer.pointer) return -EINVAL; obj = buffer.pointer; if (obj->type != ACPI_TYPE_BUFFER) { kfree(buffer.pointer); return -EINVAL; } lsapic = (struct acpi_madt_local_sapic *)obj->buffer.pointer; if ((lsapic->header.type != ACPI_MADT_TYPE_LOCAL_SAPIC) || (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))) { kfree(buffer.pointer); return -EINVAL; } physid = ((lsapic->id << 8) | (lsapic->eid)); kfree(buffer.pointer); buffer.length = ACPI_ALLOCATE_BUFFER; buffer.pointer = NULL; cpumask_complement(&tmp_map, cpu_present_mask); cpu = cpumask_first(&tmp_map); if (cpu >= nr_cpu_ids) return -EINVAL; acpi_map_cpu2node(handle, cpu, physid); cpu_set(cpu, cpu_present_map); ia64_cpu_to_sapicid[cpu] = physid; acpi_processor_set_pdc(handle); *pcpu = cpu; return (0); } EXPORT_SYMBOL(acpi_map_lsapic); int acpi_unmap_lsapic(int cpu) { ia64_cpu_to_sapicid[cpu] = -1; cpu_clear(cpu, cpu_present_map); #ifdef CONFIG_ACPI_NUMA /* NUMA specific cleanup's */ #endif return (0); } EXPORT_SYMBOL(acpi_unmap_lsapic); #endif /* CONFIG_ACPI_HOTPLUG_CPU */ #ifdef CONFIG_ACPI_NUMA static acpi_status __devinit acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret) { struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; struct acpi_madt_io_sapic *iosapic; unsigned int gsi_base; int pxm, node; /* Only care about objects w/ a method that returns the MADT */ if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) return AE_OK; if (!buffer.length || !buffer.pointer) return AE_OK; obj = buffer.pointer; if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < sizeof(*iosapic)) { kfree(buffer.pointer); return AE_OK; } iosapic = (struct acpi_madt_io_sapic *)obj->buffer.pointer; if (iosapic->header.type != ACPI_MADT_TYPE_IO_SAPIC) { kfree(buffer.pointer); return AE_OK; } gsi_base = iosapic->global_irq_base; kfree(buffer.pointer); /* * OK, it's an IOSAPIC MADT entry, look for a _PXM value to tell * us which node to associate this with. */ pxm = acpi_get_pxm(handle); if (pxm < 0) return AE_OK; node = pxm_to_node(pxm); if (node >= MAX_NUMNODES || !node_online(node) || cpumask_empty(cpumask_of_node(node))) return AE_OK; /* We know a gsi to node mapping! 
*/ map_iosapic_to_node(gsi_base, node); return AE_OK; } static int __init acpi_map_iosapics (void) { acpi_get_devices(NULL, acpi_map_iosapic, NULL, NULL); return 0; } fs_initcall(acpi_map_iosapics); #endif /* CONFIG_ACPI_NUMA */ int __ref acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base) { int err; if ((err = iosapic_init(phys_addr, gsi_base))) return err; #ifdef CONFIG_ACPI_NUMA acpi_map_iosapic(handle, 0, NULL, NULL); #endif /* CONFIG_ACPI_NUMA */ return 0; } EXPORT_SYMBOL(acpi_register_ioapic); int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base) { return iosapic_remove(gsi_base); } EXPORT_SYMBOL(acpi_unregister_ioapic); /* * acpi_save_state_mem() - save kernel state * * TBD when IA64 starts to support suspend... */ int acpi_save_state_mem(void) { return 0; } /* * acpi_restore_state_mem() */ void acpi_restore_state_mem(void) {} /* * do_suspend_lowlevel() */ void do_suspend_lowlevel(void) {}
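/*
 * Editor's note -- illustrative sketch, not part of acpi.c above: the SRAT
 * memory-affinity handler keeps node_memblk[] ordered by base address with a
 * simple find-slot-then-shift insertion, which is fine because SRAT tables
 * carry only a handful of entries. The standalone userspace demo below
 * reproduces that idiom under assumptions: the demo_* names, the array size,
 * and the sample addresses are hypothetical; only the insertion logic
 * mirrors the source.
 */
#include <stdio.h>

struct demo_memblk {
	unsigned long start_paddr;
	unsigned long size;
};

#define DEMO_MAX_MEMBLKS 8

static struct demo_memblk demo_memblk[DEMO_MAX_MEMBLKS];
static int demo_num_memblks;

/* Insert one chunk, keeping the array sorted by start_paddr, using the
 * same find-slot-then-shift sequence as the SRAT handler above. */
static void demo_insert(unsigned long paddr, unsigned long size)
{
	struct demo_memblk *p, *q, *pend = &demo_memblk[demo_num_memblks];

	for (p = &demo_memblk[0]; p < pend; p++) {
		if (paddr < p->start_paddr)
			break;
	}
	if (p < pend) {
		/* open a slot by shifting the tail one element right */
		for (q = pend - 1; q >= p; q--)
			*(q + 1) = *q;
	}
	p->start_paddr = paddr;
	p->size = size;
	demo_num_memblks++;
}

int main(void)
{
	int i;

	/* hypothetical sample chunks, deliberately out of order */
	demo_insert(0x4000000UL, 0x1000000UL);
	demo_insert(0x1000000UL, 0x1000000UL);
	demo_insert(0x2000000UL, 0x2000000UL);

	for (i = 0; i < demo_num_memblks; i++)
		printf("chunk %d: base 0x%lx size 0x%lx\n",
		       i, demo_memblk[i].start_paddr, demo_memblk[i].size);
	return 0;
}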
gpl-2.0
OpenInkpot-archive/linux-2.6
net/mac80211/agg-rx.c
42
10523
/* * HT handling * * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi> * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> * Copyright 2007, Michael Wu <flamingice@sourmilk.net> * Copyright 2007-2010, Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /** * DOC: RX A-MPDU aggregation * * Aggregation on the RX side requires only implementing the * @ampdu_action callback that is invoked to start/stop any * block-ack sessions for RX aggregation. * * When RX aggregation is started by the peer, the driver is * notified via @ampdu_action function, with the * %IEEE80211_AMPDU_RX_START action, and may reject the request * in which case a negative response is sent to the peer, if it * accepts it a positive response is sent. * * While the session is active, the device/driver are required * to de-aggregate frames and pass them up one by one to mac80211, * which will handle the reorder buffer. * * When the aggregation session is stopped again by the peer or * ourselves, the driver's @ampdu_action function will be called * with the action %IEEE80211_AMPDU_RX_STOP. In this case, the * call must not fail. */ #include <linux/ieee80211.h> #include <linux/slab.h> #include <net/mac80211.h> #include "ieee80211_i.h" #include "driver-ops.h" static void ieee80211_free_tid_rx(struct rcu_head *h) { struct tid_ampdu_rx *tid_rx = container_of(h, struct tid_ampdu_rx, rcu_head); int i; for (i = 0; i < tid_rx->buf_size; i++) dev_kfree_skb(tid_rx->reorder_buf[i]); kfree(tid_rx->reorder_buf); kfree(tid_rx->reorder_time); kfree(tid_rx); } void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, u16 initiator, u16 reason, bool tx) { struct ieee80211_local *local = sta->local; struct tid_ampdu_rx *tid_rx; lockdep_assert_held(&sta->ampdu_mlme.mtx); tid_rx = sta->ampdu_mlme.tid_rx[tid]; if (!tid_rx) return; rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], NULL); #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n", sta->sta.addr, tid); #endif /* CONFIG_MAC80211_HT_DEBUG */ if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP, &sta->sta, tid, NULL)) printk(KERN_DEBUG "HW problem - can not stop rx " "aggregation for tid %d\n", tid); /* check if this is a self generated aggregation halt */ if (initiator == WLAN_BACK_RECIPIENT && tx) ieee80211_send_delba(sta->sdata, sta->sta.addr, tid, 0, reason); del_timer_sync(&tid_rx->session_timer); del_timer_sync(&tid_rx->reorder_timer); call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx); } void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, u16 initiator, u16 reason, bool tx) { mutex_lock(&sta->ampdu_mlme.mtx); ___ieee80211_stop_rx_ba_session(sta, tid, initiator, reason, tx); mutex_unlock(&sta->ampdu_mlme.mtx); } /* * After accepting the AddBA Request we activated a timer, * resetting it after each frame that arrives from the originator. 
*/ static void sta_rx_agg_session_timer_expired(unsigned long data) { /* not an elegant detour, but there is no choice as the timer passes * only one argument, and various sta_info are needed here, so init * flow in sta_info_create gives the TID as data, while the timer_to_id * array gives the sta through container_of */ u8 *ptid = (u8 *)data; u8 *timer_to_id = ptid - *ptid; struct sta_info *sta = container_of(timer_to_id, struct sta_info, timer_to_tid[0]); #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); #endif set_bit(*ptid, sta->ampdu_mlme.tid_rx_timer_expired); ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work); } static void sta_rx_agg_reorder_timer_expired(unsigned long data) { u8 *ptid = (u8 *)data; u8 *timer_to_id = ptid - *ptid; struct sta_info *sta = container_of(timer_to_id, struct sta_info, timer_to_tid[0]); rcu_read_lock(); spin_lock(&sta->lock); ieee80211_release_reorder_timeout(sta, *ptid); spin_unlock(&sta->lock); rcu_read_unlock(); } static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid, u8 dialog_token, u16 status, u16 policy, u16 buf_size, u16 timeout) { struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct ieee80211_mgmt *mgmt; u16 capab; skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); if (!skb) { printk(KERN_DEBUG "%s: failed to allocate buffer " "for addba resp frame\n", sdata->name); return; } skb_reserve(skb, local->hw.extra_tx_headroom); mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); memset(mgmt, 0, 24); memcpy(mgmt->da, da, ETH_ALEN); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); if (sdata->vif.type == NL80211_IFTYPE_AP || sdata->vif.type == NL80211_IFTYPE_AP_VLAN) memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); else if (sdata->vif.type == NL80211_IFTYPE_STATION) memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_resp)); mgmt->u.action.category = WLAN_CATEGORY_BACK; mgmt->u.action.u.addba_resp.action_code = WLAN_ACTION_ADDBA_RESP; mgmt->u.action.u.addba_resp.dialog_token = dialog_token; capab = (u16)(policy << 1); /* bit 1 aggregation policy */ capab |= (u16)(tid << 2); /* bit 5:2 TID number */ capab |= (u16)(buf_size << 6); /* bit 15:6 max size of aggregation */ mgmt->u.action.u.addba_resp.capab = cpu_to_le16(capab); mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout); mgmt->u.action.u.addba_resp.status = cpu_to_le16(status); ieee80211_tx_skb(sdata, skb); } void ieee80211_process_addba_request(struct ieee80211_local *local, struct sta_info *sta, struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_hw *hw = &local->hw; struct ieee80211_conf *conf = &hw->conf; struct tid_ampdu_rx *tid_agg_rx; u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num, status; u8 dialog_token; int ret = -EOPNOTSUPP; /* extract session parameters from addba request frame */ dialog_token = mgmt->u.action.u.addba_req.dialog_token; timeout = le16_to_cpu(mgmt->u.action.u.addba_req.timeout); start_seq_num = le16_to_cpu(mgmt->u.action.u.addba_req.start_seq_num) >> 4; capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab); ba_policy = (capab & IEEE80211_ADDBA_PARAM_POLICY_MASK) >> 1; tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6; status = WLAN_STATUS_REQUEST_DECLINED; if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) { #ifdef 
CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "Suspend in progress. " "Denying ADDBA request\n"); #endif goto end_no_lock; } /* sanity check for incoming parameters: * check if configuration can support the BA policy * and if buffer size does not exceeds max value */ /* XXX: check own ht delayed BA capability?? */ if (((ba_policy != 1) && (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA))) || (buf_size > IEEE80211_MAX_AMPDU_BUF)) { status = WLAN_STATUS_INVALID_QOS_PARAM; #ifdef CONFIG_MAC80211_HT_DEBUG if (net_ratelimit()) printk(KERN_DEBUG "AddBA Req with bad params from " "%pM on tid %u. policy %d, buffer size %d\n", mgmt->sa, tid, ba_policy, buf_size); #endif /* CONFIG_MAC80211_HT_DEBUG */ goto end_no_lock; } /* determine default buffer size */ if (buf_size == 0) { struct ieee80211_supported_band *sband; sband = local->hw.wiphy->bands[conf->channel->band]; buf_size = IEEE80211_MIN_AMPDU_BUF; buf_size = buf_size << sband->ht_cap.ampdu_factor; } /* examine state machine */ mutex_lock(&sta->ampdu_mlme.mtx); if (sta->ampdu_mlme.tid_rx[tid]) { #ifdef CONFIG_MAC80211_HT_DEBUG if (net_ratelimit()) printk(KERN_DEBUG "unexpected AddBA Req from " "%pM on tid %u\n", mgmt->sa, tid); #endif /* CONFIG_MAC80211_HT_DEBUG */ goto end; } /* prepare A-MPDU MLME for Rx aggregation */ tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC); if (!tid_agg_rx) { #ifdef CONFIG_MAC80211_HT_DEBUG if (net_ratelimit()) printk(KERN_ERR "allocate rx mlme to tid %d failed\n", tid); #endif goto end; } spin_lock_init(&tid_agg_rx->reorder_lock); /* rx timer */ tid_agg_rx->session_timer.function = sta_rx_agg_session_timer_expired; tid_agg_rx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid]; init_timer(&tid_agg_rx->session_timer); /* rx reorder timer */ tid_agg_rx->reorder_timer.function = sta_rx_agg_reorder_timer_expired; tid_agg_rx->reorder_timer.data = (unsigned long)&sta->timer_to_tid[tid]; init_timer(&tid_agg_rx->reorder_timer); /* prepare reordering buffer */ tid_agg_rx->reorder_buf = kcalloc(buf_size, sizeof(struct sk_buff *), GFP_ATOMIC); tid_agg_rx->reorder_time = kcalloc(buf_size, sizeof(unsigned long), GFP_ATOMIC); if (!tid_agg_rx->reorder_buf || !tid_agg_rx->reorder_time) { #ifdef CONFIG_MAC80211_HT_DEBUG if (net_ratelimit()) printk(KERN_ERR "can not allocate reordering buffer " "to tid %d\n", tid); #endif kfree(tid_agg_rx->reorder_buf); kfree(tid_agg_rx->reorder_time); kfree(tid_agg_rx); goto end; } ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START, &sta->sta, tid, &start_seq_num); #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret); #endif /* CONFIG_MAC80211_HT_DEBUG */ if (ret) { kfree(tid_agg_rx->reorder_buf); kfree(tid_agg_rx->reorder_time); kfree(tid_agg_rx); goto end; } /* update data */ tid_agg_rx->dialog_token = dialog_token; tid_agg_rx->ssn = start_seq_num; tid_agg_rx->head_seq_num = start_seq_num; tid_agg_rx->buf_size = buf_size; tid_agg_rx->timeout = timeout; tid_agg_rx->stored_mpdu_num = 0; status = WLAN_STATUS_SUCCESS; /* activate it for RX */ rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx); if (timeout) mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout)); end: mutex_unlock(&sta->ampdu_mlme.mtx); end_no_lock: ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, tid, dialog_token, status, 1, buf_size, timeout); }
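/*
 * Editor's note -- illustrative sketch, not part of agg-rx.c above:
 * ieee80211_process_addba_request() decodes the 16-bit BlockAck parameter
 * set as bit 1 = aggregation policy, bits 5:2 = TID, bits 15:6 = buffer
 * size, and ieee80211_send_addba_resp() packs the same layout. The
 * standalone demo below packs and unpacks that field with locally defined
 * masks that mirror the shifts used above; they are stand-ins for the
 * kernel's own IEEE80211_ADDBA_PARAM_* definitions, and the sample session
 * parameters are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_ADDBA_POLICY_MASK   0x0002u
#define DEMO_ADDBA_TID_MASK      0x003Cu
#define DEMO_ADDBA_BUF_SIZE_MASK 0xFFC0u

static uint16_t demo_pack_capab(uint16_t policy, uint16_t tid,
				uint16_t buf_size)
{
	uint16_t capab;

	capab  = (uint16_t)(policy << 1);   /* bit 1: aggregation policy */
	capab |= (uint16_t)(tid << 2);      /* bits 5:2: TID */
	capab |= (uint16_t)(buf_size << 6); /* bits 15:6: buffer size */
	return capab;
}

int main(void)
{
	/* hypothetical session: immediate BA, TID 5, 64-frame reorder buffer */
	uint16_t capab = demo_pack_capab(1, 5, 64);

	printf("capab    = 0x%04x\n", (unsigned)capab);
	printf("policy   = %u\n", (capab & DEMO_ADDBA_POLICY_MASK) >> 1);
	printf("tid      = %u\n", (capab & DEMO_ADDBA_TID_MASK) >> 2);
	printf("buf_size = %u\n", (capab & DEMO_ADDBA_BUF_SIZE_MASK) >> 6);
	return 0;
}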
gpl-2.0
derekcentrico/pockettv.kernel.2.x
drivers/amlogic/wifi/atheros_6302/wmi/wmi.c
42
192730
//------------------------------------------------------------------------------ // <copyright file="wmi.c" company="Atheros"> // Copyright (c) 2004-2010 Atheros Corporation. All rights reserved. // // The software source and binaries included in this development package are // licensed, not sold. You, or your company, received the package under one // or more license agreements. The rights granted to you are specifically // listed in these license agreement(s). All other rights remain with Atheros // Communications, Inc., its subsidiaries, or the respective owner including // those listed on the included copyright notices. Distribution of any // portion of this package must be in strict compliance with the license // agreement(s) terms. // </copyright> // // <summary> // Wifi driver for AR6002 // </summary> // //------------------------------------------------------------------------------ //============================================================================== // This module implements the hardware independent layer of the // Wireless Module Interface (WMI) protocol. // // Author(s): ="Atheros" //============================================================================== #ifdef WIN_MOBILE7 #include <ntddk.h> #endif #include <a_config.h> #include <athdefs.h> #include <a_types.h> #include <a_osapi.h> #include "htc.h" #include "htc_api.h" #include "wmi.h" #include <wlan_api.h> #include <wmi_api.h> #include <ieee80211.h> #include <ieee80211_node.h> #include "dset_api.h" #include "gpio_api.h" #include "wmi_host.h" #include "a_drv.h" #include "a_drv_api.h" #define ATH_MODULE_NAME wmi #include "a_debug.h" #include "dbglog_api.h" #include "roaming.h" #define ATH_DEBUG_WMI ATH_DEBUG_MAKE_MODULE_MASK(0) #ifdef DEBUG static ATH_DEBUG_MASK_DESCRIPTION wmi_debug_desc[] = { { ATH_DEBUG_WMI , "General WMI Tracing"}, }; ATH_DEBUG_INSTANTIATE_MODULE_VAR(wmi, "wmi", "Wireless Module Interface", ATH_DEBUG_MASK_DEFAULTS, ATH_DEBUG_DESCRIPTION_COUNT(wmi_debug_desc), wmi_debug_desc); #endif #ifndef REXOS #define DBGARG _A_FUNCNAME_ #define DBGFMT "%s() : " #define DBG_WMI ATH_DEBUG_WMI #define DBG_ERROR ATH_DEBUG_ERR #define DBG_WMI2 ATH_DEBUG_WMI #define A_DPRINTF AR_DEBUG_PRINTF #endif static A_STATUS wmi_ready_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_connect_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_disconnect_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_tkip_micerr_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_bssInfo_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_opt_frame_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_pstream_timeout_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_sync_point(struct wmi_t *wmip); static A_STATUS wmi_bitrate_reply_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_ratemask_reply_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_channelList_reply_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_regDomain_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_txPwr_reply_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_neighborReport_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_dset_open_req_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); #ifdef CONFIG_HOST_DSET_SUPPORT static A_STATUS wmi_dset_close_rx(struct wmi_t *wmip, A_UINT8 
*datap, int len); static A_STATUS wmi_dset_data_req_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); #endif /* CONFIG_HOST_DSET_SUPPORT */ static A_STATUS wmi_scanComplete_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_errorEvent_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_statsEvent_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_rssiThresholdEvent_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_hbChallengeResp_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_reportErrorEvent_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_cac_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_channel_change_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_roam_tbl_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_roam_data_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_get_wow_list_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_get_pmkid_list_event_rx(struct wmi_t *wmip, A_UINT8 *datap, A_UINT32 len); static A_STATUS wmi_set_params_event_rx(struct wmi_t *wmip, A_UINT8 *datap, A_UINT32 len); #ifdef CONFIG_HOST_GPIO_SUPPORT static A_STATUS wmi_gpio_intr_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_gpio_data_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_gpio_ack_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); #endif /* CONFIG_HOST_GPIO_SUPPORT */ #ifdef CONFIG_HOST_TCMD_SUPPORT static A_STATUS wmi_tcmd_test_report_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); #endif static A_STATUS wmi_txRetryErrEvent_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_snrThresholdEvent_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_lqThresholdEvent_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_BOOL wmi_is_bitrate_index_valid(struct wmi_t *wmip, A_INT32 rateIndex); static A_STATUS wmi_aplistEvent_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_dbglog_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_keepalive_reply_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); A_STATUS wmi_cmd_send_xtnd(struct wmi_t *wmip, void *osbuf, WMIX_COMMAND_ID cmdId, WMI_SYNC_FLAG syncflag); A_UINT8 ar6000_get_upper_threshold(A_INT16 rssi, SQ_THRESHOLD_PARAMS *sq_thresh, A_UINT32 size); A_UINT8 ar6000_get_lower_threshold(A_INT16 rssi, SQ_THRESHOLD_PARAMS *sq_thresh, A_UINT32 size); void wmi_cache_configure_rssithreshold(struct wmi_t *wmip, WMI_RSSI_THRESHOLD_PARAMS_CMD *rssiCmd); void wmi_cache_configure_snrthreshold(struct wmi_t *wmip, WMI_SNR_THRESHOLD_PARAMS_CMD *snrCmd); static A_STATUS wmi_send_rssi_threshold_params(struct wmi_t *wmip, WMI_RSSI_THRESHOLD_PARAMS_CMD *rssiCmd); static A_STATUS wmi_send_snr_threshold_params(struct wmi_t *wmip, WMI_SNR_THRESHOLD_PARAMS_CMD *snrCmd); #if defined(CONFIG_TARGET_PROFILE_SUPPORT) static A_STATUS wmi_prof_count_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); #endif /* CONFIG_TARGET_PROFILE_SUPPORT */ static A_STATUS wmi_pspoll_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_dtimexpiry_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_peer_node_event_rx (struct wmi_t *wmip, A_UINT8 *datap, int len); #ifdef ATH_AR6K_11N_SUPPORT static A_STATUS wmi_addba_req_event_rx(struct wmi_t *, A_UINT8 *, int); static A_STATUS wmi_addba_resp_event_rx(struct wmi_t 
*, A_UINT8 *, int); static A_STATUS wmi_delba_req_event_rx(struct wmi_t *, A_UINT8 *, int); static A_STATUS wmi_btcoex_config_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); static A_STATUS wmi_btcoex_stats_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); #endif static A_STATUS wmi_hci_event_rx(struct wmi_t *, A_UINT8 *, int); #ifdef WAPI_ENABLE static A_STATUS wmi_wapi_rekey_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len); #endif #if defined(UNDER_CE) #if defined(NDIS51_MINIPORT) unsigned int processDot11Hdr = 0; #else unsigned int processDot11Hdr = 1; #endif #else extern unsigned int processDot11Hdr; #endif int wps_enable; static const A_INT32 wmi_rateTable[][2] = { //{W/O SGI, with SGI} {1000, 1000}, {2000, 2000}, {5500, 5500}, {11000, 11000}, {6000, 6000}, {9000, 9000}, {12000, 12000}, {18000, 18000}, {24000, 24000}, {36000, 36000}, {48000, 48000}, {54000, 54000}, {6500, 7200}, {13000, 14400}, {19500, 21700}, {26000, 28900}, {39000, 43300}, {52000, 57800}, {58500, 65000}, {65000, 72200}, {13500, 15000}, {27000, 30000}, {40500, 45000}, {54000, 60000}, {81000, 90000}, {108000, 120000}, {121500, 135000}, {135000, 150000}, {0, 0}}; #define MODE_A_SUPPORT_RATE_START ((A_INT32) 4) #define MODE_A_SUPPORT_RATE_STOP ((A_INT32) 11) #define MODE_GONLY_SUPPORT_RATE_START MODE_A_SUPPORT_RATE_START #define MODE_GONLY_SUPPORT_RATE_STOP MODE_A_SUPPORT_RATE_STOP #define MODE_B_SUPPORT_RATE_START ((A_INT32) 0) #define MODE_B_SUPPORT_RATE_STOP ((A_INT32) 3) #define MODE_G_SUPPORT_RATE_START ((A_INT32) 0) #define MODE_G_SUPPORT_RATE_STOP ((A_INT32) 11) #define MODE_GHT20_SUPPORT_RATE_START ((A_INT32) 0) #define MODE_GHT20_SUPPORT_RATE_STOP ((A_INT32) 19) #define MAX_NUMBER_OF_SUPPORT_RATES (MODE_GHT20_SUPPORT_RATE_STOP + 1) /* 802.1d to AC mapping. 
Refer pg 57 of WMM-test-plan-v1.2 */ const A_UINT8 up_to_ac[]= { WMM_AC_BE, WMM_AC_BK, WMM_AC_BK, WMM_AC_BE, WMM_AC_VI, WMM_AC_VI, WMM_AC_VO, WMM_AC_VO, }; #include "athstartpack.h" /* This stuff is used when we want a simple layer-3 visibility */ typedef PREPACK struct _iphdr { A_UINT8 ip_ver_hdrlen; /* version and hdr length */ A_UINT8 ip_tos; /* type of service */ A_UINT16 ip_len; /* total length */ A_UINT16 ip_id; /* identification */ A_INT16 ip_off; /* fragment offset field */ #define IP_DF 0x4000 /* dont fragment flag */ #define IP_MF 0x2000 /* more fragments flag */ #define IP_OFFMASK 0x1fff /* mask for fragmenting bits */ A_UINT8 ip_ttl; /* time to live */ A_UINT8 ip_p; /* protocol */ A_UINT16 ip_sum; /* checksum */ A_UINT8 ip_src[4]; /* source and dest address */ A_UINT8 ip_dst[4]; } POSTPACK iphdr; #include "athendpack.h" A_INT16 rssi_event_value = 0; A_INT16 snr_event_value = 0; A_BOOL is_probe_ssid = FALSE; void * wmi_init(void *devt) { struct wmi_t *wmip; A_REGISTER_MODULE_DEBUG_INFO(wmi); wmip = A_MALLOC(sizeof(struct wmi_t)); if (wmip == NULL) { return (NULL); } A_MEMZERO(wmip, sizeof(*wmip)); A_MUTEX_INIT(&wmip->wmi_lock); wmip->wmi_devt = devt; wlan_node_table_init(wmip, &wmip->wmi_scan_table); wmi_qos_state_init(wmip); wmip->wmi_powerMode = REC_POWER; wmip->wmi_phyMode = WMI_11G_MODE; wmip->wmi_pair_crypto_type = NONE_CRYPT; wmip->wmi_grp_crypto_type = NONE_CRYPT; wmip->wmi_ht_allowed[A_BAND_24GHZ] = 1; wmip->wmi_ht_allowed[A_BAND_5GHZ] = 1; return (wmip); } void wmi_qos_state_init(struct wmi_t *wmip) { A_UINT8 i; if (wmip == NULL) { return; } LOCK_WMI(wmip); /* Initialize QoS States */ wmip->wmi_numQoSStream = 0; wmip->wmi_fatPipeExists = 0; for (i=0; i < WMM_NUM_AC; i++) { wmip->wmi_streamExistsForAC[i]=0; } UNLOCK_WMI(wmip); A_WMI_SET_NUMDATAENDPTS(wmip->wmi_devt, 1); } void wmi_set_control_ep(struct wmi_t * wmip, HTC_ENDPOINT_ID eid) { A_ASSERT( eid != ENDPOINT_UNUSED); wmip->wmi_endpoint_id = eid; } HTC_ENDPOINT_ID wmi_get_control_ep(struct wmi_t * wmip) { return(wmip->wmi_endpoint_id); } void wmi_shutdown(struct wmi_t *wmip) { if (wmip != NULL) { wlan_node_table_cleanup(&wmip->wmi_scan_table); if (A_IS_MUTEX_VALID(&wmip->wmi_lock)) { A_MUTEX_DELETE(&wmip->wmi_lock); } A_FREE(wmip); } } /* * performs DIX to 802.3 encapsulation for transmit packets. * uses passed in buffer. Returns buffer or NULL if failed. * Assumes the entire DIX header is contigous and that there is * enough room in the buffer for a 802.3 mac header and LLC+SNAP headers. 
*/ A_STATUS wmi_dix_2_dot3(struct wmi_t *wmip, void *osbuf) { A_UINT8 *datap; A_UINT16 typeorlen; ATH_MAC_HDR macHdr; ATH_LLC_SNAP_HDR *llcHdr; A_ASSERT(osbuf != NULL); if (A_NETBUF_HEADROOM(osbuf) < (sizeof(ATH_LLC_SNAP_HDR) + sizeof(WMI_DATA_HDR))) { return A_NO_MEMORY; } datap = A_NETBUF_DATA(osbuf); typeorlen = *(A_UINT16 *)(datap + ATH_MAC_LEN + ATH_MAC_LEN); if (!IS_ETHERTYPE(A_BE2CPU16(typeorlen))) { /* * packet is already in 802.3 format - return success */ A_DPRINTF(DBG_WMI, (DBGFMT "packet already 802.3\n", DBGARG)); return (A_OK); } /* * Save mac fields and length to be inserted later */ A_MEMCPY(macHdr.dstMac, datap, ATH_MAC_LEN); A_MEMCPY(macHdr.srcMac, datap + ATH_MAC_LEN, ATH_MAC_LEN); macHdr.typeOrLen = A_CPU2BE16(A_NETBUF_LEN(osbuf) - sizeof(ATH_MAC_HDR) + sizeof(ATH_LLC_SNAP_HDR)); /* * Make room for LLC+SNAP headers */ if (A_NETBUF_PUSH(osbuf, sizeof(ATH_LLC_SNAP_HDR)) != A_OK) { return A_NO_MEMORY; } datap = A_NETBUF_DATA(osbuf); A_MEMCPY(datap, &macHdr, sizeof (ATH_MAC_HDR)); llcHdr = (ATH_LLC_SNAP_HDR *)(datap + sizeof(ATH_MAC_HDR)); llcHdr->dsap = 0xAA; llcHdr->ssap = 0xAA; llcHdr->cntl = 0x03; llcHdr->orgCode[0] = 0x0; llcHdr->orgCode[1] = 0x0; llcHdr->orgCode[2] = 0x0; llcHdr->etherType = typeorlen; return (A_OK); } A_STATUS wmi_meta_add(struct wmi_t *wmip, void *osbuf, A_UINT8 *pVersion,void *pTxMetaS) { switch(*pVersion){ case 0: return (A_OK); case WMI_META_VERSION_1: { WMI_TX_META_V1 *pV1= NULL; A_ASSERT(osbuf != NULL); if (A_NETBUF_PUSH(osbuf, WMI_MAX_TX_META_SZ) != A_OK) { return A_NO_MEMORY; } pV1 = (WMI_TX_META_V1 *)A_NETBUF_DATA(osbuf); /* the pktID is used in conjunction with txComplete messages * allowing the target to notify which tx requests have been * completed and how. */ pV1->pktID = 0; /* the ratePolicyID allows the host to specify which rate policy * to use for transmitting this packet. 0 means use default behavior. */ pV1->ratePolicyID = 0; A_ASSERT(pVersion != NULL); /* the version must be used to populate the meta field of the WMI_DATA_HDR */ *pVersion = WMI_META_VERSION_1; return (A_OK); } #ifdef CONFIG_CHECKSUM_OFFLOAD case WMI_META_VERSION_2: { WMI_TX_META_V2 *pV2 ; A_ASSERT(osbuf != NULL); if (A_NETBUF_PUSH(osbuf, WMI_MAX_TX_META_SZ) != A_OK) { return A_NO_MEMORY; } pV2 = (WMI_TX_META_V2 *)A_NETBUF_DATA(osbuf); A_MEMCPY(pV2,(WMI_TX_META_V2 *)pTxMetaS,sizeof(WMI_TX_META_V2)); return (A_OK); } #endif default: return (A_OK); } } /* Adds a WMI data header */ A_STATUS wmi_data_hdr_add(struct wmi_t *wmip, void *osbuf, A_UINT8 msgType, A_BOOL bMoreData, WMI_DATA_HDR_DATA_TYPE data_type,A_UINT8 metaVersion, void *pTxMetaS) { WMI_DATA_HDR *dtHdr; // A_UINT8 metaVersion = 0; A_STATUS status; A_ASSERT(osbuf != NULL); /* adds the meta data field after the wmi data hdr. If metaVersion * is returns 0 then no meta field was added. 
*/ if ((status = wmi_meta_add(wmip, osbuf, &metaVersion,pTxMetaS)) != A_OK) { return status; } if (A_NETBUF_PUSH(osbuf, sizeof(WMI_DATA_HDR)) != A_OK) { return A_NO_MEMORY; } dtHdr = (WMI_DATA_HDR *)A_NETBUF_DATA(osbuf); A_MEMZERO(dtHdr, sizeof(WMI_DATA_HDR)); WMI_DATA_HDR_SET_MSG_TYPE(dtHdr, msgType); WMI_DATA_HDR_SET_DATA_TYPE(dtHdr, data_type); if (bMoreData) { WMI_DATA_HDR_SET_MORE_BIT(dtHdr); } WMI_DATA_HDR_SET_META(dtHdr, metaVersion); //dtHdr->rssi = 0; return (A_OK); } A_UINT8 wmi_implicit_create_pstream(struct wmi_t *wmip, void *osbuf, A_UINT32 layer2Priority, A_BOOL wmmEnabled) { A_UINT8 *datap; A_UINT8 trafficClass = WMM_AC_BE; A_UINT16 ipType = IP_ETHERTYPE; WMI_DATA_HDR *dtHdr; A_BOOL streamExists = FALSE; A_UINT8 userPriority; A_UINT32 hdrsize, metasize; ATH_LLC_SNAP_HDR *llcHdr; WMI_CREATE_PSTREAM_CMD cmd; A_ASSERT(osbuf != NULL); // // Initialize header size // hdrsize = 0; datap = A_NETBUF_DATA(osbuf); dtHdr = (WMI_DATA_HDR *)datap; metasize = (WMI_DATA_HDR_GET_META(dtHdr))? WMI_MAX_TX_META_SZ : 0; if (!wmmEnabled) { /* If WMM is disabled all traffic goes as BE traffic */ userPriority = 0; } else { if (processDot11Hdr) { hdrsize = A_ROUND_UP(sizeof(struct ieee80211_qosframe),sizeof(A_UINT32)); llcHdr = (ATH_LLC_SNAP_HDR *)(datap + sizeof(WMI_DATA_HDR) + metasize + hdrsize); } else { llcHdr = (ATH_LLC_SNAP_HDR *)(datap + sizeof(WMI_DATA_HDR) + metasize + sizeof(ATH_MAC_HDR)); } if (llcHdr->etherType == A_CPU2BE16(ipType)) { /* Extract the endpoint info from the TOS field in the IP header */ userPriority = wmi_determine_userPriority (((A_UINT8 *)llcHdr) + sizeof(ATH_LLC_SNAP_HDR),layer2Priority); } else { userPriority = layer2Priority & 0x7; } } trafficClass = convert_userPriority_to_trafficClass(userPriority); WMI_DATA_HDR_SET_UP(dtHdr, userPriority); /* lower 3-bits are 802.1d priority */ //dtHdr->info |= (userPriority & WMI_DATA_HDR_UP_MASK) << WMI_DATA_HDR_UP_SHIFT; LOCK_WMI(wmip); streamExists = wmip->wmi_fatPipeExists; UNLOCK_WMI(wmip); if (!(streamExists & (1 << trafficClass))) { A_MEMZERO(&cmd, sizeof(cmd)); cmd.trafficClass = trafficClass; cmd.userPriority = userPriority; cmd.inactivityInt = WMI_IMPLICIT_PSTREAM_INACTIVITY_INT; /* Implicit streams are created with TSID 0xFF */ cmd.tsid = WMI_IMPLICIT_PSTREAM; wmi_create_pstream_cmd(wmip, &cmd); } return trafficClass; } A_STATUS wmi_dot11_hdr_add (struct wmi_t *wmip, void *osbuf, NETWORK_TYPE mode) { A_UINT8 *datap; A_UINT16 typeorlen; ATH_MAC_HDR macHdr; ATH_LLC_SNAP_HDR *llcHdr; struct ieee80211_frame *wh; A_UINT32 hdrsize; A_ASSERT(osbuf != NULL); if (A_NETBUF_HEADROOM(osbuf) < (sizeof(struct ieee80211_qosframe) + sizeof(ATH_LLC_SNAP_HDR) + sizeof(WMI_DATA_HDR))) { return A_NO_MEMORY; } datap = A_NETBUF_DATA(osbuf); typeorlen = *(A_UINT16 *)(datap + ATH_MAC_LEN + ATH_MAC_LEN); if (!IS_ETHERTYPE(A_BE2CPU16(typeorlen))) { /* * packet is already in 802.3 format - return success */ A_DPRINTF(DBG_WMI, (DBGFMT "packet already 802.3\n", DBGARG)); goto AddDot11Hdr; } /* * Save mac fields and length to be inserted later */ A_MEMCPY(macHdr.dstMac, datap, ATH_MAC_LEN); A_MEMCPY(macHdr.srcMac, datap + ATH_MAC_LEN, ATH_MAC_LEN); macHdr.typeOrLen = A_CPU2BE16(A_NETBUF_LEN(osbuf) - sizeof(ATH_MAC_HDR) + sizeof(ATH_LLC_SNAP_HDR)); // Remove the Ethernet hdr A_NETBUF_PULL(osbuf, sizeof(ATH_MAC_HDR)); /* * Make room for LLC+SNAP headers */ if (A_NETBUF_PUSH(osbuf, sizeof(ATH_LLC_SNAP_HDR)) != A_OK) { return A_NO_MEMORY; } datap = A_NETBUF_DATA(osbuf); llcHdr = (ATH_LLC_SNAP_HDR *)(datap); llcHdr->dsap = 0xAA; llcHdr->ssap = 0xAA; 
llcHdr->cntl = 0x03; llcHdr->orgCode[0] = 0x0; llcHdr->orgCode[1] = 0x0; llcHdr->orgCode[2] = 0x0; llcHdr->etherType = typeorlen; AddDot11Hdr: /* Make room for 802.11 hdr */ if (wmip->wmi_is_wmm_enabled) { hdrsize = A_ROUND_UP(sizeof(struct ieee80211_qosframe),sizeof(A_UINT32)); if (A_NETBUF_PUSH(osbuf, hdrsize) != A_OK) { return A_NO_MEMORY; } wh = (struct ieee80211_frame *) A_NETBUF_DATA(osbuf); wh->i_fc[0] = IEEE80211_FC0_SUBTYPE_QOS; } else { hdrsize = A_ROUND_UP(sizeof(struct ieee80211_frame),sizeof(A_UINT32)); if (A_NETBUF_PUSH(osbuf, hdrsize) != A_OK) { return A_NO_MEMORY; } wh = (struct ieee80211_frame *) A_NETBUF_DATA(osbuf); wh->i_fc[0] = IEEE80211_FC0_SUBTYPE_DATA; } /* Setup the SA & DA */ IEEE80211_ADDR_COPY(wh->i_addr2, macHdr.srcMac); if (mode == INFRA_NETWORK) { IEEE80211_ADDR_COPY(wh->i_addr3, macHdr.dstMac); } else if (mode == ADHOC_NETWORK) { IEEE80211_ADDR_COPY(wh->i_addr1, macHdr.dstMac); } return (A_OK); } A_STATUS wmi_dot11_hdr_remove(struct wmi_t *wmip, void *osbuf) { A_UINT8 *datap; struct ieee80211_frame *pwh,wh; A_UINT8 type,subtype; ATH_LLC_SNAP_HDR *llcHdr; ATH_MAC_HDR macHdr; A_UINT32 hdrsize; A_ASSERT(osbuf != NULL); datap = A_NETBUF_DATA(osbuf); pwh = (struct ieee80211_frame *)datap; type = pwh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; subtype = pwh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; A_MEMCPY((A_UINT8 *)&wh, datap, sizeof(struct ieee80211_frame)); /* strip off the 802.11 hdr*/ if (subtype == IEEE80211_FC0_SUBTYPE_QOS) { hdrsize = A_ROUND_UP(sizeof(struct ieee80211_qosframe),sizeof(A_UINT32)); A_NETBUF_PULL(osbuf, hdrsize); } else if (subtype == IEEE80211_FC0_SUBTYPE_DATA) { A_NETBUF_PULL(osbuf, sizeof(struct ieee80211_frame)); } datap = A_NETBUF_DATA(osbuf); llcHdr = (ATH_LLC_SNAP_HDR *)(datap); macHdr.typeOrLen = llcHdr->etherType; switch (wh.i_fc[1] & IEEE80211_FC1_DIR_MASK) { case IEEE80211_FC1_DIR_NODS: IEEE80211_ADDR_COPY(macHdr.dstMac, wh.i_addr1); IEEE80211_ADDR_COPY(macHdr.srcMac, wh.i_addr2); break; case IEEE80211_FC1_DIR_TODS: IEEE80211_ADDR_COPY(macHdr.dstMac, wh.i_addr3); IEEE80211_ADDR_COPY(macHdr.srcMac, wh.i_addr2); break; case IEEE80211_FC1_DIR_FROMDS: IEEE80211_ADDR_COPY(macHdr.dstMac, wh.i_addr1); IEEE80211_ADDR_COPY(macHdr.srcMac, wh.i_addr3); break; case IEEE80211_FC1_DIR_DSTODS: break; } // Remove the LLC Hdr. A_NETBUF_PULL(osbuf, sizeof(ATH_LLC_SNAP_HDR)); // Insert the ATH MAC hdr. A_NETBUF_PUSH(osbuf, sizeof(ATH_MAC_HDR)); datap = A_NETBUF_DATA(osbuf); A_MEMCPY (datap, &macHdr, sizeof(ATH_MAC_HDR)); return A_OK; } /* * performs 802.3 to DIX encapsulation for received packets. * Assumes the entire 802.3 header is contigous. */ A_STATUS wmi_dot3_2_dix(void *osbuf) { A_UINT8 *datap; ATH_MAC_HDR macHdr; ATH_LLC_SNAP_HDR *llcHdr; A_ASSERT(osbuf != NULL); datap = A_NETBUF_DATA(osbuf); A_MEMCPY(&macHdr, datap, sizeof(ATH_MAC_HDR)); llcHdr = (ATH_LLC_SNAP_HDR *)(datap + sizeof(ATH_MAC_HDR)); macHdr.typeOrLen = llcHdr->etherType; if (A_NETBUF_PULL(osbuf, sizeof(ATH_LLC_SNAP_HDR)) != A_OK) { return A_NO_MEMORY; } datap = A_NETBUF_DATA(osbuf); A_MEMCPY(datap, &macHdr, sizeof (ATH_MAC_HDR)); return (A_OK); } /* * Removes a WMI data header */ A_STATUS wmi_data_hdr_remove(struct wmi_t *wmip, void *osbuf) { A_ASSERT(osbuf != NULL); return (A_NETBUF_PULL(osbuf, sizeof(WMI_DATA_HDR))); } void wmi_iterate_nodes(struct wmi_t *wmip, wlan_node_iter_func *f, void *arg) { wlan_iterate_nodes(&wmip->wmi_scan_table, f, arg); } /* * WMI Extended Event received from Target. 
*/ A_STATUS wmi_control_rx_xtnd(struct wmi_t *wmip, void *osbuf) { WMIX_CMD_HDR *cmd; A_UINT16 id; A_UINT8 *datap; A_UINT32 len; A_STATUS status = A_OK; if (A_NETBUF_LEN(osbuf) < sizeof(WMIX_CMD_HDR)) { A_DPRINTF(DBG_WMI, (DBGFMT "bad packet 1\n", DBGARG)); wmip->wmi_stats.cmd_len_err++; return A_ERROR; } cmd = (WMIX_CMD_HDR *)A_NETBUF_DATA(osbuf); id = cmd->commandId; if (A_NETBUF_PULL(osbuf, sizeof(WMIX_CMD_HDR)) != A_OK) { A_DPRINTF(DBG_WMI, (DBGFMT "bad packet 2\n", DBGARG)); wmip->wmi_stats.cmd_len_err++; return A_ERROR; } datap = A_NETBUF_DATA(osbuf); len = A_NETBUF_LEN(osbuf); switch (id) { case (WMIX_DSETOPENREQ_EVENTID): status = wmi_dset_open_req_rx(wmip, datap, len); break; #ifdef CONFIG_HOST_DSET_SUPPORT case (WMIX_DSETCLOSE_EVENTID): status = wmi_dset_close_rx(wmip, datap, len); break; case (WMIX_DSETDATAREQ_EVENTID): status = wmi_dset_data_req_rx(wmip, datap, len); break; #endif /* CONFIG_HOST_DSET_SUPPORT */ #ifdef CONFIG_HOST_GPIO_SUPPORT case (WMIX_GPIO_INTR_EVENTID): wmi_gpio_intr_rx(wmip, datap, len); break; case (WMIX_GPIO_DATA_EVENTID): wmi_gpio_data_rx(wmip, datap, len); break; case (WMIX_GPIO_ACK_EVENTID): wmi_gpio_ack_rx(wmip, datap, len); break; #endif /* CONFIG_HOST_GPIO_SUPPORT */ case (WMIX_HB_CHALLENGE_RESP_EVENTID): wmi_hbChallengeResp_rx(wmip, datap, len); break; case (WMIX_DBGLOG_EVENTID): wmi_dbglog_event_rx(wmip, datap, len); break; #if defined(CONFIG_TARGET_PROFILE_SUPPORT) case (WMIX_PROF_COUNT_EVENTID): wmi_prof_count_rx(wmip, datap, len); break; #endif /* CONFIG_TARGET_PROFILE_SUPPORT */ default: A_DPRINTF(DBG_WMI|DBG_ERROR, (DBGFMT "Unknown id 0x%x\n", DBGARG, id)); wmip->wmi_stats.cmd_id_err++; status = A_ERROR; break; } return status; } /* * Control Path */ A_UINT32 cmdRecvNum; A_STATUS wmi_control_rx(struct wmi_t *wmip, void *osbuf) { WMI_CMD_HDR *cmd; A_UINT16 id; A_UINT8 *datap; A_UINT32 len, i, loggingReq; A_STATUS status = A_OK; A_ASSERT(osbuf != NULL); if (A_NETBUF_LEN(osbuf) < sizeof(WMI_CMD_HDR)) { A_NETBUF_FREE(osbuf); A_DPRINTF(DBG_WMI, (DBGFMT "bad packet 1\n", DBGARG)); wmip->wmi_stats.cmd_len_err++; return A_ERROR; } cmd = (WMI_CMD_HDR *)A_NETBUF_DATA(osbuf); id = cmd->commandId; if (A_NETBUF_PULL(osbuf, sizeof(WMI_CMD_HDR)) != A_OK) { A_NETBUF_FREE(osbuf); A_DPRINTF(DBG_WMI, (DBGFMT "bad packet 2\n", DBGARG)); wmip->wmi_stats.cmd_len_err++; return A_ERROR; } datap = A_NETBUF_DATA(osbuf); len = A_NETBUF_LEN(osbuf); loggingReq = 0; ar6000_get_driver_cfg(wmip->wmi_devt, AR6000_DRIVER_CFG_LOG_RAW_WMI_MSGS, &loggingReq); if(loggingReq) { AR_DEBUG_PRINTF(ATH_DEBUG_WMI, ("WMI %d \n",id)); AR_DEBUG_PRINTF(ATH_DEBUG_WMI, ("WMI recv, MsgNo %d : ", cmdRecvNum)); for(i = 0; i < len; i++) AR_DEBUG_PRINTF(ATH_DEBUG_WMI, ("%x ", datap[i])); AR_DEBUG_PRINTF(ATH_DEBUG_WMI, ("\n")); } LOCK_WMI(wmip); cmdRecvNum++; UNLOCK_WMI(wmip); switch (id) { case (WMI_GET_BITRATE_CMDID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_GET_BITRATE_CMDID\n", DBGARG)); status = wmi_bitrate_reply_rx(wmip, datap, len); break; case (WMI_GET_CHANNEL_LIST_CMDID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_GET_CHANNEL_LIST_CMDID\n", DBGARG)); status = wmi_channelList_reply_rx(wmip, datap, len); break; case (WMI_GET_TX_PWR_CMDID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_GET_TX_PWR_CMDID\n", DBGARG)); status = wmi_txPwr_reply_rx(wmip, datap, len); break; case (WMI_READY_EVENTID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_READY_EVENTID\n", DBGARG)); status = wmi_ready_event_rx(wmip, datap, len); A_WMI_SEND_EVENT_TO_APP(wmip->wmi_devt, id, datap, len); A_WMI_DBGLOG_INIT_DONE(wmip->wmi_devt); break; case 
(WMI_CONNECT_EVENTID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_CONNECT_EVENTID\n", DBGARG)); status = wmi_connect_event_rx(wmip, datap, len); A_WMI_SEND_GENERIC_EVENT_TO_APP(wmip->wmi_devt, id, datap, len); break; case (WMI_DISCONNECT_EVENTID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_DISCONNECT_EVENTID\n", DBGARG)); status = wmi_disconnect_event_rx(wmip, datap, len); A_WMI_SEND_EVENT_TO_APP(wmip->wmi_devt, id, datap, len); break; case (WMI_PEER_NODE_EVENTID): A_DPRINTF (DBG_WMI, (DBGFMT "WMI_PEER_NODE_EVENTID\n", DBGARG)); status = wmi_peer_node_event_rx(wmip, datap, len); A_WMI_SEND_EVENT_TO_APP(wmip->wmi_devt, id, datap, len); break; case (WMI_TKIP_MICERR_EVENTID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_TKIP_MICERR_EVENTID\n", DBGARG)); status = wmi_tkip_micerr_event_rx(wmip, datap, len); break; case (WMI_BSSINFO_EVENTID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_BSSINFO_EVENTID\n", DBGARG)); { /* * convert WMI_BSS_INFO_HDR2 to WMI_BSS_INFO_HDR * Take a local copy of the WMI_BSS_INFO_HDR2 from the wmi buffer * and reconstruct the WMI_BSS_INFO_HDR in its place */ WMI_BSS_INFO_HDR2 bih2; WMI_BSS_INFO_HDR *bih; A_MEMCPY(&bih2, datap, sizeof(WMI_BSS_INFO_HDR2)); A_NETBUF_PUSH(osbuf, 4); datap = A_NETBUF_DATA(osbuf); len = A_NETBUF_LEN(osbuf); bih = (WMI_BSS_INFO_HDR *)datap; bih->channel = bih2.channel; bih->frameType = bih2.frameType; bih->snr = bih2.snr; bih->rssi = bih2.snr - 95; bih->ieMask = bih2.ieMask; A_MEMCPY(bih->bssid, bih2.bssid, ATH_MAC_LEN); status = wmi_bssInfo_event_rx(wmip, datap, len); A_WMI_SEND_GENERIC_EVENT_TO_APP(wmip->wmi_devt, id, datap, len); } break; case (WMI_REGDOMAIN_EVENTID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_REGDOMAIN_EVENTID\n", DBGARG)); status = wmi_regDomain_event_rx(wmip, datap, len); break; case (WMI_PSTREAM_TIMEOUT_EVENTID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_PSTREAM_TIMEOUT_EVENTID\n", DBGARG)); status = wmi_pstream_timeout_event_rx(wmip, datap, len); /* pstreams are fatpipe abstractions that get implicitly created. * User apps only deal with thinstreams. creation of a thinstream * by the user or data traffic flow in an AC triggers implicit * pstream creation. Do we need to send this event to App..? * no harm in sending it. 
*/ A_WMI_SEND_EVENT_TO_APP(wmip->wmi_devt, id, datap, len); break; case (WMI_NEIGHBOR_REPORT_EVENTID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_NEIGHBOR_REPORT_EVENTID\n", DBGARG)); status = wmi_neighborReport_event_rx(wmip, datap, len); break; case (WMI_SCAN_COMPLETE_EVENTID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_SCAN_COMPLETE_EVENTID\n", DBGARG)); status = wmi_scanComplete_rx(wmip, datap, len); A_WMI_SEND_EVENT_TO_APP(wmip->wmi_devt, id, datap, len); break; case (WMI_CMDERROR_EVENTID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_CMDERROR_EVENTID\n", DBGARG)); status = wmi_errorEvent_rx(wmip, datap, len); break; case (WMI_REPORT_STATISTICS_EVENTID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_REPORT_STATISTICS_EVENTID\n", DBGARG)); status = wmi_statsEvent_rx(wmip, datap, len); break; case (WMI_RSSI_THRESHOLD_EVENTID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_RSSI_THRESHOLD_EVENTID\n", DBGARG)); status = wmi_rssiThresholdEvent_rx(wmip, datap, len); break; case (WMI_ERROR_REPORT_EVENTID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_ERROR_REPORT_EVENTID\n", DBGARG)); status = wmi_reportErrorEvent_rx(wmip, datap, len); A_WMI_SEND_EVENT_TO_APP(wmip->wmi_devt, id, datap, len); break; case (WMI_OPT_RX_FRAME_EVENTID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_OPT_RX_FRAME_EVENTID\n", DBGARG)); status = wmi_opt_frame_event_rx(wmip, datap, len); break; case (WMI_REPORT_ROAM_TBL_EVENTID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_REPORT_ROAM_TBL_EVENTID\n", DBGARG)); status = wmi_roam_tbl_event_rx(wmip, datap, len); break; case (WMI_EXTENSION_EVENTID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_EXTENSION_EVENTID\n", DBGARG)); status = wmi_control_rx_xtnd(wmip, osbuf); break; case (WMI_CAC_EVENTID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_CAC_EVENTID\n", DBGARG)); status = wmi_cac_event_rx(wmip, datap, len); break; case (WMI_CHANNEL_CHANGE_EVENTID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_CHANNEL_CHANGE_EVENTID\n", DBGARG)); status = wmi_channel_change_event_rx(wmip, datap, len); break; case (WMI_REPORT_ROAM_DATA_EVENTID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_REPORT_ROAM_DATA_EVENTID\n", DBGARG)); status = wmi_roam_data_event_rx(wmip, datap, len); break; #ifdef CONFIG_HOST_TCMD_SUPPORT case (WMI_TEST_EVENTID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_TEST_EVENTID\n", DBGARG)); status = wmi_tcmd_test_report_rx(wmip, datap, len); break; #endif case (WMI_GET_FIXRATES_CMDID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_GET_FIXRATES_CMDID\n", DBGARG)); status = wmi_ratemask_reply_rx(wmip, datap, len); break; case (WMI_TX_RETRY_ERR_EVENTID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_TX_RETRY_ERR_EVENTID\n", DBGARG)); status = wmi_txRetryErrEvent_rx(wmip, datap, len); A_WMI_SEND_EVENT_TO_APP(wmip->wmi_devt, id, datap, len); break; case (WMI_SNR_THRESHOLD_EVENTID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_SNR_THRESHOLD_EVENTID\n", DBGARG)); status = wmi_snrThresholdEvent_rx(wmip, datap, len); break; case (WMI_LQ_THRESHOLD_EVENTID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_LQ_THRESHOLD_EVENTID\n", DBGARG)); status = wmi_lqThresholdEvent_rx(wmip, datap, len); A_WMI_SEND_EVENT_TO_APP(wmip->wmi_devt, id, datap, len); break; case (WMI_APLIST_EVENTID): AR_DEBUG_PRINTF(ATH_DEBUG_WMI, ("Received APLIST Event\n")); status = wmi_aplistEvent_rx(wmip, datap, len); break; case (WMI_GET_KEEPALIVE_CMDID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_GET_KEEPALIVE_CMDID\n", DBGARG)); status = wmi_keepalive_reply_rx(wmip, datap, len); break; case (WMI_GET_WOW_LIST_EVENTID): status = wmi_get_wow_list_event_rx(wmip, datap, len); break; case (WMI_GET_PMKID_LIST_EVENTID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_GET_PMKID_LIST Event\n", DBGARG)); status = wmi_get_pmkid_list_event_rx(wmip, datap, len); 
break; case (WMI_PSPOLL_EVENTID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_PSPOLL_EVENT\n", DBGARG)); status = wmi_pspoll_event_rx(wmip, datap, len); break; case (WMI_DTIMEXPIRY_EVENTID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_DTIMEXPIRY_EVENT\n", DBGARG)); status = wmi_dtimexpiry_event_rx(wmip, datap, len); break; case (WMI_SET_PARAMS_REPLY_EVENTID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_SET_PARAMS_REPLY Event\n", DBGARG)); status = wmi_set_params_event_rx(wmip, datap, len); break; #ifdef ATH_AR6K_11N_SUPPORT case (WMI_ADDBA_REQ_EVENTID): status = wmi_addba_req_event_rx(wmip, datap, len); break; case (WMI_ADDBA_RESP_EVENTID): status = wmi_addba_resp_event_rx(wmip, datap, len); break; case (WMI_DELBA_REQ_EVENTID): status = wmi_delba_req_event_rx(wmip, datap, len); break; case (WMI_REPORT_BTCOEX_CONFIG_EVENTID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_BTCOEX_CONFIG_EVENTID", DBGARG)); status = wmi_btcoex_config_event_rx(wmip, datap, len); break; case (WMI_REPORT_BTCOEX_STATS_EVENTID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_BTCOEX_STATS_EVENTID", DBGARG)); status = wmi_btcoex_stats_event_rx(wmip, datap, len); break; #endif case (WMI_TX_COMPLETE_EVENTID): { int index; TX_COMPLETE_MSG_V1 *pV1; WMI_TX_COMPLETE_EVENT *pEv = (WMI_TX_COMPLETE_EVENT *)datap; A_PRINTF("comp: %d %d %d\n", pEv->numMessages, pEv->msgLen, pEv->msgType); for(index = 0 ; index < pEv->numMessages ; index++) { pV1 = (TX_COMPLETE_MSG_V1 *)(datap + sizeof(WMI_TX_COMPLETE_EVENT) + index*sizeof(TX_COMPLETE_MSG_V1)); A_PRINTF("msg: %d %d %d %d\n", pV1->status, pV1->pktID, pV1->rateIdx, pV1->ackFailures); } } break; case (WMI_HCI_EVENT_EVENTID): status = wmi_hci_event_rx(wmip, datap, len); break; #ifdef WAPI_ENABLE case (WMI_WAPI_REKEY_EVENTID): A_DPRINTF(DBG_WMI, (DBGFMT "WMI_WAPI_REKEY_EVENTID", DBGARG)); status = wmi_wapi_rekey_event_rx(wmip, datap, len); break; #endif default: A_DPRINTF(DBG_WMI|DBG_ERROR, (DBGFMT "Unknown id 0x%x\n", DBGARG, id)); wmip->wmi_stats.cmd_id_err++; status = A_ERROR; break; } A_NETBUF_FREE(osbuf); return status; } /* Send a "simple" wmi command -- one with no arguments */ static A_STATUS wmi_simple_cmd(struct wmi_t *wmip, WMI_COMMAND_ID cmdid) { void *osbuf; osbuf = A_NETBUF_ALLOC(0); if (osbuf == NULL) { return A_NO_MEMORY; } return (wmi_cmd_send(wmip, osbuf, cmdid, NO_SYNC_WMIFLAG)); } /* Send a "simple" extended wmi command -- one with no arguments. Enabling this command only if GPIO or profiling support is enabled. 
This is to suppress warnings on some platforms */ #if defined(CONFIG_HOST_GPIO_SUPPORT) || defined(CONFIG_TARGET_PROFILE_SUPPORT) static A_STATUS wmi_simple_cmd_xtnd(struct wmi_t *wmip, WMIX_COMMAND_ID cmdid) { void *osbuf; osbuf = A_NETBUF_ALLOC(0); if (osbuf == NULL) { return A_NO_MEMORY; } return (wmi_cmd_send_xtnd(wmip, osbuf, cmdid, NO_SYNC_WMIFLAG)); } #endif static A_STATUS wmi_ready_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { WMI_READY_EVENT *ev = (WMI_READY_EVENT *)datap; if (len < sizeof(WMI_READY_EVENT)) { return A_EINVAL; } A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG)); wmip->wmi_ready = TRUE; A_WMI_READY_EVENT(wmip->wmi_devt, ev->macaddr, ev->phyCapability, ev->sw_version, ev->abi_version); return A_OK; } #define LE_READ_4(p) \ ((A_UINT32) \ ((((A_UINT8 *)(p))[0] ) | (((A_UINT8 *)(p))[1] << 8) | \ (((A_UINT8 *)(p))[2] << 16) | (((A_UINT8 *)(p))[3] << 24))) static int __inline iswmmoui(const A_UINT8 *frm) { return frm[1] > 3 && LE_READ_4(frm+2) == ((WMM_OUI_TYPE<<24)|WMM_OUI); } static int __inline iswmmparam(const A_UINT8 *frm) { return frm[1] > 5 && frm[6] == WMM_PARAM_OUI_SUBTYPE; } static A_STATUS wmi_connect_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { WMI_CONNECT_EVENT *ev; A_UINT8 *pie,*peie; if (len < sizeof(WMI_CONNECT_EVENT)) { return A_EINVAL; } ev = (WMI_CONNECT_EVENT *)datap; A_DPRINTF(DBG_WMI, (DBGFMT "freq %d bssid %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", DBGARG, ev->channel, ev->bssid[0], ev->bssid[1], ev->bssid[2], ev->bssid[3], ev->bssid[4], ev->bssid[5])); A_MEMCPY(wmip->wmi_bssid, ev->bssid, ATH_MAC_LEN); /* initialize pointer to start of assoc rsp IEs */ pie = ev->assocInfo + ev->beaconIeLen + ev->assocReqLen + sizeof(A_UINT16) + /* capinfo*/ sizeof(A_UINT16) + /* status Code */ sizeof(A_UINT16) ; /* associd */ /* initialize pointer to end of assoc rsp IEs */ peie = ev->assocInfo + ev->beaconIeLen + ev->assocReqLen + ev->assocRespLen; while (pie < peie) { switch (*pie) { case IEEE80211_ELEMID_VENDOR: if (iswmmoui(pie)) { if(iswmmparam (pie)) { wmip->wmi_is_wmm_enabled = TRUE; } } break; } if (wmip->wmi_is_wmm_enabled) { break; } pie += pie[1] + 2; } A_WMI_CONNECT_EVENT(wmip->wmi_devt, ev->channel, ev->bssid, ev->listenInterval, ev->beaconInterval, (NETWORK_TYPE) ev->networkType, ev->beaconIeLen, ev->assocReqLen, ev->assocRespLen, ev->assocInfo); return A_OK; } static A_STATUS wmi_regDomain_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { WMI_REG_DOMAIN_EVENT *ev; if (len < sizeof(*ev)) { return A_EINVAL; } ev = (WMI_REG_DOMAIN_EVENT *)datap; A_WMI_REGDOMAIN_EVENT(wmip->wmi_devt, ev->regDomain); return A_OK; } static A_STATUS wmi_neighborReport_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { WMI_NEIGHBOR_REPORT_EVENT *ev; int numAps; if (len < sizeof(*ev)) { return A_EINVAL; } ev = (WMI_NEIGHBOR_REPORT_EVENT *)datap; numAps = ev->numberOfAps; if (len < (int)(sizeof(*ev) + ((numAps - 1) * sizeof(WMI_NEIGHBOR_INFO)))) { return A_EINVAL; } A_WMI_NEIGHBORREPORT_EVENT(wmip->wmi_devt, numAps, ev->neighbor); return A_OK; } static A_STATUS wmi_disconnect_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { WMI_DISCONNECT_EVENT *ev; if (len < sizeof(WMI_DISCONNECT_EVENT)) { return A_EINVAL; } A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG)); ev = (WMI_DISCONNECT_EVENT *)datap; A_MEMZERO(wmip->wmi_bssid, sizeof(wmip->wmi_bssid)); wmip->wmi_is_wmm_enabled = FALSE; wmip->wmi_pair_crypto_type = NONE_CRYPT; wmip->wmi_grp_crypto_type = NONE_CRYPT; A_WMI_DISCONNECT_EVENT(wmip->wmi_devt, ev->disconnectReason, ev->bssid, 
                           ev->assocRespLen, ev->assocInfo, ev->protocolReasonStatus);

    return A_OK;
}

static A_STATUS
wmi_peer_node_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
{
    WMI_PEER_NODE_EVENT *ev;

    if (len < sizeof(WMI_PEER_NODE_EVENT)) {
        return A_EINVAL;
    }
    ev = (WMI_PEER_NODE_EVENT *)datap;
    if (ev->eventCode == PEER_NODE_JOIN_EVENT) {
        A_DPRINTF(DBG_WMI, (DBGFMT "Joined node with Macaddr: ", DBGARG));
    } else if (ev->eventCode == PEER_NODE_LEAVE_EVENT) {
        A_DPRINTF(DBG_WMI, (DBGFMT "Left node with Macaddr: ", DBGARG));
    }

    A_WMI_PEER_EVENT(wmip->wmi_devt, ev->eventCode, ev->peerMacAddr);

    return A_OK;
}

static A_STATUS
wmi_tkip_micerr_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
{
    WMI_TKIP_MICERR_EVENT *ev;

    if (len < sizeof(*ev)) {
        return A_EINVAL;
    }
    A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));

    ev = (WMI_TKIP_MICERR_EVENT *)datap;
    A_WMI_TKIP_MICERR_EVENT(wmip->wmi_devt, ev->keyid, ev->ismcast);

    return A_OK;
}

static A_STATUS
wmi_bssInfo_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
{
    bss_t *bss = NULL;
    WMI_BSS_INFO_HDR *bih;
    A_UINT8 *buf;
    A_UINT32 nodeCachingAllowed = 1;
    A_UCHAR cached_ssid_len = 0;
    A_UCHAR cached_ssid_buf[IEEE80211_NWID_LEN] = {0};
    A_UINT8 beacon_ssid_len = 0;

    if (len <= sizeof(WMI_BSS_INFO_HDR)) {
        return A_EINVAL;
    }

    bih = (WMI_BSS_INFO_HDR *)datap;
    bss = wlan_find_node(&wmip->wmi_scan_table, bih->bssid);

    if (bih->rssi > 0) {
        if (NULL == bss) {
            /* no node found in the table, just drop the node with incorrect RSSI */
            return A_OK;
        } else {
            /* Adjust RSSI in datap in case it is used in A_WMI_BSSINFO_EVENT_RX */
            bih->rssi = bss->ni_rssi;
        }
    }

    A_WMI_BSSINFO_EVENT_RX(wmip->wmi_devt, datap, len);
    /* What is the driver config for wlan node caching? */
    if (ar6000_get_driver_cfg(wmip->wmi_devt,
                    AR6000_DRIVER_CFG_GET_WLANNODECACHING,
                    &nodeCachingAllowed) != A_OK) {
        wmi_node_return(wmip, bss);
        return A_EINVAL;
    }

    if (!nodeCachingAllowed) {
        wmi_node_return(wmip, bss);
        return A_OK;
    }

    buf = datap + sizeof(WMI_BSS_INFO_HDR);
    len -= sizeof(WMI_BSS_INFO_HDR);

    A_DPRINTF(DBG_WMI2, (DBGFMT "bssInfo event - ch %u, rssi %02x, "
              "bssid \"%02x:%02x:%02x:%02x:%02x:%02x\"\n", DBGARG,
              bih->channel, (unsigned char) bih->rssi, bih->bssid[0],
              bih->bssid[1], bih->bssid[2], bih->bssid[3], bih->bssid[4],
              bih->bssid[5]));

    if (wps_enable && (bih->frameType == PROBERESP_FTYPE)) {
        wmi_node_return(wmip, bss);
        return A_OK;
    }

    if (bss != NULL) {
        /*
         * Free up the node.  Not the most efficient process given
         * we are about to allocate a new node, but it is simple and should
         * be adequate.
         */

        /* In case of a hidden AP, the beacon will not carry the ssid,
         * but a directed probe response will, so cache the
         * probe-response ssid if it is already present.
         */
        if ((TRUE == is_probe_ssid) && (BEACON_FTYPE == bih->frameType)) {
            A_UCHAR *ie_ssid;

            ie_ssid = bss->ni_cie.ie_ssid;
            if (ie_ssid && (ie_ssid[1] <= IEEE80211_NWID_LEN) && (ie_ssid[2] != 0)) {
                cached_ssid_len = ie_ssid[1];
                memcpy(cached_ssid_buf, ie_ssid + 2, cached_ssid_len);
            }
        }

        /*
         * Reuse the current average RSSI of the associated AP, based on the
         * assumption that:
         * 1. Most OSes with a GUI update the RSSI periodically via
         *    wmi_get_stats_cmd().
         * 2. wmi_get_stats_cmd() is also invoked whenever
         *    wmi_startscan_cmd() is called.
         * The averaged RSSI gives the end user a steadier reading than an
         * instantaneous scan sample, and it keeps the RSSI shown in the
         * scan result consistent with the GUI's signal-strength icon.
         */
        if (bss && IEEE80211_ADDR_EQ(wmip->wmi_bssid, bih->bssid)) {
            bih->rssi = bss->ni_rssi;
            bih->snr  = bss->ni_snr;
        }

        wlan_node_reclaim(&wmip->wmi_scan_table, bss);
    }

    /* beacon/probe response frame format
     *  [8] time stamp
     *  [2] beacon interval
     *  [2] capability information
     *  [tlv] ssid
     */
    beacon_ssid_len = buf[SSID_IE_LEN_INDEX];

    /* If the ssid is cached for this hidden AP, adjust the buffer length accordingly. */
    if ((TRUE == is_probe_ssid) && (BEACON_FTYPE == bih->frameType) &&
        (0 != cached_ssid_len) &&
        (0 == beacon_ssid_len ||
         (cached_ssid_len > beacon_ssid_len && 0 == buf[SSID_IE_LEN_INDEX + 1])))
    {
        len += (cached_ssid_len - beacon_ssid_len);
    }

    bss = wlan_node_alloc(&wmip->wmi_scan_table, len);
    if (bss == NULL) {
        return A_NO_MEMORY;
    }

    bss->ni_snr  = bih->snr;
    bss->ni_rssi = bih->rssi;
    A_ASSERT(bss->ni_buf != NULL);

    /* In case of a hidden AP, the beacon will not carry the ssid,
     * but a directed probe response will, so place the cached
     * (probe-response) ssid in the bssinfo.
     */
    if ((TRUE == is_probe_ssid) && (BEACON_FTYPE == bih->frameType) &&
        (0 != cached_ssid_len) &&
        (0 == beacon_ssid_len ||
         (beacon_ssid_len && 0 == buf[SSID_IE_LEN_INDEX + 1])))
    {
        A_UINT8 *ni_buf = bss->ni_buf;
        int buf_len = len;

        /* copy the first 14 bytes:
         * time-stamp(8), beacon-interval(2), cap-info(2), ssid-id(1), ssid-len(1).
         */
        A_MEMCPY(ni_buf, buf, SSID_IE_LEN_INDEX + 1);

        ni_buf[SSID_IE_LEN_INDEX] = cached_ssid_len;
        ni_buf += (SSID_IE_LEN_INDEX + 1);

        buf += (SSID_IE_LEN_INDEX + 1);
        buf_len -= (SSID_IE_LEN_INDEX + 1);

        /* copy the cached ssid */
        A_MEMCPY(ni_buf, cached_ssid_buf, cached_ssid_len);
        ni_buf += cached_ssid_len;

        buf += beacon_ssid_len;
        buf_len -= beacon_ssid_len;

        if (cached_ssid_len > beacon_ssid_len) {
            buf_len -= (cached_ssid_len - beacon_ssid_len);
        }

        /* now copy the rest of the bytes */
        A_MEMCPY(ni_buf, buf, buf_len);
    } else {
        A_MEMCPY(bss->ni_buf, buf, len);
    }

    bss->ni_framelen = len;
    if (wlan_parse_beacon(bss->ni_buf, len, &bss->ni_cie) != A_OK) {
        wlan_node_free(bss);
        return A_EINVAL;
    }

    /*
     * Update the frequency in ie_chan, overwriting the channel number
     * filled in by wlan_parse_beacon.
     */
    bss->ni_cie.ie_chan = bih->channel;
    wlan_setup_node(&wmip->wmi_scan_table, bss, bih->bssid);

    return A_OK;
}

static A_STATUS
wmi_opt_frame_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
{
    bss_t *bss;
    WMI_OPT_RX_INFO_HDR *bih;
    A_UINT8 *buf;

    if (len <= sizeof(WMI_OPT_RX_INFO_HDR)) {
        return A_EINVAL;
    }

    bih = (WMI_OPT_RX_INFO_HDR *)datap;
    buf = datap + sizeof(WMI_OPT_RX_INFO_HDR);
    len -= sizeof(WMI_OPT_RX_INFO_HDR);

    A_DPRINTF(DBG_WMI2, (DBGFMT "opt frame event %2.2x:%2.2x\n", DBGARG,
              bih->bssid[4], bih->bssid[5]));

    bss = wlan_find_node(&wmip->wmi_scan_table, bih->bssid);
    if (bss != NULL) {
        /*
         * Free up the node.  Not the most efficient process given
         * we are about to allocate a new node, but it is simple and should
         * be adequate.
*/ wlan_node_reclaim(&wmip->wmi_scan_table, bss); } bss = wlan_node_alloc(&wmip->wmi_scan_table, len); if (bss == NULL) { return A_NO_MEMORY; } bss->ni_snr = bih->snr; bss->ni_cie.ie_chan = bih->channel; A_ASSERT(bss->ni_buf != NULL); A_MEMCPY(bss->ni_buf, buf, len); wlan_setup_node(&wmip->wmi_scan_table, bss, bih->bssid); return A_OK; } /* This event indicates inactivity timeout of a fatpipe(pstream) * at the target */ static A_STATUS wmi_pstream_timeout_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { WMI_PSTREAM_TIMEOUT_EVENT *ev; if (len < sizeof(WMI_PSTREAM_TIMEOUT_EVENT)) { return A_EINVAL; } A_DPRINTF(DBG_WMI, (DBGFMT "wmi_pstream_timeout_event_rx\n", DBGARG)); ev = (WMI_PSTREAM_TIMEOUT_EVENT *)datap; /* When the pstream (fat pipe == AC) timesout, it means there were no * thinStreams within this pstream & it got implicitly created due to * data flow on this AC. We start the inactivity timer only for * implicitly created pstream. Just reset the host state. */ /* Set the activeTsids for this AC to 0 */ LOCK_WMI(wmip); wmip->wmi_streamExistsForAC[ev->trafficClass]=0; wmip->wmi_fatPipeExists &= ~(1 << ev->trafficClass); UNLOCK_WMI(wmip); /*Indicate inactivity to driver layer for this fatpipe (pstream)*/ A_WMI_STREAM_TX_INACTIVE(wmip->wmi_devt, ev->trafficClass); return A_OK; } static A_STATUS wmi_bitrate_reply_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { WMI_BIT_RATE_REPLY *reply; A_INT32 rate; A_UINT32 sgi,index; /* 54149: * WMI_BIT_RATE_CMD structure is changed to WMI_BIT_RATE_REPLY. * since there is difference in the length and to avoid returning * error value. */ if (len < sizeof(WMI_BIT_RATE_REPLY)) { return A_EINVAL; } reply = (WMI_BIT_RATE_REPLY *)datap; A_DPRINTF(DBG_WMI, (DBGFMT "Enter - rateindex %d\n", DBGARG, reply->rateIndex)); if (reply->rateIndex == (A_INT8) RATE_AUTO) { rate = RATE_AUTO; } else { // the SGI state is stored as the MSb of the rateIndex index = reply->rateIndex & 0x7f; sgi = (reply->rateIndex & 0x80)? 
1:0; rate = wmi_rateTable[index][sgi]; } A_WMI_BITRATE_RX(wmip->wmi_devt, rate); return A_OK; } static A_STATUS wmi_ratemask_reply_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { WMI_FIX_RATES_REPLY *reply; if (len < sizeof(WMI_FIX_RATES_REPLY)) { return A_EINVAL; } reply = (WMI_FIX_RATES_REPLY *)datap; A_DPRINTF(DBG_WMI, (DBGFMT "Enter - fixed rate mask %x\n", DBGARG, reply->fixRateMask)); A_WMI_RATEMASK_RX(wmip->wmi_devt, reply->fixRateMask); return A_OK; } static A_STATUS wmi_channelList_reply_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { WMI_CHANNEL_LIST_REPLY *reply; if (len < sizeof(WMI_CHANNEL_LIST_REPLY)) { return A_EINVAL; } reply = (WMI_CHANNEL_LIST_REPLY *)datap; A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG)); A_WMI_CHANNELLIST_RX(wmip->wmi_devt, reply->numChannels, reply->channelList); return A_OK; } static A_STATUS wmi_txPwr_reply_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { WMI_TX_PWR_REPLY *reply; if (len < sizeof(*reply)) { return A_EINVAL; } reply = (WMI_TX_PWR_REPLY *)datap; A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG)); A_WMI_TXPWR_RX(wmip->wmi_devt, reply->dbM); return A_OK; } static A_STATUS wmi_keepalive_reply_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { WMI_GET_KEEPALIVE_CMD *reply; if (len < sizeof(*reply)) { return A_EINVAL; } reply = (WMI_GET_KEEPALIVE_CMD *)datap; A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG)); A_WMI_KEEPALIVE_RX(wmip->wmi_devt, reply->configured); return A_OK; } static A_STATUS wmi_dset_open_req_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { WMIX_DSETOPENREQ_EVENT *dsetopenreq; if (len < sizeof(WMIX_DSETOPENREQ_EVENT)) { return A_EINVAL; } dsetopenreq = (WMIX_DSETOPENREQ_EVENT *)datap; A_DPRINTF(DBG_WMI, (DBGFMT "Enter - dset_id=0x%x\n", DBGARG, dsetopenreq->dset_id)); A_WMI_DSET_OPEN_REQ(wmip->wmi_devt, dsetopenreq->dset_id, dsetopenreq->targ_dset_handle, dsetopenreq->targ_reply_fn, dsetopenreq->targ_reply_arg); return A_OK; } #ifdef CONFIG_HOST_DSET_SUPPORT static A_STATUS wmi_dset_close_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { WMIX_DSETCLOSE_EVENT *dsetclose; if (len < sizeof(WMIX_DSETCLOSE_EVENT)) { return A_EINVAL; } A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG)); dsetclose = (WMIX_DSETCLOSE_EVENT *)datap; A_WMI_DSET_CLOSE(wmip->wmi_devt, dsetclose->access_cookie); return A_OK; } static A_STATUS wmi_dset_data_req_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { WMIX_DSETDATAREQ_EVENT *dsetdatareq; if (len < sizeof(WMIX_DSETDATAREQ_EVENT)) { return A_EINVAL; } A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG)); dsetdatareq = (WMIX_DSETDATAREQ_EVENT *)datap; A_WMI_DSET_DATA_REQ(wmip->wmi_devt, dsetdatareq->access_cookie, dsetdatareq->offset, dsetdatareq->length, dsetdatareq->targ_buf, dsetdatareq->targ_reply_fn, dsetdatareq->targ_reply_arg); return A_OK; } #endif /* CONFIG_HOST_DSET_SUPPORT */ static A_STATUS wmi_scanComplete_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { WMI_SCAN_COMPLETE_EVENT *ev; ev = (WMI_SCAN_COMPLETE_EVENT *)datap; #ifndef ATHR_NWIFI if ((A_STATUS)ev->status == A_OK) { wlan_refresh_inactive_nodes(&wmip->wmi_scan_table); } #endif A_WMI_SCANCOMPLETE_EVENT(wmip->wmi_devt, (A_STATUS) ev->status); is_probe_ssid = FALSE; return A_OK; } /* * Target is reporting a programming error. This is for * developer aid only. Target only checks a few common violations * and it is responsibility of host to do all error checking. * Behavior of target after wmi error event is undefined. * A reset is recommended. 
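 * (The handler below, wmi_errorEvent_rx(), only logs the violation;
 * any reset/recovery policy is left to the host driver.)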
*/ static A_STATUS wmi_errorEvent_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { WMI_CMD_ERROR_EVENT *ev; ev = (WMI_CMD_ERROR_EVENT *)datap; AR_DEBUG_PRINTF(ATH_DEBUG_WMI, ("Programming Error: cmd=%d ", ev->commandId)); switch (ev->errorCode) { case (INVALID_PARAM): AR_DEBUG_PRINTF(ATH_DEBUG_WMI, ("Illegal Parameter\n")); break; case (ILLEGAL_STATE): AR_DEBUG_PRINTF(ATH_DEBUG_WMI, ("Illegal State\n")); break; case (INTERNAL_ERROR): AR_DEBUG_PRINTF(ATH_DEBUG_WMI, ("Internal Error\n")); break; } return A_OK; } static A_STATUS wmi_statsEvent_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG)); A_WMI_TARGETSTATS_EVENT(wmip->wmi_devt, datap, len); return A_OK; } static A_STATUS wmi_rssiThresholdEvent_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { WMI_RSSI_THRESHOLD_EVENT *reply; WMI_RSSI_THRESHOLD_VAL newThreshold; WMI_RSSI_THRESHOLD_PARAMS_CMD cmd; SQ_THRESHOLD_PARAMS *sq_thresh = &wmip->wmi_SqThresholdParams[SIGNAL_QUALITY_METRICS_RSSI]; A_UINT8 upper_rssi_threshold, lower_rssi_threshold; A_INT16 rssi; if (len < sizeof(*reply)) { return A_EINVAL; } reply = (WMI_RSSI_THRESHOLD_EVENT *)datap; A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG)); newThreshold = (WMI_RSSI_THRESHOLD_VAL) reply->range; rssi = reply->rssi; /* * Identify the threshold breached and communicate that to the app. After * that install a new set of thresholds based on the signal quality * reported by the target */ if (newThreshold) { /* Upper threshold breached */ if (rssi < sq_thresh->upper_threshold[0]) { A_DPRINTF(DBG_WMI, (DBGFMT "Spurious upper RSSI threshold event: " " %d\n", DBGARG, rssi)); } else if ((rssi < sq_thresh->upper_threshold[1]) && (rssi >= sq_thresh->upper_threshold[0])) { newThreshold = WMI_RSSI_THRESHOLD1_ABOVE; } else if ((rssi < sq_thresh->upper_threshold[2]) && (rssi >= sq_thresh->upper_threshold[1])) { newThreshold = WMI_RSSI_THRESHOLD2_ABOVE; } else if ((rssi < sq_thresh->upper_threshold[3]) && (rssi >= sq_thresh->upper_threshold[2])) { newThreshold = WMI_RSSI_THRESHOLD3_ABOVE; } else if ((rssi < sq_thresh->upper_threshold[4]) && (rssi >= sq_thresh->upper_threshold[3])) { newThreshold = WMI_RSSI_THRESHOLD4_ABOVE; } else if ((rssi < sq_thresh->upper_threshold[5]) && (rssi >= sq_thresh->upper_threshold[4])) { newThreshold = WMI_RSSI_THRESHOLD5_ABOVE; } else if (rssi >= sq_thresh->upper_threshold[5]) { newThreshold = WMI_RSSI_THRESHOLD6_ABOVE; } } else { /* Lower threshold breached */ if (rssi > sq_thresh->lower_threshold[0]) { A_DPRINTF(DBG_WMI, (DBGFMT "Spurious lower RSSI threshold event: " "%d %d\n", DBGARG, rssi, sq_thresh->lower_threshold[0])); } else if ((rssi > sq_thresh->lower_threshold[1]) && (rssi <= sq_thresh->lower_threshold[0])) { newThreshold = WMI_RSSI_THRESHOLD6_BELOW; } else if ((rssi > sq_thresh->lower_threshold[2]) && (rssi <= sq_thresh->lower_threshold[1])) { newThreshold = WMI_RSSI_THRESHOLD5_BELOW; } else if ((rssi > sq_thresh->lower_threshold[3]) && (rssi <= sq_thresh->lower_threshold[2])) { newThreshold = WMI_RSSI_THRESHOLD4_BELOW; } else if ((rssi > sq_thresh->lower_threshold[4]) && (rssi <= sq_thresh->lower_threshold[3])) { newThreshold = WMI_RSSI_THRESHOLD3_BELOW; } else if ((rssi > sq_thresh->lower_threshold[5]) && (rssi <= sq_thresh->lower_threshold[4])) { newThreshold = WMI_RSSI_THRESHOLD2_BELOW; } else if (rssi <= sq_thresh->lower_threshold[5]) { newThreshold = WMI_RSSI_THRESHOLD1_BELOW; } } /* Calculate and install the next set of thresholds */ lower_rssi_threshold = ar6000_get_lower_threshold(rssi, sq_thresh, 
sq_thresh->lower_threshold_valid_count); upper_rssi_threshold = ar6000_get_upper_threshold(rssi, sq_thresh, sq_thresh->upper_threshold_valid_count); /* Issue a wmi command to install the thresholds */ cmd.thresholdAbove1_Val = upper_rssi_threshold; cmd.thresholdBelow1_Val = lower_rssi_threshold; cmd.weight = sq_thresh->weight; cmd.pollTime = sq_thresh->polling_interval; rssi_event_value = rssi; if (wmi_send_rssi_threshold_params(wmip, &cmd) != A_OK) { A_DPRINTF(DBG_WMI, (DBGFMT "Unable to configure the RSSI thresholds\n", DBGARG)); } A_WMI_RSSI_THRESHOLD_EVENT(wmip->wmi_devt, newThreshold, reply->rssi); return A_OK; } static A_STATUS wmi_reportErrorEvent_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { WMI_TARGET_ERROR_REPORT_EVENT *reply; if (len < sizeof(*reply)) { return A_EINVAL; } reply = (WMI_TARGET_ERROR_REPORT_EVENT *)datap; A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG)); A_WMI_REPORT_ERROR_EVENT(wmip->wmi_devt, (WMI_TARGET_ERROR_VAL) reply->errorVal); return A_OK; } static A_STATUS wmi_cac_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { WMI_CAC_EVENT *reply; WMM_TSPEC_IE *tspec_ie; A_UINT16 activeTsids; if (len < sizeof(*reply)) { return A_EINVAL; } reply = (WMI_CAC_EVENT *)datap; A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG)); if ((reply->cac_indication == CAC_INDICATION_ADMISSION_RESP) && (reply->statusCode != TSPEC_STATUS_CODE_ADMISSION_ACCEPTED)) { tspec_ie = (WMM_TSPEC_IE *) &(reply->tspecSuggestion); wmi_delete_pstream_cmd(wmip, reply->ac, (tspec_ie->tsInfo_info >> TSPEC_TSID_S) & TSPEC_TSID_MASK); } else if (reply->cac_indication == CAC_INDICATION_NO_RESP) { A_UINT8 i; /* following assumes that there is only one outstanding ADDTS request when this event is received */ LOCK_WMI(wmip); activeTsids = wmip->wmi_streamExistsForAC[reply->ac]; UNLOCK_WMI(wmip); for (i = 0; i < sizeof(activeTsids) * 8; i++) { if ((activeTsids >> i) & 1) { break; } } if (i < (sizeof(activeTsids) * 8)) { wmi_delete_pstream_cmd(wmip, reply->ac, i); } } /* * Ev#72990: Clear active tsids and Add missing handling * for delete qos stream from AP */ else if (reply->cac_indication == CAC_INDICATION_DELETE) { A_UINT8 tsid = 0; tspec_ie = (WMM_TSPEC_IE *) &(reply->tspecSuggestion); tsid= ((tspec_ie->tsInfo_info >> TSPEC_TSID_S) & TSPEC_TSID_MASK); LOCK_WMI(wmip); wmip->wmi_streamExistsForAC[reply->ac] &= ~(1<<tsid); activeTsids = wmip->wmi_streamExistsForAC[reply->ac]; UNLOCK_WMI(wmip); /* Indicate stream inactivity to driver layer only if all tsids * within this AC are deleted. 
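         * For example, if TSIDs 1 and 3 are active for this AC, then
         * activeTsids is 0x0A; deleting TSID 1 leaves 0x08, which is
         * non-zero, so the fat pipe stays active until TSID 3 is
         * deleted as well.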
*/ if (!activeTsids) { A_WMI_STREAM_TX_INACTIVE(wmip->wmi_devt, reply->ac); wmip->wmi_fatPipeExists &= ~(1 << reply->ac); } } A_WMI_CAC_EVENT(wmip->wmi_devt, reply->ac, reply->cac_indication, reply->statusCode, reply->tspecSuggestion); return A_OK; } static A_STATUS wmi_channel_change_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { WMI_CHANNEL_CHANGE_EVENT *reply; if (len < sizeof(*reply)) { return A_EINVAL; } reply = (WMI_CHANNEL_CHANGE_EVENT *)datap; A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG)); A_WMI_CHANNEL_CHANGE_EVENT(wmip->wmi_devt, reply->oldChannel, reply->newChannel); return A_OK; } static A_STATUS wmi_hbChallengeResp_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { WMIX_HB_CHALLENGE_RESP_EVENT *reply; if (len < sizeof(*reply)) { return A_EINVAL; } reply = (WMIX_HB_CHALLENGE_RESP_EVENT *)datap; A_DPRINTF(DBG_WMI, (DBGFMT "wmi: challenge response event\n", DBGARG)); A_WMI_HBCHALLENGERESP_EVENT(wmip->wmi_devt, reply->cookie, reply->source); return A_OK; } static A_STATUS wmi_roam_tbl_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { WMI_TARGET_ROAM_TBL *reply; if (len < sizeof(*reply)) { return A_EINVAL; } reply = (WMI_TARGET_ROAM_TBL *)datap; A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG)); A_WMI_ROAM_TABLE_EVENT(wmip->wmi_devt, reply); return A_OK; } static A_STATUS wmi_roam_data_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { WMI_TARGET_ROAM_DATA *reply; if (len < sizeof(*reply)) { return A_EINVAL; } reply = (WMI_TARGET_ROAM_DATA *)datap; A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG)); A_WMI_ROAM_DATA_EVENT(wmip->wmi_devt, reply); return A_OK; } static A_STATUS wmi_txRetryErrEvent_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { if (len < sizeof(WMI_TX_RETRY_ERR_EVENT)) { return A_EINVAL; } A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG)); A_WMI_TX_RETRY_ERR_EVENT(wmip->wmi_devt); return A_OK; } static A_STATUS wmi_snrThresholdEvent_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { WMI_SNR_THRESHOLD_EVENT *reply; SQ_THRESHOLD_PARAMS *sq_thresh = &wmip->wmi_SqThresholdParams[SIGNAL_QUALITY_METRICS_SNR]; WMI_SNR_THRESHOLD_VAL newThreshold; WMI_SNR_THRESHOLD_PARAMS_CMD cmd; A_UINT8 upper_snr_threshold, lower_snr_threshold; A_INT16 snr; if (len < sizeof(*reply)) { return A_EINVAL; } reply = (WMI_SNR_THRESHOLD_EVENT *)datap; A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG)); newThreshold = (WMI_SNR_THRESHOLD_VAL) reply->range; snr = reply->snr; /* * Identify the threshold breached and communicate that to the app. 
After * that install a new set of thresholds based on the signal quality * reported by the target */ if (newThreshold) { /* Upper threshold breached */ if (snr < sq_thresh->upper_threshold[0]) { A_DPRINTF(DBG_WMI, (DBGFMT "Spurious upper SNR threshold event: " "%d\n", DBGARG, snr)); } else if ((snr < sq_thresh->upper_threshold[1]) && (snr >= sq_thresh->upper_threshold[0])) { newThreshold = WMI_SNR_THRESHOLD1_ABOVE; } else if ((snr < sq_thresh->upper_threshold[2]) && (snr >= sq_thresh->upper_threshold[1])) { newThreshold = WMI_SNR_THRESHOLD2_ABOVE; } else if ((snr < sq_thresh->upper_threshold[3]) && (snr >= sq_thresh->upper_threshold[2])) { newThreshold = WMI_SNR_THRESHOLD3_ABOVE; } else if (snr >= sq_thresh->upper_threshold[3]) { newThreshold = WMI_SNR_THRESHOLD4_ABOVE; } } else { /* Lower threshold breached */ if (snr > sq_thresh->lower_threshold[0]) { A_DPRINTF(DBG_WMI, (DBGFMT "Spurious lower SNR threshold event: " "%d %d\n", DBGARG, snr, sq_thresh->lower_threshold[0])); } else if ((snr > sq_thresh->lower_threshold[1]) && (snr <= sq_thresh->lower_threshold[0])) { newThreshold = WMI_SNR_THRESHOLD4_BELOW; } else if ((snr > sq_thresh->lower_threshold[2]) && (snr <= sq_thresh->lower_threshold[1])) { newThreshold = WMI_SNR_THRESHOLD3_BELOW; } else if ((snr > sq_thresh->lower_threshold[3]) && (snr <= sq_thresh->lower_threshold[2])) { newThreshold = WMI_SNR_THRESHOLD2_BELOW; } else if (snr <= sq_thresh->lower_threshold[3]) { newThreshold = WMI_SNR_THRESHOLD1_BELOW; } } /* Calculate and install the next set of thresholds */ lower_snr_threshold = ar6000_get_lower_threshold(snr, sq_thresh, sq_thresh->lower_threshold_valid_count); upper_snr_threshold = ar6000_get_upper_threshold(snr, sq_thresh, sq_thresh->upper_threshold_valid_count); /* Issue a wmi command to install the thresholds */ cmd.thresholdAbove1_Val = upper_snr_threshold; cmd.thresholdBelow1_Val = lower_snr_threshold; cmd.weight = sq_thresh->weight; cmd.pollTime = sq_thresh->polling_interval; A_DPRINTF(DBG_WMI, (DBGFMT "snr: %d, threshold: %d, lower: %d, upper: %d\n" ,DBGARG, snr, newThreshold, lower_snr_threshold, upper_snr_threshold)); snr_event_value = snr; if (wmi_send_snr_threshold_params(wmip, &cmd) != A_OK) { A_DPRINTF(DBG_WMI, (DBGFMT "Unable to configure the SNR thresholds\n", DBGARG)); } A_WMI_SNR_THRESHOLD_EVENT_RX(wmip->wmi_devt, newThreshold, reply->snr); return A_OK; } static A_STATUS wmi_lqThresholdEvent_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { WMI_LQ_THRESHOLD_EVENT *reply; if (len < sizeof(*reply)) { return A_EINVAL; } reply = (WMI_LQ_THRESHOLD_EVENT *)datap; A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG)); A_WMI_LQ_THRESHOLD_EVENT_RX(wmip->wmi_devt, (WMI_LQ_THRESHOLD_VAL) reply->range, reply->lq); return A_OK; } static A_STATUS wmi_aplistEvent_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { A_UINT16 ap_info_entry_size; WMI_APLIST_EVENT *ev = (WMI_APLIST_EVENT *)datap; WMI_AP_INFO_V1 *ap_info_v1; A_UINT8 i; if (len < sizeof(WMI_APLIST_EVENT)) { return A_EINVAL; } if (ev->apListVer == APLIST_VER1) { ap_info_entry_size = sizeof(WMI_AP_INFO_V1); ap_info_v1 = (WMI_AP_INFO_V1 *)ev->apList; } else { return A_EINVAL; } AR_DEBUG_PRINTF(ATH_DEBUG_WMI, ("Number of APs in APLIST Event is %d\n", ev->numAP)); if (len < (int)(sizeof(WMI_APLIST_EVENT) + (ev->numAP - 1) * ap_info_entry_size)) { return A_EINVAL; } /* * AP List Ver1 Contents */ for (i = 0; i < ev->numAP; i++) { AR_DEBUG_PRINTF(ATH_DEBUG_WMI, ("AP#%d BSSID %2.2x %2.2x %2.2x %2.2x %2.2x %2.2x "\ "Channel %d\n", i, ap_info_v1->bssid[0], ap_info_v1->bssid[1], 
ap_info_v1->bssid[2], ap_info_v1->bssid[3], ap_info_v1->bssid[4], ap_info_v1->bssid[5], ap_info_v1->channel)); ap_info_v1++; } return A_OK; } static A_STATUS wmi_dbglog_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { A_UINT32 dropped; dropped = *((A_UINT32 *)datap); datap += sizeof(dropped); len -= sizeof(dropped); A_WMI_DBGLOG_EVENT(wmip->wmi_devt, dropped, (A_INT8*)datap, len); return A_OK; } #ifdef CONFIG_HOST_GPIO_SUPPORT static A_STATUS wmi_gpio_intr_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { WMIX_GPIO_INTR_EVENT *gpio_intr = (WMIX_GPIO_INTR_EVENT *)datap; A_DPRINTF(DBG_WMI, (DBGFMT "Enter - intrmask=0x%x input=0x%x.\n", DBGARG, gpio_intr->intr_mask, gpio_intr->input_values)); A_WMI_GPIO_INTR_RX(gpio_intr->intr_mask, gpio_intr->input_values); return A_OK; } static A_STATUS wmi_gpio_data_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { WMIX_GPIO_DATA_EVENT *gpio_data = (WMIX_GPIO_DATA_EVENT *)datap; A_DPRINTF(DBG_WMI, (DBGFMT "Enter - reg=%d value=0x%x\n", DBGARG, gpio_data->reg_id, gpio_data->value)); A_WMI_GPIO_DATA_RX(gpio_data->reg_id, gpio_data->value); return A_OK; } static A_STATUS wmi_gpio_ack_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG)); A_WMI_GPIO_ACK_RX(); return A_OK; } #endif /* CONFIG_HOST_GPIO_SUPPORT */ /* * Called to send a wmi command. Command specific data is already built * on osbuf and current osbuf->data points to it. */ A_STATUS wmi_cmd_send(struct wmi_t *wmip, void *osbuf, WMI_COMMAND_ID cmdId, WMI_SYNC_FLAG syncflag) { A_STATUS status; #define IS_OPT_TX_CMD(cmdId) ((cmdId == WMI_OPT_TX_FRAME_CMDID)) WMI_CMD_HDR *cHdr; HTC_ENDPOINT_ID eid = wmip->wmi_endpoint_id; A_ASSERT(osbuf != NULL); if (syncflag >= END_WMIFLAG) { A_NETBUF_FREE(osbuf); return A_EINVAL; } if ((syncflag == SYNC_BEFORE_WMIFLAG) || (syncflag == SYNC_BOTH_WMIFLAG)) { /* * We want to make sure all data currently queued is transmitted before * the cmd execution. Establish a new sync point. */ wmi_sync_point(wmip); } if (A_NETBUF_PUSH(osbuf, sizeof(WMI_CMD_HDR)) != A_OK) { A_NETBUF_FREE(osbuf); return A_NO_MEMORY; } cHdr = (WMI_CMD_HDR *)A_NETBUF_DATA(osbuf); cHdr->commandId = (A_UINT16) cmdId; cHdr->info1 = 0; // added for virtual interface /* * Only for OPT_TX_CMD, use BE endpoint. */ if (IS_OPT_TX_CMD(cmdId)) { if ((status=wmi_data_hdr_add(wmip, osbuf, OPT_MSGTYPE, FALSE, FALSE,0,NULL)) != A_OK) { A_NETBUF_FREE(osbuf); return status; } eid = A_WMI_Ac2EndpointID(wmip->wmi_devt, WMM_AC_BE); } A_WMI_CONTROL_TX(wmip->wmi_devt, osbuf, eid); if ((syncflag == SYNC_AFTER_WMIFLAG) || (syncflag == SYNC_BOTH_WMIFLAG)) { /* * We want to make sure all new data queued waits for the command to * execute. Establish a new sync point. 
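         * For example, wmi_delete_pstream_cmd() below sends its command
         * with SYNC_BEFORE_WMIFLAG so that queued data frames drain
         * before the stream is torn down; SYNC_BOTH_WMIFLAG would
         * establish this trailing sync point as well.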
*/ wmi_sync_point(wmip); } return (A_OK); #undef IS_OPT_TX_CMD } A_STATUS wmi_cmd_send_xtnd(struct wmi_t *wmip, void *osbuf, WMIX_COMMAND_ID cmdId, WMI_SYNC_FLAG syncflag) { WMIX_CMD_HDR *cHdr; if (A_NETBUF_PUSH(osbuf, sizeof(WMIX_CMD_HDR)) != A_OK) { A_NETBUF_FREE(osbuf); return A_NO_MEMORY; } cHdr = (WMIX_CMD_HDR *)A_NETBUF_DATA(osbuf); cHdr->commandId = (A_UINT32) cmdId; return wmi_cmd_send(wmip, osbuf, WMI_EXTENSION_CMDID, syncflag); } A_STATUS wmi_connect_cmd(struct wmi_t *wmip, NETWORK_TYPE netType, DOT11_AUTH_MODE dot11AuthMode, AUTH_MODE authMode, CRYPTO_TYPE pairwiseCrypto, A_UINT8 pairwiseCryptoLen, CRYPTO_TYPE groupCrypto, A_UINT8 groupCryptoLen, int ssidLength, A_UCHAR *ssid, A_UINT8 *bssid, A_UINT16 channel, A_UINT32 ctrl_flags) { void *osbuf; WMI_CONNECT_CMD *cc; if ((pairwiseCrypto == NONE_CRYPT) && (groupCrypto != NONE_CRYPT)) { return A_EINVAL; } if ((pairwiseCrypto != NONE_CRYPT) && (groupCrypto == NONE_CRYPT)) { return A_EINVAL; } osbuf = A_NETBUF_ALLOC(sizeof(WMI_CONNECT_CMD)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(WMI_CONNECT_CMD)); cc = (WMI_CONNECT_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cc, sizeof(*cc)); if (ssidLength) { A_MEMCPY(cc->ssid, ssid, ssidLength); } cc->ssidLength = ssidLength; cc->networkType = netType; cc->dot11AuthMode = dot11AuthMode; cc->authMode = authMode; cc->pairwiseCryptoType = pairwiseCrypto; cc->pairwiseCryptoLen = pairwiseCryptoLen; cc->groupCryptoType = groupCrypto; cc->groupCryptoLen = groupCryptoLen; cc->channel = channel; cc->ctrl_flags = ctrl_flags; if (bssid != NULL) { A_MEMCPY(cc->bssid, bssid, ATH_MAC_LEN); } wmip->wmi_pair_crypto_type = pairwiseCrypto; wmip->wmi_grp_crypto_type = groupCrypto; return (wmi_cmd_send(wmip, osbuf, WMI_CONNECT_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_reconnect_cmd(struct wmi_t *wmip, A_UINT8 *bssid, A_UINT16 channel) { void *osbuf; WMI_RECONNECT_CMD *cc; osbuf = A_NETBUF_ALLOC(sizeof(WMI_RECONNECT_CMD)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(WMI_RECONNECT_CMD)); cc = (WMI_RECONNECT_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cc, sizeof(*cc)); cc->channel = channel; if (bssid != NULL) { A_MEMCPY(cc->bssid, bssid, ATH_MAC_LEN); } return (wmi_cmd_send(wmip, osbuf, WMI_RECONNECT_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_disconnect_cmd(struct wmi_t *wmip) { A_STATUS status; /* Bug fix for 24817(elevator bug) - the disconnect command does not need to do a SYNC before.*/ status = wmi_simple_cmd(wmip, WMI_DISCONNECT_CMDID); return status; } A_STATUS wmi_startscan_cmd(struct wmi_t *wmip, WMI_SCAN_TYPE scanType, A_BOOL forceFgScan, A_BOOL isLegacy, A_UINT32 homeDwellTime, A_UINT32 forceScanInterval, A_INT8 numChan, A_UINT16 *channelList) { void *osbuf; WMI_START_SCAN_CMD *sc; A_INT8 size; size = sizeof (*sc); if ((scanType != WMI_LONG_SCAN) && (scanType != WMI_SHORT_SCAN)) { return A_EINVAL; } if (numChan) { if (numChan > WMI_MAX_CHANNELS) { return A_EINVAL; } size += sizeof(A_UINT16) * (numChan - 1); } osbuf = A_NETBUF_ALLOC(size); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, size); sc = (WMI_START_SCAN_CMD *)(A_NETBUF_DATA(osbuf)); sc->scanType = scanType; sc->forceFgScan = forceFgScan; sc->isLegacy = isLegacy; sc->homeDwellTime = homeDwellTime; sc->forceScanInterval = forceScanInterval; sc->numChannels = numChan; if (numChan) { A_MEMCPY(sc->channelList, channelList, numChan * sizeof(A_UINT16)); } return (wmi_cmd_send(wmip, osbuf, WMI_START_SCAN_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_scanparams_cmd(struct wmi_t *wmip, A_UINT16 
fg_start_sec, A_UINT16 fg_end_sec, A_UINT16 bg_sec, A_UINT16 minact_chdw_msec, A_UINT16 maxact_chdw_msec, A_UINT16 pas_chdw_msec, A_UINT8 shScanRatio, A_UINT8 scanCtrlFlags, A_UINT32 max_dfsch_act_time, A_UINT16 maxact_scan_per_ssid) { void *osbuf; WMI_SCAN_PARAMS_CMD *sc; osbuf = A_NETBUF_ALLOC(sizeof(*sc)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*sc)); sc = (WMI_SCAN_PARAMS_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(sc, sizeof(*sc)); sc->fg_start_period = fg_start_sec; sc->fg_end_period = fg_end_sec; sc->bg_period = bg_sec; sc->minact_chdwell_time = minact_chdw_msec; sc->maxact_chdwell_time = maxact_chdw_msec; sc->pas_chdwell_time = pas_chdw_msec; sc->shortScanRatio = shScanRatio; sc->scanCtrlFlags = scanCtrlFlags; sc->max_dfsch_act_time = max_dfsch_act_time; sc->maxact_scan_per_ssid = maxact_scan_per_ssid; return (wmi_cmd_send(wmip, osbuf, WMI_SET_SCAN_PARAMS_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_bssfilter_cmd(struct wmi_t *wmip, A_UINT8 filter, A_UINT32 ieMask) { void *osbuf; WMI_BSS_FILTER_CMD *cmd; if (filter >= LAST_BSS_FILTER) { return A_EINVAL; } osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_BSS_FILTER_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cmd, sizeof(*cmd)); cmd->bssFilter = filter; cmd->ieMask = ieMask; return (wmi_cmd_send(wmip, osbuf, WMI_SET_BSS_FILTER_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_probedSsid_cmd(struct wmi_t *wmip, A_UINT8 index, A_UINT8 flag, A_UINT8 ssidLength, A_UCHAR *ssid) { void *osbuf; WMI_PROBED_SSID_CMD *cmd; if (index > MAX_PROBED_SSID_INDEX) { return A_EINVAL; } if (ssidLength > sizeof(cmd->ssid)) { return A_EINVAL; } if ((flag & (DISABLE_SSID_FLAG | ANY_SSID_FLAG)) && (ssidLength > 0)) { return A_EINVAL; } if ((flag & SPECIFIC_SSID_FLAG) && !ssidLength) { return A_EINVAL; } if (flag & SPECIFIC_SSID_FLAG) { is_probe_ssid = TRUE; } osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_PROBED_SSID_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cmd, sizeof(*cmd)); cmd->entryIndex = index; cmd->flag = flag; cmd->ssidLength = ssidLength; A_MEMCPY(cmd->ssid, ssid, ssidLength); return (wmi_cmd_send(wmip, osbuf, WMI_SET_PROBED_SSID_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_listeninterval_cmd(struct wmi_t *wmip, A_UINT16 listenInterval, A_UINT16 listenBeacons) { void *osbuf; WMI_LISTEN_INT_CMD *cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_LISTEN_INT_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cmd, sizeof(*cmd)); cmd->listenInterval = listenInterval; cmd->numBeacons = listenBeacons; return (wmi_cmd_send(wmip, osbuf, WMI_SET_LISTEN_INT_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_bmisstime_cmd(struct wmi_t *wmip, A_UINT16 bmissTime, A_UINT16 bmissBeacons) { void *osbuf; WMI_BMISS_TIME_CMD *cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_BMISS_TIME_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cmd, sizeof(*cmd)); cmd->bmissTime = bmissTime; cmd->numBeacons = bmissBeacons; return (wmi_cmd_send(wmip, osbuf, WMI_SET_BMISS_TIME_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_associnfo_cmd(struct wmi_t *wmip, A_UINT8 ieType, A_UINT8 ieLen, A_UINT8 *ieInfo) { void *osbuf; WMI_SET_ASSOC_INFO_CMD *cmd; A_UINT16 cmdLen; cmdLen = sizeof(*cmd) + ieLen - 1; osbuf = A_NETBUF_ALLOC(cmdLen); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, 
cmdLen); cmd = (WMI_SET_ASSOC_INFO_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cmd, cmdLen); cmd->ieType = ieType; cmd->bufferSize = ieLen; A_MEMCPY(cmd->assocInfo, ieInfo, ieLen); return (wmi_cmd_send(wmip, osbuf, WMI_SET_ASSOC_INFO_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_powermode_cmd(struct wmi_t *wmip, A_UINT8 powerMode) { void *osbuf; WMI_POWER_MODE_CMD *cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_POWER_MODE_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cmd, sizeof(*cmd)); cmd->powerMode = powerMode; wmip->wmi_powerMode = powerMode; return (wmi_cmd_send(wmip, osbuf, WMI_SET_POWER_MODE_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_ibsspmcaps_cmd(struct wmi_t *wmip, A_UINT8 pmEnable, A_UINT8 ttl, A_UINT16 atim_windows, A_UINT16 timeout_value) { void *osbuf; WMI_IBSS_PM_CAPS_CMD *cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_IBSS_PM_CAPS_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cmd, sizeof(*cmd)); cmd->power_saving = pmEnable; cmd->ttl = ttl; cmd->atim_windows = atim_windows; cmd->timeout_value = timeout_value; return (wmi_cmd_send(wmip, osbuf, WMI_SET_IBSS_PM_CAPS_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_apps_cmd(struct wmi_t *wmip, A_UINT8 psType, A_UINT32 idle_time, A_UINT32 ps_period, A_UINT8 sleep_period) { void *osbuf; WMI_AP_PS_CMD *cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_AP_PS_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cmd, sizeof(*cmd)); cmd->psType = psType; cmd->idle_time = idle_time; cmd->ps_period = ps_period; cmd->sleep_period = sleep_period; return (wmi_cmd_send(wmip, osbuf, WMI_SET_AP_PS_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_pmparams_cmd(struct wmi_t *wmip, A_UINT16 idlePeriod, A_UINT16 psPollNum, A_UINT16 dtimPolicy, A_UINT16 tx_wakeup_policy, A_UINT16 num_tx_to_wakeup, A_UINT16 ps_fail_event_policy) { void *osbuf; WMI_POWER_PARAMS_CMD *pm; osbuf = A_NETBUF_ALLOC(sizeof(*pm)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*pm)); pm = (WMI_POWER_PARAMS_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(pm, sizeof(*pm)); pm->idle_period = idlePeriod; pm->pspoll_number = psPollNum; pm->dtim_policy = dtimPolicy; pm->tx_wakeup_policy = tx_wakeup_policy; pm->num_tx_to_wakeup = num_tx_to_wakeup; pm->ps_fail_event_policy = ps_fail_event_policy; return (wmi_cmd_send(wmip, osbuf, WMI_SET_POWER_PARAMS_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_disctimeout_cmd(struct wmi_t *wmip, A_UINT8 timeout) { void *osbuf; WMI_DISC_TIMEOUT_CMD *cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_DISC_TIMEOUT_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cmd, sizeof(*cmd)); cmd->disconnectTimeout = timeout; return (wmi_cmd_send(wmip, osbuf, WMI_SET_DISC_TIMEOUT_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_addKey_cmd(struct wmi_t *wmip, A_UINT8 keyIndex, CRYPTO_TYPE keyType, A_UINT8 keyUsage, A_UINT8 keyLength, A_UINT8 *keyRSC, A_UINT8 *keyMaterial, A_UINT8 key_op_ctrl, A_UINT8 *macAddr, WMI_SYNC_FLAG sync_flag) { void *osbuf; WMI_ADD_CIPHER_KEY_CMD *cmd; if ((keyIndex > WMI_MAX_KEY_INDEX) || (keyLength > WMI_MAX_KEY_LEN) || (keyMaterial == NULL)) { return A_EINVAL; } if ((WEP_CRYPT != keyType) && (NULL == keyRSC)) { return A_EINVAL; } osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = 
(WMI_ADD_CIPHER_KEY_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cmd, sizeof(*cmd)); cmd->keyIndex = keyIndex; cmd->keyType = keyType; cmd->keyUsage = keyUsage; cmd->keyLength = keyLength; A_MEMCPY(cmd->key, keyMaterial, keyLength); #ifdef WAPI_ENABLE if (NULL != keyRSC && key_op_ctrl != KEY_OP_INIT_WAPIPN) { #else if (NULL != keyRSC) { #endif // WAPI_ENABLE A_MEMCPY(cmd->keyRSC, keyRSC, sizeof(cmd->keyRSC)); } cmd->key_op_ctrl = key_op_ctrl; if(macAddr) { A_MEMCPY(cmd->key_macaddr,macAddr,IEEE80211_ADDR_LEN); } return (wmi_cmd_send(wmip, osbuf, WMI_ADD_CIPHER_KEY_CMDID, sync_flag)); } A_STATUS wmi_add_krk_cmd(struct wmi_t *wmip, A_UINT8 *krk) { void *osbuf; WMI_ADD_KRK_CMD *cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_ADD_KRK_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cmd, sizeof(*cmd)); A_MEMCPY(cmd->krk, krk, WMI_KRK_LEN); return (wmi_cmd_send(wmip, osbuf, WMI_ADD_KRK_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_delete_krk_cmd(struct wmi_t *wmip) { return wmi_simple_cmd(wmip, WMI_DELETE_KRK_CMDID); } A_STATUS wmi_deleteKey_cmd(struct wmi_t *wmip, A_UINT8 keyIndex) { void *osbuf; WMI_DELETE_CIPHER_KEY_CMD *cmd; if (keyIndex > WMI_MAX_KEY_INDEX) { return A_EINVAL; } osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_DELETE_CIPHER_KEY_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cmd, sizeof(*cmd)); cmd->keyIndex = keyIndex; return (wmi_cmd_send(wmip, osbuf, WMI_DELETE_CIPHER_KEY_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_setPmkid_cmd(struct wmi_t *wmip, A_UINT8 *bssid, A_UINT8 *pmkId, A_BOOL set) { void *osbuf; WMI_SET_PMKID_CMD *cmd; if (bssid == NULL) { return A_EINVAL; } if ((set == TRUE) && (pmkId == NULL)) { return A_EINVAL; } osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_SET_PMKID_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMCPY(cmd->bssid, bssid, sizeof(cmd->bssid)); if (set == TRUE) { A_MEMCPY(cmd->pmkid, pmkId, sizeof(cmd->pmkid)); cmd->enable = PMKID_ENABLE; } else { A_MEMZERO(cmd->pmkid, sizeof(cmd->pmkid)); cmd->enable = PMKID_DISABLE; } return (wmi_cmd_send(wmip, osbuf, WMI_SET_PMKID_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_set_tkip_countermeasures_cmd(struct wmi_t *wmip, A_BOOL en) { void *osbuf; WMI_SET_TKIP_COUNTERMEASURES_CMD *cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_SET_TKIP_COUNTERMEASURES_CMD *)(A_NETBUF_DATA(osbuf)); cmd->cm_en = (en == TRUE)? 
WMI_TKIP_CM_ENABLE : WMI_TKIP_CM_DISABLE; return (wmi_cmd_send(wmip, osbuf, WMI_SET_TKIP_COUNTERMEASURES_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_set_akmp_params_cmd(struct wmi_t *wmip, WMI_SET_AKMP_PARAMS_CMD *akmpParams) { void *osbuf; WMI_SET_AKMP_PARAMS_CMD *cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_SET_AKMP_PARAMS_CMD *)(A_NETBUF_DATA(osbuf)); cmd->akmpInfo = akmpParams->akmpInfo; return (wmi_cmd_send(wmip, osbuf, WMI_SET_AKMP_PARAMS_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_set_pmkid_list_cmd(struct wmi_t *wmip, WMI_SET_PMKID_LIST_CMD *pmkInfo) { void *osbuf; WMI_SET_PMKID_LIST_CMD *cmd; A_UINT16 cmdLen; A_UINT8 i; cmdLen = sizeof(pmkInfo->numPMKID) + pmkInfo->numPMKID * sizeof(WMI_PMKID); osbuf = A_NETBUF_ALLOC(cmdLen); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, cmdLen); cmd = (WMI_SET_PMKID_LIST_CMD *)(A_NETBUF_DATA(osbuf)); cmd->numPMKID = pmkInfo->numPMKID; for (i = 0; i < cmd->numPMKID; i++) { A_MEMCPY(&cmd->pmkidList[i], &pmkInfo->pmkidList[i], WMI_PMKID_LEN); } return (wmi_cmd_send(wmip, osbuf, WMI_SET_PMKID_LIST_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_get_pmkid_list_cmd(struct wmi_t *wmip) { return wmi_simple_cmd(wmip, WMI_GET_PMKID_LIST_CMDID); } A_STATUS wmi_dataSync_send(struct wmi_t *wmip, void *osbuf, HTC_ENDPOINT_ID eid) { WMI_DATA_HDR *dtHdr; A_ASSERT( eid != wmip->wmi_endpoint_id); A_ASSERT(osbuf != NULL); if (A_NETBUF_PUSH(osbuf, sizeof(WMI_DATA_HDR)) != A_OK) { return A_NO_MEMORY; } dtHdr = (WMI_DATA_HDR *)A_NETBUF_DATA(osbuf); dtHdr->info = (SYNC_MSGTYPE & WMI_DATA_HDR_MSG_TYPE_MASK) << WMI_DATA_HDR_MSG_TYPE_SHIFT; A_DPRINTF(DBG_WMI, (DBGFMT "Enter - eid %d\n", DBGARG, eid)); return (A_WMI_CONTROL_TX(wmip->wmi_devt, osbuf, eid)); } typedef struct _WMI_DATA_SYNC_BUFS { A_UINT8 trafficClass; void *osbuf; }WMI_DATA_SYNC_BUFS; static A_STATUS wmi_sync_point(struct wmi_t *wmip) { void *cmd_osbuf; WMI_SYNC_CMD *cmd; WMI_DATA_SYNC_BUFS dataSyncBufs[WMM_NUM_AC]; A_UINT8 i,numPriStreams=0; A_STATUS status = A_OK; A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG)); memset(dataSyncBufs,0,sizeof(dataSyncBufs)); /* lock out while we walk through the priority list and assemble our local array */ LOCK_WMI(wmip); for (i=0; i < WMM_NUM_AC ; i++) { if (wmip->wmi_fatPipeExists & (1 << i)) { numPriStreams++; dataSyncBufs[numPriStreams-1].trafficClass = i; } } UNLOCK_WMI(wmip); /* dataSyncBufs is now filled with entries (starting at index 0) containing valid streamIDs */ do { /* * We allocate all network buffers needed so we will be able to * send all required frames. 
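         * That is: one control buffer for the WMI_SYNCHRONIZE_CMDID
         * itself, plus one zero-length data buffer per active fat pipe;
         * if any allocation fails, nothing is sent at all.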
*/ cmd_osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (cmd_osbuf == NULL) { status = A_NO_MEMORY; break; } A_NETBUF_PUT(cmd_osbuf, sizeof(*cmd)); cmd = (WMI_SYNC_CMD *)(A_NETBUF_DATA(cmd_osbuf)); A_MEMZERO(cmd, sizeof(*cmd)); /* In the SYNC cmd sent on the control Ep, send a bitmap of the data * eps on which the Data Sync will be sent */ cmd->dataSyncMap = wmip->wmi_fatPipeExists; for (i=0; i < numPriStreams ; i++) { dataSyncBufs[i].osbuf = A_NETBUF_ALLOC(0); if (dataSyncBufs[i].osbuf == NULL) { status = A_NO_MEMORY; break; } } //end for /* if Buffer allocation for any of the dataSync fails, then do not * send the Synchronize cmd on the control ep */ if (A_FAILED(status)) { break; } /* * Send sync cmd followed by sync data messages on all endpoints being * used */ status = wmi_cmd_send(wmip, cmd_osbuf, WMI_SYNCHRONIZE_CMDID, NO_SYNC_WMIFLAG); if (A_FAILED(status)) { break; } /* cmd buffer sent, we no longer own it */ cmd_osbuf = NULL; for(i=0; i < numPriStreams; i++) { A_ASSERT(dataSyncBufs[i].osbuf != NULL); status = wmi_dataSync_send(wmip, dataSyncBufs[i].osbuf, A_WMI_Ac2EndpointID(wmip->wmi_devt, dataSyncBufs[i]. trafficClass) ); if (A_FAILED(status)) { break; } /* we don't own this buffer anymore, NULL it out of the array so it * won't get cleaned up */ dataSyncBufs[i].osbuf = NULL; } //end for } while(FALSE); /* free up any resources left over (possibly due to an error) */ if (cmd_osbuf != NULL) { A_NETBUF_FREE(cmd_osbuf); } for (i = 0; i < numPriStreams; i++) { if (dataSyncBufs[i].osbuf != NULL) { A_NETBUF_FREE(dataSyncBufs[i].osbuf); } } return (status); } A_STATUS wmi_create_pstream_cmd(struct wmi_t *wmip, WMI_CREATE_PSTREAM_CMD *params) { void *osbuf; WMI_CREATE_PSTREAM_CMD *cmd; A_UINT8 fatPipeExistsForAC=0; A_INT32 minimalPHY = 0; A_INT32 nominalPHY = 0; /* Validate all the parameters. 
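     * Specifically: userPriority must be 0..7 and must map (via
     * convert_userPriority_to_trafficClass) to the given trafficClass;
     * trafficDirection, trafficType and voicePSCapability must be legal
     * enum values; and tsid must be either WMI_IMPLICIT_PSTREAM or no
     * greater than WMI_MAX_THINSTREAM.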
     */
    if (!((params->userPriority < 8) &&
          (convert_userPriority_to_trafficClass(params->userPriority) == params->trafficClass) &&
          (params->trafficDirection == UPLINK_TRAFFIC ||
           params->trafficDirection == DNLINK_TRAFFIC ||
           params->trafficDirection == BIDIR_TRAFFIC) &&
          (params->trafficType == TRAFFIC_TYPE_APERIODIC ||
           params->trafficType == TRAFFIC_TYPE_PERIODIC) &&
          (params->voicePSCapability == DISABLE_FOR_THIS_AC ||
           params->voicePSCapability == ENABLE_FOR_THIS_AC ||
           params->voicePSCapability == ENABLE_FOR_ALL_AC) &&
          (params->tsid == WMI_IMPLICIT_PSTREAM || params->tsid <= WMI_MAX_THINSTREAM)))
    {
        return A_EINVAL;
    }

    /*
     * Check that the nominal PHY rate is >= the minimal PHY rate, so that
     * the DUT can allow the TSRS IE.
     */

    /* get the physical rate */
    minimalPHY = ((params->minPhyRate / 1000) / 1000); /* minPhyRate is in bps, minimalPHY in Mbps */

    /* check that the minimal PHY rate does not exceed the nominal PHY rate */
    if (params->nominalPHY >= minimalPHY) {
        nominalPHY = (params->nominalPHY * 1000) / 500; /* unit of 500 kbps */
        A_DPRINTF(DBG_WMI,
                  (DBGFMT "TSRS IE Enabled::MinPhy %x->NominalPhy ===> %x\n", DBGARG,
                  minimalPHY, nominalPHY));

        params->nominalPHY = nominalPHY;
    } else {
        params->nominalPHY = 0;
    }

    osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, sizeof(*cmd));

    A_DPRINTF(DBG_WMI,
              (DBGFMT "Sending create_pstream_cmd: ac=%d tsid:%d\n", DBGARG,
              params->trafficClass, params->tsid));

    cmd = (WMI_CREATE_PSTREAM_CMD *)(A_NETBUF_DATA(osbuf));
    A_MEMZERO(cmd, sizeof(*cmd));
    A_MEMCPY(cmd, params, sizeof(*cmd));

    /* this is an implicitly created fat pipe */
    if ((A_UINT32)params->tsid == (A_UINT32)WMI_IMPLICIT_PSTREAM) {
        LOCK_WMI(wmip);
        fatPipeExistsForAC = (wmip->wmi_fatPipeExists & (1 << params->trafficClass));
        wmip->wmi_fatPipeExists |= (1 << params->trafficClass);
        UNLOCK_WMI(wmip);
    } else {
        /* this is an explicitly created thinstream within a fat pipe */
        LOCK_WMI(wmip);
        fatPipeExistsForAC = (wmip->wmi_fatPipeExists & (1 << params->trafficClass));
        wmip->wmi_streamExistsForAC[params->trafficClass] |= (1 << params->tsid);
        /* if a thinstream becomes active, the fat pipe automatically
         * becomes active
         */
        wmip->wmi_fatPipeExists |= (1 << params->trafficClass);
        UNLOCK_WMI(wmip);
    }

    /* Indicate the activity change to the driver layer only if this is the
     * first TSID to get created in this AC explicitly, or an implicit
     * fat pipe is getting created.
     */
    if (!fatPipeExistsForAC) {
        A_WMI_STREAM_TX_ACTIVE(wmip->wmi_devt, params->trafficClass);
    }

    /* mike: should be SYNC_BEFORE_WMIFLAG */
    return (wmi_cmd_send(wmip, osbuf, WMI_CREATE_PSTREAM_CMDID, NO_SYNC_WMIFLAG));
}

A_STATUS
wmi_delete_pstream_cmd(struct wmi_t *wmip, A_UINT8 trafficClass, A_UINT8 tsid)
{
    void *osbuf;
    WMI_DELETE_PSTREAM_CMD *cmd;
    A_STATUS status;
    A_UINT16 activeTsids = 0;

    /* validate the parameters */
    if (trafficClass > 3) {
        A_DPRINTF(DBG_WMI, (DBGFMT "Invalid trafficClass: %d\n", DBGARG, trafficClass));
        return A_EINVAL;
    }

    osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, sizeof(*cmd));

    cmd = (WMI_DELETE_PSTREAM_CMD *)(A_NETBUF_DATA(osbuf));
    A_MEMZERO(cmd, sizeof(*cmd));

    cmd->trafficClass = trafficClass;
    cmd->tsid = tsid;

    LOCK_WMI(wmip);
    activeTsids = wmip->wmi_streamExistsForAC[trafficClass];
    UNLOCK_WMI(wmip);

    /* Check that the tsid was created & exists */
    if (!(activeTsids & (1 << tsid))) {
        A_NETBUF_FREE(osbuf);
        A_DPRINTF(DBG_WMI,
                  (DBGFMT "TSID %d doesn't exist for trafficClass: %d\n", DBGARG, tsid, trafficClass));
        /* TODO: return a more appropriate err code */
        return A_ERROR;
    }

    A_DPRINTF(DBG_WMI,
              (DBGFMT "Sending delete_pstream_cmd: trafficClass: %d tsid=%d\n", DBGARG, trafficClass, tsid));

    status = (wmi_cmd_send(wmip, osbuf, WMI_DELETE_PSTREAM_CMDID, SYNC_BEFORE_WMIFLAG));

    LOCK_WMI(wmip);
    wmip->wmi_streamExistsForAC[trafficClass] &= ~(1 << tsid);
    activeTsids = wmip->wmi_streamExistsForAC[trafficClass];
    UNLOCK_WMI(wmip);

    /* Indicate stream inactivity to the driver layer only if all tsids
     * within this AC are deleted.
     */
    if (!activeTsids) {
        A_WMI_STREAM_TX_INACTIVE(wmip->wmi_devt, trafficClass);
        wmip->wmi_fatPipeExists &= ~(1 << trafficClass);
    }

    return status;
}

A_STATUS
wmi_set_framerate_cmd(struct wmi_t *wmip, A_UINT8 bEnable, A_UINT8 type,
                      A_UINT8 subType, A_UINT16 rateMask)
{
    void *osbuf;
    WMI_FRAME_RATES_CMD *cmd;
    A_UINT8 frameType;

    A_DPRINTF(DBG_WMI,
              (DBGFMT " type %02X, subType %02X, rateMask %04x\n", DBGARG, type, subType, rateMask));

    if ((type != IEEE80211_FRAME_TYPE_MGT && type != IEEE80211_FRAME_TYPE_CTL) ||
        (subType > 15)) {
        return A_EINVAL;
    }

    osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, sizeof(*cmd));

    cmd = (WMI_FRAME_RATES_CMD *)(A_NETBUF_DATA(osbuf));
    A_MEMZERO(cmd, sizeof(*cmd));

    frameType = (A_UINT8)((subType << 4) | type);

    cmd->bEnableMask = bEnable;
    cmd->frameType = frameType;
    cmd->frameRateMask = rateMask;

    return (wmi_cmd_send(wmip, osbuf, WMI_SET_FRAMERATES_CMDID, NO_SYNC_WMIFLAG));
}

/*
 * Used to set the bit rate.  The rate is in Kbps.  If rate == -1,
 * automatic rate selection is used.
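 *
 * For example (a sketch; the requested rate must exist in
 * wmi_rateTable for the current PHY mode):
 *
 *     wmi_set_bitrate_cmd(wmip, 54000, -1, -1);
 *
 * pins the data rate at 54 Mbps while leaving the management and
 * control rates on automatic selection.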
*/ A_STATUS wmi_set_bitrate_cmd(struct wmi_t *wmip, A_INT32 dataRate, A_INT32 mgmtRate, A_INT32 ctlRate) { void *osbuf; WMI_BIT_RATE_CMD *cmd; A_INT8 drix, mrix, crix, ret_val; if (dataRate != -1) { ret_val = wmi_validate_bitrate(wmip, dataRate, &drix); if(ret_val == A_EINVAL){ return A_EINVAL; } } else { drix = -1; } if (mgmtRate != -1) { ret_val = wmi_validate_bitrate(wmip, mgmtRate, &mrix); if(ret_val == A_EINVAL){ return A_EINVAL; } } else { mrix = -1; } if (ctlRate != -1) { ret_val = wmi_validate_bitrate(wmip, ctlRate, &crix); if(ret_val == A_EINVAL){ return A_EINVAL; } } else { crix = -1; } osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_BIT_RATE_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cmd, sizeof(*cmd)); cmd->rateIndex = drix; cmd->mgmtRateIndex = mrix; cmd->ctlRateIndex = crix; return (wmi_cmd_send(wmip, osbuf, WMI_SET_BITRATE_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_get_bitrate_cmd(struct wmi_t *wmip) { return wmi_simple_cmd(wmip, WMI_GET_BITRATE_CMDID); } /* * Returns TRUE iff the given rate index is legal in the current PHY mode. */ A_BOOL wmi_is_bitrate_index_valid(struct wmi_t *wmip, A_INT32 rateIndex) { WMI_PHY_MODE phyMode = (WMI_PHY_MODE) wmip->wmi_phyMode; A_BOOL isValid = TRUE; switch(phyMode) { case WMI_11A_MODE: if (wmip->wmi_ht_allowed[A_BAND_5GHZ]){ if ((rateIndex < MODE_A_SUPPORT_RATE_START) || (rateIndex > MODE_GHT20_SUPPORT_RATE_STOP)) { isValid = FALSE; } } else { if ((rateIndex < MODE_A_SUPPORT_RATE_START) || (rateIndex > MODE_A_SUPPORT_RATE_STOP)) { isValid = FALSE; } } break; case WMI_11B_MODE: if ((rateIndex < MODE_B_SUPPORT_RATE_START) || (rateIndex > MODE_B_SUPPORT_RATE_STOP)) { isValid = FALSE; } break; case WMI_11GONLY_MODE: if (wmip->wmi_ht_allowed[A_BAND_24GHZ]){ if ((rateIndex < MODE_GONLY_SUPPORT_RATE_START) || (rateIndex > MODE_GHT20_SUPPORT_RATE_STOP)) { isValid = FALSE; } } else { if ((rateIndex < MODE_GONLY_SUPPORT_RATE_START) || (rateIndex > MODE_GONLY_SUPPORT_RATE_STOP)) { isValid = FALSE; } } break; case WMI_11G_MODE: case WMI_11AG_MODE: if (wmip->wmi_ht_allowed[A_BAND_24GHZ]){ if ((rateIndex < MODE_G_SUPPORT_RATE_START) || (rateIndex > MODE_GHT20_SUPPORT_RATE_STOP)) { isValid = FALSE; } } else { if ((rateIndex < MODE_G_SUPPORT_RATE_START) || (rateIndex > MODE_G_SUPPORT_RATE_STOP)) { isValid = FALSE; } } break; default: A_ASSERT(FALSE); break; } return isValid; } A_INT8 wmi_validate_bitrate(struct wmi_t *wmip, A_INT32 rate, A_INT8 *rate_idx) { A_INT8 i; for (i=0;;i++) { if (wmi_rateTable[(A_UINT32) i][0] == 0) { return A_EINVAL; } if (wmi_rateTable[(A_UINT32) i][0] == rate) { break; } } if(wmi_is_bitrate_index_valid(wmip, (A_INT32) i) != TRUE) { return A_EINVAL; } *rate_idx = i; return A_OK; } A_STATUS wmi_set_fixrates_cmd(struct wmi_t *wmip, A_UINT32 fixRatesMask) { void *osbuf; WMI_FIX_RATES_CMD *cmd; #if 0 A_INT32 rateIndex; /* This check does not work for AR6003 as the HT modes are enabled only when * the STA is connected to a HT_BSS and is not based only on channel. It is * safe to skip this check however because rate control will only use rates * that are permitted by the valid rate mask and the fix rate mask. Meaning * the fix rate mask is not sufficient by itself to cause an invalid rate * to be used. 
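 * (The check below is therefore compiled out with #if 0 and kept only for
 * reference.)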
 */
    /* Make sure all rates in the mask are valid in the current PHY mode */
    for (rateIndex = 0; rateIndex < MAX_NUMBER_OF_SUPPORT_RATES; rateIndex++) {
        if ((1 << rateIndex) & (A_UINT32)fixRatesMask) {
            if (wmi_is_bitrate_index_valid(wmip, rateIndex) != TRUE) {
                A_DPRINTF(DBG_WMI, (DBGFMT "Set Fix Rates command failed: Given rate is illegal in current PHY mode\n", DBGARG));
                return A_EINVAL;
            }
        }
    }
#endif

    osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, sizeof(*cmd));

    cmd = (WMI_FIX_RATES_CMD *)(A_NETBUF_DATA(osbuf));
    A_MEMZERO(cmd, sizeof(*cmd));

    cmd->fixRateMask = fixRatesMask;

    return (wmi_cmd_send(wmip, osbuf, WMI_SET_FIXRATES_CMDID, NO_SYNC_WMIFLAG));
}

A_STATUS
wmi_get_ratemask_cmd(struct wmi_t *wmip)
{
    return wmi_simple_cmd(wmip, WMI_GET_FIXRATES_CMDID);
}

A_STATUS
wmi_get_channelList_cmd(struct wmi_t *wmip)
{
    return wmi_simple_cmd(wmip, WMI_GET_CHANNEL_LIST_CMDID);
}

/*
 * Used to generate a WMI set channel Parameters cmd.
 * mode should always be specified and corresponds to the phy mode of the
 * wlan.
 * numChan should always be specified. If zero, it indicates that all
 * available channels should be used.
 * channelList is an array of channel frequencies (in MHz) which the radio
 * should limit its operation to. It should be NULL if numChan == 0. Size of
 * array should correspond to numChan entries.
 */
A_STATUS
wmi_set_channelParams_cmd(struct wmi_t *wmip, A_UINT8 scanParam,
                          WMI_PHY_MODE mode, A_INT8 numChan,
                          A_UINT16 *channelList)
{
    void *osbuf;
    WMI_CHANNEL_PARAMS_CMD *cmd;
    A_INT8 size;

    size = sizeof (*cmd);

    if (numChan) {
        if (numChan > WMI_MAX_CHANNELS) {
            return A_EINVAL;
        }
        size += sizeof(A_UINT16) * (numChan - 1);
    }

    osbuf = A_NETBUF_ALLOC(size);
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, size);

    cmd = (WMI_CHANNEL_PARAMS_CMD *)(A_NETBUF_DATA(osbuf));
    A_MEMZERO(cmd, size);

    wmip->wmi_phyMode = mode;
    cmd->scanParam   = scanParam;
    cmd->phyMode     = mode;
    cmd->numChannels = numChan;
    A_MEMCPY(cmd->channelList, channelList, numChan * sizeof(A_UINT16));

    return (wmi_cmd_send(wmip, osbuf, WMI_SET_CHANNEL_PARAMS_CMDID,
                         NO_SYNC_WMIFLAG));
}

void
wmi_cache_configure_rssithreshold(struct wmi_t *wmip,
                                  WMI_RSSI_THRESHOLD_PARAMS_CMD *rssiCmd)
{
    SQ_THRESHOLD_PARAMS *sq_thresh =
        &wmip->wmi_SqThresholdParams[SIGNAL_QUALITY_METRICS_RSSI];
    /*
     * Parse the command and store the threshold values here.
     * The checks for valid values can be put here.
     */
    sq_thresh->weight = rssiCmd->weight;
    sq_thresh->polling_interval = rssiCmd->pollTime;

    sq_thresh->upper_threshold[0] = rssiCmd->thresholdAbove1_Val - SIGNAL_QUALITY_NOISE_FLOOR;
    sq_thresh->upper_threshold[1] = rssiCmd->thresholdAbove2_Val - SIGNAL_QUALITY_NOISE_FLOOR;
    sq_thresh->upper_threshold[2] = rssiCmd->thresholdAbove3_Val - SIGNAL_QUALITY_NOISE_FLOOR;
    sq_thresh->upper_threshold[3] = rssiCmd->thresholdAbove4_Val - SIGNAL_QUALITY_NOISE_FLOOR;
    sq_thresh->upper_threshold[4] = rssiCmd->thresholdAbove5_Val - SIGNAL_QUALITY_NOISE_FLOOR;
    sq_thresh->upper_threshold[5] = rssiCmd->thresholdAbove6_Val - SIGNAL_QUALITY_NOISE_FLOOR;
    sq_thresh->upper_threshold_valid_count = 6;

    /* List sorted in descending order */
    sq_thresh->lower_threshold[0] = rssiCmd->thresholdBelow6_Val - SIGNAL_QUALITY_NOISE_FLOOR;
    sq_thresh->lower_threshold[1] = rssiCmd->thresholdBelow5_Val - SIGNAL_QUALITY_NOISE_FLOOR;
    sq_thresh->lower_threshold[2] = rssiCmd->thresholdBelow4_Val - SIGNAL_QUALITY_NOISE_FLOOR;
    sq_thresh->lower_threshold[3] = rssiCmd->thresholdBelow3_Val - SIGNAL_QUALITY_NOISE_FLOOR;
    sq_thresh->lower_threshold[4] = rssiCmd->thresholdBelow2_Val - SIGNAL_QUALITY_NOISE_FLOOR;
    sq_thresh->lower_threshold[5] = rssiCmd->thresholdBelow1_Val - SIGNAL_QUALITY_NOISE_FLOOR;
    sq_thresh->lower_threshold_valid_count = 6;

    if (!rssi_event_value) {
        /*
         * Configuring the thresholds to their extremes allows the host to
         * get an event from the target, which is used for configuring the
         * correct thresholds
         */
        rssiCmd->thresholdAbove1_Val = sq_thresh->upper_threshold[0];
        rssiCmd->thresholdBelow1_Val = sq_thresh->lower_threshold[0];
    } else {
        /*
         * If the user issues the rssi threshold setting multiple times, we
         * should not use the extremes anymore; the target does not expect
         * that.
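         * Instead, bracket the last reported value with the nearest cached
         * thresholds (see ar6000_get_upper_threshold() and
         * ar6000_get_lower_threshold() later in this file).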
         */
        rssiCmd->thresholdAbove1_Val = ar6000_get_upper_threshold(rssi_event_value,
                                           sq_thresh, sq_thresh->upper_threshold_valid_count);
        rssiCmd->thresholdBelow1_Val = ar6000_get_lower_threshold(rssi_event_value,
                                           sq_thresh, sq_thresh->lower_threshold_valid_count);
    }
}

A_STATUS
wmi_set_rssi_threshold_params(struct wmi_t *wmip,
                              WMI_RSSI_THRESHOLD_PARAMS_CMD *rssiCmd)
{
    /* Check these values are in ascending order */
    if (rssiCmd->thresholdAbove6_Val <= rssiCmd->thresholdAbove5_Val ||
        rssiCmd->thresholdAbove5_Val <= rssiCmd->thresholdAbove4_Val ||
        rssiCmd->thresholdAbove4_Val <= rssiCmd->thresholdAbove3_Val ||
        rssiCmd->thresholdAbove3_Val <= rssiCmd->thresholdAbove2_Val ||
        rssiCmd->thresholdAbove2_Val <= rssiCmd->thresholdAbove1_Val ||
        rssiCmd->thresholdBelow6_Val <= rssiCmd->thresholdBelow5_Val ||
        rssiCmd->thresholdBelow5_Val <= rssiCmd->thresholdBelow4_Val ||
        rssiCmd->thresholdBelow4_Val <= rssiCmd->thresholdBelow3_Val ||
        rssiCmd->thresholdBelow3_Val <= rssiCmd->thresholdBelow2_Val ||
        rssiCmd->thresholdBelow2_Val <= rssiCmd->thresholdBelow1_Val)
    {
        return A_EINVAL;
    }

    wmi_cache_configure_rssithreshold(wmip, rssiCmd);

    return (wmi_send_rssi_threshold_params(wmip, rssiCmd));
}

A_STATUS
wmi_set_ip_cmd(struct wmi_t *wmip, WMI_SET_IP_CMD *ipCmd)
{
    void *osbuf;
    WMI_SET_IP_CMD *cmd;

    /* Multicast addresses are not valid */
    if ((*((A_UINT8*)&ipCmd->ips[0]) >= 0xE0) ||
        (*((A_UINT8*)&ipCmd->ips[1]) >= 0xE0)) {
        return A_EINVAL;
    }

    osbuf = A_NETBUF_ALLOC(sizeof(WMI_SET_IP_CMD));
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, sizeof(WMI_SET_IP_CMD));

    cmd = (WMI_SET_IP_CMD *)(A_NETBUF_DATA(osbuf));
    A_MEMCPY(cmd, ipCmd, sizeof(WMI_SET_IP_CMD));

    return (wmi_cmd_send(wmip, osbuf, WMI_SET_IP_CMDID, NO_SYNC_WMIFLAG));
}

A_STATUS
wmi_set_host_sleep_mode_cmd(struct wmi_t *wmip,
                            WMI_SET_HOST_SLEEP_MODE_CMD *hostModeCmd)
{
    void *osbuf;
    A_INT8 size;
    WMI_SET_HOST_SLEEP_MODE_CMD *cmd;
    A_UINT16 activeTsids=0;
    A_UINT8 streamExists=0;
    A_UINT8 i;

    if (hostModeCmd->awake == hostModeCmd->asleep) {
        return A_EINVAL;
    }

    size = sizeof (*cmd);

    osbuf = A_NETBUF_ALLOC(size);
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, size);

    cmd = (WMI_SET_HOST_SLEEP_MODE_CMD *)(A_NETBUF_DATA(osbuf));
    A_MEMZERO(cmd, size);
    A_MEMCPY(cmd, hostModeCmd, sizeof(WMI_SET_HOST_SLEEP_MODE_CMD));

    if (hostModeCmd->asleep) {
        /*
         * Relinquish credits from all implicitly created pstreams when we
         * go to sleep.
         * If explicitly user-created thin streams exist within a
         * fatpipe, leave them intact for the user to delete.
         */
        LOCK_WMI(wmip);
        streamExists = wmip->wmi_fatPipeExists;
        UNLOCK_WMI(wmip);

        for (i = 0; i < WMM_NUM_AC; i++) {
            if (streamExists & (1 << i)) {
                LOCK_WMI(wmip);
                activeTsids = wmip->wmi_streamExistsForAC[i];
                UNLOCK_WMI(wmip);
                /* If there are no user-created thin streams, delete the fatpipe */
                if (!activeTsids) {
                    streamExists &= ~(1 << i);
                    /* Indicate inactivity to drv layer for this fatpipe (pstream) */
                    A_WMI_STREAM_TX_INACTIVE(wmip->wmi_devt, i);
                }
            }
        }

        /* Update the fatpipes that exist */
        LOCK_WMI(wmip);
        wmip->wmi_fatPipeExists = streamExists;
        UNLOCK_WMI(wmip);
    }

    return (wmi_cmd_send(wmip, osbuf, WMI_SET_HOST_SLEEP_MODE_CMDID,
                         NO_SYNC_WMIFLAG));
}

A_STATUS
wmi_set_wow_mode_cmd(struct wmi_t *wmip,
                     WMI_SET_WOW_MODE_CMD *wowModeCmd)
{
    void *osbuf;
    A_INT8 size;
    WMI_SET_WOW_MODE_CMD *cmd;

    size = sizeof (*cmd);

    osbuf = A_NETBUF_ALLOC(size);
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, size);

    cmd = (WMI_SET_WOW_MODE_CMD *)(A_NETBUF_DATA(osbuf));
    A_MEMZERO(cmd, size);
    A_MEMCPY(cmd, wowModeCmd, sizeof(WMI_SET_WOW_MODE_CMD));

    return (wmi_cmd_send(wmip, osbuf, WMI_SET_WOW_MODE_CMDID,
                         NO_SYNC_WMIFLAG));
}

A_STATUS
wmi_get_wow_list_cmd(struct wmi_t *wmip,
                     WMI_GET_WOW_LIST_CMD *wowListCmd)
{
    void *osbuf;
    A_INT8 size;
    WMI_GET_WOW_LIST_CMD *cmd;

    size = sizeof (*cmd);

    osbuf = A_NETBUF_ALLOC(size);
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, size);

    cmd = (WMI_GET_WOW_LIST_CMD *)(A_NETBUF_DATA(osbuf));
    A_MEMZERO(cmd, size);
    A_MEMCPY(cmd, wowListCmd, sizeof(WMI_GET_WOW_LIST_CMD));

    return (wmi_cmd_send(wmip, osbuf, WMI_GET_WOW_LIST_CMDID,
                         NO_SYNC_WMIFLAG));
}

static A_STATUS
wmi_get_wow_list_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
{
    WMI_GET_WOW_LIST_REPLY *reply;

    if (len < sizeof(WMI_GET_WOW_LIST_REPLY)) {
        return A_EINVAL;
    }
    reply = (WMI_GET_WOW_LIST_REPLY *)datap;

    A_WMI_WOW_LIST_EVENT(wmip->wmi_devt, reply->num_filters, reply);

    return A_OK;
}

A_STATUS
wmi_add_wow_pattern_cmd(struct wmi_t *wmip,
                        WMI_ADD_WOW_PATTERN_CMD *addWowCmd,
                        A_UINT8* pattern, A_UINT8* mask,
                        A_UINT8 pattern_size)
{
    void *osbuf;
    A_INT8 size;
    WMI_ADD_WOW_PATTERN_CMD *cmd;
    A_UINT8 *filter_mask = NULL;

    size = sizeof (*cmd);

    size += ((2 * addWowCmd->filter_size)* sizeof(A_UINT8));

    osbuf = A_NETBUF_ALLOC(size);
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, size);

    cmd = (WMI_ADD_WOW_PATTERN_CMD *)(A_NETBUF_DATA(osbuf));
    cmd->filter_list_id = addWowCmd->filter_list_id;
    cmd->filter_offset = addWowCmd->filter_offset;
    cmd->filter_size = addWowCmd->filter_size;

    A_MEMCPY(cmd->filter, pattern, addWowCmd->filter_size);

    filter_mask = (A_UINT8*)(cmd->filter + cmd->filter_size);
    A_MEMCPY(filter_mask, mask, addWowCmd->filter_size);

    return (wmi_cmd_send(wmip, osbuf, WMI_ADD_WOW_PATTERN_CMDID,
                         NO_SYNC_WMIFLAG));
}

A_STATUS
wmi_del_wow_pattern_cmd(struct wmi_t *wmip,
                        WMI_DEL_WOW_PATTERN_CMD *delWowCmd)
{
    void *osbuf;
    A_INT8 size;
    WMI_DEL_WOW_PATTERN_CMD *cmd;

    size = sizeof (*cmd);

    osbuf = A_NETBUF_ALLOC(size);
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, size);

    cmd = (WMI_DEL_WOW_PATTERN_CMD *)(A_NETBUF_DATA(osbuf));
    A_MEMZERO(cmd, size);
    A_MEMCPY(cmd, delWowCmd, sizeof(WMI_DEL_WOW_PATTERN_CMD));

    return (wmi_cmd_send(wmip, osbuf, WMI_DEL_WOW_PATTERN_CMDID,
                         NO_SYNC_WMIFLAG));
}

void
wmi_cache_configure_snrthreshold(struct wmi_t *wmip,
                                 WMI_SNR_THRESHOLD_PARAMS_CMD *snrCmd)
{
    SQ_THRESHOLD_PARAMS *sq_thresh =
        &wmip->wmi_SqThresholdParams[SIGNAL_QUALITY_METRICS_SNR];
    /*
     * Parse the command and
     * store the threshold values here. The checks for valid values can be
     * put here.
     */
    sq_thresh->weight = snrCmd->weight;
    sq_thresh->polling_interval = snrCmd->pollTime;

    sq_thresh->upper_threshold[0] = snrCmd->thresholdAbove1_Val;
    sq_thresh->upper_threshold[1] = snrCmd->thresholdAbove2_Val;
    sq_thresh->upper_threshold[2] = snrCmd->thresholdAbove3_Val;
    sq_thresh->upper_threshold[3] = snrCmd->thresholdAbove4_Val;
    sq_thresh->upper_threshold_valid_count = 4;

    /* List sorted in descending order */
    sq_thresh->lower_threshold[0] = snrCmd->thresholdBelow4_Val;
    sq_thresh->lower_threshold[1] = snrCmd->thresholdBelow3_Val;
    sq_thresh->lower_threshold[2] = snrCmd->thresholdBelow2_Val;
    sq_thresh->lower_threshold[3] = snrCmd->thresholdBelow1_Val;
    sq_thresh->lower_threshold_valid_count = 4;

    if (!snr_event_value) {
        /*
         * Configuring the thresholds to their extremes allows the host to
         * get an event from the target, which is used for configuring the
         * correct thresholds
         */
        snrCmd->thresholdAbove1_Val = (A_UINT8)sq_thresh->upper_threshold[0];
        snrCmd->thresholdBelow1_Val = (A_UINT8)sq_thresh->lower_threshold[0];
    } else {
        /*
         * If the user issues the snr threshold setting multiple times, we
         * should not use the extremes anymore; the target does not expect
         * that.
         */
        snrCmd->thresholdAbove1_Val = ar6000_get_upper_threshold(snr_event_value,
                                          sq_thresh, sq_thresh->upper_threshold_valid_count);
        snrCmd->thresholdBelow1_Val = ar6000_get_lower_threshold(snr_event_value,
                                          sq_thresh, sq_thresh->lower_threshold_valid_count);
    }
}

A_STATUS
wmi_set_snr_threshold_params(struct wmi_t *wmip,
                             WMI_SNR_THRESHOLD_PARAMS_CMD *snrCmd)
{
    if (snrCmd->thresholdAbove4_Val <= snrCmd->thresholdAbove3_Val ||
        snrCmd->thresholdAbove3_Val <= snrCmd->thresholdAbove2_Val ||
        snrCmd->thresholdAbove2_Val <= snrCmd->thresholdAbove1_Val ||
        snrCmd->thresholdBelow4_Val <= snrCmd->thresholdBelow3_Val ||
        snrCmd->thresholdBelow3_Val <= snrCmd->thresholdBelow2_Val ||
        snrCmd->thresholdBelow2_Val <= snrCmd->thresholdBelow1_Val)
    {
        return A_EINVAL;
    }

    wmi_cache_configure_snrthreshold(wmip, snrCmd);

    return (wmi_send_snr_threshold_params(wmip, snrCmd));
}

A_STATUS
wmi_clr_rssi_snr(struct wmi_t *wmip)
{
    void *osbuf;

    osbuf = A_NETBUF_ALLOC(sizeof(int));
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    return (wmi_cmd_send(wmip, osbuf, WMI_CLR_RSSI_SNR_CMDID,
                         NO_SYNC_WMIFLAG));
}

A_STATUS
wmi_set_lq_threshold_params(struct wmi_t *wmip,
                            WMI_LQ_THRESHOLD_PARAMS_CMD *lqCmd)
{
    void *osbuf;
    A_INT8 size;
    WMI_LQ_THRESHOLD_PARAMS_CMD *cmd;

    /* These values are in ascending order */
    if (lqCmd->thresholdAbove4_Val <= lqCmd->thresholdAbove3_Val ||
        lqCmd->thresholdAbove3_Val <= lqCmd->thresholdAbove2_Val ||
        lqCmd->thresholdAbove2_Val <= lqCmd->thresholdAbove1_Val ||
        lqCmd->thresholdBelow4_Val <= lqCmd->thresholdBelow3_Val ||
        lqCmd->thresholdBelow3_Val <= lqCmd->thresholdBelow2_Val ||
        lqCmd->thresholdBelow2_Val <= lqCmd->thresholdBelow1_Val)
    {
        return A_EINVAL;
    }

    size = sizeof (*cmd);

    osbuf = A_NETBUF_ALLOC(size);
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, size);

    cmd = (WMI_LQ_THRESHOLD_PARAMS_CMD *)(A_NETBUF_DATA(osbuf));
    A_MEMZERO(cmd, size);
    A_MEMCPY(cmd, lqCmd, sizeof(WMI_LQ_THRESHOLD_PARAMS_CMD));

    return (wmi_cmd_send(wmip, osbuf, WMI_LQ_THRESHOLD_PARAMS_CMDID,
                         NO_SYNC_WMIFLAG));
}

A_STATUS
wmi_set_error_report_bitmask(struct wmi_t *wmip, A_UINT32 mask)
{
    void *osbuf;
    A_INT8 size;
    WMI_TARGET_ERROR_REPORT_BITMASK *cmd;

    size = sizeof (*cmd);

    osbuf = A_NETBUF_ALLOC(size);
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, size);

    cmd =
(WMI_TARGET_ERROR_REPORT_BITMASK *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cmd, size); cmd->bitmask = mask; return (wmi_cmd_send(wmip, osbuf, WMI_TARGET_ERROR_REPORT_BITMASK_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_get_challenge_resp_cmd(struct wmi_t *wmip, A_UINT32 cookie, A_UINT32 source) { void *osbuf; WMIX_HB_CHALLENGE_RESP_CMD *cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMIX_HB_CHALLENGE_RESP_CMD *)(A_NETBUF_DATA(osbuf)); cmd->cookie = cookie; cmd->source = source; return (wmi_cmd_send_xtnd(wmip, osbuf, WMIX_HB_CHALLENGE_RESP_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_config_debug_module_cmd(struct wmi_t *wmip, A_UINT16 mmask, A_UINT16 tsr, A_BOOL rep, A_UINT16 size, A_UINT32 valid) { void *osbuf; WMIX_DBGLOG_CFG_MODULE_CMD *cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMIX_DBGLOG_CFG_MODULE_CMD *)(A_NETBUF_DATA(osbuf)); cmd->config.cfgmmask = mmask; cmd->config.cfgtsr = tsr; cmd->config.cfgrep = rep; cmd->config.cfgsize = size; cmd->config.cfgvalid = valid; return (wmi_cmd_send_xtnd(wmip, osbuf, WMIX_DBGLOG_CFG_MODULE_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_get_stats_cmd(struct wmi_t *wmip) { return wmi_simple_cmd(wmip, WMI_GET_STATISTICS_CMDID); } A_STATUS wmi_addBadAp_cmd(struct wmi_t *wmip, A_UINT8 apIndex, A_UINT8 *bssid) { void *osbuf; WMI_ADD_BAD_AP_CMD *cmd; if ((bssid == NULL) || (apIndex > WMI_MAX_BAD_AP_INDEX)) { return A_EINVAL; } osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_ADD_BAD_AP_CMD *)(A_NETBUF_DATA(osbuf)); cmd->badApIndex = apIndex; A_MEMCPY(cmd->bssid, bssid, sizeof(cmd->bssid)); return (wmi_cmd_send(wmip, osbuf, WMI_ADD_BAD_AP_CMDID, SYNC_BEFORE_WMIFLAG)); } A_STATUS wmi_deleteBadAp_cmd(struct wmi_t *wmip, A_UINT8 apIndex) { void *osbuf; WMI_DELETE_BAD_AP_CMD *cmd; if (apIndex > WMI_MAX_BAD_AP_INDEX) { return A_EINVAL; } osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_DELETE_BAD_AP_CMD *)(A_NETBUF_DATA(osbuf)); cmd->badApIndex = apIndex; return (wmi_cmd_send(wmip, osbuf, WMI_DELETE_BAD_AP_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_abort_scan_cmd(struct wmi_t *wmip) { return wmi_simple_cmd(wmip, WMI_ABORT_SCAN_CMDID); } A_STATUS wmi_set_txPwr_cmd(struct wmi_t *wmip, A_UINT8 dbM) { void *osbuf; WMI_SET_TX_PWR_CMD *cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_SET_TX_PWR_CMD *)(A_NETBUF_DATA(osbuf)); cmd->dbM = dbM; return (wmi_cmd_send(wmip, osbuf, WMI_SET_TX_PWR_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_get_txPwr_cmd(struct wmi_t *wmip) { return wmi_simple_cmd(wmip, WMI_GET_TX_PWR_CMDID); } A_UINT16 wmi_get_mapped_qos_queue(struct wmi_t *wmip, A_UINT8 trafficClass) { A_UINT16 activeTsids=0; LOCK_WMI(wmip); activeTsids = wmip->wmi_streamExistsForAC[trafficClass]; UNLOCK_WMI(wmip); return activeTsids; } A_STATUS wmi_get_roam_tbl_cmd(struct wmi_t *wmip) { return wmi_simple_cmd(wmip, WMI_GET_ROAM_TBL_CMDID); } A_STATUS wmi_get_roam_data_cmd(struct wmi_t *wmip, A_UINT8 roamDataType) { void *osbuf; A_UINT32 size = sizeof(A_UINT8); WMI_TARGET_ROAM_DATA *cmd; osbuf = A_NETBUF_ALLOC(size); /* no payload */ if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, size); cmd = (WMI_TARGET_ROAM_DATA *)(A_NETBUF_DATA(osbuf)); cmd->roamDataType = roamDataType; return 
(wmi_cmd_send(wmip, osbuf, WMI_GET_ROAM_DATA_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_set_roam_ctrl_cmd(struct wmi_t *wmip, WMI_SET_ROAM_CTRL_CMD *p, A_UINT8 size) { void *osbuf; WMI_SET_ROAM_CTRL_CMD *cmd; osbuf = A_NETBUF_ALLOC(size); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, size); cmd = (WMI_SET_ROAM_CTRL_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cmd, size); A_MEMCPY(cmd, p, size); return (wmi_cmd_send(wmip, osbuf, WMI_SET_ROAM_CTRL_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_set_powersave_timers_cmd(struct wmi_t *wmip, WMI_POWERSAVE_TIMERS_POLICY_CMD *pCmd, A_UINT8 size) { void *osbuf; WMI_POWERSAVE_TIMERS_POLICY_CMD *cmd; /* These timers can't be zero */ if(!pCmd->psPollTimeout || !pCmd->triggerTimeout || !(pCmd->apsdTimPolicy == IGNORE_TIM_ALL_QUEUES_APSD || pCmd->apsdTimPolicy == PROCESS_TIM_ALL_QUEUES_APSD) || !(pCmd->simulatedAPSDTimPolicy == IGNORE_TIM_SIMULATED_APSD || pCmd->simulatedAPSDTimPolicy == PROCESS_TIM_SIMULATED_APSD)) return A_EINVAL; osbuf = A_NETBUF_ALLOC(size); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, size); cmd = (WMI_POWERSAVE_TIMERS_POLICY_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cmd, size); A_MEMCPY(cmd, pCmd, size); return (wmi_cmd_send(wmip, osbuf, WMI_SET_POWERSAVE_TIMERS_POLICY_CMDID, NO_SYNC_WMIFLAG)); } #ifdef CONFIG_HOST_GPIO_SUPPORT /* Send a command to Target to change GPIO output pins. */ A_STATUS wmi_gpio_output_set(struct wmi_t *wmip, A_UINT32 set_mask, A_UINT32 clear_mask, A_UINT32 enable_mask, A_UINT32 disable_mask) { void *osbuf; WMIX_GPIO_OUTPUT_SET_CMD *output_set; int size; size = sizeof(*output_set); A_DPRINTF(DBG_WMI, (DBGFMT "Enter - set=0x%x clear=0x%x enb=0x%x dis=0x%x\n", DBGARG, set_mask, clear_mask, enable_mask, disable_mask)); osbuf = A_NETBUF_ALLOC(size); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, size); output_set = (WMIX_GPIO_OUTPUT_SET_CMD *)(A_NETBUF_DATA(osbuf)); output_set->set_mask = set_mask; output_set->clear_mask = clear_mask; output_set->enable_mask = enable_mask; output_set->disable_mask = disable_mask; return (wmi_cmd_send_xtnd(wmip, osbuf, WMIX_GPIO_OUTPUT_SET_CMDID, NO_SYNC_WMIFLAG)); } /* Send a command to the Target requesting state of the GPIO input pins */ A_STATUS wmi_gpio_input_get(struct wmi_t *wmip) { A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG)); return wmi_simple_cmd_xtnd(wmip, WMIX_GPIO_INPUT_GET_CMDID); } /* Send a command to the Target that changes the value of a GPIO register. */ A_STATUS wmi_gpio_register_set(struct wmi_t *wmip, A_UINT32 gpioreg_id, A_UINT32 value) { void *osbuf; WMIX_GPIO_REGISTER_SET_CMD *register_set; int size; size = sizeof(*register_set); A_DPRINTF(DBG_WMI, (DBGFMT "Enter - reg=%d value=0x%x\n", DBGARG, gpioreg_id, value)); osbuf = A_NETBUF_ALLOC(size); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, size); register_set = (WMIX_GPIO_REGISTER_SET_CMD *)(A_NETBUF_DATA(osbuf)); register_set->gpioreg_id = gpioreg_id; register_set->value = value; return (wmi_cmd_send_xtnd(wmip, osbuf, WMIX_GPIO_REGISTER_SET_CMDID, NO_SYNC_WMIFLAG)); } /* Send a command to the Target to fetch the value of a GPIO register. 
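 * (As with the other WMIX GPIO requests, the fetched value is expected to
 * come back asynchronously in a WMIX event rather than as a return value
 * of this call.)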
*/ A_STATUS wmi_gpio_register_get(struct wmi_t *wmip, A_UINT32 gpioreg_id) { void *osbuf; WMIX_GPIO_REGISTER_GET_CMD *register_get; int size; size = sizeof(*register_get); A_DPRINTF(DBG_WMI, (DBGFMT "Enter - reg=%d\n", DBGARG, gpioreg_id)); osbuf = A_NETBUF_ALLOC(size); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, size); register_get = (WMIX_GPIO_REGISTER_GET_CMD *)(A_NETBUF_DATA(osbuf)); register_get->gpioreg_id = gpioreg_id; return (wmi_cmd_send_xtnd(wmip, osbuf, WMIX_GPIO_REGISTER_GET_CMDID, NO_SYNC_WMIFLAG)); } /* Send a command to the Target acknowledging some GPIO interrupts. */ A_STATUS wmi_gpio_intr_ack(struct wmi_t *wmip, A_UINT32 ack_mask) { void *osbuf; WMIX_GPIO_INTR_ACK_CMD *intr_ack; int size; size = sizeof(*intr_ack); A_DPRINTF(DBG_WMI, (DBGFMT "Enter ack_mask=0x%x\n", DBGARG, ack_mask)); osbuf = A_NETBUF_ALLOC(size); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, size); intr_ack = (WMIX_GPIO_INTR_ACK_CMD *)(A_NETBUF_DATA(osbuf)); intr_ack->ack_mask = ack_mask; return (wmi_cmd_send_xtnd(wmip, osbuf, WMIX_GPIO_INTR_ACK_CMDID, NO_SYNC_WMIFLAG)); } #endif /* CONFIG_HOST_GPIO_SUPPORT */ A_STATUS wmi_set_access_params_cmd(struct wmi_t *wmip, A_UINT8 ac, A_UINT16 txop, A_UINT8 eCWmin, A_UINT8 eCWmax, A_UINT8 aifsn) { void *osbuf; WMI_SET_ACCESS_PARAMS_CMD *cmd; if ((eCWmin > WMI_MAX_CW_ACPARAM) || (eCWmax > WMI_MAX_CW_ACPARAM) || (aifsn > WMI_MAX_AIFSN_ACPARAM) || (ac >= WMM_NUM_AC)) { return A_EINVAL; } osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_SET_ACCESS_PARAMS_CMD *)(A_NETBUF_DATA(osbuf)); cmd->txop = txop; cmd->eCWmin = eCWmin; cmd->eCWmax = eCWmax; cmd->aifsn = aifsn; cmd->ac = ac; return (wmi_cmd_send(wmip, osbuf, WMI_SET_ACCESS_PARAMS_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_set_retry_limits_cmd(struct wmi_t *wmip, A_UINT8 frameType, A_UINT8 trafficClass, A_UINT8 maxRetries, A_UINT8 enableNotify) { void *osbuf; WMI_SET_RETRY_LIMITS_CMD *cmd; if ((frameType != MGMT_FRAMETYPE) && (frameType != CONTROL_FRAMETYPE) && (frameType != DATA_FRAMETYPE)) { return A_EINVAL; } if (maxRetries > WMI_MAX_RETRIES) { return A_EINVAL; } if (frameType != DATA_FRAMETYPE) { trafficClass = 0; } osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_SET_RETRY_LIMITS_CMD *)(A_NETBUF_DATA(osbuf)); cmd->frameType = frameType; cmd->trafficClass = trafficClass; cmd->maxRetries = maxRetries; cmd->enableNotify = enableNotify; return (wmi_cmd_send(wmip, osbuf, WMI_SET_RETRY_LIMITS_CMDID, NO_SYNC_WMIFLAG)); } void wmi_get_current_bssid(struct wmi_t *wmip, A_UINT8 *bssid) { if (bssid != NULL) { A_MEMCPY(bssid, wmip->wmi_bssid, ATH_MAC_LEN); } } A_STATUS wmi_set_opt_mode_cmd(struct wmi_t *wmip, A_UINT8 optMode) { void *osbuf; WMI_SET_OPT_MODE_CMD *cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_SET_OPT_MODE_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cmd, sizeof(*cmd)); cmd->optMode = optMode; return (wmi_cmd_send(wmip, osbuf, WMI_SET_OPT_MODE_CMDID, SYNC_BOTH_WMIFLAG)); } A_STATUS wmi_opt_tx_frame_cmd(struct wmi_t *wmip, A_UINT8 frmType, A_UINT8 *dstMacAddr, A_UINT8 *bssid, A_UINT16 optIEDataLen, A_UINT8 *optIEData) { void *osbuf; WMI_OPT_TX_FRAME_CMD *cmd; osbuf = A_NETBUF_ALLOC(optIEDataLen + sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, (optIEDataLen + sizeof(*cmd))); cmd = (WMI_OPT_TX_FRAME_CMD 
*)(A_NETBUF_DATA(osbuf));
    A_MEMZERO(cmd, (optIEDataLen + sizeof(*cmd)-1));

    cmd->frmType = frmType;
    cmd->optIEDataLen = optIEDataLen;
    //cmd->optIEData = (A_UINT8 *)((int)cmd + sizeof(*cmd));
    A_MEMCPY(cmd->bssid, bssid, sizeof(cmd->bssid));
    A_MEMCPY(cmd->dstAddr, dstMacAddr, sizeof(cmd->dstAddr));
    A_MEMCPY(&cmd->optIEData[0], optIEData, optIEDataLen);

    return (wmi_cmd_send(wmip, osbuf, WMI_OPT_TX_FRAME_CMDID,
                         NO_SYNC_WMIFLAG));
}

A_STATUS
wmi_set_adhoc_bconIntvl_cmd(struct wmi_t *wmip, A_UINT16 intvl)
{
    void *osbuf;
    WMI_BEACON_INT_CMD *cmd;

    osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, sizeof(*cmd));

    cmd = (WMI_BEACON_INT_CMD *)(A_NETBUF_DATA(osbuf));
    A_MEMZERO(cmd, sizeof(*cmd));
    cmd->beaconInterval = intvl;

    return (wmi_cmd_send(wmip, osbuf, WMI_SET_BEACON_INT_CMDID,
                         NO_SYNC_WMIFLAG));
}

A_STATUS
wmi_set_voice_pkt_size_cmd(struct wmi_t *wmip, A_UINT16 voicePktSize)
{
    void *osbuf;
    WMI_SET_VOICE_PKT_SIZE_CMD *cmd;

    osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, sizeof(*cmd));

    cmd = (WMI_SET_VOICE_PKT_SIZE_CMD *)(A_NETBUF_DATA(osbuf));
    A_MEMZERO(cmd, sizeof(*cmd));
    cmd->voicePktSize = voicePktSize;

    return (wmi_cmd_send(wmip, osbuf, WMI_SET_VOICE_PKT_SIZE_CMDID,
                         NO_SYNC_WMIFLAG));
}

A_STATUS
wmi_set_max_sp_len_cmd(struct wmi_t *wmip, A_UINT8 maxSPLen)
{
    void *osbuf;
    WMI_SET_MAX_SP_LEN_CMD *cmd;

    /* maxSPLen is a two-bit value. If the user tries to set anything
     * other than this, it is invalid */
    if (maxSPLen & ~0x03)
        return A_EINVAL;

    osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, sizeof(*cmd));

    cmd = (WMI_SET_MAX_SP_LEN_CMD *)(A_NETBUF_DATA(osbuf));
    A_MEMZERO(cmd, sizeof(*cmd));
    cmd->maxSPLen = maxSPLen;

    return (wmi_cmd_send(wmip, osbuf, WMI_SET_MAX_SP_LEN_CMDID,
                         NO_SYNC_WMIFLAG));
}

A_UINT8
wmi_determine_userPriority(A_UINT8 *pkt, A_UINT32 layer2Pri)
{
    A_UINT8 ipPri;
    iphdr *ipHdr = (iphdr *)pkt;

    /* Determine IPTOS priority */
    /*
     * IP Tos format :
     *      (Refer Pg 57 WMM-test-plan-v1.2)
     * IP-TOS - 8bits
     *          : DSCP(6-bits) ECN(2-bits)
     *          : DSCP - P2 P1 P0 X X X
     *              where (P2 P1 P0) form 802.1D
     */
    ipPri = ipHdr->ip_tos >> 5;
    ipPri &= 0x7;

    if ((layer2Pri & 0x7) > ipPri)
        return ((A_UINT8)layer2Pri & 0x7);
    else
        return ipPri;
}

A_UINT8
convert_userPriority_to_trafficClass(A_UINT8 userPriority)
{
    return (up_to_ac[userPriority & 0x7]);
}

A_UINT8
wmi_get_power_mode_cmd(struct wmi_t *wmip)
{
    return wmip->wmi_powerMode;
}

A_STATUS
wmi_verify_tspec_params(WMI_CREATE_PSTREAM_CMD *pCmd, A_BOOL tspecCompliance)
{
    A_STATUS ret = A_OK;

#define TSPEC_SUSPENSION_INTERVAL_ATHEROS_DEF (~0)
#define TSPEC_SERVICE_START_TIME_ATHEROS_DEF  0
#define TSPEC_MAX_BURST_SIZE_ATHEROS_DEF      0
#define TSPEC_DELAY_BOUND_ATHEROS_DEF         0
#define TSPEC_MEDIUM_TIME_ATHEROS_DEF         0
#define TSPEC_SBA_ATHEROS_DEF                 0x2000  /* factor is 1 */

    /* Verify TSPEC params for ATHEROS compliance */
    if (tspecCompliance == ATHEROS_COMPLIANCE) {
        if ((pCmd->suspensionInt != TSPEC_SUSPENSION_INTERVAL_ATHEROS_DEF) ||
            (pCmd->serviceStartTime != TSPEC_SERVICE_START_TIME_ATHEROS_DEF) ||
            (pCmd->minDataRate != pCmd->meanDataRate) ||
            (pCmd->minDataRate != pCmd->peakDataRate) ||
            (pCmd->maxBurstSize != TSPEC_MAX_BURST_SIZE_ATHEROS_DEF) ||
            (pCmd->delayBound != TSPEC_DELAY_BOUND_ATHEROS_DEF) ||
            (pCmd->sba != TSPEC_SBA_ATHEROS_DEF) ||
            (pCmd->mediumTime != TSPEC_MEDIUM_TIME_ATHEROS_DEF)) {

            A_DPRINTF(DBG_WMI, (DBGFMT "Invalid TSPEC params\n", DBGARG));
            //A_PRINTF("%s: Invalid TSPEC params\n", __func__);
            ret = A_EINVAL;
        }
    }

    return
ret; } #ifdef CONFIG_HOST_TCMD_SUPPORT static A_STATUS wmi_tcmd_test_report_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG)); A_WMI_TCMD_RX_REPORT_EVENT(wmip->wmi_devt, datap, len); return A_OK; } #endif /* CONFIG_HOST_TCMD_SUPPORT*/ A_STATUS wmi_set_authmode_cmd(struct wmi_t *wmip, A_UINT8 mode) { void *osbuf; WMI_SET_AUTH_MODE_CMD *cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_SET_AUTH_MODE_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cmd, sizeof(*cmd)); cmd->mode = mode; return (wmi_cmd_send(wmip, osbuf, WMI_SET_AUTH_MODE_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_set_reassocmode_cmd(struct wmi_t *wmip, A_UINT8 mode) { void *osbuf; WMI_SET_REASSOC_MODE_CMD *cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_SET_REASSOC_MODE_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cmd, sizeof(*cmd)); cmd->mode = mode; return (wmi_cmd_send(wmip, osbuf, WMI_SET_REASSOC_MODE_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_set_lpreamble_cmd(struct wmi_t *wmip, A_UINT8 status, A_UINT8 preamblePolicy) { void *osbuf; WMI_SET_LPREAMBLE_CMD *cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_SET_LPREAMBLE_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cmd, sizeof(*cmd)); cmd->status = status; cmd->preamblePolicy = preamblePolicy; return (wmi_cmd_send(wmip, osbuf, WMI_SET_LPREAMBLE_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_set_rts_cmd(struct wmi_t *wmip, A_UINT16 threshold) { void *osbuf; WMI_SET_RTS_CMD *cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_SET_RTS_CMD*)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cmd, sizeof(*cmd)); cmd->threshold = threshold; return (wmi_cmd_send(wmip, osbuf, WMI_SET_RTS_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_set_wmm_cmd(struct wmi_t *wmip, WMI_WMM_STATUS status) { void *osbuf; WMI_SET_WMM_CMD *cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_SET_WMM_CMD*)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cmd, sizeof(*cmd)); cmd->status = status; return (wmi_cmd_send(wmip, osbuf, WMI_SET_WMM_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_set_qos_supp_cmd(struct wmi_t *wmip, A_UINT8 status) { void *osbuf; WMI_SET_QOS_SUPP_CMD *cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_SET_QOS_SUPP_CMD*)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cmd, sizeof(*cmd)); cmd->status = status; return (wmi_cmd_send(wmip, osbuf, WMI_SET_QOS_SUPP_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_set_wmm_txop(struct wmi_t *wmip, WMI_TXOP_CFG cfg) { void *osbuf; WMI_SET_WMM_TXOP_CMD *cmd; if( !((cfg == WMI_TXOP_DISABLED) || (cfg == WMI_TXOP_ENABLED)) ) return A_EINVAL; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_SET_WMM_TXOP_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cmd, sizeof(*cmd)); cmd->txopEnable = cfg; return (wmi_cmd_send(wmip, osbuf, WMI_SET_WMM_TXOP_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_set_country(struct wmi_t *wmip, A_UCHAR *countryCode) { void *osbuf; WMI_AP_SET_COUNTRY_CMD *cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_AP_SET_COUNTRY_CMD *)(A_NETBUF_DATA(osbuf)); 
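    /* countryCode is the 3-octet 802.11 country string: two ASCII letters
     * plus an environment octet (inferred from the 3-byte copy below). */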
    A_MEMZERO(cmd, sizeof(*cmd));
    A_MEMCPY(cmd->countryCode, countryCode, 3);

    return (wmi_cmd_send(wmip, osbuf, WMI_AP_SET_COUNTRY_CMDID,
                         NO_SYNC_WMIFLAG));
}

#ifdef CONFIG_HOST_TCMD_SUPPORT
/* The WMI layer doesn't need to know the data type of the test cmd.
   This is beneficial for customers like Qualcomm, who might have
   different test command requirements from different manufacturers. */
A_STATUS
wmi_test_cmd(struct wmi_t *wmip, A_UINT8 *buf, A_UINT32 len)
{
    void *osbuf;
    char *data;

    A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));

    osbuf = A_NETBUF_ALLOC(len);
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, len);
    data = A_NETBUF_DATA(osbuf);
    A_MEMCPY(data, buf, len);

    return (wmi_cmd_send(wmip, osbuf, WMI_TEST_CMDID, NO_SYNC_WMIFLAG));
}
#endif

A_STATUS
wmi_set_bt_status_cmd(struct wmi_t *wmip, A_UINT8 streamType, A_UINT8 status)
{
    void *osbuf;
    WMI_SET_BT_STATUS_CMD *cmd;

    AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("Enter - streamType=%d, status=%d\n", streamType, status));

    osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, sizeof(*cmd));

    cmd = (WMI_SET_BT_STATUS_CMD *)(A_NETBUF_DATA(osbuf));
    A_MEMZERO(cmd, sizeof(*cmd));
    cmd->streamType = streamType;
    cmd->status = status;

    return (wmi_cmd_send(wmip, osbuf, WMI_SET_BT_STATUS_CMDID,
                         NO_SYNC_WMIFLAG));
}

A_STATUS
wmi_set_bt_params_cmd(struct wmi_t *wmip, WMI_SET_BT_PARAMS_CMD* cmd)
{
    void *osbuf;
    WMI_SET_BT_PARAMS_CMD* alloc_cmd;

    AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("cmd params is %d\n", cmd->paramType));

    if (cmd->paramType == BT_PARAM_SCO) {
        AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("sco params %d %d %d %d %d %d %d %d %d %d %d %d\n",
            cmd->info.scoParams.numScoCyclesForceTrigger,
            cmd->info.scoParams.dataResponseTimeout,
            cmd->info.scoParams.stompScoRules,
            cmd->info.scoParams.scoOptFlags,
            cmd->info.scoParams.stompDutyCyleVal,
            cmd->info.scoParams.stompDutyCyleMaxVal,
            cmd->info.scoParams.psPollLatencyFraction,
            cmd->info.scoParams.noSCOSlots,
            cmd->info.scoParams.noIdleSlots,
            cmd->info.scoParams.scoOptOffRssi,
            cmd->info.scoParams.scoOptOnRssi,
            cmd->info.scoParams.scoOptRtsCount));
    } else if (cmd->paramType == BT_PARAM_A2DP) {
        AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("A2DP params %d %d %d %d %d %d %d %d\n",
            cmd->info.a2dpParams.a2dpWlanUsageLimit,
            cmd->info.a2dpParams.a2dpBurstCntMin,
            cmd->info.a2dpParams.a2dpDataRespTimeout,
            cmd->info.a2dpParams.a2dpOptFlags,
            cmd->info.a2dpParams.isCoLocatedBtRoleMaster,
            cmd->info.a2dpParams.a2dpOptOffRssi,
            cmd->info.a2dpParams.a2dpOptOnRssi,
            cmd->info.a2dpParams.a2dpOptRtsCount));
    } else if (cmd->paramType == BT_PARAM_ANTENNA_CONFIG) {
        AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("Ant config %d\n", cmd->info.antType));
    } else if (cmd->paramType == BT_PARAM_COLOCATED_BT_DEVICE) {
        AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("co-located BT %d\n", cmd->info.coLocatedBtDev));
    } else if (cmd->paramType == BT_PARAM_ACLCOEX) {
        AR_DEBUG_PRINTF(ATH_DEBUG_WARN, ("ACL params %d %d %d\n",
            cmd->info.aclCoexParams.aclWlanMediumUsageTime,
            cmd->info.aclCoexParams.aclBtMediumUsageTime,
            cmd->info.aclCoexParams.aclDataRespTimeout));
    } else if (cmd->paramType == BT_PARAM_11A_SEPARATE_ANT) {
        A_DPRINTF(DBG_WMI, (DBGFMT "11A ant\n", DBGARG));
    }

    osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, sizeof(*cmd));

    alloc_cmd = (WMI_SET_BT_PARAMS_CMD *)(A_NETBUF_DATA(osbuf));
    A_MEMZERO(alloc_cmd, sizeof(*cmd));
    A_MEMCPY(alloc_cmd, cmd, sizeof(*cmd));

    return (wmi_cmd_send(wmip, osbuf, WMI_SET_BT_PARAMS_CMDID,
                         NO_SYNC_WMIFLAG));
}

A_STATUS
wmi_set_btcoex_fe_ant_cmd(struct wmi_t *wmip,
WMI_SET_BTCOEX_FE_ANT_CMD * cmd) { void *osbuf; WMI_SET_BTCOEX_FE_ANT_CMD *alloc_cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); alloc_cmd = (WMI_SET_BTCOEX_FE_ANT_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(alloc_cmd, sizeof(*cmd)); A_MEMCPY(alloc_cmd,cmd,sizeof(WMI_SET_BTCOEX_FE_ANT_CMD)); return (wmi_cmd_send(wmip, osbuf, WMI_SET_BTCOEX_FE_ANT_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_set_btcoex_colocated_bt_dev_cmd(struct wmi_t *wmip, WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMD * cmd) { void *osbuf; WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMD *alloc_cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); alloc_cmd = (WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(alloc_cmd, sizeof(*cmd)); A_MEMCPY(alloc_cmd,cmd,sizeof(WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMD)); A_PRINTF("colocated bt = %d\n", alloc_cmd->btcoexCoLocatedBTdev); return (wmi_cmd_send(wmip, osbuf, WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_set_btcoex_btinquiry_page_config_cmd(struct wmi_t *wmip, WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG_CMD* cmd) { void *osbuf; WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG_CMD *alloc_cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); alloc_cmd = (WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(alloc_cmd, sizeof(*cmd)); A_MEMCPY(alloc_cmd,cmd,sizeof(WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG_CMD)); return (wmi_cmd_send(wmip, osbuf, WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_set_btcoex_sco_config_cmd(struct wmi_t *wmip, WMI_SET_BTCOEX_SCO_CONFIG_CMD * cmd) { void *osbuf; WMI_SET_BTCOEX_SCO_CONFIG_CMD *alloc_cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); alloc_cmd = (WMI_SET_BTCOEX_SCO_CONFIG_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(alloc_cmd, sizeof(*cmd)); A_MEMCPY(alloc_cmd,cmd,sizeof(WMI_SET_BTCOEX_SCO_CONFIG_CMD)); return (wmi_cmd_send(wmip, osbuf, WMI_SET_BTCOEX_SCO_CONFIG_CMDID , NO_SYNC_WMIFLAG)); } A_STATUS wmi_set_btcoex_a2dp_config_cmd(struct wmi_t *wmip, WMI_SET_BTCOEX_A2DP_CONFIG_CMD * cmd) { void *osbuf; WMI_SET_BTCOEX_A2DP_CONFIG_CMD *alloc_cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); alloc_cmd = (WMI_SET_BTCOEX_A2DP_CONFIG_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(alloc_cmd, sizeof(*cmd)); A_MEMCPY(alloc_cmd,cmd,sizeof(WMI_SET_BTCOEX_A2DP_CONFIG_CMD)); return (wmi_cmd_send(wmip, osbuf, WMI_SET_BTCOEX_A2DP_CONFIG_CMDID , NO_SYNC_WMIFLAG)); } A_STATUS wmi_set_btcoex_aclcoex_config_cmd(struct wmi_t *wmip, WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMD * cmd) { void *osbuf; WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMD *alloc_cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); alloc_cmd = (WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(alloc_cmd, sizeof(*cmd)); A_MEMCPY(alloc_cmd,cmd,sizeof(WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMD)); return (wmi_cmd_send(wmip, osbuf, WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMDID , NO_SYNC_WMIFLAG)); } A_STATUS wmi_set_btcoex_debug_cmd(struct wmi_t *wmip, WMI_SET_BTCOEX_DEBUG_CMD * cmd) { void *osbuf; WMI_SET_BTCOEX_DEBUG_CMD *alloc_cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); alloc_cmd = 
(WMI_SET_BTCOEX_DEBUG_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(alloc_cmd, sizeof(*cmd)); A_MEMCPY(alloc_cmd,cmd,sizeof(WMI_SET_BTCOEX_DEBUG_CMD)); return (wmi_cmd_send(wmip, osbuf, WMI_SET_BTCOEX_DEBUG_CMDID , NO_SYNC_WMIFLAG)); } A_STATUS wmi_set_btcoex_bt_operating_status_cmd(struct wmi_t * wmip, WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMD * cmd) { void *osbuf; WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMD *alloc_cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); alloc_cmd = (WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(alloc_cmd, sizeof(*cmd)); A_MEMCPY(alloc_cmd,cmd,sizeof(WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMD)); return (wmi_cmd_send(wmip, osbuf, WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMDID , NO_SYNC_WMIFLAG)); } A_STATUS wmi_get_btcoex_config_cmd(struct wmi_t * wmip, WMI_GET_BTCOEX_CONFIG_CMD * cmd) { void *osbuf; WMI_GET_BTCOEX_CONFIG_CMD *alloc_cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); alloc_cmd = (WMI_GET_BTCOEX_CONFIG_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(alloc_cmd, sizeof(*cmd)); A_MEMCPY(alloc_cmd,cmd,sizeof(WMI_GET_BTCOEX_CONFIG_CMD)); return (wmi_cmd_send(wmip, osbuf, WMI_GET_BTCOEX_CONFIG_CMDID , NO_SYNC_WMIFLAG)); } A_STATUS wmi_get_btcoex_stats_cmd(struct wmi_t *wmip) { return wmi_simple_cmd(wmip, WMI_GET_BTCOEX_STATS_CMDID); } A_STATUS wmi_get_keepalive_configured(struct wmi_t *wmip) { void *osbuf; WMI_GET_KEEPALIVE_CMD *cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_GET_KEEPALIVE_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cmd, sizeof(*cmd)); return (wmi_cmd_send(wmip, osbuf, WMI_GET_KEEPALIVE_CMDID, NO_SYNC_WMIFLAG)); } A_UINT8 wmi_get_keepalive_cmd(struct wmi_t *wmip) { return wmip->wmi_keepaliveInterval; } A_STATUS wmi_set_keepalive_cmd(struct wmi_t *wmip, A_UINT8 keepaliveInterval) { void *osbuf; WMI_SET_KEEPALIVE_CMD *cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_SET_KEEPALIVE_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cmd, sizeof(*cmd)); cmd->keepaliveInterval = keepaliveInterval; wmip->wmi_keepaliveInterval = keepaliveInterval; return (wmi_cmd_send(wmip, osbuf, WMI_SET_KEEPALIVE_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_set_params_cmd(struct wmi_t *wmip, A_UINT32 opcode, A_UINT32 length, A_CHAR* buffer) { void *osbuf; WMI_SET_PARAMS_CMD *cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd) + length); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd) + length); cmd = (WMI_SET_PARAMS_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cmd, sizeof(*cmd)); cmd->opcode = opcode; cmd->length = length; A_MEMCPY(cmd->buffer, buffer, length); return (wmi_cmd_send(wmip, osbuf, WMI_SET_PARAMS_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_set_mcast_filter_cmd(struct wmi_t *wmip, A_UINT8 dot1, A_UINT8 dot2, A_UINT8 dot3, A_UINT8 dot4) { void *osbuf; WMI_SET_MCAST_FILTER_CMD *cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_SET_MCAST_FILTER_CMD *)(A_NETBUF_DATA(osbuf)); cmd->multicast_mac[0] = 0x01; cmd->multicast_mac[1] = 0x00; cmd->multicast_mac[2] = 0x5e; cmd->multicast_mac[3] = dot2&0x7F; cmd->multicast_mac[4] = dot3; cmd->multicast_mac[5] = dot4; return (wmi_cmd_send(wmip, osbuf, WMI_SET_MCAST_FILTER_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_del_mcast_filter_cmd(struct wmi_t 
*wmip, A_UINT8 dot1, A_UINT8 dot2, A_UINT8 dot3, A_UINT8 dot4) { void *osbuf; WMI_SET_MCAST_FILTER_CMD *cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_SET_MCAST_FILTER_CMD *)(A_NETBUF_DATA(osbuf)); cmd->multicast_mac[0] = 0x01; cmd->multicast_mac[1] = 0x00; cmd->multicast_mac[2] = 0x5e; cmd->multicast_mac[3] = dot2&0x7F; cmd->multicast_mac[4] = dot3; cmd->multicast_mac[5] = dot4; return (wmi_cmd_send(wmip, osbuf, WMI_DEL_MCAST_FILTER_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_mcast_filter_cmd(struct wmi_t *wmip, A_UINT8 enable) { void *osbuf; WMI_MCAST_FILTER_CMD *cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_MCAST_FILTER_CMD *)(A_NETBUF_DATA(osbuf)); cmd->enable = enable; return (wmi_cmd_send(wmip, osbuf, WMI_MCAST_FILTER_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_set_appie_cmd(struct wmi_t *wmip, A_UINT8 mgmtFrmType, A_UINT8 ieLen, A_UINT8 *ieInfo) { void *osbuf; WMI_SET_APPIE_CMD *cmd; A_UINT16 cmdLen; cmdLen = sizeof(*cmd) + ieLen - 1; osbuf = A_NETBUF_ALLOC(cmdLen); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, cmdLen); cmd = (WMI_SET_APPIE_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cmd, cmdLen); cmd->mgmtFrmType = mgmtFrmType; cmd->ieLen = ieLen; A_MEMCPY(cmd->ieInfo, ieInfo, ieLen); return (wmi_cmd_send(wmip, osbuf, WMI_SET_APPIE_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_set_halparam_cmd(struct wmi_t *wmip, A_UINT8 *cmd, A_UINT16 dataLen) { void *osbuf; A_UINT8 *data; osbuf = A_NETBUF_ALLOC(dataLen); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, dataLen); data = A_NETBUF_DATA(osbuf); A_MEMCPY(data, cmd, dataLen); return (wmi_cmd_send(wmip, osbuf, WMI_SET_WHALPARAM_CMDID, NO_SYNC_WMIFLAG)); } A_INT32 wmi_get_rate(A_INT8 rateindex) { if (rateindex == RATE_AUTO) { return 0; } else { return(wmi_rateTable[(A_UINT32) rateindex][0]); } } void wmi_node_return (struct wmi_t *wmip, bss_t *bss) { if (NULL != bss) { wlan_node_return (&wmip->wmi_scan_table, bss); } } void wmi_set_nodeage(struct wmi_t *wmip, A_UINT32 nodeAge) { wlan_set_nodeage(&wmip->wmi_scan_table,nodeAge); } bss_t * wmi_find_Ssidnode (struct wmi_t *wmip, A_UCHAR *pSsid, A_UINT32 ssidLength, A_BOOL bIsWPA2, A_BOOL bMatchSSID) { bss_t *node = NULL; node = wlan_find_Ssidnode (&wmip->wmi_scan_table, pSsid, ssidLength, bIsWPA2, bMatchSSID); return node; } #ifdef ATHR_NWIFI void wmi_refresh_scan_table (struct wmi_t *wmip, ULONGLONG OldestAllowedEntry) { wlan_refresh_scan_table (&wmip->wmi_scan_table, OldestAllowedEntry); } #endif void wmi_free_allnodes(struct wmi_t *wmip) { wlan_free_allnodes(&wmip->wmi_scan_table); } bss_t * wmi_find_node(struct wmi_t *wmip, const A_UINT8 *macaddr) { bss_t *ni=NULL; ni=wlan_find_node(&wmip->wmi_scan_table,macaddr); return ni; } void wmi_free_node(struct wmi_t *wmip, const A_UINT8 *macaddr) { bss_t *ni=NULL; ni=wlan_find_node(&wmip->wmi_scan_table,macaddr); if (ni != NULL) { wlan_node_reclaim(&wmip->wmi_scan_table, ni); } return; } A_STATUS wmi_dset_open_reply(struct wmi_t *wmip, A_UINT32 status, A_UINT32 access_cookie, A_UINT32 dset_size, A_UINT32 dset_version, A_UINT32 targ_handle, A_UINT32 targ_reply_fn, A_UINT32 targ_reply_arg) { void *osbuf; WMIX_DSETOPEN_REPLY_CMD *open_reply; A_DPRINTF(DBG_WMI, (DBGFMT "Enter - wmip=0x%x\n", DBGARG, (int)wmip)); osbuf = A_NETBUF_ALLOC(sizeof(*open_reply)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*open_reply)); open_reply = 
(WMIX_DSETOPEN_REPLY_CMD *)(A_NETBUF_DATA(osbuf)); open_reply->status = status; open_reply->targ_dset_handle = targ_handle; open_reply->targ_reply_fn = targ_reply_fn; open_reply->targ_reply_arg = targ_reply_arg; open_reply->access_cookie = access_cookie; open_reply->size = dset_size; open_reply->version = dset_version; return (wmi_cmd_send_xtnd(wmip, osbuf, WMIX_DSETOPEN_REPLY_CMDID, NO_SYNC_WMIFLAG)); } static A_STATUS wmi_get_pmkid_list_event_rx(struct wmi_t *wmip, A_UINT8 *datap, A_UINT32 len) { WMI_PMKID_LIST_REPLY *reply; A_UINT32 expected_len; if (len < sizeof(WMI_PMKID_LIST_REPLY)) { return A_EINVAL; } reply = (WMI_PMKID_LIST_REPLY *)datap; expected_len = sizeof(reply->numPMKID) + reply->numPMKID * WMI_PMKID_LEN; if (len < expected_len) { return A_EINVAL; } A_WMI_PMKID_LIST_EVENT(wmip->wmi_devt, reply->numPMKID, reply->pmkidList, reply->bssidList[0]); return A_OK; } static A_STATUS wmi_set_params_event_rx(struct wmi_t *wmip, A_UINT8 *datap, A_UINT32 len) { WMI_SET_PARAMS_REPLY *reply; if (len < sizeof(WMI_SET_PARAMS_REPLY)) { return A_EINVAL; } reply = (WMI_SET_PARAMS_REPLY *)datap; if (A_OK == reply->status) { } else { } return A_OK; } #ifdef CONFIG_HOST_DSET_SUPPORT A_STATUS wmi_dset_data_reply(struct wmi_t *wmip, A_UINT32 status, A_UINT8 *user_buf, A_UINT32 length, A_UINT32 targ_buf, A_UINT32 targ_reply_fn, A_UINT32 targ_reply_arg) { void *osbuf; WMIX_DSETDATA_REPLY_CMD *data_reply; A_UINT32 size; size = sizeof(*data_reply) + length; if (size <= length) { return A_ERROR; } A_DPRINTF(DBG_WMI, (DBGFMT "Enter - length=%d status=%d\n", DBGARG, length, status)); osbuf = A_NETBUF_ALLOC(size); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, size); data_reply = (WMIX_DSETDATA_REPLY_CMD *)(A_NETBUF_DATA(osbuf)); data_reply->status = status; data_reply->targ_buf = targ_buf; data_reply->targ_reply_fn = targ_reply_fn; data_reply->targ_reply_arg = targ_reply_arg; data_reply->length = length; if (status == A_OK) { if (a_copy_from_user(data_reply->buf, user_buf, length)) { A_NETBUF_FREE(osbuf); return A_ERROR; } } return (wmi_cmd_send_xtnd(wmip, osbuf, WMIX_DSETDATA_REPLY_CMDID, NO_SYNC_WMIFLAG)); } #endif /* CONFIG_HOST_DSET_SUPPORT */ A_STATUS wmi_set_wsc_status_cmd(struct wmi_t *wmip, A_UINT32 status) { void *osbuf; char *cmd; wps_enable = status; osbuf = a_netbuf_alloc(sizeof(1)); if (osbuf == NULL) { return A_NO_MEMORY; } a_netbuf_put(osbuf, sizeof(1)); cmd = (char *)(a_netbuf_to_data(osbuf)); A_MEMZERO(cmd, sizeof(*cmd)); cmd[0] = (status?1:0); return (wmi_cmd_send(wmip, osbuf, WMI_SET_WSC_STATUS_CMDID, NO_SYNC_WMIFLAG)); } #if defined(CONFIG_TARGET_PROFILE_SUPPORT) A_STATUS wmi_prof_cfg_cmd(struct wmi_t *wmip, A_UINT32 period, A_UINT32 nbins) { void *osbuf; WMIX_PROF_CFG_CMD *cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMIX_PROF_CFG_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cmd, sizeof(*cmd)); cmd->period = period; cmd->nbins = nbins; return (wmi_cmd_send_xtnd(wmip, osbuf, WMIX_PROF_CFG_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_prof_addr_set_cmd(struct wmi_t *wmip, A_UINT32 addr) { void *osbuf; WMIX_PROF_ADDR_SET_CMD *cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMIX_PROF_ADDR_SET_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(cmd, sizeof(*cmd)); cmd->addr = addr; return (wmi_cmd_send_xtnd(wmip, osbuf, WMIX_PROF_ADDR_SET_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_prof_start_cmd(struct wmi_t *wmip) { return 
wmi_simple_cmd_xtnd(wmip, WMIX_PROF_START_CMDID); } A_STATUS wmi_prof_stop_cmd(struct wmi_t *wmip) { return wmi_simple_cmd_xtnd(wmip, WMIX_PROF_STOP_CMDID); } A_STATUS wmi_prof_count_get_cmd(struct wmi_t *wmip) { return wmi_simple_cmd_xtnd(wmip, WMIX_PROF_COUNT_GET_CMDID); } /* Called to handle WMIX_PROF_CONT_EVENTID */ static A_STATUS wmi_prof_count_rx(struct wmi_t *wmip, A_UINT8 *datap, int len) { WMIX_PROF_COUNT_EVENT *prof_data = (WMIX_PROF_COUNT_EVENT *)datap; A_DPRINTF(DBG_WMI, (DBGFMT "Enter - addr=0x%x count=%d\n", DBGARG, prof_data->addr, prof_data->count)); A_WMI_PROF_COUNT_RX(prof_data->addr, prof_data->count); return A_OK; } #endif /* CONFIG_TARGET_PROFILE_SUPPORT */ #ifdef OS_ROAM_MANAGEMENT #define ETHERNET_MAC_ADDRESS_LENGTH 6 void wmi_scan_indication (struct wmi_t *wmip) { struct ieee80211_node_table *nt; A_UINT32 gen; A_UINT32 size; A_UINT32 bsssize; bss_t *bss; A_UINT32 numbss; PNDIS_802_11_BSSID_SCAN_INFO psi; PBYTE pie; NDIS_802_11_FIXED_IEs *pFixed; NDIS_802_11_VARIABLE_IEs *pVar; A_UINT32 RateSize; struct ar6kScanIndication { NDIS_802_11_STATUS_INDICATION ind; NDIS_802_11_BSSID_SCAN_INFO_LIST slist; } *pAr6kScanIndEvent; nt = &wmip->wmi_scan_table; ++nt->nt_si_gen; gen = nt->nt_si_gen; size = offsetof(struct ar6kScanIndication, slist) + offsetof(NDIS_802_11_BSSID_SCAN_INFO_LIST, BssidScanInfo); numbss = 0; IEEE80211_NODE_LOCK(nt); //calc size for (bss = nt->nt_node_first; bss; bss = bss->ni_list_next) { if (bss->ni_si_gen != gen) { bsssize = offsetof(NDIS_802_11_BSSID_SCAN_INFO, Bssid) + offsetof(NDIS_WLAN_BSSID_EX, IEs); bsssize = bsssize + sizeof(NDIS_802_11_FIXED_IEs); #ifdef SUPPORT_WPA2 if (bss->ni_cie.ie_rsn) { bsssize = bsssize + bss->ni_cie.ie_rsn[1] + 2; } #endif if (bss->ni_cie.ie_wpa) { bsssize = bsssize + bss->ni_cie.ie_wpa[1] + 2; } // bsssize must be a multiple of 4 to maintain alignment. bsssize = (bsssize + 3) & ~3; size += bsssize; numbss++; } } if (0 == numbss) { // RETAILMSG(1, (L"AR6K: scan indication: 0 bss\n")); ar6000_scan_indication (wmip->wmi_devt, NULL, 0); IEEE80211_NODE_UNLOCK (nt); return; } pAr6kScanIndEvent = A_MALLOC(size); if (NULL == pAr6kScanIndEvent) { IEEE80211_NODE_UNLOCK(nt); return; } A_MEMZERO(pAr6kScanIndEvent, size); //copy data pAr6kScanIndEvent->ind.StatusType = Ndis802_11StatusType_BssidScanInfoList; pAr6kScanIndEvent->slist.Version = 1; pAr6kScanIndEvent->slist.NumItems = numbss; psi = &pAr6kScanIndEvent->slist.BssidScanInfo[0]; for (bss = nt->nt_node_first; bss; bss = bss->ni_list_next) { if (bss->ni_si_gen != gen) { bss->ni_si_gen = gen; //Set scan time psi->ScanTime = bss->ni_tstamp - WLAN_NODE_INACT_TIMEOUT_MSEC; // Copy data to bssid_ex bsssize = offsetof(NDIS_WLAN_BSSID_EX, IEs); bsssize = bsssize + sizeof(NDIS_802_11_FIXED_IEs); #ifdef SUPPORT_WPA2 if (bss->ni_cie.ie_rsn) { bsssize = bsssize + bss->ni_cie.ie_rsn[1] + 2; } #endif if (bss->ni_cie.ie_wpa) { bsssize = bsssize + bss->ni_cie.ie_wpa[1] + 2; } // bsssize must be a multiple of 4 to maintain alignment. 
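            // ((x + 3) & ~3) rounds x up to the next multiple of 4, e.g. 13 -> 16.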
bsssize = (bsssize + 3) & ~3; psi->Bssid.Length = bsssize; memcpy (psi->Bssid.MacAddress, bss->ni_macaddr, ETHERNET_MAC_ADDRESS_LENGTH); //if (((bss->ni_macaddr[3] == 0xCE) && (bss->ni_macaddr[4] == 0xF0) && (bss->ni_macaddr[5] == 0xE7)) || // ((bss->ni_macaddr[3] == 0x03) && (bss->ni_macaddr[4] == 0xE2) && (bss->ni_macaddr[5] == 0x70))) // RETAILMSG (1, (L"%x\n",bss->ni_macaddr[5])); psi->Bssid.Ssid.SsidLength = 0; pie = bss->ni_cie.ie_ssid; if (pie) { // Format of SSID IE is: // Type (1 octet) // Length (1 octet) // SSID (Length octets) // // Validation of the IE should have occurred within WMI. // if (pie[1] <= 32) { psi->Bssid.Ssid.SsidLength = pie[1]; memcpy(psi->Bssid.Ssid.Ssid, &pie[2], psi->Bssid.Ssid.SsidLength); } } psi->Bssid.Privacy = (bss->ni_cie.ie_capInfo & 0x10) ? 1 : 0; //Post the RSSI value relative to the Standard Noise floor value. psi->Bssid.Rssi = bss->ni_rssi; if (bss->ni_cie.ie_chan >= 2412 && bss->ni_cie.ie_chan <= 2484) { if (bss->ni_cie.ie_rates && bss->ni_cie.ie_xrates) { psi->Bssid.NetworkTypeInUse = Ndis802_11OFDM24; } else { psi->Bssid.NetworkTypeInUse = Ndis802_11DS; } } else { psi->Bssid.NetworkTypeInUse = Ndis802_11OFDM5; } psi->Bssid.Configuration.Length = sizeof(psi->Bssid.Configuration); psi->Bssid.Configuration.BeaconPeriod = bss->ni_cie.ie_beaconInt; // Units are Kmicroseconds (1024 us) psi->Bssid.Configuration.ATIMWindow = 0; psi->Bssid.Configuration.DSConfig = bss->ni_cie.ie_chan * 1000; psi->Bssid.InfrastructureMode = ((bss->ni_cie.ie_capInfo & 0x03) == 0x01 ) ? Ndis802_11Infrastructure : Ndis802_11IBSS; RateSize = 0; pie = bss->ni_cie.ie_rates; if (pie) { RateSize = (pie[1] < NDIS_802_11_LENGTH_RATES_EX) ? pie[1] : NDIS_802_11_LENGTH_RATES_EX; memcpy(psi->Bssid.SupportedRates, &pie[2], RateSize); } pie = bss->ni_cie.ie_xrates; if (pie && RateSize < NDIS_802_11_LENGTH_RATES_EX) { memcpy(psi->Bssid.SupportedRates + RateSize, &pie[2], (pie[1] < (NDIS_802_11_LENGTH_RATES_EX - RateSize)) ? pie[1] : (NDIS_802_11_LENGTH_RATES_EX - RateSize)); } // Copy the fixed IEs psi->Bssid.IELength = sizeof(NDIS_802_11_FIXED_IEs); pFixed = (NDIS_802_11_FIXED_IEs *)psi->Bssid.IEs; memcpy(pFixed->Timestamp, bss->ni_cie.ie_tstamp, sizeof(pFixed->Timestamp)); pFixed->BeaconInterval = bss->ni_cie.ie_beaconInt; pFixed->Capabilities = bss->ni_cie.ie_capInfo; // Copy selected variable IEs pVar = (NDIS_802_11_VARIABLE_IEs *)((PBYTE)pFixed + sizeof(NDIS_802_11_FIXED_IEs)); #ifdef SUPPORT_WPA2 // Copy the WPAv2 IE if (bss->ni_cie.ie_rsn) { pie = bss->ni_cie.ie_rsn; psi->Bssid.IELength += pie[1] + 2; memcpy(pVar, pie, pie[1] + 2); pVar = (NDIS_802_11_VARIABLE_IEs *)((PBYTE)pVar + pie[1] + 2); } #endif // Copy the WPAv1 IE if (bss->ni_cie.ie_wpa) { pie = bss->ni_cie.ie_wpa; psi->Bssid.IELength += pie[1] + 2; memcpy(pVar, pie, pie[1] + 2); pVar = (NDIS_802_11_VARIABLE_IEs *)((PBYTE)pVar + pie[1] + 2); } // Advance buffer pointer psi = (PNDIS_802_11_BSSID_SCAN_INFO)((BYTE*)psi + bsssize + FIELD_OFFSET(NDIS_802_11_BSSID_SCAN_INFO, Bssid)); } } IEEE80211_NODE_UNLOCK(nt); // wmi_free_allnodes(wmip); // RETAILMSG(1, (L"AR6K: scan indication: %u bss\n", numbss)); ar6000_scan_indication (wmip->wmi_devt, pAr6kScanIndEvent, size); A_FREE(pAr6kScanIndEvent); } #endif A_UINT8 ar6000_get_upper_threshold(A_INT16 rssi, SQ_THRESHOLD_PARAMS *sq_thresh, A_UINT32 size) { A_UINT32 index; A_UINT8 threshold = (A_UINT8)sq_thresh->upper_threshold[size - 1]; /* The list is already in sorted order. 
Get the next upper value */
    for (index = 0; index < size; index ++) {
        if (rssi < sq_thresh->upper_threshold[index]) {
            threshold = (A_UINT8)sq_thresh->upper_threshold[index];
            break;
        }
    }
    return threshold;
}

A_UINT8
ar6000_get_lower_threshold(A_INT16 rssi, SQ_THRESHOLD_PARAMS *sq_thresh,
                           A_UINT32 size)
{
    A_UINT32 index;
    A_UINT8 threshold = (A_UINT8)sq_thresh->lower_threshold[size - 1];

    /* The list is already in sorted order. Get the next lower value */
    for (index = 0; index < size; index ++) {
        if (rssi > sq_thresh->lower_threshold[index]) {
            threshold = (A_UINT8)sq_thresh->lower_threshold[index];
            break;
        }
    }
    return threshold;
}

static A_STATUS
wmi_send_rssi_threshold_params(struct wmi_t *wmip,
                               WMI_RSSI_THRESHOLD_PARAMS_CMD *rssiCmd)
{
    void *osbuf;
    A_INT8 size;
    WMI_RSSI_THRESHOLD_PARAMS_CMD *cmd;

    size = sizeof (*cmd);

    osbuf = A_NETBUF_ALLOC(size);
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, size);

    cmd = (WMI_RSSI_THRESHOLD_PARAMS_CMD *)(A_NETBUF_DATA(osbuf));
    A_MEMZERO(cmd, size);
    A_MEMCPY(cmd, rssiCmd, sizeof(WMI_RSSI_THRESHOLD_PARAMS_CMD));

    return (wmi_cmd_send(wmip, osbuf, WMI_RSSI_THRESHOLD_PARAMS_CMDID,
                         NO_SYNC_WMIFLAG));
}

static A_STATUS
wmi_send_snr_threshold_params(struct wmi_t *wmip,
                              WMI_SNR_THRESHOLD_PARAMS_CMD *snrCmd)
{
    void *osbuf;
    A_INT8 size;
    WMI_SNR_THRESHOLD_PARAMS_CMD *cmd;

    size = sizeof (*cmd);

    osbuf = A_NETBUF_ALLOC(size);
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, size);

    cmd = (WMI_SNR_THRESHOLD_PARAMS_CMD *)(A_NETBUF_DATA(osbuf));
    A_MEMZERO(cmd, size);
    A_MEMCPY(cmd, snrCmd, sizeof(WMI_SNR_THRESHOLD_PARAMS_CMD));

    return (wmi_cmd_send(wmip, osbuf, WMI_SNR_THRESHOLD_PARAMS_CMDID,
                         NO_SYNC_WMIFLAG));
}

A_STATUS
wmi_set_target_event_report_cmd(struct wmi_t *wmip,
                                WMI_SET_TARGET_EVENT_REPORT_CMD *cmd)
{
    void *osbuf;
    WMI_SET_TARGET_EVENT_REPORT_CMD *alloc_cmd;

    osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, sizeof(*cmd));

    alloc_cmd = (WMI_SET_TARGET_EVENT_REPORT_CMD *)(A_NETBUF_DATA(osbuf));
    A_MEMZERO(alloc_cmd, sizeof(*cmd));
    A_MEMCPY(alloc_cmd, cmd, sizeof(*cmd));

    return (wmi_cmd_send(wmip, osbuf, WMI_SET_TARGET_EVENT_REPORT_CMDID,
                         NO_SYNC_WMIFLAG));
}

bss_t *wmi_rm_current_bss (struct wmi_t *wmip, A_UINT8 *id)
{
    wmi_get_current_bssid (wmip, id);
    return wlan_node_remove (&wmip->wmi_scan_table, id);
}

A_STATUS wmi_add_current_bss (struct wmi_t *wmip, A_UINT8 *id, bss_t *bss)
{
    wlan_setup_node (&wmip->wmi_scan_table, bss, id);
    return A_OK;
}

#ifdef ATH_AR6K_11N_SUPPORT
static A_STATUS
wmi_addba_req_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
{
    WMI_ADDBA_REQ_EVENT *cmd = (WMI_ADDBA_REQ_EVENT *)datap;

    A_WMI_AGGR_RECV_ADDBA_REQ_EVT(wmip->wmi_devt, cmd);
    return A_OK;
}

static A_STATUS
wmi_addba_resp_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
{
    WMI_ADDBA_RESP_EVENT *cmd = (WMI_ADDBA_RESP_EVENT *)datap;

    A_WMI_AGGR_RECV_ADDBA_RESP_EVT(wmip->wmi_devt, cmd);
    return A_OK;
}

static A_STATUS
wmi_delba_req_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
{
    WMI_DELBA_EVENT *cmd = (WMI_DELBA_EVENT *)datap;

    A_WMI_AGGR_RECV_DELBA_REQ_EVT(wmip->wmi_devt, cmd);
    return A_OK;
}

A_STATUS
wmi_btcoex_config_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
{
    A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
    A_WMI_BTCOEX_CONFIG_EVENT(wmip->wmi_devt, datap, len);
    return A_OK;
}

A_STATUS
wmi_btcoex_stats_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
{
    A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
    A_WMI_BTCOEX_STATS_EVENT(wmip->wmi_devt, datap, len);
    return A_OK;
}
#endif

static
A_STATUS wmi_hci_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
{
    WMI_HCI_EVENT *cmd = (WMI_HCI_EVENT *)datap;

    A_WMI_HCI_EVENT_EVT(wmip->wmi_devt, cmd);
    return A_OK;
}

////////////////////////////////////////////////////////////////////////////////
////                                                                        ////
////                          AP mode functions                             ////
////                                                                        ////
////////////////////////////////////////////////////////////////////////////////
/*
 * IOCTL: AR6000_XIOCTL_AP_COMMIT_CONFIG
 *
 * When the AR6K is in AP mode, this command is called after changing ssid,
 * channel, etc. It passes the profile to the target together with a flag
 * indicating which parameters changed. If the flag is 0, no parameter
 * changed, and no commit command is sent to the target. Without calling
 * this IOCTL the changes will not take effect.
 */
A_STATUS
wmi_ap_profile_commit(struct wmi_t *wmip, WMI_CONNECT_CMD *p)
{
    void *osbuf;
    WMI_CONNECT_CMD *cm;

    osbuf = A_NETBUF_ALLOC(sizeof(*cm));
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, sizeof(*cm));
    cm = (WMI_CONNECT_CMD *)(A_NETBUF_DATA(osbuf));
    A_MEMZERO(cm, sizeof(*cm));
    A_MEMCPY(cm, p, sizeof(*cm));

    return (wmi_cmd_send(wmip, osbuf, WMI_AP_CONFIG_COMMIT_CMDID,
                         NO_SYNC_WMIFLAG));
}

/*
 * IOCTL: AR6000_XIOCTL_AP_HIDDEN_SSID
 *
 * This command is used to enable/disable the hidden-ssid functionality of
 * the beacon. If it is enabled, the ssid in the beacon will be NULL.
 */
A_STATUS
wmi_ap_set_hidden_ssid(struct wmi_t *wmip, A_UINT8 hidden_ssid)
{
    void *osbuf;
    WMI_AP_HIDDEN_SSID_CMD *hs;

    osbuf = A_NETBUF_ALLOC(sizeof(WMI_AP_HIDDEN_SSID_CMD));
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, sizeof(WMI_AP_HIDDEN_SSID_CMD));
    hs = (WMI_AP_HIDDEN_SSID_CMD *)(A_NETBUF_DATA(osbuf));
    A_MEMZERO(hs, sizeof(*hs));

    hs->hidden_ssid = hidden_ssid;

    A_DPRINTF(DBG_WMI, (DBGFMT "AR6000_XIOCTL_AP_HIDDEN_SSID %d\n",
              DBGARG, hidden_ssid));
    return (wmi_cmd_send(wmip, osbuf, WMI_AP_HIDDEN_SSID_CMDID,
                         NO_SYNC_WMIFLAG));
}

/*
 * IOCTL: AR6000_XIOCTL_AP_SET_MAX_NUM_STA
 *
 * This command is used to limit the max number of STAs that can connect
 * to this AP. The value must not exceed AP_MAX_NUM_STA (the max number of
 * STAs supported by the AP). The value was already validated in ioctl.c.
 */
A_STATUS
wmi_ap_set_num_sta(struct wmi_t *wmip, A_UINT8 num_sta)
{
    void *osbuf;
    WMI_AP_SET_NUM_STA_CMD *ns;

    osbuf = A_NETBUF_ALLOC(sizeof(WMI_AP_SET_NUM_STA_CMD));
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, sizeof(WMI_AP_SET_NUM_STA_CMD));
    ns = (WMI_AP_SET_NUM_STA_CMD *)(A_NETBUF_DATA(osbuf));
    A_MEMZERO(ns, sizeof(*ns));

    ns->num_sta = num_sta;

    A_DPRINTF(DBG_WMI, (DBGFMT "AR6000_XIOCTL_AP_SET_MAX_NUM_STA %d\n",
              DBGARG, num_sta));
    return (wmi_cmd_send(wmip, osbuf, WMI_AP_SET_NUM_STA_CMDID,
                         NO_SYNC_WMIFLAG));
}

/*
 * IOCTL: AR6000_XIOCTL_AP_SET_ACL_MAC
 *
 * This command is used to send the list of MAC addresses of STAs which
 * will be allowed to connect to this AP. When this list is empty the
 * firmware allows all STAs until the count reaches AP_MAX_NUM_STA.
 */
A_STATUS
wmi_ap_acl_mac_list(struct wmi_t *wmip, WMI_AP_ACL_MAC_CMD *acl)
{
    void *osbuf;
    WMI_AP_ACL_MAC_CMD *a;

    osbuf = A_NETBUF_ALLOC(sizeof(WMI_AP_ACL_MAC_CMD));
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, sizeof(WMI_AP_ACL_MAC_CMD));
    a = (WMI_AP_ACL_MAC_CMD *)(A_NETBUF_DATA(osbuf));
    A_MEMZERO(a, sizeof(*a));
    A_MEMCPY(a, acl, sizeof(*acl));

    return (wmi_cmd_send(wmip, osbuf, WMI_AP_ACL_MAC_LIST_CMDID,
                         NO_SYNC_WMIFLAG));
}
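/*
 * Illustrative sketch (not part of the original source): every fixed-size
 * AP command in this section follows the same five-step shape -- allocate
 * a netbuf, reserve the payload, zero it, fill it, and hand it to
 * wmi_cmd_send(), which consumes the buffer. The helper name and the
 * WMI_COMMAND_ID parameter type below are assumptions, so the sketch is
 * kept under #if 0.
 */
#if 0
static A_STATUS
wmi_ap_fixed_cmd_example(struct wmi_t *wmip, WMI_COMMAND_ID cmdId,
                         void *payload, int size)
{
    void *osbuf = A_NETBUF_ALLOC(size);              /* 1. allocate */

    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }
    A_NETBUF_PUT(osbuf, size);                       /* 2. reserve payload room */
    A_MEMZERO(A_NETBUF_DATA(osbuf), size);           /* 3. clear it */
    A_MEMCPY(A_NETBUF_DATA(osbuf), payload, size);   /* 4. fill it */
    /* 5. queue it; the netbuf is consumed by wmi_cmd_send() */
    return wmi_cmd_send(wmip, osbuf, cmdId, NO_SYNC_WMIFLAG);
}
#endif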
/*
 * IOCTL: AR6000_XIOCTL_AP_SET_MLME
 *
 * This command sends an MLME request to the target for the STA with the
 * given MAC address; 'cmd' selects the MLME operation and 'reason'
 * carries the associated 802.11 reason code where applicable.
 */
A_STATUS
wmi_ap_set_mlme(struct wmi_t *wmip, A_UINT8 cmd, A_UINT8 *mac, A_UINT16 reason)
{
    void *osbuf;
    WMI_AP_SET_MLME_CMD *mlme;

    osbuf = A_NETBUF_ALLOC(sizeof(WMI_AP_SET_MLME_CMD));
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, sizeof(WMI_AP_SET_MLME_CMD));
    mlme = (WMI_AP_SET_MLME_CMD *)(A_NETBUF_DATA(osbuf));
    A_MEMZERO(mlme, sizeof(*mlme));

    mlme->cmd = cmd;
    A_MEMCPY(mlme->mac, mac, ATH_MAC_LEN);
    mlme->reason = reason;

    return (wmi_cmd_send(wmip, osbuf, WMI_AP_SET_MLME_CMDID,
                         NO_SYNC_WMIFLAG));
}

static A_STATUS
wmi_pspoll_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
{
    WMI_PSPOLL_EVENT *ev;

    if (len < sizeof(WMI_PSPOLL_EVENT)) {
        return A_EINVAL;
    }
    ev = (WMI_PSPOLL_EVENT *)datap;

    A_WMI_PSPOLL_EVENT(wmip->wmi_devt, ev->aid);
    return A_OK;
}

static A_STATUS
wmi_dtimexpiry_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
{
    A_WMI_DTIMEXPIRY_EVENT(wmip->wmi_devt);
    return A_OK;
}

#ifdef WAPI_ENABLE
static A_STATUS
wmi_wapi_rekey_event_rx(struct wmi_t *wmip, A_UINT8 *datap, int len)
{
    A_UINT8 *ev;

    if (len < 7) {
        return A_EINVAL;
    }
    ev = (A_UINT8 *)datap;

    A_WMI_WAPI_REKEY_EVENT(wmip->wmi_devt, *ev, &ev[1]);
    return A_OK;
}
#endif

A_STATUS
wmi_set_pvb_cmd(struct wmi_t *wmip, A_UINT16 aid, A_BOOL flag)
{
    WMI_AP_SET_PVB_CMD *cmd;
    void *osbuf = NULL;

    osbuf = A_NETBUF_ALLOC(sizeof(WMI_AP_SET_PVB_CMD));
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, sizeof(WMI_AP_SET_PVB_CMD));
    cmd = (WMI_AP_SET_PVB_CMD *)(A_NETBUF_DATA(osbuf));
    A_MEMZERO(cmd, sizeof(*cmd));

    cmd->aid = aid;
    cmd->flag = flag;

    return (wmi_cmd_send(wmip, osbuf, WMI_AP_SET_PVB_CMDID,
                         NO_SYNC_WMIFLAG));
}

A_STATUS
wmi_ap_conn_inact_time(struct wmi_t *wmip, A_UINT32 period)
{
    WMI_AP_CONN_INACT_CMD *cmd;
    void *osbuf = NULL;

    osbuf = A_NETBUF_ALLOC(sizeof(WMI_AP_CONN_INACT_CMD));
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, sizeof(WMI_AP_CONN_INACT_CMD));
    cmd = (WMI_AP_CONN_INACT_CMD *)(A_NETBUF_DATA(osbuf));
    A_MEMZERO(cmd, sizeof(*cmd));

    cmd->period = period;

    return (wmi_cmd_send(wmip, osbuf, WMI_AP_CONN_INACT_CMDID,
                         NO_SYNC_WMIFLAG));
}

A_STATUS
wmi_ap_bgscan_time(struct wmi_t *wmip, A_UINT32 period, A_UINT32 dwell)
{
    WMI_AP_PROT_SCAN_TIME_CMD *cmd;
    void *osbuf = NULL;

    osbuf = A_NETBUF_ALLOC(sizeof(WMI_AP_PROT_SCAN_TIME_CMD));
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, sizeof(WMI_AP_PROT_SCAN_TIME_CMD));
    cmd = (WMI_AP_PROT_SCAN_TIME_CMD *)(A_NETBUF_DATA(osbuf));
    A_MEMZERO(cmd, sizeof(*cmd));

    cmd->period_min = period;
    cmd->dwell_ms = dwell;

    return (wmi_cmd_send(wmip, osbuf, WMI_AP_PROT_SCAN_TIME_CMDID,
                         NO_SYNC_WMIFLAG));
}

A_STATUS
wmi_ap_set_dtim(struct wmi_t *wmip, A_UINT8 dtim)
{
    WMI_AP_SET_DTIM_CMD *cmd;
    void *osbuf = NULL;

    osbuf = A_NETBUF_ALLOC(sizeof(WMI_AP_SET_DTIM_CMD));
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, sizeof(WMI_AP_SET_DTIM_CMD));
    cmd = (WMI_AP_SET_DTIM_CMD *)(A_NETBUF_DATA(osbuf));
    A_MEMZERO(cmd, sizeof(*cmd));

    cmd->dtim = dtim;

    return (wmi_cmd_send(wmip, osbuf, WMI_AP_SET_DTIM_CMDID,
                         NO_SYNC_WMIFLAG));
}

/*
 * IOCTL: AR6000_XIOCTL_AP_SET_ACL_POLICY
 *
 * This command is used to set the ACL policy. While changing policy, if
 * you want to retain the existing MAC addresses in the ACL list, the
 * policy should be ORed with AP_ACL_RETAIN_LIST_MASK, else the existing
 * list will be cleared. If there is no change in policy, the list will
 * be intact.
*/ A_STATUS wmi_ap_set_acl_policy(struct wmi_t *wmip, A_UINT8 policy) { void *osbuf; WMI_AP_ACL_POLICY_CMD *po; osbuf = A_NETBUF_ALLOC(sizeof(WMI_AP_ACL_POLICY_CMD)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(WMI_AP_ACL_POLICY_CMD)); po = (WMI_AP_ACL_POLICY_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(po, sizeof(*po)); po->policy = policy; return (wmi_cmd_send(wmip, osbuf, WMI_AP_ACL_POLICY_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_ap_set_rateset(struct wmi_t *wmip, A_UINT8 rateset) { void *osbuf; WMI_AP_SET_11BG_RATESET_CMD *rs; osbuf = A_NETBUF_ALLOC(sizeof(WMI_AP_SET_11BG_RATESET_CMD)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(WMI_AP_SET_11BG_RATESET_CMD)); rs = (WMI_AP_SET_11BG_RATESET_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(rs, sizeof(*rs)); rs->rateset = rateset; return (wmi_cmd_send(wmip, osbuf, WMI_AP_SET_11BG_RATESET_CMDID, NO_SYNC_WMIFLAG)); } #ifdef ATH_AR6K_11N_SUPPORT A_STATUS wmi_set_ht_cap_cmd(struct wmi_t *wmip, WMI_SET_HT_CAP_CMD *cmd) { void *osbuf; WMI_SET_HT_CAP_CMD *htCap; A_UINT8 band; osbuf = A_NETBUF_ALLOC(sizeof(*htCap)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*htCap)); band = (cmd->band)? A_BAND_5GHZ : A_BAND_24GHZ; wmip->wmi_ht_allowed[band] = (cmd->enable)? 1:0; htCap = (WMI_SET_HT_CAP_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(htCap, sizeof(*htCap)); A_MEMCPY(htCap, cmd, sizeof(*htCap)); return (wmi_cmd_send(wmip, osbuf, WMI_SET_HT_CAP_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_set_ht_op_cmd(struct wmi_t *wmip, A_UINT8 sta_chan_width) { void *osbuf; WMI_SET_HT_OP_CMD *htInfo; osbuf = A_NETBUF_ALLOC(sizeof(*htInfo)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*htInfo)); htInfo = (WMI_SET_HT_OP_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMZERO(htInfo, sizeof(*htInfo)); htInfo->sta_chan_width = sta_chan_width; return (wmi_cmd_send(wmip, osbuf, WMI_SET_HT_OP_CMDID, NO_SYNC_WMIFLAG)); } #endif A_STATUS wmi_set_tx_select_rates_cmd(struct wmi_t *wmip, A_UINT32 *pMaskArray) { void *osbuf; WMI_SET_TX_SELECT_RATES_CMD *pData; osbuf = A_NETBUF_ALLOC(sizeof(*pData)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*pData)); pData = (WMI_SET_TX_SELECT_RATES_CMD *)(A_NETBUF_DATA(osbuf)); A_MEMCPY(pData, pMaskArray, sizeof(*pData)); return (wmi_cmd_send(wmip, osbuf, WMI_SET_TX_SELECT_RATES_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_send_hci_cmd(struct wmi_t *wmip, A_UINT8 *buf, A_UINT16 sz) { void *osbuf; WMI_HCI_CMD *cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd) + sz); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd) + sz); cmd = (WMI_HCI_CMD *)(A_NETBUF_DATA(osbuf)); cmd->cmd_buf_sz = sz; A_MEMCPY(cmd->buf, buf, sz); return (wmi_cmd_send(wmip, osbuf, WMI_HCI_CMD_CMDID, NO_SYNC_WMIFLAG)); } #ifdef ATH_AR6K_11N_SUPPORT A_STATUS wmi_allow_aggr_cmd(struct wmi_t *wmip, A_UINT16 tx_tidmask, A_UINT16 rx_tidmask) { void *osbuf; WMI_ALLOW_AGGR_CMD *cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_ALLOW_AGGR_CMD *)(A_NETBUF_DATA(osbuf)); cmd->tx_allow_aggr = tx_tidmask; cmd->rx_allow_aggr = rx_tidmask; return (wmi_cmd_send(wmip, osbuf, WMI_ALLOW_AGGR_CMDID, NO_SYNC_WMIFLAG)); } A_STATUS wmi_setup_aggr_cmd(struct wmi_t *wmip, A_UINT8 tid) { void *osbuf; WMI_ADDBA_REQ_CMD *cmd; osbuf = A_NETBUF_ALLOC(sizeof(*cmd)); if (osbuf == NULL) { return A_NO_MEMORY; } A_NETBUF_PUT(osbuf, sizeof(*cmd)); cmd = (WMI_ADDBA_REQ_CMD *)(A_NETBUF_DATA(osbuf)); cmd->tid = tid; return 
(wmi_cmd_send(wmip, osbuf, WMI_ADDBA_REQ_CMDID, NO_SYNC_WMIFLAG));
}

A_STATUS
wmi_delete_aggr_cmd(struct wmi_t *wmip, A_UINT8 tid, A_BOOL uplink)
{
    void *osbuf;
    WMI_DELBA_REQ_CMD *cmd;

    osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, sizeof(*cmd));
    cmd = (WMI_DELBA_REQ_CMD *)(A_NETBUF_DATA(osbuf));
    cmd->tid = tid;
    cmd->is_sender_initiator = uplink; /* uplink = 1 - uplink direction, 0 = downlink direction */

    /* Delete the local aggr state, on host */
    return (wmi_cmd_send(wmip, osbuf, WMI_DELBA_REQ_CMDID, NO_SYNC_WMIFLAG));
}
#endif

A_STATUS
wmi_set_rx_frame_format_cmd(struct wmi_t *wmip, A_UINT8 rxMetaVersion,
                            A_BOOL rxDot11Hdr, A_BOOL defragOnHost)
{
    void *osbuf;
    WMI_RX_FRAME_FORMAT_CMD *cmd;

    osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, sizeof(*cmd));
    cmd = (WMI_RX_FRAME_FORMAT_CMD *)(A_NETBUF_DATA(osbuf));
    cmd->dot11Hdr = (rxDot11Hdr == TRUE) ? 1 : 0;
    cmd->defragOnHost = (defragOnHost == TRUE) ? 1 : 0;
    cmd->metaVersion = rxMetaVersion;

    return (wmi_cmd_send(wmip, osbuf, WMI_RX_FRAME_FORMAT_CMDID,
                         NO_SYNC_WMIFLAG));
}

A_STATUS
wmi_set_thin_mode_cmd(struct wmi_t *wmip, A_BOOL bThinMode)
{
    void *osbuf;
    WMI_SET_THIN_MODE_CMD *cmd;

    osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, sizeof(*cmd));
    cmd = (WMI_SET_THIN_MODE_CMD *)(A_NETBUF_DATA(osbuf));
    cmd->enable = (bThinMode == TRUE) ? 1 : 0;

    return (wmi_cmd_send(wmip, osbuf, WMI_SET_THIN_MODE_CMDID,
                         NO_SYNC_WMIFLAG));
}

A_STATUS
wmi_set_wlan_conn_precedence_cmd(struct wmi_t *wmip,
                                 BT_WLAN_CONN_PRECEDENCE precedence)
{
    void *osbuf;
    WMI_SET_BT_WLAN_CONN_PRECEDENCE *cmd;

    osbuf = A_NETBUF_ALLOC(sizeof(*cmd));
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, sizeof(*cmd));
    cmd = (WMI_SET_BT_WLAN_CONN_PRECEDENCE *)(A_NETBUF_DATA(osbuf));
    A_MEMZERO(cmd, sizeof(*cmd));
    cmd->precedence = precedence;

    return (wmi_cmd_send(wmip, osbuf, WMI_SET_BT_WLAN_CONN_PRECEDENCE_CMDID,
                         NO_SYNC_WMIFLAG));
}

A_STATUS
wmi_set_pmk_cmd(struct wmi_t *wmip, A_UINT8 *pmk)
{
    void *osbuf;
    WMI_SET_PMK_CMD *p;

    osbuf = A_NETBUF_ALLOC(sizeof(WMI_SET_PMK_CMD));
    if (osbuf == NULL) {
        return A_NO_MEMORY;
    }

    A_NETBUF_PUT(osbuf, sizeof(WMI_SET_PMK_CMD));
    p = (WMI_SET_PMK_CMD *)(A_NETBUF_DATA(osbuf));
    A_MEMZERO(p, sizeof(*p));

    A_MEMCPY(p->pmk, pmk, WMI_PMK_LEN);

    return (wmi_cmd_send(wmip, osbuf, WMI_SET_PMK_CMDID, NO_SYNC_WMIFLAG));
}

bss_t *
wmi_find_matching_Ssidnode (struct wmi_t *wmip, A_UCHAR *pSsid,
                            A_UINT32 ssidLength,
                            A_UINT32 dot11AuthMode, A_UINT32 authMode,
                            A_UINT32 pairwiseCryptoType,
                            A_UINT32 grpwiseCryptoTyp)
{
    bss_t *node = NULL;

    node = wlan_find_matching_Ssidnode (&wmip->wmi_scan_table, pSsid,
                                        ssidLength, dot11AuthMode, authMode,
                                        pairwiseCryptoType, grpwiseCryptoTyp);
    return node;
}

A_UINT16
wmi_ieee2freq (int chan)
{
    A_UINT16 freq = 0;

    freq = wlan_ieee2freq (chan);
    return freq;
}

A_UINT32
wmi_freq2ieee (A_UINT16 freq)
{
    A_UINT16 chan = 0;

    chan = wlan_freq2ieee (freq);
    return chan;
}
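/*
 * Illustrative sketch (not part of the original source): wmi_ieee2freq()
 * and wmi_freq2ieee() above are thin wrappers around the wlan_* helpers,
 * so a caller can round-trip between an IEEE channel number and its
 * centre frequency. Kept under #if 0 as an illustration only.
 */
#if 0
static void
wmi_chan_freq_example(void)
{
    /* Channel 6 in the 2.4 GHz band sits at 2437 MHz. */
    A_UINT16 freq = wmi_ieee2freq(6);    /* -> 2437 */
    A_UINT32 chan = wmi_freq2ieee(freq); /* -> 6 */

    A_DPRINTF(DBG_WMI, (DBGFMT "chan %d <-> %d MHz\n", DBGARG,
              (int)chan, (int)freq));
}
#endif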
gpl-2.0
Pivosgroup/buildroot-linux-kernel-m3
security/smack/smack_lsm.c
42
77931
/* * Simplified MAC Kernel (smack) security module * * This file contains the smack hook function implementations. * * Author: * Casey Schaufler <casey@schaufler-ca.com> * * Copyright (C) 2007 Casey Schaufler <casey@schaufler-ca.com> * Copyright (C) 2009 Hewlett-Packard Development Company, L.P. * Paul Moore <paul.moore@hp.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, * as published by the Free Software Foundation. */ #include <linux/xattr.h> #include <linux/pagemap.h> #include <linux/mount.h> #include <linux/stat.h> #include <linux/ext2_fs.h> #include <linux/kd.h> #include <asm/ioctls.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/pipe_fs_i.h> #include <net/netlabel.h> #include <net/cipso_ipv4.h> #include <linux/audit.h> #include <linux/magic.h> #include "smack.h" #define task_security(task) (task_cred_xxx((task), security)) /** * smk_fetch - Fetch the smack label from a file. * @ip: a pointer to the inode * @dp: a pointer to the dentry * * Returns a pointer to the master list entry for the Smack label * or NULL if there was no label to fetch. */ static char *smk_fetch(struct inode *ip, struct dentry *dp) { int rc; char in[SMK_LABELLEN]; if (ip->i_op->getxattr == NULL) return NULL; rc = ip->i_op->getxattr(dp, XATTR_NAME_SMACK, in, SMK_LABELLEN); if (rc < 0) return NULL; return smk_import(in, rc); } /** * new_inode_smack - allocate an inode security blob * @smack: a pointer to the Smack label to use in the blob * * Returns the new blob or NULL if there's no memory available */ struct inode_smack *new_inode_smack(char *smack) { struct inode_smack *isp; isp = kzalloc(sizeof(struct inode_smack), GFP_KERNEL); if (isp == NULL) return NULL; isp->smk_inode = smack; isp->smk_flags = 0; mutex_init(&isp->smk_lock); return isp; } /* * LSM hooks. * We he, that is fun! */ /** * smack_ptrace_access_check - Smack approval on PTRACE_ATTACH * @ctp: child task pointer * @mode: ptrace attachment mode * * Returns 0 if access is OK, an error code otherwise * * Do the capability checks, and require read and write. */ static int smack_ptrace_access_check(struct task_struct *ctp, unsigned int mode) { int rc; struct smk_audit_info ad; char *sp, *tsp; rc = cap_ptrace_access_check(ctp, mode); if (rc != 0) return rc; sp = current_security(); tsp = task_security(ctp); smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK); smk_ad_setfield_u_tsk(&ad, ctp); /* we won't log here, because rc can be overriden */ rc = smk_access(sp, tsp, MAY_READWRITE, NULL); if (rc != 0 && capable(CAP_MAC_OVERRIDE)) rc = 0; smack_log(sp, tsp, MAY_READWRITE, rc, &ad); return rc; } /** * smack_ptrace_traceme - Smack approval on PTRACE_TRACEME * @ptp: parent task pointer * * Returns 0 if access is OK, an error code otherwise * * Do the capability checks, and require read and write. 
*/ static int smack_ptrace_traceme(struct task_struct *ptp) { int rc; struct smk_audit_info ad; char *sp, *tsp; rc = cap_ptrace_traceme(ptp); if (rc != 0) return rc; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK); smk_ad_setfield_u_tsk(&ad, ptp); sp = current_security(); tsp = task_security(ptp); /* we won't log here, because rc can be overriden */ rc = smk_access(tsp, sp, MAY_READWRITE, NULL); if (rc != 0 && has_capability(ptp, CAP_MAC_OVERRIDE)) rc = 0; smack_log(tsp, sp, MAY_READWRITE, rc, &ad); return rc; } /** * smack_syslog - Smack approval on syslog * @type: message type * * Require that the task has the floor label * * Returns 0 on success, error code otherwise. */ static int smack_syslog(int type, bool from_file) { int rc; char *sp = current_security(); rc = cap_syslog(type, from_file); if (rc != 0) return rc; if (capable(CAP_MAC_OVERRIDE)) return 0; if (sp != smack_known_floor.smk_known) rc = -EACCES; return rc; } /* * Superblock Hooks. */ /** * smack_sb_alloc_security - allocate a superblock blob * @sb: the superblock getting the blob * * Returns 0 on success or -ENOMEM on error. */ static int smack_sb_alloc_security(struct super_block *sb) { struct superblock_smack *sbsp; sbsp = kzalloc(sizeof(struct superblock_smack), GFP_KERNEL); if (sbsp == NULL) return -ENOMEM; sbsp->smk_root = smack_known_floor.smk_known; sbsp->smk_default = smack_known_floor.smk_known; sbsp->smk_floor = smack_known_floor.smk_known; sbsp->smk_hat = smack_known_hat.smk_known; sbsp->smk_initialized = 0; spin_lock_init(&sbsp->smk_sblock); sb->s_security = sbsp; return 0; } /** * smack_sb_free_security - free a superblock blob * @sb: the superblock getting the blob * */ static void smack_sb_free_security(struct super_block *sb) { kfree(sb->s_security); sb->s_security = NULL; } /** * smack_sb_copy_data - copy mount options data for processing * @orig: where to start * @smackopts: mount options string * * Returns 0 on success or -ENOMEM on error. * * Copy the Smack specific mount options out of the mount * options list. 
*/ static int smack_sb_copy_data(char *orig, char *smackopts) { char *cp, *commap, *otheropts, *dp; otheropts = (char *)get_zeroed_page(GFP_KERNEL); if (otheropts == NULL) return -ENOMEM; for (cp = orig, commap = orig; commap != NULL; cp = commap + 1) { if (strstr(cp, SMK_FSDEFAULT) == cp) dp = smackopts; else if (strstr(cp, SMK_FSFLOOR) == cp) dp = smackopts; else if (strstr(cp, SMK_FSHAT) == cp) dp = smackopts; else if (strstr(cp, SMK_FSROOT) == cp) dp = smackopts; else dp = otheropts; commap = strchr(cp, ','); if (commap != NULL) *commap = '\0'; if (*dp != '\0') strcat(dp, ","); strcat(dp, cp); } strcpy(orig, otheropts); free_page((unsigned long)otheropts); return 0; } /** * smack_sb_kern_mount - Smack specific mount processing * @sb: the file system superblock * @flags: the mount flags * @data: the smack mount options * * Returns 0 on success, an error code on failure */ static int smack_sb_kern_mount(struct super_block *sb, int flags, void *data) { struct dentry *root = sb->s_root; struct inode *inode = root->d_inode; struct superblock_smack *sp = sb->s_security; struct inode_smack *isp; char *op; char *commap; char *nsp; spin_lock(&sp->smk_sblock); if (sp->smk_initialized != 0) { spin_unlock(&sp->smk_sblock); return 0; } sp->smk_initialized = 1; spin_unlock(&sp->smk_sblock); for (op = data; op != NULL; op = commap) { commap = strchr(op, ','); if (commap != NULL) *commap++ = '\0'; if (strncmp(op, SMK_FSHAT, strlen(SMK_FSHAT)) == 0) { op += strlen(SMK_FSHAT); nsp = smk_import(op, 0); if (nsp != NULL) sp->smk_hat = nsp; } else if (strncmp(op, SMK_FSFLOOR, strlen(SMK_FSFLOOR)) == 0) { op += strlen(SMK_FSFLOOR); nsp = smk_import(op, 0); if (nsp != NULL) sp->smk_floor = nsp; } else if (strncmp(op, SMK_FSDEFAULT, strlen(SMK_FSDEFAULT)) == 0) { op += strlen(SMK_FSDEFAULT); nsp = smk_import(op, 0); if (nsp != NULL) sp->smk_default = nsp; } else if (strncmp(op, SMK_FSROOT, strlen(SMK_FSROOT)) == 0) { op += strlen(SMK_FSROOT); nsp = smk_import(op, 0); if (nsp != NULL) sp->smk_root = nsp; } } /* * Initialize the root inode. */ isp = inode->i_security; if (isp == NULL) inode->i_security = new_inode_smack(sp->smk_root); else isp->smk_inode = sp->smk_root; return 0; } /** * smack_sb_statfs - Smack check on statfs * @dentry: identifies the file system in question * * Returns 0 if current can read the floor of the filesystem, * and error code otherwise */ static int smack_sb_statfs(struct dentry *dentry) { struct superblock_smack *sbp = dentry->d_sb->s_security; int rc; struct smk_audit_info ad; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); smk_ad_setfield_u_fs_path_dentry(&ad, dentry); rc = smk_curacc(sbp->smk_floor, MAY_READ, &ad); return rc; } /** * smack_sb_mount - Smack check for mounting * @dev_name: unused * @path: mount point * @type: unused * @flags: unused * @data: unused * * Returns 0 if current can write the floor of the filesystem * being mounted on, an error code otherwise. */ static int smack_sb_mount(char *dev_name, struct path *path, char *type, unsigned long flags, void *data) { struct superblock_smack *sbp = path->mnt->mnt_sb->s_security; struct smk_audit_info ad; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); smk_ad_setfield_u_fs_path(&ad, *path); return smk_curacc(sbp->smk_floor, MAY_WRITE, &ad); } /** * smack_sb_umount - Smack check for unmounting * @mnt: file system to unmount * @flags: unused * * Returns 0 if current can write the floor of the filesystem * being unmounted, an error code otherwise. 
*/ static int smack_sb_umount(struct vfsmount *mnt, int flags) { struct superblock_smack *sbp; struct smk_audit_info ad; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); smk_ad_setfield_u_fs_path_dentry(&ad, mnt->mnt_root); smk_ad_setfield_u_fs_path_mnt(&ad, mnt); sbp = mnt->mnt_sb->s_security; return smk_curacc(sbp->smk_floor, MAY_WRITE, &ad); } /* * Inode hooks */ /** * smack_inode_alloc_security - allocate an inode blob * @inode: the inode in need of a blob * * Returns 0 if it gets a blob, -ENOMEM otherwise */ static int smack_inode_alloc_security(struct inode *inode) { inode->i_security = new_inode_smack(current_security()); if (inode->i_security == NULL) return -ENOMEM; return 0; } /** * smack_inode_free_security - free an inode blob * @inode: the inode with a blob * * Clears the blob pointer in inode */ static void smack_inode_free_security(struct inode *inode) { kfree(inode->i_security); inode->i_security = NULL; } /** * smack_inode_init_security - copy out the smack from an inode * @inode: the inode * @dir: unused * @name: where to put the attribute name * @value: where to put the attribute value * @len: where to put the length of the attribute * * Returns 0 if it all works out, -ENOMEM if there's no memory */ static int smack_inode_init_security(struct inode *inode, struct inode *dir, char **name, void **value, size_t *len) { char *isp = smk_of_inode(inode); if (name) { *name = kstrdup(XATTR_SMACK_SUFFIX, GFP_KERNEL); if (*name == NULL) return -ENOMEM; } if (value) { *value = kstrdup(isp, GFP_KERNEL); if (*value == NULL) return -ENOMEM; } if (len) *len = strlen(isp) + 1; return 0; } /** * smack_inode_link - Smack check on link * @old_dentry: the existing object * @dir: unused * @new_dentry: the new object * * Returns 0 if access is permitted, an error code otherwise */ static int smack_inode_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry) { char *isp; struct smk_audit_info ad; int rc; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); smk_ad_setfield_u_fs_path_dentry(&ad, old_dentry); isp = smk_of_inode(old_dentry->d_inode); rc = smk_curacc(isp, MAY_WRITE, &ad); if (rc == 0 && new_dentry->d_inode != NULL) { isp = smk_of_inode(new_dentry->d_inode); smk_ad_setfield_u_fs_path_dentry(&ad, new_dentry); rc = smk_curacc(isp, MAY_WRITE, &ad); } return rc; } /** * smack_inode_unlink - Smack check on inode deletion * @dir: containing directory object * @dentry: file to unlink * * Returns 0 if current can write the containing directory * and the object, error code otherwise */ static int smack_inode_unlink(struct inode *dir, struct dentry *dentry) { struct inode *ip = dentry->d_inode; struct smk_audit_info ad; int rc; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); smk_ad_setfield_u_fs_path_dentry(&ad, dentry); /* * You need write access to the thing you're unlinking */ rc = smk_curacc(smk_of_inode(ip), MAY_WRITE, &ad); if (rc == 0) { /* * You also need write access to the containing directory */ smk_ad_setfield_u_fs_path_dentry(&ad, NULL); smk_ad_setfield_u_fs_inode(&ad, dir); rc = smk_curacc(smk_of_inode(dir), MAY_WRITE, &ad); } return rc; } /** * smack_inode_rmdir - Smack check on directory deletion * @dir: containing directory object * @dentry: directory to unlink * * Returns 0 if current can write the containing directory * and the directory, error code otherwise */ static int smack_inode_rmdir(struct inode *dir, struct dentry *dentry) { struct smk_audit_info ad; int rc; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); smk_ad_setfield_u_fs_path_dentry(&ad, dentry); 
/* * You need write access to the thing you're removing */ rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE, &ad); if (rc == 0) { /* * You also need write access to the containing directory */ smk_ad_setfield_u_fs_path_dentry(&ad, NULL); smk_ad_setfield_u_fs_inode(&ad, dir); rc = smk_curacc(smk_of_inode(dir), MAY_WRITE, &ad); } return rc; } /** * smack_inode_rename - Smack check on rename * @old_inode: the old directory * @old_dentry: unused * @new_inode: the new directory * @new_dentry: unused * * Read and write access is required on both the old and * new directories. * * Returns 0 if access is permitted, an error code otherwise */ static int smack_inode_rename(struct inode *old_inode, struct dentry *old_dentry, struct inode *new_inode, struct dentry *new_dentry) { int rc; char *isp; struct smk_audit_info ad; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); smk_ad_setfield_u_fs_path_dentry(&ad, old_dentry); isp = smk_of_inode(old_dentry->d_inode); rc = smk_curacc(isp, MAY_READWRITE, &ad); if (rc == 0 && new_dentry->d_inode != NULL) { isp = smk_of_inode(new_dentry->d_inode); smk_ad_setfield_u_fs_path_dentry(&ad, new_dentry); rc = smk_curacc(isp, MAY_READWRITE, &ad); } return rc; } /** * smack_inode_permission - Smack version of permission() * @inode: the inode in question * @mask: the access requested * * This is the important Smack hook. * * Returns 0 if access is permitted, -EACCES otherwise */ static int smack_inode_permission(struct inode *inode, int mask) { struct smk_audit_info ad; /* * No permission to check. Existence test. Yup, it's there. */ if (mask == 0) return 0; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); smk_ad_setfield_u_fs_inode(&ad, inode); return smk_curacc(smk_of_inode(inode), mask, &ad); } /** * smack_inode_setattr - Smack check for setting attributes * @dentry: the object * @iattr: for the force flag * * Returns 0 if access is permitted, an error code otherwise */ static int smack_inode_setattr(struct dentry *dentry, struct iattr *iattr) { struct smk_audit_info ad; /* * Need to allow for clearing the setuid bit. */ if (iattr->ia_valid & ATTR_FORCE) return 0; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); smk_ad_setfield_u_fs_path_dentry(&ad, dentry); return smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE, &ad); } /** * smack_inode_getattr - Smack check for getting attributes * @mnt: unused * @dentry: the object * * Returns 0 if access is permitted, an error code otherwise */ static int smack_inode_getattr(struct vfsmount *mnt, struct dentry *dentry) { struct smk_audit_info ad; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); smk_ad_setfield_u_fs_path_dentry(&ad, dentry); smk_ad_setfield_u_fs_path_mnt(&ad, mnt); return smk_curacc(smk_of_inode(dentry->d_inode), MAY_READ, &ad); } /** * smack_inode_setxattr - Smack check for setting xattrs * @dentry: the object * @name: name of the attribute * @value: unused * @size: unused * @flags: unused * * This protects the Smack attribute explicitly. 
* * Returns 0 if access is permitted, an error code otherwise */ static int smack_inode_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { struct smk_audit_info ad; int rc = 0; if (strcmp(name, XATTR_NAME_SMACK) == 0 || strcmp(name, XATTR_NAME_SMACKIPIN) == 0 || strcmp(name, XATTR_NAME_SMACKIPOUT) == 0) { if (!capable(CAP_MAC_ADMIN)) rc = -EPERM; /* * check label validity here so import wont fail on * post_setxattr */ if (size == 0 || size >= SMK_LABELLEN || smk_import(value, size) == NULL) rc = -EINVAL; } else rc = cap_inode_setxattr(dentry, name, value, size, flags); smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); smk_ad_setfield_u_fs_path_dentry(&ad, dentry); if (rc == 0) rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE, &ad); return rc; } /** * smack_inode_post_setxattr - Apply the Smack update approved above * @dentry: object * @name: attribute name * @value: attribute value * @size: attribute size * @flags: unused * * Set the pointer in the inode blob to the entry found * in the master label list. */ static void smack_inode_post_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { struct inode_smack *isp; char *nsp; /* * Not SMACK */ if (strcmp(name, XATTR_NAME_SMACK)) return; isp = dentry->d_inode->i_security; /* * No locking is done here. This is a pointer * assignment. */ nsp = smk_import(value, size); if (nsp != NULL) isp->smk_inode = nsp; else isp->smk_inode = smack_known_invalid.smk_known; return; } /* * smack_inode_getxattr - Smack check on getxattr * @dentry: the object * @name: unused * * Returns 0 if access is permitted, an error code otherwise */ static int smack_inode_getxattr(struct dentry *dentry, const char *name) { struct smk_audit_info ad; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); smk_ad_setfield_u_fs_path_dentry(&ad, dentry); return smk_curacc(smk_of_inode(dentry->d_inode), MAY_READ, &ad); } /* * smack_inode_removexattr - Smack check on removexattr * @dentry: the object * @name: name of the attribute * * Removing the Smack attribute requires CAP_MAC_ADMIN * * Returns 0 if access is permitted, an error code otherwise */ static int smack_inode_removexattr(struct dentry *dentry, const char *name) { struct smk_audit_info ad; int rc = 0; if (strcmp(name, XATTR_NAME_SMACK) == 0 || strcmp(name, XATTR_NAME_SMACKIPIN) == 0 || strcmp(name, XATTR_NAME_SMACKIPOUT) == 0) { if (!capable(CAP_MAC_ADMIN)) rc = -EPERM; } else rc = cap_inode_removexattr(dentry, name); smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); smk_ad_setfield_u_fs_path_dentry(&ad, dentry); if (rc == 0) rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE, &ad); return rc; } /** * smack_inode_getsecurity - get smack xattrs * @inode: the object * @name: attribute name * @buffer: where to put the result * @alloc: unused * * Returns the size of the attribute or an error code */ static int smack_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc) { struct socket_smack *ssp; struct socket *sock; struct super_block *sbp; struct inode *ip = (struct inode *)inode; char *isp; int ilen; int rc = 0; if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) { isp = smk_of_inode(inode); ilen = strlen(isp) + 1; *buffer = isp; return ilen; } /* * The rest of the Smack xattrs are only on sockets. 
*/ sbp = ip->i_sb; if (sbp->s_magic != SOCKFS_MAGIC) return -EOPNOTSUPP; sock = SOCKET_I(ip); if (sock == NULL || sock->sk == NULL) return -EOPNOTSUPP; ssp = sock->sk->sk_security; if (strcmp(name, XATTR_SMACK_IPIN) == 0) isp = ssp->smk_in; else if (strcmp(name, XATTR_SMACK_IPOUT) == 0) isp = ssp->smk_out; else return -EOPNOTSUPP; ilen = strlen(isp) + 1; if (rc == 0) { *buffer = isp; rc = ilen; } return rc; } /** * smack_inode_listsecurity - list the Smack attributes * @inode: the object * @buffer: where they go * @buffer_size: size of buffer * * Returns 0 on success, -EINVAL otherwise */ static int smack_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size) { int len = strlen(XATTR_NAME_SMACK); if (buffer != NULL && len <= buffer_size) { memcpy(buffer, XATTR_NAME_SMACK, len); return len; } return -EINVAL; } /** * smack_inode_getsecid - Extract inode's security id * @inode: inode to extract the info from * @secid: where result will be saved */ static void smack_inode_getsecid(const struct inode *inode, u32 *secid) { struct inode_smack *isp = inode->i_security; *secid = smack_to_secid(isp->smk_inode); } /* * File Hooks */ /** * smack_file_permission - Smack check on file operations * @file: unused * @mask: unused * * Returns 0 * * Should access checks be done on each read or write? * UNICOS and SELinux say yes. * Trusted Solaris, Trusted Irix, and just about everyone else says no. * * I'll say no for now. Smack does not do the frequent * label changing that SELinux does. */ static int smack_file_permission(struct file *file, int mask) { return 0; } /** * smack_file_alloc_security - assign a file security blob * @file: the object * * The security blob for a file is a pointer to the master * label list, so no allocation is done. * * Returns 0 */ static int smack_file_alloc_security(struct file *file) { file->f_security = current_security(); return 0; } /** * smack_file_free_security - clear a file security blob * @file: the object * * The security blob for a file is a pointer to the master * label list, so no memory is freed. */ static void smack_file_free_security(struct file *file) { file->f_security = NULL; } /** * smack_file_ioctl - Smack check on ioctls * @file: the object * @cmd: what to do * @arg: unused * * Relies heavily on the correct use of the ioctl command conventions. 
* * Returns 0 if allowed, error code otherwise */ static int smack_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int rc = 0; struct smk_audit_info ad; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); smk_ad_setfield_u_fs_path(&ad, file->f_path); if (_IOC_DIR(cmd) & _IOC_WRITE) rc = smk_curacc(file->f_security, MAY_WRITE, &ad); if (rc == 0 && (_IOC_DIR(cmd) & _IOC_READ)) rc = smk_curacc(file->f_security, MAY_READ, &ad); return rc; } /** * smack_file_lock - Smack check on file locking * @file: the object * @cmd: unused * * Returns 0 if current has write access, error code otherwise */ static int smack_file_lock(struct file *file, unsigned int cmd) { struct smk_audit_info ad; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); smk_ad_setfield_u_fs_path_dentry(&ad, file->f_path.dentry); return smk_curacc(file->f_security, MAY_WRITE, &ad); } /** * smack_file_fcntl - Smack check on fcntl * @file: the object * @cmd: what action to check * @arg: unused * * Returns 0 if current has access, error code otherwise */ static int smack_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg) { struct smk_audit_info ad; int rc; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); smk_ad_setfield_u_fs_path(&ad, file->f_path); switch (cmd) { case F_DUPFD: case F_GETFD: case F_GETFL: case F_GETLK: case F_GETOWN: case F_GETSIG: rc = smk_curacc(file->f_security, MAY_READ, &ad); break; case F_SETFD: case F_SETFL: case F_SETLK: case F_SETLKW: case F_SETOWN: case F_SETSIG: rc = smk_curacc(file->f_security, MAY_WRITE, &ad); break; default: rc = smk_curacc(file->f_security, MAY_READWRITE, &ad); } return rc; } /** * smack_file_set_fowner - set the file security blob value * @file: object in question * * Returns 0 * Further research may be required on this one. */ static int smack_file_set_fowner(struct file *file) { file->f_security = current_security(); return 0; } /** * smack_file_send_sigiotask - Smack on sigio * @tsk: The target task * @fown: the object the signal come from * @signum: unused * * Allow a privileged task to get signals even if it shouldn't * * Returns 0 if a subject with the object's smack could * write to the task, an error code otherwise. */ static int smack_file_send_sigiotask(struct task_struct *tsk, struct fown_struct *fown, int signum) { struct file *file; int rc; char *tsp = tsk->cred->security; struct smk_audit_info ad; /* * struct fown_struct is never outside the context of a struct file */ file = container_of(fown, struct file, f_owner); /* we don't log here as rc can be overriden */ rc = smk_access(file->f_security, tsp, MAY_WRITE, NULL); if (rc != 0 && has_capability(tsk, CAP_MAC_OVERRIDE)) rc = 0; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK); smk_ad_setfield_u_tsk(&ad, tsk); smack_log(file->f_security, tsp, MAY_WRITE, rc, &ad); return rc; } /** * smack_file_receive - Smack file receive check * @file: the object * * Returns 0 if current has access, error code otherwise */ static int smack_file_receive(struct file *file) { int may = 0; struct smk_audit_info ad; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK); smk_ad_setfield_u_fs_path(&ad, file->f_path); /* * This code relies on bitmasks. */ if (file->f_mode & FMODE_READ) may = MAY_READ; if (file->f_mode & FMODE_WRITE) may |= MAY_WRITE; return smk_curacc(file->f_security, may, &ad); } /* * Task hooks */ /** * smack_cred_alloc_blank - "allocate" blank task-level security credentials * @new: the new credentials * @gfp: the atomicity of any memory allocations * * Prepare a blank set of credentials for modification. 
This must allocate all * the memory the LSM module might require such that cred_transfer() can * complete without error. */ static int smack_cred_alloc_blank(struct cred *cred, gfp_t gfp) { cred->security = NULL; return 0; } /** * smack_cred_free - "free" task-level security credentials * @cred: the credentials in question * * Smack isn't using copies of blobs. Everyone * points to an immutable list. The blobs never go away. * There is no leak here. */ static void smack_cred_free(struct cred *cred) { cred->security = NULL; } /** * smack_cred_prepare - prepare new set of credentials for modification * @new: the new credentials * @old: the original credentials * @gfp: the atomicity of any memory allocations * * Prepare a new set of credentials for modification. */ static int smack_cred_prepare(struct cred *new, const struct cred *old, gfp_t gfp) { new->security = old->security; return 0; } /** * smack_cred_commit - commit new credentials * @new: the new credentials * @old: the original credentials */ static void smack_cred_commit(struct cred *new, const struct cred *old) { } /** * smack_cred_transfer - Transfer the old credentials to the new credentials * @new: the new credentials * @old: the original credentials * * Fill in a set of blank credentials from another set of credentials. */ static void smack_cred_transfer(struct cred *new, const struct cred *old) { new->security = old->security; } /** * smack_kernel_act_as - Set the subjective context in a set of credentials * @new: points to the set of credentials to be modified. * @secid: specifies the security ID to be set * * Set the security data for a kernel service. */ static int smack_kernel_act_as(struct cred *new, u32 secid) { char *smack = smack_from_secid(secid); if (smack == NULL) return -EINVAL; new->security = smack; return 0; } /** * smack_kernel_create_files_as - Set the file creation label in a set of creds * @new: points to the set of credentials to be modified * @inode: points to the inode to use as a reference * * Set the file creation context in a set of credentials to the same * as the objective context of the specified inode */ static int smack_kernel_create_files_as(struct cred *new, struct inode *inode) { struct inode_smack *isp = inode->i_security; new->security = isp->smk_inode; return 0; } /** * smk_curacc_on_task - helper to log task related access * @p: the task object * @access : the access requested * * Return 0 if access is permitted */ static int smk_curacc_on_task(struct task_struct *p, int access) { struct smk_audit_info ad; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK); smk_ad_setfield_u_tsk(&ad, p); return smk_curacc(task_security(p), access, &ad); } /** * smack_task_setpgid - Smack check on setting pgid * @p: the task object * @pgid: unused * * Return 0 if write access is permitted */ static int smack_task_setpgid(struct task_struct *p, pid_t pgid) { return smk_curacc_on_task(p, MAY_WRITE); } /** * smack_task_getpgid - Smack access check for getpgid * @p: the object task * * Returns 0 if current can read the object task, error code otherwise */ static int smack_task_getpgid(struct task_struct *p) { return smk_curacc_on_task(p, MAY_READ); } /** * smack_task_getsid - Smack access check for getsid * @p: the object task * * Returns 0 if current can read the object task, error code otherwise */ static int smack_task_getsid(struct task_struct *p) { return smk_curacc_on_task(p, MAY_READ); } /** * smack_task_getsecid - get the secid of the task * @p: the object task * @secid: where to put the result * * Sets the 
secid to contain a u32 version of the smack label.
 */
static void smack_task_getsecid(struct task_struct *p, u32 *secid)
{
	*secid = smack_to_secid(task_security(p));
}

/**
 * smack_task_setnice - Smack check on setting nice
 * @p: the task object
 * @nice: unused
 *
 * Return 0 if write access is permitted
 */
static int smack_task_setnice(struct task_struct *p, int nice)
{
	int rc;

	rc = cap_task_setnice(p, nice);
	if (rc == 0)
		rc = smk_curacc_on_task(p, MAY_WRITE);
	return rc;
}

/**
 * smack_task_setioprio - Smack check on setting ioprio
 * @p: the task object
 * @ioprio: unused
 *
 * Return 0 if write access is permitted
 */
static int smack_task_setioprio(struct task_struct *p, int ioprio)
{
	int rc;

	rc = cap_task_setioprio(p, ioprio);
	if (rc == 0)
		rc = smk_curacc_on_task(p, MAY_WRITE);
	return rc;
}

/**
 * smack_task_getioprio - Smack check on reading ioprio
 * @p: the task object
 *
 * Return 0 if read access is permitted
 */
static int smack_task_getioprio(struct task_struct *p)
{
	return smk_curacc_on_task(p, MAY_READ);
}

/**
 * smack_task_setscheduler - Smack check on setting scheduler
 * @p: the task object
 * @policy: unused
 * @lp: unused
 *
 * Return 0 if write access is permitted
 */
static int smack_task_setscheduler(struct task_struct *p, int policy,
				   struct sched_param *lp)
{
	int rc;

	rc = cap_task_setscheduler(p, policy, lp);
	if (rc == 0)
		rc = smk_curacc_on_task(p, MAY_WRITE);
	return rc;
}

/**
 * smack_task_getscheduler - Smack check on reading scheduler
 * @p: the task object
 *
 * Return 0 if read access is permitted
 */
static int smack_task_getscheduler(struct task_struct *p)
{
	return smk_curacc_on_task(p, MAY_READ);
}

/**
 * smack_task_movememory - Smack check on moving memory
 * @p: the task object
 *
 * Return 0 if write access is permitted
 */
static int smack_task_movememory(struct task_struct *p)
{
	return smk_curacc_on_task(p, MAY_WRITE);
}

/**
 * smack_task_kill - Smack check on signal delivery
 * @p: the task object
 * @info: unused
 * @sig: unused
 * @secid: identifies the smack to use in lieu of current's
 *
 * Return 0 if write access is permitted
 *
 * The secid behavior is an artifact of an SELinux hack
 * in the USB code. Someday it may go away.
 */
static int smack_task_kill(struct task_struct *p, struct siginfo *info,
			   int sig, u32 secid)
{
	struct smk_audit_info ad;

	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
	smk_ad_setfield_u_tsk(&ad, p);
	/*
	 * Sending a signal requires that the sender
	 * can write the receiver.
	 */
	if (secid == 0)
		return smk_curacc(task_security(p), MAY_WRITE, &ad);
	/*
	 * If the secid isn't 0 we're dealing with some USB IO
	 * specific behavior. This is not clean. For one thing
	 * we can't take privilege into account.
	 */
	return smk_access(smack_from_secid(secid), task_security(p),
			  MAY_WRITE, &ad);
}
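/*
 * Illustrative note (not part of the original module): smack_task_kill()
 * above models signal delivery as a write to the receiver. With
 * hypothetical labels, a task labeled "Pop" may signal a task labeled
 * "Rock" only if a rule granting "Pop Rock w" (or wider) access is loaded.
 */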
/**
 * smack_task_wait - Smack access check for waiting
 * @p: task to wait for
 *
 * Returns 0 if current can wait for p, error code otherwise
 */
static int smack_task_wait(struct task_struct *p)
{
	struct smk_audit_info ad;
	char *sp = current_security();
	char *tsp = task_security(p);
	int rc;

	/* we don't log here, we can be overridden */
	rc = smk_access(sp, tsp, MAY_WRITE, NULL);
	if (rc == 0)
		goto out_log;

	/*
	 * Allow the operation to succeed if either task
	 * has privilege to perform operations that might
	 * account for the smack labels having gotten to
	 * be different in the first place.
	 *
	 * This breaks the strict subject/object access
	 * control ideal, taking the object's privilege
	 * state into account in the decision as well as
	 * the smack value.
	 */
	if (capable(CAP_MAC_OVERRIDE) || has_capability(p, CAP_MAC_OVERRIDE))
		rc = 0;
	/* we log only if we didn't get overridden */
 out_log:
	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
	smk_ad_setfield_u_tsk(&ad, p);
	smack_log(sp, tsp, MAY_WRITE, rc, &ad);
	return rc;
}

/**
 * smack_task_to_inode - copy task smack into the inode blob
 * @p: task to copy from
 * @inode: inode to copy to
 *
 * Sets the smack pointer in the inode security blob
 */
static void smack_task_to_inode(struct task_struct *p, struct inode *inode)
{
	struct inode_smack *isp = inode->i_security;
	isp->smk_inode = task_security(p);
}

/*
 * Socket hooks.
 */

/**
 * smack_sk_alloc_security - Allocate a socket blob
 * @sk: the socket
 * @family: unused
 * @gfp_flags: memory allocation flags
 *
 * Assign Smack pointers to current
 *
 * Returns 0 on success, -ENOMEM if there's no memory
 */
static int smack_sk_alloc_security(struct sock *sk, int family, gfp_t gfp_flags)
{
	char *csp = current_security();
	struct socket_smack *ssp;

	ssp = kzalloc(sizeof(struct socket_smack), gfp_flags);
	if (ssp == NULL)
		return -ENOMEM;

	ssp->smk_in = csp;
	ssp->smk_out = csp;
	ssp->smk_packet[0] = '\0';

	sk->sk_security = ssp;

	return 0;
}

/**
 * smack_sk_free_security - Free a socket blob
 * @sk: the socket
 *
 * Clears the blob pointer
 */
static void smack_sk_free_security(struct sock *sk)
{
	kfree(sk->sk_security);
}

/**
 * smack_host_label - check host based restrictions
 * @sip: the object end
 *
 * looks for host based access restrictions
 *
 * This version will only be appropriate for really small sets of single label
 * hosts. The caller is responsible for ensuring that the RCU read lock is
 * taken before calling this function.
 *
 * Returns the label of the far end or NULL if it's not special.
 */
static char *smack_host_label(struct sockaddr_in *sip)
{
	struct smk_netlbladdr *snp;
	struct in_addr *siap = &sip->sin_addr;

	if (siap->s_addr == 0)
		return NULL;

	list_for_each_entry_rcu(snp, &smk_netlbladdr_list, list)
		/*
		 * we break after finding the first match because
		 * the list is sorted from longest to shortest mask
		 * so we have found the most specific match
		 */
		if ((&snp->smk_host.sin_addr)->s_addr ==
		    (siap->s_addr & (&snp->smk_mask)->s_addr)) {
			/* we have found the special CIPSO option */
			if (snp->smk_label == smack_cipso_option)
				return NULL;
			return snp->smk_label;
		}

	return NULL;
}

/**
 * smack_set_catset - convert a catset to netlabel mls categories
 * @catset: the Smack categories
 * @sap: where to put the netlabel categories
 *
 * Allocates and fills attr.mls.cat
 */
static void smack_set_catset(char *catset, struct netlbl_lsm_secattr *sap)
{
	unsigned char *cp;
	unsigned char m;
	int cat;
	int rc;
	int byte;

	if (!catset)
		return;

	sap->flags |= NETLBL_SECATTR_MLS_CAT;
	sap->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
	sap->attr.mls.cat->startbit = 0;

	for (cat = 1, cp = catset, byte = 0; byte < SMK_LABELLEN; cp++, byte++)
		for (m = 0x80; m != 0; m >>= 1, cat++) {
			if ((m & *cp) == 0)
				continue;
			rc = netlbl_secattr_catmap_setbit(sap->attr.mls.cat,
							  cat, GFP_ATOMIC);
		}
}
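/*
 * Illustrative note (not part of the original module): smack_set_catset()
 * above reads the catset as a big-endian bitmap in which the high bit of
 * byte 0 is category 1. A first byte of 0xA0 (binary 1010 0000) therefore
 * sets categories 1 (0x80) and 3 (0x20) in the netlabel category map.
 */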
/**
 * smack_to_secattr - fill a secattr from a smack value
 * @smack: the smack value
 * @nlsp: where the result goes
 *
 * Casey says that CIPSO is good enough for now.
 * It can be used to effect.
 * It can also be abused to effect when necessary.
 * Apologies to the TSIG group in general and GW in particular.
 */
static void smack_to_secattr(char *smack, struct netlbl_lsm_secattr *nlsp)
{
	struct smack_cipso cipso;
	int rc;

	nlsp->domain = smack;
	nlsp->flags = NETLBL_SECATTR_DOMAIN | NETLBL_SECATTR_MLS_LVL;

	rc = smack_to_cipso(smack, &cipso);
	if (rc == 0) {
		nlsp->attr.mls.lvl = cipso.smk_level;
		smack_set_catset(cipso.smk_catset, nlsp);
	} else {
		nlsp->attr.mls.lvl = smack_cipso_direct;
		smack_set_catset(smack, nlsp);
	}
}

/**
 * smack_netlabel - Set the secattr on a socket
 * @sk: the socket
 * @labeled: socket label scheme
 *
 * Convert the outbound smack value (smk_out) to a
 * secattr and attach it to the socket.
 *
 * Returns 0 on success or an error code
 */
static int smack_netlabel(struct sock *sk, int labeled)
{
	struct socket_smack *ssp = sk->sk_security;
	struct netlbl_lsm_secattr secattr;
	int rc = 0;

	/*
	 * Usually the netlabel code will handle changing the
	 * packet labeling based on the label.
	 * The case of a single label host is different, because
	 * a single label host should never get a labeled packet
	 * even though the label is usually associated with a packet
	 * label.
	 */
	local_bh_disable();
	bh_lock_sock_nested(sk);

	if (ssp->smk_out == smack_net_ambient ||
	    labeled == SMACK_UNLABELED_SOCKET)
		netlbl_sock_delattr(sk);
	else {
		netlbl_secattr_init(&secattr);
		smack_to_secattr(ssp->smk_out, &secattr);
		rc = netlbl_sock_setattr(sk, sk->sk_family, &secattr);
		netlbl_secattr_destroy(&secattr);
	}

	bh_unlock_sock(sk);
	local_bh_enable();

	return rc;
}

/**
 * smack_netlabel_send - Set the secattr on a socket and perform access checks
 * @sk: the socket
 * @sap: the destination address
 *
 * Set the correct secattr for the given socket based on the destination
 * address and perform any outbound access checks needed.
 *
 * Returns 0 on success or an error code.
 *
 */
static int smack_netlabel_send(struct sock *sk, struct sockaddr_in *sap)
{
	int rc;
	int sk_lbl;
	char *hostsp;
	struct socket_smack *ssp = sk->sk_security;
	struct smk_audit_info ad;

	rcu_read_lock();
	hostsp = smack_host_label(sap);
	if (hostsp != NULL) {
		sk_lbl = SMACK_UNLABELED_SOCKET;
#ifdef CONFIG_AUDIT
		smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET);
		ad.a.u.net.family = sap->sin_family;
		ad.a.u.net.dport = sap->sin_port;
		ad.a.u.net.v4info.daddr = sap->sin_addr.s_addr;
#endif
		rc = smk_access(ssp->smk_out, hostsp, MAY_WRITE, &ad);
	} else {
		sk_lbl = SMACK_CIPSO_SOCKET;
		rc = 0;
	}
	rcu_read_unlock();
	if (rc != 0)
		return rc;

	return smack_netlabel(sk, sk_lbl);
}

/**
 * smack_inode_setsecurity - set smack xattrs
 * @inode: the object
 * @name: attribute name
 * @value: attribute value
 * @size: size of the attribute
 * @flags: unused
 *
 * Sets the named attribute in the appropriate blob
 *
 * Returns 0 on success, or an error code
 */
static int smack_inode_setsecurity(struct inode *inode, const char *name,
				   const void *value, size_t size, int flags)
{
	char *sp;
	struct inode_smack *nsp = inode->i_security;
	struct socket_smack *ssp;
	struct socket *sock;
	int rc = 0;

	if (value == NULL || size > SMK_LABELLEN || size == 0)
		return -EACCES;

	sp = smk_import(value, size);
	if (sp == NULL)
		return -EINVAL;

	if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) {
		nsp->smk_inode = sp;
		nsp->smk_flags |= SMK_INODE_INSTANT;
		return 0;
	}
	/*
	 * The rest of the Smack xattrs are only on sockets.
*/ if (inode->i_sb->s_magic != SOCKFS_MAGIC) return -EOPNOTSUPP; sock = SOCKET_I(inode); if (sock == NULL || sock->sk == NULL) return -EOPNOTSUPP; ssp = sock->sk->sk_security; if (strcmp(name, XATTR_SMACK_IPIN) == 0) ssp->smk_in = sp; else if (strcmp(name, XATTR_SMACK_IPOUT) == 0) { ssp->smk_out = sp; rc = smack_netlabel(sock->sk, SMACK_CIPSO_SOCKET); if (rc != 0) printk(KERN_WARNING "Smack: \"%s\" netlbl error %d.\n", __func__, -rc); } else return -EOPNOTSUPP; return 0; } /** * smack_socket_post_create - finish socket setup * @sock: the socket * @family: protocol family * @type: unused * @protocol: unused * @kern: unused * * Sets the netlabel information on the socket * * Returns 0 on success, and error code otherwise */ static int smack_socket_post_create(struct socket *sock, int family, int type, int protocol, int kern) { if (family != PF_INET || sock->sk == NULL) return 0; /* * Set the outbound netlbl. */ return smack_netlabel(sock->sk, SMACK_CIPSO_SOCKET); } /** * smack_socket_connect - connect access check * @sock: the socket * @sap: the other end * @addrlen: size of sap * * Verifies that a connection may be possible * * Returns 0 on success, and error code otherwise */ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap, int addrlen) { if (sock->sk == NULL || sock->sk->sk_family != PF_INET) return 0; if (addrlen < sizeof(struct sockaddr_in)) return -EINVAL; return smack_netlabel_send(sock->sk, (struct sockaddr_in *)sap); } /** * smack_flags_to_may - convert S_ to MAY_ values * @flags: the S_ value * * Returns the equivalent MAY_ value */ static int smack_flags_to_may(int flags) { int may = 0; if (flags & S_IRUGO) may |= MAY_READ; if (flags & S_IWUGO) may |= MAY_WRITE; if (flags & S_IXUGO) may |= MAY_EXEC; return may; } /** * smack_msg_msg_alloc_security - Set the security blob for msg_msg * @msg: the object * * Returns 0 */ static int smack_msg_msg_alloc_security(struct msg_msg *msg) { msg->security = current_security(); return 0; } /** * smack_msg_msg_free_security - Clear the security blob for msg_msg * @msg: the object * * Clears the blob pointer */ static void smack_msg_msg_free_security(struct msg_msg *msg) { msg->security = NULL; } /** * smack_of_shm - the smack pointer for the shm * @shp: the object * * Returns a pointer to the smack value */ static char *smack_of_shm(struct shmid_kernel *shp) { return (char *)shp->shm_perm.security; } /** * smack_shm_alloc_security - Set the security blob for shm * @shp: the object * * Returns 0 */ static int smack_shm_alloc_security(struct shmid_kernel *shp) { struct kern_ipc_perm *isp = &shp->shm_perm; isp->security = current_security(); return 0; } /** * smack_shm_free_security - Clear the security blob for shm * @shp: the object * * Clears the blob pointer */ static void smack_shm_free_security(struct shmid_kernel *shp) { struct kern_ipc_perm *isp = &shp->shm_perm; isp->security = NULL; } /** * smk_curacc_shm : check if current has access on shm * @shp : the object * @access : access requested * * Returns 0 if current has the requested access, error code otherwise */ static int smk_curacc_shm(struct shmid_kernel *shp, int access) { char *ssp = smack_of_shm(shp); struct smk_audit_info ad; #ifdef CONFIG_AUDIT smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_IPC); ad.a.u.ipc_id = shp->shm_perm.id; #endif return smk_curacc(ssp, access, &ad); } /** * smack_shm_associate - Smack access check for shm * @shp: the object * @shmflg: access requested * * Returns 0 if current has the requested access, error code otherwise */ static 
int smack_shm_associate(struct shmid_kernel *shp, int shmflg) { int may; may = smack_flags_to_may(shmflg); return smk_curacc_shm(shp, may); } /** * smack_shm_shmctl - Smack access check for shm * @shp: the object * @cmd: what it wants to do * * Returns 0 if current has the requested access, error code otherwise */ static int smack_shm_shmctl(struct shmid_kernel *shp, int cmd) { int may; switch (cmd) { case IPC_STAT: case SHM_STAT: may = MAY_READ; break; case IPC_SET: case SHM_LOCK: case SHM_UNLOCK: case IPC_RMID: may = MAY_READWRITE; break; case IPC_INFO: case SHM_INFO: /* * System level information. */ return 0; default: return -EINVAL; } return smk_curacc_shm(shp, may); } /** * smack_shm_shmat - Smack access for shmat * @shp: the object * @shmaddr: unused * @shmflg: access requested * * Returns 0 if current has the requested access, error code otherwise */ static int smack_shm_shmat(struct shmid_kernel *shp, char __user *shmaddr, int shmflg) { int may; may = smack_flags_to_may(shmflg); return smk_curacc_shm(shp, may); } /** * smack_of_sem - the smack pointer for the sem * @sma: the object * * Returns a pointer to the smack value */ static char *smack_of_sem(struct sem_array *sma) { return (char *)sma->sem_perm.security; } /** * smack_sem_alloc_security - Set the security blob for sem * @sma: the object * * Returns 0 */ static int smack_sem_alloc_security(struct sem_array *sma) { struct kern_ipc_perm *isp = &sma->sem_perm; isp->security = current_security(); return 0; } /** * smack_sem_free_security - Clear the security blob for sem * @sma: the object * * Clears the blob pointer */ static void smack_sem_free_security(struct sem_array *sma) { struct kern_ipc_perm *isp = &sma->sem_perm; isp->security = NULL; } /** * smk_curacc_sem : check if current has access on sem * @sma : the object * @access : access requested * * Returns 0 if current has the requested access, error code otherwise */ static int smk_curacc_sem(struct sem_array *sma, int access) { char *ssp = smack_of_sem(sma); struct smk_audit_info ad; #ifdef CONFIG_AUDIT smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_IPC); ad.a.u.ipc_id = sma->sem_perm.id; #endif return smk_curacc(ssp, access, &ad); } /** * smack_sem_associate - Smack access check for sem * @sma: the object * @semflg: access requested * * Returns 0 if current has the requested access, error code otherwise */ static int smack_sem_associate(struct sem_array *sma, int semflg) { int may; may = smack_flags_to_may(semflg); return smk_curacc_sem(sma, may); } /** * smack_sem_semctl - Smack access check for sem * @sma: the object * @cmd: what it wants to do * * Returns 0 if current has the requested access, error code otherwise */ static int smack_sem_semctl(struct sem_array *sma, int cmd) { int may; switch (cmd) { case GETPID: case GETNCNT: case GETZCNT: case GETVAL: case GETALL: case IPC_STAT: case SEM_STAT: may = MAY_READ; break; case SETVAL: case SETALL: case IPC_RMID: case IPC_SET: may = MAY_READWRITE; break; case IPC_INFO: case SEM_INFO: /* * System level information */ return 0; default: return -EINVAL; } return smk_curacc_sem(sma, may); } /** * smack_sem_semop - Smack checks of semaphore operations * @sma: the object * @sops: unused * @nsops: unused * @alter: unused * * Treated as read and write in all cases.
* * Returns 0 if access is allowed, error code otherwise */ static int smack_sem_semop(struct sem_array *sma, struct sembuf *sops, unsigned nsops, int alter) { return smk_curacc_sem(sma, MAY_READWRITE); } /** * smack_msg_queue_alloc_security - Set the security blob for msg * @msq: the object * * Returns 0 */ static int smack_msg_queue_alloc_security(struct msg_queue *msq) { struct kern_ipc_perm *kisp = &msq->q_perm; kisp->security = current_security(); return 0; } /** * smack_msg_queue_free_security - Clear the security blob for msg * @msq: the object * * Clears the blob pointer */ static void smack_msg_queue_free_security(struct msg_queue *msq) { struct kern_ipc_perm *kisp = &msq->q_perm; kisp->security = NULL; } /** * smack_of_msq - the smack pointer for the msq * @msq: the object * * Returns a pointer to the smack value */ static char *smack_of_msq(struct msg_queue *msq) { return (char *)msq->q_perm.security; } /** * smk_curacc_msq : helper to check if current has access on msq * @msq : the msq * @access : access requested * * return 0 if current has access, error otherwise */ static int smk_curacc_msq(struct msg_queue *msq, int access) { char *msp = smack_of_msq(msq); struct smk_audit_info ad; #ifdef CONFIG_AUDIT smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_IPC); ad.a.u.ipc_id = msq->q_perm.id; #endif return smk_curacc(msp, access, &ad); } /** * smack_msg_queue_associate - Smack access check for msg_queue * @msq: the object * @msqflg: access requested * * Returns 0 if current has the requested access, error code otherwise */ static int smack_msg_queue_associate(struct msg_queue *msq, int msqflg) { int may; may = smack_flags_to_may(msqflg); return smk_curacc_msq(msq, may); } /** * smack_msg_queue_msgctl - Smack access check for msg_queue * @msq: the object * @cmd: what it wants to do * * Returns 0 if current has the requested access, error code otherwise */ static int smack_msg_queue_msgctl(struct msg_queue *msq, int cmd) { int may; switch (cmd) { case IPC_STAT: case MSG_STAT: may = MAY_READ; break; case IPC_SET: case IPC_RMID: may = MAY_READWRITE; break; case IPC_INFO: case MSG_INFO: /* * System level information */ return 0; default: return -EINVAL; } return smk_curacc_msq(msq, may); } /** * smack_msg_queue_msgsnd - Smack access check for msg_queue * @msq: the object * @msg: unused * @msqflg: access requested * * Returns 0 if current has the requested access, error code otherwise */ static int smack_msg_queue_msgsnd(struct msg_queue *msq, struct msg_msg *msg, int msqflg) { int may; may = smack_flags_to_may(msqflg); return smk_curacc_msq(msq, may); } /** * smack_msg_queue_msgrcv - Smack access check for msg_queue * @msq: the object * @msg: unused * @target: unused * @type: unused * @mode: unused * * Returns 0 if current has read and write access, error code otherwise */ static int smack_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg, struct task_struct *target, long type, int mode) { return smk_curacc_msq(msq, MAY_READWRITE); } /** * smack_ipc_permission - Smack access for ipc_permission() * @ipp: the object permissions * @flag: access requested * * Returns 0 if current has read and write access, error code otherwise */ static int smack_ipc_permission(struct kern_ipc_perm *ipp, short flag) { char *isp = ipp->security; int may = smack_flags_to_may(flag); struct smk_audit_info ad; #ifdef CONFIG_AUDIT smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_IPC); ad.a.u.ipc_id = ipp->id; #endif return smk_curacc(isp, may, &ad); } /** * smack_ipc_getsecid - Extract smack security id * @ipp: the object
permissions * @secid: where result will be saved */ static void smack_ipc_getsecid(struct kern_ipc_perm *ipp, u32 *secid) { char *smack = ipp->security; *secid = smack_to_secid(smack); } /** * smack_d_instantiate - Make sure the blob is correct on an inode * @opt_dentry: unused * @inode: the object * * Set the inode's security blob if it hasn't been done already. */ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode) { struct super_block *sbp; struct superblock_smack *sbsp; struct inode_smack *isp; char *csp = current_security(); char *fetched; char *final; struct dentry *dp; if (inode == NULL) return; isp = inode->i_security; mutex_lock(&isp->smk_lock); /* * If the inode is already instantiated * take the quick way out */ if (isp->smk_flags & SMK_INODE_INSTANT) goto unlockandout; sbp = inode->i_sb; sbsp = sbp->s_security; /* * We're going to use the superblock default label * if there's no label on the file. */ final = sbsp->smk_default; /* * If this is the root inode the superblock * may be in the process of initialization. * If that is the case use the root value out * of the superblock. */ if (opt_dentry->d_parent == opt_dentry) { isp->smk_inode = sbsp->smk_root; isp->smk_flags |= SMK_INODE_INSTANT; goto unlockandout; } /* * This is pretty hackish. * Casey says that we shouldn't have to do * file system specific code, but it does help * with keeping it simple. */ switch (sbp->s_magic) { case SMACK_MAGIC: /* * Casey says that it's a little embarrassing * that the smack file system doesn't do * extended attributes. */ final = smack_known_star.smk_known; break; case PIPEFS_MAGIC: /* * Casey says pipes are easy (?) */ final = smack_known_star.smk_known; break; case DEVPTS_SUPER_MAGIC: /* * devpts seems content with the label of the task. * Programs that change smack have to treat the * pty with respect. */ final = csp; break; case SOCKFS_MAGIC: /* * Casey says sockets get the smack of the task. */ final = csp; break; case PROC_SUPER_MAGIC: /* * Casey says procfs appears not to care. * The superblock default suffices. */ break; case TMPFS_MAGIC: /* * Device labels should come from the filesystem, * but watch out, because they're volatile, * getting recreated on every reboot. */ final = smack_known_star.smk_known; /* * No break. * * If a smack value has been set we want to use it, * but since tmpfs isn't giving us the opportunity * to set mount options simulate setting the * superblock default. */ default: /* * This isn't an understood special case. * Get the value from the xattr. * * No xattr support means, alas, no SMACK label. * Use the previously applied default. * It would be curious if the label of the task * does not match that assigned. */ if (inode->i_op->getxattr == NULL) break; /* * Get the dentry for xattr.
*/ if (opt_dentry == NULL) { dp = d_find_alias(inode); if (dp == NULL) break; } else { dp = dget(opt_dentry); if (dp == NULL) break; } fetched = smk_fetch(inode, dp); if (fetched != NULL) final = fetched; dput(dp); break; } if (final == NULL) isp->smk_inode = csp; else isp->smk_inode = final; isp->smk_flags |= SMK_INODE_INSTANT; unlockandout: mutex_unlock(&isp->smk_lock); return; } /** * smack_getprocattr - Smack process attribute access * @p: the object task * @name: the name of the attribute in /proc/.../attr * @value: where to put the result * * Places a copy of the task Smack into value * * Returns the length of the smack label or an error code */ static int smack_getprocattr(struct task_struct *p, char *name, char **value) { char *cp; int slen; if (strcmp(name, "current") != 0) return -EINVAL; cp = kstrdup(task_security(p), GFP_KERNEL); if (cp == NULL) return -ENOMEM; slen = strlen(cp); *value = cp; return slen; } /** * smack_setprocattr - Smack process attribute setting * @p: the object task * @name: the name of the attribute in /proc/.../attr * @value: the value to set * @size: the size of the value * * Sets the Smack value of the task. Only setting self * is permitted and only with privilege * * Returns the length of the smack label or an error code */ static int smack_setprocattr(struct task_struct *p, char *name, void *value, size_t size) { struct cred *new; char *newsmack; /* * Changing another process' Smack value is too dangerous * and supports no sane use case. */ if (p != current) return -EPERM; if (!capable(CAP_MAC_ADMIN)) return -EPERM; if (value == NULL || size == 0 || size >= SMK_LABELLEN) return -EINVAL; if (strcmp(name, "current") != 0) return -EINVAL; newsmack = smk_import(value, size); if (newsmack == NULL) return -EINVAL; /* * No process is ever allowed the web ("@") label. */ if (newsmack == smack_known_web.smk_known) return -EPERM; new = prepare_creds(); if (new == NULL) return -ENOMEM; new->security = newsmack; commit_creds(new); return size; } /** * smack_unix_stream_connect - Smack access on UDS * @sock: one socket * @other: the other socket * @newsk: unused * * Return 0 if a subject with the smack of sock could access * an object with the smack of other, otherwise an error code */ static int smack_unix_stream_connect(struct socket *sock, struct socket *other, struct sock *newsk) { struct inode *sp = SOCK_INODE(sock); struct inode *op = SOCK_INODE(other); struct smk_audit_info ad; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET); smk_ad_setfield_u_net_sk(&ad, other->sk); return smk_access(smk_of_inode(sp), smk_of_inode(op), MAY_READWRITE, &ad); } /** * smack_unix_may_send - Smack access on UDS * @sock: one socket * @other: the other socket * * Return 0 if a subject with the smack of sock could access * an object with the smack of other, otherwise an error code */ static int smack_unix_may_send(struct socket *sock, struct socket *other) { struct inode *sp = SOCK_INODE(sock); struct inode *op = SOCK_INODE(other); struct smk_audit_info ad; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET); smk_ad_setfield_u_net_sk(&ad, other->sk); return smk_access(smk_of_inode(sp), smk_of_inode(op), MAY_WRITE, &ad); } /** * smack_socket_sendmsg - Smack check based on destination host * @sock: the socket * @msg: the message * @size: the size of the message * * Return 0 if the current subject can write to the destination * host. This is only a question if the destination is a single * label host. 
*/ static int smack_socket_sendmsg(struct socket *sock, struct msghdr *msg, int size) { struct sockaddr_in *sip = (struct sockaddr_in *) msg->msg_name; /* * Perfectly reasonable for this to be NULL */ if (sip == NULL || sip->sin_family != AF_INET) return 0; return smack_netlabel_send(sock->sk, sip); } /** * smack_from_secattr - Convert a netlabel attr.mls.lvl/attr.mls.cat pair to smack * @sap: netlabel secattr * @sip: where to put the result * * Copies a smack label into sip */ static void smack_from_secattr(struct netlbl_lsm_secattr *sap, char *sip) { char smack[SMK_LABELLEN]; char *sp; int pcat; if ((sap->flags & NETLBL_SECATTR_MLS_LVL) != 0) { /* * Looks like a CIPSO packet. * If there are flags but no level netlabel isn't * behaving the way we expect it to. * * Get the categories, if any * Without guidance regarding the smack value * for the packet fall back on the network * ambient value. */ memset(smack, '\0', SMK_LABELLEN); if ((sap->flags & NETLBL_SECATTR_MLS_CAT) != 0) for (pcat = -1;;) { pcat = netlbl_secattr_catmap_walk( sap->attr.mls.cat, pcat + 1); if (pcat < 0) break; smack_catset_bit(pcat, smack); } /* * If it is CIPSO using smack direct mapping * we are already done. WeeHee. */ if (sap->attr.mls.lvl == smack_cipso_direct) { memcpy(sip, smack, SMK_MAXLEN); return; } /* * Look it up in the supplied table if it is not * a direct mapping. */ smack_from_cipso(sap->attr.mls.lvl, smack, sip); return; } if ((sap->flags & NETLBL_SECATTR_SECID) != 0) { /* * Looks like a fallback, which gives us a secid. */ sp = smack_from_secid(sap->attr.secid); /* * This has got to be a bug because it is * impossible to specify a fallback without * specifying the label, which will ensure * it has a secid, and the only way to get a * secid is from a fallback. */ BUG_ON(sp == NULL); strncpy(sip, sp, SMK_MAXLEN); return; } /* * Without guidance regarding the smack value * for the packet fall back on the network * ambient value. */ strncpy(sip, smack_net_ambient, SMK_MAXLEN); return; } /** * smack_socket_sock_rcv_skb - Smack packet delivery access check * @sk: socket * @skb: packet * * Returns 0 if the packet should be delivered, an error code otherwise */ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb) { struct netlbl_lsm_secattr secattr; struct socket_smack *ssp = sk->sk_security; char smack[SMK_LABELLEN]; char *csp; int rc; struct smk_audit_info ad; if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6) return 0; /* * Translate what netlabel gave us. */ netlbl_secattr_init(&secattr); rc = netlbl_skbuff_getattr(skb, sk->sk_family, &secattr); if (rc == 0) { smack_from_secattr(&secattr, smack); csp = smack; } else csp = smack_net_ambient; netlbl_secattr_destroy(&secattr); #ifdef CONFIG_AUDIT smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET); ad.a.u.net.family = sk->sk_family; ad.a.u.net.netif = skb->skb_iif; ipv4_skb_to_auditdata(skb, &ad.a, NULL); #endif /* * Receiving a packet requires that the other end * be able to write here. Read access is not required. * This is the simplest possible security model * for networking.
*/ rc = smk_access(csp, ssp->smk_in, MAY_WRITE, &ad); if (rc != 0) netlbl_skbuff_err(skb, rc, 0); return rc; } /** * smack_socket_getpeersec_stream - pull in packet label * @sock: the socket * @optval: user's destination * @optlen: size thereof * @len: max thereof * * returns zero on success, an error code otherwise */ static int smack_socket_getpeersec_stream(struct socket *sock, char __user *optval, int __user *optlen, unsigned len) { struct socket_smack *ssp; int slen; int rc = 0; ssp = sock->sk->sk_security; slen = strlen(ssp->smk_packet) + 1; if (slen > len) rc = -ERANGE; else if (copy_to_user(optval, ssp->smk_packet, slen) != 0) rc = -EFAULT; if (put_user(slen, optlen) != 0) rc = -EFAULT; return rc; } /** * smack_socket_getpeersec_dgram - pull in packet label * @sock: the socket * @skb: packet data * @secid: pointer to where to put the secid of the packet * * Sets the netlabel socket state on sk from parent */ static int smack_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid) { struct netlbl_lsm_secattr secattr; struct sock *sk; char smack[SMK_LABELLEN]; int family = PF_INET; u32 s; int rc; /* * Only works for families with packets. */ if (sock != NULL) { sk = sock->sk; if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6) return 0; family = sk->sk_family; } /* * Translate what netlabel gave us. */ netlbl_secattr_init(&secattr); rc = netlbl_skbuff_getattr(skb, family, &secattr); if (rc == 0) smack_from_secattr(&secattr, smack); netlbl_secattr_destroy(&secattr); /* * Give up if we couldn't get anything */ if (rc != 0) return rc; s = smack_to_secid(smack); if (s == 0) return -EINVAL; *secid = s; return 0; } /** * smack_sock_graft - Initialize a newly created socket with an existing sock * @sk: child sock * @parent: parent socket * * Set the smk_{in,out} state of an existing sock based on the process that * is creating the new socket. */ static void smack_sock_graft(struct sock *sk, struct socket *parent) { struct socket_smack *ssp; if (sk == NULL || (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)) return; ssp = sk->sk_security; ssp->smk_in = ssp->smk_out = current_security(); /* ssp->smk_packet is already set in smack_inet_csk_clone() */ } /** * smack_inet_conn_request - Smack access check on connect * @sk: socket involved * @skb: packet * @req: unused * * Returns 0 if a task with the packet label could write to * the socket, otherwise an error code */ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb, struct request_sock *req) { u16 family = sk->sk_family; struct socket_smack *ssp = sk->sk_security; struct netlbl_lsm_secattr secattr; struct sockaddr_in addr; struct iphdr *hdr; char smack[SMK_LABELLEN]; int rc; struct smk_audit_info ad; /* handle mapped IPv4 packets arriving via IPv6 sockets */ if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP)) family = PF_INET; netlbl_secattr_init(&secattr); rc = netlbl_skbuff_getattr(skb, family, &secattr); if (rc == 0) smack_from_secattr(&secattr, smack); else strncpy(smack, smack_known_huh.smk_known, SMK_MAXLEN); netlbl_secattr_destroy(&secattr); #ifdef CONFIG_AUDIT smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET); ad.a.u.net.family = family; ad.a.u.net.netif = skb->skb_iif; ipv4_skb_to_auditdata(skb, &ad.a, NULL); #endif /* * Receiving a packet requires that the other end be able to write * here. Read access is not required.
*/ rc = smk_access(smack, ssp->smk_in, MAY_WRITE, &ad); if (rc != 0) return rc; /* * Save the peer's label in the request_sock so we can later set up * smk_packet in the child socket so that SO_PEERCRED can report it. */ req->peer_secid = smack_to_secid(smack); /* * We need to decide if we want to label the incoming connection here; * if we do, we only need to label the request_sock and the stack will * propagate the wire-label to the sock when it is created. */ hdr = ip_hdr(skb); addr.sin_addr.s_addr = hdr->saddr; rcu_read_lock(); if (smack_host_label(&addr) == NULL) { rcu_read_unlock(); netlbl_secattr_init(&secattr); smack_to_secattr(smack, &secattr); rc = netlbl_req_setattr(req, &secattr); netlbl_secattr_destroy(&secattr); } else { rcu_read_unlock(); netlbl_req_delattr(req); } return rc; } /** * smack_inet_csk_clone - Copy the connection information to the new socket * @sk: the new socket * @req: the connection's request_sock * * Transfer the connection's peer label to the newly created socket. */ static void smack_inet_csk_clone(struct sock *sk, const struct request_sock *req) { struct socket_smack *ssp = sk->sk_security; char *smack; if (req->peer_secid != 0) { smack = smack_from_secid(req->peer_secid); strncpy(ssp->smk_packet, smack, SMK_MAXLEN); } else ssp->smk_packet[0] = '\0'; } /* * Key management security hooks * * Casey has not tested key support very heavily. * The permission check is most likely too restrictive. * If you care about keys please have a look. */ #ifdef CONFIG_KEYS /** * smack_key_alloc - Set the key security blob * @key: object * @cred: the credentials to use * @flags: unused * * No allocation required * * Returns 0 */ static int smack_key_alloc(struct key *key, const struct cred *cred, unsigned long flags) { key->security = cred->security; return 0; } /** * smack_key_free - Clear the key security blob * @key: the object * * Clear the blob pointer */ static void smack_key_free(struct key *key) { key->security = NULL; } /* * smack_key_permission - Smack access on a key * @key_ref: gets to the object * @cred: the credentials to use * @perm: unused * * Return 0 if the task has read and write access to the object, * an error code otherwise */ static int smack_key_permission(key_ref_t key_ref, const struct cred *cred, key_perm_t perm) { struct key *keyp; struct smk_audit_info ad; keyp = key_ref_to_ptr(key_ref); if (keyp == NULL) return -EINVAL; /* * If the key hasn't been initialized give it access so that * it may do so. */ if (keyp->security == NULL) return 0; /* * This should not occur */ if (cred->security == NULL) return -EACCES; #ifdef CONFIG_AUDIT smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_KEY); ad.a.u.key_struct.key = keyp->serial; ad.a.u.key_struct.key_desc = keyp->description; #endif return smk_access(cred->security, keyp->security, MAY_READWRITE, &ad); } #endif /* CONFIG_KEYS */ /* * Smack Audit hooks * * Audit requires a unique representation of each Smack specific * rule. This unique representation is used to distinguish the * object to be audited from remaining kernel objects and also * works as a glue between the audit hooks. * * Since repository entries are added but never deleted, we'll use * the smack_known label address related to the given audit rule as * the needed unique representation. This also better fits the smack * model where nearly everything is a label. */ #ifdef CONFIG_AUDIT /** * smack_audit_rule_init - Initialize a smack audit rule * @field: audit rule fields given from user-space (audit.h) * @op: required testing operator (=, !=, >, <, ...)
* @rulestr: smack label to be audited * @vrule: pointer to save our own audit rule representation * * Prepare to audit cases where (@field @op @rulestr) is true. * The label to be audited is created if necessary. */ static int smack_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule) { char **rule = (char **)vrule; *rule = NULL; if (field != AUDIT_SUBJ_USER && field != AUDIT_OBJ_USER) return -EINVAL; if (op != Audit_equal && op != Audit_not_equal) return -EINVAL; *rule = smk_import(rulestr, 0); return 0; } /** * smack_audit_rule_known - Distinguish Smack audit rules * @krule: rule of interest, in Audit kernel representation format * * This is used to filter Smack rules from remaining Audit ones. * If it's proved that this rule belongs to us, the * audit_rule_match hook will be called to do the final judgement. */ static int smack_audit_rule_known(struct audit_krule *krule) { struct audit_field *f; int i; for (i = 0; i < krule->field_count; i++) { f = &krule->fields[i]; if (f->type == AUDIT_SUBJ_USER || f->type == AUDIT_OBJ_USER) return 1; } return 0; } /** * smack_audit_rule_match - Audit given object ? * @secid: security id for identifying the object to test * @field: audit rule flags given from user-space * @op: required testing operator * @vrule: smack internal rule presentation * @actx: audit context associated with the check * * The core Audit hook. It's used to take the decision of * whether to audit or not to audit a given object. */ static int smack_audit_rule_match(u32 secid, u32 field, u32 op, void *vrule, struct audit_context *actx) { char *smack; char *rule = vrule; if (!rule) { audit_log(actx, GFP_KERNEL, AUDIT_SELINUX_ERR, "Smack: missing rule\n"); return -ENOENT; } if (field != AUDIT_SUBJ_USER && field != AUDIT_OBJ_USER) return 0; smack = smack_from_secid(secid); /* * No need to do string comparisons. If a match occurs, * both pointers will point to the same smack_known * label. */ if (op == Audit_equal) return (rule == smack); if (op == Audit_not_equal) return (rule != smack); return 0; } /** * smack_audit_rule_free - free smack rule representation * @vrule: rule to be freed. * * No memory was allocated. */ static void smack_audit_rule_free(void *vrule) { /* No-op */ } #endif /* CONFIG_AUDIT */ /** * smack_secid_to_secctx - return the smack label for a secid * @secid: incoming integer * @secdata: destination * @seclen: how long it is * * Exists for networking code. */ static int smack_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) { char *sp = smack_from_secid(secid); *secdata = sp; *seclen = strlen(sp); return 0; } /** * smack_secctx_to_secid - return the secid for a smack label * @secdata: smack label * @seclen: how long result is * @secid: outgoing integer * * Exists for audit and networking code. */ static int smack_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid) { *secid = smack_to_secid(secdata); return 0; } /** * smack_release_secctx - don't do anything.
* @secdata: unused * @seclen: unused * * Exists to make sure nothing gets done, and properly */ static void smack_release_secctx(char *secdata, u32 seclen) { } static int smack_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen) { return smack_inode_setsecurity(inode, XATTR_SMACK_SUFFIX, ctx, ctxlen, 0); } static int smack_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen) { return __vfs_setxattr_noperm(dentry, XATTR_NAME_SMACK, ctx, ctxlen, 0); } static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen) { int len = 0; len = smack_inode_getsecurity(inode, XATTR_SMACK_SUFFIX, ctx, true); if (len < 0) return len; *ctxlen = len; return 0; } struct security_operations smack_ops = { .name = "smack", .ptrace_access_check = smack_ptrace_access_check, .ptrace_traceme = smack_ptrace_traceme, .syslog = smack_syslog, .sb_alloc_security = smack_sb_alloc_security, .sb_free_security = smack_sb_free_security, .sb_copy_data = smack_sb_copy_data, .sb_kern_mount = smack_sb_kern_mount, .sb_statfs = smack_sb_statfs, .sb_mount = smack_sb_mount, .sb_umount = smack_sb_umount, .inode_alloc_security = smack_inode_alloc_security, .inode_free_security = smack_inode_free_security, .inode_init_security = smack_inode_init_security, .inode_link = smack_inode_link, .inode_unlink = smack_inode_unlink, .inode_rmdir = smack_inode_rmdir, .inode_rename = smack_inode_rename, .inode_permission = smack_inode_permission, .inode_setattr = smack_inode_setattr, .inode_getattr = smack_inode_getattr, .inode_setxattr = smack_inode_setxattr, .inode_post_setxattr = smack_inode_post_setxattr, .inode_getxattr = smack_inode_getxattr, .inode_removexattr = smack_inode_removexattr, .inode_getsecurity = smack_inode_getsecurity, .inode_setsecurity = smack_inode_setsecurity, .inode_listsecurity = smack_inode_listsecurity, .inode_getsecid = smack_inode_getsecid, .file_permission = smack_file_permission, .file_alloc_security = smack_file_alloc_security, .file_free_security = smack_file_free_security, .file_ioctl = smack_file_ioctl, .file_lock = smack_file_lock, .file_fcntl = smack_file_fcntl, .file_set_fowner = smack_file_set_fowner, .file_send_sigiotask = smack_file_send_sigiotask, .file_receive = smack_file_receive, .cred_alloc_blank = smack_cred_alloc_blank, .cred_free = smack_cred_free, .cred_prepare = smack_cred_prepare, .cred_commit = smack_cred_commit, .cred_transfer = smack_cred_transfer, .kernel_act_as = smack_kernel_act_as, .kernel_create_files_as = smack_kernel_create_files_as, .task_setpgid = smack_task_setpgid, .task_getpgid = smack_task_getpgid, .task_getsid = smack_task_getsid, .task_getsecid = smack_task_getsecid, .task_setnice = smack_task_setnice, .task_setioprio = smack_task_setioprio, .task_getioprio = smack_task_getioprio, .task_setscheduler = smack_task_setscheduler, .task_getscheduler = smack_task_getscheduler, .task_movememory = smack_task_movememory, .task_kill = smack_task_kill, .task_wait = smack_task_wait, .task_to_inode = smack_task_to_inode, .ipc_permission = smack_ipc_permission, .ipc_getsecid = smack_ipc_getsecid, .msg_msg_alloc_security = smack_msg_msg_alloc_security, .msg_msg_free_security = smack_msg_msg_free_security, .msg_queue_alloc_security = smack_msg_queue_alloc_security, .msg_queue_free_security = smack_msg_queue_free_security, .msg_queue_associate = smack_msg_queue_associate, .msg_queue_msgctl = smack_msg_queue_msgctl, .msg_queue_msgsnd = smack_msg_queue_msgsnd, .msg_queue_msgrcv = smack_msg_queue_msgrcv, .shm_alloc_security = smack_shm_alloc_security, 
.shm_free_security = smack_shm_free_security, .shm_associate = smack_shm_associate, .shm_shmctl = smack_shm_shmctl, .shm_shmat = smack_shm_shmat, .sem_alloc_security = smack_sem_alloc_security, .sem_free_security = smack_sem_free_security, .sem_associate = smack_sem_associate, .sem_semctl = smack_sem_semctl, .sem_semop = smack_sem_semop, .d_instantiate = smack_d_instantiate, .getprocattr = smack_getprocattr, .setprocattr = smack_setprocattr, .unix_stream_connect = smack_unix_stream_connect, .unix_may_send = smack_unix_may_send, .socket_post_create = smack_socket_post_create, .socket_connect = smack_socket_connect, .socket_sendmsg = smack_socket_sendmsg, .socket_sock_rcv_skb = smack_socket_sock_rcv_skb, .socket_getpeersec_stream = smack_socket_getpeersec_stream, .socket_getpeersec_dgram = smack_socket_getpeersec_dgram, .sk_alloc_security = smack_sk_alloc_security, .sk_free_security = smack_sk_free_security, .sock_graft = smack_sock_graft, .inet_conn_request = smack_inet_conn_request, .inet_csk_clone = smack_inet_csk_clone, /* key management security hooks */ #ifdef CONFIG_KEYS .key_alloc = smack_key_alloc, .key_free = smack_key_free, .key_permission = smack_key_permission, #endif /* CONFIG_KEYS */ /* Audit hooks */ #ifdef CONFIG_AUDIT .audit_rule_init = smack_audit_rule_init, .audit_rule_known = smack_audit_rule_known, .audit_rule_match = smack_audit_rule_match, .audit_rule_free = smack_audit_rule_free, #endif /* CONFIG_AUDIT */ .secid_to_secctx = smack_secid_to_secctx, .secctx_to_secid = smack_secctx_to_secid, .release_secctx = smack_release_secctx, .inode_notifysecctx = smack_inode_notifysecctx, .inode_setsecctx = smack_inode_setsecctx, .inode_getsecctx = smack_inode_getsecctx, }; static __init void init_smack_know_list(void) { list_add(&smack_known_huh.list, &smack_known_list); list_add(&smack_known_hat.list, &smack_known_list); list_add(&smack_known_star.list, &smack_known_list); list_add(&smack_known_floor.list, &smack_known_list); list_add(&smack_known_invalid.list, &smack_known_list); list_add(&smack_known_web.list, &smack_known_list); } /** * smack_init - initialize the smack system * * Returns 0 */ static __init int smack_init(void) { struct cred *cred; if (!security_module_enable(&smack_ops)) return 0; printk(KERN_INFO "Smack: Initializing.\n"); /* * Set the security state for the initial task. */ cred = (struct cred *) current->cred; cred->security = &smack_known_floor.smk_known; /* initialize the smack_known_list */ init_smack_know_list(); /* * Initialize locks */ spin_lock_init(&smack_known_huh.smk_cipsolock); spin_lock_init(&smack_known_hat.smk_cipsolock); spin_lock_init(&smack_known_star.smk_cipsolock); spin_lock_init(&smack_known_floor.smk_cipsolock); spin_lock_init(&smack_known_invalid.smk_cipsolock); /* * Register with LSM */ if (register_security(&smack_ops)) panic("smack: Unable to register with kernel.\n"); return 0; } /* * Smack requires early initialization in order to label * all processes and objects when they are created. */ security_initcall(smack_init);
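/*
 * A minimal, self-contained userspace sketch (not kernel code) of the
 * label-interning idea that smack_audit_rule_match() above depends on:
 * every label is imported through smk_import(), so equal labels share
 * one canonical pointer and "rule == smack" can stand in for strcmp().
 * demo_import() and DEMO_MAX_LABELS are hypothetical stand-ins for
 * illustration only, not part of the Smack API.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_MAX_LABELS 32

static char *demo_labels[DEMO_MAX_LABELS];
static int demo_count;

/* Return the canonical copy of a label, adding it on first sight. */
static const char *demo_import(const char *label)
{
	int i;

	for (i = 0; i < demo_count; i++)
		if (strcmp(demo_labels[i], label) == 0)
			return demo_labels[i];
	if (demo_count == DEMO_MAX_LABELS)
		return NULL;
	demo_labels[demo_count] = malloc(strlen(label) + 1);
	if (demo_labels[demo_count] == NULL)
		return NULL;
	strcpy(demo_labels[demo_count], label);
	return demo_labels[demo_count++];
}

int main(void)
{
	const char *a = demo_import("System");
	const char *b = demo_import("System");
	const char *c = demo_import("User");

	/* Interned labels compare by pointer, as in smack_audit_rule_match(). */
	printf("a == b: %d (same label, same pointer)\n", a == b);
	printf("a == c: %d (different labels)\n", a == c);
	return 0;
}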
gpl-2.0
cambridgehackers/linux-xlnx
drivers/staging/vt6655/vntwifi.c
298
17081
/* * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * * File: vntwifi.c * * Purpose: export functions for vntwifi lib * * Functions: * * Revision History: * * Author: Yiching Chen * * Date: feb. 2, 2005 * */ #include "vntwifi.h" #include "IEEE11h.h" #include "country.h" #include "device.h" #include "wmgr.h" #include "datarate.h" /*--------------------- Static Definitions -------------------------*/ //static int msglevel =MSG_LEVEL_DEBUG; //static int msglevel =MSG_LEVEL_INFO; /*--------------------- Static Classes ----------------------------*/ /*--------------------- Static Variables --------------------------*/ /*--------------------- Static Functions --------------------------*/ /*--------------------- Export Variables --------------------------*/ /*--------------------- Export Functions --------------------------*/ /*+ * * Description: * Set Operation Mode * * Parameters: * In: * pMgmtHandle - pointer to management object * eOPMode - Operation Mode * Out: * none * * Return Value: none * -*/ void VNTWIFIvSetOPMode( void *pMgmtHandle, WMAC_CONFIG_MODE eOPMode ) { PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle; pMgmt->eConfigMode = eOPMode; } /*+ * * Description: * Set IBSS Parameters * * Parameters: * In: * pMgmtHandle - pointer to management object * wBeaconPeriod - Beacon Period * wATIMWindow - ATIM window * uChannel - channel number * Out: * none * * Return Value: none * -*/ void VNTWIFIvSetIBSSParameter( void *pMgmtHandle, unsigned short wBeaconPeriod, unsigned short wATIMWindow, unsigned int uChannel ) { PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle; pMgmt->wIBSSBeaconPeriod = wBeaconPeriod; pMgmt->wIBSSATIMWindow = wATIMWindow; pMgmt->uIBSSChannel = uChannel; } /*+ * * Description: * Get current SSID * * Parameters: * In: * pMgmtHandle - pointer to management object * Out: * none * * Return Value: current SSID pointer. * -*/ PWLAN_IE_SSID VNTWIFIpGetCurrentSSID( void *pMgmtHandle ) { PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle; return (PWLAN_IE_SSID) pMgmt->abyCurrSSID; } /*+ * * Description: * Get current link channel * * Parameters: * In: * pMgmtHandle - pointer to management object * Out: * none * * Return Value: current Channel.
* -*/ unsigned int VNTWIFIpGetCurrentChannel( void *pMgmtHandle ) { PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle; if (pMgmtHandle != NULL) { return pMgmt->uCurrChannel; } return 0; } /*+ * * Description: * Get current Assoc ID * * Parameters: * In: * pMgmtHandle - pointer to management object * Out: * none * * Return Value: current Assoc ID * -*/ unsigned short VNTWIFIwGetAssocID( void *pMgmtHandle ) { PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle; return pMgmt->wCurrAID; } /*+ * * Description: * This routine returns the max supported rate of the IEs * * Parameters: * In: * pSupportRateIEs * pExtSupportRateIEs * * Out: * * Return Value: max support rate * -*/ unsigned char VNTWIFIbyGetMaxSupportRate( PWLAN_IE_SUPP_RATES pSupportRateIEs, PWLAN_IE_SUPP_RATES pExtSupportRateIEs ) { unsigned char byMaxSupportRate = RATE_1M; unsigned char bySupportRate = RATE_1M; unsigned int ii = 0; if (pSupportRateIEs) { for (ii = 0; ii < pSupportRateIEs->len; ii++) { bySupportRate = DATARATEbyGetRateIdx(pSupportRateIEs->abyRates[ii]); if (bySupportRate > byMaxSupportRate) { byMaxSupportRate = bySupportRate; } } } if (pExtSupportRateIEs) { for (ii = 0; ii < pExtSupportRateIEs->len; ii++) { bySupportRate = DATARATEbyGetRateIdx(pExtSupportRateIEs->abyRates[ii]); if (bySupportRate > byMaxSupportRate) { byMaxSupportRate = bySupportRate; } } } return byMaxSupportRate; } /*+ * * Description: * This routine returns the data rate of the ACK packet * * Parameters: * In: * byRxDataRate * pSupportRateIEs * pExtSupportRateIEs * * Out: * * Return Value: ACK Tx rate * -*/ unsigned char VNTWIFIbyGetACKTxRate( unsigned char byRxDataRate, PWLAN_IE_SUPP_RATES pSupportRateIEs, PWLAN_IE_SUPP_RATES pExtSupportRateIEs ) { unsigned char byMaxAckRate; unsigned char byBasicRate; unsigned int ii; if (byRxDataRate <= RATE_11M) { byMaxAckRate = RATE_1M; } else { // 24M is mandatory for 802.11a and 802.11g byMaxAckRate = RATE_24M; } if (pSupportRateIEs) { for (ii = 0; ii < pSupportRateIEs->len; ii++) { if (pSupportRateIEs->abyRates[ii] & 0x80) { byBasicRate = DATARATEbyGetRateIdx(pSupportRateIEs->abyRates[ii]); if ((byBasicRate <= byRxDataRate) && (byBasicRate > byMaxAckRate)) { byMaxAckRate = byBasicRate; } } } } if (pExtSupportRateIEs) { for (ii = 0; ii < pExtSupportRateIEs->len; ii++) { if (pExtSupportRateIEs->abyRates[ii] & 0x80) { byBasicRate = DATARATEbyGetRateIdx(pExtSupportRateIEs->abyRates[ii]); if ((byBasicRate <= byRxDataRate) && (byBasicRate > byMaxAckRate)) { byMaxAckRate = byBasicRate; } } } } return byMaxAckRate; } /*+ * * Description: * Set Authentication Mode * * Parameters: * In: * pMgmtHandle - pointer to management object * eAuthMode - Authentication mode * Out: * none * * Return Value: none * -*/ void VNTWIFIvSetAuthenticationMode( void *pMgmtHandle, WMAC_AUTHENTICATION_MODE eAuthMode ) { PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle; pMgmt->eAuthenMode = eAuthMode; if ((eAuthMode == WMAC_AUTH_SHAREKEY) || (eAuthMode == WMAC_AUTH_AUTO)) { pMgmt->bShareKeyAlgorithm = true; } else { pMgmt->bShareKeyAlgorithm = false; } } /*+ * * Description: * Set Encryption Mode * * Parameters: * In: * pMgmtHandle - pointer to management object * eEncryptionMode - Encryption mode * Out: * none * * Return Value: none * -*/ void VNTWIFIvSetEncryptionMode( void *pMgmtHandle, WMAC_ENCRYPTION_MODE eEncryptionMode ) { PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle; pMgmt->eEncryptionMode = eEncryptionMode; if ((eEncryptionMode == WMAC_ENCRYPTION_WEPEnabled) || (eEncryptionMode == WMAC_ENCRYPTION_TKIPEnabled) || (eEncryptionMode ==
WMAC_ENCRYPTION_AESEnabled)) { pMgmt->bPrivacyInvoked = true; } else { pMgmt->bPrivacyInvoked = false; } } bool VNTWIFIbConfigPhyMode( void *pMgmtHandle, CARD_PHY_TYPE ePhyType ) { PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle; if ((ePhyType != PHY_TYPE_AUTO) && (ePhyType != pMgmt->eCurrentPHYMode)) { if (CARDbSetPhyParameter(pMgmt->pAdapter, ePhyType, 0, 0, NULL, NULL) == true) { pMgmt->eCurrentPHYMode = ePhyType; } else { return false; } } pMgmt->eConfigPHYMode = ePhyType; return true; } void VNTWIFIbGetConfigPhyMode( void *pMgmtHandle, void *pePhyType ) { PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle; if ((pMgmt != NULL) && (pePhyType != NULL)) { *(PCARD_PHY_TYPE)pePhyType = pMgmt->eConfigPHYMode; } } /*+ * * Description: * Clear BSS List Database except current assoc BSS * * Parameters: * In: * pMgmtHandle - Management Object structure * bLinkPass - Current Link status * Out: * * Return Value: None. * -*/ /*+ * * Description: * Query BSS List in management database * * Parameters: * In: * pMgmtHandle - Management Object structure * Out: * puBSSCount - BSS count * pvFirstBSS - pointer to first BSS * * Return Value: None. * -*/ void VNTWIFIvQueryBSSList(void *pMgmtHandle, unsigned int *puBSSCount, void **pvFirstBSS) { unsigned int ii = 0; PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle; PKnownBSS pBSS = NULL; unsigned int uCount = 0; *pvFirstBSS = NULL; for (ii = 0; ii < MAX_BSS_NUM; ii++) { pBSS = &(pMgmt->sBSSList[ii]); if (!pBSS->bActive) { continue; } if (*pvFirstBSS == NULL) { *pvFirstBSS = &(pMgmt->sBSSList[ii]); } uCount++; } *puBSSCount = uCount; } void VNTWIFIvGetNextBSS( void *pMgmtHandle, void *pvCurrentBSS, void **pvNextBSS ) { PKnownBSS pBSS = (PKnownBSS) pvCurrentBSS; PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle; *pvNextBSS = NULL; while (*pvNextBSS == NULL) { pBSS++; if (pBSS > &(pMgmt->sBSSList[MAX_BSS_NUM])) { return; } if (pBSS->bActive == true) { *pvNextBSS = pBSS; return; } } } /*+ * * Description: * Update Tx attempts, Tx failure counter in Node DB * * In: * Out: * none * * Return Value: none * -*/ void VNTWIFIvUpdateNodeTxCounter( void *pMgmtHandle, unsigned char *pbyDestAddress, bool bTxOk, unsigned short wRate, unsigned char *pbyTxFailCount ) { PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle; unsigned int uNodeIndex = 0; unsigned int ii; if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) || (pMgmt->eCurrMode == WMAC_MODE_ESS_AP)) { if (BSSDBbIsSTAInNodeDB(pMgmt, pbyDestAddress, &uNodeIndex) == false) { return; } } pMgmt->sNodeDBTable[uNodeIndex].uTxAttempts++; if (bTxOk) { // transmit success, TxAttempts at least plus one pMgmt->sNodeDBTable[uNodeIndex].uTxOk[MAX_RATE]++; pMgmt->sNodeDBTable[uNodeIndex].uTxOk[wRate]++; } else { pMgmt->sNodeDBTable[uNodeIndex].uTxFailures++; } pMgmt->sNodeDBTable[uNodeIndex].uTxRetry += pbyTxFailCount[MAX_RATE]; for (ii = 0; ii < MAX_RATE; ii++) { pMgmt->sNodeDBTable[uNodeIndex].uTxFail[ii] += pbyTxFailCount[ii]; } return; } void VNTWIFIvGetTxRate( void *pMgmtHandle, unsigned char *pbyDestAddress, unsigned short *pwTxDataRate, unsigned char *pbyACKRate, unsigned char *pbyCCKBasicRate, unsigned char *pbyOFDMBasicRate ) { PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle; unsigned int uNodeIndex = 0; unsigned short wTxDataRate = RATE_1M; unsigned char byACKRate = RATE_1M; unsigned char byCCKBasicRate = RATE_1M; unsigned char byOFDMBasicRate = RATE_24M; PWLAN_IE_SUPP_RATES pSupportRateIEs = NULL; PWLAN_IE_SUPP_RATES pExtSupportRateIEs = NULL; if ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) || (pMgmt->eCurrMode == WMAC_MODE_ESS_AP)) { // Adhoc Tx
rate decided from node DB if (BSSDBbIsSTAInNodeDB(pMgmt, pbyDestAddress, &uNodeIndex)) { wTxDataRate = (pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate); pSupportRateIEs = (PWLAN_IE_SUPP_RATES) (pMgmt->sNodeDBTable[uNodeIndex].abyCurrSuppRates); pExtSupportRateIEs = (PWLAN_IE_SUPP_RATES) (pMgmt->sNodeDBTable[uNodeIndex].abyCurrExtSuppRates); } else { if (pMgmt->eCurrentPHYMode != PHY_TYPE_11A) { wTxDataRate = RATE_2M; } else { wTxDataRate = RATE_24M; } pSupportRateIEs = (PWLAN_IE_SUPP_RATES) pMgmt->abyCurrSuppRates; pExtSupportRateIEs = (PWLAN_IE_SUPP_RATES) pMgmt->abyCurrExtSuppRates; } } else { // Infrastructure: rate decided from AP Node, index = 0 wTxDataRate = (pMgmt->sNodeDBTable[0].wTxDataRate); #ifdef PLICE_DEBUG printk(KERN_DEBUG "GetTxRate:AP MAC is %pM,TxRate is %d\n", pMgmt->sNodeDBTable[0].abyMACAddr, wTxDataRate); #endif pSupportRateIEs = (PWLAN_IE_SUPP_RATES) pMgmt->abyCurrSuppRates; pExtSupportRateIEs = (PWLAN_IE_SUPP_RATES) pMgmt->abyCurrExtSuppRates; } byACKRate = VNTWIFIbyGetACKTxRate((unsigned char) wTxDataRate, pSupportRateIEs, pExtSupportRateIEs ); if (byACKRate > (unsigned char) wTxDataRate) { byACKRate = (unsigned char) wTxDataRate; } byCCKBasicRate = VNTWIFIbyGetACKTxRate(RATE_11M, pSupportRateIEs, pExtSupportRateIEs ); byOFDMBasicRate = VNTWIFIbyGetACKTxRate(RATE_54M, pSupportRateIEs, pExtSupportRateIEs ); *pwTxDataRate = wTxDataRate; *pbyACKRate = byACKRate; *pbyCCKBasicRate = byCCKBasicRate; *pbyOFDMBasicRate = byOFDMBasicRate; return; } unsigned char VNTWIFIbyGetKeyCypher( void *pMgmtHandle, bool bGroupKey ) { PSMgmtObject pMgmt = (PSMgmtObject)pMgmtHandle; if (bGroupKey) { return pMgmt->byCSSGK; } else { return pMgmt->byCSSPK; } } /* bool VNTWIFIbInit( void *pAdapterHandler, void **pMgmtHandler ) { PSMgmtObject pMgmt = NULL; unsigned int ii; pMgmt = (PSMgmtObject)kmalloc(sizeof(SMgmtObject), (int)GFP_ATOMIC); if (pMgmt == NULL) { *pMgmtHandler = NULL; return false; } memset(pMgmt, 0, sizeof(SMgmtObject)); pMgmt->pAdapter = (void *) pAdapterHandler; // should initial MAC address abyMACAddr for (ii=0; ii<WLAN_BSSID_LEN; ii++) { pMgmt->abyDesireBSSID[ii] = 0xFF; } pMgmt->pbyPSPacketPool = &pMgmt->byPSPacketPool[0]; pMgmt->pbyMgmtPacketPool = &pMgmt->byMgmtPacketPool[0]; pMgmt->byCSSPK = KEY_CTL_NONE; pMgmt->byCSSGK = KEY_CTL_NONE; pMgmt->wIBSSBeaconPeriod = DEFAULT_IBSS_BI; pMgmt->cbFreeCmdQueue = CMD_Q_SIZE; pMgmt->uCmdDequeueIdx = 0; pMgmt->uCmdEnqueueIdx = 0; pMgmt->eCommandState = WLAN_CMD_STATE_IDLE; pMgmt->bCmdStop = false; pMgmt->bCmdRunning = false; *pMgmtHandler = pMgmt; return true; } */ bool VNTWIFIbSetPMKIDCache( void *pMgmtObject, unsigned long ulCount, void *pPMKIDInfo ) { PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject; if (ulCount > MAX_PMKID_CACHE) { return false; } pMgmt->gsPMKIDCache.BSSIDInfoCount = ulCount; memcpy(pMgmt->gsPMKIDCache.BSSIDInfo, pPMKIDInfo, (ulCount*sizeof(PMKIDInfo))); return true; } unsigned short VNTWIFIwGetMaxSupportRate( void *pMgmtObject ) { unsigned short wRate = RATE_54M; PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject; for (wRate = RATE_54M; wRate > RATE_1M; wRate--) { if (pMgmt->sNodeDBTable[0].wSuppRate & (1<<wRate)) { return wRate; } } if (pMgmt->eCurrentPHYMode == PHY_TYPE_11A) { return RATE_6M; } else { return RATE_1M; } } void VNTWIFIvSet11h( void *pMgmtObject, bool b11hEnable ) { PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject; pMgmt->b11hEnable = b11hEnable; } bool VNTWIFIbMeasureReport( void *pMgmtObject, bool bEndOfReport, void *pvMeasureEID, unsigned char byReportMode, unsigned char byBasicMap, unsigned char 
byCCAFraction, unsigned char *pbyRPIs ) { PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject; unsigned char *pbyCurrentEID = (unsigned char *)(pMgmt->pCurrMeasureEIDRep); //spin_lock_irq(&pDevice->lock); if ((pvMeasureEID != NULL) && (pMgmt->uLengthOfRepEIDs < (WLAN_A3FR_MAXLEN - sizeof(MEASEURE_REP) - sizeof(WLAN_80211HDR_A3) - 3)) ) { pMgmt->pCurrMeasureEIDRep->byElementID = WLAN_EID_MEASURE_REP; pMgmt->pCurrMeasureEIDRep->len = 3; pMgmt->pCurrMeasureEIDRep->byToken = ((PWLAN_IE_MEASURE_REQ)pvMeasureEID)->byToken; pMgmt->pCurrMeasureEIDRep->byMode = byReportMode; pMgmt->pCurrMeasureEIDRep->byType = ((PWLAN_IE_MEASURE_REQ) pvMeasureEID)->byType; switch (pMgmt->pCurrMeasureEIDRep->byType) { case MEASURE_TYPE_BASIC: pMgmt->pCurrMeasureEIDRep->len += sizeof(MEASEURE_REP_BASIC); memcpy(&(pMgmt->pCurrMeasureEIDRep->sRep.sBasic), &(((PWLAN_IE_MEASURE_REQ) pvMeasureEID)->sReq), sizeof(MEASEURE_REQ)); pMgmt->pCurrMeasureEIDRep->sRep.sBasic.byMap = byBasicMap; break; case MEASURE_TYPE_CCA: pMgmt->pCurrMeasureEIDRep->len += sizeof(MEASEURE_REP_CCA); memcpy(&(pMgmt->pCurrMeasureEIDRep->sRep.sCCA), &(((PWLAN_IE_MEASURE_REQ) pvMeasureEID)->sReq), sizeof(MEASEURE_REQ)); pMgmt->pCurrMeasureEIDRep->sRep.sCCA.byCCABusyFraction = byCCAFraction; break; case MEASURE_TYPE_RPI: pMgmt->pCurrMeasureEIDRep->len += sizeof(MEASEURE_REP_RPI); memcpy(&(pMgmt->pCurrMeasureEIDRep->sRep.sRPI), &(((PWLAN_IE_MEASURE_REQ) pvMeasureEID)->sReq), sizeof(MEASEURE_REQ)); memcpy(pMgmt->pCurrMeasureEIDRep->sRep.sRPI.abyRPIdensity, pbyRPIs, 8); break; default: break; } pbyCurrentEID += (2 + pMgmt->pCurrMeasureEIDRep->len); pMgmt->uLengthOfRepEIDs += (2 + pMgmt->pCurrMeasureEIDRep->len); pMgmt->pCurrMeasureEIDRep = (PWLAN_IE_MEASURE_REP) pbyCurrentEID; } if (bEndOfReport) { IEEE11hbMSRRepTx(pMgmt); } //spin_unlock_irq(&pDevice->lock); return true; } bool VNTWIFIbChannelSwitch( void *pMgmtObject, unsigned char byNewChannel ) { PSMgmtObject pMgmt = (PSMgmtObject) pMgmtObject; //spin_lock_irq(&pDevice->lock); pMgmt->uCurrChannel = byNewChannel; pMgmt->bSwitchChannel = false; //spin_unlock_irq(&pDevice->lock); return true; }
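/*
 * A standalone userspace sketch (simplified assumptions, not driver code)
 * of the ACK-rate rule that VNTWIFIbyGetACKTxRate() above implements:
 * scan the supported-rates IE bytes, where bit 0x80 flags a basic rate
 * and the low seven bits give the rate in 500 kb/s units, and answer
 * with the highest basic rate that does not exceed the received data
 * rate. demo_ack_rate() is hypothetical and works on raw IE bytes
 * instead of the driver's RATE_* indexes.
 */
#include <stdio.h>

static unsigned char demo_ack_rate(unsigned char rx_rate_500k,
				   const unsigned char *rates, int len)
{
	unsigned char best = 2; /* fall back to 1 Mb/s, like RATE_1M */
	int i;

	for (i = 0; i < len; i++) {
		unsigned char r = rates[i] & 0x7f;

		/* a basic rate (0x80 set) no faster than the received frame */
		if ((rates[i] & 0x80) && r <= rx_rate_500k && r > best)
			best = r;
	}
	return best;
}

int main(void)
{
	/* 1, 2, 5.5 and 11 Mb/s; only 1 and 2 are flagged basic. */
	const unsigned char ie[] = { 0x82, 0x84, 0x0b, 0x16 };
	unsigned char ack = demo_ack_rate(22, ie, 4); /* data rate 11 Mb/s */

	/* Expect 2 Mb/s: the highest basic rate not above the data rate. */
	printf("ACK at %d.%d Mb/s\n", ack / 2, (ack & 1) ? 5 : 0);
	return 0;
}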
gpl-2.0
Beeko/android_kernel_samsung_tuna
net/socket.c
298
84204
/* * NET An implementation of the SOCKET network access protocol. * * Version: @(#)socket.c 1.1.93 18/02/95 * * Authors: Orest Zborowski, <obz@Kodak.COM> * Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * * Fixes: * Anonymous : NOTSOCK/BADF cleanup. Error fix in * shutdown() * Alan Cox : verify_area() fixes * Alan Cox : Removed DDI * Jonathan Kamens : SOCK_DGRAM reconnect bug * Alan Cox : Moved a load of checks to the very * top level. * Alan Cox : Move address structures to/from user * mode above the protocol layers. * Rob Janssen : Allow 0 length sends. * Alan Cox : Asynchronous I/O support (cribbed from the * tty drivers). * Niibe Yutaka : Asynchronous I/O for writes (4.4BSD style) * Jeff Uphoff : Made max number of sockets command-line * configurable. * Matti Aarnio : Made the number of sockets dynamic, * to be allocated when needed, and mr. * Uphoff's max is used as max to be * allowed to allocate. * Linus : Argh. removed all the socket allocation * altogether: it's in the inode now. * Alan Cox : Made sock_alloc()/sock_release() public * for NetROM and future kernel nfsd type * stuff. * Alan Cox : sendmsg/recvmsg basics. * Tom Dyas : Export net symbols. * Marcin Dalecki : Fixed problems with CONFIG_NET="n". * Alan Cox : Added thread locking to sys_* calls * for sockets. May have errors at the * moment. * Kevin Buhr : Fixed the dumb errors in the above. * Andi Kleen : Some small cleanups, optimizations, * and fixed a copy_from_user() bug. * Tigran Aivazian : sys_send(args) calls sys_sendto(args, NULL, 0) * Tigran Aivazian : Made listen(2) backlog sanity checks * protocol-independent * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * * This module is effectively the top level interface to the BSD socket * paradigm. 
* * Based upon Swansea University Computer Society NET3.039 */ #include <linux/mm.h> #include <linux/socket.h> #include <linux/file.h> #include <linux/net.h> #include <linux/interrupt.h> #include <linux/thread_info.h> #include <linux/rcupdate.h> #include <linux/netdevice.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/mutex.h> #include <linux/wanrouter.h> #include <linux/if_bridge.h> #include <linux/if_frad.h> #include <linux/if_vlan.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/cache.h> #include <linux/module.h> #include <linux/highmem.h> #include <linux/mount.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/compat.h> #include <linux/kmod.h> #include <linux/audit.h> #include <linux/wireless.h> #include <linux/nsproxy.h> #include <linux/magic.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <asm/unistd.h> #include <net/compat.h> #include <net/wext.h> #include <net/cls_cgroup.h> #include <net/sock.h> #include <linux/netfilter.h> #include <linux/if_tun.h> #include <linux/ipv6_route.h> #include <linux/route.h> #include <linux/sockios.h> #include <linux/atalk.h> static int sock_no_open(struct inode *irrelevant, struct file *dontcare); static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos); static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos); static int sock_mmap(struct file *file, struct vm_area_struct *vma); static int sock_close(struct inode *inode, struct file *file); static unsigned int sock_poll(struct file *file, struct poll_table_struct *wait); static long sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg); #ifdef CONFIG_COMPAT static long compat_sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg); #endif static int sock_fasync(int fd, struct file *filp, int on); static ssize_t sock_sendpage(struct file *file, struct page *page, int offset, size_t size, loff_t *ppos, int more); static ssize_t sock_splice_read(struct file *file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); /* * Socket files have a set of 'special' operations as well as the generic file ones. These don't appear * in the operation structures but are done directly via the socketcall() multiplexor. */ static const struct file_operations socket_file_ops = { .owner = THIS_MODULE, .llseek = no_llseek, .aio_read = sock_aio_read, .aio_write = sock_aio_write, .poll = sock_poll, .unlocked_ioctl = sock_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = compat_sock_ioctl, #endif .mmap = sock_mmap, .open = sock_no_open, /* special open code to disallow open via /proc */ .release = sock_close, .fasync = sock_fasync, .sendpage = sock_sendpage, .splice_write = generic_splice_sendpage, .splice_read = sock_splice_read, }; /* * The protocol list. Each protocol is registered in here. */ static DEFINE_SPINLOCK(net_family_lock); static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly; /* * Statistics counters of the socket lists */ static DEFINE_PER_CPU(int, sockets_in_use); /* * Support routines. * Move socket addresses back and forth across the kernel/user * divide and look after the messy bits. */ /** * move_addr_to_kernel - copy a socket address into kernel space * @uaddr: Address in user space * @kaddr: Address in kernel space * @ulen: Length in user space * * The address is copied into kernel space. 
If the provided address is * too long an error code of -EINVAL is returned. If the copy gives * invalid addresses -EFAULT is returned. On a success 0 is returned. */ int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr *kaddr) { if (ulen < 0 || ulen > sizeof(struct sockaddr_storage)) return -EINVAL; if (ulen == 0) return 0; if (copy_from_user(kaddr, uaddr, ulen)) return -EFAULT; return audit_sockaddr(ulen, kaddr); } /** * move_addr_to_user - copy an address to user space * @kaddr: kernel space address * @klen: length of address in kernel * @uaddr: user space address * @ulen: pointer to user length field * * The value pointed to by ulen on entry is the buffer length available. * This is overwritten with the buffer space used. -EINVAL is returned * if an overlong buffer is specified or a negative buffer size. -EFAULT * is returned if either the buffer or the length field are not * accessible. * After copying the data up to the limit the user specifies, the true * length of the data is written over the length limit the user * specified. Zero is returned for a success. */ static int move_addr_to_user(struct sockaddr *kaddr, int klen, void __user *uaddr, int __user *ulen) { int err; int len; err = get_user(len, ulen); if (err) return err; if (len > klen) len = klen; if (len < 0 || len > sizeof(struct sockaddr_storage)) return -EINVAL; if (len) { if (audit_sockaddr(klen, kaddr)) return -ENOMEM; if (copy_to_user(uaddr, kaddr, len)) return -EFAULT; } /* * "fromlen shall refer to the value before truncation.." * 1003.1g */ return __put_user(klen, ulen); } static struct kmem_cache *sock_inode_cachep __read_mostly; static struct inode *sock_alloc_inode(struct super_block *sb) { struct socket_alloc *ei; struct socket_wq *wq; ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL); if (!ei) return NULL; wq = kmalloc(sizeof(*wq), GFP_KERNEL); if (!wq) { kmem_cache_free(sock_inode_cachep, ei); return NULL; } init_waitqueue_head(&wq->wait); wq->fasync_list = NULL; RCU_INIT_POINTER(ei->socket.wq, wq); ei->socket.state = SS_UNCONNECTED; ei->socket.flags = 0; ei->socket.ops = NULL; ei->socket.sk = NULL; ei->socket.file = NULL; return &ei->vfs_inode; } static void sock_destroy_inode(struct inode *inode) { struct socket_alloc *ei; struct socket_wq *wq; ei = container_of(inode, struct socket_alloc, vfs_inode); wq = rcu_dereference_protected(ei->socket.wq, 1); kfree_rcu(wq, rcu); kmem_cache_free(sock_inode_cachep, ei); } static void init_once(void *foo) { struct socket_alloc *ei = (struct socket_alloc *)foo; inode_init_once(&ei->vfs_inode); } static int init_inodecache(void) { sock_inode_cachep = kmem_cache_create("sock_inode_cache", sizeof(struct socket_alloc), 0, (SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD), init_once); if (sock_inode_cachep == NULL) return -ENOMEM; return 0; } static const struct super_operations sockfs_ops = { .alloc_inode = sock_alloc_inode, .destroy_inode = sock_destroy_inode, .statfs = simple_statfs, }; /* * sockfs_dname() is called from d_path(). 
 */
static char *sockfs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "socket:[%lu]",
				dentry->d_inode->i_ino);
}

static const struct dentry_operations sockfs_dentry_operations = {
	.d_dname = sockfs_dname,
};

static struct dentry *sockfs_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "socket:", &sockfs_ops,
		&sockfs_dentry_operations, SOCKFS_MAGIC);
}

static struct vfsmount *sock_mnt __read_mostly;

static struct file_system_type sock_fs_type = {
	.name =		"sockfs",
	.mount =	sockfs_mount,
	.kill_sb =	kill_anon_super,
};

/*
 *	Obtain the first available file descriptor and set it up for use.
 *
 *	These functions create file structures and map them to the fd space
 *	of the current process. On success they return the file descriptor,
 *	with the file struct implicitly stored in sock->file.
 *	Note that another thread may close the file descriptor before we
 *	return from this function. We rely on the fact that we do not refer
 *	to the socket after mapping. If we ever need to, this function will
 *	have to take an extra reference on the file.
 *
 *	In any case the returned fd may no longer be valid by the time we
 *	return! This race is unavoidable with shared fd tables; we cannot
 *	solve it inside the kernel, but we do keep internal state coherent.
 */

static int sock_alloc_file(struct socket *sock, struct file **f, int flags)
{
	struct qstr name = { .name = "" };
	struct path path;
	struct file *file;
	int fd;

	fd = get_unused_fd_flags(flags);
	if (unlikely(fd < 0))
		return fd;

	path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name);
	if (unlikely(!path.dentry)) {
		put_unused_fd(fd);
		return -ENOMEM;
	}
	path.mnt = mntget(sock_mnt);

	d_instantiate(path.dentry, SOCK_INODE(sock));
	SOCK_INODE(sock)->i_fop = &socket_file_ops;

	file = alloc_file(&path, FMODE_READ | FMODE_WRITE,
		  &socket_file_ops);
	if (unlikely(!file)) {
		/* drop dentry, keep inode */
		ihold(path.dentry->d_inode);
		path_put(&path);
		put_unused_fd(fd);
		return -ENFILE;
	}

	sock->file = file;
	file->f_flags = O_RDWR | (flags & O_NONBLOCK);
	file->f_pos = 0;
	file->private_data = sock;

	*f = file;
	return fd;
}

int sock_map_fd(struct socket *sock, int flags)
{
	struct file *newfile;
	int fd = sock_alloc_file(sock, &newfile, flags);

	if (likely(fd >= 0))
		fd_install(fd, newfile);

	return fd;
}
EXPORT_SYMBOL(sock_map_fd);

static struct socket *sock_from_file(struct file *file, int *err)
{
	if (file->f_op == &socket_file_ops)
		return file->private_data;	/* set in sock_map_fd */

	*err = -ENOTSOCK;
	return NULL;
}

/**
 *	sockfd_lookup - Go from a file number to its socket slot
 *	@fd: file handle
 *	@err: pointer to an error code return
 *
 *	The file handle passed in is locked and the socket it is bound
 *	to is returned. If an error occurs the err pointer is overwritten
 *	with a negative errno code and NULL is returned. The function checks
 *	for both invalid handles and passing a handle which is not a socket.
 *
 *	On a success the socket object pointer is returned.
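 *
 *	A minimal usage sketch (hypothetical caller, handling of the socket
 *	itself elided):
 *
 *		int err;
 *		struct socket *sock = sockfd_lookup(fd, &err);
 *
 *		if (!sock)
 *			return err;
 *		...
 *		sockfd_put(sock);	- drops the file reference taken here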
*/ struct socket *sockfd_lookup(int fd, int *err) { struct file *file; struct socket *sock; file = fget(fd); if (!file) { *err = -EBADF; return NULL; } sock = sock_from_file(file, err); if (!sock) fput(file); return sock; } EXPORT_SYMBOL(sockfd_lookup); static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed) { struct file *file; struct socket *sock; *err = -EBADF; file = fget_light(fd, fput_needed); if (file) { sock = sock_from_file(file, err); if (sock) return sock; fput_light(file, *fput_needed); } return NULL; } /** * sock_alloc - allocate a socket * * Allocate a new inode and socket object. The two are bound together * and initialised. The socket is then returned. If we are out of inodes * NULL is returned. */ static struct socket *sock_alloc(void) { struct inode *inode; struct socket *sock; inode = new_inode(sock_mnt->mnt_sb); if (!inode) return NULL; sock = SOCKET_I(inode); kmemcheck_annotate_bitfield(sock, type); inode->i_ino = get_next_ino(); inode->i_mode = S_IFSOCK | S_IRWXUGO; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); percpu_add(sockets_in_use, 1); return sock; } /* * In theory you can't get an open on this inode, but /proc provides * a back door. Remember to keep it shut otherwise you'll let the * creepy crawlies in. */ static int sock_no_open(struct inode *irrelevant, struct file *dontcare) { return -ENXIO; } const struct file_operations bad_sock_fops = { .owner = THIS_MODULE, .open = sock_no_open, .llseek = noop_llseek, }; /** * sock_release - close a socket * @sock: socket to close * * The socket is released from the protocol stack if it has a release * callback, and the inode is then released if the socket is bound to * an inode not a file. */ void sock_release(struct socket *sock) { if (sock->ops) { struct module *owner = sock->ops->owner; sock->ops->release(sock); sock->ops = NULL; module_put(owner); } if (rcu_dereference_protected(sock->wq, 1)->fasync_list) printk(KERN_ERR "sock_release: fasync list not empty!\n"); percpu_sub(sockets_in_use, 1); if (!sock->file) { iput(SOCK_INODE(sock)); return; } sock->file = NULL; } EXPORT_SYMBOL(sock_release); int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags) { *tx_flags = 0; if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE)) *tx_flags |= SKBTX_HW_TSTAMP; if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE)) *tx_flags |= SKBTX_SW_TSTAMP; return 0; } EXPORT_SYMBOL(sock_tx_timestamp); static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size) { struct sock_iocb *si = kiocb_to_siocb(iocb); sock_update_classid(sock->sk); si->sock = sock; si->scm = NULL; si->msg = msg; si->size = size; return sock->ops->sendmsg(iocb, sock, msg, size); } static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size) { int err = security_socket_sendmsg(sock, msg, size); return err ?: __sock_sendmsg_nosec(iocb, sock, msg, size); } int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) { struct kiocb iocb; struct sock_iocb siocb; int ret; init_sync_kiocb(&iocb, NULL); iocb.private = &siocb; ret = __sock_sendmsg(&iocb, sock, msg, size); if (-EIOCBQUEUED == ret) ret = wait_on_sync_kiocb(&iocb); return ret; } EXPORT_SYMBOL(sock_sendmsg); int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg, size_t size) { struct kiocb iocb; struct sock_iocb siocb; int ret; init_sync_kiocb(&iocb, NULL); iocb.private = &siocb; ret = __sock_sendmsg_nosec(&iocb, sock, msg, size); if (-EIOCBQUEUED == ret) ret = 
wait_on_sync_kiocb(&iocb); return ret; } int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t num, size_t size) { mm_segment_t oldfs = get_fs(); int result; set_fs(KERNEL_DS); /* * the following is safe, since for compiler definitions of kvec and * iovec are identical, yielding the same in-core layout and alignment */ msg->msg_iov = (struct iovec *)vec; msg->msg_iovlen = num; result = sock_sendmsg(sock, msg, size); set_fs(oldfs); return result; } EXPORT_SYMBOL(kernel_sendmsg); static int ktime2ts(ktime_t kt, struct timespec *ts) { if (kt.tv64) { *ts = ktime_to_timespec(kt); return 1; } else { return 0; } } /* * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP) */ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { int need_software_tstamp = sock_flag(sk, SOCK_RCVTSTAMP); struct timespec ts[3]; int empty = 1; struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); /* Race occurred between timestamp enabling and packet receiving. Fill in the current time for now. */ if (need_software_tstamp && skb->tstamp.tv64 == 0) __net_timestamp(skb); if (need_software_tstamp) { if (!sock_flag(sk, SOCK_RCVTSTAMPNS)) { struct timeval tv; skb_get_timestamp(skb, &tv); put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP, sizeof(tv), &tv); } else { skb_get_timestampns(skb, &ts[0]); put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS, sizeof(ts[0]), &ts[0]); } } memset(ts, 0, sizeof(ts)); if (skb->tstamp.tv64 && sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE)) { skb_get_timestampns(skb, ts + 0); empty = 0; } if (shhwtstamps) { if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE) && ktime2ts(shhwtstamps->syststamp, ts + 1)) empty = 0; if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE) && ktime2ts(shhwtstamps->hwtstamp, ts + 2)) empty = 0; } if (!empty) put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING, sizeof(ts), &ts); } EXPORT_SYMBOL_GPL(__sock_recv_timestamp); static inline void sock_recv_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { if (sock_flag(sk, SOCK_RXQ_OVFL) && skb && skb->dropcount) put_cmsg(msg, SOL_SOCKET, SO_RXQ_OVFL, sizeof(__u32), &skb->dropcount); } void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { sock_recv_timestamp(msg, sk, skb); sock_recv_drops(msg, sk, skb); } EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops); static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock_iocb *si = kiocb_to_siocb(iocb); sock_update_classid(sock->sk); si->sock = sock; si->scm = NULL; si->msg = msg; si->size = size; si->flags = flags; return sock->ops->recvmsg(iocb, sock, msg, size, flags); } static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { int err = security_socket_recvmsg(sock, msg, size, flags); return err ?: __sock_recvmsg_nosec(iocb, sock, msg, size, flags); } int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct kiocb iocb; struct sock_iocb siocb; int ret; init_sync_kiocb(&iocb, NULL); iocb.private = &siocb; ret = __sock_recvmsg(&iocb, sock, msg, size, flags); if (-EIOCBQUEUED == ret) ret = wait_on_sync_kiocb(&iocb); return ret; } EXPORT_SYMBOL(sock_recvmsg); static int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct kiocb iocb; struct sock_iocb siocb; int ret; init_sync_kiocb(&iocb, NULL); iocb.private = &siocb; ret = __sock_recvmsg_nosec(&iocb, sock, msg, size, flags); if 
(-EIOCBQUEUED == ret) ret = wait_on_sync_kiocb(&iocb); return ret; } /** * kernel_recvmsg - Receive a message from a socket (kernel space) * @sock: The socket to receive the message from * @msg: Received message * @vec: Input s/g array for message data * @num: Size of input s/g array * @size: Number of bytes to read * @flags: Message flags (MSG_DONTWAIT, etc...) * * On return the msg structure contains the scatter/gather array passed in the * vec argument. The array is modified so that it consists of the unfilled * portion of the original array. * * The returned value is the total number of bytes received, or an error. */ int kernel_recvmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t num, size_t size, int flags) { mm_segment_t oldfs = get_fs(); int result; set_fs(KERNEL_DS); /* * the following is safe, since for compiler definitions of kvec and * iovec are identical, yielding the same in-core layout and alignment */ msg->msg_iov = (struct iovec *)vec, msg->msg_iovlen = num; result = sock_recvmsg(sock, msg, size, flags); set_fs(oldfs); return result; } EXPORT_SYMBOL(kernel_recvmsg); static void sock_aio_dtor(struct kiocb *iocb) { kfree(iocb->private); } static ssize_t sock_sendpage(struct file *file, struct page *page, int offset, size_t size, loff_t *ppos, int more) { struct socket *sock; int flags; sock = file->private_data; flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0; /* more is a combination of MSG_MORE and MSG_SENDPAGE_NOTLAST */ flags |= more; return kernel_sendpage(sock, page, offset, size, flags); } static ssize_t sock_splice_read(struct file *file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct socket *sock = file->private_data; if (unlikely(!sock->ops->splice_read)) return -EINVAL; sock_update_classid(sock->sk); return sock->ops->splice_read(sock, ppos, pipe, len, flags); } static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb, struct sock_iocb *siocb) { if (!is_sync_kiocb(iocb)) { siocb = kmalloc(sizeof(*siocb), GFP_KERNEL); if (!siocb) return NULL; iocb->ki_dtor = sock_aio_dtor; } siocb->kiocb = iocb; iocb->private = siocb; return siocb; } static ssize_t do_sock_read(struct msghdr *msg, struct kiocb *iocb, struct file *file, const struct iovec *iov, unsigned long nr_segs) { struct socket *sock = file->private_data; size_t size = 0; int i; for (i = 0; i < nr_segs; i++) size += iov[i].iov_len; msg->msg_name = NULL; msg->msg_namelen = 0; msg->msg_control = NULL; msg->msg_controllen = 0; msg->msg_iov = (struct iovec *)iov; msg->msg_iovlen = nr_segs; msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0; return __sock_recvmsg(iocb, sock, msg, size, msg->msg_flags); } static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct sock_iocb siocb, *x; if (pos != 0) return -ESPIPE; if (iocb->ki_left == 0) /* Match SYS5 behaviour */ return 0; x = alloc_sock_iocb(iocb, &siocb); if (!x) return -ENOMEM; return do_sock_read(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs); } static ssize_t do_sock_write(struct msghdr *msg, struct kiocb *iocb, struct file *file, const struct iovec *iov, unsigned long nr_segs) { struct socket *sock = file->private_data; size_t size = 0; int i; for (i = 0; i < nr_segs; i++) size += iov[i].iov_len; msg->msg_name = NULL; msg->msg_namelen = 0; msg->msg_control = NULL; msg->msg_controllen = 0; msg->msg_iov = (struct iovec *)iov; msg->msg_iovlen = nr_segs; msg->msg_flags = (file->f_flags & O_NONBLOCK) ? 
MSG_DONTWAIT : 0; if (sock->type == SOCK_SEQPACKET) msg->msg_flags |= MSG_EOR; return __sock_sendmsg(iocb, sock, msg, size); } static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct sock_iocb siocb, *x; if (pos != 0) return -ESPIPE; x = alloc_sock_iocb(iocb, &siocb); if (!x) return -ENOMEM; return do_sock_write(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs); } /* * Atomic setting of ioctl hooks to avoid race * with module unload. */ static DEFINE_MUTEX(br_ioctl_mutex); static int (*br_ioctl_hook) (struct net *, unsigned int cmd, void __user *arg); void brioctl_set(int (*hook) (struct net *, unsigned int, void __user *)) { mutex_lock(&br_ioctl_mutex); br_ioctl_hook = hook; mutex_unlock(&br_ioctl_mutex); } EXPORT_SYMBOL(brioctl_set); static DEFINE_MUTEX(vlan_ioctl_mutex); static int (*vlan_ioctl_hook) (struct net *, void __user *arg); void vlan_ioctl_set(int (*hook) (struct net *, void __user *)) { mutex_lock(&vlan_ioctl_mutex); vlan_ioctl_hook = hook; mutex_unlock(&vlan_ioctl_mutex); } EXPORT_SYMBOL(vlan_ioctl_set); static DEFINE_MUTEX(dlci_ioctl_mutex); static int (*dlci_ioctl_hook) (unsigned int, void __user *); void dlci_ioctl_set(int (*hook) (unsigned int, void __user *)) { mutex_lock(&dlci_ioctl_mutex); dlci_ioctl_hook = hook; mutex_unlock(&dlci_ioctl_mutex); } EXPORT_SYMBOL(dlci_ioctl_set); static long sock_do_ioctl(struct net *net, struct socket *sock, unsigned int cmd, unsigned long arg) { int err; void __user *argp = (void __user *)arg; err = sock->ops->ioctl(sock, cmd, arg); /* * If this ioctl is unknown try to hand it down * to the NIC driver. */ if (err == -ENOIOCTLCMD) err = dev_ioctl(net, cmd, argp); return err; } /* * With an ioctl, arg may well be a user mode pointer, but we don't know * what to do with it - that's up to the protocol still. 
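 *
 * For example, the af_inet ioctl handler interprets arg as a
 * struct timeval __user * for SIOCGSTAMP and copies the last packet
 * timestamp out to it, while anything the protocol does not recognize
 * falls through to dev_ioctl() via sock_do_ioctl() above.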
*/ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) { struct socket *sock; struct sock *sk; void __user *argp = (void __user *)arg; int pid, err; struct net *net; sock = file->private_data; sk = sock->sk; net = sock_net(sk); if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) { err = dev_ioctl(net, cmd, argp); } else #ifdef CONFIG_WEXT_CORE if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) { err = dev_ioctl(net, cmd, argp); } else #endif switch (cmd) { case FIOSETOWN: case SIOCSPGRP: err = -EFAULT; if (get_user(pid, (int __user *)argp)) break; err = f_setown(sock->file, pid, 1); break; case FIOGETOWN: case SIOCGPGRP: err = put_user(f_getown(sock->file), (int __user *)argp); break; case SIOCGIFBR: case SIOCSIFBR: case SIOCBRADDBR: case SIOCBRDELBR: err = -ENOPKG; if (!br_ioctl_hook) request_module("bridge"); mutex_lock(&br_ioctl_mutex); if (br_ioctl_hook) err = br_ioctl_hook(net, cmd, argp); mutex_unlock(&br_ioctl_mutex); break; case SIOCGIFVLAN: case SIOCSIFVLAN: err = -ENOPKG; if (!vlan_ioctl_hook) request_module("8021q"); mutex_lock(&vlan_ioctl_mutex); if (vlan_ioctl_hook) err = vlan_ioctl_hook(net, argp); mutex_unlock(&vlan_ioctl_mutex); break; case SIOCADDDLCI: case SIOCDELDLCI: err = -ENOPKG; if (!dlci_ioctl_hook) request_module("dlci"); mutex_lock(&dlci_ioctl_mutex); if (dlci_ioctl_hook) err = dlci_ioctl_hook(cmd, argp); mutex_unlock(&dlci_ioctl_mutex); break; default: err = sock_do_ioctl(net, sock, cmd, arg); break; } return err; } int sock_create_lite(int family, int type, int protocol, struct socket **res) { int err; struct socket *sock = NULL; err = security_socket_create(family, type, protocol, 1); if (err) goto out; sock = sock_alloc(); if (!sock) { err = -ENOMEM; goto out; } sock->type = type; err = security_socket_post_create(sock, family, type, protocol, 1); if (err) goto out_release; out: *res = sock; return err; out_release: sock_release(sock); sock = NULL; goto out; } EXPORT_SYMBOL(sock_create_lite); /* No kernel lock held - perfect */ static unsigned int sock_poll(struct file *file, poll_table *wait) { struct socket *sock; /* * We can't return errors to poll, so it's either yes or no. */ sock = file->private_data; return sock->ops->poll(file, sock, wait); } static int sock_mmap(struct file *file, struct vm_area_struct *vma) { struct socket *sock = file->private_data; return sock->ops->mmap(file, sock, vma); } static int sock_close(struct inode *inode, struct file *filp) { /* * It was possible the inode is NULL we were * closing an unfinished socket. */ if (!inode) { printk(KERN_DEBUG "sock_close: NULL inode\n"); return 0; } sock_release(SOCKET_I(inode)); return 0; } /* * Update the socket async list * * Fasync_list locking strategy. * * 1. fasync_list is modified only under process context socket lock * i.e. under semaphore. * 2. 
fasync_list is used under read_lock(&sk->sk_callback_lock) * or under socket lock */ static int sock_fasync(int fd, struct file *filp, int on) { struct socket *sock = filp->private_data; struct sock *sk = sock->sk; struct socket_wq *wq; if (sk == NULL) return -EINVAL; lock_sock(sk); wq = rcu_dereference_protected(sock->wq, sock_owned_by_user(sk)); fasync_helper(fd, filp, on, &wq->fasync_list); if (!wq->fasync_list) sock_reset_flag(sk, SOCK_FASYNC); else sock_set_flag(sk, SOCK_FASYNC); release_sock(sk); return 0; } /* This function may be called only under socket lock or callback_lock or rcu_lock */ int sock_wake_async(struct socket *sock, int how, int band) { struct socket_wq *wq; if (!sock) return -1; rcu_read_lock(); wq = rcu_dereference(sock->wq); if (!wq || !wq->fasync_list) { rcu_read_unlock(); return -1; } switch (how) { case SOCK_WAKE_WAITD: if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags)) break; goto call_kill; case SOCK_WAKE_SPACE: if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags)) break; /* fall through */ case SOCK_WAKE_IO: call_kill: kill_fasync(&wq->fasync_list, SIGIO, band); break; case SOCK_WAKE_URG: kill_fasync(&wq->fasync_list, SIGURG, band); } rcu_read_unlock(); return 0; } EXPORT_SYMBOL(sock_wake_async); int __sock_create(struct net *net, int family, int type, int protocol, struct socket **res, int kern) { int err; struct socket *sock; const struct net_proto_family *pf; /* * Check protocol is in range */ if (family < 0 || family >= NPROTO) return -EAFNOSUPPORT; if (type < 0 || type >= SOCK_MAX) return -EINVAL; /* Compatibility. This uglymoron is moved from INET layer to here to avoid deadlock in module load. */ if (family == PF_INET && type == SOCK_PACKET) { static int warned; if (!warned) { warned = 1; printk(KERN_INFO "%s uses obsolete (PF_INET,SOCK_PACKET)\n", current->comm); } family = PF_PACKET; } err = security_socket_create(family, type, protocol, kern); if (err) return err; /* * Allocate the socket and allow the family to set things up. if * the protocol is 0, the family is instructed to select an appropriate * default. */ sock = sock_alloc(); if (!sock) { if (net_ratelimit()) printk(KERN_WARNING "socket: no more sockets\n"); return -ENFILE; /* Not exactly a match, but its the closest posix thing */ } sock->type = type; #ifdef CONFIG_MODULES /* Attempt to load a protocol module if the find failed. * * 12/09/1996 Marcin: But! this makes REALLY only sense, if the user * requested real, full-featured networking support upon configuration. * Otherwise module support will break! */ if (rcu_access_pointer(net_families[family]) == NULL) request_module("net-pf-%d", family); #endif rcu_read_lock(); pf = rcu_dereference(net_families[family]); err = -EAFNOSUPPORT; if (!pf) goto out_release; /* * We will call the ->create function, that possibly is in a loadable * module, so we have to bump that loadable module refcnt first. */ if (!try_module_get(pf->owner)) goto out_release; /* Now protected by module ref count */ rcu_read_unlock(); err = pf->create(net, sock, protocol, kern); if (err < 0) goto out_module_put; /* * Now to bump the refcnt of the [loadable] module that owns this * socket at sock_release time we decrement its refcnt. 
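 *
 * In other words: take a reference on sock->ops->owner here so that the
 * protocol module cannot be unloaded while the socket lives; the matching
 * module_put() is done by sock_release().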
*/ if (!try_module_get(sock->ops->owner)) goto out_module_busy; /* * Now that we're done with the ->create function, the [loadable] * module can have its refcnt decremented */ module_put(pf->owner); err = security_socket_post_create(sock, family, type, protocol, kern); if (err) goto out_sock_release; *res = sock; return 0; out_module_busy: err = -EAFNOSUPPORT; out_module_put: sock->ops = NULL; module_put(pf->owner); out_sock_release: sock_release(sock); return err; out_release: rcu_read_unlock(); goto out_sock_release; } EXPORT_SYMBOL(__sock_create); int sock_create(int family, int type, int protocol, struct socket **res) { return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0); } EXPORT_SYMBOL(sock_create); int sock_create_kern(int family, int type, int protocol, struct socket **res) { return __sock_create(&init_net, family, type, protocol, res, 1); } EXPORT_SYMBOL(sock_create_kern); SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol) { int retval; struct socket *sock; int flags; /* Check the SOCK_* constants for consistency. */ BUILD_BUG_ON(SOCK_CLOEXEC != O_CLOEXEC); BUILD_BUG_ON((SOCK_MAX | SOCK_TYPE_MASK) != SOCK_TYPE_MASK); BUILD_BUG_ON(SOCK_CLOEXEC & SOCK_TYPE_MASK); BUILD_BUG_ON(SOCK_NONBLOCK & SOCK_TYPE_MASK); flags = type & ~SOCK_TYPE_MASK; if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)) return -EINVAL; type &= SOCK_TYPE_MASK; if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK)) flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK; retval = sock_create(family, type, protocol, &sock); if (retval < 0) goto out; retval = sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK)); if (retval < 0) goto out_release; out: /* It may be already another descriptor 8) Not kernel problem. */ return retval; out_release: sock_release(sock); return retval; } /* * Create a pair of connected sockets. */ SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol, int __user *, usockvec) { struct socket *sock1, *sock2; int fd1, fd2, err; struct file *newfile1, *newfile2; int flags; flags = type & ~SOCK_TYPE_MASK; if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)) return -EINVAL; type &= SOCK_TYPE_MASK; if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK)) flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK; /* * Obtain the first socket and check if the underlying protocol * supports the socketpair call. */ err = sock_create(family, type, protocol, &sock1); if (err < 0) goto out; err = sock_create(family, type, protocol, &sock2); if (err < 0) goto out_release_1; err = sock1->ops->socketpair(sock1, sock2); if (err < 0) goto out_release_both; fd1 = sock_alloc_file(sock1, &newfile1, flags); if (unlikely(fd1 < 0)) { err = fd1; goto out_release_both; } fd2 = sock_alloc_file(sock2, &newfile2, flags); if (unlikely(fd2 < 0)) { err = fd2; fput(newfile1); put_unused_fd(fd1); sock_release(sock2); goto out; } audit_fd_pair(fd1, fd2); fd_install(fd1, newfile1); fd_install(fd2, newfile2); /* fd1 and fd2 may be already another descriptors. * Not kernel problem. */ err = put_user(fd1, &usockvec[0]); if (!err) err = put_user(fd2, &usockvec[1]); if (!err) return 0; sys_close(fd2); sys_close(fd1); return err; out_release_both: sock_release(sock2); out_release_1: sock_release(sock1); out: return err; } /* * Bind a name to a socket. Nothing much to do here since it's * the protocol's responsibility to handle the local address. * * We move the socket address to kernel space before we call * the protocol layer (having also checked the address is ok). 
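 *
 * Illustrative userspace counterpart of this path (address family and
 * port are invented for the example):
 *
 *	struct sockaddr_in sa = { .sin_family = AF_INET,
 *				  .sin_port = htons(8080) };
 *
 *	if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
 *		perror("bind");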
 */

SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int err, fput_needed;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock) {
		err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
		if (err >= 0) {
			err = security_socket_bind(sock,
						   (struct sockaddr *)&address,
						   addrlen);
			if (!err)
				err = sock->ops->bind(sock,
						      (struct sockaddr *)
						      &address, addrlen);
		}
		fput_light(sock->file, fput_needed);
	}
	return err;
}

/*
 *	Perform a listen. Basically, we allow the protocol to do anything
 *	necessary for a listen, and if that works, we mark the socket as
 *	ready for listening.
 */

SYSCALL_DEFINE2(listen, int, fd, int, backlog)
{
	struct socket *sock;
	int err, fput_needed;
	int somaxconn;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock) {
		somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
		if ((unsigned)backlog > somaxconn)
			backlog = somaxconn;

		err = security_socket_listen(sock, backlog);
		if (!err)
			err = sock->ops->listen(sock, backlog);

		fput_light(sock->file, fput_needed);
	}
	return err;
}

/*
 *	For accept, we attempt to create a new socket, set up the link
 *	with the client, wake up the client, then return the new
 *	connected fd. We collect the address of the connector in kernel
 *	space and move it to user at the very end. This is unclean because
 *	we open the socket then return an error.
 *
 *	1003.1g adds the ability to recvmsg() to query connection pending
 *	status to recvmsg. We need to add that support in a way that's
 *	clean when we restructure accept also.
 */

SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
		int __user *, upeer_addrlen, int, flags)
{
	struct socket *sock, *newsock;
	struct file *newfile;
	int err, len, newfd, fput_needed;
	struct sockaddr_storage address;

	if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;

	if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
		flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	err = -ENFILE;
	newsock = sock_alloc();
	if (!newsock)
		goto out_put;

	newsock->type = sock->type;
	newsock->ops = sock->ops;

	/*
	 * We don't need try_module_get here, as the listening socket (sock)
	 * has the protocol module (sock->ops->owner) held.
	 */
	__module_get(newsock->ops->owner);

	newfd = sock_alloc_file(newsock, &newfile, flags);
	if (unlikely(newfd < 0)) {
		err = newfd;
		sock_release(newsock);
		goto out_put;
	}

	err = security_socket_accept(sock, newsock);
	if (err)
		goto out_fd;

	err = sock->ops->accept(sock, newsock, sock->file->f_flags);
	if (err < 0)
		goto out_fd;

	if (upeer_sockaddr) {
		if (newsock->ops->getname(newsock, (struct sockaddr *)&address,
					  &len, 2) < 0) {
			err = -ECONNABORTED;
			goto out_fd;
		}
		err = move_addr_to_user((struct sockaddr *)&address,
					len, upeer_sockaddr, upeer_addrlen);
		if (err < 0)
			goto out_fd;
	}

	/* File flags are not inherited via accept(), unlike on other OSes. */

	fd_install(newfd, newfile);
	err = newfd;

out_put:
	fput_light(sock->file, fput_needed);
out:
	return err;
out_fd:
	fput(newfile);
	put_unused_fd(newfd);
	goto out_put;
}

SYSCALL_DEFINE3(accept, int, fd, struct sockaddr __user *, upeer_sockaddr,
		int __user *, upeer_addrlen)
{
	return sys_accept4(fd, upeer_sockaddr, upeer_addrlen, 0);
}

/*
 *	Attempt to connect to a socket with the server address. The address
 *	is in user space so we verify it is OK and move it to kernel space.
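 *
 *	Note that a non-blocking connect() commonly gets -EINPROGRESS back
 *	from the protocol layer; an illustrative userspace pattern (sketch
 *	only):
 *
 *		if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0 &&
 *		    errno == EINPROGRESS) {
 *			- wait for POLLOUT, then read SO_ERROR via getsockopt()
 *		}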
* * For 1003.1g we need to add clean support for a bind to AF_UNSPEC to * break bindings * * NOTE: 1003.1g draft 6.3 is broken with respect to AX.25/NetROM and * other SEQPACKET protocols that take time to connect() as it doesn't * include the -EINPROGRESS status for such sockets. */ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr, int, addrlen) { struct socket *sock; struct sockaddr_storage address; int err, fput_needed; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; err = move_addr_to_kernel(uservaddr, addrlen, (struct sockaddr *)&address); if (err < 0) goto out_put; err = security_socket_connect(sock, (struct sockaddr *)&address, addrlen); if (err) goto out_put; err = sock->ops->connect(sock, (struct sockaddr *)&address, addrlen, sock->file->f_flags); out_put: fput_light(sock->file, fput_needed); out: return err; } /* * Get the local address ('name') of a socket object. Move the obtained * name to user space. */ SYSCALL_DEFINE3(getsockname, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len) { struct socket *sock; struct sockaddr_storage address; int len, err, fput_needed; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; err = security_socket_getsockname(sock); if (err) goto out_put; err = sock->ops->getname(sock, (struct sockaddr *)&address, &len, 0); if (err) goto out_put; err = move_addr_to_user((struct sockaddr *)&address, len, usockaddr, usockaddr_len); out_put: fput_light(sock->file, fput_needed); out: return err; } /* * Get the remote address ('name') of a socket object. Move the obtained * name to user space. */ SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len) { struct socket *sock; struct sockaddr_storage address; int len, err, fput_needed; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock != NULL) { err = security_socket_getpeername(sock); if (err) { fput_light(sock->file, fput_needed); return err; } err = sock->ops->getname(sock, (struct sockaddr *)&address, &len, 1); if (!err) err = move_addr_to_user((struct sockaddr *)&address, len, usockaddr, usockaddr_len); fput_light(sock->file, fput_needed); } return err; } /* * Send a datagram to a given address. We move the address into kernel * space and check the user space data area is readable before invoking * the protocol. */ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len, unsigned, flags, struct sockaddr __user *, addr, int, addr_len) { struct socket *sock; struct sockaddr_storage address; int err; struct msghdr msg; struct iovec iov; int fput_needed; if (len > INT_MAX) len = INT_MAX; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; iov.iov_base = buff; iov.iov_len = len; msg.msg_name = NULL; msg.msg_iov = &iov; msg.msg_iovlen = 1; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_namelen = 0; if (addr) { err = move_addr_to_kernel(addr, addr_len, (struct sockaddr *)&address); if (err < 0) goto out_put; msg.msg_name = (struct sockaddr *)&address; msg.msg_namelen = addr_len; } if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; msg.msg_flags = flags; err = sock_sendmsg(sock, &msg, len); out_put: fput_light(sock->file, fput_needed); out: return err; } /* * Send a datagram down a socket. */ SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len, unsigned, flags) { return sys_sendto(fd, buff, len, flags, NULL, 0); } /* * Receive a frame from the socket and optionally record the address of the * sender. 
We verify the buffers are writable and if needed move the * sender address from kernel to user space. */ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, unsigned, flags, struct sockaddr __user *, addr, int __user *, addr_len) { struct socket *sock; struct iovec iov; struct msghdr msg; struct sockaddr_storage address; int err, err2; int fput_needed; if (size > INT_MAX) size = INT_MAX; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_iovlen = 1; msg.msg_iov = &iov; iov.iov_len = size; iov.iov_base = ubuf; msg.msg_name = (struct sockaddr *)&address; msg.msg_namelen = sizeof(address); if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; err = sock_recvmsg(sock, &msg, size, flags); if (err >= 0 && addr != NULL) { err2 = move_addr_to_user((struct sockaddr *)&address, msg.msg_namelen, addr, addr_len); if (err2 < 0) err = err2; } fput_light(sock->file, fput_needed); out: return err; } /* * Receive a datagram from a socket. */ asmlinkage long sys_recv(int fd, void __user *ubuf, size_t size, unsigned flags) { return sys_recvfrom(fd, ubuf, size, flags, NULL, NULL); } /* * Set a socket option. Because we don't know the option lengths we have * to pass the user mode parameter for the protocols to sort out. */ SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname, char __user *, optval, int, optlen) { int err, fput_needed; struct socket *sock; if (optlen < 0) return -EINVAL; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock != NULL) { err = security_socket_setsockopt(sock, level, optname); if (err) goto out_put; if (level == SOL_SOCKET) err = sock_setsockopt(sock, level, optname, optval, optlen); else err = sock->ops->setsockopt(sock, level, optname, optval, optlen); out_put: fput_light(sock->file, fput_needed); } return err; } /* * Get a socket option. Because we don't know the option lengths we have * to pass a user mode parameter for the protocols to sort out. */ SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname, char __user *, optval, int __user *, optlen) { int err, fput_needed; struct socket *sock; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock != NULL) { err = security_socket_getsockopt(sock, level, optname); if (err) goto out_put; if (level == SOL_SOCKET) err = sock_getsockopt(sock, level, optname, optval, optlen); else err = sock->ops->getsockopt(sock, level, optname, optval, optlen); out_put: fput_light(sock->file, fput_needed); } return err; } /* * Shutdown a socket. */ SYSCALL_DEFINE2(shutdown, int, fd, int, how) { int err, fput_needed; struct socket *sock; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (sock != NULL) { err = security_socket_shutdown(sock, how); if (!err) err = sock->ops->shutdown(sock, how); fput_light(sock->file, fput_needed); } return err; } /* A couple of helpful macros for getting the address of the 32/64 bit * fields which are the same type (int / unsigned) on our platforms. */ #define COMPAT_MSG(msg, member) ((MSG_CMSG_COMPAT & flags) ? 
&msg##_compat->member : &msg->member) #define COMPAT_NAMELEN(msg) COMPAT_MSG(msg, msg_namelen) #define COMPAT_FLAGS(msg) COMPAT_MSG(msg, msg_flags) struct used_address { struct sockaddr_storage name; unsigned int name_len; }; static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg, struct msghdr *msg_sys, unsigned flags, struct used_address *used_address) { struct compat_msghdr __user *msg_compat = (struct compat_msghdr __user *)msg; struct sockaddr_storage address; struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; unsigned char ctl[sizeof(struct cmsghdr) + 20] __attribute__ ((aligned(sizeof(__kernel_size_t)))); /* 20 is size of ipv6_pktinfo */ unsigned char *ctl_buf = ctl; int err, ctl_len, iov_size, total_len; err = -EFAULT; if (MSG_CMSG_COMPAT & flags) { if (get_compat_msghdr(msg_sys, msg_compat)) return -EFAULT; } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) return -EFAULT; /* do not move before msg_sys is valid */ err = -EMSGSIZE; if (msg_sys->msg_iovlen > UIO_MAXIOV) goto out; /* Check whether to allocate the iovec area */ err = -ENOMEM; iov_size = msg_sys->msg_iovlen * sizeof(struct iovec); if (msg_sys->msg_iovlen > UIO_FASTIOV) { iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL); if (!iov) goto out; } /* This will also move the address data into kernel space */ if (MSG_CMSG_COMPAT & flags) { err = verify_compat_iovec(msg_sys, iov, (struct sockaddr *)&address, VERIFY_READ); } else err = verify_iovec(msg_sys, iov, (struct sockaddr *)&address, VERIFY_READ); if (err < 0) goto out_freeiov; total_len = err; err = -ENOBUFS; if (msg_sys->msg_controllen > INT_MAX) goto out_freeiov; ctl_len = msg_sys->msg_controllen; if ((MSG_CMSG_COMPAT & flags) && ctl_len) { err = cmsghdr_from_user_compat_to_kern(msg_sys, sock->sk, ctl, sizeof(ctl)); if (err) goto out_freeiov; ctl_buf = msg_sys->msg_control; ctl_len = msg_sys->msg_controllen; } else if (ctl_len) { if (ctl_len > sizeof(ctl)) { ctl_buf = sock_kmalloc(sock->sk, ctl_len, GFP_KERNEL); if (ctl_buf == NULL) goto out_freeiov; } err = -EFAULT; /* * Careful! Before this, msg_sys->msg_control contains a user pointer. * Afterwards, it will be a kernel pointer. Thus the compiler-assisted * checking falls down on this. */ if (copy_from_user(ctl_buf, (void __user __force *)msg_sys->msg_control, ctl_len)) goto out_freectl; msg_sys->msg_control = ctl_buf; } msg_sys->msg_flags = flags; if (sock->file->f_flags & O_NONBLOCK) msg_sys->msg_flags |= MSG_DONTWAIT; /* * If this is sendmmsg() and current destination address is same as * previously succeeded address, omit asking LSM's decision. * used_address->name_len is initialized to UINT_MAX so that the first * destination address never matches. */ if (used_address && msg_sys->msg_name && used_address->name_len == msg_sys->msg_namelen && !memcmp(&used_address->name, msg_sys->msg_name, used_address->name_len)) { err = sock_sendmsg_nosec(sock, msg_sys, total_len); goto out_freectl; } err = sock_sendmsg(sock, msg_sys, total_len); /* * If this is sendmmsg() and sending to current destination address was * successful, remember it. 
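 *
 * used_address is only non-NULL on the sendmmsg() path; plain sendmsg()
 * passes NULL and therefore always takes the full security check above.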
*/ if (used_address && err >= 0) { used_address->name_len = msg_sys->msg_namelen; if (msg_sys->msg_name) memcpy(&used_address->name, msg_sys->msg_name, used_address->name_len); } out_freectl: if (ctl_buf != ctl) sock_kfree_s(sock->sk, ctl_buf, ctl_len); out_freeiov: if (iov != iovstack) sock_kfree_s(sock->sk, iov, iov_size); out: return err; } /* * BSD sendmsg interface */ long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags) { int fput_needed, err; struct msghdr msg_sys; struct socket *sock; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; err = ___sys_sendmsg(sock, msg, &msg_sys, flags, NULL); fput_light(sock->file, fput_needed); out: return err; } SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags) { if (flags & MSG_CMSG_COMPAT) return -EINVAL; return __sys_sendmsg(fd, msg, flags); } /* * Linux sendmmsg interface */ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags) { int fput_needed, err, datagrams; struct socket *sock; struct mmsghdr __user *entry; struct compat_mmsghdr __user *compat_entry; struct msghdr msg_sys; struct used_address used_address; if (vlen > UIO_MAXIOV) vlen = UIO_MAXIOV; datagrams = 0; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) return err; used_address.name_len = UINT_MAX; entry = mmsg; compat_entry = (struct compat_mmsghdr __user *)mmsg; err = 0; while (datagrams < vlen) { if (MSG_CMSG_COMPAT & flags) { err = ___sys_sendmsg(sock, (struct msghdr __user *)compat_entry, &msg_sys, flags, &used_address); if (err < 0) break; err = __put_user(err, &compat_entry->msg_len); ++compat_entry; } else { err = ___sys_sendmsg(sock, (struct msghdr __user *)entry, &msg_sys, flags, &used_address); if (err < 0) break; err = put_user(err, &entry->msg_len); ++entry; } if (err) break; ++datagrams; } fput_light(sock->file, fput_needed); /* We only return an error if no datagrams were able to be sent */ if (datagrams != 0) return datagrams; return err; } SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg, unsigned int, vlen, unsigned int, flags) { if (flags & MSG_CMSG_COMPAT) return -EINVAL; return __sys_sendmmsg(fd, mmsg, vlen, flags); } static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg, struct msghdr *msg_sys, unsigned flags, int nosec) { struct compat_msghdr __user *msg_compat = (struct compat_msghdr __user *)msg; struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov = iovstack; unsigned long cmsg_ptr; int err, iov_size, total_len, len; /* kernel mode address */ struct sockaddr_storage addr; /* user mode address pointers */ struct sockaddr __user *uaddr; int __user *uaddr_len; if (MSG_CMSG_COMPAT & flags) { if (get_compat_msghdr(msg_sys, msg_compat)) return -EFAULT; } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) return -EFAULT; err = -EMSGSIZE; if (msg_sys->msg_iovlen > UIO_MAXIOV) goto out; /* Check whether to allocate the iovec area */ err = -ENOMEM; iov_size = msg_sys->msg_iovlen * sizeof(struct iovec); if (msg_sys->msg_iovlen > UIO_FASTIOV) { iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL); if (!iov) goto out; } /* * Save the user-mode address (verify_iovec will change the * kernel msghdr to use the kernel address space) */ uaddr = (__force void __user *)msg_sys->msg_name; uaddr_len = COMPAT_NAMELEN(msg); if (MSG_CMSG_COMPAT & flags) { err = verify_compat_iovec(msg_sys, iov, (struct sockaddr *)&addr, VERIFY_WRITE); } else err = verify_iovec(msg_sys, iov, (struct sockaddr *)&addr, VERIFY_WRITE); 
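	/*
	 * On success verify_compat_iovec()/verify_iovec() return the total
	 * number of payload bytes described by the iovec; they also rewrite
	 * msg_sys->msg_name to point at the kernel copy of the address.
	 */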
if (err < 0) goto out_freeiov; total_len = err; cmsg_ptr = (unsigned long)msg_sys->msg_control; msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT); if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; err = (nosec ? sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys, total_len, flags); if (err < 0) goto out_freeiov; len = err; if (uaddr != NULL) { err = move_addr_to_user((struct sockaddr *)&addr, msg_sys->msg_namelen, uaddr, uaddr_len); if (err < 0) goto out_freeiov; } err = __put_user((msg_sys->msg_flags & ~MSG_CMSG_COMPAT), COMPAT_FLAGS(msg)); if (err) goto out_freeiov; if (MSG_CMSG_COMPAT & flags) err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr, &msg_compat->msg_controllen); else err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr, &msg->msg_controllen); if (err) goto out_freeiov; err = len; out_freeiov: if (iov != iovstack) sock_kfree_s(sock->sk, iov, iov_size); out: return err; } /* * BSD recvmsg interface */ long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags) { int fput_needed, err; struct msghdr msg_sys; struct socket *sock; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; err = ___sys_recvmsg(sock, msg, &msg_sys, flags, 0); fput_light(sock->file, fput_needed); out: return err; } SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags) { if (flags & MSG_CMSG_COMPAT) return -EINVAL; return __sys_recvmsg(fd, msg, flags); } /* * Linux recvmmsg interface */ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags, struct timespec *timeout) { int fput_needed, err, datagrams; struct socket *sock; struct mmsghdr __user *entry; struct compat_mmsghdr __user *compat_entry; struct msghdr msg_sys; struct timespec end_time; if (timeout && poll_select_set_timeout(&end_time, timeout->tv_sec, timeout->tv_nsec)) return -EINVAL; datagrams = 0; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) return err; err = sock_error(sock->sk); if (err) goto out_put; entry = mmsg; compat_entry = (struct compat_mmsghdr __user *)mmsg; while (datagrams < vlen) { /* * No need to ask LSM for more than the first datagram. */ if (MSG_CMSG_COMPAT & flags) { err = ___sys_recvmsg(sock, (struct msghdr __user *)compat_entry, &msg_sys, flags & ~MSG_WAITFORONE, datagrams); if (err < 0) break; err = __put_user(err, &compat_entry->msg_len); ++compat_entry; } else { err = ___sys_recvmsg(sock, (struct msghdr __user *)entry, &msg_sys, flags & ~MSG_WAITFORONE, datagrams); if (err < 0) break; err = put_user(err, &entry->msg_len); ++entry; } if (err) break; ++datagrams; /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */ if (flags & MSG_WAITFORONE) flags |= MSG_DONTWAIT; if (timeout) { ktime_get_ts(timeout); *timeout = timespec_sub(end_time, *timeout); if (timeout->tv_sec < 0) { timeout->tv_sec = timeout->tv_nsec = 0; break; } /* Timeout, return less than vlen datagrams */ if (timeout->tv_nsec == 0 && timeout->tv_sec == 0) break; } /* Out of band data, return right away */ if (msg_sys.msg_flags & MSG_OOB) break; } out_put: fput_light(sock->file, fput_needed); if (err == 0) return datagrams; if (datagrams != 0) { /* * We may return less entries than requested (vlen) if the * sock is non block and there aren't enough datagrams... */ if (err != -EAGAIN) { /* * ... or if recvmsg returns an error after we * received some datagrams, where we record the * error to return on the next call or if the * app asks about it using getsockopt(SO_ERROR). 
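 *
 * Illustrative userspace follow-up to retrieve that saved error (sketch
 * only):
 *
 *	int soerr;
 *	socklen_t slen = sizeof(soerr);
 *	getsockopt(fd, SOL_SOCKET, SO_ERROR, &soerr, &slen);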
*/ sock->sk->sk_err = -err; } return datagrams; } return err; } SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg, unsigned int, vlen, unsigned int, flags, struct timespec __user *, timeout) { int datagrams; struct timespec timeout_sys; if (flags & MSG_CMSG_COMPAT) return -EINVAL; if (!timeout) return __sys_recvmmsg(fd, mmsg, vlen, flags, NULL); if (copy_from_user(&timeout_sys, timeout, sizeof(timeout_sys))) return -EFAULT; datagrams = __sys_recvmmsg(fd, mmsg, vlen, flags, &timeout_sys); if (datagrams > 0 && copy_to_user(timeout, &timeout_sys, sizeof(timeout_sys))) datagrams = -EFAULT; return datagrams; } #ifdef __ARCH_WANT_SYS_SOCKETCALL /* Argument list sizes for sys_socketcall */ #define AL(x) ((x) * sizeof(unsigned long)) static const unsigned char nargs[21] = { AL(0), AL(3), AL(3), AL(3), AL(2), AL(3), AL(3), AL(3), AL(4), AL(4), AL(4), AL(6), AL(6), AL(2), AL(5), AL(5), AL(3), AL(3), AL(4), AL(5), AL(4) }; #undef AL /* * System call vectors. * * Argument checking cleaned up. Saved 20% in size. * This function doesn't need to set the kernel lock because * it is set by the callees. */ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args) { unsigned long a[6]; unsigned long a0, a1; int err; unsigned int len; if (call < 1 || call > SYS_SENDMMSG) return -EINVAL; len = nargs[call]; if (len > sizeof(a)) return -EINVAL; /* copy_from_user should be SMP safe. */ if (copy_from_user(a, args, len)) return -EFAULT; audit_socketcall(nargs[call] / sizeof(unsigned long), a); a0 = a[0]; a1 = a[1]; switch (call) { case SYS_SOCKET: err = sys_socket(a0, a1, a[2]); break; case SYS_BIND: err = sys_bind(a0, (struct sockaddr __user *)a1, a[2]); break; case SYS_CONNECT: err = sys_connect(a0, (struct sockaddr __user *)a1, a[2]); break; case SYS_LISTEN: err = sys_listen(a0, a1); break; case SYS_ACCEPT: err = sys_accept4(a0, (struct sockaddr __user *)a1, (int __user *)a[2], 0); break; case SYS_GETSOCKNAME: err = sys_getsockname(a0, (struct sockaddr __user *)a1, (int __user *)a[2]); break; case SYS_GETPEERNAME: err = sys_getpeername(a0, (struct sockaddr __user *)a1, (int __user *)a[2]); break; case SYS_SOCKETPAIR: err = sys_socketpair(a0, a1, a[2], (int __user *)a[3]); break; case SYS_SEND: err = sys_send(a0, (void __user *)a1, a[2], a[3]); break; case SYS_SENDTO: err = sys_sendto(a0, (void __user *)a1, a[2], a[3], (struct sockaddr __user *)a[4], a[5]); break; case SYS_RECV: err = sys_recv(a0, (void __user *)a1, a[2], a[3]); break; case SYS_RECVFROM: err = sys_recvfrom(a0, (void __user *)a1, a[2], a[3], (struct sockaddr __user *)a[4], (int __user *)a[5]); break; case SYS_SHUTDOWN: err = sys_shutdown(a0, a1); break; case SYS_SETSOCKOPT: err = sys_setsockopt(a0, a1, a[2], (char __user *)a[3], a[4]); break; case SYS_GETSOCKOPT: err = sys_getsockopt(a0, a1, a[2], (char __user *)a[3], (int __user *)a[4]); break; case SYS_SENDMSG: err = sys_sendmsg(a0, (struct msghdr __user *)a1, a[2]); break; case SYS_SENDMMSG: err = sys_sendmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3]); break; case SYS_RECVMSG: err = sys_recvmsg(a0, (struct msghdr __user *)a1, a[2]); break; case SYS_RECVMMSG: err = sys_recvmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3], (struct timespec __user *)a[4]); break; case SYS_ACCEPT4: err = sys_accept4(a0, (struct sockaddr __user *)a1, (int __user *)a[2], a[3]); break; default: err = -EINVAL; break; } return err; } #endif /* __ARCH_WANT_SYS_SOCKETCALL */ /** * sock_register - add a socket protocol handler * @ops: description of protocol * * This function is called by a 
protocol handler that wants to
 *	advertise its address family, and have it linked into the
 *	socket interface. The value ops->family corresponds to the
 *	socket system call protocol family.
 */
int sock_register(const struct net_proto_family *ops)
{
	int err;

	if (ops->family >= NPROTO) {
		printk(KERN_CRIT "protocol %d >= NPROTO(%d)\n", ops->family,
		       NPROTO);
		return -ENOBUFS;
	}

	spin_lock(&net_family_lock);
	if (rcu_dereference_protected(net_families[ops->family],
				      lockdep_is_held(&net_family_lock)))
		err = -EEXIST;
	else {
		rcu_assign_pointer(net_families[ops->family], ops);
		err = 0;
	}
	spin_unlock(&net_family_lock);

	printk(KERN_INFO "NET: Registered protocol family %d\n", ops->family);
	return err;
}
EXPORT_SYMBOL(sock_register);

/**
 *	sock_unregister - remove a protocol handler
 *	@family: protocol family to remove
 *
 *	This function is called by a protocol handler that wants to
 *	remove its address family, and have it unlinked from
 *	new socket creation.
 *
 *	If the protocol handler is a module, then it can use module reference
 *	counts to protect against new references. If the protocol handler is
 *	not a module then it needs to provide its own protection in
 *	the ops->create routine.
 */
void sock_unregister(int family)
{
	BUG_ON(family < 0 || family >= NPROTO);

	spin_lock(&net_family_lock);
	rcu_assign_pointer(net_families[family], NULL);
	spin_unlock(&net_family_lock);

	synchronize_rcu();

	printk(KERN_INFO "NET: Unregistered protocol family %d\n", family);
}
EXPORT_SYMBOL(sock_unregister);

static int __init sock_init(void)
{
	int err;

	/*
	 *      Initialize sock SLAB cache.
	 */
	sk_init();

	/*
	 *      Initialize skbuff SLAB cache.
	 */
	skb_init();

	/*
	 *      Initialize the protocols module.
	 */
	init_inodecache();

	err = register_filesystem(&sock_fs_type);
	if (err)
		goto out_fs;
	sock_mnt = kern_mount(&sock_fs_type);
	if (IS_ERR(sock_mnt)) {
		err = PTR_ERR(sock_mnt);
		goto out_mount;
	}

	/* The real protocol initialization is performed in later initcalls. */

#ifdef CONFIG_NETFILTER
	netfilter_init();
#endif

#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
	skb_timestamping_init();
#endif

out:
	return err;

out_mount:
	unregister_filesystem(&sock_fs_type);
out_fs:
	goto out;
}

core_initcall(sock_init);	/* early initcall */

#ifdef CONFIG_PROC_FS
void socket_seq_show(struct seq_file *seq)
{
	int cpu;
	int counter = 0;

	for_each_possible_cpu(cpu)
	    counter += per_cpu(sockets_in_use, cpu);

	/* It can be negative, by the way.
8) */ if (counter < 0) counter = 0; seq_printf(seq, "sockets: used %d\n", counter); } #endif /* CONFIG_PROC_FS */ #ifdef CONFIG_COMPAT static int do_siocgstamp(struct net *net, struct socket *sock, unsigned int cmd, struct compat_timeval __user *up) { mm_segment_t old_fs = get_fs(); struct timeval ktv; int err; set_fs(KERNEL_DS); err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); set_fs(old_fs); if (!err) { err = put_user(ktv.tv_sec, &up->tv_sec); err |= __put_user(ktv.tv_usec, &up->tv_usec); } return err; } static int do_siocgstampns(struct net *net, struct socket *sock, unsigned int cmd, struct compat_timespec __user *up) { mm_segment_t old_fs = get_fs(); struct timespec kts; int err; set_fs(KERNEL_DS); err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); set_fs(old_fs); if (!err) { err = put_user(kts.tv_sec, &up->tv_sec); err |= __put_user(kts.tv_nsec, &up->tv_nsec); } return err; } static int dev_ifname32(struct net *net, struct compat_ifreq __user *uifr32) { struct ifreq __user *uifr; int err; uifr = compat_alloc_user_space(sizeof(struct ifreq)); if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) return -EFAULT; err = dev_ioctl(net, SIOCGIFNAME, uifr); if (err) return err; if (copy_in_user(uifr32, uifr, sizeof(struct compat_ifreq))) return -EFAULT; return 0; } static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32) { struct compat_ifconf ifc32; struct ifconf ifc; struct ifconf __user *uifc; struct compat_ifreq __user *ifr32; struct ifreq __user *ifr; unsigned int i, j; int err; if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf))) return -EFAULT; memset(&ifc, 0, sizeof(ifc)); if (ifc32.ifcbuf == 0) { ifc32.ifc_len = 0; ifc.ifc_len = 0; ifc.ifc_req = NULL; uifc = compat_alloc_user_space(sizeof(struct ifconf)); } else { size_t len = ((ifc32.ifc_len / sizeof(struct compat_ifreq)) + 1) * sizeof(struct ifreq); uifc = compat_alloc_user_space(sizeof(struct ifconf) + len); ifc.ifc_len = len; ifr = ifc.ifc_req = (void __user *)(uifc + 1); ifr32 = compat_ptr(ifc32.ifcbuf); for (i = 0; i < ifc32.ifc_len; i += sizeof(struct compat_ifreq)) { if (copy_in_user(ifr, ifr32, sizeof(struct compat_ifreq))) return -EFAULT; ifr++; ifr32++; } } if (copy_to_user(uifc, &ifc, sizeof(struct ifconf))) return -EFAULT; err = dev_ioctl(net, SIOCGIFCONF, uifc); if (err) return err; if (copy_from_user(&ifc, uifc, sizeof(struct ifconf))) return -EFAULT; ifr = ifc.ifc_req; ifr32 = compat_ptr(ifc32.ifcbuf); for (i = 0, j = 0; i + sizeof(struct compat_ifreq) <= ifc32.ifc_len && j < ifc.ifc_len; i += sizeof(struct compat_ifreq), j += sizeof(struct ifreq)) { if (copy_in_user(ifr32, ifr, sizeof(struct compat_ifreq))) return -EFAULT; ifr32++; ifr++; } if (ifc32.ifcbuf == 0) { /* Translate from 64-bit structure multiple to * a 32-bit one. 
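 *
 * i.e. rescale the byte count by the per-entry sizes; e.g. assuming
 * sizeof(struct ifreq) == 40 and sizeof(struct compat_ifreq) == 32
 * (typical on x86-64), a 64-bit length of 120 (3 entries) is reported
 * back to 32-bit userspace as 96.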
*/ i = ifc.ifc_len; i = ((i / sizeof(struct ifreq)) * sizeof(struct compat_ifreq)); ifc32.ifc_len = i; } else { ifc32.ifc_len = i; } if (copy_to_user(uifc32, &ifc32, sizeof(struct compat_ifconf))) return -EFAULT; return 0; } static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32) { struct compat_ethtool_rxnfc __user *compat_rxnfc; bool convert_in = false, convert_out = false; size_t buf_size = ALIGN(sizeof(struct ifreq), 8); struct ethtool_rxnfc __user *rxnfc; struct ifreq __user *ifr; u32 rule_cnt = 0, actual_rule_cnt; u32 ethcmd; u32 data; int ret; if (get_user(data, &ifr32->ifr_ifru.ifru_data)) return -EFAULT; compat_rxnfc = compat_ptr(data); if (get_user(ethcmd, &compat_rxnfc->cmd)) return -EFAULT; /* Most ethtool structures are defined without padding. * Unfortunately struct ethtool_rxnfc is an exception. */ switch (ethcmd) { default: break; case ETHTOOL_GRXCLSRLALL: /* Buffer size is variable */ if (get_user(rule_cnt, &compat_rxnfc->rule_cnt)) return -EFAULT; if (rule_cnt > KMALLOC_MAX_SIZE / sizeof(u32)) return -ENOMEM; buf_size += rule_cnt * sizeof(u32); /* fall through */ case ETHTOOL_GRXRINGS: case ETHTOOL_GRXCLSRLCNT: case ETHTOOL_GRXCLSRULE: convert_out = true; /* fall through */ case ETHTOOL_SRXCLSRLDEL: case ETHTOOL_SRXCLSRLINS: buf_size += sizeof(struct ethtool_rxnfc); convert_in = true; break; } ifr = compat_alloc_user_space(buf_size); rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8); if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) return -EFAULT; if (put_user(convert_in ? rxnfc : compat_ptr(data), &ifr->ifr_ifru.ifru_data)) return -EFAULT; if (convert_in) { /* We expect there to be holes between fs.m_ext and * fs.ring_cookie and at the end of fs, but nowhere else. */ BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_ext) + sizeof(compat_rxnfc->fs.m_ext) != offsetof(struct ethtool_rxnfc, fs.m_ext) + sizeof(rxnfc->fs.m_ext)); BUILD_BUG_ON( offsetof(struct compat_ethtool_rxnfc, fs.location) - offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) != offsetof(struct ethtool_rxnfc, fs.location) - offsetof(struct ethtool_rxnfc, fs.ring_cookie)); if (copy_in_user(rxnfc, compat_rxnfc, (void *)(&rxnfc->fs.m_ext + 1) - (void *)rxnfc) || copy_in_user(&rxnfc->fs.ring_cookie, &compat_rxnfc->fs.ring_cookie, (void *)(&rxnfc->fs.location + 1) - (void *)&rxnfc->fs.ring_cookie) || copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt, sizeof(rxnfc->rule_cnt))) return -EFAULT; } ret = dev_ioctl(net, SIOCETHTOOL, ifr); if (ret) return ret; if (convert_out) { if (copy_in_user(compat_rxnfc, rxnfc, (const void *)(&rxnfc->fs.m_ext + 1) - (const void *)rxnfc) || copy_in_user(&compat_rxnfc->fs.ring_cookie, &rxnfc->fs.ring_cookie, (const void *)(&rxnfc->fs.location + 1) - (const void *)&rxnfc->fs.ring_cookie) || copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt, sizeof(rxnfc->rule_cnt))) return -EFAULT; if (ethcmd == ETHTOOL_GRXCLSRLALL) { /* As an optimisation, we only copy the actual * number of rules that the underlying * function returned. Since Mallory might * change the rule count in user memory, we * check that it is less than the rule count * originally given (as the user buffer size), * which has been range-checked. 
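 *
 * This is a time-of-check/time-of-use defence: we clamp to the count
 * validated earlier instead of trusting a value re-read from memory
 * that userspace can still modify.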
*/ if (get_user(actual_rule_cnt, &rxnfc->rule_cnt)) return -EFAULT; if (actual_rule_cnt < rule_cnt) rule_cnt = actual_rule_cnt; if (copy_in_user(&compat_rxnfc->rule_locs[0], &rxnfc->rule_locs[0], rule_cnt * sizeof(u32))) return -EFAULT; } } return 0; } static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32) { void __user *uptr; compat_uptr_t uptr32; struct ifreq __user *uifr; uifr = compat_alloc_user_space(sizeof(*uifr)); if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) return -EFAULT; if (get_user(uptr32, &uifr32->ifr_settings.ifs_ifsu)) return -EFAULT; uptr = compat_ptr(uptr32); if (put_user(uptr, &uifr->ifr_settings.ifs_ifsu.raw_hdlc)) return -EFAULT; return dev_ioctl(net, SIOCWANDEV, uifr); } static int bond_ioctl(struct net *net, unsigned int cmd, struct compat_ifreq __user *ifr32) { struct ifreq kifr; struct ifreq __user *uifr; mm_segment_t old_fs; int err; u32 data; void __user *datap; switch (cmd) { case SIOCBONDENSLAVE: case SIOCBONDRELEASE: case SIOCBONDSETHWADDR: case SIOCBONDCHANGEACTIVE: if (copy_from_user(&kifr, ifr32, sizeof(struct compat_ifreq))) return -EFAULT; old_fs = get_fs(); set_fs(KERNEL_DS); err = dev_ioctl(net, cmd, (struct ifreq __user __force *) &kifr); set_fs(old_fs); return err; case SIOCBONDSLAVEINFOQUERY: case SIOCBONDINFOQUERY: uifr = compat_alloc_user_space(sizeof(*uifr)); if (copy_in_user(&uifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) return -EFAULT; if (get_user(data, &ifr32->ifr_ifru.ifru_data)) return -EFAULT; datap = compat_ptr(data); if (put_user(datap, &uifr->ifr_ifru.ifru_data)) return -EFAULT; return dev_ioctl(net, cmd, uifr); default: return -EINVAL; } } static int siocdevprivate_ioctl(struct net *net, unsigned int cmd, struct compat_ifreq __user *u_ifreq32) { struct ifreq __user *u_ifreq64; char tmp_buf[IFNAMSIZ]; void __user *data64; u32 data32; if (copy_from_user(&tmp_buf[0], &(u_ifreq32->ifr_ifrn.ifrn_name[0]), IFNAMSIZ)) return -EFAULT; if (__get_user(data32, &u_ifreq32->ifr_ifru.ifru_data)) return -EFAULT; data64 = compat_ptr(data32); u_ifreq64 = compat_alloc_user_space(sizeof(*u_ifreq64)); /* Don't check these user accesses, just let that get trapped * in the ioctl handler instead. 
*/ if (copy_to_user(&u_ifreq64->ifr_ifrn.ifrn_name[0], &tmp_buf[0], IFNAMSIZ)) return -EFAULT; if (__put_user(data64, &u_ifreq64->ifr_ifru.ifru_data)) return -EFAULT; return dev_ioctl(net, cmd, u_ifreq64); } static int dev_ifsioc(struct net *net, struct socket *sock, unsigned int cmd, struct compat_ifreq __user *uifr32) { struct ifreq __user *uifr; int err; uifr = compat_alloc_user_space(sizeof(*uifr)); if (copy_in_user(uifr, uifr32, sizeof(*uifr32))) return -EFAULT; err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr); if (!err) { switch (cmd) { case SIOCGIFFLAGS: case SIOCGIFMETRIC: case SIOCGIFMTU: case SIOCGIFMEM: case SIOCGIFHWADDR: case SIOCGIFINDEX: case SIOCGIFADDR: case SIOCGIFBRDADDR: case SIOCGIFDSTADDR: case SIOCGIFNETMASK: case SIOCGIFPFLAGS: case SIOCGIFTXQLEN: case SIOCGMIIPHY: case SIOCGMIIREG: if (copy_in_user(uifr32, uifr, sizeof(*uifr32))) err = -EFAULT; break; } } return err; } static int compat_sioc_ifmap(struct net *net, unsigned int cmd, struct compat_ifreq __user *uifr32) { struct ifreq ifr; struct compat_ifmap __user *uifmap32; mm_segment_t old_fs; int err; uifmap32 = &uifr32->ifr_ifru.ifru_map; err = copy_from_user(&ifr, uifr32, sizeof(ifr.ifr_name)); err |= __get_user(ifr.ifr_map.mem_start, &uifmap32->mem_start); err |= __get_user(ifr.ifr_map.mem_end, &uifmap32->mem_end); err |= __get_user(ifr.ifr_map.base_addr, &uifmap32->base_addr); err |= __get_user(ifr.ifr_map.irq, &uifmap32->irq); err |= __get_user(ifr.ifr_map.dma, &uifmap32->dma); err |= __get_user(ifr.ifr_map.port, &uifmap32->port); if (err) return -EFAULT; old_fs = get_fs(); set_fs(KERNEL_DS); err = dev_ioctl(net, cmd, (void __user __force *)&ifr); set_fs(old_fs); if (cmd == SIOCGIFMAP && !err) { err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name)); err |= __put_user(ifr.ifr_map.mem_start, &uifmap32->mem_start); err |= __put_user(ifr.ifr_map.mem_end, &uifmap32->mem_end); err |= __put_user(ifr.ifr_map.base_addr, &uifmap32->base_addr); err |= __put_user(ifr.ifr_map.irq, &uifmap32->irq); err |= __put_user(ifr.ifr_map.dma, &uifmap32->dma); err |= __put_user(ifr.ifr_map.port, &uifmap32->port); if (err) err = -EFAULT; } return err; } static int compat_siocshwtstamp(struct net *net, struct compat_ifreq __user *uifr32) { void __user *uptr; compat_uptr_t uptr32; struct ifreq __user *uifr; uifr = compat_alloc_user_space(sizeof(*uifr)); if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) return -EFAULT; if (get_user(uptr32, &uifr32->ifr_data)) return -EFAULT; uptr = compat_ptr(uptr32); if (put_user(uptr, &uifr->ifr_data)) return -EFAULT; return dev_ioctl(net, SIOCSHWTSTAMP, uifr); } struct rtentry32 { u32 rt_pad1; struct sockaddr rt_dst; /* target address */ struct sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */ struct sockaddr rt_genmask; /* target network mask (IP) */ unsigned short rt_flags; short rt_pad2; u32 rt_pad3; unsigned char rt_tos; unsigned char rt_class; short rt_pad4; short rt_metric; /* +1 for binary compatibility! 
*/ /* char * */ u32 rt_dev; /* forcing the device at add */ u32 rt_mtu; /* per route MTU/Window */ u32 rt_window; /* Window clamping */ unsigned short rt_irtt; /* Initial RTT */ }; struct in6_rtmsg32 { struct in6_addr rtmsg_dst; struct in6_addr rtmsg_src; struct in6_addr rtmsg_gateway; u32 rtmsg_type; u16 rtmsg_dst_len; u16 rtmsg_src_len; u32 rtmsg_metric; u32 rtmsg_info; u32 rtmsg_flags; s32 rtmsg_ifindex; }; static int routing_ioctl(struct net *net, struct socket *sock, unsigned int cmd, void __user *argp) { int ret; void *r = NULL; struct in6_rtmsg r6; struct rtentry r4; char devname[16]; u32 rtdev; mm_segment_t old_fs = get_fs(); if (sock && sock->sk && sock->sk->sk_family == AF_INET6) { /* ipv6 */ struct in6_rtmsg32 __user *ur6 = argp; ret = copy_from_user(&r6.rtmsg_dst, &(ur6->rtmsg_dst), 3 * sizeof(struct in6_addr)); ret |= __get_user(r6.rtmsg_type, &(ur6->rtmsg_type)); ret |= __get_user(r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len)); ret |= __get_user(r6.rtmsg_src_len, &(ur6->rtmsg_src_len)); ret |= __get_user(r6.rtmsg_metric, &(ur6->rtmsg_metric)); ret |= __get_user(r6.rtmsg_info, &(ur6->rtmsg_info)); ret |= __get_user(r6.rtmsg_flags, &(ur6->rtmsg_flags)); ret |= __get_user(r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex)); r = (void *) &r6; } else { /* ipv4 */ struct rtentry32 __user *ur4 = argp; ret = copy_from_user(&r4.rt_dst, &(ur4->rt_dst), 3 * sizeof(struct sockaddr)); ret |= __get_user(r4.rt_flags, &(ur4->rt_flags)); ret |= __get_user(r4.rt_metric, &(ur4->rt_metric)); ret |= __get_user(r4.rt_mtu, &(ur4->rt_mtu)); ret |= __get_user(r4.rt_window, &(ur4->rt_window)); ret |= __get_user(r4.rt_irtt, &(ur4->rt_irtt)); ret |= __get_user(rtdev, &(ur4->rt_dev)); if (rtdev) { ret |= copy_from_user(devname, compat_ptr(rtdev), 15); r4.rt_dev = (char __user __force *)devname; devname[15] = 0; } else r4.rt_dev = NULL; r = (void *) &r4; } if (ret) { ret = -EFAULT; goto out; } set_fs(KERNEL_DS); ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r); set_fs(old_fs); out: return ret; } /* Since old style bridge ioctl's endup using SIOCDEVPRIVATE * for some operations; this forces use of the newer bridge-utils that * use compatible ioctls */ static int old_bridge_ioctl(compat_ulong_t __user *argp) { compat_ulong_t tmp; if (get_user(tmp, argp)) return -EFAULT; if (tmp == BRCTL_GET_VERSION) return BRCTL_VERSION + 1; return -EINVAL; } static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, unsigned int cmd, unsigned long arg) { void __user *argp = compat_ptr(arg); struct sock *sk = sock->sk; struct net *net = sock_net(sk); if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) return siocdevprivate_ioctl(net, cmd, argp); switch (cmd) { case SIOCSIFBR: case SIOCGIFBR: return old_bridge_ioctl(argp); case SIOCGIFNAME: return dev_ifname32(net, argp); case SIOCGIFCONF: return dev_ifconf(net, argp); case SIOCETHTOOL: return ethtool_ioctl(net, argp); case SIOCWANDEV: return compat_siocwandev(net, argp); case SIOCGIFMAP: case SIOCSIFMAP: return compat_sioc_ifmap(net, cmd, argp); case SIOCBONDENSLAVE: case SIOCBONDRELEASE: case SIOCBONDSETHWADDR: case SIOCBONDSLAVEINFOQUERY: case SIOCBONDINFOQUERY: case SIOCBONDCHANGEACTIVE: return bond_ioctl(net, cmd, argp); case SIOCADDRT: case SIOCDELRT: return routing_ioctl(net, sock, cmd, argp); case SIOCGSTAMP: return do_siocgstamp(net, sock, cmd, argp); case SIOCGSTAMPNS: return do_siocgstampns(net, sock, cmd, argp); case SIOCSHWTSTAMP: return compat_siocshwtstamp(net, argp); case FIOSETOWN: case SIOCSPGRP: case FIOGETOWN: case SIOCGPGRP: case SIOCBRADDBR: 
case SIOCBRDELBR: case SIOCGIFVLAN: case SIOCSIFVLAN: case SIOCADDDLCI: case SIOCDELDLCI: return sock_ioctl(file, cmd, arg); case SIOCGIFFLAGS: case SIOCSIFFLAGS: case SIOCGIFMETRIC: case SIOCSIFMETRIC: case SIOCGIFMTU: case SIOCSIFMTU: case SIOCGIFMEM: case SIOCSIFMEM: case SIOCGIFHWADDR: case SIOCSIFHWADDR: case SIOCADDMULTI: case SIOCDELMULTI: case SIOCGIFINDEX: case SIOCGIFADDR: case SIOCSIFADDR: case SIOCSIFHWBROADCAST: case SIOCDIFADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCSIFPFLAGS: case SIOCGIFPFLAGS: case SIOCGIFTXQLEN: case SIOCSIFTXQLEN: case SIOCBRADDIF: case SIOCBRDELIF: case SIOCSIFNAME: case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: return dev_ifsioc(net, sock, cmd, argp); case SIOCSARP: case SIOCGARP: case SIOCDARP: case SIOCATMARK: return sock_do_ioctl(net, sock, cmd, arg); } /* Prevent warning from compat_sys_ioctl, these always * result in -EINVAL in the native case anyway. */ switch (cmd) { case SIOCRTMSG: case SIOCGIFCOUNT: case SIOCSRARP: case SIOCGRARP: case SIOCDRARP: case SIOCSIFLINK: case SIOCGIFSLAVE: case SIOCSIFSLAVE: return -EINVAL; } return -ENOIOCTLCMD; } static long compat_sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) { struct socket *sock = file->private_data; int ret = -ENOIOCTLCMD; struct sock *sk; struct net *net; sk = sock->sk; net = sock_net(sk); if (sock->ops->compat_ioctl) ret = sock->ops->compat_ioctl(sock, cmd, arg); if (ret == -ENOIOCTLCMD && (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)) ret = compat_wext_handle_ioctl(net, cmd, arg); if (ret == -ENOIOCTLCMD) ret = compat_sock_ioctl_trans(file, sock, cmd, arg); return ret; } #endif int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen) { return sock->ops->bind(sock, addr, addrlen); } EXPORT_SYMBOL(kernel_bind); int kernel_listen(struct socket *sock, int backlog) { return sock->ops->listen(sock, backlog); } EXPORT_SYMBOL(kernel_listen); int kernel_accept(struct socket *sock, struct socket **newsock, int flags) { struct sock *sk = sock->sk; int err; err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol, newsock); if (err < 0) goto done; err = sock->ops->accept(sock, *newsock, flags); if (err < 0) { sock_release(*newsock); *newsock = NULL; goto done; } (*newsock)->ops = sock->ops; __module_get((*newsock)->ops->owner); done: return err; } EXPORT_SYMBOL(kernel_accept); int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen, int flags) { return sock->ops->connect(sock, addr, addrlen, flags); } EXPORT_SYMBOL(kernel_connect); int kernel_getsockname(struct socket *sock, struct sockaddr *addr, int *addrlen) { return sock->ops->getname(sock, addr, addrlen, 0); } EXPORT_SYMBOL(kernel_getsockname); int kernel_getpeername(struct socket *sock, struct sockaddr *addr, int *addrlen) { return sock->ops->getname(sock, addr, addrlen, 1); } EXPORT_SYMBOL(kernel_getpeername); int kernel_getsockopt(struct socket *sock, int level, int optname, char *optval, int *optlen) { mm_segment_t oldfs = get_fs(); char __user *uoptval; int __user *uoptlen; int err; uoptval = (char __user __force *) optval; uoptlen = (int __user __force *) optlen; set_fs(KERNEL_DS); if (level == SOL_SOCKET) err = sock_getsockopt(sock, level, optname, uoptval, uoptlen); else err = sock->ops->getsockopt(sock, level, optname, uoptval, uoptlen); set_fs(oldfs); return err; } EXPORT_SYMBOL(kernel_getsockopt); int kernel_setsockopt(struct socket *sock, int level, int optname, char *optval, 
unsigned int optlen) { mm_segment_t oldfs = get_fs(); char __user *uoptval; int err; uoptval = (char __user __force *) optval; set_fs(KERNEL_DS); if (level == SOL_SOCKET) err = sock_setsockopt(sock, level, optname, uoptval, optlen); else err = sock->ops->setsockopt(sock, level, optname, uoptval, optlen); set_fs(oldfs); return err; } EXPORT_SYMBOL(kernel_setsockopt); int kernel_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags) { sock_update_classid(sock->sk); if (sock->ops->sendpage) return sock->ops->sendpage(sock, page, offset, size, flags); return sock_no_sendpage(sock, page, offset, size, flags); } EXPORT_SYMBOL(kernel_sendpage); int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg) { mm_segment_t oldfs = get_fs(); int err; set_fs(KERNEL_DS); err = sock->ops->ioctl(sock, cmd, arg); set_fs(oldfs); return err; } EXPORT_SYMBOL(kernel_sock_ioctl); int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how) { return sock->ops->shutdown(sock, how); } EXPORT_SYMBOL(kernel_sock_shutdown);
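/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * The kernel_bind()/kernel_listen()/kernel_accept() wrappers above exist
 * so in-kernel users can drive a socket without the set_fs() games the
 * setsockopt/getsockopt helpers need.  The example below shows one
 * plausible way a module might stand up a TCP listener with them;
 * example_kernel_listener() and the port number are hypothetical, and
 * sock_create_kern() is assumed to have the (family, type, protocol,
 * res) signature of this kernel generation.
 */
#if 0
static int example_kernel_listener(struct socket **lsock)
{
	struct sockaddr_in sin;
	int err;

	err = sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, lsock);
	if (err < 0)
		return err;

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(INADDR_ANY);
	sin.sin_port = htons(12345);	/* hypothetical port */

	err = kernel_bind(*lsock, (struct sockaddr *)&sin, sizeof(sin));
	if (!err)
		err = kernel_listen(*lsock, 5);
	if (err < 0) {
		sock_release(*lsock);
		*lsock = NULL;
	}
	return err;
}
#endif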
gpl-2.0
DZB-Team/kernel_torino_cm11
fs/xfs/xfs_bmap_btree.c
810
23353
/* * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_types.h" #include "xfs_bit.h" #include "xfs_log.h" #include "xfs_inum.h" #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_dir2.h" #include "xfs_dmapi.h" #include "xfs_mount.h" #include "xfs_bmap_btree.h" #include "xfs_alloc_btree.h" #include "xfs_ialloc_btree.h" #include "xfs_dir2_sf.h" #include "xfs_attr_sf.h" #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_inode_item.h" #include "xfs_alloc.h" #include "xfs_btree.h" #include "xfs_btree_trace.h" #include "xfs_ialloc.h" #include "xfs_itable.h" #include "xfs_bmap.h" #include "xfs_error.h" #include "xfs_quota.h" /* * Determine the extent state. */ /* ARGSUSED */ STATIC xfs_exntst_t xfs_extent_state( xfs_filblks_t blks, int extent_flag) { if (extent_flag) { ASSERT(blks != 0); /* saved for DMIG */ return XFS_EXT_UNWRITTEN; } return XFS_EXT_NORM; } /* * Convert on-disk form of btree root to in-memory form. */ void xfs_bmdr_to_bmbt( struct xfs_mount *mp, xfs_bmdr_block_t *dblock, int dblocklen, struct xfs_btree_block *rblock, int rblocklen) { int dmxr; xfs_bmbt_key_t *fkp; __be64 *fpp; xfs_bmbt_key_t *tkp; __be64 *tpp; rblock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC); rblock->bb_level = dblock->bb_level; ASSERT(be16_to_cpu(rblock->bb_level) > 0); rblock->bb_numrecs = dblock->bb_numrecs; rblock->bb_u.l.bb_leftsib = cpu_to_be64(NULLDFSBNO); rblock->bb_u.l.bb_rightsib = cpu_to_be64(NULLDFSBNO); dmxr = xfs_bmdr_maxrecs(mp, dblocklen, 0); fkp = XFS_BMDR_KEY_ADDR(dblock, 1); tkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1); fpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr); tpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen); dmxr = be16_to_cpu(dblock->bb_numrecs); memcpy(tkp, fkp, sizeof(*fkp) * dmxr); memcpy(tpp, fpp, sizeof(*fpp) * dmxr); } /* * Convert a compressed bmap extent record to an uncompressed form. * This code must be in sync with the routines xfs_bmbt_get_startoff, * xfs_bmbt_get_startblock, xfs_bmbt_get_blockcount and xfs_bmbt_get_state. 
*/ STATIC void __xfs_bmbt_get_all( __uint64_t l0, __uint64_t l1, xfs_bmbt_irec_t *s) { int ext_flag; xfs_exntst_t st; ext_flag = (int)(l0 >> (64 - BMBT_EXNTFLAG_BITLEN)); s->br_startoff = ((xfs_fileoff_t)l0 & xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9; #if XFS_BIG_BLKNOS s->br_startblock = (((xfs_fsblock_t)l0 & xfs_mask64lo(9)) << 43) | (((xfs_fsblock_t)l1) >> 21); #else #ifdef DEBUG { xfs_dfsbno_t b; b = (((xfs_dfsbno_t)l0 & xfs_mask64lo(9)) << 43) | (((xfs_dfsbno_t)l1) >> 21); ASSERT((b >> 32) == 0 || isnulldstartblock(b)); s->br_startblock = (xfs_fsblock_t)b; } #else /* !DEBUG */ s->br_startblock = (xfs_fsblock_t)(((xfs_dfsbno_t)l1) >> 21); #endif /* DEBUG */ #endif /* XFS_BIG_BLKNOS */ s->br_blockcount = (xfs_filblks_t)(l1 & xfs_mask64lo(21)); /* This is xfs_extent_state() in-line */ if (ext_flag) { ASSERT(s->br_blockcount != 0); /* saved for DMIG */ st = XFS_EXT_UNWRITTEN; } else st = XFS_EXT_NORM; s->br_state = st; } void xfs_bmbt_get_all( xfs_bmbt_rec_host_t *r, xfs_bmbt_irec_t *s) { __xfs_bmbt_get_all(r->l0, r->l1, s); } /* * Extract the blockcount field from an in memory bmap extent record. */ xfs_filblks_t xfs_bmbt_get_blockcount( xfs_bmbt_rec_host_t *r) { return (xfs_filblks_t)(r->l1 & xfs_mask64lo(21)); } /* * Extract the startblock field from an in memory bmap extent record. */ xfs_fsblock_t xfs_bmbt_get_startblock( xfs_bmbt_rec_host_t *r) { #if XFS_BIG_BLKNOS return (((xfs_fsblock_t)r->l0 & xfs_mask64lo(9)) << 43) | (((xfs_fsblock_t)r->l1) >> 21); #else #ifdef DEBUG xfs_dfsbno_t b; b = (((xfs_dfsbno_t)r->l0 & xfs_mask64lo(9)) << 43) | (((xfs_dfsbno_t)r->l1) >> 21); ASSERT((b >> 32) == 0 || isnulldstartblock(b)); return (xfs_fsblock_t)b; #else /* !DEBUG */ return (xfs_fsblock_t)(((xfs_dfsbno_t)r->l1) >> 21); #endif /* DEBUG */ #endif /* XFS_BIG_BLKNOS */ } /* * Extract the startoff field from an in memory bmap extent record. */ xfs_fileoff_t xfs_bmbt_get_startoff( xfs_bmbt_rec_host_t *r) { return ((xfs_fileoff_t)r->l0 & xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9; } xfs_exntst_t xfs_bmbt_get_state( xfs_bmbt_rec_host_t *r) { int ext_flag; ext_flag = (int)((r->l0) >> (64 - BMBT_EXNTFLAG_BITLEN)); return xfs_extent_state(xfs_bmbt_get_blockcount(r), ext_flag); } /* * Extract the blockcount field from an on disk bmap extent record. */ xfs_filblks_t xfs_bmbt_disk_get_blockcount( xfs_bmbt_rec_t *r) { return (xfs_filblks_t)(be64_to_cpu(r->l1) & xfs_mask64lo(21)); } /* * Extract the startoff field from a disk format bmap extent record. */ xfs_fileoff_t xfs_bmbt_disk_get_startoff( xfs_bmbt_rec_t *r) { return ((xfs_fileoff_t)be64_to_cpu(r->l0) & xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9; } /* * Set all the fields in a bmap extent record from the arguments. */ void xfs_bmbt_set_allf( xfs_bmbt_rec_host_t *r, xfs_fileoff_t startoff, xfs_fsblock_t startblock, xfs_filblks_t blockcount, xfs_exntst_t state) { int extent_flag = (state == XFS_EXT_NORM) ? 
0 : 1; ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN); ASSERT((startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)) == 0); ASSERT((blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)) == 0); #if XFS_BIG_BLKNOS ASSERT((startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)) == 0); r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) | ((xfs_bmbt_rec_base_t)startoff << 9) | ((xfs_bmbt_rec_base_t)startblock >> 43); r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) | ((xfs_bmbt_rec_base_t)blockcount & (xfs_bmbt_rec_base_t)xfs_mask64lo(21)); #else /* !XFS_BIG_BLKNOS */ if (isnullstartblock(startblock)) { r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) | ((xfs_bmbt_rec_base_t)startoff << 9) | (xfs_bmbt_rec_base_t)xfs_mask64lo(9); r->l1 = xfs_mask64hi(11) | ((xfs_bmbt_rec_base_t)startblock << 21) | ((xfs_bmbt_rec_base_t)blockcount & (xfs_bmbt_rec_base_t)xfs_mask64lo(21)); } else { r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) | ((xfs_bmbt_rec_base_t)startoff << 9); r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) | ((xfs_bmbt_rec_base_t)blockcount & (xfs_bmbt_rec_base_t)xfs_mask64lo(21)); } #endif /* XFS_BIG_BLKNOS */ } /* * Set all the fields in a bmap extent record from the uncompressed form. */ void xfs_bmbt_set_all( xfs_bmbt_rec_host_t *r, xfs_bmbt_irec_t *s) { xfs_bmbt_set_allf(r, s->br_startoff, s->br_startblock, s->br_blockcount, s->br_state); } /* * Set all the fields in a disk format bmap extent record from the arguments. */ void xfs_bmbt_disk_set_allf( xfs_bmbt_rec_t *r, xfs_fileoff_t startoff, xfs_fsblock_t startblock, xfs_filblks_t blockcount, xfs_exntst_t state) { int extent_flag = (state == XFS_EXT_NORM) ? 0 : 1; ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN); ASSERT((startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)) == 0); ASSERT((blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)) == 0); #if XFS_BIG_BLKNOS ASSERT((startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)) == 0); r->l0 = cpu_to_be64( ((xfs_bmbt_rec_base_t)extent_flag << 63) | ((xfs_bmbt_rec_base_t)startoff << 9) | ((xfs_bmbt_rec_base_t)startblock >> 43)); r->l1 = cpu_to_be64( ((xfs_bmbt_rec_base_t)startblock << 21) | ((xfs_bmbt_rec_base_t)blockcount & (xfs_bmbt_rec_base_t)xfs_mask64lo(21))); #else /* !XFS_BIG_BLKNOS */ if (isnullstartblock(startblock)) { r->l0 = cpu_to_be64( ((xfs_bmbt_rec_base_t)extent_flag << 63) | ((xfs_bmbt_rec_base_t)startoff << 9) | (xfs_bmbt_rec_base_t)xfs_mask64lo(9)); r->l1 = cpu_to_be64(xfs_mask64hi(11) | ((xfs_bmbt_rec_base_t)startblock << 21) | ((xfs_bmbt_rec_base_t)blockcount & (xfs_bmbt_rec_base_t)xfs_mask64lo(21))); } else { r->l0 = cpu_to_be64( ((xfs_bmbt_rec_base_t)extent_flag << 63) | ((xfs_bmbt_rec_base_t)startoff << 9)); r->l1 = cpu_to_be64( ((xfs_bmbt_rec_base_t)startblock << 21) | ((xfs_bmbt_rec_base_t)blockcount & (xfs_bmbt_rec_base_t)xfs_mask64lo(21))); } #endif /* XFS_BIG_BLKNOS */ } /* * Set all the fields in a bmap extent record from the uncompressed form. */ STATIC void xfs_bmbt_disk_set_all( xfs_bmbt_rec_t *r, xfs_bmbt_irec_t *s) { xfs_bmbt_disk_set_allf(r, s->br_startoff, s->br_startblock, s->br_blockcount, s->br_state); } /* * Set the blockcount field in a bmap extent record. */ void xfs_bmbt_set_blockcount( xfs_bmbt_rec_host_t *r, xfs_filblks_t v) { ASSERT((v & xfs_mask64hi(43)) == 0); r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64hi(43)) | (xfs_bmbt_rec_base_t)(v & xfs_mask64lo(21)); } /* * Set the startblock field in a bmap extent record. 
*/ void xfs_bmbt_set_startblock( xfs_bmbt_rec_host_t *r, xfs_fsblock_t v) { #if XFS_BIG_BLKNOS ASSERT((v & xfs_mask64hi(12)) == 0); r->l0 = (r->l0 & (xfs_bmbt_rec_base_t)xfs_mask64hi(55)) | (xfs_bmbt_rec_base_t)(v >> 43); r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21)) | (xfs_bmbt_rec_base_t)(v << 21); #else /* !XFS_BIG_BLKNOS */ if (isnullstartblock(v)) { r->l0 |= (xfs_bmbt_rec_base_t)xfs_mask64lo(9); r->l1 = (xfs_bmbt_rec_base_t)xfs_mask64hi(11) | ((xfs_bmbt_rec_base_t)v << 21) | (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21)); } else { r->l0 &= ~(xfs_bmbt_rec_base_t)xfs_mask64lo(9); r->l1 = ((xfs_bmbt_rec_base_t)v << 21) | (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21)); } #endif /* XFS_BIG_BLKNOS */ } /* * Set the startoff field in a bmap extent record. */ void xfs_bmbt_set_startoff( xfs_bmbt_rec_host_t *r, xfs_fileoff_t v) { ASSERT((v & xfs_mask64hi(9)) == 0); r->l0 = (r->l0 & (xfs_bmbt_rec_base_t) xfs_mask64hi(1)) | ((xfs_bmbt_rec_base_t)v << 9) | (r->l0 & (xfs_bmbt_rec_base_t)xfs_mask64lo(9)); } /* * Set the extent state field in a bmap extent record. */ void xfs_bmbt_set_state( xfs_bmbt_rec_host_t *r, xfs_exntst_t v) { ASSERT(v == XFS_EXT_NORM || v == XFS_EXT_UNWRITTEN); if (v == XFS_EXT_NORM) r->l0 &= xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN); else r->l0 |= xfs_mask64hi(BMBT_EXNTFLAG_BITLEN); } /* * Convert in-memory form of btree root to on-disk form. */ void xfs_bmbt_to_bmdr( struct xfs_mount *mp, struct xfs_btree_block *rblock, int rblocklen, xfs_bmdr_block_t *dblock, int dblocklen) { int dmxr; xfs_bmbt_key_t *fkp; __be64 *fpp; xfs_bmbt_key_t *tkp; __be64 *tpp; ASSERT(be32_to_cpu(rblock->bb_magic) == XFS_BMAP_MAGIC); ASSERT(be64_to_cpu(rblock->bb_u.l.bb_leftsib) == NULLDFSBNO); ASSERT(be64_to_cpu(rblock->bb_u.l.bb_rightsib) == NULLDFSBNO); ASSERT(be16_to_cpu(rblock->bb_level) > 0); dblock->bb_level = rblock->bb_level; dblock->bb_numrecs = rblock->bb_numrecs; dmxr = xfs_bmdr_maxrecs(mp, dblocklen, 0); fkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1); tkp = XFS_BMDR_KEY_ADDR(dblock, 1); fpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen); tpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr); dmxr = be16_to_cpu(dblock->bb_numrecs); memcpy(tkp, fkp, sizeof(*fkp) * dmxr); memcpy(tpp, fpp, sizeof(*fpp) * dmxr); } /* * Check extent records, which have just been read, for * any bit in the extent flag field. ASSERT on debug * kernels, as this condition should not occur. * Return an error condition (1) if any flags found, * otherwise return 0. */ int xfs_check_nostate_extents( xfs_ifork_t *ifp, xfs_extnum_t idx, xfs_extnum_t num) { for (; num > 0; num--, idx++) { xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx); if ((ep->l0 >> (64 - BMBT_EXNTFLAG_BITLEN)) != 0) { ASSERT(0); return 1; } } return 0; } STATIC struct xfs_btree_cur * xfs_bmbt_dup_cursor( struct xfs_btree_cur *cur) { struct xfs_btree_cur *new; new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp, cur->bc_private.b.ip, cur->bc_private.b.whichfork); /* * Copy the firstblock, flist, and flags values, * since init cursor doesn't get them. 
*/ new->bc_private.b.firstblock = cur->bc_private.b.firstblock; new->bc_private.b.flist = cur->bc_private.b.flist; new->bc_private.b.flags = cur->bc_private.b.flags; return new; } STATIC void xfs_bmbt_update_cursor( struct xfs_btree_cur *src, struct xfs_btree_cur *dst) { ASSERT((dst->bc_private.b.firstblock != NULLFSBLOCK) || (dst->bc_private.b.ip->i_d.di_flags & XFS_DIFLAG_REALTIME)); ASSERT(dst->bc_private.b.flist == src->bc_private.b.flist); dst->bc_private.b.allocated += src->bc_private.b.allocated; dst->bc_private.b.firstblock = src->bc_private.b.firstblock; src->bc_private.b.allocated = 0; } STATIC int xfs_bmbt_alloc_block( struct xfs_btree_cur *cur, union xfs_btree_ptr *start, union xfs_btree_ptr *new, int length, int *stat) { xfs_alloc_arg_t args; /* block allocation args */ int error; /* error return value */ memset(&args, 0, sizeof(args)); args.tp = cur->bc_tp; args.mp = cur->bc_mp; args.fsbno = cur->bc_private.b.firstblock; args.firstblock = args.fsbno; if (args.fsbno == NULLFSBLOCK) { args.fsbno = be64_to_cpu(start->l); args.type = XFS_ALLOCTYPE_START_BNO; /* * Make sure there is sufficient room left in the AG to * complete a full tree split for an extent insert. If * we are converting the middle part of an extent then * we may need space for two tree splits. * * We are relying on the caller to make the correct block * reservation for this operation to succeed. If the * reservation amount is insufficient then we may fail a * block allocation here and corrupt the filesystem. */ args.minleft = xfs_trans_get_block_res(args.tp); } else if (cur->bc_private.b.flist->xbf_low) { args.type = XFS_ALLOCTYPE_START_BNO; } else { args.type = XFS_ALLOCTYPE_NEAR_BNO; } args.minlen = args.maxlen = args.prod = 1; args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL; if (!args.wasdel && xfs_trans_get_block_res(args.tp) == 0) { error = XFS_ERROR(ENOSPC); goto error0; } error = xfs_alloc_vextent(&args); if (error) goto error0; if (args.fsbno == NULLFSBLOCK && args.minleft) { /* * Could not find an AG with enough free space to satisfy * a full btree split. Try again without minleft and if * successful activate the lowspace algorithm. 
*/ args.fsbno = 0; args.type = XFS_ALLOCTYPE_FIRST_AG; args.minleft = 0; error = xfs_alloc_vextent(&args); if (error) goto error0; cur->bc_private.b.flist->xbf_low = 1; } if (args.fsbno == NULLFSBLOCK) { XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); *stat = 0; return 0; } ASSERT(args.len == 1); cur->bc_private.b.firstblock = args.fsbno; cur->bc_private.b.allocated++; cur->bc_private.b.ip->i_d.di_nblocks++; xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE); xfs_trans_mod_dquot_byino(args.tp, cur->bc_private.b.ip, XFS_TRANS_DQ_BCOUNT, 1L); new->l = cpu_to_be64(args.fsbno); XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); *stat = 1; return 0; error0: XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); return error; } STATIC int xfs_bmbt_free_block( struct xfs_btree_cur *cur, struct xfs_buf *bp) { struct xfs_mount *mp = cur->bc_mp; struct xfs_inode *ip = cur->bc_private.b.ip; struct xfs_trans *tp = cur->bc_tp; xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp)); xfs_bmap_add_free(fsbno, 1, cur->bc_private.b.flist, mp); ip->i_d.di_nblocks--; xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L); xfs_trans_binval(tp, bp); return 0; } STATIC int xfs_bmbt_get_minrecs( struct xfs_btree_cur *cur, int level) { if (level == cur->bc_nlevels - 1) { struct xfs_ifork *ifp; ifp = XFS_IFORK_PTR(cur->bc_private.b.ip, cur->bc_private.b.whichfork); return xfs_bmbt_maxrecs(cur->bc_mp, ifp->if_broot_bytes, level == 0) / 2; } return cur->bc_mp->m_bmap_dmnr[level != 0]; } int xfs_bmbt_get_maxrecs( struct xfs_btree_cur *cur, int level) { if (level == cur->bc_nlevels - 1) { struct xfs_ifork *ifp; ifp = XFS_IFORK_PTR(cur->bc_private.b.ip, cur->bc_private.b.whichfork); return xfs_bmbt_maxrecs(cur->bc_mp, ifp->if_broot_bytes, level == 0); } return cur->bc_mp->m_bmap_dmxr[level != 0]; } /* * Get the maximum records we could store in the on-disk format. * * For non-root nodes this is equivalent to xfs_bmbt_get_maxrecs, but * for the root node this checks the available space in the dinode fork * so that we can resize the in-memory buffer to match it. After a * resize to the maximum size this function returns the same value * as xfs_bmbt_get_maxrecs for the root node, too. 
*/ STATIC int xfs_bmbt_get_dmaxrecs( struct xfs_btree_cur *cur, int level) { if (level != cur->bc_nlevels - 1) return cur->bc_mp->m_bmap_dmxr[level != 0]; return xfs_bmdr_maxrecs(cur->bc_mp, cur->bc_private.b.forksize, level == 0); } STATIC void xfs_bmbt_init_key_from_rec( union xfs_btree_key *key, union xfs_btree_rec *rec) { key->bmbt.br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(&rec->bmbt)); } STATIC void xfs_bmbt_init_rec_from_key( union xfs_btree_key *key, union xfs_btree_rec *rec) { ASSERT(key->bmbt.br_startoff != 0); xfs_bmbt_disk_set_allf(&rec->bmbt, be64_to_cpu(key->bmbt.br_startoff), 0, 0, XFS_EXT_NORM); } STATIC void xfs_bmbt_init_rec_from_cur( struct xfs_btree_cur *cur, union xfs_btree_rec *rec) { xfs_bmbt_disk_set_all(&rec->bmbt, &cur->bc_rec.b); } STATIC void xfs_bmbt_init_ptr_from_cur( struct xfs_btree_cur *cur, union xfs_btree_ptr *ptr) { ptr->l = 0; } STATIC __int64_t xfs_bmbt_key_diff( struct xfs_btree_cur *cur, union xfs_btree_key *key) { return (__int64_t)be64_to_cpu(key->bmbt.br_startoff) - cur->bc_rec.b.br_startoff; } #ifdef DEBUG STATIC int xfs_bmbt_keys_inorder( struct xfs_btree_cur *cur, union xfs_btree_key *k1, union xfs_btree_key *k2) { return be64_to_cpu(k1->bmbt.br_startoff) < be64_to_cpu(k2->bmbt.br_startoff); } STATIC int xfs_bmbt_recs_inorder( struct xfs_btree_cur *cur, union xfs_btree_rec *r1, union xfs_btree_rec *r2) { return xfs_bmbt_disk_get_startoff(&r1->bmbt) + xfs_bmbt_disk_get_blockcount(&r1->bmbt) <= xfs_bmbt_disk_get_startoff(&r2->bmbt); } #endif /* DEBUG */ #ifdef XFS_BTREE_TRACE ktrace_t *xfs_bmbt_trace_buf; STATIC void xfs_bmbt_trace_enter( struct xfs_btree_cur *cur, const char *func, char *s, int type, int line, __psunsigned_t a0, __psunsigned_t a1, __psunsigned_t a2, __psunsigned_t a3, __psunsigned_t a4, __psunsigned_t a5, __psunsigned_t a6, __psunsigned_t a7, __psunsigned_t a8, __psunsigned_t a9, __psunsigned_t a10) { struct xfs_inode *ip = cur->bc_private.b.ip; int whichfork = cur->bc_private.b.whichfork; ktrace_enter(xfs_bmbt_trace_buf, (void *)((__psint_t)type | (whichfork << 8) | (line << 16)), (void *)func, (void *)s, (void *)ip, (void *)cur, (void *)a0, (void *)a1, (void *)a2, (void *)a3, (void *)a4, (void *)a5, (void *)a6, (void *)a7, (void *)a8, (void *)a9, (void *)a10); } STATIC void xfs_bmbt_trace_cursor( struct xfs_btree_cur *cur, __uint32_t *s0, __uint64_t *l0, __uint64_t *l1) { struct xfs_bmbt_rec_host r; xfs_bmbt_set_all(&r, &cur->bc_rec.b); *s0 = (cur->bc_nlevels << 24) | (cur->bc_private.b.flags << 16) | cur->bc_private.b.allocated; *l0 = r.l0; *l1 = r.l1; } STATIC void xfs_bmbt_trace_key( struct xfs_btree_cur *cur, union xfs_btree_key *key, __uint64_t *l0, __uint64_t *l1) { *l0 = be64_to_cpu(key->bmbt.br_startoff); *l1 = 0; } /* Endian flipping versions of the bmbt extraction functions */ STATIC void xfs_bmbt_disk_get_all( xfs_bmbt_rec_t *r, xfs_bmbt_irec_t *s) { __xfs_bmbt_get_all(get_unaligned_be64(&r->l0), get_unaligned_be64(&r->l1), s); } STATIC void xfs_bmbt_trace_record( struct xfs_btree_cur *cur, union xfs_btree_rec *rec, __uint64_t *l0, __uint64_t *l1, __uint64_t *l2) { struct xfs_bmbt_irec irec; xfs_bmbt_disk_get_all(&rec->bmbt, &irec); *l0 = irec.br_startoff; *l1 = irec.br_startblock; *l2 = irec.br_blockcount; } #endif /* XFS_BTREE_TRACE */ static const struct xfs_btree_ops xfs_bmbt_ops = { .rec_len = sizeof(xfs_bmbt_rec_t), .key_len = sizeof(xfs_bmbt_key_t), .dup_cursor = xfs_bmbt_dup_cursor, .update_cursor = xfs_bmbt_update_cursor, .alloc_block = xfs_bmbt_alloc_block, .free_block = xfs_bmbt_free_block, 
.get_maxrecs = xfs_bmbt_get_maxrecs, .get_minrecs = xfs_bmbt_get_minrecs, .get_dmaxrecs = xfs_bmbt_get_dmaxrecs, .init_key_from_rec = xfs_bmbt_init_key_from_rec, .init_rec_from_key = xfs_bmbt_init_rec_from_key, .init_rec_from_cur = xfs_bmbt_init_rec_from_cur, .init_ptr_from_cur = xfs_bmbt_init_ptr_from_cur, .key_diff = xfs_bmbt_key_diff, #ifdef DEBUG .keys_inorder = xfs_bmbt_keys_inorder, .recs_inorder = xfs_bmbt_recs_inorder, #endif #ifdef XFS_BTREE_TRACE .trace_enter = xfs_bmbt_trace_enter, .trace_cursor = xfs_bmbt_trace_cursor, .trace_key = xfs_bmbt_trace_key, .trace_record = xfs_bmbt_trace_record, #endif }; /* * Allocate a new bmap btree cursor. */ struct xfs_btree_cur * /* new bmap btree cursor */ xfs_bmbt_init_cursor( struct xfs_mount *mp, /* file system mount point */ struct xfs_trans *tp, /* transaction pointer */ struct xfs_inode *ip, /* inode owning the btree */ int whichfork) /* data or attr fork */ { struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); struct xfs_btree_cur *cur; cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP); cur->bc_tp = tp; cur->bc_mp = mp; cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1; cur->bc_btnum = XFS_BTNUM_BMAP; cur->bc_blocklog = mp->m_sb.sb_blocklog; cur->bc_ops = &xfs_bmbt_ops; cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE; cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork); cur->bc_private.b.ip = ip; cur->bc_private.b.firstblock = NULLFSBLOCK; cur->bc_private.b.flist = NULL; cur->bc_private.b.allocated = 0; cur->bc_private.b.flags = 0; cur->bc_private.b.whichfork = whichfork; return cur; } /* * Calculate number of records in a bmap btree block. */ int xfs_bmbt_maxrecs( struct xfs_mount *mp, int blocklen, int leaf) { blocklen -= XFS_BMBT_BLOCK_LEN(mp); if (leaf) return blocklen / sizeof(xfs_bmbt_rec_t); return blocklen / (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t)); } /* * Calculate number of records in a bmap btree inode root. */ int xfs_bmdr_maxrecs( struct xfs_mount *mp, int blocklen, int leaf) { blocklen -= sizeof(xfs_bmdr_block_t); if (leaf) return blocklen / sizeof(xfs_bmdr_rec_t); return blocklen / (sizeof(xfs_bmdr_key_t) + sizeof(xfs_bmdr_ptr_t)); }
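/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * The shift/mask arithmetic in xfs_bmbt_get_*() and xfs_bmbt_set_allf()
 * above all encodes one packed 128-bit extent-record layout:
 *
 *   l0 bit 63                       : extent flag (unwritten vs. normal)
 *   l0 bits 62..9                   : 54-bit br_startoff
 *   l0 bits 8..0 and l1 bits 63..21 : 52-bit br_startblock
 *   l1 bits 20..0                   : 21-bit br_blockcount
 *
 * A standalone decode under those assumptions, mirroring
 * __xfs_bmbt_get_all() for the XFS_BIG_BLKNOS case
 * (example_bmbt_decode() is a hypothetical name):
 */
#if 0
static void example_bmbt_decode(__uint64_t l0, __uint64_t l1,
				xfs_bmbt_irec_t *irec)
{
	int unwritten = (int)(l0 >> 63);	/* extent flag bit */

	irec->br_startoff = (l0 & xfs_mask64lo(63)) >> 9;
	irec->br_startblock = ((l0 & xfs_mask64lo(9)) << 43) | (l1 >> 21);
	irec->br_blockcount = l1 & xfs_mask64lo(21);
	irec->br_state = unwritten ? XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
}
#endif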
gpl-2.0
weitengchu/linux-emcraft
net/sctp/ssnmap.c
810
3510
/* SCTP kernel implementation * Copyright (c) 2003 International Business Machines, Corp. * * This file is part of the SCTP kernel implementation * * These functions manipulate sctp SSN tracker. * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, write to * the Free Software Foundation, 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <lksctp-developers@lists.sourceforge.net> * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp * * Written or modified by: * Jon Grimm <jgrimm@us.ibm.com> * * Any bugs reported given to us we will try to fix... any fixes shared will * be incorporated into the next SCTP release. */ #include <linux/types.h> #include <net/sctp/sctp.h> #include <net/sctp/sm.h> #define MAX_KMALLOC_SIZE 131072 static struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *map, __u16 in, __u16 out); /* Storage size needed for map includes 2 headers and then the * specific needs of in or out streams. */ static inline size_t sctp_ssnmap_size(__u16 in, __u16 out) { return sizeof(struct sctp_ssnmap) + (in + out) * sizeof(__u16); } /* Create a new sctp_ssnmap. * Allocate room to store at least 'len' contiguous TSNs. */ struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out, gfp_t gfp) { struct sctp_ssnmap *retval; int size; size = sctp_ssnmap_size(in, out); if (size <= MAX_KMALLOC_SIZE) retval = kmalloc(size, gfp); else retval = (struct sctp_ssnmap *) __get_free_pages(gfp, get_order(size)); if (!retval) goto fail; if (!sctp_ssnmap_init(retval, in, out)) goto fail_map; retval->malloced = 1; SCTP_DBG_OBJCNT_INC(ssnmap); return retval; fail_map: if (size <= MAX_KMALLOC_SIZE) kfree(retval); else free_pages((unsigned long)retval, get_order(size)); fail: return NULL; } /* Initialize a block of memory as a ssnmap. */ static struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *map, __u16 in, __u16 out) { memset(map, 0x00, sctp_ssnmap_size(in, out)); /* Start 'in' stream just after the map header. */ map->in.ssn = (__u16 *)&map[1]; map->in.len = in; /* Start 'out' stream just after 'in'. */ map->out.ssn = &map->in.ssn[in]; map->out.len = out; return map; } /* Clear out the ssnmap streams. */ void sctp_ssnmap_clear(struct sctp_ssnmap *map) { size_t size; size = (map->in.len + map->out.len) * sizeof(__u16); memset(map->in.ssn, 0x00, size); } /* Dispose of a ssnmap. */ void sctp_ssnmap_free(struct sctp_ssnmap *map) { if (map && map->malloced) { int size; size = sctp_ssnmap_size(map->in.len, map->out.len); if (size <= MAX_KMALLOC_SIZE) kfree(map); else free_pages((unsigned long)map, get_order(size)); SCTP_DBG_OBJCNT_DEC(ssnmap); } }
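/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * sctp_ssnmap_new() makes a single allocation whose layout follows
 * sctp_ssnmap_size(): the struct header, then in.len __u16 SSN slots,
 * then out.len slots.  For in = 4, out = 4 that is
 *
 *   [ struct sctp_ssnmap ][ in.ssn[0..3] ][ out.ssn[0..3] ]
 *
 * i.e. sizeof(struct sctp_ssnmap) + (4 + 4) * sizeof(__u16) bytes.
 * Handing out the next SSN for a stream is then a per-slot
 * post-increment, along these lines (example_ssn_next() is a
 * hypothetical name):
 */
#if 0
static inline __u16 example_ssn_next(struct sctp_stream *stream, __u16 id)
{
	return stream->ssn[id]++;	/* consume and advance stream id's SSN */
}
#endif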
gpl-2.0
spacecaker/CM7_Space_Kernel_Cooper
drivers/staging/comedi/drivers/das6402.c
810
8561
/* Some comments on the code.. - it shouldn't be necessary to use outb_p(). - ignoreirq creates a race condition. It needs to be fixed. */ /* comedi/drivers/das6402.c An experimental driver for Computerboards' DAS6402 I/O card Copyright (C) 1999 Oystein Svendsen <svendsen@pvv.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: das6402 Description: Keithley Metrabyte DAS6402 (& compatibles) Author: Oystein Svendsen <svendsen@pvv.org> Status: bitrotten Devices: [Keithley Metrabyte] DAS6402 (das6402) This driver has suffered bitrot. */ #include <linux/interrupt.h> #include "../comedidev.h" #include <linux/ioport.h> #define DAS6402_SIZE 16 #define N_WORDS (3000*64) #define STOP 0 #define START 1 #define SCANL 0x3f00 #define BYTE unsigned char #define WORD unsigned short /*----- register 8 ----*/ #define CLRINT 0x01 #define CLRXTR 0x02 #define CLRXIN 0x04 #define EXTEND 0x10 #define ARMED 0x20 /* enable conting of post sample conv */ #define POSTMODE 0x40 #define MHZ 0x80 /* 10 MHz clock */ /*---------------------*/ /*----- register 9 ----*/ #define IRQ (0x04 << 4) /* these two are */ #define IRQV 10 /* dependent on each other */ #define CONVSRC 0x03 /* trig src is Intarnal pacer */ #define BURSTEN 0x04 /* enable burst */ #define XINTE 0x08 /* use external int. trig */ #define INTE 0x80 /* enable analog interrupts */ /*---------------------*/ /*----- register 10 ---*/ #define TGEN 0x01 /* Use pin DI1 for externl trigging? 
*/ #define TGSEL 0x02 /* Use edge triggering */ #define TGPOL 0x04 /* active edge is falling */ #define PRETRIG 0x08 /* pretrig */ /*---------------------*/ /*----- register 11 ---*/ #define EOB 0x0c #define FIFOHFULL 0x08 #define GAIN 0x01 #define FIFONEPTY 0x04 #define MODE 0x10 #define SEM 0x20 #define BIP 0x40 /*---------------------*/ #define M0 0x00 #define M2 0x04 #define C0 0x00 #define C1 0x40 #define C2 0x80 #define RWLH 0x30 static int das6402_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int das6402_detach(struct comedi_device *dev); static struct comedi_driver driver_das6402 = { .driver_name = "das6402", .module = THIS_MODULE, .attach = das6402_attach, .detach = das6402_detach, }; COMEDI_INITCLEANUP(driver_das6402); struct das6402_private { int ai_bytes_to_read; int das6402_ignoreirq; }; #define devpriv ((struct das6402_private *)dev->private) static void das6402_ai_fifo_dregs(struct comedi_device *dev, struct comedi_subdevice *s); static void das6402_setcounter(struct comedi_device *dev) { BYTE p; unsigned short ctrlwrd; /* set up counter0 first, mode 0 */ p = M0 | C0 | RWLH; outb_p(p, dev->iobase + 15); ctrlwrd = 2000; p = (BYTE) (0xff & ctrlwrd); outb_p(p, dev->iobase + 12); p = (BYTE) (0xff & (ctrlwrd >> 8)); outb_p(p, dev->iobase + 12); /* set up counter1, mode 2 */ p = M2 | C1 | RWLH; outb_p(p, dev->iobase + 15); ctrlwrd = 10; p = (BYTE) (0xff & ctrlwrd); outb_p(p, dev->iobase + 13); p = (BYTE) (0xff & (ctrlwrd >> 8)); outb_p(p, dev->iobase + 13); /* set up counter1, mode 2 */ p = M2 | C2 | RWLH; outb_p(p, dev->iobase + 15); ctrlwrd = 1000; p = (BYTE) (0xff & ctrlwrd); outb_p(p, dev->iobase + 14); p = (BYTE) (0xff & (ctrlwrd >> 8)); outb_p(p, dev->iobase + 14); } static irqreturn_t intr_handler(int irq, void *d) { struct comedi_device *dev = d; struct comedi_subdevice *s = dev->subdevices; if (!dev->attached || devpriv->das6402_ignoreirq) { printk("das6402: BUG: spurious interrupt\n"); return IRQ_HANDLED; } #ifdef DEBUG printk("das6402: interrupt! das6402_irqcount=%i\n", devpriv->das6402_irqcount); printk("das6402: iobase+2=%i\n", inw_p(dev->iobase + 2)); #endif das6402_ai_fifo_dregs(dev, s); if (s->async->buf_write_count >= devpriv->ai_bytes_to_read) { outw_p(SCANL, dev->iobase + 2); /* clears the fifo */ outb(0x07, dev->iobase + 8); /* clears all flip-flops */ #ifdef DEBUG printk("das6402: Got %i samples\n\n", devpriv->das6402_wordsread - diff); #endif s->async->events |= COMEDI_CB_EOA; comedi_event(dev, s); } outb(0x01, dev->iobase + 8); /* clear only the interrupt flip-flop */ comedi_event(dev, s); return IRQ_HANDLED; } #if 0 static void das6402_ai_fifo_read(struct comedi_device *dev, short *data, int n) { int i; for (i = 0; i < n; i++) data[i] = inw(dev->iobase); } #endif static void das6402_ai_fifo_dregs(struct comedi_device *dev, struct comedi_subdevice *s) { while (1) { if (!(inb(dev->iobase + 8) & 0x01)) return; comedi_buf_put(s->async, inw(dev->iobase)); } } static int das6402_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { /* * This function should reset the board from whatever condition it * is in (i.e., acquiring data), to a non-active state. 
*/ devpriv->das6402_ignoreirq = 1; #ifdef DEBUG printk("das6402: Stopping acquisition\n"); #endif devpriv->das6402_ignoreirq = 1; outb_p(0x02, dev->iobase + 10); /* disable external trigging */ outw_p(SCANL, dev->iobase + 2); /* resets the card fifo */ outb_p(0, dev->iobase + 9); /* disables interrupts */ outw_p(SCANL, dev->iobase + 2); return 0; } #ifdef unused static int das6402_ai_mode2(struct comedi_device *dev, struct comedi_subdevice *s, comedi_trig * it) { devpriv->das6402_ignoreirq = 1; #ifdef DEBUG printk("das6402: Starting acquisition\n"); #endif outb_p(0x03, dev->iobase + 10); /* enable external trigging */ outw_p(SCANL, dev->iobase + 2); /* resets the card fifo */ outb_p(IRQ | CONVSRC | BURSTEN | INTE, dev->iobase + 9); devpriv->ai_bytes_to_read = it->n * sizeof(short); /* um... ignoreirq is a nasty race condition */ devpriv->das6402_ignoreirq = 0; outw_p(SCANL, dev->iobase + 2); return 0; } #endif static int board_init(struct comedi_device *dev) { BYTE b; devpriv->das6402_ignoreirq = 1; outb(0x07, dev->iobase + 8); /* register 11 */ outb_p(MODE, dev->iobase + 11); b = BIP | SEM | MODE | GAIN | FIFOHFULL; outb_p(b, dev->iobase + 11); /* register 8 */ outb_p(EXTEND, dev->iobase + 8); b = EXTEND | MHZ; outb_p(b, dev->iobase + 8); b = MHZ | CLRINT | CLRXTR | CLRXIN; outb_p(b, dev->iobase + 8); /* register 9 */ b = IRQ | CONVSRC | BURSTEN | INTE; outb_p(b, dev->iobase + 9); /* register 10 */ b = TGSEL | TGEN; outb_p(b, dev->iobase + 10); b = 0x07; outb_p(b, dev->iobase + 8); das6402_setcounter(dev); outw_p(SCANL, dev->iobase + 2); /* reset card fifo */ devpriv->das6402_ignoreirq = 0; return 0; } static int das6402_detach(struct comedi_device *dev) { if (dev->irq) free_irq(dev->irq, dev); if (dev->iobase) release_region(dev->iobase, DAS6402_SIZE); return 0; } static int das6402_attach(struct comedi_device *dev, struct comedi_devconfig *it) { unsigned int irq; unsigned long iobase; int ret; struct comedi_subdevice *s; dev->board_name = "das6402"; iobase = it->options[0]; if (iobase == 0) iobase = 0x300; printk("comedi%d: das6402: 0x%04lx", dev->minor, iobase); if (!request_region(iobase, DAS6402_SIZE, "das6402")) { printk(" I/O port conflict\n"); return -EIO; } dev->iobase = iobase; /* should do a probe here */ irq = it->options[0]; printk(" ( irq = %u )", irq); ret = request_irq(irq, intr_handler, 0, "das6402", dev); if (ret < 0) { printk("irq conflict\n"); return ret; } dev->irq = irq; ret = alloc_private(dev, sizeof(struct das6402_private)); if (ret < 0) return ret; ret = alloc_subdevices(dev, 1); if (ret < 0) return ret; /* ai subdevice */ s = dev->subdevices + 0; s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_GROUND; s->n_chan = 8; /* s->trig[2]=das6402_ai_mode2; */ s->cancel = das6402_ai_cancel; s->maxdata = (1 << 12) - 1; s->len_chanlist = 16; /* ? */ s->range_table = &range_unknown; board_init(dev); return 0; }
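/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * The header comment flags das6402_ignoreirq as a race: the flag is a
 * plain int read by the interrupt handler while the setup/cancel paths
 * write it with no synchronization.  One plausible fix would guard the
 * flag with a spinlock shared by both sides; everything named example_*
 * below is hypothetical:
 */
#if 0
static DEFINE_SPINLOCK(example_das6402_lock);

static irqreturn_t example_intr_handler(int irq, void *d)
{
	struct comedi_device *dev = d;
	unsigned long flags;
	int ignore;

	spin_lock_irqsave(&example_das6402_lock, flags);
	ignore = devpriv->das6402_ignoreirq;
	spin_unlock_irqrestore(&example_das6402_lock, flags);

	if (!dev->attached || ignore)
		return IRQ_NONE;	/* not ours / deliberately ignored */

	/* ... drain the FIFO and post events as intr_handler() does ... */
	return IRQ_HANDLED;
}
#endif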
gpl-2.0
nvl1109/kernel
arch/parisc/hpux/sys_hpux.c
810
27528
/* * Implements HPUX syscalls. * * Copyright (C) 1999 Matthew Wilcox <willy with parisc-linux.org> * Copyright (C) 2000 Philipp Rumpf * Copyright (C) 2000 John Marvin <jsm with parisc-linux.org> * Copyright (C) 2000 Michael Ang <mang with subcarrier.org> * Copyright (C) 2001 Nathan Neulinger <nneul at umr.edu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/capability.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/namei.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/smp_lock.h> #include <linux/syscalls.h> #include <linux/utsname.h> #include <linux/vfs.h> #include <linux/vmalloc.h> #include <asm/errno.h> #include <asm/pgalloc.h> #include <asm/uaccess.h> unsigned long hpux_brk(unsigned long addr) { /* Sigh. Looks like HP/UX libc relies on kernel bugs. */ return sys_brk(addr + PAGE_SIZE); } int hpux_sbrk(void) { return -ENOSYS; } /* Random other syscalls */ int hpux_nice(int priority_change) { return -ENOSYS; } int hpux_ptrace(void) { return -ENOSYS; } int hpux_wait(int __user *stat_loc) { return sys_waitpid(-1, stat_loc, 0); } int hpux_setpgrp(void) { return sys_setpgid(0,0); } int hpux_setpgrp3(void) { return hpux_setpgrp(); } #define _SC_CPU_VERSION 10001 #define _SC_OPEN_MAX 4 #define CPU_PA_RISC1_1 0x210 int hpux_sysconf(int which) { switch (which) { case _SC_CPU_VERSION: return CPU_PA_RISC1_1; case _SC_OPEN_MAX: return INT_MAX; default: return -EINVAL; } } /*****************************************************************************/ #define HPUX_UTSLEN 9 #define HPUX_SNLEN 15 struct hpux_utsname { char sysname[HPUX_UTSLEN]; char nodename[HPUX_UTSLEN]; char release[HPUX_UTSLEN]; char version[HPUX_UTSLEN]; char machine[HPUX_UTSLEN]; char idnumber[HPUX_SNLEN]; } ; struct hpux_ustat { int32_t f_tfree; /* total free (daddr_t) */ u_int32_t f_tinode; /* total inodes free (ino_t) */ char f_fname[6]; /* filsys name */ char f_fpack[6]; /* filsys pack name */ u_int32_t f_blksize; /* filsys block size (int) */ }; /* * HPUX's utssys() call. It's a collection of miscellaneous functions, * alas, so there's no nice way of splitting them up. */ /* This function is called from hpux_utssys(); HP-UX implements * ustat() as an option to utssys(). * * Now, struct ustat on HP-UX is exactly the same as on Linux, except * that it contains one addition field on the end, int32_t f_blksize. * So, we could have written this function to just call the Linux * sys_ustat(), (defined in linux/fs/super.c), and then just * added this additional field to the user's structure. But I figure * if we're gonna be digging through filesystem structures to get * this, we might as well just do the whole enchilada all in one go. * * So, most of this function is almost identical to sys_ustat(). 
* I have placed comments at the few lines changed or added, to * aid in porting forward if and when sys_ustat() is changed from * its form in kernel 2.2.5. */ static int hpux_ustat(dev_t dev, struct hpux_ustat __user *ubuf) { struct super_block *s; struct hpux_ustat tmp; /* Changed to hpux_ustat */ struct kstatfs sbuf; int err = -EINVAL; s = user_get_super(dev); if (s == NULL) goto out; err = vfs_statfs(s->s_root, &sbuf); drop_super(s); if (err) goto out; memset(&tmp,0,sizeof(tmp)); tmp.f_tfree = (int32_t)sbuf.f_bfree; tmp.f_tinode = (u_int32_t)sbuf.f_ffree; tmp.f_blksize = (u_int32_t)sbuf.f_bsize; /* Added this line */ err = copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0; out: return err; } /* * Wrapper for hpux statfs call. At the moment, just calls the linux native one * and ignores the extra fields at the end of the hpux statfs struct. * */ typedef int32_t hpux_fsid_t[2]; /* file system ID type */ typedef uint16_t hpux_site_t; struct hpux_statfs { int32_t f_type; /* type of info, zero for now */ int32_t f_bsize; /* fundamental file system block size */ int32_t f_blocks; /* total blocks in file system */ int32_t f_bfree; /* free block in fs */ int32_t f_bavail; /* free blocks avail to non-superuser */ int32_t f_files; /* total file nodes in file system */ int32_t f_ffree; /* free file nodes in fs */ hpux_fsid_t f_fsid; /* file system ID */ int32_t f_magic; /* file system magic number */ int32_t f_featurebits; /* file system features */ int32_t f_spare[4]; /* spare for later */ hpux_site_t f_cnode; /* cluster node where mounted */ int16_t f_pad; }; static int vfs_statfs_hpux(struct dentry *dentry, struct hpux_statfs *buf) { struct kstatfs st; int retval; retval = vfs_statfs(dentry, &st); if (retval) return retval; memset(buf, 0, sizeof(*buf)); buf->f_type = st.f_type; buf->f_bsize = st.f_bsize; buf->f_blocks = st.f_blocks; buf->f_bfree = st.f_bfree; buf->f_bavail = st.f_bavail; buf->f_files = st.f_files; buf->f_ffree = st.f_ffree; buf->f_fsid[0] = st.f_fsid.val[0]; buf->f_fsid[1] = st.f_fsid.val[1]; return 0; } /* hpux statfs */ asmlinkage long hpux_statfs(const char __user *pathname, struct hpux_statfs __user *buf) { struct path path; int error; error = user_path(pathname, &path); if (!error) { struct hpux_statfs tmp; error = vfs_statfs_hpux(path.dentry, &tmp); if (!error && copy_to_user(buf, &tmp, sizeof(tmp))) error = -EFAULT; path_put(&path); } return error; } asmlinkage long hpux_fstatfs(unsigned int fd, struct hpux_statfs __user * buf) { struct file *file; struct hpux_statfs tmp; int error; error = -EBADF; file = fget(fd); if (!file) goto out; error = vfs_statfs_hpux(file->f_path.dentry, &tmp); if (!error && copy_to_user(buf, &tmp, sizeof(tmp))) error = -EFAULT; fput(file); out: return error; } /* This function is called from hpux_utssys(); HP-UX implements * uname() as an option to utssys(). * * The form of this function is pretty much copied from sys_olduname(), * defined in linux/arch/i386/kernel/sys_i386.c. */ /* TODO: Are these put_user calls OK? Should they pass an int? * (I copied it from sys_i386.c like this.) 
*/ static int hpux_uname(struct hpux_utsname __user *name) { int error; if (!name) return -EFAULT; if (!access_ok(VERIFY_WRITE,name,sizeof(struct hpux_utsname))) return -EFAULT; down_read(&uts_sem); error = __copy_to_user(&name->sysname, &utsname()->sysname, HPUX_UTSLEN - 1); error |= __put_user(0, name->sysname + HPUX_UTSLEN - 1); error |= __copy_to_user(&name->nodename, &utsname()->nodename, HPUX_UTSLEN - 1); error |= __put_user(0, name->nodename + HPUX_UTSLEN - 1); error |= __copy_to_user(&name->release, &utsname()->release, HPUX_UTSLEN - 1); error |= __put_user(0, name->release + HPUX_UTSLEN - 1); error |= __copy_to_user(&name->version, &utsname()->version, HPUX_UTSLEN - 1); error |= __put_user(0, name->version + HPUX_UTSLEN - 1); error |= __copy_to_user(&name->machine, &utsname()->machine, HPUX_UTSLEN - 1); error |= __put_user(0, name->machine + HPUX_UTSLEN - 1); up_read(&uts_sem); /* HP-UX utsname has no domainname field. */ /* TODO: Implement idnumber!!! */ #if 0 error |= __put_user(0,name->idnumber); error |= __put_user(0,name->idnumber+HPUX_SNLEN-1); #endif error = error ? -EFAULT : 0; return error; } /* Note: HP-UX just uses the old suser() function to check perms * in this system call. We'll use capable(CAP_SYS_ADMIN). */ int hpux_utssys(char __user *ubuf, int n, int type) { int len; int error; switch( type ) { case 0: /* uname(): */ return hpux_uname((struct hpux_utsname __user *)ubuf); break ; case 1: /* Obsolete (used to be umask().) */ return -EFAULT ; break ; case 2: /* ustat(): */ return hpux_ustat(new_decode_dev(n), (struct hpux_ustat __user *)ubuf); break; case 3: /* setuname(): * * On linux (unlike HP-UX), utsname.nodename * is the same as the hostname. * * sys_sethostname() is defined in linux/kernel/sys.c. */ if (!capable(CAP_SYS_ADMIN)) return -EPERM; /* Unlike Linux, HP-UX returns an error if n==0: */ if ( n <= 0 ) return -EINVAL ; /* Unlike Linux, HP-UX truncates it if n is too big: */ len = (n <= __NEW_UTS_LEN) ? n : __NEW_UTS_LEN ; return sys_sethostname(ubuf, len); break ; case 4: /* sethostname(): * * sys_sethostname() is defined in linux/kernel/sys.c. */ if (!capable(CAP_SYS_ADMIN)) return -EPERM; /* Unlike Linux, HP-UX returns an error if n==0: */ if ( n <= 0 ) return -EINVAL ; /* Unlike Linux, HP-UX truncates it if n is too big: */ len = (n <= __NEW_UTS_LEN) ? n : __NEW_UTS_LEN ; return sys_sethostname(ubuf, len); break ; case 5: /* gethostname(): * * sys_gethostname() is defined in linux/kernel/sys.c. */ /* Unlike Linux, HP-UX returns an error if n==0: */ if ( n <= 0 ) return -EINVAL ; return sys_gethostname(ubuf, n); break ; case 6: /* Supposedly called from setuname() in libc. * TODO: When and why is this called? * Is it ever even called? * * This code should look a lot like sys_sethostname(), * defined in linux/kernel/sys.c. If that gets updated, * update this code similarly. */ if (!capable(CAP_SYS_ADMIN)) return -EPERM; /* Unlike Linux, HP-UX returns an error if n==0: */ if ( n <= 0 ) return -EINVAL ; /* Unlike Linux, HP-UX truncates it if n is too big: */ len = (n <= __NEW_UTS_LEN) ? n : __NEW_UTS_LEN ; /**/ /* TODO: print a warning about using this? */ down_write(&uts_sem); error = -EFAULT; if (!copy_from_user(utsname()->sysname, ubuf, len)) { utsname()->sysname[len] = 0; error = 0; } up_write(&uts_sem); return error; break ; case 7: /* Sets utsname.release, if you're allowed. * Undocumented. Used by swinstall to change the * OS version, during OS updates. Yuck!!! * * This code should look a lot like sys_sethostname() * in linux/kernel/sys.c. 
If that gets updated, update * this code similarly. */ if (!capable(CAP_SYS_ADMIN)) return -EPERM; /* Unlike Linux, HP-UX returns an error if n==0: */ if ( n <= 0 ) return -EINVAL ; /* Unlike Linux, HP-UX truncates it if n is too big: */ len = (n <= __NEW_UTS_LEN) ? n : __NEW_UTS_LEN ; /**/ /* TODO: print a warning about this? */ down_write(&uts_sem); error = -EFAULT; if (!copy_from_user(utsname()->release, ubuf, len)) { utsname()->release[len] = 0; error = 0; } up_write(&uts_sem); return error; break ; default: /* This system call returns -EFAULT if given an unknown type. * Why not -EINVAL? I don't know, it's just not what they did. */ return -EFAULT ; } } int hpux_getdomainname(char __user *name, int len) { int nlen; int err = -EFAULT; down_read(&uts_sem); nlen = strlen(utsname()->domainname) + 1; if (nlen < len) len = nlen; if(len > __NEW_UTS_LEN) goto done; if(copy_to_user(name, utsname()->domainname, len)) goto done; err = 0; done: up_read(&uts_sem); return err; } int hpux_pipe(int *kstack_fildes) { return do_pipe_flags(kstack_fildes, 0); } /* lies - says it works, but it really didn't lock anything */ int hpux_lockf(int fildes, int function, off_t size) { return 0; } int hpux_sysfs(int opcode, unsigned long arg1, unsigned long arg2) { char *fsname = NULL; int len = 0; int fstype; /*Unimplemented HP-UX syscall emulation. Syscall #334 (sysfs) Args: 1 80057bf4 0 400179f0 0 0 0 */ printk(KERN_DEBUG "in hpux_sysfs\n"); printk(KERN_DEBUG "hpux_sysfs called with opcode = %d\n", opcode); printk(KERN_DEBUG "hpux_sysfs called with arg1='%lx'\n", arg1); if ( opcode == 1 ) { /* GETFSIND */ char __user *user_fsname = (char __user *)arg1; len = strlen_user(user_fsname); printk(KERN_DEBUG "len of arg1 = %d\n", len); if (len == 0) return 0; fsname = kmalloc(len, GFP_KERNEL); if (!fsname) { printk(KERN_DEBUG "failed to kmalloc fsname\n"); return 0; } if (copy_from_user(fsname, user_fsname, len)) { printk(KERN_DEBUG "failed to copy_from_user fsname\n"); kfree(fsname); return 0; } /* String could be altered by userspace after strlen_user() */ fsname[len] = '\0'; printk(KERN_DEBUG "that is '%s' as (char *)\n", fsname); if ( !strcmp(fsname, "hfs") ) { fstype = 0; } else { fstype = 0; } kfree(fsname); printk(KERN_DEBUG "returning fstype=%d\n", fstype); return fstype; /* something other than default */ } return 0; } /* Table of syscall names and handle for unimplemented routines */ static const char * const syscall_names[] = { "nosys", /* 0 */ "exit", "fork", "read", "write", "open", /* 5 */ "close", "wait", "creat", "link", "unlink", /* 10 */ "execv", "chdir", "time", "mknod", "chmod", /* 15 */ "chown", "brk", "lchmod", "lseek", "getpid", /* 20 */ "mount", "umount", "setuid", "getuid", "stime", /* 25 */ "ptrace", "alarm", NULL, "pause", "utime", /* 30 */ "stty", "gtty", "access", "nice", "ftime", /* 35 */ "sync", "kill", "stat", "setpgrp3", "lstat", /* 40 */ "dup", "pipe", "times", "profil", "ki_call", /* 45 */ "setgid", "getgid", NULL, NULL, NULL, /* 50 */ "acct", "set_userthreadid", NULL, "ioctl", "reboot", /* 55 */ "symlink", "utssys", "readlink", "execve", "umask", /* 60 */ "chroot", "fcntl", "ulimit", NULL, NULL, /* 65 */ "vfork", NULL, NULL, NULL, NULL, /* 70 */ "mmap", NULL, "munmap", "mprotect", "madvise", /* 75 */ "vhangup", "swapoff", NULL, "getgroups", "setgroups", /* 80 */ "getpgrp2", "setpgid/setpgrp2", "setitimer", "wait3", "swapon", /* 85 */ "getitimer", NULL, NULL, NULL, "dup2", /* 90 */ NULL, "fstat", "select", NULL, "fsync", /* 95 */ "setpriority", NULL, NULL, NULL, "getpriority", /* 100 
*/ NULL, NULL, NULL, NULL, NULL, /* 105 */ NULL, NULL, "sigvector", "sigblock", "sigsetmask", /* 110 */ "sigpause", "sigstack", NULL, NULL, NULL, /* 115 */ "gettimeofday", "getrusage", NULL, NULL, "readv", /* 120 */ "writev", "settimeofday", "fchown", "fchmod", NULL, /* 125 */ "setresuid", "setresgid", "rename", "truncate", "ftruncate", /* 130 */ NULL, "sysconf", NULL, NULL, NULL, /* 135 */ "mkdir", "rmdir", NULL, "sigcleanup", "setcore", /* 140 */ NULL, "gethostid", "sethostid", "getrlimit", "setrlimit", /* 145 */ NULL, NULL, "quotactl", "get_sysinfo", NULL, /* 150 */ "privgrp", "rtprio", "plock", NULL, "lockf", /* 155 */ "semget", NULL, "semop", "msgget", NULL, /* 160 */ "msgsnd", "msgrcv", "shmget", NULL, "shmat", /* 165 */ "shmdt", NULL, "csp/nsp_init", "cluster", "mkrnod", /* 170 */ "test", "unsp_open", NULL, "getcontext", "osetcontext", /* 175 */ "bigio", "pipenode", "lsync", "getmachineid", "cnodeid/mysite", /* 180 */ "cnodes/sitels", "swapclients", "rmtprocess", "dskless_stats", "sigprocmask", /* 185 */ "sigpending", "sigsuspend", "sigaction", NULL, "nfssvc", /* 190 */ "getfh", "getdomainname", "setdomainname", "async_daemon", "getdirentries", /* 195 */ NULL, NULL, "vfsmount", NULL, "waitpid", /* 200 */ NULL, NULL, NULL, NULL, NULL, /* 205 */ NULL, NULL, NULL, NULL, NULL, /* 210 */ NULL, NULL, NULL, NULL, NULL, /* 215 */ NULL, NULL, NULL, NULL, NULL, /* 220 */ NULL, NULL, NULL, "sigsetreturn", "sigsetstatemask", /* 225 */ "bfactl", "cs", "cds", NULL, "pathconf", /* 230 */ "fpathconf", NULL, NULL, "nfs_fcntl", "ogetacl", /* 235 */ "ofgetacl", "osetacl", "ofsetacl", "pstat", "getaudid", /* 240 */ "setaudid", "getaudproc", "setaudproc", "getevent", "setevent", /* 245 */ "audwrite", "audswitch", "audctl", "ogetaccess", "fsctl", /* 250 */ "ulconnect", "ulcontrol", "ulcreate", "uldest", "ulrecv", /* 255 */ "ulrecvcn", "ulsend", "ulshutdown", "swapfs", "fss", /* 260 */ NULL, NULL, NULL, NULL, NULL, /* 265 */ NULL, "tsync", "getnumfds", "poll", "getmsg", /* 270 */ "putmsg", "fchdir", "getmount_cnt", "getmount_entry", "accept", /* 275 */ "bind", "connect", "getpeername", "getsockname", "getsockopt", /* 280 */ "listen", "recv", "recvfrom", "recvmsg", "send", /* 285 */ "sendmsg", "sendto", "setsockopt", "shutdown", "socket", /* 290 */ "socketpair", "proc_open", "proc_close", "proc_send", "proc_recv", /* 295 */ "proc_sendrecv", "proc_syscall", "ipccreate", "ipcname", "ipcnamerase", /* 300 */ "ipclookup", "ipcselect", "ipcconnect", "ipcrecvcn", "ipcsend", /* 305 */ "ipcrecv", "ipcgetnodename", "ipcsetnodename", "ipccontrol", "ipcshutdown", /* 310 */ "ipcdest", "semctl", "msgctl", "shmctl", "mpctl", /* 315 */ "exportfs", "getpmsg", "putpmsg", "strioctl", "msync", /* 320 */ "msleep", "mwakeup", "msem_init", "msem_remove", "adjtime", /* 325 */ "kload", "fattach", "fdetach", "serialize", "statvfs", /* 330 */ "fstatvfs", "lchown", "getsid", "sysfs", NULL, /* 335 */ NULL, "sched_setparam", "sched_getparam", "sched_setscheduler", "sched_getscheduler", /* 340 */ "sched_yield", "sched_get_priority_max", "sched_get_priority_min", "sched_rr_get_interval", "clock_settime", /* 345 */ "clock_gettime", "clock_getres", "timer_create", "timer_delete", "timer_settime", /* 350 */ "timer_gettime", "timer_getoverrun", "nanosleep", "toolbox", NULL, /* 355 */ "getdents", "getcontext", "sysinfo", "fcntl64", "ftruncate64", /* 360 */ "fstat64", "getdirentries64", "getrlimit64", "lockf64", "lseek64", /* 365 */ "lstat64", "mmap64", "setrlimit64", "stat64", "truncate64", /* 370 */ "ulimit64", NULL, NULL, NULL, NULL, /* 375 
*/ NULL, NULL, NULL, NULL, "setcontext", /* 380 */ "sigaltstack", "waitid", "setpgrp", "recvmsg2", "sendmsg2", /* 385 */ "socket2", "socketpair2", "setregid", "lwp_create", "lwp_terminate", /* 390 */ "lwp_wait", "lwp_suspend", "lwp_resume", "lwp_self", "lwp_abort_syscall", /* 395 */ "lwp_info", "lwp_kill", "ksleep", "kwakeup", "ksleep_abort", /* 400 */ "lwp_proc_info", "lwp_exit", "lwp_continue", "getacl", "fgetacl", /* 405 */ "setacl", "fsetacl", "getaccess", "lwp_mutex_init", "lwp_mutex_lock_sys", /* 410 */ "lwp_mutex_unlock", "lwp_cond_init", "lwp_cond_signal", "lwp_cond_broadcast", "lwp_cond_wait_sys", /* 415 */ "lwp_getscheduler", "lwp_setscheduler", "lwp_getprivate", "lwp_setprivate", "lwp_detach", /* 420 */ "mlock", "munlock", "mlockall", "munlockall", "shm_open", /* 425 */ "shm_unlink", "sigqueue", "sigwaitinfo", "sigtimedwait", "sigwait", /* 430 */ "aio_read", "aio_write", "lio_listio", "aio_error", "aio_return", /* 435 */ "aio_cancel", "aio_suspend", "aio_fsync", "mq_open", "mq_unlink", /* 440 */ "mq_send", "mq_receive", "mq_notify", "mq_setattr", "mq_getattr", /* 445 */ "ksem_open", "ksem_unlink", "ksem_close", "ksem_destroy", "lw_sem_incr", /* 450 */ "lw_sem_decr", "lw_sem_read", "mq_close", }; static const int syscall_names_max = 453; int hpux_unimplemented(unsigned long arg1,unsigned long arg2,unsigned long arg3, unsigned long arg4,unsigned long arg5,unsigned long arg6, unsigned long arg7,unsigned long sc_num) { /* NOTE: sc_num trashes arg8 for the few syscalls that actually * have a valid 8th argument. */ const char *name = NULL; if ( sc_num <= syscall_names_max && sc_num >= 0 ) { name = syscall_names[sc_num]; } if ( name ) { printk(KERN_DEBUG "Unimplemented HP-UX syscall emulation. Syscall #%lu (%s)\n", sc_num, name); } else { printk(KERN_DEBUG "Unimplemented unknown HP-UX syscall emulation. Syscall #%lu\n", sc_num); } printk(KERN_DEBUG " Args: %lx %lx %lx %lx %lx %lx %lx\n", arg1, arg2, arg3, arg4, arg5, arg6, arg7); return -ENOSYS; }
gpl-2.0
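The hpux_uname() routine above fills each fixed-width HPUX_UTSLEN field with __copy_to_user() and then forces a NUL into the last byte with __put_user(). The user-space sketch below shows the same truncate-and-terminate convention on ordinary memory; the demo_utsname struct, the 9-byte UTSLEN width, and the put_field() helper are illustrative stand-ins, not the real HP-UX ABI.

/*
 * Minimal user-space sketch (not kernel code) of the fixed-width
 * field convention in hpux_uname(): copy at most UTSLEN-1 bytes,
 * then unconditionally terminate the last byte.
 */
#include <stdio.h>
#include <string.h>

#define UTSLEN 9	/* hypothetical field width for illustration */

struct demo_utsname {
	char sysname[UTSLEN];
	char nodename[UTSLEN];
};

static void put_field(char *dst, const char *src)
{
	strncpy(dst, src, UTSLEN - 1);	/* truncates, pads short strings */
	dst[UTSLEN - 1] = '\0';		/* mirrors the __put_user(0, ...) */
}

int main(void)
{
	struct demo_utsname u;

	put_field(u.sysname, "HP-UX");
	put_field(u.nodename, "averyverylongnodename"); /* silently truncated */
	printf("%s %s\n", u.sysname, u.nodename);
	return 0;
}

Run against the over-long nodename, this prints the field cut to eight characters plus the forced terminator, which is exactly the behaviour the kernel routine guarantees for userspace.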
adafruit/adafruit-raspberrypi-linux
arch/arm/mach-imx/devices/platform-mxc_nand.c
1322
2165
/* * Copyright (C) 2009-2010 Pengutronix * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de> * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ #include <asm/sizes.h> #include "../hardware.h" #include "devices-common.h" #define imx_mxc_nand_data_entry_single(soc, _devid, _size) \ { \ .devid = _devid, \ .iobase = soc ## _NFC_BASE_ADDR, \ .iosize = _size, \ .irq = soc ## _INT_NFC \ } #define imx_mxc_nandv3_data_entry_single(soc, _devid, _size) \ { \ .devid = _devid, \ .id = -1, \ .iobase = soc ## _NFC_BASE_ADDR, \ .iosize = _size, \ .axibase = soc ## _NFC_AXI_BASE_ADDR, \ .irq = soc ## _INT_NFC \ } #ifdef CONFIG_SOC_IMX21 const struct imx_mxc_nand_data imx21_mxc_nand_data __initconst = imx_mxc_nand_data_entry_single(MX21, "imx21-nand", SZ_4K); #endif /* ifdef CONFIG_SOC_IMX21 */ #ifdef CONFIG_SOC_IMX27 const struct imx_mxc_nand_data imx27_mxc_nand_data __initconst = imx_mxc_nand_data_entry_single(MX27, "imx27-nand", SZ_4K); #endif /* ifdef CONFIG_SOC_IMX27 */ #ifdef CONFIG_SOC_IMX31 const struct imx_mxc_nand_data imx31_mxc_nand_data __initconst = imx_mxc_nand_data_entry_single(MX31, "imx27-nand", SZ_4K); #endif #ifdef CONFIG_SOC_IMX35 const struct imx_mxc_nand_data imx35_mxc_nand_data __initconst = imx_mxc_nand_data_entry_single(MX35, "imx25-nand", SZ_8K); #endif struct platform_device *__init imx_add_mxc_nand( const struct imx_mxc_nand_data *data, const struct mxc_nand_platform_data *pdata) { /* AXI has to come first, that's how the mxc_nand driver expect it */ struct resource res[] = { { .start = data->iobase, .end = data->iobase + data->iosize - 1, .flags = IORESOURCE_MEM, }, { .start = data->irq, .end = data->irq, .flags = IORESOURCE_IRQ, }, { .start = data->axibase, .end = data->axibase + SZ_16K - 1, .flags = IORESOURCE_MEM, }, }; return imx_add_platform_device(data->devid, data->id, res, ARRAY_SIZE(res) - !data->axibase, pdata, sizeof(*pdata)); }
gpl-2.0
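imx_add_mxc_nand() above places the optional AXI window last in res[] and registers ARRAY_SIZE(res) - !data->axibase entries, so SoCs without an AXI base simply drop the trailing resource. Below is a stand-alone sketch of that counting trick; the res struct and the addresses are made up for illustration.

/*
 * Sketch of the optional-trailing-resource trick: !axibase is 1 when
 * axibase is 0, so the last array entry is excluded from the count.
 */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct res { unsigned long start, end; };

int main(void)
{
	unsigned long axibase = 0;	/* pretend this SoC has no AXI window */
	struct res res[] = {
		{ 0xd8000000, 0xd8000fff },	/* register window */
		{ 33, 33 },			/* irq */
		{ axibase, axibase + 0x3fff },	/* optional AXI window, last */
	};
	unsigned int nres = ARRAY_SIZE(res) - !axibase;

	printf("registering %u of %zu resources\n", nres, ARRAY_SIZE(res));
	return 0;
}

With axibase zero this registers two resources, with a real base all three; keeping the optional entry last is what makes the one-subtraction trick safe.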
patjak/linux-stable
fs/isofs/rock.c
1834
17925
/* * linux/fs/isofs/rock.c * * (C) 1992, 1993 Eric Youngdale * * Rock Ridge Extensions to iso9660 */ #include <linux/slab.h> #include <linux/pagemap.h> #include "isofs.h" #include "rock.h" /* * These functions are designed to read the system areas of a directory record * and extract relevant information. There are different functions provided * depending upon what information we need at the time. One function fills * out an inode structure, a second one extracts a filename, a third one * returns a symbolic link name, and a fourth one returns the extent number * for the file. */ #define SIG(A,B) ((A) | ((B) << 8)) /* isonum_721() */ struct rock_state { void *buffer; unsigned char *chr; int len; int cont_size; int cont_extent; int cont_offset; struct inode *inode; }; /* * This is a way of ensuring that we have something in the system * use fields that is compatible with Rock Ridge. Return zero on success. */ static int check_sp(struct rock_ridge *rr, struct inode *inode) { if (rr->u.SP.magic[0] != 0xbe) return -1; if (rr->u.SP.magic[1] != 0xef) return -1; ISOFS_SB(inode->i_sb)->s_rock_offset = rr->u.SP.skip; return 0; } static void setup_rock_ridge(struct iso_directory_record *de, struct inode *inode, struct rock_state *rs) { rs->len = sizeof(struct iso_directory_record) + de->name_len[0]; if (rs->len & 1) (rs->len)++; rs->chr = (unsigned char *)de + rs->len; rs->len = *((unsigned char *)de) - rs->len; if (rs->len < 0) rs->len = 0; if (ISOFS_SB(inode->i_sb)->s_rock_offset != -1) { rs->len -= ISOFS_SB(inode->i_sb)->s_rock_offset; rs->chr += ISOFS_SB(inode->i_sb)->s_rock_offset; if (rs->len < 0) rs->len = 0; } } static void init_rock_state(struct rock_state *rs, struct inode *inode) { memset(rs, 0, sizeof(*rs)); rs->inode = inode; } /* * Returns 0 if the caller should continue scanning, 1 if the scan must end * and -ve on error. */ static int rock_continue(struct rock_state *rs) { int ret = 1; int blocksize = 1 << rs->inode->i_blkbits; const int min_de_size = offsetof(struct rock_ridge, u); kfree(rs->buffer); rs->buffer = NULL; if ((unsigned)rs->cont_offset > blocksize - min_de_size || (unsigned)rs->cont_size > blocksize || (unsigned)(rs->cont_offset + rs->cont_size) > blocksize) { printk(KERN_NOTICE "rock: corrupted directory entry. " "extent=%d, offset=%d, size=%d\n", rs->cont_extent, rs->cont_offset, rs->cont_size); ret = -EIO; goto out; } if (rs->cont_extent) { struct buffer_head *bh; rs->buffer = kmalloc(rs->cont_size, GFP_KERNEL); if (!rs->buffer) { ret = -ENOMEM; goto out; } ret = -EIO; bh = sb_bread(rs->inode->i_sb, rs->cont_extent); if (bh) { memcpy(rs->buffer, bh->b_data + rs->cont_offset, rs->cont_size); put_bh(bh); rs->chr = rs->buffer; rs->len = rs->cont_size; rs->cont_extent = 0; rs->cont_size = 0; rs->cont_offset = 0; return 0; } printk("Unable to read rock-ridge attributes\n"); } out: kfree(rs->buffer); rs->buffer = NULL; return ret; } /* * We think there's a record of type `sig' at rs->chr. Parse the signature * and make sure that there's really room for a record of that type. 
*/ static int rock_check_overflow(struct rock_state *rs, int sig) { int len; switch (sig) { case SIG('S', 'P'): len = sizeof(struct SU_SP_s); break; case SIG('C', 'E'): len = sizeof(struct SU_CE_s); break; case SIG('E', 'R'): len = sizeof(struct SU_ER_s); break; case SIG('R', 'R'): len = sizeof(struct RR_RR_s); break; case SIG('P', 'X'): len = sizeof(struct RR_PX_s); break; case SIG('P', 'N'): len = sizeof(struct RR_PN_s); break; case SIG('S', 'L'): len = sizeof(struct RR_SL_s); break; case SIG('N', 'M'): len = sizeof(struct RR_NM_s); break; case SIG('C', 'L'): len = sizeof(struct RR_CL_s); break; case SIG('P', 'L'): len = sizeof(struct RR_PL_s); break; case SIG('T', 'F'): len = sizeof(struct RR_TF_s); break; case SIG('Z', 'F'): len = sizeof(struct RR_ZF_s); break; default: len = 0; break; } len += offsetof(struct rock_ridge, u); if (len > rs->len) { printk(KERN_NOTICE "rock: directory entry would overflow " "storage\n"); printk(KERN_NOTICE "rock: sig=0x%02x, size=%d, remaining=%d\n", sig, len, rs->len); return -EIO; } return 0; } /* * return length of name field; 0: not found, -1: to be ignored */ int get_rock_ridge_filename(struct iso_directory_record *de, char *retname, struct inode *inode) { struct rock_state rs; struct rock_ridge *rr; int sig; int retnamlen = 0; int truncate = 0; int ret = 0; if (!ISOFS_SB(inode->i_sb)->s_rock) return 0; *retname = 0; init_rock_state(&rs, inode); setup_rock_ridge(de, inode, &rs); repeat: while (rs.len > 2) { /* There may be one byte for padding somewhere */ rr = (struct rock_ridge *)rs.chr; /* * Ignore rock ridge info if rr->len is out of range, but * don't return -EIO because that would make the file * invisible. */ if (rr->len < 3) goto out; /* Something got screwed up here */ sig = isonum_721(rs.chr); if (rock_check_overflow(&rs, sig)) goto eio; rs.chr += rr->len; rs.len -= rr->len; /* * As above, just ignore the rock ridge info if rr->len * is bogus. */ if (rs.len < 0) goto out; /* Something got screwed up here */ switch (sig) { case SIG('R', 'R'): if ((rr->u.RR.flags[0] & RR_NM) == 0) goto out; break; case SIG('S', 'P'): if (check_sp(rr, inode)) goto out; break; case SIG('C', 'E'): rs.cont_extent = isonum_733(rr->u.CE.extent); rs.cont_offset = isonum_733(rr->u.CE.offset); rs.cont_size = isonum_733(rr->u.CE.size); break; case SIG('N', 'M'): if (truncate) break; if (rr->len < 5) break; /* * If the flags are 2 or 4, this indicates '.' or '..'. * We don't want to do anything with this, because it * screws up the code that calls us. We don't really * care anyways, since we can just use the non-RR * name. 
*/ if (rr->u.NM.flags & 6) break; if (rr->u.NM.flags & ~1) { printk("Unsupported NM flag settings (%d)\n", rr->u.NM.flags); break; } if ((strlen(retname) + rr->len - 5) >= 254) { truncate = 1; break; } strncat(retname, rr->u.NM.name, rr->len - 5); retnamlen += rr->len - 5; break; case SIG('R', 'E'): kfree(rs.buffer); return -1; default: break; } } ret = rock_continue(&rs); if (ret == 0) goto repeat; if (ret == 1) return retnamlen; /* If 0, this file did not have a NM field */ out: kfree(rs.buffer); return ret; eio: ret = -EIO; goto out; } static int parse_rock_ridge_inode_internal(struct iso_directory_record *de, struct inode *inode, int regard_xa) { int symlink_len = 0; int cnt, sig; struct inode *reloc; struct rock_ridge *rr; int rootflag; struct rock_state rs; int ret = 0; if (!ISOFS_SB(inode->i_sb)->s_rock) return 0; init_rock_state(&rs, inode); setup_rock_ridge(de, inode, &rs); if (regard_xa) { rs.chr += 14; rs.len -= 14; if (rs.len < 0) rs.len = 0; } repeat: while (rs.len > 2) { /* There may be one byte for padding somewhere */ rr = (struct rock_ridge *)rs.chr; /* * Ignore rock ridge info if rr->len is out of range, but * don't return -EIO because that would make the file * invisible. */ if (rr->len < 3) goto out; /* Something got screwed up here */ sig = isonum_721(rs.chr); if (rock_check_overflow(&rs, sig)) goto eio; rs.chr += rr->len; rs.len -= rr->len; /* * As above, just ignore the rock ridge info if rr->len * is bogus. */ if (rs.len < 0) goto out; /* Something got screwed up here */ switch (sig) { #ifndef CONFIG_ZISOFS /* No flag for SF or ZF */ case SIG('R', 'R'): if ((rr->u.RR.flags[0] & (RR_PX | RR_TF | RR_SL | RR_CL)) == 0) goto out; break; #endif case SIG('S', 'P'): if (check_sp(rr, inode)) goto out; break; case SIG('C', 'E'): rs.cont_extent = isonum_733(rr->u.CE.extent); rs.cont_offset = isonum_733(rr->u.CE.offset); rs.cont_size = isonum_733(rr->u.CE.size); break; case SIG('E', 'R'): ISOFS_SB(inode->i_sb)->s_rock = 1; printk(KERN_DEBUG "ISO 9660 Extensions: "); { int p; for (p = 0; p < rr->u.ER.len_id; p++) printk("%c", rr->u.ER.data[p]); } printk("\n"); break; case SIG('P', 'X'): inode->i_mode = isonum_733(rr->u.PX.mode); set_nlink(inode, isonum_733(rr->u.PX.n_links)); i_uid_write(inode, isonum_733(rr->u.PX.uid)); i_gid_write(inode, isonum_733(rr->u.PX.gid)); break; case SIG('P', 'N'): { int high, low; high = isonum_733(rr->u.PN.dev_high); low = isonum_733(rr->u.PN.dev_low); /* * The Rock Ridge standard specifies that if * sizeof(dev_t) <= 4, then the high field is * unused, and the device number is completely * stored in the low field. Some writers may * ignore this subtlety, * and as a result we test to see if the entire * device number is * stored in the low field, and use that. */ if ((low & ~0xff) && high == 0) { inode->i_rdev = MKDEV(low >> 8, low & 0xff); } else { inode->i_rdev = MKDEV(high, low); } } break; case SIG('T', 'F'): /* * Some RRIP writers incorrectly place ctime in the * TF_CREATE field. Try to handle this correctly for * either case. 
*/ /* Rock ridge never appears on a High Sierra disk */ cnt = 0; if (rr->u.TF.flags & TF_CREATE) { inode->i_ctime.tv_sec = iso_date(rr->u.TF.times[cnt++].time, 0); inode->i_ctime.tv_nsec = 0; } if (rr->u.TF.flags & TF_MODIFY) { inode->i_mtime.tv_sec = iso_date(rr->u.TF.times[cnt++].time, 0); inode->i_mtime.tv_nsec = 0; } if (rr->u.TF.flags & TF_ACCESS) { inode->i_atime.tv_sec = iso_date(rr->u.TF.times[cnt++].time, 0); inode->i_atime.tv_nsec = 0; } if (rr->u.TF.flags & TF_ATTRIBUTES) { inode->i_ctime.tv_sec = iso_date(rr->u.TF.times[cnt++].time, 0); inode->i_ctime.tv_nsec = 0; } break; case SIG('S', 'L'): { int slen; struct SL_component *slp; struct SL_component *oldslp; slen = rr->len - 5; slp = &rr->u.SL.link; inode->i_size = symlink_len; while (slen > 1) { rootflag = 0; switch (slp->flags & ~1) { case 0: inode->i_size += slp->len; break; case 2: inode->i_size += 1; break; case 4: inode->i_size += 2; break; case 8: rootflag = 1; inode->i_size += 1; break; default: printk("Symlink component flag " "not implemented\n"); } slen -= slp->len + 2; oldslp = slp; slp = (struct SL_component *) (((char *)slp) + slp->len + 2); if (slen < 2) { if (((rr->u.SL. flags & 1) != 0) && ((oldslp-> flags & 1) == 0)) inode->i_size += 1; break; } /* * If this component record isn't * continued, then append a '/'. */ if (!rootflag && (oldslp->flags & 1) == 0) inode->i_size += 1; } } symlink_len = inode->i_size; break; case SIG('R', 'E'): printk(KERN_WARNING "Attempt to read inode for " "relocated directory\n"); goto out; case SIG('C', 'L'): ISOFS_I(inode)->i_first_extent = isonum_733(rr->u.CL.location); reloc = isofs_iget(inode->i_sb, ISOFS_I(inode)->i_first_extent, 0); if (IS_ERR(reloc)) { ret = PTR_ERR(reloc); goto out; } inode->i_mode = reloc->i_mode; set_nlink(inode, reloc->i_nlink); inode->i_uid = reloc->i_uid; inode->i_gid = reloc->i_gid; inode->i_rdev = reloc->i_rdev; inode->i_size = reloc->i_size; inode->i_blocks = reloc->i_blocks; inode->i_atime = reloc->i_atime; inode->i_ctime = reloc->i_ctime; inode->i_mtime = reloc->i_mtime; iput(reloc); break; #ifdef CONFIG_ZISOFS case SIG('Z', 'F'): { int algo; if (ISOFS_SB(inode->i_sb)->s_nocompress) break; algo = isonum_721(rr->u.ZF.algorithm); if (algo == SIG('p', 'z')) { int block_shift = isonum_711(&rr->u.ZF.parms[1]); if (block_shift > 17) { printk(KERN_WARNING "isofs: " "Can't handle ZF block " "size of 2^%d\n", block_shift); } else { /* * Note: we don't change * i_blocks here */ ISOFS_I(inode)->i_file_format = isofs_file_compressed; /* * Parameters to compression * algorithm (header size, * block size) */ ISOFS_I(inode)->i_format_parm[0] = isonum_711(&rr->u.ZF.parms[0]); ISOFS_I(inode)->i_format_parm[1] = isonum_711(&rr->u.ZF.parms[1]); inode->i_size = isonum_733(rr->u.ZF. 
real_size); } } else { printk(KERN_WARNING "isofs: Unknown ZF compression " "algorithm: %c%c\n", rr->u.ZF.algorithm[0], rr->u.ZF.algorithm[1]); } break; } #endif default: break; } } ret = rock_continue(&rs); if (ret == 0) goto repeat; if (ret == 1) ret = 0; out: kfree(rs.buffer); return ret; eio: ret = -EIO; goto out; } static char *get_symlink_chunk(char *rpnt, struct rock_ridge *rr, char *plimit) { int slen; int rootflag; struct SL_component *oldslp; struct SL_component *slp; slen = rr->len - 5; slp = &rr->u.SL.link; while (slen > 1) { rootflag = 0; switch (slp->flags & ~1) { case 0: if (slp->len > plimit - rpnt) return NULL; memcpy(rpnt, slp->text, slp->len); rpnt += slp->len; break; case 2: if (rpnt >= plimit) return NULL; *rpnt++ = '.'; break; case 4: if (2 > plimit - rpnt) return NULL; *rpnt++ = '.'; *rpnt++ = '.'; break; case 8: if (rpnt >= plimit) return NULL; rootflag = 1; *rpnt++ = '/'; break; default: printk("Symlink component flag not implemented (%d)\n", slp->flags); } slen -= slp->len + 2; oldslp = slp; slp = (struct SL_component *)((char *)slp + slp->len + 2); if (slen < 2) { /* * If there is another SL record, and this component * record isn't continued, then add a slash. */ if ((!rootflag) && (rr->u.SL.flags & 1) && !(oldslp->flags & 1)) { if (rpnt >= plimit) return NULL; *rpnt++ = '/'; } break; } /* * If this component record isn't continued, then append a '/'. */ if (!rootflag && !(oldslp->flags & 1)) { if (rpnt >= plimit) return NULL; *rpnt++ = '/'; } } return rpnt; } int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode) { int result = parse_rock_ridge_inode_internal(de, inode, 0); /* * if rockridge flag was reset and we didn't look for attributes * behind eventual XA attributes, have a look there */ if ((ISOFS_SB(inode->i_sb)->s_rock_offset == -1) && (ISOFS_SB(inode->i_sb)->s_rock == 2)) { result = parse_rock_ridge_inode_internal(de, inode, 14); } return result; } /* * readpage() for symlinks: reads symlink contents into the page and either * makes it uptodate and returns 0 or returns error (-EIO) */ static int rock_ridge_symlink_readpage(struct file *file, struct page *page) { struct inode *inode = page->mapping->host; struct iso_inode_info *ei = ISOFS_I(inode); struct isofs_sb_info *sbi = ISOFS_SB(inode->i_sb); char *link = kmap(page); unsigned long bufsize = ISOFS_BUFFER_SIZE(inode); struct buffer_head *bh; char *rpnt = link; unsigned char *pnt; struct iso_directory_record *raw_de; unsigned long block, offset; int sig; struct rock_ridge *rr; struct rock_state rs; int ret; if (!sbi->s_rock) goto error; init_rock_state(&rs, inode); block = ei->i_iget5_block; bh = sb_bread(inode->i_sb, block); if (!bh) goto out_noread; offset = ei->i_iget5_offset; pnt = (unsigned char *)bh->b_data + offset; raw_de = (struct iso_directory_record *)pnt; /* * If we go past the end of the buffer, there is some sort of error. */ if (offset + *pnt > bufsize) goto out_bad_span; /* * Now test for possible Rock Ridge extensions which will override * some of these numbers in the inode structure. 
*/ setup_rock_ridge(raw_de, inode, &rs); repeat: while (rs.len > 2) { /* There may be one byte for padding somewhere */ rr = (struct rock_ridge *)rs.chr; if (rr->len < 3) goto out; /* Something got screwed up here */ sig = isonum_721(rs.chr); if (rock_check_overflow(&rs, sig)) goto out; rs.chr += rr->len; rs.len -= rr->len; if (rs.len < 0) goto out; /* corrupted isofs */ switch (sig) { case SIG('R', 'R'): if ((rr->u.RR.flags[0] & RR_SL) == 0) goto out; break; case SIG('S', 'P'): if (check_sp(rr, inode)) goto out; break; case SIG('S', 'L'): rpnt = get_symlink_chunk(rpnt, rr, link + (PAGE_SIZE - 1)); if (rpnt == NULL) goto out; break; case SIG('C', 'E'): /* This tells is if there is a continuation record */ rs.cont_extent = isonum_733(rr->u.CE.extent); rs.cont_offset = isonum_733(rr->u.CE.offset); rs.cont_size = isonum_733(rr->u.CE.size); default: break; } } ret = rock_continue(&rs); if (ret == 0) goto repeat; if (ret < 0) goto fail; if (rpnt == link) goto fail; brelse(bh); *rpnt = '\0'; SetPageUptodate(page); kunmap(page); unlock_page(page); return 0; /* error exit from macro */ out: kfree(rs.buffer); goto fail; out_noread: printk("unable to read i-node block"); goto fail; out_bad_span: printk("symlink spans iso9660 blocks\n"); fail: brelse(bh); error: SetPageError(page); kunmap(page); unlock_page(page); return -EIO; } const struct address_space_operations isofs_symlink_aops = { .readpage = rock_ridge_symlink_readpage };
gpl-2.0
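rock.c above matches system-use records by packing their two-character signatures little-endian with SIG(A,B) and comparing against isonum_721() of the on-disc bytes; because both sides use the same byte order, a plain switch works. Here is a user-space sketch with get_le16() as a stand-in for isonum_721(); the raw[] bytes are a fabricated record head, not real disc data.

/*
 * Sketch of Rock Ridge signature matching: SIG('S','P') and the
 * little-endian read of the first two record bytes yield the same
 * integer, so the dispatch switch can compare them directly.
 */
#include <stdio.h>

#define SIG(A, B) ((A) | ((B) << 8))

/* ISO 9660 7.2.1 field: 16-bit little-endian, like isonum_721() */
static int get_le16(const unsigned char *p)
{
	return p[0] | (p[1] << 8);
}

int main(void)
{
	unsigned char raw[] = { 'S', 'P', 7, 1 };	/* fabricated SP head */

	switch (get_le16(raw)) {
	case SIG('S', 'P'):
		printf("SP record, len=%d\n", raw[2]);
		break;
	default:
		printf("unknown record\n");
	}
	return 0;
}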
Jovy23/N920TUVU2COJ5_Kernel
drivers/staging/comedi/drivers/pcmda12.c
2090
6052
/* comedi/drivers/pcmda12.c Driver for Winsystems PC-104 based PCM-D/A-12 8-channel AO board. COMEDI - Linux Control and Measurement Device Interface Copyright (C) 2006 Calin A. Culianu <calin@ajvar.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: pcmda12 Description: A driver for the Winsystems PCM-D/A-12 Devices: [Winsystems] PCM-D/A-12 (pcmda12) Author: Calin Culianu <calin@ajvar.org> Updated: Fri, 13 Jan 2006 12:01:01 -0500 Status: works A driver for the relatively straightforward-to-program PCM-D/A-12. This board doesn't support commands, and the only way to set its analog output range is to jumper the board. As such, comedi_data_write() ignores the range value specified. The board uses 16 consecutive I/O addresses starting at the I/O port base address. Each address corresponds to the LSB then MSB of a particular channel from 0-7. Note that the board is not ISA-PNP capable and thus needs the I/O port comedi_config parameter. Note that passing a nonzero value as the second config option will enable "simultaneous xfer" mode for this board, in which AO writes will not take effect until a subsequent read of any AO channel. This is so that one can speed up programming by preloading all AO registers with values before simultaneously setting them to take effect with one read command. Configuration Options: [0] - I/O port base address [1] - Do Simultaneous Xfer (see description) */ #include "../comedidev.h" #define CHANS 8 #define IOSIZE 16 #define LSB(x) ((unsigned char)((x) & 0xff)) #define MSB(x) ((unsigned char)((((unsigned short)(x))>>8) & 0xff)) #define LSB_PORT(chan) (dev->iobase + (chan)*2) #define MSB_PORT(chan) (LSB_PORT(chan)+1) #define BITS 12 /* note these have no effect and are merely here for reference.. these are configured by jumpering the board! */ static const struct comedi_lrange pcmda12_ranges = { 3, { UNI_RANGE(5), UNI_RANGE(10), BIP_RANGE(5) } }; struct pcmda12_private { unsigned int ao_readback[CHANS]; int simultaneous_xfer_mode; }; static void zero_chans(struct comedi_device *dev) { /* sets up an ASIC chip to defaults */ int i; for (i = 0; i < CHANS; ++i) { /* /\* do this as one instruction?? *\/ */ /* outw(0, LSB_PORT(chan)); */ outb(0, LSB_PORT(i)); outb(0, MSB_PORT(i)); } inb(LSB_PORT(0)); /* update chans. */ } static int ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct pcmda12_private *devpriv = dev->private; int i; int chan = CR_CHAN(insn->chanspec); /* Writing a list of values to an AO channel is probably not * very useful, but that's how the interface is defined. */ for (i = 0; i < insn->n; ++i) { /* /\* do this as one instruction?? *\/ */ /* outw(data[i], LSB_PORT(chan)); */ /* Need to do this as two instructions due to 8-bit bus?? 
*/ /* first, load the low byte */ outb(LSB(data[i]), LSB_PORT(chan)); /* next, write the high byte */ outb(MSB(data[i]), MSB_PORT(chan)); /* save shadow register */ devpriv->ao_readback[chan] = data[i]; if (!devpriv->simultaneous_xfer_mode) inb(LSB_PORT(chan)); } /* return the number of samples written */ return i; } /* AO subdevices should have a read insn as well as a write insn. Usually this means copying a value stored in devpriv->ao_readback. However, since this driver supports simultaneous xfer then sometimes this function actually accomplishes work. Simultaneaous xfer mode is accomplished by loading ALL the values you want for AO in all the channels, then READing off one of the AO registers to initiate the instantaneous simultaneous update of all DAC outputs, which makes all AO channels update simultaneously. This is useful for some control applications, I would imagine. */ static int ao_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct pcmda12_private *devpriv = dev->private; int i; int chan = CR_CHAN(insn->chanspec); for (i = 0; i < insn->n; i++) { if (devpriv->simultaneous_xfer_mode) inb(LSB_PORT(chan)); /* read back shadow register */ data[i] = devpriv->ao_readback[chan]; } return i; } static int pcmda12_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct pcmda12_private *devpriv; struct comedi_subdevice *s; int ret; ret = comedi_request_region(dev, it->options[0], IOSIZE); if (ret) return ret; devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL); if (!devpriv) return -ENOMEM; dev->private = devpriv; devpriv->simultaneous_xfer_mode = it->options[1]; ret = comedi_alloc_subdevices(dev, 1); if (ret) return ret; s = &dev->subdevices[0]; s->private = NULL; s->maxdata = (0x1 << BITS) - 1; s->range_table = &pcmda12_ranges; s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_READABLE | SDF_WRITABLE; s->n_chan = CHANS; s->insn_write = &ao_winsn; s->insn_read = &ao_rinsn; zero_chans(dev); /* clear out all the registers, basically */ return 1; } static struct comedi_driver pcmda12_driver = { .driver_name = "pcmda12", .module = THIS_MODULE, .attach = pcmda12_attach, .detach = comedi_legacy_detach, }; module_comedi_driver(pcmda12_driver); MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
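ao_winsn() above must split each 12-bit sample into two byte-wide port writes, low byte first, using the driver's LSB()/MSB() macros. The sketch below exercises just that arithmetic on a plain value; no port I/O is attempted and the sample value is arbitrary.

/*
 * Sketch of the 12-bit sample split: low byte goes to the channel's
 * LSB port, high nibble to the next port, per the pcmda12 layout.
 */
#include <stdio.h>

#define LSB(x) ((unsigned char)((x) & 0xff))
#define MSB(x) ((unsigned char)((((unsigned short)(x)) >> 8) & 0xff))

int main(void)
{
	unsigned short sample = 0xabc;	/* 12-bit full scale is 0xfff */

	printf("write 0x%02x to LSB port, then 0x%02x to MSB port\n",
	       LSB(sample), MSB(sample));
	/* in simultaneous-xfer mode a later read of any LSB port latches
	 * all channels at once, per the driver comment above */
	return 0;
}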
geiti94/NEMESIS_KERNEL_N5
drivers/usb/musb/da8xx.c
2090
16069
/* * Texas Instruments DA8xx/OMAP-L1x "glue layer" * * Copyright (c) 2008-2009 MontaVista Software, Inc. <source@mvista.com> * * Based on the DaVinci "glue layer" code. * Copyright (C) 2005-2006 by Texas Instruments * * This file is part of the Inventra Controller Driver for Linux. * * The Inventra Controller Driver for Linux is free software; you * can redistribute it and/or modify it under the terms of the GNU * General Public License version 2 as published by the Free Software * Foundation. * * The Inventra Controller Driver for Linux is distributed in * the hope that it will be useful, but WITHOUT ANY WARRANTY; * without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public * License for more details. * * You should have received a copy of the GNU General Public License * along with The Inventra Controller Driver for Linux ; if not, * write to the Free Software Foundation, Inc., 59 Temple Place, * Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/module.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/usb/nop-usb-xceiv.h> #include <mach/da8xx.h> #include <linux/platform_data/usb-davinci.h> #include "musb_core.h" /* * DA8XX specific definitions */ /* USB 2.0 OTG module registers */ #define DA8XX_USB_REVISION_REG 0x00 #define DA8XX_USB_CTRL_REG 0x04 #define DA8XX_USB_STAT_REG 0x08 #define DA8XX_USB_EMULATION_REG 0x0c #define DA8XX_USB_MODE_REG 0x10 /* Transparent, CDC, [Generic] RNDIS */ #define DA8XX_USB_AUTOREQ_REG 0x14 #define DA8XX_USB_SRP_FIX_TIME_REG 0x18 #define DA8XX_USB_TEARDOWN_REG 0x1c #define DA8XX_USB_INTR_SRC_REG 0x20 #define DA8XX_USB_INTR_SRC_SET_REG 0x24 #define DA8XX_USB_INTR_SRC_CLEAR_REG 0x28 #define DA8XX_USB_INTR_MASK_REG 0x2c #define DA8XX_USB_INTR_MASK_SET_REG 0x30 #define DA8XX_USB_INTR_MASK_CLEAR_REG 0x34 #define DA8XX_USB_INTR_SRC_MASKED_REG 0x38 #define DA8XX_USB_END_OF_INTR_REG 0x3c #define DA8XX_USB_GENERIC_RNDIS_EP_SIZE_REG(n) (0x50 + (((n) - 1) << 2)) /* Control register bits */ #define DA8XX_SOFT_RESET_MASK 1 #define DA8XX_USB_TX_EP_MASK 0x1f /* EP0 + 4 Tx EPs */ #define DA8XX_USB_RX_EP_MASK 0x1e /* 4 Rx EPs */ /* USB interrupt register bits */ #define DA8XX_INTR_USB_SHIFT 16 #define DA8XX_INTR_USB_MASK (0x1ff << DA8XX_INTR_USB_SHIFT) /* 8 Mentor */ /* interrupts and DRVVBUS interrupt */ #define DA8XX_INTR_DRVVBUS 0x100 #define DA8XX_INTR_RX_SHIFT 8 #define DA8XX_INTR_RX_MASK (DA8XX_USB_RX_EP_MASK << DA8XX_INTR_RX_SHIFT) #define DA8XX_INTR_TX_SHIFT 0 #define DA8XX_INTR_TX_MASK (DA8XX_USB_TX_EP_MASK << DA8XX_INTR_TX_SHIFT) #define DA8XX_MENTOR_CORE_OFFSET 0x400 #define CFGCHIP2 IO_ADDRESS(DA8XX_SYSCFG0_BASE + DA8XX_CFGCHIP2_REG) struct da8xx_glue { struct device *dev; struct platform_device *musb; struct clk *clk; }; /* * REVISIT (PM): we should be able to keep the PHY in low power mode most * of the time (24 MHz oscillator and PLL off, etc.) by setting POWER.D0 * and, when in host mode, autosuspending idle root ports... PHY_PLLON * (overriding SUSPENDM?) then likely needs to stay off. */ static inline void phy_on(void) { u32 cfgchip2 = __raw_readl(CFGCHIP2); /* * Start the on-chip PHY and its PLL. 
*/ cfgchip2 &= ~(CFGCHIP2_RESET | CFGCHIP2_PHYPWRDN | CFGCHIP2_OTGPWRDN); cfgchip2 |= CFGCHIP2_PHY_PLLON; __raw_writel(cfgchip2, CFGCHIP2); pr_info("Waiting for USB PHY clock good...\n"); while (!(__raw_readl(CFGCHIP2) & CFGCHIP2_PHYCLKGD)) cpu_relax(); } static inline void phy_off(void) { u32 cfgchip2 = __raw_readl(CFGCHIP2); /* * Ensure that USB 1.1 reference clock is not being sourced from * USB 2.0 PHY. Otherwise do not power down the PHY. */ if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX) && (cfgchip2 & CFGCHIP2_USB1SUSPENDM)) { pr_warning("USB 1.1 clocked from USB 2.0 PHY -- " "can't power it down\n"); return; } /* * Power down the on-chip PHY. */ cfgchip2 |= CFGCHIP2_PHYPWRDN | CFGCHIP2_OTGPWRDN; __raw_writel(cfgchip2, CFGCHIP2); } /* * Because we don't set CTRL.UINT, it's "important" to: * - not read/write INTRUSB/INTRUSBE (except during * initial setup, as a workaround); * - use INTSET/INTCLR instead. */ /** * da8xx_musb_enable - enable interrupts */ static void da8xx_musb_enable(struct musb *musb) { void __iomem *reg_base = musb->ctrl_base; u32 mask; /* Workaround: setup IRQs through both register sets. */ mask = ((musb->epmask & DA8XX_USB_TX_EP_MASK) << DA8XX_INTR_TX_SHIFT) | ((musb->epmask & DA8XX_USB_RX_EP_MASK) << DA8XX_INTR_RX_SHIFT) | DA8XX_INTR_USB_MASK; musb_writel(reg_base, DA8XX_USB_INTR_MASK_SET_REG, mask); /* Force the DRVVBUS IRQ so we can start polling for ID change. */ musb_writel(reg_base, DA8XX_USB_INTR_SRC_SET_REG, DA8XX_INTR_DRVVBUS << DA8XX_INTR_USB_SHIFT); } /** * da8xx_musb_disable - disable HDRC and flush interrupts */ static void da8xx_musb_disable(struct musb *musb) { void __iomem *reg_base = musb->ctrl_base; musb_writel(reg_base, DA8XX_USB_INTR_MASK_CLEAR_REG, DA8XX_INTR_USB_MASK | DA8XX_INTR_TX_MASK | DA8XX_INTR_RX_MASK); musb_writeb(musb->mregs, MUSB_DEVCTL, 0); musb_writel(reg_base, DA8XX_USB_END_OF_INTR_REG, 0); } #define portstate(stmt) stmt static void da8xx_musb_set_vbus(struct musb *musb, int is_on) { WARN_ON(is_on && is_peripheral_active(musb)); } #define POLL_SECONDS 2 static struct timer_list otg_workaround; static void otg_timer(unsigned long _musb) { struct musb *musb = (void *)_musb; void __iomem *mregs = musb->mregs; u8 devctl; unsigned long flags; /* * We poll because DaVinci's won't expose several OTG-critical * status change events (from the transceiver) otherwise. */ devctl = musb_readb(mregs, MUSB_DEVCTL); dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl, usb_otg_state_string(musb->xceiv->state)); spin_lock_irqsave(&musb->lock, flags); switch (musb->xceiv->state) { case OTG_STATE_A_WAIT_BCON: devctl &= ~MUSB_DEVCTL_SESSION; musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); devctl = musb_readb(musb->mregs, MUSB_DEVCTL); if (devctl & MUSB_DEVCTL_BDEVICE) { musb->xceiv->state = OTG_STATE_B_IDLE; MUSB_DEV_MODE(musb); } else { musb->xceiv->state = OTG_STATE_A_IDLE; MUSB_HST_MODE(musb); } break; case OTG_STATE_A_WAIT_VFALL: /* * Wait till VBUS falls below SessionEnd (~0.2 V); the 1.3 * RTL seems to mis-handle session "start" otherwise (or in * our case "recover"), in routine "VBUS was valid by the time * VBUSERR got reported during enumeration" cases. 
*/ if (devctl & MUSB_DEVCTL_VBUS) { mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); break; } musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; musb_writel(musb->ctrl_base, DA8XX_USB_INTR_SRC_SET_REG, MUSB_INTR_VBUSERROR << DA8XX_INTR_USB_SHIFT); break; case OTG_STATE_B_IDLE: /* * There's no ID-changed IRQ, so we have no good way to tell * when to switch to the A-Default state machine (by setting * the DEVCTL.Session bit). * * Workaround: whenever we're in B_IDLE, try setting the * session flag every few seconds. If it works, ID was * grounded and we're now in the A-Default state machine. * * NOTE: setting the session flag is _supposed_ to trigger * SRP but clearly it doesn't. */ musb_writeb(mregs, MUSB_DEVCTL, devctl | MUSB_DEVCTL_SESSION); devctl = musb_readb(mregs, MUSB_DEVCTL); if (devctl & MUSB_DEVCTL_BDEVICE) mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); else musb->xceiv->state = OTG_STATE_A_IDLE; break; default: break; } spin_unlock_irqrestore(&musb->lock, flags); } static void da8xx_musb_try_idle(struct musb *musb, unsigned long timeout) { static unsigned long last_timer; if (timeout == 0) timeout = jiffies + msecs_to_jiffies(3); /* Never idle if active, or when VBUS timeout is not set as host */ if (musb->is_active || (musb->a_wait_bcon == 0 && musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) { dev_dbg(musb->controller, "%s active, deleting timer\n", usb_otg_state_string(musb->xceiv->state)); del_timer(&otg_workaround); last_timer = jiffies; return; } if (time_after(last_timer, timeout) && timer_pending(&otg_workaround)) { dev_dbg(musb->controller, "Longer idle timer already pending, ignoring...\n"); return; } last_timer = timeout; dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n", usb_otg_state_string(musb->xceiv->state), jiffies_to_msecs(timeout - jiffies)); mod_timer(&otg_workaround, timeout); } static irqreturn_t da8xx_musb_interrupt(int irq, void *hci) { struct musb *musb = hci; void __iomem *reg_base = musb->ctrl_base; struct usb_otg *otg = musb->xceiv->otg; unsigned long flags; irqreturn_t ret = IRQ_NONE; u32 status; spin_lock_irqsave(&musb->lock, flags); /* * NOTE: DA8XX shadows the Mentor IRQs. Don't manage them through * the Mentor registers (except for setup), use the TI ones and EOI. */ /* Acknowledge and handle non-CPPI interrupts */ status = musb_readl(reg_base, DA8XX_USB_INTR_SRC_MASKED_REG); if (!status) goto eoi; musb_writel(reg_base, DA8XX_USB_INTR_SRC_CLEAR_REG, status); dev_dbg(musb->controller, "USB IRQ %08x\n", status); musb->int_rx = (status & DA8XX_INTR_RX_MASK) >> DA8XX_INTR_RX_SHIFT; musb->int_tx = (status & DA8XX_INTR_TX_MASK) >> DA8XX_INTR_TX_SHIFT; musb->int_usb = (status & DA8XX_INTR_USB_MASK) >> DA8XX_INTR_USB_SHIFT; /* * DRVVBUS IRQs are the only proxy we have (a very poor one!) for * DA8xx's missing ID change IRQ. We need an ID change IRQ to * switch appropriately between halves of the OTG state machine. * Managing DEVCTL.Session per Mentor docs requires that we know its * value but DEVCTL.BDevice is invalid without DEVCTL.Session set. * Also, DRVVBUS pulses for SRP (but not at 5 V)... */ if (status & (DA8XX_INTR_DRVVBUS << DA8XX_INTR_USB_SHIFT)) { int drvvbus = musb_readl(reg_base, DA8XX_USB_STAT_REG); void __iomem *mregs = musb->mregs; u8 devctl = musb_readb(mregs, MUSB_DEVCTL); int err; err = musb->int_usb & MUSB_INTR_VBUSERROR; if (err) { /* * The Mentor core doesn't debounce VBUS as needed * to cope with device connect current spikes. 
This * means it's not uncommon for bus-powered devices * to get VBUS errors during enumeration. * * This is a workaround, but newer RTL from Mentor * seems to allow a better one: "re"-starting sessions * without waiting for VBUS to stop registering in * devctl. */ musb->int_usb &= ~MUSB_INTR_VBUSERROR; musb->xceiv->state = OTG_STATE_A_WAIT_VFALL; mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); WARNING("VBUS error workaround (delay coming)\n"); } else if (drvvbus) { MUSB_HST_MODE(musb); otg->default_a = 1; musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; portstate(musb->port1_status |= USB_PORT_STAT_POWER); del_timer(&otg_workaround); } else { musb->is_active = 0; MUSB_DEV_MODE(musb); otg->default_a = 0; musb->xceiv->state = OTG_STATE_B_IDLE; portstate(musb->port1_status &= ~USB_PORT_STAT_POWER); } dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n", drvvbus ? "on" : "off", usb_otg_state_string(musb->xceiv->state), err ? " ERROR" : "", devctl); ret = IRQ_HANDLED; } if (musb->int_tx || musb->int_rx || musb->int_usb) ret |= musb_interrupt(musb); eoi: /* EOI needs to be written for the IRQ to be re-asserted. */ if (ret == IRQ_HANDLED || status) musb_writel(reg_base, DA8XX_USB_END_OF_INTR_REG, 0); /* Poll for ID change */ if (musb->xceiv->state == OTG_STATE_B_IDLE) mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); spin_unlock_irqrestore(&musb->lock, flags); return ret; } static int da8xx_musb_set_mode(struct musb *musb, u8 musb_mode) { u32 cfgchip2 = __raw_readl(CFGCHIP2); cfgchip2 &= ~CFGCHIP2_OTGMODE; switch (musb_mode) { case MUSB_HOST: /* Force VBUS valid, ID = 0 */ cfgchip2 |= CFGCHIP2_FORCE_HOST; break; case MUSB_PERIPHERAL: /* Force VBUS valid, ID = 1 */ cfgchip2 |= CFGCHIP2_FORCE_DEVICE; break; case MUSB_OTG: /* Don't override the VBUS/ID comparators */ cfgchip2 |= CFGCHIP2_NO_OVERRIDE; break; default: dev_dbg(musb->controller, "Trying to set unsupported mode %u\n", musb_mode); } __raw_writel(cfgchip2, CFGCHIP2); return 0; } static int da8xx_musb_init(struct musb *musb) { void __iomem *reg_base = musb->ctrl_base; u32 rev; int ret = -ENODEV; musb->mregs += DA8XX_MENTOR_CORE_OFFSET; /* Returns zero if e.g. not clocked */ rev = musb_readl(reg_base, DA8XX_USB_REVISION_REG); if (!rev) goto fail; usb_nop_xceiv_register(); musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2); if (IS_ERR_OR_NULL(musb->xceiv)) { ret = -EPROBE_DEFER; goto fail; } setup_timer(&otg_workaround, otg_timer, (unsigned long)musb); /* Reset the controller */ musb_writel(reg_base, DA8XX_USB_CTRL_REG, DA8XX_SOFT_RESET_MASK); /* Start the on-chip PHY and its PLL. 
*/ phy_on(); msleep(5); /* NOTE: IRQs are in mixed mode, not bypass to pure MUSB */ pr_debug("DA8xx OTG revision %08x, PHY %03x, control %02x\n", rev, __raw_readl(CFGCHIP2), musb_readb(reg_base, DA8XX_USB_CTRL_REG)); musb->isr = da8xx_musb_interrupt; return 0; fail: return ret; } static int da8xx_musb_exit(struct musb *musb) { del_timer_sync(&otg_workaround); phy_off(); usb_put_phy(musb->xceiv); usb_nop_xceiv_unregister(); return 0; } static const struct musb_platform_ops da8xx_ops = { .init = da8xx_musb_init, .exit = da8xx_musb_exit, .enable = da8xx_musb_enable, .disable = da8xx_musb_disable, .set_mode = da8xx_musb_set_mode, .try_idle = da8xx_musb_try_idle, .set_vbus = da8xx_musb_set_vbus, }; static u64 da8xx_dmamask = DMA_BIT_MASK(32); static int da8xx_probe(struct platform_device *pdev) { struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data; struct platform_device *musb; struct da8xx_glue *glue; struct clk *clk; int ret = -ENOMEM; glue = kzalloc(sizeof(*glue), GFP_KERNEL); if (!glue) { dev_err(&pdev->dev, "failed to allocate glue context\n"); goto err0; } musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO); if (!musb) { dev_err(&pdev->dev, "failed to allocate musb device\n"); goto err1; } clk = clk_get(&pdev->dev, "usb20"); if (IS_ERR(clk)) { dev_err(&pdev->dev, "failed to get clock\n"); ret = PTR_ERR(clk); goto err3; } ret = clk_enable(clk); if (ret) { dev_err(&pdev->dev, "failed to enable clock\n"); goto err4; } musb->dev.parent = &pdev->dev; musb->dev.dma_mask = &da8xx_dmamask; musb->dev.coherent_dma_mask = da8xx_dmamask; glue->dev = &pdev->dev; glue->musb = musb; glue->clk = clk; pdata->platform_ops = &da8xx_ops; platform_set_drvdata(pdev, glue); ret = platform_device_add_resources(musb, pdev->resource, pdev->num_resources); if (ret) { dev_err(&pdev->dev, "failed to add resources\n"); goto err5; } ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); if (ret) { dev_err(&pdev->dev, "failed to add platform_data\n"); goto err5; } ret = platform_device_add(musb); if (ret) { dev_err(&pdev->dev, "failed to register musb device\n"); goto err5; } return 0; err5: clk_disable(clk); err4: clk_put(clk); err3: platform_device_put(musb); err1: kfree(glue); err0: return ret; } static int da8xx_remove(struct platform_device *pdev) { struct da8xx_glue *glue = platform_get_drvdata(pdev); platform_device_unregister(glue->musb); clk_disable(glue->clk); clk_put(glue->clk); kfree(glue); return 0; } static struct platform_driver da8xx_driver = { .probe = da8xx_probe, .remove = da8xx_remove, .driver = { .name = "musb-da8xx", }, }; MODULE_DESCRIPTION("DA8xx/OMAP-L1x MUSB Glue Layer"); MODULE_AUTHOR("Sergei Shtylyov <sshtylyov@ru.mvista.com>"); MODULE_LICENSE("GPL v2"); module_platform_driver(da8xx_driver);
gpl-2.0
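da8xx_musb_set_mode() above updates the multi-bit OTG-mode field in CFGCHIP2 with a read-modify-write: clear the field through its mask, then OR in the chosen value. A minimal sketch of that pattern follows; MODE_MASK and the mode values are invented stand-ins, and an ordinary variable replaces the memory-mapped register.

/*
 * Read-modify-write of a multi-bit register field: clearing with the
 * full field mask first means stale bits from the old mode can never
 * survive into the new value.
 */
#include <stdio.h>

#define MODE_MASK   (3u << 13)	/* hypothetical 2-bit field */
#define MODE_HOST   (1u << 13)
#define MODE_DEVICE (2u << 13)

static unsigned int fake_reg = 0xdead6000;	/* pretend current value */

static void set_mode(unsigned int mode)
{
	unsigned int v = fake_reg;	/* read */
	v &= ~MODE_MASK;		/* clear the whole field */
	v |= mode;			/* modify */
	fake_reg = v;			/* write back */
}

int main(void)
{
	set_mode(MODE_HOST);
	printf("reg = 0x%08x\n", fake_reg);
	return 0;
}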
dev-elixir/hx_wt88047
drivers/media/pci/ivtv/ivtv-ioctl.c
2090
53989
/* ioctl system call Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com> Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "ivtv-driver.h" #include "ivtv-version.h" #include "ivtv-mailbox.h" #include "ivtv-i2c.h" #include "ivtv-queue.h" #include "ivtv-fileops.h" #include "ivtv-vbi.h" #include "ivtv-routing.h" #include "ivtv-streams.h" #include "ivtv-yuv.h" #include "ivtv-ioctl.h" #include "ivtv-gpio.h" #include "ivtv-controls.h" #include "ivtv-cards.h" #include <media/saa7127.h> #include <media/tveeprom.h> #include <media/v4l2-chip-ident.h> #include <media/v4l2-event.h> #include <linux/dvb/audio.h> u16 ivtv_service2vbi(int type) { switch (type) { case V4L2_SLICED_TELETEXT_B: return IVTV_SLICED_TYPE_TELETEXT_B; case V4L2_SLICED_CAPTION_525: return IVTV_SLICED_TYPE_CAPTION_525; case V4L2_SLICED_WSS_625: return IVTV_SLICED_TYPE_WSS_625; case V4L2_SLICED_VPS: return IVTV_SLICED_TYPE_VPS; default: return 0; } } static int valid_service_line(int field, int line, int is_pal) { return (is_pal && line >= 6 && (line != 23 || field == 0)) || (!is_pal && line >= 10 && line < 22); } static u16 select_service_from_set(int field, int line, u16 set, int is_pal) { u16 valid_set = (is_pal ? 
V4L2_SLICED_VBI_625 : V4L2_SLICED_VBI_525); int i; set = set & valid_set; if (set == 0 || !valid_service_line(field, line, is_pal)) { return 0; } if (!is_pal) { if (line == 21 && (set & V4L2_SLICED_CAPTION_525)) return V4L2_SLICED_CAPTION_525; } else { if (line == 16 && field == 0 && (set & V4L2_SLICED_VPS)) return V4L2_SLICED_VPS; if (line == 23 && field == 0 && (set & V4L2_SLICED_WSS_625)) return V4L2_SLICED_WSS_625; if (line == 23) return 0; } for (i = 0; i < 32; i++) { if ((1 << i) & set) return 1 << i; } return 0; } void ivtv_expand_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal) { u16 set = fmt->service_set; int f, l; fmt->service_set = 0; for (f = 0; f < 2; f++) { for (l = 0; l < 24; l++) { fmt->service_lines[f][l] = select_service_from_set(f, l, set, is_pal); } } } static void check_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal) { int f, l; for (f = 0; f < 2; f++) { for (l = 0; l < 24; l++) { fmt->service_lines[f][l] = select_service_from_set(f, l, fmt->service_lines[f][l], is_pal); } } } u16 ivtv_get_service_set(struct v4l2_sliced_vbi_format *fmt) { int f, l; u16 set = 0; for (f = 0; f < 2; f++) { for (l = 0; l < 24; l++) { set |= fmt->service_lines[f][l]; } } return set; } void ivtv_set_osd_alpha(struct ivtv *itv) { ivtv_vapi(itv, CX2341X_OSD_SET_GLOBAL_ALPHA, 3, itv->osd_global_alpha_state, itv->osd_global_alpha, !itv->osd_local_alpha_state); ivtv_vapi(itv, CX2341X_OSD_SET_CHROMA_KEY, 2, itv->osd_chroma_key_state, itv->osd_chroma_key); } int ivtv_set_speed(struct ivtv *itv, int speed) { u32 data[CX2341X_MBOX_MAX_DATA]; int single_step = (speed == 1 || speed == -1); DEFINE_WAIT(wait); if (speed == 0) speed = 1000; /* No change? */ if (speed == itv->speed && !single_step) return 0; if (single_step && (speed < 0) == (itv->speed < 0)) { /* Single step video and no need to change direction */ ivtv_vapi(itv, CX2341X_DEC_STEP_VIDEO, 1, 0); itv->speed = speed; return 0; } if (single_step) /* Need to change direction */ speed = speed < 0 ? -1000 : 1000; data[0] = (speed > 1000 || speed < -1000) ? 0x80000000 : 0; data[0] |= (speed > 1000 || speed < -1500) ? 0x40000000 : 0; data[1] = (speed < 0); data[2] = speed < 0 ? 3 : 7; data[3] = v4l2_ctrl_g_ctrl(itv->cxhdl.video_b_frames); data[4] = (speed == 1500 || speed == 500) ? itv->speed_mute_audio : 0; data[5] = 0; data[6] = 0; if (speed == 1500 || speed == -1500) data[0] |= 1; else if (speed == 2000 || speed == -2000) data[0] |= 2; else if (speed > -1000 && speed < 0) data[0] |= (-1000 / speed); else if (speed < 1000 && speed > 0) data[0] |= (1000 / speed); /* If not decoding, just change speed setting */ if (atomic_read(&itv->decoding) > 0) { int got_sig = 0; /* Stop all DMA and decoding activity */ ivtv_vapi(itv, CX2341X_DEC_PAUSE_PLAYBACK, 1, 0); /* Wait for any DMA to finish */ mutex_unlock(&itv->serialize_lock); prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE); while (test_bit(IVTV_F_I_DMA, &itv->i_flags)) { got_sig = signal_pending(current); if (got_sig) break; got_sig = 0; schedule(); } finish_wait(&itv->dma_waitq, &wait); mutex_lock(&itv->serialize_lock); if (got_sig) return -EINTR; /* Change Speed safely */ ivtv_api(itv, CX2341X_DEC_SET_PLAYBACK_SPEED, 7, data); IVTV_DEBUG_INFO("Setting Speed to 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", data[0], data[1], data[2], data[3], data[4], data[5], data[6]); } if (single_step) { speed = (speed < 0) ? 
-1 : 1; ivtv_vapi(itv, CX2341X_DEC_STEP_VIDEO, 1, 0); } itv->speed = speed; return 0; } static int ivtv_validate_speed(int cur_speed, int new_speed) { int fact = new_speed < 0 ? -1 : 1; int s; if (cur_speed == 0) cur_speed = 1000; if (new_speed < 0) new_speed = -new_speed; if (cur_speed < 0) cur_speed = -cur_speed; if (cur_speed <= new_speed) { if (new_speed > 1500) return fact * 2000; if (new_speed > 1000) return fact * 1500; } else { if (new_speed >= 2000) return fact * 2000; if (new_speed >= 1500) return fact * 1500; if (new_speed >= 1000) return fact * 1000; } if (new_speed == 0) return 1000; if (new_speed == 1 || new_speed == 1000) return fact * new_speed; s = new_speed; new_speed = 1000 / new_speed; if (1000 / cur_speed == new_speed) new_speed += (cur_speed < s) ? -1 : 1; if (new_speed > 60) return 1000 / (fact * 60); return 1000 / (fact * new_speed); } static int ivtv_video_command(struct ivtv *itv, struct ivtv_open_id *id, struct v4l2_decoder_cmd *dc, int try) { struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG]; if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) return -EINVAL; switch (dc->cmd) { case V4L2_DEC_CMD_START: { dc->flags &= V4L2_DEC_CMD_START_MUTE_AUDIO; dc->start.speed = ivtv_validate_speed(itv->speed, dc->start.speed); if (dc->start.speed < 0) dc->start.format = V4L2_DEC_START_FMT_GOP; else dc->start.format = V4L2_DEC_START_FMT_NONE; if (dc->start.speed != 500 && dc->start.speed != 1500) dc->flags = dc->start.speed == 1000 ? 0 : V4L2_DEC_CMD_START_MUTE_AUDIO; if (try) break; itv->speed_mute_audio = dc->flags & V4L2_DEC_CMD_START_MUTE_AUDIO; if (ivtv_set_output_mode(itv, OUT_MPG) != OUT_MPG) return -EBUSY; if (test_and_clear_bit(IVTV_F_I_DEC_PAUSED, &itv->i_flags)) { /* forces ivtv_set_speed to be called */ itv->speed = 0; } return ivtv_start_decoding(id, dc->start.speed); } case V4L2_DEC_CMD_STOP: dc->flags &= V4L2_DEC_CMD_STOP_IMMEDIATELY | V4L2_DEC_CMD_STOP_TO_BLACK; if (dc->flags & V4L2_DEC_CMD_STOP_IMMEDIATELY) dc->stop.pts = 0; if (try) break; if (atomic_read(&itv->decoding) == 0) return 0; if (itv->output_mode != OUT_MPG) return -EBUSY; itv->output_mode = OUT_NONE; return ivtv_stop_v4l2_decode_stream(s, dc->flags, dc->stop.pts); case V4L2_DEC_CMD_PAUSE: dc->flags &= V4L2_DEC_CMD_PAUSE_TO_BLACK; if (try) break; if (!atomic_read(&itv->decoding)) return -EPERM; if (itv->output_mode != OUT_MPG) return -EBUSY; if (atomic_read(&itv->decoding) > 0) { ivtv_vapi(itv, CX2341X_DEC_PAUSE_PLAYBACK, 1, (dc->flags & V4L2_DEC_CMD_PAUSE_TO_BLACK) ? 
1 : 0); set_bit(IVTV_F_I_DEC_PAUSED, &itv->i_flags); } break; case V4L2_DEC_CMD_RESUME: dc->flags = 0; if (try) break; if (!atomic_read(&itv->decoding)) return -EPERM; if (itv->output_mode != OUT_MPG) return -EBUSY; if (test_and_clear_bit(IVTV_F_I_DEC_PAUSED, &itv->i_flags)) { int speed = itv->speed; itv->speed = 0; return ivtv_start_decoding(id, speed); } break; default: return -EINVAL; } return 0; } static int ivtv_g_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt) { struct ivtv *itv = fh2id(fh)->itv; struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced; vbifmt->reserved[0] = 0; vbifmt->reserved[1] = 0; if (!(itv->v4l2_cap & V4L2_CAP_SLICED_VBI_OUTPUT)) return -EINVAL; vbifmt->io_size = sizeof(struct v4l2_sliced_vbi_data) * 36; memset(vbifmt->service_lines, 0, sizeof(vbifmt->service_lines)); if (itv->is_60hz) { vbifmt->service_lines[0][21] = V4L2_SLICED_CAPTION_525; vbifmt->service_lines[1][21] = V4L2_SLICED_CAPTION_525; } else { vbifmt->service_lines[0][23] = V4L2_SLICED_WSS_625; vbifmt->service_lines[0][16] = V4L2_SLICED_VPS; } vbifmt->service_set = ivtv_get_service_set(vbifmt); return 0; } static int ivtv_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt) { struct ivtv_open_id *id = fh2id(fh); struct ivtv *itv = id->itv; struct v4l2_pix_format *pixfmt = &fmt->fmt.pix; pixfmt->width = itv->cxhdl.width; pixfmt->height = itv->cxhdl.height; pixfmt->colorspace = V4L2_COLORSPACE_SMPTE170M; pixfmt->field = V4L2_FIELD_INTERLACED; pixfmt->priv = 0; if (id->type == IVTV_ENC_STREAM_TYPE_YUV) { pixfmt->pixelformat = V4L2_PIX_FMT_HM12; /* YUV size is (Y=(h*720) + UV=(h*(720/2))) */ pixfmt->sizeimage = pixfmt->height * 720 * 3 / 2; pixfmt->bytesperline = 720; } else { pixfmt->pixelformat = V4L2_PIX_FMT_MPEG; pixfmt->sizeimage = 128 * 1024; pixfmt->bytesperline = 0; } return 0; } static int ivtv_g_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt) { struct ivtv *itv = fh2id(fh)->itv; struct v4l2_vbi_format *vbifmt = &fmt->fmt.vbi; vbifmt->sampling_rate = 27000000; vbifmt->offset = 248; vbifmt->samples_per_line = itv->vbi.raw_decoder_line_size - 4; vbifmt->sample_format = V4L2_PIX_FMT_GREY; vbifmt->start[0] = itv->vbi.start[0]; vbifmt->start[1] = itv->vbi.start[1]; vbifmt->count[0] = vbifmt->count[1] = itv->vbi.count; vbifmt->flags = 0; vbifmt->reserved[0] = 0; vbifmt->reserved[1] = 0; return 0; } static int ivtv_g_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt) { struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced; struct ivtv_open_id *id = fh2id(fh); struct ivtv *itv = id->itv; vbifmt->reserved[0] = 0; vbifmt->reserved[1] = 0; vbifmt->io_size = sizeof(struct v4l2_sliced_vbi_data) * 36; if (id->type == IVTV_DEC_STREAM_TYPE_VBI) { vbifmt->service_set = itv->is_50hz ? 
V4L2_SLICED_VBI_625 : V4L2_SLICED_VBI_525; ivtv_expand_service_set(vbifmt, itv->is_50hz); vbifmt->service_set = ivtv_get_service_set(vbifmt); return 0; } v4l2_subdev_call(itv->sd_video, vbi, g_sliced_fmt, vbifmt); vbifmt->service_set = ivtv_get_service_set(vbifmt); return 0; } static int ivtv_g_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *fmt) { struct ivtv_open_id *id = fh2id(fh); struct ivtv *itv = id->itv; struct v4l2_pix_format *pixfmt = &fmt->fmt.pix; if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) return -EINVAL; pixfmt->width = itv->main_rect.width; pixfmt->height = itv->main_rect.height; pixfmt->colorspace = V4L2_COLORSPACE_SMPTE170M; pixfmt->field = V4L2_FIELD_INTERLACED; pixfmt->priv = 0; if (id->type == IVTV_DEC_STREAM_TYPE_YUV) { switch (itv->yuv_info.lace_mode & IVTV_YUV_MODE_MASK) { case IVTV_YUV_MODE_INTERLACED: pixfmt->field = (itv->yuv_info.lace_mode & IVTV_YUV_SYNC_MASK) ? V4L2_FIELD_INTERLACED_BT : V4L2_FIELD_INTERLACED_TB; break; case IVTV_YUV_MODE_PROGRESSIVE: pixfmt->field = V4L2_FIELD_NONE; break; default: pixfmt->field = V4L2_FIELD_ANY; break; } pixfmt->pixelformat = V4L2_PIX_FMT_HM12; pixfmt->bytesperline = 720; pixfmt->width = itv->yuv_info.v4l2_src_w; pixfmt->height = itv->yuv_info.v4l2_src_h; /* YUV size is (Y=(h*w) + UV=(h*(w/2))) */ pixfmt->sizeimage = 1080 * ((pixfmt->height + 31) & ~31); } else { pixfmt->pixelformat = V4L2_PIX_FMT_MPEG; pixfmt->sizeimage = 128 * 1024; pixfmt->bytesperline = 0; } return 0; } static int ivtv_g_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2_format *fmt) { struct ivtv *itv = fh2id(fh)->itv; struct v4l2_window *winfmt = &fmt->fmt.win; if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) return -EINVAL; winfmt->chromakey = itv->osd_chroma_key; winfmt->global_alpha = itv->osd_global_alpha; winfmt->field = V4L2_FIELD_INTERLACED; winfmt->clips = NULL; winfmt->clipcount = 0; winfmt->bitmap = NULL; winfmt->w.top = winfmt->w.left = 0; winfmt->w.width = itv->osd_rect.width; winfmt->w.height = itv->osd_rect.height; return 0; } static int ivtv_try_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt) { return ivtv_g_fmt_sliced_vbi_out(file, fh, fmt); } static int ivtv_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt) { struct ivtv_open_id *id = fh2id(fh); struct ivtv *itv = id->itv; int w = fmt->fmt.pix.width; int h = fmt->fmt.pix.height; int min_h = 2; w = min(w, 720); w = max(w, 2); if (id->type == IVTV_ENC_STREAM_TYPE_YUV) { /* YUV height must be a multiple of 32 */ h &= ~0x1f; min_h = 32; } h = min(h, itv->is_50hz ? 
576 : 480); h = max(h, min_h); ivtv_g_fmt_vid_cap(file, fh, fmt); fmt->fmt.pix.width = w; fmt->fmt.pix.height = h; return 0; } static int ivtv_try_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt) { return ivtv_g_fmt_vbi_cap(file, fh, fmt); } static int ivtv_try_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt) { struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced; struct ivtv_open_id *id = fh2id(fh); struct ivtv *itv = id->itv; if (id->type == IVTV_DEC_STREAM_TYPE_VBI) return ivtv_g_fmt_sliced_vbi_cap(file, fh, fmt); /* set sliced VBI capture format */ vbifmt->io_size = sizeof(struct v4l2_sliced_vbi_data) * 36; vbifmt->reserved[0] = 0; vbifmt->reserved[1] = 0; if (vbifmt->service_set) ivtv_expand_service_set(vbifmt, itv->is_50hz); check_service_set(vbifmt, itv->is_50hz); vbifmt->service_set = ivtv_get_service_set(vbifmt); return 0; } static int ivtv_try_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *fmt) { struct ivtv_open_id *id = fh2id(fh); s32 w = fmt->fmt.pix.width; s32 h = fmt->fmt.pix.height; int field = fmt->fmt.pix.field; int ret = ivtv_g_fmt_vid_out(file, fh, fmt); w = min(w, 720); w = max(w, 2); /* Why can the height be 576 even when the output is NTSC? Internally the buffers of the PVR350 are always set to 720x576. The decoded video frame will always be placed in the top left corner of this buffer. For any video which is not 720x576, the buffer will then be cropped to remove the unused right and lower areas, with the remaining image being scaled by the hardware to fit the display area. The video can be scaled both up and down, so a 720x480 video can be displayed full-screen on PAL and a 720x576 video can be displayed without cropping on NTSC. Note that the scaling only occurs on the video stream, the osd resolution is locked to the broadcast standard and not scaled. Thanks to Ian Armstrong for this explanation. 
*/ h = min(h, 576); h = max(h, 2); if (id->type == IVTV_DEC_STREAM_TYPE_YUV) fmt->fmt.pix.field = field; fmt->fmt.pix.width = w; fmt->fmt.pix.height = h; return ret; } static int ivtv_try_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2_format *fmt) { struct ivtv *itv = fh2id(fh)->itv; u32 chromakey = fmt->fmt.win.chromakey; u8 global_alpha = fmt->fmt.win.global_alpha; if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) return -EINVAL; ivtv_g_fmt_vid_out_overlay(file, fh, fmt); fmt->fmt.win.chromakey = chromakey; fmt->fmt.win.global_alpha = global_alpha; return 0; } static int ivtv_s_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt) { return ivtv_g_fmt_sliced_vbi_out(file, fh, fmt); } static int ivtv_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt) { struct ivtv_open_id *id = fh2id(fh); struct ivtv *itv = id->itv; struct v4l2_mbus_framefmt mbus_fmt; int ret = ivtv_try_fmt_vid_cap(file, fh, fmt); int w = fmt->fmt.pix.width; int h = fmt->fmt.pix.height; if (ret) return ret; if (itv->cxhdl.width == w && itv->cxhdl.height == h) return 0; if (atomic_read(&itv->capturing) > 0) return -EBUSY; itv->cxhdl.width = w; itv->cxhdl.height = h; if (v4l2_ctrl_g_ctrl(itv->cxhdl.video_encoding) == V4L2_MPEG_VIDEO_ENCODING_MPEG_1) fmt->fmt.pix.width /= 2; mbus_fmt.width = fmt->fmt.pix.width; mbus_fmt.height = h; mbus_fmt.code = V4L2_MBUS_FMT_FIXED; v4l2_subdev_call(itv->sd_video, video, s_mbus_fmt, &mbus_fmt); return ivtv_g_fmt_vid_cap(file, fh, fmt); } static int ivtv_s_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt) { struct ivtv *itv = fh2id(fh)->itv; if (!ivtv_raw_vbi(itv) && atomic_read(&itv->capturing) > 0) return -EBUSY; itv->vbi.sliced_in->service_set = 0; itv->vbi.in.type = V4L2_BUF_TYPE_VBI_CAPTURE; v4l2_subdev_call(itv->sd_video, vbi, s_raw_fmt, &fmt->fmt.vbi); return ivtv_g_fmt_vbi_cap(file, fh, fmt); } static int ivtv_s_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt) { struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced; struct ivtv_open_id *id = fh2id(fh); struct ivtv *itv = id->itv; int ret = ivtv_try_fmt_sliced_vbi_cap(file, fh, fmt); if (ret || id->type == IVTV_DEC_STREAM_TYPE_VBI) return ret; check_service_set(vbifmt, itv->is_50hz); if (ivtv_raw_vbi(itv) && atomic_read(&itv->capturing) > 0) return -EBUSY; itv->vbi.in.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE; v4l2_subdev_call(itv->sd_video, vbi, s_sliced_fmt, vbifmt); memcpy(itv->vbi.sliced_in, vbifmt, sizeof(*itv->vbi.sliced_in)); return 0; } static int ivtv_s_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *fmt) { struct ivtv_open_id *id = fh2id(fh); struct ivtv *itv = id->itv; struct yuv_playback_info *yi = &itv->yuv_info; int ret = ivtv_try_fmt_vid_out(file, fh, fmt); if (ret) return ret; if (id->type != IVTV_DEC_STREAM_TYPE_YUV) return 0; /* Return now if we already have some frame data */ if (yi->stream_size) return -EBUSY; yi->v4l2_src_w = fmt->fmt.pix.width; yi->v4l2_src_h = fmt->fmt.pix.height; switch (fmt->fmt.pix.field) { case V4L2_FIELD_NONE: yi->lace_mode = IVTV_YUV_MODE_PROGRESSIVE; break; case V4L2_FIELD_ANY: yi->lace_mode = IVTV_YUV_MODE_AUTO; break; case V4L2_FIELD_INTERLACED_BT: yi->lace_mode = IVTV_YUV_MODE_INTERLACED|IVTV_YUV_SYNC_ODD; break; case V4L2_FIELD_INTERLACED_TB: default: yi->lace_mode = IVTV_YUV_MODE_INTERLACED; break; } yi->lace_sync_field = (yi->lace_mode & IVTV_YUV_SYNC_MASK) == IVTV_YUV_SYNC_EVEN ? 
0 : 1; if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) itv->dma_data_req_size = 1080 * ((yi->v4l2_src_h + 31) & ~31); return 0; } static int ivtv_s_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2_format *fmt) { struct ivtv *itv = fh2id(fh)->itv; int ret = ivtv_try_fmt_vid_out_overlay(file, fh, fmt); if (ret == 0) { itv->osd_chroma_key = fmt->fmt.win.chromakey; itv->osd_global_alpha = fmt->fmt.win.global_alpha; ivtv_set_osd_alpha(itv); } return ret; } static int ivtv_g_chip_ident(struct file *file, void *fh, struct v4l2_dbg_chip_ident *chip) { struct ivtv *itv = fh2id(fh)->itv; chip->ident = V4L2_IDENT_NONE; chip->revision = 0; if (chip->match.type == V4L2_CHIP_MATCH_HOST) { if (v4l2_chip_match_host(&chip->match)) chip->ident = itv->has_cx23415 ? V4L2_IDENT_CX23415 : V4L2_IDENT_CX23416; return 0; } if (chip->match.type != V4L2_CHIP_MATCH_I2C_DRIVER && chip->match.type != V4L2_CHIP_MATCH_I2C_ADDR) return -EINVAL; /* TODO: is this correct? */ return ivtv_call_all_err(itv, core, g_chip_ident, chip); } #ifdef CONFIG_VIDEO_ADV_DEBUG static int ivtv_itvc(struct ivtv *itv, bool get, u64 reg, u64 *val) { volatile u8 __iomem *reg_start; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (reg >= IVTV_REG_OFFSET && reg < IVTV_REG_OFFSET + IVTV_REG_SIZE) reg_start = itv->reg_mem - IVTV_REG_OFFSET; else if (itv->has_cx23415 && reg >= IVTV_DECODER_OFFSET && reg < IVTV_DECODER_OFFSET + IVTV_DECODER_SIZE) reg_start = itv->dec_mem - IVTV_DECODER_OFFSET; else if (reg < IVTV_ENCODER_SIZE) reg_start = itv->enc_mem; else return -EINVAL; if (get) *val = readl(reg + reg_start); else writel(*val, reg + reg_start); return 0; } static int ivtv_g_register(struct file *file, void *fh, struct v4l2_dbg_register *reg) { struct ivtv *itv = fh2id(fh)->itv; if (v4l2_chip_match_host(&reg->match)) { reg->size = 4; return ivtv_itvc(itv, true, reg->reg, &reg->val); } /* TODO: subdev errors should not be ignored, this should become a subdev helper function. */ ivtv_call_all(itv, core, g_register, reg); return 0; } static int ivtv_s_register(struct file *file, void *fh, const struct v4l2_dbg_register *reg) { struct ivtv *itv = fh2id(fh)->itv; if (v4l2_chip_match_host(&reg->match)) { u64 val = reg->val; return ivtv_itvc(itv, false, reg->reg, &val); } /* TODO: subdev errors should not be ignored, this should become a subdev helper function. 
*/ ivtv_call_all(itv, core, s_register, reg); return 0; } #endif static int ivtv_querycap(struct file *file, void *fh, struct v4l2_capability *vcap) { struct ivtv_open_id *id = fh2id(file->private_data); struct ivtv *itv = id->itv; struct ivtv_stream *s = &itv->streams[id->type]; strlcpy(vcap->driver, IVTV_DRIVER_NAME, sizeof(vcap->driver)); strlcpy(vcap->card, itv->card_name, sizeof(vcap->card)); snprintf(vcap->bus_info, sizeof(vcap->bus_info), "PCI:%s", pci_name(itv->pdev)); vcap->capabilities = itv->v4l2_cap | V4L2_CAP_DEVICE_CAPS; vcap->device_caps = s->caps; return 0; } static int ivtv_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin) { struct ivtv *itv = fh2id(fh)->itv; return ivtv_get_audio_input(itv, vin->index, vin); } static int ivtv_g_audio(struct file *file, void *fh, struct v4l2_audio *vin) { struct ivtv *itv = fh2id(fh)->itv; vin->index = itv->audio_input; return ivtv_get_audio_input(itv, vin->index, vin); } static int ivtv_s_audio(struct file *file, void *fh, const struct v4l2_audio *vout) { struct ivtv *itv = fh2id(fh)->itv; if (vout->index >= itv->nof_audio_inputs) return -EINVAL; itv->audio_input = vout->index; ivtv_audio_set_io(itv); return 0; } static int ivtv_enumaudout(struct file *file, void *fh, struct v4l2_audioout *vin) { struct ivtv *itv = fh2id(fh)->itv; /* set it to defaults from our table */ return ivtv_get_audio_output(itv, vin->index, vin); } static int ivtv_g_audout(struct file *file, void *fh, struct v4l2_audioout *vin) { struct ivtv *itv = fh2id(fh)->itv; vin->index = 0; return ivtv_get_audio_output(itv, vin->index, vin); } static int ivtv_s_audout(struct file *file, void *fh, const struct v4l2_audioout *vout) { struct ivtv *itv = fh2id(fh)->itv; if (itv->card->video_outputs == NULL || vout->index != 0) return -EINVAL; return 0; } static int ivtv_enum_input(struct file *file, void *fh, struct v4l2_input *vin) { struct ivtv *itv = fh2id(fh)->itv; /* set it to defaults from our table */ return ivtv_get_input(itv, vin->index, vin); } static int ivtv_enum_output(struct file *file, void *fh, struct v4l2_output *vout) { struct ivtv *itv = fh2id(fh)->itv; return ivtv_get_output(itv, vout->index, vout); } static int ivtv_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropcap) { struct ivtv_open_id *id = fh2id(fh); struct ivtv *itv = id->itv; struct yuv_playback_info *yi = &itv->yuv_info; int streamtype; streamtype = id->type; if (cropcap->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) return -EINVAL; cropcap->bounds.top = cropcap->bounds.left = 0; cropcap->bounds.width = 720; if (cropcap->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { cropcap->bounds.height = itv->is_50hz ? 576 : 480; cropcap->pixelaspect.numerator = itv->is_50hz ? 59 : 10; cropcap->pixelaspect.denominator = itv->is_50hz ? 54 : 11; } else if (streamtype == IVTV_DEC_STREAM_TYPE_YUV) { if (yi->track_osd) { cropcap->bounds.width = yi->osd_full_w; cropcap->bounds.height = yi->osd_full_h; } else { cropcap->bounds.width = 720; cropcap->bounds.height = itv->is_out_50hz ? 576 : 480; } cropcap->pixelaspect.numerator = itv->is_out_50hz ? 59 : 10; cropcap->pixelaspect.denominator = itv->is_out_50hz ? 54 : 11; } else { cropcap->bounds.height = itv->is_out_50hz ? 576 : 480; cropcap->pixelaspect.numerator = itv->is_out_50hz ? 59 : 10; cropcap->pixelaspect.denominator = itv->is_out_50hz ? 
54 : 11; } cropcap->defrect = cropcap->bounds; return 0; } static int ivtv_s_crop(struct file *file, void *fh, const struct v4l2_crop *crop) { struct ivtv_open_id *id = fh2id(fh); struct ivtv *itv = id->itv; struct yuv_playback_info *yi = &itv->yuv_info; int streamtype; streamtype = id->type; if (crop->type == V4L2_BUF_TYPE_VIDEO_OUTPUT && (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) { if (streamtype == IVTV_DEC_STREAM_TYPE_YUV) { yi->main_rect = crop->c; return 0; } else { if (!ivtv_vapi(itv, CX2341X_OSD_SET_FRAMEBUFFER_WINDOW, 4, crop->c.width, crop->c.height, crop->c.left, crop->c.top)) { itv->main_rect = crop->c; return 0; } } return -EINVAL; } return -EINVAL; } static int ivtv_g_crop(struct file *file, void *fh, struct v4l2_crop *crop) { struct ivtv_open_id *id = fh2id(fh); struct ivtv *itv = id->itv; struct yuv_playback_info *yi = &itv->yuv_info; int streamtype; streamtype = id->type; if (crop->type == V4L2_BUF_TYPE_VIDEO_OUTPUT && (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) { if (streamtype == IVTV_DEC_STREAM_TYPE_YUV) crop->c = yi->main_rect; else crop->c = itv->main_rect; return 0; } return -EINVAL; } static int ivtv_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc *fmt) { static const struct v4l2_fmtdesc hm12 = { 0, V4L2_BUF_TYPE_VIDEO_CAPTURE, 0, "HM12 (YUV 4:2:0)", V4L2_PIX_FMT_HM12, { 0, 0, 0, 0 } }; static const struct v4l2_fmtdesc mpeg = { 0, V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FMT_FLAG_COMPRESSED, "MPEG", V4L2_PIX_FMT_MPEG, { 0, 0, 0, 0 } }; struct ivtv *itv = fh2id(fh)->itv; struct ivtv_stream *s = &itv->streams[fh2id(fh)->type]; if (fmt->index) return -EINVAL; if (s->type == IVTV_ENC_STREAM_TYPE_MPG) *fmt = mpeg; else if (s->type == IVTV_ENC_STREAM_TYPE_YUV) *fmt = hm12; else return -EINVAL; return 0; } static int ivtv_enum_fmt_vid_out(struct file *file, void *fh, struct v4l2_fmtdesc *fmt) { static const struct v4l2_fmtdesc hm12 = { 0, V4L2_BUF_TYPE_VIDEO_OUTPUT, 0, "HM12 (YUV 4:2:0)", V4L2_PIX_FMT_HM12, { 0, 0, 0, 0 } }; static const struct v4l2_fmtdesc mpeg = { 0, V4L2_BUF_TYPE_VIDEO_OUTPUT, V4L2_FMT_FLAG_COMPRESSED, "MPEG", V4L2_PIX_FMT_MPEG, { 0, 0, 0, 0 } }; struct ivtv *itv = fh2id(fh)->itv; struct ivtv_stream *s = &itv->streams[fh2id(fh)->type]; if (fmt->index) return -EINVAL; if (s->type == IVTV_DEC_STREAM_TYPE_MPG) *fmt = mpeg; else if (s->type == IVTV_DEC_STREAM_TYPE_YUV) *fmt = hm12; else return -EINVAL; return 0; } static int ivtv_g_input(struct file *file, void *fh, unsigned int *i) { struct ivtv *itv = fh2id(fh)->itv; *i = itv->active_input; return 0; } int ivtv_s_input(struct file *file, void *fh, unsigned int inp) { struct ivtv *itv = fh2id(fh)->itv; v4l2_std_id std; int i; if (inp >= itv->nof_inputs) return -EINVAL; if (inp == itv->active_input) { IVTV_DEBUG_INFO("Input unchanged\n"); return 0; } if (atomic_read(&itv->capturing) > 0) { return -EBUSY; } IVTV_DEBUG_INFO("Changing input from %d to %d\n", itv->active_input, inp); itv->active_input = inp; /* Set the audio input to whatever is appropriate for the input type. */ itv->audio_input = itv->card->video_inputs[inp].audio_index; if (itv->card->video_inputs[inp].video_type == IVTV_CARD_INPUT_VID_TUNER) std = itv->tuner_std; else std = V4L2_STD_ALL; for (i = 0; i <= IVTV_ENC_STREAM_TYPE_VBI; i++) itv->streams[i].vdev->tvnorms = std; /* prevent others from messing with the streams until we're finished changing inputs. 
*/ ivtv_mute(itv); ivtv_video_set_io(itv); ivtv_audio_set_io(itv); ivtv_unmute(itv); return 0; } static int ivtv_g_output(struct file *file, void *fh, unsigned int *i) { struct ivtv *itv = fh2id(fh)->itv; if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) return -EINVAL; *i = itv->active_output; return 0; } static int ivtv_s_output(struct file *file, void *fh, unsigned int outp) { struct ivtv *itv = fh2id(fh)->itv; if (outp >= itv->card->nof_outputs) return -EINVAL; if (outp == itv->active_output) { IVTV_DEBUG_INFO("Output unchanged\n"); return 0; } IVTV_DEBUG_INFO("Changing output from %d to %d\n", itv->active_output, outp); itv->active_output = outp; ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_routing, SAA7127_INPUT_TYPE_NORMAL, itv->card->video_outputs[outp].video_output, 0); return 0; } static int ivtv_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf) { struct ivtv *itv = fh2id(fh)->itv; struct ivtv_stream *s = &itv->streams[fh2id(fh)->type]; if (s->vdev->vfl_dir) return -ENOTTY; if (vf->tuner != 0) return -EINVAL; ivtv_call_all(itv, tuner, g_frequency, vf); return 0; } int ivtv_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf) { struct ivtv *itv = fh2id(fh)->itv; struct ivtv_stream *s = &itv->streams[fh2id(fh)->type]; if (s->vdev->vfl_dir) return -ENOTTY; if (vf->tuner != 0) return -EINVAL; ivtv_mute(itv); IVTV_DEBUG_INFO("v4l2 ioctl: set frequency %d\n", vf->frequency); ivtv_call_all(itv, tuner, s_frequency, vf); ivtv_unmute(itv); return 0; } static int ivtv_g_std(struct file *file, void *fh, v4l2_std_id *std) { struct ivtv *itv = fh2id(fh)->itv; *std = itv->std; return 0; } void ivtv_s_std_enc(struct ivtv *itv, v4l2_std_id std) { itv->std = std; itv->is_60hz = (std & V4L2_STD_525_60) ? 1 : 0; itv->is_50hz = !itv->is_60hz; cx2341x_handler_set_50hz(&itv->cxhdl, itv->is_50hz); itv->cxhdl.width = 720; itv->cxhdl.height = itv->is_50hz ? 576 : 480; itv->vbi.count = itv->is_50hz ? 18 : 12; itv->vbi.start[0] = itv->is_50hz ? 6 : 10; itv->vbi.start[1] = itv->is_50hz ? 318 : 273; if (itv->hw_flags & IVTV_HW_CX25840) itv->vbi.sliced_decoder_line_size = itv->is_60hz ? 272 : 284; /* Tuner */ ivtv_call_all(itv, core, s_std, itv->std); } void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id std) { struct yuv_playback_info *yi = &itv->yuv_info; DEFINE_WAIT(wait); int f; /* set display standard */ itv->std_out = std; itv->is_out_60hz = (std & V4L2_STD_525_60) ? 1 : 0; itv->is_out_50hz = !itv->is_out_60hz; ivtv_call_all(itv, video, s_std_output, itv->std_out); /* * The next firmware call is time sensitive. Time it to * avoid risk of a hard lock, by trying to ensure the call * happens within the first 100 lines of the top field. * Make 4 attempts to sync to the decoder before giving up. */ mutex_unlock(&itv->serialize_lock); for (f = 0; f < 4; f++) { prepare_to_wait(&itv->vsync_waitq, &wait, TASK_UNINTERRUPTIBLE); if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100) break; schedule_timeout(msecs_to_jiffies(25)); } finish_wait(&itv->vsync_waitq, &wait); mutex_lock(&itv->serialize_lock); if (f == 4) IVTV_WARN("Mode change failed to sync to decoder\n"); ivtv_vapi(itv, CX2341X_DEC_SET_STANDARD, 1, itv->is_out_50hz); itv->main_rect.left = 0; itv->main_rect.top = 0; itv->main_rect.width = 720; itv->main_rect.height = itv->is_out_50hz ? 576 : 480; ivtv_vapi(itv, CX2341X_OSD_SET_FRAMEBUFFER_WINDOW, 4, 720, itv->main_rect.height, 0, 0); yi->main_rect = itv->main_rect; if (!itv->osd_info) { yi->osd_full_w = 720; yi->osd_full_h = itv->is_out_50hz ? 
576 : 480; } } static int ivtv_s_std(struct file *file, void *fh, v4l2_std_id std) { struct ivtv *itv = fh2id(fh)->itv; if ((std & V4L2_STD_ALL) == 0) return -EINVAL; if (std == itv->std) return 0; if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags) || atomic_read(&itv->capturing) > 0 || atomic_read(&itv->decoding) > 0) { /* Switching standard would mess with already running streams, prevent that by returning EBUSY. */ return -EBUSY; } IVTV_DEBUG_INFO("Switching standard to %llx.\n", (unsigned long long)itv->std); ivtv_s_std_enc(itv, std); if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) ivtv_s_std_dec(itv, std); return 0; } static int ivtv_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt) { struct ivtv_open_id *id = fh2id(fh); struct ivtv *itv = id->itv; if (vt->index != 0) return -EINVAL; ivtv_call_all(itv, tuner, s_tuner, vt); return 0; } static int ivtv_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt) { struct ivtv *itv = fh2id(fh)->itv; if (vt->index != 0) return -EINVAL; ivtv_call_all(itv, tuner, g_tuner, vt); if (vt->type == V4L2_TUNER_RADIO) strlcpy(vt->name, "ivtv Radio Tuner", sizeof(vt->name)); else strlcpy(vt->name, "ivtv TV Tuner", sizeof(vt->name)); return 0; } static int ivtv_g_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_sliced_vbi_cap *cap) { struct ivtv *itv = fh2id(fh)->itv; int set = itv->is_50hz ? V4L2_SLICED_VBI_625 : V4L2_SLICED_VBI_525; int f, l; if (cap->type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE) { for (f = 0; f < 2; f++) { for (l = 0; l < 24; l++) { if (valid_service_line(f, l, itv->is_50hz)) cap->service_lines[f][l] = set; } } } else if (cap->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) { if (!(itv->v4l2_cap & V4L2_CAP_SLICED_VBI_OUTPUT)) return -EINVAL; if (itv->is_60hz) { cap->service_lines[0][21] = V4L2_SLICED_CAPTION_525; cap->service_lines[1][21] = V4L2_SLICED_CAPTION_525; } else { cap->service_lines[0][23] = V4L2_SLICED_WSS_625; cap->service_lines[0][16] = V4L2_SLICED_VPS; } } else { return -EINVAL; } set = 0; for (f = 0; f < 2; f++) for (l = 0; l < 24; l++) set |= cap->service_lines[f][l]; cap->service_set = set; return 0; } static int ivtv_g_enc_index(struct file *file, void *fh, struct v4l2_enc_idx *idx) { struct ivtv *itv = fh2id(fh)->itv; struct v4l2_enc_idx_entry *e = idx->entry; int entries; int i; entries = (itv->pgm_info_write_idx + IVTV_MAX_PGM_INDEX - itv->pgm_info_read_idx) % IVTV_MAX_PGM_INDEX; if (entries > V4L2_ENC_IDX_ENTRIES) entries = V4L2_ENC_IDX_ENTRIES; idx->entries = 0; idx->entries_cap = IVTV_MAX_PGM_INDEX; if (!atomic_read(&itv->capturing)) return 0; for (i = 0; i < entries; i++) { *e = itv->pgm_info[(itv->pgm_info_read_idx + i) % IVTV_MAX_PGM_INDEX]; if ((e->flags & V4L2_ENC_IDX_FRAME_MASK) <= V4L2_ENC_IDX_FRAME_B) { idx->entries++; e++; } } itv->pgm_info_read_idx = (itv->pgm_info_read_idx + idx->entries) % IVTV_MAX_PGM_INDEX; return 0; } static int ivtv_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd *enc) { struct ivtv_open_id *id = fh2id(fh); struct ivtv *itv = id->itv; switch (enc->cmd) { case V4L2_ENC_CMD_START: IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_START\n"); enc->flags = 0; return ivtv_start_capture(id); case V4L2_ENC_CMD_STOP: IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_STOP\n"); enc->flags &= V4L2_ENC_CMD_STOP_AT_GOP_END; ivtv_stop_capture(id, enc->flags & V4L2_ENC_CMD_STOP_AT_GOP_END); return 0; case V4L2_ENC_CMD_PAUSE: IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_PAUSE\n"); enc->flags = 0; if (!atomic_read(&itv->capturing)) return -EPERM; if (test_and_set_bit(IVTV_F_I_ENC_PAUSED, &itv->i_flags)) return 0; 
ivtv_mute(itv); ivtv_vapi(itv, CX2341X_ENC_PAUSE_ENCODER, 1, 0); break; case V4L2_ENC_CMD_RESUME: IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_RESUME\n"); enc->flags = 0; if (!atomic_read(&itv->capturing)) return -EPERM; if (!test_and_clear_bit(IVTV_F_I_ENC_PAUSED, &itv->i_flags)) return 0; ivtv_vapi(itv, CX2341X_ENC_PAUSE_ENCODER, 1, 1); ivtv_unmute(itv); break; default: IVTV_DEBUG_IOCTL("Unknown cmd %d\n", enc->cmd); return -EINVAL; } return 0; } static int ivtv_try_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd *enc) { struct ivtv *itv = fh2id(fh)->itv; switch (enc->cmd) { case V4L2_ENC_CMD_START: IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_START\n"); enc->flags = 0; return 0; case V4L2_ENC_CMD_STOP: IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_STOP\n"); enc->flags &= V4L2_ENC_CMD_STOP_AT_GOP_END; return 0; case V4L2_ENC_CMD_PAUSE: IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_PAUSE\n"); enc->flags = 0; return 0; case V4L2_ENC_CMD_RESUME: IVTV_DEBUG_IOCTL("V4L2_ENC_CMD_RESUME\n"); enc->flags = 0; return 0; default: IVTV_DEBUG_IOCTL("Unknown cmd %d\n", enc->cmd); return -EINVAL; } } static int ivtv_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *fb) { struct ivtv *itv = fh2id(fh)->itv; u32 data[CX2341X_MBOX_MAX_DATA]; struct yuv_playback_info *yi = &itv->yuv_info; int pixfmt; static u32 pixel_format[16] = { V4L2_PIX_FMT_PAL8, /* Uses a 256-entry RGB colormap */ V4L2_PIX_FMT_RGB565, V4L2_PIX_FMT_RGB555, V4L2_PIX_FMT_RGB444, V4L2_PIX_FMT_RGB32, 0, 0, 0, V4L2_PIX_FMT_PAL8, /* Uses a 256-entry YUV colormap */ V4L2_PIX_FMT_YUV565, V4L2_PIX_FMT_YUV555, V4L2_PIX_FMT_YUV444, V4L2_PIX_FMT_YUV32, 0, 0, 0, }; if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) return -EINVAL; if (!itv->osd_video_pbase) return -EINVAL; fb->capability = V4L2_FBUF_CAP_EXTERNOVERLAY | V4L2_FBUF_CAP_CHROMAKEY | V4L2_FBUF_CAP_GLOBAL_ALPHA; ivtv_vapi_result(itv, data, CX2341X_OSD_GET_STATE, 0); data[0] |= (read_reg(0x2a00) >> 7) & 0x40; pixfmt = (data[0] >> 3) & 0xf; fb->fmt.pixelformat = pixel_format[pixfmt]; fb->fmt.width = itv->osd_rect.width; fb->fmt.height = itv->osd_rect.height; fb->fmt.field = V4L2_FIELD_INTERLACED; fb->fmt.bytesperline = fb->fmt.width; fb->fmt.colorspace = V4L2_COLORSPACE_SMPTE170M; fb->fmt.field = V4L2_FIELD_INTERLACED; fb->fmt.priv = 0; if (fb->fmt.pixelformat != V4L2_PIX_FMT_PAL8) fb->fmt.bytesperline *= 2; if (fb->fmt.pixelformat == V4L2_PIX_FMT_RGB32 || fb->fmt.pixelformat == V4L2_PIX_FMT_YUV32) fb->fmt.bytesperline *= 2; fb->fmt.sizeimage = fb->fmt.bytesperline * fb->fmt.height; fb->base = (void *)itv->osd_video_pbase; fb->flags = 0; if (itv->osd_chroma_key_state) fb->flags |= V4L2_FBUF_FLAG_CHROMAKEY; if (itv->osd_global_alpha_state) fb->flags |= V4L2_FBUF_FLAG_GLOBAL_ALPHA; if (yi->track_osd) fb->flags |= V4L2_FBUF_FLAG_OVERLAY; pixfmt &= 7; /* no local alpha for RGB565 or unknown formats */ if (pixfmt == 1 || pixfmt > 4) return 0; /* 16-bit formats have inverted local alpha */ if (pixfmt == 2 || pixfmt == 3) fb->capability |= V4L2_FBUF_CAP_LOCAL_INV_ALPHA; else fb->capability |= V4L2_FBUF_CAP_LOCAL_ALPHA; if (itv->osd_local_alpha_state) { /* 16-bit formats have inverted local alpha */ if (pixfmt == 2 || pixfmt == 3) fb->flags |= V4L2_FBUF_FLAG_LOCAL_INV_ALPHA; else fb->flags |= V4L2_FBUF_FLAG_LOCAL_ALPHA; } return 0; } static int ivtv_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffer *fb) { struct ivtv_open_id *id = fh2id(fh); struct ivtv *itv = id->itv; struct yuv_playback_info *yi = &itv->yuv_info; if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) return -EINVAL; if (!itv->osd_video_pbase) 
return -EINVAL; itv->osd_global_alpha_state = (fb->flags & V4L2_FBUF_FLAG_GLOBAL_ALPHA) != 0; itv->osd_local_alpha_state = (fb->flags & (V4L2_FBUF_FLAG_LOCAL_ALPHA|V4L2_FBUF_FLAG_LOCAL_INV_ALPHA)) != 0; itv->osd_chroma_key_state = (fb->flags & V4L2_FBUF_FLAG_CHROMAKEY) != 0; ivtv_set_osd_alpha(itv); yi->track_osd = (fb->flags & V4L2_FBUF_FLAG_OVERLAY) != 0; return 0; } static int ivtv_overlay(struct file *file, void *fh, unsigned int on) { struct ivtv_open_id *id = fh2id(fh); struct ivtv *itv = id->itv; if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) return -EINVAL; ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, on != 0); return 0; } static int ivtv_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub) { switch (sub->type) { case V4L2_EVENT_VSYNC: case V4L2_EVENT_EOS: return v4l2_event_subscribe(fh, sub, 0, NULL); case V4L2_EVENT_CTRL: return v4l2_event_subscribe(fh, sub, 0, &v4l2_ctrl_sub_ev_ops); default: return -EINVAL; } } static int ivtv_log_status(struct file *file, void *fh) { struct ivtv *itv = fh2id(fh)->itv; u32 data[CX2341X_MBOX_MAX_DATA]; int has_output = itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT; struct v4l2_input vidin; struct v4l2_audio audin; int i; IVTV_INFO("Version: %s Card: %s\n", IVTV_VERSION, itv->card_name); if (itv->hw_flags & IVTV_HW_TVEEPROM) { struct tveeprom tv; ivtv_read_eeprom(itv, &tv); } ivtv_call_all(itv, core, log_status); ivtv_get_input(itv, itv->active_input, &vidin); ivtv_get_audio_input(itv, itv->audio_input, &audin); IVTV_INFO("Video Input: %s\n", vidin.name); IVTV_INFO("Audio Input: %s%s\n", audin.name, (itv->dualwatch_stereo_mode & ~0x300) == 0x200 ? " (Bilingual)" : ""); if (has_output) { struct v4l2_output vidout; struct v4l2_audioout audout; int mode = itv->output_mode; static const char * const output_modes[5] = { "None", "MPEG Streaming", "YUV Streaming", "YUV Frames", "Passthrough", }; static const char * const alpha_mode[4] = { "None", "Global", "Local", "Global and Local" }; static const char * const pixel_format[16] = { "ARGB Indexed", "RGB 5:6:5", "ARGB 1:5:5:5", "ARGB 1:4:4:4", "ARGB 8:8:8:8", "5", "6", "7", "AYUV Indexed", "YUV 5:6:5", "AYUV 1:5:5:5", "AYUV 1:4:4:4", "AYUV 8:8:8:8", "13", "14", "15", }; ivtv_get_output(itv, itv->active_output, &vidout); ivtv_get_audio_output(itv, 0, &audout); IVTV_INFO("Video Output: %s\n", vidout.name); if (mode < 0 || mode > OUT_PASSTHROUGH) mode = OUT_NONE; IVTV_INFO("Output Mode: %s\n", output_modes[mode]); ivtv_vapi_result(itv, data, CX2341X_OSD_GET_STATE, 0); data[0] |= (read_reg(0x2a00) >> 7) & 0x40; IVTV_INFO("Overlay: %s, Alpha: %s, Pixel Format: %s\n", data[0] & 1 ? "On" : "Off", alpha_mode[(data[0] >> 1) & 0x3], pixel_format[(data[0] >> 3) & 0xf]); } IVTV_INFO("Tuner: %s\n", test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags) ? 
"Radio" : "TV"); v4l2_ctrl_handler_log_status(&itv->cxhdl.hdl, itv->v4l2_dev.name); IVTV_INFO("Status flags: 0x%08lx\n", itv->i_flags); for (i = 0; i < IVTV_MAX_STREAMS; i++) { struct ivtv_stream *s = &itv->streams[i]; if (s->vdev == NULL || s->buffers == 0) continue; IVTV_INFO("Stream %s: status 0x%04lx, %d%% of %d KiB (%d buffers) in use\n", s->name, s->s_flags, (s->buffers - s->q_free.buffers) * 100 / s->buffers, (s->buffers * s->buf_size) / 1024, s->buffers); } IVTV_INFO("Read MPG/VBI: %lld/%lld bytes\n", (long long)itv->mpg_data_received, (long long)itv->vbi_data_inserted); return 0; } static int ivtv_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *dec) { struct ivtv_open_id *id = fh2id(file->private_data); struct ivtv *itv = id->itv; IVTV_DEBUG_IOCTL("VIDIOC_DECODER_CMD %d\n", dec->cmd); return ivtv_video_command(itv, id, dec, false); } static int ivtv_try_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *dec) { struct ivtv_open_id *id = fh2id(file->private_data); struct ivtv *itv = id->itv; IVTV_DEBUG_IOCTL("VIDIOC_TRY_DECODER_CMD %d\n", dec->cmd); return ivtv_video_command(itv, id, dec, true); } static int ivtv_decoder_ioctls(struct file *filp, unsigned int cmd, void *arg) { struct ivtv_open_id *id = fh2id(filp->private_data); struct ivtv *itv = id->itv; int nonblocking = filp->f_flags & O_NONBLOCK; struct ivtv_stream *s = &itv->streams[id->type]; unsigned long iarg = (unsigned long)arg; switch (cmd) { case IVTV_IOC_DMA_FRAME: { struct ivtv_dma_frame *args = arg; IVTV_DEBUG_IOCTL("IVTV_IOC_DMA_FRAME\n"); if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) return -EINVAL; if (args->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) return -EINVAL; if (itv->output_mode == OUT_UDMA_YUV && args->y_source == NULL) return 0; if (ivtv_start_decoding(id, id->type)) { return -EBUSY; } if (ivtv_set_output_mode(itv, OUT_UDMA_YUV) != OUT_UDMA_YUV) { ivtv_release_stream(s); return -EBUSY; } /* Mark that this file handle started the UDMA_YUV mode */ id->yuv_frames = 1; if (args->y_source == NULL) return 0; return ivtv_yuv_prep_frame(itv, args); } case IVTV_IOC_PASSTHROUGH_MODE: IVTV_DEBUG_IOCTL("IVTV_IOC_PASSTHROUGH_MODE\n"); if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) return -EINVAL; return ivtv_passthrough_mode(itv, *(int *)arg != 0); case VIDEO_GET_PTS: { s64 *pts = arg; s64 frame; IVTV_DEBUG_IOCTL("VIDEO_GET_PTS\n"); if (s->type < IVTV_DEC_STREAM_TYPE_MPG) { *pts = s->dma_pts; break; } if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) return -EINVAL; return ivtv_g_pts_frame(itv, pts, &frame); } case VIDEO_GET_FRAME_COUNT: { s64 *frame = arg; s64 pts; IVTV_DEBUG_IOCTL("VIDEO_GET_FRAME_COUNT\n"); if (s->type < IVTV_DEC_STREAM_TYPE_MPG) { *frame = 0; break; } if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) return -EINVAL; return ivtv_g_pts_frame(itv, &pts, frame); } case VIDEO_PLAY: { struct v4l2_decoder_cmd dc; IVTV_DEBUG_IOCTL("VIDEO_PLAY\n"); memset(&dc, 0, sizeof(dc)); dc.cmd = V4L2_DEC_CMD_START; return ivtv_video_command(itv, id, &dc, 0); } case VIDEO_STOP: { struct v4l2_decoder_cmd dc; IVTV_DEBUG_IOCTL("VIDEO_STOP\n"); memset(&dc, 0, sizeof(dc)); dc.cmd = V4L2_DEC_CMD_STOP; dc.flags = V4L2_DEC_CMD_STOP_TO_BLACK | V4L2_DEC_CMD_STOP_IMMEDIATELY; return ivtv_video_command(itv, id, &dc, 0); } case VIDEO_FREEZE: { struct v4l2_decoder_cmd dc; IVTV_DEBUG_IOCTL("VIDEO_FREEZE\n"); memset(&dc, 0, sizeof(dc)); dc.cmd = V4L2_DEC_CMD_PAUSE; return ivtv_video_command(itv, id, &dc, 0); } case VIDEO_CONTINUE: { struct v4l2_decoder_cmd dc; IVTV_DEBUG_IOCTL("VIDEO_CONTINUE\n"); memset(&dc, 0, 
sizeof(dc)); dc.cmd = V4L2_DEC_CMD_RESUME; return ivtv_video_command(itv, id, &dc, 0); } case VIDEO_COMMAND: case VIDEO_TRY_COMMAND: { /* Note: struct v4l2_decoder_cmd has the same layout as struct video_command */ struct v4l2_decoder_cmd *dc = arg; int try = (cmd == VIDEO_TRY_COMMAND); if (try) IVTV_DEBUG_IOCTL("VIDEO_TRY_COMMAND %d\n", dc->cmd); else IVTV_DEBUG_IOCTL("VIDEO_COMMAND %d\n", dc->cmd); return ivtv_video_command(itv, id, dc, try); } case VIDEO_GET_EVENT: { struct video_event *ev = arg; DEFINE_WAIT(wait); IVTV_DEBUG_IOCTL("VIDEO_GET_EVENT\n"); if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) return -EINVAL; memset(ev, 0, sizeof(*ev)); set_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags); while (1) { if (test_and_clear_bit(IVTV_F_I_EV_DEC_STOPPED, &itv->i_flags)) ev->type = VIDEO_EVENT_DECODER_STOPPED; else if (test_and_clear_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags)) { ev->type = VIDEO_EVENT_VSYNC; ev->u.vsync_field = test_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags) ? VIDEO_VSYNC_FIELD_ODD : VIDEO_VSYNC_FIELD_EVEN; if (itv->output_mode == OUT_UDMA_YUV && (itv->yuv_info.lace_mode & IVTV_YUV_MODE_MASK) == IVTV_YUV_MODE_PROGRESSIVE) { ev->u.vsync_field = VIDEO_VSYNC_FIELD_PROGRESSIVE; } } if (ev->type) return 0; if (nonblocking) return -EAGAIN; /* Wait for event. Note that serialize_lock is locked, so to allow other processes to access the driver while we are waiting unlock first and later lock again. */ mutex_unlock(&itv->serialize_lock); prepare_to_wait(&itv->event_waitq, &wait, TASK_INTERRUPTIBLE); if (!test_bit(IVTV_F_I_EV_DEC_STOPPED, &itv->i_flags) && !test_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags)) schedule(); finish_wait(&itv->event_waitq, &wait); mutex_lock(&itv->serialize_lock); if (signal_pending(current)) { /* return if a signal was received */ IVTV_DEBUG_INFO("User stopped wait for event\n"); return -EINTR; } } break; } case VIDEO_SELECT_SOURCE: IVTV_DEBUG_IOCTL("VIDEO_SELECT_SOURCE\n"); if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)) return -EINVAL; return ivtv_passthrough_mode(itv, iarg == VIDEO_SOURCE_DEMUX); case AUDIO_SET_MUTE: IVTV_DEBUG_IOCTL("AUDIO_SET_MUTE\n"); itv->speed_mute_audio = iarg; return 0; case AUDIO_CHANNEL_SELECT: IVTV_DEBUG_IOCTL("AUDIO_CHANNEL_SELECT\n"); if (iarg > AUDIO_STEREO_SWAPPED) return -EINVAL; return v4l2_ctrl_s_ctrl(itv->ctrl_audio_playback, iarg + 1); case AUDIO_BILINGUAL_CHANNEL_SELECT: IVTV_DEBUG_IOCTL("AUDIO_BILINGUAL_CHANNEL_SELECT\n"); if (iarg > AUDIO_STEREO_SWAPPED) return -EINVAL; return v4l2_ctrl_s_ctrl(itv->ctrl_audio_multilingual_playback, iarg + 1); default: return -EINVAL; } return 0; } static long ivtv_default(struct file *file, void *fh, bool valid_prio, unsigned int cmd, void *arg) { struct ivtv *itv = fh2id(fh)->itv; if (!valid_prio) { switch (cmd) { case IVTV_IOC_PASSTHROUGH_MODE: case VIDEO_PLAY: case VIDEO_STOP: case VIDEO_FREEZE: case VIDEO_CONTINUE: case VIDEO_COMMAND: case VIDEO_SELECT_SOURCE: case AUDIO_SET_MUTE: case AUDIO_CHANNEL_SELECT: case AUDIO_BILINGUAL_CHANNEL_SELECT: return -EBUSY; } } switch (cmd) { case VIDIOC_INT_RESET: { u32 val = *(u32 *)arg; if ((val == 0 && itv->options.newi2c) || (val & 0x01)) ivtv_reset_ir_gpio(itv); if (val & 0x02) v4l2_subdev_call(itv->sd_video, core, reset, 0); break; } case IVTV_IOC_DMA_FRAME: case IVTV_IOC_PASSTHROUGH_MODE: case VIDEO_GET_PTS: case VIDEO_GET_FRAME_COUNT: case VIDEO_GET_EVENT: case VIDEO_PLAY: case VIDEO_STOP: case VIDEO_FREEZE: case VIDEO_CONTINUE: case VIDEO_COMMAND: case VIDEO_TRY_COMMAND: case VIDEO_SELECT_SOURCE: case AUDIO_SET_MUTE: case AUDIO_CHANNEL_SELECT: 
case AUDIO_BILINGUAL_CHANNEL_SELECT: return ivtv_decoder_ioctls(file, cmd, (void *)arg); default: return -ENOTTY; } return 0; } static const struct v4l2_ioctl_ops ivtv_ioctl_ops = { .vidioc_querycap = ivtv_querycap, .vidioc_s_audio = ivtv_s_audio, .vidioc_g_audio = ivtv_g_audio, .vidioc_enumaudio = ivtv_enumaudio, .vidioc_s_audout = ivtv_s_audout, .vidioc_g_audout = ivtv_g_audout, .vidioc_enum_input = ivtv_enum_input, .vidioc_enum_output = ivtv_enum_output, .vidioc_enumaudout = ivtv_enumaudout, .vidioc_cropcap = ivtv_cropcap, .vidioc_s_crop = ivtv_s_crop, .vidioc_g_crop = ivtv_g_crop, .vidioc_g_input = ivtv_g_input, .vidioc_s_input = ivtv_s_input, .vidioc_g_output = ivtv_g_output, .vidioc_s_output = ivtv_s_output, .vidioc_g_frequency = ivtv_g_frequency, .vidioc_s_frequency = ivtv_s_frequency, .vidioc_s_tuner = ivtv_s_tuner, .vidioc_g_tuner = ivtv_g_tuner, .vidioc_g_enc_index = ivtv_g_enc_index, .vidioc_g_fbuf = ivtv_g_fbuf, .vidioc_s_fbuf = ivtv_s_fbuf, .vidioc_g_std = ivtv_g_std, .vidioc_s_std = ivtv_s_std, .vidioc_overlay = ivtv_overlay, .vidioc_log_status = ivtv_log_status, .vidioc_enum_fmt_vid_cap = ivtv_enum_fmt_vid_cap, .vidioc_encoder_cmd = ivtv_encoder_cmd, .vidioc_try_encoder_cmd = ivtv_try_encoder_cmd, .vidioc_decoder_cmd = ivtv_decoder_cmd, .vidioc_try_decoder_cmd = ivtv_try_decoder_cmd, .vidioc_enum_fmt_vid_out = ivtv_enum_fmt_vid_out, .vidioc_g_fmt_vid_cap = ivtv_g_fmt_vid_cap, .vidioc_g_fmt_vbi_cap = ivtv_g_fmt_vbi_cap, .vidioc_g_fmt_sliced_vbi_cap = ivtv_g_fmt_sliced_vbi_cap, .vidioc_g_fmt_vid_out = ivtv_g_fmt_vid_out, .vidioc_g_fmt_vid_out_overlay = ivtv_g_fmt_vid_out_overlay, .vidioc_g_fmt_sliced_vbi_out = ivtv_g_fmt_sliced_vbi_out, .vidioc_s_fmt_vid_cap = ivtv_s_fmt_vid_cap, .vidioc_s_fmt_vbi_cap = ivtv_s_fmt_vbi_cap, .vidioc_s_fmt_sliced_vbi_cap = ivtv_s_fmt_sliced_vbi_cap, .vidioc_s_fmt_vid_out = ivtv_s_fmt_vid_out, .vidioc_s_fmt_vid_out_overlay = ivtv_s_fmt_vid_out_overlay, .vidioc_s_fmt_sliced_vbi_out = ivtv_s_fmt_sliced_vbi_out, .vidioc_try_fmt_vid_cap = ivtv_try_fmt_vid_cap, .vidioc_try_fmt_vbi_cap = ivtv_try_fmt_vbi_cap, .vidioc_try_fmt_sliced_vbi_cap = ivtv_try_fmt_sliced_vbi_cap, .vidioc_try_fmt_vid_out = ivtv_try_fmt_vid_out, .vidioc_try_fmt_vid_out_overlay = ivtv_try_fmt_vid_out_overlay, .vidioc_try_fmt_sliced_vbi_out = ivtv_try_fmt_sliced_vbi_out, .vidioc_g_sliced_vbi_cap = ivtv_g_sliced_vbi_cap, .vidioc_g_chip_ident = ivtv_g_chip_ident, #ifdef CONFIG_VIDEO_ADV_DEBUG .vidioc_g_register = ivtv_g_register, .vidioc_s_register = ivtv_s_register, #endif .vidioc_default = ivtv_default, .vidioc_subscribe_event = ivtv_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; void ivtv_set_funcs(struct video_device *vdev) { vdev->ioctl_ops = &ivtv_ioctl_ops; }
gpl-2.0
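The decoder command handling above is driven from userspace through the VIDIOC_TRY_DECODER_CMD and VIDIOC_DECODER_CMD ioctls. A minimal client sketch, assuming the conventional ivtv decoder node /dev/video16 (the device path is an assumption, not something this file defines):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_decoder_cmd dc;
	int fd = open("/dev/video16", O_RDWR);   /* assumed decoder node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Ask the driver what it would really do for 2x playback: the
	 * try path runs ivtv_validate_speed() and snaps the speed. */
	memset(&dc, 0, sizeof(dc));
	dc.cmd = V4L2_DEC_CMD_START;
	dc.start.speed = 2000;
	if (ioctl(fd, VIDIOC_TRY_DECODER_CMD, &dc) == 0)
		printf("validated speed: %d\n", dc.start.speed);

	/* Issue the command for real with the validated parameters. */
	if (ioctl(fd, VIDIOC_DECODER_CMD, &dc) < 0)
		perror("VIDIOC_DECODER_CMD");

	close(fd);
	return 0;
}

The try call is worth making first because ivtv_validate_speed() maps arbitrary requests onto the fast-forward rates (1500, 2000) and the 1000/n slow-motion speeds rather than honoring the value as given.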
dev-harsh1998/android_kernel_cyanogen_msm8916
drivers/staging/comedi/drivers/aio_iiro_16.c
2090
3186
/* comedi/drivers/aio_iiro_16.c Driver for Access I/O Products PC-104 AIO-IIRO-16 Digital I/O board Copyright (C) 2006 C&C Technologies, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: aio_iiro_16 Description: Access I/O Products PC-104 IIRO16 Relay And Isolated Input Board Author: Zachary Ware <zach.ware@cctechnol.com> Devices: [Access I/O] PC-104 AIO12-8 Status: experimental Configuration Options: [0] - I/O port base address */ #include "../comedidev.h" #include <linux/ioport.h> #define AIO_IIRO_16_SIZE 0x08 #define AIO_IIRO_16_RELAY_0_7 0x00 #define AIO_IIRO_16_INPUT_0_7 0x01 #define AIO_IIRO_16_IRQ 0x02 #define AIO_IIRO_16_RELAY_8_15 0x04 #define AIO_IIRO_16_INPUT_8_15 0x05 static int aio_iiro_16_dio_insn_bits_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { if (data[0]) { s->state &= ~data[0]; s->state |= data[0] & data[1]; outb(s->state & 0xff, dev->iobase + AIO_IIRO_16_RELAY_0_7); outb((s->state >> 8) & 0xff, dev->iobase + AIO_IIRO_16_RELAY_8_15); } data[1] = s->state; return insn->n; } static int aio_iiro_16_dio_insn_bits_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { data[1] = 0; data[1] |= inb(dev->iobase + AIO_IIRO_16_INPUT_0_7); data[1] |= inb(dev->iobase + AIO_IIRO_16_INPUT_8_15) << 8; return insn->n; } static int aio_iiro_16_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_subdevice *s; int ret; ret = comedi_request_region(dev, it->options[0], AIO_IIRO_16_SIZE); if (ret) return ret; ret = comedi_alloc_subdevices(dev, 2); if (ret) return ret; s = &dev->subdevices[0]; s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_WRITABLE; s->n_chan = 16; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = aio_iiro_16_dio_insn_bits_write; s = &dev->subdevices[1]; s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_READABLE; s->n_chan = 16; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = aio_iiro_16_dio_insn_bits_read; return 1; } static struct comedi_driver aio_iiro_16_driver = { .driver_name = "aio_iiro_16", .module = THIS_MODULE, .attach = aio_iiro_16_attach, .detach = comedi_legacy_detach, }; module_comedi_driver(aio_iiro_16_driver); MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
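Exercising this driver from userspace goes through comedilib rather than raw ioctls. A short sketch, assuming the board was attached as /dev/comedi0 (an assumed path) and using the two subdevices registered in aio_iiro_16_attach(): relay outputs on subdevice 0, isolated inputs on subdevice 1.

#include <stdio.h>
#include <comedilib.h>

int main(void)
{
	comedi_t *dev = comedi_open("/dev/comedi0");  /* assumed attachment */
	unsigned int bits;

	if (!dev) {
		comedi_perror("comedi_open");
		return 1;
	}

	/* Subdevice 0: relays. One call writes all 16 channels; the
	 * write mask selects which relays are actually touched. */
	bits = 0x0003;                        /* energize relays 0 and 1 */
	comedi_dio_bitfield2(dev, 0, 0xffff, &bits, 0);

	/* Subdevice 1: isolated inputs. A zero write mask makes this a
	 * pure read; bits returns with the 16 input states. */
	bits = 0;
	comedi_dio_bitfield2(dev, 1, 0, &bits, 0);
	printf("inputs: 0x%04x\n", bits);

	comedi_close(dev);
	return 0;
}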
CyanogenMod/android_kernel_amazon_otter-common
block/blk-tag.c
3882
10125
/* * Functions related to tagged command queuing */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/slab.h> #include "blk.h" /** * blk_queue_find_tag - find a request by its tag and queue * @q: The request queue for the device * @tag: The tag of the request * * Notes: * Should be used when a device returns a tag and you want to match * it with a request. * * no locks need be held. **/ struct request *blk_queue_find_tag(struct request_queue *q, int tag) { return blk_map_queue_find_tag(q->queue_tags, tag); } EXPORT_SYMBOL(blk_queue_find_tag); /** * __blk_free_tags - release a given set of tag maintenance info * @bqt: the tag map to free * * Tries to free the specified @bqt. Returns true if it was * actually freed and false if there are still references using it */ static int __blk_free_tags(struct blk_queue_tag *bqt) { int retval; retval = atomic_dec_and_test(&bqt->refcnt); if (retval) { BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) < bqt->max_depth); kfree(bqt->tag_index); bqt->tag_index = NULL; kfree(bqt->tag_map); bqt->tag_map = NULL; kfree(bqt); } return retval; } /** * __blk_queue_free_tags - release tag maintenance info * @q: the request queue for the device * * Notes: * blk_cleanup_queue() will take care of calling this function, if tagging * has been used. So there's no need to call this directly. **/ void __blk_queue_free_tags(struct request_queue *q) { struct blk_queue_tag *bqt = q->queue_tags; if (!bqt) return; __blk_free_tags(bqt); q->queue_tags = NULL; queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q); } /** * blk_free_tags - release a given set of tag maintenance info * @bqt: the tag map to free * * For externally managed @bqt frees the map. Callers of this * function must guarantee to have released all the queues that * might have been using this tag map. */ void blk_free_tags(struct blk_queue_tag *bqt) { if (unlikely(!__blk_free_tags(bqt))) BUG(); } EXPORT_SYMBOL(blk_free_tags); /** * blk_queue_free_tags - release tag maintenance info * @q: the request queue for the device * * Notes: * This is used to disable tagged queuing to a device, yet leave * queue in function. 
**/ void blk_queue_free_tags(struct request_queue *q) { queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q); } EXPORT_SYMBOL(blk_queue_free_tags); static int init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth) { struct request **tag_index; unsigned long *tag_map; int nr_ulongs; if (q && depth > q->nr_requests * 2) { depth = q->nr_requests * 2; printk(KERN_ERR "%s: adjusted depth to %d\n", __func__, depth); } tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC); if (!tag_index) goto fail; nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG; tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC); if (!tag_map) goto fail; tags->real_max_depth = depth; tags->max_depth = depth; tags->tag_index = tag_index; tags->tag_map = tag_map; return 0; fail: kfree(tag_index); return -ENOMEM; } static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q, int depth) { struct blk_queue_tag *tags; tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC); if (!tags) goto fail; if (init_tag_map(q, tags, depth)) goto fail; atomic_set(&tags->refcnt, 1); return tags; fail: kfree(tags); return NULL; } /** * blk_init_tags - initialize the tag info for an external tag map * @depth: the maximum queue depth supported **/ struct blk_queue_tag *blk_init_tags(int depth) { return __blk_queue_init_tags(NULL, depth); } EXPORT_SYMBOL(blk_init_tags); /** * blk_queue_init_tags - initialize the queue tag info * @q: the request queue for the device * @depth: the maximum queue depth supported * @tags: the tag to use * * Queue lock must be held here if the function is called to resize an * existing map. **/ int blk_queue_init_tags(struct request_queue *q, int depth, struct blk_queue_tag *tags) { int rc; BUG_ON(tags && q->queue_tags && tags != q->queue_tags); if (!tags && !q->queue_tags) { tags = __blk_queue_init_tags(q, depth); if (!tags) goto fail; } else if (q->queue_tags) { rc = blk_queue_resize_tags(q, depth); if (rc) return rc; queue_flag_set(QUEUE_FLAG_QUEUED, q); return 0; } else atomic_inc(&tags->refcnt); /* * assign it, all done */ q->queue_tags = tags; queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q); INIT_LIST_HEAD(&q->tag_busy_list); return 0; fail: kfree(tags); return -ENOMEM; } EXPORT_SYMBOL(blk_queue_init_tags); /** * blk_queue_resize_tags - change the queueing depth * @q: the request queue for the device * @new_depth: the new max command queueing depth * * Notes: * Must be called with the queue lock held. **/ int blk_queue_resize_tags(struct request_queue *q, int new_depth) { struct blk_queue_tag *bqt = q->queue_tags; struct request **tag_index; unsigned long *tag_map; int max_depth, nr_ulongs; if (!bqt) return -ENXIO; /* * if we already have large enough real_max_depth. just * adjust max_depth. *NOTE* as requests with tag value * between new_depth and real_max_depth can be in-flight, tag * map can not be shrunk blindly here. 
*/ if (new_depth <= bqt->real_max_depth) { bqt->max_depth = new_depth; return 0; } /* * Currently cannot replace a shared tag map with a new * one, so error out if this is the case */ if (atomic_read(&bqt->refcnt) != 1) return -EBUSY; /* * save the old state info, so we can copy it back */ tag_index = bqt->tag_index; tag_map = bqt->tag_map; max_depth = bqt->real_max_depth; if (init_tag_map(q, bqt, new_depth)) return -ENOMEM; memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *)); nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG; memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long)); kfree(tag_index); kfree(tag_map); return 0; } EXPORT_SYMBOL(blk_queue_resize_tags); /** * blk_queue_end_tag - end tag operations for a request * @q: the request queue for the device * @rq: the request that has completed * * Description: * Typically called when end_that_request_first() returns %0, meaning * all transfers have been done for a request. It's important to call * this function before end_that_request_last(), as that will put the * request back on the free list thus corrupting the internal tag list. * * Notes: * queue lock must be held. **/ void blk_queue_end_tag(struct request_queue *q, struct request *rq) { struct blk_queue_tag *bqt = q->queue_tags; int tag = rq->tag; BUG_ON(tag == -1); if (unlikely(tag >= bqt->real_max_depth)) /* * This can happen after tag depth has been reduced. * FIXME: how about a warning or info message here? */ return; list_del_init(&rq->queuelist); rq->cmd_flags &= ~REQ_QUEUED; rq->tag = -1; if (unlikely(bqt->tag_index[tag] == NULL)) printk(KERN_ERR "%s: tag %d is missing\n", __func__, tag); bqt->tag_index[tag] = NULL; if (unlikely(!test_bit(tag, bqt->tag_map))) { printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n", __func__, tag); return; } /* * The tag_map bit acts as a lock for tag_index[bit], so we need * unlock memory barrier semantics. */ clear_bit_unlock(tag, bqt->tag_map); } EXPORT_SYMBOL(blk_queue_end_tag); /** * blk_queue_start_tag - find a free tag and assign it * @q: the request queue for the device * @rq: the block request that needs tagging * * Description: * This can either be used as a stand-alone helper, or possibly be * assigned as the queue &prep_rq_fn (in which case &struct request * automagically gets a tag assigned). Note that this function * assumes that any type of request can be queued! if this is not * true for your device, you must check the request type before * calling this function. The request will also be removed from * the request queue, so it's the drivers responsibility to readd * it if it should need to be restarted for some reason. * * Notes: * queue lock must be held. **/ int blk_queue_start_tag(struct request_queue *q, struct request *rq) { struct blk_queue_tag *bqt = q->queue_tags; unsigned max_depth; int tag; if (unlikely((rq->cmd_flags & REQ_QUEUED))) { printk(KERN_ERR "%s: request %p for device [%s] already tagged %d", __func__, rq, rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag); BUG(); } /* * Protect against shared tag maps, as we may not have exclusive * access to the tag map. * * We reserve a few tags just for sync IO, since we don't want * to starve sync IO on behalf of flooding async IO. 
*/ max_depth = bqt->max_depth; if (!rq_is_sync(rq) && max_depth > 1) { max_depth -= 2; if (!max_depth) max_depth = 1; if (q->in_flight[BLK_RW_ASYNC] > max_depth) return 1; } do { tag = find_first_zero_bit(bqt->tag_map, max_depth); if (tag >= max_depth) return 1; } while (test_and_set_bit_lock(tag, bqt->tag_map)); /* * We need lock ordering semantics given by test_and_set_bit_lock. * See blk_queue_end_tag for details. */ rq->cmd_flags |= REQ_QUEUED; rq->tag = tag; bqt->tag_index[tag] = rq; blk_start_request(rq); list_add(&rq->queuelist, &q->tag_busy_list); return 0; } EXPORT_SYMBOL(blk_queue_start_tag); /** * blk_queue_invalidate_tags - invalidate all pending tags * @q: the request queue for the device * * Description: * Hardware conditions may dictate a need to stop all pending requests. * In this case, we will safely clear the block side of the tag queue and * readd all requests to the request queue in the right order. * * Notes: * queue lock must be held. **/ void blk_queue_invalidate_tags(struct request_queue *q) { struct list_head *tmp, *n; list_for_each_safe(tmp, n, &q->tag_busy_list) blk_requeue_request(q, list_entry_rq(tmp)); } EXPORT_SYMBOL(blk_queue_invalidate_tags);
gpl-2.0
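The tag bitmap above doubles as a lock: blk_queue_start_tag() claims a tag with an acquire-ordered test_and_set_bit_lock() and blk_queue_end_tag() releases it with clear_bit_unlock(), so tag_index[tag] needs no spinlock of its own. A standalone C11 sketch of the same allocate/release pattern (illustrative only, not the block layer's actual code; __builtin_ctzl assumes a GCC-compatible compiler):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

#define DEPTH (sizeof(unsigned long) * CHAR_BIT)

static _Atomic unsigned long tag_map;   /* one word of tags, initially all free */

/* Mirrors find_first_zero_bit() + test_and_set_bit_lock(): scan for a
 * free tag, then try to claim it atomically; rescan if another thread
 * won the race. Returns -1 when every tag looked busy. */
static int tag_alloc(void)
{
	for (;;) {
		unsigned long map = atomic_load_explicit(&tag_map,
							 memory_order_relaxed);
		int tag;

		if (map == ~0UL)
			return -1;
		tag = __builtin_ctzl(~map);     /* first zero bit */
		/* acquire pairs with the release in tag_free() */
		if (!(atomic_fetch_or_explicit(&tag_map, 1UL << tag,
					       memory_order_acquire) & (1UL << tag)))
			return tag;
	}
}

/* Mirrors clear_bit_unlock(): release ordering guarantees that all
 * stores to the object this tag protected are visible before the tag
 * can be handed out again. */
static void tag_free(int tag)
{
	atomic_fetch_and_explicit(&tag_map, ~(1UL << tag),
				  memory_order_release);
}

int main(void)
{
	int a = tag_alloc(), b = tag_alloc();

	printf("allocated tags %d and %d of %zu\n", a, b, DEPTH);
	tag_free(a);
	tag_free(b);
	return 0;
}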
webore/lenovo
drivers/net/wireless/bcm4329/bcmsdspi_linux.c
4138
6215
/* * Broadcom SPI Host Controller Driver - Linux Per-port * * Copyright (C) 1999-2010, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2 (the "GPL"), * available at http://www.broadcom.com/licenses/GPLv2.php, with the * following added to such license: * * As a special exception, the copyright holders of this software give you * permission to link this software with independent modules, and to copy and * distribute the resulting executable under terms of your choice, provided that * you also meet, for each linked independent module, the terms and conditions of * the license of that module. An independent module is a module which is not * derived from this software. The special exception does not apply to any * modifications of the software. * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. * * $Id: bcmsdspi_linux.c,v 1.7.2.1.4.3 2008/06/30 21:09:36 Exp $ */ #include <typedefs.h> #include <pcicfg.h> #include <bcmutils.h> #include <sdio.h> /* SDIO Specs */ #include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */ #include <sdiovar.h> /* to get msglevel bit values */ #include <linux/sched.h> /* request_irq(), free_irq() */ #include <bcmsdspi.h> #include <bcmspi.h> extern uint sd_crc; module_param(sd_crc, uint, 0); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) #define KERNEL26 #endif struct sdos_info { sdioh_info_t *sd; spinlock_t lock; wait_queue_head_t intr_wait_queue; }; #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) #define BLOCKABLE() (!in_atomic()) #else #define BLOCKABLE() (!in_interrupt()) #endif /* Interrupt handler */ static irqreturn_t sdspi_isr(int irq, void *dev_id #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) , struct pt_regs *ptregs #endif ) { sdioh_info_t *sd; struct sdos_info *sdos; bool ours; sd = (sdioh_info_t *)dev_id; sd->local_intrcount++; if (!sd->card_init_done) { sd_err(("%s: Hey Bogus intr...not even initted: irq %d\n", __FUNCTION__, irq)); return IRQ_RETVAL(FALSE); } else { ours = spi_check_client_intr(sd, NULL); /* For local interrupts, wake the waiting process */ if (ours && sd->got_hcint) { sdos = (struct sdos_info *)sd->sdos_info; wake_up_interruptible(&sdos->intr_wait_queue); } return IRQ_RETVAL(ours); } } /* Register with Linux for interrupts */ int spi_register_irq(sdioh_info_t *sd, uint irq) { sd_trace(("Entering %s: irq == %d\n", __FUNCTION__, irq)); if (request_irq(irq, sdspi_isr, IRQF_SHARED, "bcmsdspi", sd) < 0) { sd_err(("%s: request_irq() failed\n", __FUNCTION__)); return ERROR; } return SUCCESS; } /* Free Linux irq */ void spi_free_irq(uint irq, sdioh_info_t *sd) { free_irq(irq, sd); } /* Map Host controller registers */ uint32 * spi_reg_map(osl_t *osh, uintptr addr, int size) { return (uint32 *)REG_MAP(addr, size); } void spi_reg_unmap(osl_t *osh, uintptr addr, int size) { REG_UNMAP((void*)(uintptr)addr); } int spi_osinit(sdioh_info_t *sd) { struct sdos_info *sdos; sdos = (struct sdos_info*)MALLOC(sd->osh, sizeof(struct sdos_info)); sd->sdos_info = (void*)sdos; if (sdos == NULL) return BCME_NOMEM; sdos->sd = sd; spin_lock_init(&sdos->lock); init_waitqueue_head(&sdos->intr_wait_queue); return BCME_OK; } void spi_osfree(sdioh_info_t *sd) { struct sdos_info *sdos; ASSERT(sd && 
sd->sdos_info); sdos = (struct sdos_info *)sd->sdos_info; MFREE(sd->osh, sdos, sizeof(struct sdos_info)); } /* Interrupt enable/disable */ SDIOH_API_RC sdioh_interrupt_set(sdioh_info_t *sd, bool enable) { ulong flags; struct sdos_info *sdos; sd_trace(("%s: %s\n", __FUNCTION__, enable ? "Enabling" : "Disabling")); sdos = (struct sdos_info *)sd->sdos_info; ASSERT(sdos); if (!(sd->host_init_done && sd->card_init_done)) { sd_err(("%s: Card & Host are not initted - bailing\n", __FUNCTION__)); return SDIOH_API_RC_FAIL; } if (enable && !(sd->intr_handler && sd->intr_handler_arg)) { sd_err(("%s: no handler registered, will not enable\n", __FUNCTION__)); return SDIOH_API_RC_FAIL; } /* Ensure atomicity for enable/disable calls */ spin_lock_irqsave(&sdos->lock, flags); sd->client_intr_enabled = enable; if (enable && !sd->lockcount) spi_devintr_on(sd); else spi_devintr_off(sd); spin_unlock_irqrestore(&sdos->lock, flags); return SDIOH_API_RC_SUCCESS; } /* Protect against reentrancy (disable device interrupts while executing) */ void spi_lock(sdioh_info_t *sd) { ulong flags; struct sdos_info *sdos; sdos = (struct sdos_info *)sd->sdos_info; ASSERT(sdos); sd_trace(("%s: %d\n", __FUNCTION__, sd->lockcount)); spin_lock_irqsave(&sdos->lock, flags); if (sd->lockcount) { sd_err(("%s: Already locked!\n", __FUNCTION__)); ASSERT(sd->lockcount == 0); } spi_devintr_off(sd); sd->lockcount++; spin_unlock_irqrestore(&sdos->lock, flags); } /* Enable client interrupt */ void spi_unlock(sdioh_info_t *sd) { ulong flags; struct sdos_info *sdos; sd_trace(("%s: %d, %d\n", __FUNCTION__, sd->lockcount, sd->client_intr_enabled)); ASSERT(sd->lockcount > 0); sdos = (struct sdos_info *)sd->sdos_info; ASSERT(sdos); spin_lock_irqsave(&sdos->lock, flags); if (--sd->lockcount == 0 && sd->client_intr_enabled) { spi_devintr_on(sd); } spin_unlock_irqrestore(&sdos->lock, flags); } void spi_waitbits(sdioh_info_t *sd, bool yield) { struct sdos_info *sdos; sdos = (struct sdos_info *)sd->sdos_info; #ifndef BCMSDYIELD ASSERT(!yield); #endif sd_trace(("%s: yield %d canblock %d\n", __FUNCTION__, yield, BLOCKABLE())); /* Clear the "interrupt happened" flag and last intrstatus */ sd->got_hcint = FALSE; #ifdef BCMSDYIELD if (yield && BLOCKABLE()) { /* Wait for the indication, the interrupt will be masked when the ISR fires. */ wait_event_interruptible(sdos->intr_wait_queue, (sd->got_hcint)); } else #endif /* BCMSDYIELD */ { spi_spinbits(sd); } }
gpl-2.0
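A note on the Broadcom SPI host record above: the spi_lock()/spi_unlock() pair keeps a nesting counter so device interrupts stay masked for the whole duration of a command and are re-armed only when the last holder leaves and a client handler is actually registered. Below is a minimal userspace sketch of that idiom, assuming pthreads in place of spin_lock_irqsave(); every name here (demo_dev, demo_lock, intr_on) is invented for illustration and none of this is driver code.

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_dev {
	pthread_mutex_t lock;
	int lockcount;              /* >0 while a command is in flight */
	bool client_intr_enabled;   /* what the interrupt_set() call last asked for */
	bool intr_on;               /* stands in for the hardware intr-enable bit */
};

static void demo_lock(struct demo_dev *d)
{
	pthread_mutex_lock(&d->lock);
	assert(d->lockcount == 0);  /* the driver treats re-entry as a bug */
	d->intr_on = false;         /* analogous to spi_devintr_off() */
	d->lockcount++;
	pthread_mutex_unlock(&d->lock);
}

static void demo_unlock(struct demo_dev *d)
{
	pthread_mutex_lock(&d->lock);
	/* re-enable only when the last holder leaves AND a client wants irqs */
	if (--d->lockcount == 0 && d->client_intr_enabled)
		d->intr_on = true;  /* analogous to spi_devintr_on() */
	pthread_mutex_unlock(&d->lock);
}

int main(void)
{
	struct demo_dev d = { PTHREAD_MUTEX_INITIALIZER, 0, true, true };

	demo_lock(&d);   /* interrupts masked for the duration of a command */
	demo_unlock(&d); /* restored because client_intr_enabled is set */
	printf("intr_on=%d\n", d.intr_on); /* prints intr_on=1 */
	return 0;
}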
jamison904/Galaxy_Note_3
arch/tile/kernel/compat_signal.c
4394
12202
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/wait.h> #include <linux/unistd.h> #include <linux/stddef.h> #include <linux/personality.h> #include <linux/suspend.h> #include <linux/ptrace.h> #include <linux/elf.h> #include <linux/compat.h> #include <linux/syscalls.h> #include <linux/uaccess.h> #include <asm/processor.h> #include <asm/ucontext.h> #include <asm/sigframe.h> #include <asm/syscalls.h> #include <arch/interrupts.h> struct compat_sigaction { compat_uptr_t sa_handler; compat_ulong_t sa_flags; compat_uptr_t sa_restorer; sigset_t sa_mask __packed; }; struct compat_sigaltstack { compat_uptr_t ss_sp; int ss_flags; compat_size_t ss_size; }; struct compat_ucontext { compat_ulong_t uc_flags; compat_uptr_t uc_link; struct compat_sigaltstack uc_stack; struct sigcontext uc_mcontext; sigset_t uc_sigmask; /* mask last for extensibility */ }; #define COMPAT_SI_PAD_SIZE ((SI_MAX_SIZE - 3 * sizeof(int)) / sizeof(int)) struct compat_siginfo { int si_signo; int si_errno; int si_code; union { int _pad[COMPAT_SI_PAD_SIZE]; /* kill() */ struct { unsigned int _pid; /* sender's pid */ unsigned int _uid; /* sender's uid */ } _kill; /* POSIX.1b timers */ struct { compat_timer_t _tid; /* timer id */ int _overrun; /* overrun count */ compat_sigval_t _sigval; /* same as below */ int _sys_private; /* not to be passed to user */ int _overrun_incr; /* amount to add to overrun */ } _timer; /* POSIX.1b signals */ struct { unsigned int _pid; /* sender's pid */ unsigned int _uid; /* sender's uid */ compat_sigval_t _sigval; } _rt; /* SIGCHLD */ struct { unsigned int _pid; /* which child */ unsigned int _uid; /* sender's uid */ int _status; /* exit code */ compat_clock_t _utime; compat_clock_t _stime; } _sigchld; /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */ struct { unsigned int _addr; /* faulting insn/memory ref. */ #ifdef __ARCH_SI_TRAPNO int _trapno; /* TRAP # which caused the signal */ #endif } _sigfault; /* SIGPOLL */ struct { int _band; /* POLL_IN, POLL_OUT, POLL_MSG */ int _fd; } _sigpoll; } _sifields; }; struct compat_rt_sigframe { unsigned char save_area[C_ABI_SAVE_AREA_SIZE]; /* caller save area */ struct compat_siginfo info; struct compat_ucontext uc; }; #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act, struct compat_sigaction __user *oact, size_t sigsetsize) { struct k_sigaction new_sa, old_sa; int ret = -EINVAL; /* XXX: Don't preclude handling different sized sigset_t's. 
*/ if (sigsetsize != sizeof(sigset_t)) goto out; if (act) { compat_uptr_t handler, restorer; if (!access_ok(VERIFY_READ, act, sizeof(*act)) || __get_user(handler, &act->sa_handler) || __get_user(new_sa.sa.sa_flags, &act->sa_flags) || __get_user(restorer, &act->sa_restorer) || __copy_from_user(&new_sa.sa.sa_mask, &act->sa_mask, sizeof(sigset_t))) return -EFAULT; new_sa.sa.sa_handler = compat_ptr(handler); new_sa.sa.sa_restorer = compat_ptr(restorer); } ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL); if (!ret && oact) { if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || __put_user(ptr_to_compat(old_sa.sa.sa_handler), &oact->sa_handler) || __put_user(ptr_to_compat(old_sa.sa.sa_restorer), &oact->sa_restorer) || __put_user(old_sa.sa.sa_flags, &oact->sa_flags) || __copy_to_user(&oact->sa_mask, &old_sa.sa.sa_mask, sizeof(sigset_t))) return -EFAULT; } out: return ret; } long compat_sys_rt_sigqueueinfo(int pid, int sig, struct compat_siginfo __user *uinfo) { siginfo_t info; int ret; mm_segment_t old_fs = get_fs(); if (copy_siginfo_from_user32(&info, uinfo)) return -EFAULT; set_fs(KERNEL_DS); ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force __user *)&info); set_fs(old_fs); return ret; } int copy_siginfo_to_user32(struct compat_siginfo __user *to, siginfo_t *from) { int err; if (!access_ok(VERIFY_WRITE, to, sizeof(struct compat_siginfo))) return -EFAULT; /* If you change siginfo_t structure, please make sure that this code is fixed accordingly. It should never copy any pad contained in the structure to avoid security leaks, but must copy the generic 3 ints plus the relevant union member. */ err = __put_user(from->si_signo, &to->si_signo); err |= __put_user(from->si_errno, &to->si_errno); err |= __put_user((short)from->si_code, &to->si_code); if (from->si_code < 0) { err |= __put_user(from->si_pid, &to->si_pid); err |= __put_user(from->si_uid, &to->si_uid); err |= __put_user(ptr_to_compat(from->si_ptr), &to->si_ptr); } else { /* * First 32bits of unions are always present: * si_pid === si_band === si_tid === si_addr(LS half) */ err |= __put_user(from->_sifields._pad[0], &to->_sifields._pad[0]); switch (from->si_code >> 16) { case __SI_FAULT >> 16: break; case __SI_CHLD >> 16: err |= __put_user(from->si_utime, &to->si_utime); err |= __put_user(from->si_stime, &to->si_stime); err |= __put_user(from->si_status, &to->si_status); /* FALL THROUGH */ default: case __SI_KILL >> 16: err |= __put_user(from->si_uid, &to->si_uid); break; case __SI_POLL >> 16: err |= __put_user(from->si_fd, &to->si_fd); break; case __SI_TIMER >> 16: err |= __put_user(from->si_overrun, &to->si_overrun); err |= __put_user(ptr_to_compat(from->si_ptr), &to->si_ptr); break; /* This is not generated by the kernel as of now. 
*/ case __SI_RT >> 16: case __SI_MESGQ >> 16: err |= __put_user(from->si_uid, &to->si_uid); err |= __put_user(from->si_int, &to->si_int); break; } } return err; } int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from) { int err; u32 ptr32; if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo))) return -EFAULT; err = __get_user(to->si_signo, &from->si_signo); err |= __get_user(to->si_errno, &from->si_errno); err |= __get_user(to->si_code, &from->si_code); err |= __get_user(to->si_pid, &from->si_pid); err |= __get_user(to->si_uid, &from->si_uid); err |= __get_user(ptr32, &from->si_ptr); to->si_ptr = compat_ptr(ptr32); return err; } long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr, struct compat_sigaltstack __user *uoss_ptr, struct pt_regs *regs) { stack_t uss, uoss; int ret; mm_segment_t seg; if (uss_ptr) { u32 ptr; memset(&uss, 0, sizeof(stack_t)); if (!access_ok(VERIFY_READ, uss_ptr, sizeof(*uss_ptr)) || __get_user(ptr, &uss_ptr->ss_sp) || __get_user(uss.ss_flags, &uss_ptr->ss_flags) || __get_user(uss.ss_size, &uss_ptr->ss_size)) return -EFAULT; uss.ss_sp = compat_ptr(ptr); } seg = get_fs(); set_fs(KERNEL_DS); ret = do_sigaltstack(uss_ptr ? (stack_t __user __force *)&uss : NULL, (stack_t __user __force *)&uoss, (unsigned long)compat_ptr(regs->sp)); set_fs(seg); if (ret >= 0 && uoss_ptr) { if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(*uoss_ptr)) || __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) || __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) || __put_user(uoss.ss_size, &uoss_ptr->ss_size)) ret = -EFAULT; } return ret; } /* The assembly shim for this function arranges to ignore the return value. */ long compat_sys_rt_sigreturn(struct pt_regs *regs) { struct compat_rt_sigframe __user *frame = (struct compat_rt_sigframe __user *) compat_ptr(regs->sp); sigset_t set; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); set_current_blocked(&set); if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) goto badframe; if (compat_sys_sigaltstack(&frame->uc.uc_stack, NULL, regs) != 0) goto badframe; return 0; badframe: signal_fault("bad sigreturn frame", regs, frame, 0); return 0; } /* * Determine which stack to use.. */ static inline void __user *compat_get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) { unsigned long sp; /* Default to using normal stack */ sp = (unsigned long)compat_ptr(regs->sp); /* * If we are on the alternate signal stack and would overflow * it, don't. Return an always-bogus address instead so we * will die with SIGSEGV. */ if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) return (void __user __force *)-1UL; /* This is the X/Open sanctioned signal stack switching. */ if (ka->sa.sa_flags & SA_ONSTACK) { if (sas_ss_flags(sp) == 0) sp = current->sas_ss_sp + current->sas_ss_size; } sp -= frame_size; /* * Align the stack pointer according to the TILE ABI, * i.e. so that on function entry (sp & 15) == 0. 
*/ sp &= -16UL; return (void __user *) sp; } int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs) { unsigned long restorer; struct compat_rt_sigframe __user *frame; int err = 0; int usig; frame = compat_get_sigframe(ka, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; usig = current_thread_info()->exec_domain && current_thread_info()->exec_domain->signal_invmap && sig < 32 ? current_thread_info()->exec_domain->signal_invmap[sig] : sig; /* Always write at least the signal number for the stack backtracer. */ if (ka->sa.sa_flags & SA_SIGINFO) { /* At sigreturn time, restore the callee-save registers too. */ err |= copy_siginfo_to_user32(&frame->info, info); regs->flags |= PT_FLAGS_RESTORE_REGS; } else { err |= __put_user(info->si_signo, &frame->info.si_signo); } /* Create the ucontext. */ err |= __clear_user(&frame->save_area, sizeof(frame->save_area)); err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(0, &frame->uc.uc_link); err |= __put_user(ptr_to_compat((void *)(current->sas_ss_sp)), &frame->uc.uc_stack.ss_sp); err |= __put_user(sas_ss_flags(regs->sp), &frame->uc.uc_stack.ss_flags); err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); err |= setup_sigcontext(&frame->uc.uc_mcontext, regs); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err) goto give_sigsegv; restorer = VDSO_BASE; if (ka->sa.sa_flags & SA_RESTORER) restorer = ptr_to_compat_reg(ka->sa.sa_restorer); /* * Set up registers for signal handler. * Registers that we don't modify keep the value they had from * user-space at the time we took the signal. * We always pass siginfo and mcontext, regardless of SA_SIGINFO, * since some things rely on this (e.g. glibc's debug/segfault.c). */ regs->pc = ptr_to_compat_reg(ka->sa.sa_handler); regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */ regs->sp = ptr_to_compat_reg(frame); regs->lr = restorer; regs->regs[0] = (unsigned long) usig; regs->regs[1] = ptr_to_compat_reg(&frame->info); regs->regs[2] = ptr_to_compat_reg(&frame->uc); regs->flags |= PT_FLAGS_CALLER_SAVES; /* * Notify any tracer that was single-stepping it. * The tracer may want to single-step inside the * handler too. */ if (test_thread_flag(TIF_SINGLESTEP)) ptrace_notify(SIGTRAP); return 0; give_sigsegv: signal_fault("bad setup frame", regs, frame, sig); return -EFAULT; }
gpl-2.0
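The copy_siginfo_to_user32() routine in the Tilera record above copies the three generic ints plus only the relevant union member, never raw struct bytes, precisely so that kernel padding cannot leak to a 32-bit userspace. The fragment below is a hedged userspace sketch of that field-by-field narrowing under the same rule (zero the destination, then assign named fields with explicit truncation); wide_info and narrow_info are invented stand-ins, not kernel types.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct wide_info {            /* 64-bit native layout, has padding on LP64 */
	int      signo;
	long     band;
	void    *ptr;
};

struct narrow_info {          /* "compat" 32-bit layout */
	int32_t  signo;
	int32_t  band;
	uint32_t ptr;             /* compat_uptr_t equivalent */
};

static void to_narrow(struct narrow_info *dst, const struct wide_info *src)
{
	memset(dst, 0, sizeof(*dst));         /* never copy pad bytes */
	dst->signo = src->signo;
	dst->band  = (int32_t)src->band;      /* explicit truncation */
	dst->ptr   = (uint32_t)(uintptr_t)src->ptr;
}

int main(void)
{
	struct wide_info w = { 5, 0x1234, (void *)0xdeadbeefUL };
	struct narrow_info n;

	to_narrow(&n, &w);
	printf("signo=%d band=%d ptr=0x%x\n", n.signo, n.band, n.ptr);
	return 0;
}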
flar2/m7-Sense-5.0.2
drivers/input/misc/yealink.c
4906
25248
/* * drivers/usb/input/yealink.c * * Copyright (c) 2005 Henk Vergonet <Henk.Vergonet@gmail.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * Description: * Driver for the USB-P1K voip usb phone. * This device is produced by Yealink Network Technology Co Ltd * but may be branded under several names: * - Yealink usb-p1k * - Tiptel 115 * - ... * * This driver is based on: * - the usbb2k-api http://savannah.nongnu.org/projects/usbb2k-api/ * - information from http://memeteau.free.fr/usbb2k * - the xpad-driver drivers/input/joystick/xpad.c * * Thanks to: * - Olivier Vandorpe, for providing the usbb2k-api. * - Martin Diehl, for spotting my memory allocation bug. * * History: * 20050527 henk First version, functional keyboard. Keyboard events * will pop-up on the ../input/eventX bus. * 20050531 henk Added led, LCD, dialtone and sysfs interface. * 20050610 henk Cleanups, make it ready for public consumption. * 20050630 henk Cleanups, fixes in response to comments. * 20050701 henk sysfs write serialisation, fix potential unload races * 20050801 henk Added ringtone, restructure USB * 20050816 henk Merge 2.6.13-rc6 */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/rwsem.h> #include <linux/usb/input.h> #include <linux/map_to_7segment.h> #include "yealink.h" #define DRIVER_VERSION "yld-20051230" #define DRIVER_AUTHOR "Henk Vergonet" #define DRIVER_DESC "Yealink phone driver" #define YEALINK_POLLING_FREQUENCY 10 /* in [Hz] */ struct yld_status { u8 lcd[24]; u8 led; u8 dialtone; u8 ringtone; u8 keynum; } __attribute__ ((packed)); /* * Register the LCD segment and icon map */ #define _LOC(k,l) { .a = (k), .m = (l) } #define _SEG(t, a, am, b, bm, c, cm, d, dm, e, em, f, fm, g, gm) \ { .type = (t), \ .u = { .s = { _LOC(a, am), _LOC(b, bm), _LOC(c, cm), \ _LOC(d, dm), _LOC(e, em), _LOC(g, gm), \ _LOC(f, fm) } } } #define _PIC(t, h, hm, n) \ { .type = (t), \ .u = { .p = { .name = (n), .a = (h), .m = (hm) } } } static const struct lcd_segment_map { char type; union { struct pictogram_map { u8 a,m; char name[10]; } p; struct segment_map { u8 a,m; } s[7]; } u; } lcdMap[] = { #include "yealink.h" }; struct yealink_dev { struct input_dev *idev; /* input device */ struct usb_device *udev; /* usb device */ /* irq input channel */ struct yld_ctl_packet *irq_data; dma_addr_t irq_dma; struct urb *urb_irq; /* control output channel */ struct yld_ctl_packet *ctl_data; dma_addr_t ctl_dma; struct usb_ctrlrequest *ctl_req; struct urb *urb_ctl; char phys[64]; /* physical device path */ u8 lcdMap[ARRAY_SIZE(lcdMap)]; /* state of LCD, LED ... 
*/ int key_code; /* last reported key */ unsigned int shutdown:1; int stat_ix; union { struct yld_status s; u8 b[sizeof(struct yld_status)]; } master, copy; }; /******************************************************************************* * Yealink lcd interface ******************************************************************************/ /* * Register a default 7 segment character set */ static SEG7_DEFAULT_MAP(map_seg7); /* Display a char, * char '\9' and '\n' are placeholders and do not overwrite the original text. * A space will always hide an icon. */ static int setChar(struct yealink_dev *yld, int el, int chr) { int i, a, m, val; if (el >= ARRAY_SIZE(lcdMap)) return -EINVAL; if (chr == '\t' || chr == '\n') return 0; yld->lcdMap[el] = chr; if (lcdMap[el].type == '.') { a = lcdMap[el].u.p.a; m = lcdMap[el].u.p.m; if (chr != ' ') yld->master.b[a] |= m; else yld->master.b[a] &= ~m; return 0; } val = map_to_seg7(&map_seg7, chr); for (i = 0; i < ARRAY_SIZE(lcdMap[0].u.s); i++) { m = lcdMap[el].u.s[i].m; if (m == 0) continue; a = lcdMap[el].u.s[i].a; if (val & 1) yld->master.b[a] |= m; else yld->master.b[a] &= ~m; val = val >> 1; } return 0; }; /******************************************************************************* * Yealink key interface ******************************************************************************/ /* Map device buttons to internal key events. * * USB-P1K button layout: * * up * IN OUT * down * * pickup C hangup * 1 2 3 * 4 5 6 * 7 8 9 * * 0 # * * The "up" and "down" keys, are symbolised by arrows on the button. * The "pickup" and "hangup" keys are symbolised by a green and red phone * on the button. */ static int map_p1k_to_key(int scancode) { switch(scancode) { /* phone key: */ case 0x23: return KEY_LEFT; /* IN */ case 0x33: return KEY_UP; /* up */ case 0x04: return KEY_RIGHT; /* OUT */ case 0x24: return KEY_DOWN; /* down */ case 0x03: return KEY_ENTER; /* pickup */ case 0x14: return KEY_BACKSPACE; /* C */ case 0x13: return KEY_ESC; /* hangup */ case 0x00: return KEY_1; /* 1 */ case 0x01: return KEY_2; /* 2 */ case 0x02: return KEY_3; /* 3 */ case 0x10: return KEY_4; /* 4 */ case 0x11: return KEY_5; /* 5 */ case 0x12: return KEY_6; /* 6 */ case 0x20: return KEY_7; /* 7 */ case 0x21: return KEY_8; /* 8 */ case 0x22: return KEY_9; /* 9 */ case 0x30: return KEY_KPASTERISK; /* * */ case 0x31: return KEY_0; /* 0 */ case 0x32: return KEY_LEFTSHIFT | KEY_3 << 8; /* # */ } return -EINVAL; } /* Completes a request by converting the data into events for the * input subsystem. 
* * The key parameter can be cascaded: key2 << 8 | key1 */ static void report_key(struct yealink_dev *yld, int key) { struct input_dev *idev = yld->idev; if (yld->key_code >= 0) { /* old key up */ input_report_key(idev, yld->key_code & 0xff, 0); if (yld->key_code >> 8) input_report_key(idev, yld->key_code >> 8, 0); } yld->key_code = key; if (key >= 0) { /* new valid key */ input_report_key(idev, key & 0xff, 1); if (key >> 8) input_report_key(idev, key >> 8, 1); } input_sync(idev); } /******************************************************************************* * Yealink usb communication interface ******************************************************************************/ static int yealink_cmd(struct yealink_dev *yld, struct yld_ctl_packet *p) { u8 *buf = (u8 *)p; int i; u8 sum = 0; for(i=0; i<USB_PKT_LEN-1; i++) sum -= buf[i]; p->sum = sum; return usb_control_msg(yld->udev, usb_sndctrlpipe(yld->udev, 0), USB_REQ_SET_CONFIGURATION, USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT, 0x200, 3, p, sizeof(*p), USB_CTRL_SET_TIMEOUT); } static u8 default_ringtone[] = { 0xEF, /* volume [0-255] */ 0xFB, 0x1E, 0x00, 0x0C, /* 1250 [hz], 12/100 [s] */ 0xFC, 0x18, 0x00, 0x0C, /* 1000 [hz], 12/100 [s] */ 0xFB, 0x1E, 0x00, 0x0C, 0xFC, 0x18, 0x00, 0x0C, 0xFB, 0x1E, 0x00, 0x0C, 0xFC, 0x18, 0x00, 0x0C, 0xFB, 0x1E, 0x00, 0x0C, 0xFC, 0x18, 0x00, 0x0C, 0xFF, 0xFF, 0x01, 0x90, /* silent, 400/100 [s] */ 0x00, 0x00 /* end of sequence */ }; static int yealink_set_ringtone(struct yealink_dev *yld, u8 *buf, size_t size) { struct yld_ctl_packet *p = yld->ctl_data; int ix, len; if (size <= 0) return -EINVAL; /* Set the ringtone volume */ memset(yld->ctl_data, 0, sizeof(*(yld->ctl_data))); yld->ctl_data->cmd = CMD_RING_VOLUME; yld->ctl_data->size = 1; yld->ctl_data->data[0] = buf[0]; yealink_cmd(yld, p); buf++; size--; p->cmd = CMD_RING_NOTE; ix = 0; while (size != ix) { len = size - ix; if (len > sizeof(p->data)) len = sizeof(p->data); p->size = len; p->offset = cpu_to_be16(ix); memcpy(p->data, &buf[ix], len); yealink_cmd(yld, p); ix += len; } return 0; } /* keep stat_master & stat_copy in sync. */ static int yealink_do_idle_tasks(struct yealink_dev *yld) { u8 val; int i, ix, len; ix = yld->stat_ix; memset(yld->ctl_data, 0, sizeof(*(yld->ctl_data))); yld->ctl_data->cmd = CMD_KEYPRESS; yld->ctl_data->size = 1; yld->ctl_data->sum = 0xff - CMD_KEYPRESS; /* If state update pointer wraps do a KEYPRESS first. */ if (ix >= sizeof(yld->master)) { yld->stat_ix = 0; return 0; } /* find update candidates: copy != master */ do { val = yld->master.b[ix]; if (val != yld->copy.b[ix]) goto send_update; } while (++ix < sizeof(yld->master)); /* nothing todo, wait a bit and poll for a KEYPRESS */ yld->stat_ix = 0; /* TODO how can we wait abit. ?? 
* msleep_interruptible(1000 / YEALINK_POLLING_FREQUENCY); */ return 0; send_update: /* Setup an appropriate update request */ yld->copy.b[ix] = val; yld->ctl_data->data[0] = val; switch(ix) { case offsetof(struct yld_status, led): yld->ctl_data->cmd = CMD_LED; yld->ctl_data->sum = -1 - CMD_LED - val; break; case offsetof(struct yld_status, dialtone): yld->ctl_data->cmd = CMD_DIALTONE; yld->ctl_data->sum = -1 - CMD_DIALTONE - val; break; case offsetof(struct yld_status, ringtone): yld->ctl_data->cmd = CMD_RINGTONE; yld->ctl_data->sum = -1 - CMD_RINGTONE - val; break; case offsetof(struct yld_status, keynum): val--; val &= 0x1f; yld->ctl_data->cmd = CMD_SCANCODE; yld->ctl_data->offset = cpu_to_be16(val); yld->ctl_data->data[0] = 0; yld->ctl_data->sum = -1 - CMD_SCANCODE - val; break; default: len = sizeof(yld->master.s.lcd) - ix; if (len > sizeof(yld->ctl_data->data)) len = sizeof(yld->ctl_data->data); /* Combine up to <len> consecutive LCD bytes in a singe request */ yld->ctl_data->cmd = CMD_LCD; yld->ctl_data->offset = cpu_to_be16(ix); yld->ctl_data->size = len; yld->ctl_data->sum = -CMD_LCD - ix - val - len; for(i=1; i<len; i++) { ix++; val = yld->master.b[ix]; yld->copy.b[ix] = val; yld->ctl_data->data[i] = val; yld->ctl_data->sum -= val; } } yld->stat_ix = ix + 1; return 1; } /* Decide on how to handle responses * * The state transition diagram is somethhing like: * * syncState<--+ * | | * | idle * \|/ | * init --ok--> waitForKey --ok--> getKey * ^ ^ | * | +-------ok-------+ * error,start * */ static void urb_irq_callback(struct urb *urb) { struct yealink_dev *yld = urb->context; int ret, status = urb->status; if (status) err("%s - urb status %d", __func__, status); switch (yld->irq_data->cmd) { case CMD_KEYPRESS: yld->master.s.keynum = yld->irq_data->data[0]; break; case CMD_SCANCODE: dbg("get scancode %x", yld->irq_data->data[0]); report_key(yld, map_p1k_to_key(yld->irq_data->data[0])); break; default: err("unexpected response %x", yld->irq_data->cmd); } yealink_do_idle_tasks(yld); if (!yld->shutdown) { ret = usb_submit_urb(yld->urb_ctl, GFP_ATOMIC); if (ret && ret != -EPERM) err("%s - usb_submit_urb failed %d", __func__, ret); } } static void urb_ctl_callback(struct urb *urb) { struct yealink_dev *yld = urb->context; int ret = 0, status = urb->status; if (status) err("%s - urb status %d", __func__, status); switch (yld->ctl_data->cmd) { case CMD_KEYPRESS: case CMD_SCANCODE: /* ask for a response */ if (!yld->shutdown) ret = usb_submit_urb(yld->urb_irq, GFP_ATOMIC); break; default: /* send new command */ yealink_do_idle_tasks(yld); if (!yld->shutdown) ret = usb_submit_urb(yld->urb_ctl, GFP_ATOMIC); break; } if (ret && ret != -EPERM) err("%s - usb_submit_urb failed %d", __func__, ret); } /******************************************************************************* * input event interface ******************************************************************************/ /* TODO should we issue a ringtone on a SND_BELL event? 
static int input_ev(struct input_dev *dev, unsigned int type, unsigned int code, int value) { if (type != EV_SND) return -EINVAL; switch (code) { case SND_BELL: case SND_TONE: break; default: return -EINVAL; } return 0; } */ static int input_open(struct input_dev *dev) { struct yealink_dev *yld = input_get_drvdata(dev); int i, ret; dbg("%s", __func__); /* force updates to device */ for (i = 0; i<sizeof(yld->master); i++) yld->copy.b[i] = ~yld->master.b[i]; yld->key_code = -1; /* no keys pressed */ yealink_set_ringtone(yld, default_ringtone, sizeof(default_ringtone)); /* issue INIT */ memset(yld->ctl_data, 0, sizeof(*(yld->ctl_data))); yld->ctl_data->cmd = CMD_INIT; yld->ctl_data->size = 10; yld->ctl_data->sum = 0x100-CMD_INIT-10; if ((ret = usb_submit_urb(yld->urb_ctl, GFP_KERNEL)) != 0) { dbg("%s - usb_submit_urb failed with result %d", __func__, ret); return ret; } return 0; } static void input_close(struct input_dev *dev) { struct yealink_dev *yld = input_get_drvdata(dev); yld->shutdown = 1; /* * Make sure the flag is seen by other CPUs before we start * killing URBs so new URBs won't be submitted */ smp_wmb(); usb_kill_urb(yld->urb_ctl); usb_kill_urb(yld->urb_irq); yld->shutdown = 0; smp_wmb(); } /******************************************************************************* * sysfs interface ******************************************************************************/ static DECLARE_RWSEM(sysfs_rwsema); /* Interface to the 7-segments translation table aka. char set. */ static ssize_t show_map(struct device *dev, struct device_attribute *attr, char *buf) { memcpy(buf, &map_seg7, sizeof(map_seg7)); return sizeof(map_seg7); } static ssize_t store_map(struct device *dev, struct device_attribute *attr, const char *buf, size_t cnt) { if (cnt != sizeof(map_seg7)) return -EINVAL; memcpy(&map_seg7, buf, sizeof(map_seg7)); return sizeof(map_seg7); } /* Interface to the LCD. */ /* Reading /sys/../lineX will return the format string with its settings: * * Example: * cat ./line3 * 888888888888 * Linux Rocks! */ static ssize_t show_line(struct device *dev, char *buf, int a, int b) { struct yealink_dev *yld; int i; down_read(&sysfs_rwsema); yld = dev_get_drvdata(dev); if (yld == NULL) { up_read(&sysfs_rwsema); return -ENODEV; } for (i = a; i < b; i++) *buf++ = lcdMap[i].type; *buf++ = '\n'; for (i = a; i < b; i++) *buf++ = yld->lcdMap[i]; *buf++ = '\n'; *buf = 0; up_read(&sysfs_rwsema); return 3 + ((b - a) << 1); } static ssize_t show_line1(struct device *dev, struct device_attribute *attr, char *buf) { return show_line(dev, buf, LCD_LINE1_OFFSET, LCD_LINE2_OFFSET); } static ssize_t show_line2(struct device *dev, struct device_attribute *attr, char *buf) { return show_line(dev, buf, LCD_LINE2_OFFSET, LCD_LINE3_OFFSET); } static ssize_t show_line3(struct device *dev, struct device_attribute *attr, char *buf) { return show_line(dev, buf, LCD_LINE3_OFFSET, LCD_LINE4_OFFSET); } /* Writing to /sys/../lineX will set the coresponding LCD line. * - Excess characters are ignored. * - If less characters are written than allowed, the remaining digits are * unchanged. * - The '\n' or '\t' char is a placeholder, it does not overwrite the * original content. 
*/ static ssize_t store_line(struct device *dev, const char *buf, size_t count, int el, size_t len) { struct yealink_dev *yld; int i; down_write(&sysfs_rwsema); yld = dev_get_drvdata(dev); if (yld == NULL) { up_write(&sysfs_rwsema); return -ENODEV; } if (len > count) len = count; for (i = 0; i < len; i++) setChar(yld, el++, buf[i]); up_write(&sysfs_rwsema); return count; } static ssize_t store_line1(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return store_line(dev, buf, count, LCD_LINE1_OFFSET, LCD_LINE1_SIZE); } static ssize_t store_line2(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return store_line(dev, buf, count, LCD_LINE2_OFFSET, LCD_LINE2_SIZE); } static ssize_t store_line3(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return store_line(dev, buf, count, LCD_LINE3_OFFSET, LCD_LINE3_SIZE); } /* Interface to visible and audible "icons", these include: * pictures on the LCD, the LED, and the dialtone signal. */ /* Get a list of "switchable elements" with their current state. */ static ssize_t get_icons(struct device *dev, struct device_attribute *attr, char *buf) { struct yealink_dev *yld; int i, ret = 1; down_read(&sysfs_rwsema); yld = dev_get_drvdata(dev); if (yld == NULL) { up_read(&sysfs_rwsema); return -ENODEV; } for (i = 0; i < ARRAY_SIZE(lcdMap); i++) { if (lcdMap[i].type != '.') continue; ret += sprintf(&buf[ret], "%s %s\n", yld->lcdMap[i] == ' ' ? " " : "on", lcdMap[i].u.p.name); } up_read(&sysfs_rwsema); return ret; } /* Change the visibility of a particular element. */ static ssize_t set_icon(struct device *dev, const char *buf, size_t count, int chr) { struct yealink_dev *yld; int i; down_write(&sysfs_rwsema); yld = dev_get_drvdata(dev); if (yld == NULL) { up_write(&sysfs_rwsema); return -ENODEV; } for (i = 0; i < ARRAY_SIZE(lcdMap); i++) { if (lcdMap[i].type != '.') continue; if (strncmp(buf, lcdMap[i].u.p.name, count) == 0) { setChar(yld, i, chr); break; } } up_write(&sysfs_rwsema); return count; } static ssize_t show_icon(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return set_icon(dev, buf, count, buf[0]); } static ssize_t hide_icon(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return set_icon(dev, buf, count, ' '); } /* Upload a ringtone to the device. */ /* Stores raw ringtone data in the phone */ static ssize_t store_ringtone(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct yealink_dev *yld; down_write(&sysfs_rwsema); yld = dev_get_drvdata(dev); if (yld == NULL) { up_write(&sysfs_rwsema); return -ENODEV; } /* TODO locking with async usb control interface??? 
*/ yealink_set_ringtone(yld, (char *)buf, count); up_write(&sysfs_rwsema); return count; } #define _M444 S_IRUGO #define _M664 S_IRUGO|S_IWUSR|S_IWGRP #define _M220 S_IWUSR|S_IWGRP static DEVICE_ATTR(map_seg7 , _M664, show_map , store_map ); static DEVICE_ATTR(line1 , _M664, show_line1 , store_line1 ); static DEVICE_ATTR(line2 , _M664, show_line2 , store_line2 ); static DEVICE_ATTR(line3 , _M664, show_line3 , store_line3 ); static DEVICE_ATTR(get_icons , _M444, get_icons , NULL ); static DEVICE_ATTR(show_icon , _M220, NULL , show_icon ); static DEVICE_ATTR(hide_icon , _M220, NULL , hide_icon ); static DEVICE_ATTR(ringtone , _M220, NULL , store_ringtone); static struct attribute *yld_attributes[] = { &dev_attr_line1.attr, &dev_attr_line2.attr, &dev_attr_line3.attr, &dev_attr_get_icons.attr, &dev_attr_show_icon.attr, &dev_attr_hide_icon.attr, &dev_attr_map_seg7.attr, &dev_attr_ringtone.attr, NULL }; static struct attribute_group yld_attr_group = { .attrs = yld_attributes }; /******************************************************************************* * Linux interface and usb initialisation ******************************************************************************/ struct driver_info { char *name; }; static const struct driver_info info_P1K = { .name = "Yealink usb-p1k", }; static const struct usb_device_id usb_table [] = { { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = 0x6993, .idProduct = 0xb001, .bInterfaceClass = USB_CLASS_HID, .bInterfaceSubClass = 0, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&info_P1K }, { } }; static int usb_cleanup(struct yealink_dev *yld, int err) { if (yld == NULL) return err; if (yld->idev) { if (err) input_free_device(yld->idev); else input_unregister_device(yld->idev); } usb_free_urb(yld->urb_irq); usb_free_urb(yld->urb_ctl); kfree(yld->ctl_req); usb_free_coherent(yld->udev, USB_PKT_LEN, yld->ctl_data, yld->ctl_dma); usb_free_coherent(yld->udev, USB_PKT_LEN, yld->irq_data, yld->irq_dma); kfree(yld); return err; } static void usb_disconnect(struct usb_interface *intf) { struct yealink_dev *yld; down_write(&sysfs_rwsema); yld = usb_get_intfdata(intf); sysfs_remove_group(&intf->dev.kobj, &yld_attr_group); usb_set_intfdata(intf, NULL); up_write(&sysfs_rwsema); usb_cleanup(yld, 0); } static int usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev (intf); struct driver_info *nfo = (struct driver_info *)id->driver_info; struct usb_host_interface *interface; struct usb_endpoint_descriptor *endpoint; struct yealink_dev *yld; struct input_dev *input_dev; int ret, pipe, i; interface = intf->cur_altsetting; endpoint = &interface->endpoint[0].desc; if (!usb_endpoint_is_int_in(endpoint)) return -ENODEV; yld = kzalloc(sizeof(struct yealink_dev), GFP_KERNEL); if (!yld) return -ENOMEM; yld->udev = udev; yld->idev = input_dev = input_allocate_device(); if (!input_dev) return usb_cleanup(yld, -ENOMEM); /* allocate usb buffers */ yld->irq_data = usb_alloc_coherent(udev, USB_PKT_LEN, GFP_ATOMIC, &yld->irq_dma); if (yld->irq_data == NULL) return usb_cleanup(yld, -ENOMEM); yld->ctl_data = usb_alloc_coherent(udev, USB_PKT_LEN, GFP_ATOMIC, &yld->ctl_dma); if (!yld->ctl_data) return usb_cleanup(yld, -ENOMEM); yld->ctl_req = kmalloc(sizeof(*(yld->ctl_req)), GFP_KERNEL); if (yld->ctl_req == NULL) return usb_cleanup(yld, -ENOMEM); /* allocate urb structures */ yld->urb_irq = usb_alloc_urb(0, GFP_KERNEL); if (yld->urb_irq == NULL) return usb_cleanup(yld, -ENOMEM); 
yld->urb_ctl = usb_alloc_urb(0, GFP_KERNEL); if (yld->urb_ctl == NULL) return usb_cleanup(yld, -ENOMEM); /* get a handle to the interrupt data pipe */ pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress); ret = usb_maxpacket(udev, pipe, usb_pipeout(pipe)); if (ret != USB_PKT_LEN) err("invalid payload size %d, expected %zd", ret, USB_PKT_LEN); /* initialise irq urb */ usb_fill_int_urb(yld->urb_irq, udev, pipe, yld->irq_data, USB_PKT_LEN, urb_irq_callback, yld, endpoint->bInterval); yld->urb_irq->transfer_dma = yld->irq_dma; yld->urb_irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; yld->urb_irq->dev = udev; /* initialise ctl urb */ yld->ctl_req->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT; yld->ctl_req->bRequest = USB_REQ_SET_CONFIGURATION; yld->ctl_req->wValue = cpu_to_le16(0x200); yld->ctl_req->wIndex = cpu_to_le16(interface->desc.bInterfaceNumber); yld->ctl_req->wLength = cpu_to_le16(USB_PKT_LEN); usb_fill_control_urb(yld->urb_ctl, udev, usb_sndctrlpipe(udev, 0), (void *)yld->ctl_req, yld->ctl_data, USB_PKT_LEN, urb_ctl_callback, yld); yld->urb_ctl->transfer_dma = yld->ctl_dma; yld->urb_ctl->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; yld->urb_ctl->dev = udev; /* find out the physical bus location */ usb_make_path(udev, yld->phys, sizeof(yld->phys)); strlcat(yld->phys, "/input0", sizeof(yld->phys)); /* register settings for the input device */ input_dev->name = nfo->name; input_dev->phys = yld->phys; usb_to_input_id(udev, &input_dev->id); input_dev->dev.parent = &intf->dev; input_set_drvdata(input_dev, yld); input_dev->open = input_open; input_dev->close = input_close; /* input_dev->event = input_ev; TODO */ /* register available key events */ input_dev->evbit[0] = BIT_MASK(EV_KEY); for (i = 0; i < 256; i++) { int k = map_p1k_to_key(i); if (k >= 0) { set_bit(k & 0xff, input_dev->keybit); if (k >> 8) set_bit(k >> 8, input_dev->keybit); } } ret = input_register_device(yld->idev); if (ret) return usb_cleanup(yld, ret); usb_set_intfdata(intf, yld); /* clear visible elements */ for (i = 0; i < ARRAY_SIZE(lcdMap); i++) setChar(yld, i, ' '); /* display driver version on LCD line 3 */ store_line3(&intf->dev, NULL, DRIVER_VERSION, sizeof(DRIVER_VERSION)); /* Register sysfs hooks (don't care about failure) */ ret = sysfs_create_group(&intf->dev.kobj, &yld_attr_group); return 0; } static struct usb_driver yealink_driver = { .name = "yealink", .probe = usb_probe, .disconnect = usb_disconnect, .id_table = usb_table, }; module_usb_driver(yealink_driver); MODULE_DEVICE_TABLE (usb, usb_table); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
gpl-2.0
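The Yealink driver above seals every control packet with a final byte chosen so that all packet bytes sum to zero modulo 256 (see yealink_cmd(), which subtracts each payload byte from an accumulator). Here is a self-contained sketch of that checksum rule; PKT_LEN and the helper names are made up for the example, but the arithmetic is the same.

#include <stdint.h>
#include <stdio.h>

#define PKT_LEN 16

static void seal(uint8_t *pkt)
{
	uint8_t sum = 0;
	for (int i = 0; i < PKT_LEN - 1; i++)
		sum -= pkt[i];        /* unsigned wraparound is well defined */
	pkt[PKT_LEN - 1] = sum;
}

static int verify(const uint8_t *pkt)
{
	uint8_t sum = 0;
	for (int i = 0; i < PKT_LEN; i++)
		sum += pkt[i];
	return sum == 0;              /* 1 if the packet is intact */
}

int main(void)
{
	uint8_t pkt[PKT_LEN] = { 0x50 /* cmd */, 3 /* size */, 1, 2, 3 };

	seal(pkt);
	printf("checksum=0x%02x ok=%d\n", pkt[PKT_LEN - 1], verify(pkt));
	return 0;
}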
vware/android_kernel_sony_apq8064
arch/sh/boards/mach-landisk/gio.c
12074
3422
/* * arch/sh/boards/landisk/gio.c - driver for landisk * * This driver will also support the I-O DATA Device, Inc. LANDISK Board. * LANDISK and USL-5P Button, LED and GIO driver functions. * * Copyright (C) 2006 kogiidena * Copyright (C) 2002 Atom Create Engineering Co., Ltd. * * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * */ #include <linux/module.h> #include <linux/init.h> #include <linux/kdev_t.h> #include <linux/cdev.h> #include <linux/fs.h> #include <asm/io.h> #include <asm/uaccess.h> #include <mach-landisk/mach/gio.h> #include <mach-landisk/mach/iodata_landisk.h> #define DEVCOUNT 4 #define GIO_MINOR 2 /* GIO minor no. */ static dev_t dev; static struct cdev *cdev_p; static int openCnt; static int gio_open(struct inode *inode, struct file *filp) { int minor; int ret = -ENOENT; preempt_disable(); minor = MINOR(inode->i_rdev); if (minor < DEVCOUNT) { if (openCnt > 0) { ret = -EALREADY; } else { openCnt++; ret = 0; } } preempt_enable(); return ret; } static int gio_close(struct inode *inode, struct file *filp) { int minor; minor = MINOR(inode->i_rdev); if (minor < DEVCOUNT) { openCnt--; } return 0; } static long gio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { unsigned int data; static unsigned int addr = 0; if (cmd & 0x01) { /* write */ if (copy_from_user(&data, (int *)arg, sizeof(int))) { return -EFAULT; } } switch (cmd) { case GIODRV_IOCSGIOSETADDR: /* address set */ addr = data; break; case GIODRV_IOCSGIODATA1: /* write byte */ __raw_writeb((unsigned char)(0x0ff & data), addr); break; case GIODRV_IOCSGIODATA2: /* write word */ if (addr & 0x01) { return -EFAULT; } __raw_writew((unsigned short int)(0x0ffff & data), addr); break; case GIODRV_IOCSGIODATA4: /* write long */ if (addr & 0x03) { return -EFAULT; } __raw_writel(data, addr); break; case GIODRV_IOCGGIODATA1: /* read byte */ data = __raw_readb(addr); break; case GIODRV_IOCGGIODATA2: /* read word */ if (addr & 0x01) { return -EFAULT; } data = __raw_readw(addr); break; case GIODRV_IOCGGIODATA4: /* read long */ if (addr & 0x03) { return -EFAULT; } data = __raw_readl(addr); break; default: return -EFAULT; break; } if ((cmd & 0x01) == 0) { /* read */ if (copy_to_user((int *)arg, &data, sizeof(int))) { return -EFAULT; } } return 0; } static const struct file_operations gio_fops = { .owner = THIS_MODULE, .open = gio_open, /* open */ .release = gio_close, /* release */ .unlocked_ioctl = gio_ioctl, .llseek = noop_llseek, }; static int __init gio_init(void) { int error; printk(KERN_INFO "gio: driver initialized\n"); openCnt = 0; if ((error = alloc_chrdev_region(&dev, 0, DEVCOUNT, "gio")) < 0) { printk(KERN_ERR "gio: Couldn't alloc_chrdev_region, error=%d\n", error); return 1; } cdev_p = cdev_alloc(); cdev_p->ops = &gio_fops; error = cdev_add(cdev_p, dev, DEVCOUNT); if (error) { printk(KERN_ERR "gio: Couldn't cdev_add, error=%d\n", error); return 1; } return 0; } static void __exit gio_exit(void) { cdev_del(cdev_p); unregister_chrdev_region(dev, DEVCOUNT); } module_init(gio_init); module_exit(gio_exit); MODULE_LICENSE("GPL");
gpl-2.0
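In the LANDISK gio.c record above, bit 0 of the ioctl command selects the direction (copy_from_user before the switch when set, copy_to_user after it when clear), and word/long accesses are rejected unless naturally aligned. The sketch below models the same convention against a plain array instead of MMIO; the command macros and helper are invented for the example.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CMD_SET_ADDR  0x01   /* odd  -> write direction */
#define CMD_WRITE32   0x03
#define CMD_READ32    0x02   /* even -> read direction  */

static uint8_t regs[256];    /* stands in for the register window */
static uint32_t cur_addr;

static int demo_ioctl(unsigned int cmd, uint32_t *arg)
{
	switch (cmd) {
	case CMD_SET_ADDR:
		if (*arg > sizeof(regs) - 4)
			return -1;            /* keep the demo in bounds */
		cur_addr = *arg;
		return 0;
	case CMD_WRITE32:
		if (cur_addr & 0x03)          /* long access must be 4-aligned */
			return -1;
		memcpy(&regs[cur_addr], arg, 4);
		return 0;
	case CMD_READ32:
		if (cur_addr & 0x03)
			return -1;
		memcpy(arg, &regs[cur_addr], 4);
		return 0;
	}
	return -1;
}

int main(void)
{
	uint32_t v = 0xcafef00d;

	demo_ioctl(CMD_SET_ADDR, &(uint32_t){8});
	demo_ioctl(CMD_WRITE32, &v);
	v = 0;
	demo_ioctl(CMD_READ32, &v);
	printf("read back 0x%08x\n", v);   /* 0xcafef00d */
	return 0;
}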
Nu3001/kernel_rk3188
arch/x86/boot/a20.c
14378
3548
/* -*- linux-c -*- ------------------------------------------------------- * * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright 2007-2008 rPath, Inc. - All Rights Reserved * Copyright 2009 Intel Corporation; author H. Peter Anvin * * This file is part of the Linux kernel, and is made available under * the terms of the GNU General Public License version 2. * * ----------------------------------------------------------------------- */ /* * Enable A20 gate (return -1 on failure) */ #include "boot.h" #define MAX_8042_LOOPS 100000 #define MAX_8042_FF 32 static int empty_8042(void) { u8 status; int loops = MAX_8042_LOOPS; int ffs = MAX_8042_FF; while (loops--) { io_delay(); status = inb(0x64); if (status == 0xff) { /* FF is a plausible, but very unlikely status */ if (!--ffs) return -1; /* Assume no KBC present */ } if (status & 1) { /* Read and discard input data */ io_delay(); (void)inb(0x60); } else if (!(status & 2)) { /* Buffers empty, finished! */ return 0; } } return -1; } /* Returns nonzero if the A20 line is enabled. The memory address used as a test is the int $0x80 vector, which should be safe. */ #define A20_TEST_ADDR (4*0x80) #define A20_TEST_SHORT 32 #define A20_TEST_LONG 2097152 /* 2^21 */ static int a20_test(int loops) { int ok = 0; int saved, ctr; set_fs(0x0000); set_gs(0xffff); saved = ctr = rdfs32(A20_TEST_ADDR); while (loops--) { wrfs32(++ctr, A20_TEST_ADDR); io_delay(); /* Serialize and make delay constant */ ok = rdgs32(A20_TEST_ADDR+0x10) ^ ctr; if (ok) break; } wrfs32(saved, A20_TEST_ADDR); return ok; } /* Quick test to see if A20 is already enabled */ static int a20_test_short(void) { return a20_test(A20_TEST_SHORT); } /* Longer test that actually waits for A20 to come on line; this is useful when dealing with the KBC or other slow external circuitry. */ static int a20_test_long(void) { return a20_test(A20_TEST_LONG); } static void enable_a20_bios(void) { struct biosregs ireg; initregs(&ireg); ireg.ax = 0x2401; intcall(0x15, &ireg, NULL); } static void enable_a20_kbc(void) { empty_8042(); outb(0xd1, 0x64); /* Command write */ empty_8042(); outb(0xdf, 0x60); /* A20 on */ empty_8042(); outb(0xff, 0x64); /* Null command, but UHCI wants it */ empty_8042(); } static void enable_a20_fast(void) { u8 port_a; port_a = inb(0x92); /* Configuration port A */ port_a |= 0x02; /* Enable A20 */ port_a &= ~0x01; /* Do not reset machine */ outb(port_a, 0x92); } /* * Actual routine to enable A20; return 0 on ok, -1 on failure */ #define A20_ENABLE_LOOPS 255 /* Number of times to try */ int enable_a20(void) { int loops = A20_ENABLE_LOOPS; int kbc_err; while (loops--) { /* First, check to see if A20 is already enabled (legacy free, etc.) */ if (a20_test_short()) return 0; /* Next, try the BIOS (INT 0x15, AX=0x2401) */ enable_a20_bios(); if (a20_test_short()) return 0; /* Try enabling A20 through the keyboard controller */ kbc_err = empty_8042(); if (a20_test_short()) return 0; /* BIOS worked, but with delayed reaction */ if (!kbc_err) { enable_a20_kbc(); if (a20_test_long()) return 0; } /* Finally, try enabling the "fast A20 gate" */ enable_a20_fast(); if (a20_test_long()) return 0; } return -1; }
gpl-2.0
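The a20.c record above detects the A20 gate by writing a counter at one address and reading it back through the 1 MiB alias: if physical address bit 20 is forced low, both views hit the same byte and the XOR comes out zero. Below is a small simulation of that check, with memory modeled as a zero-initialized array and A20 as a mask; this is illustrative only, not boot code, and all names are invented.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MEM_SIZE   (2u << 20)          /* 2 MiB of fake memory */
#define A20_BIT    (1u << 20)
#define TEST_ADDR  0x200u              /* the int 0x80 vector, as in a20.c */

static uint8_t mem[MEM_SIZE];

static uint32_t rd32(uint32_t addr, int a20_on)
{
	uint32_t v, phys = a20_on ? addr : (addr & ~A20_BIT);
	memcpy(&v, &mem[phys], 4);
	return v;
}

static void wr32(uint32_t addr, uint32_t v, int a20_on)
{
	uint32_t phys = a20_on ? addr : (addr & ~A20_BIT);
	memcpy(&mem[phys], &v, 4);
}

/* nonzero => the 1 MiB alias sees different data => the gate is open */
static int a20_test_sim(int a20_on)
{
	uint32_t ctr = 0x12345678u;

	wr32(TEST_ADDR, ctr, a20_on);
	return rd32(TEST_ADDR + A20_BIT, a20_on) ^ ctr;
}

int main(void)
{
	printf("a20 off -> open=%d\n", !!a20_test_sim(0));  /* 0: aliased  */
	printf("a20 on  -> open=%d\n", !!a20_test_sim(1));  /* 1: distinct */
	return 0;
}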
raj-bhatia/grooveip-ios-public
submodules/externals/xerces-c/src/xercesc/util/NetAccessors/Curl/CurlURLInputStream.cpp
43
12325
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * $Id: CurlURLInputStream.cpp 936316 2010-04-21 14:19:58Z borisk $ */ #if HAVE_CONFIG_H #include <config.h> #endif #include <stdio.h> #include <stdlib.h> #include <string.h> #if HAVE_ERRNO_H #include <errno.h> #endif #if HAVE_UNISTD_H #include <unistd.h> #endif #if HAVE_SYS_TYPES_H #include <sys/types.h> #endif #if HAVE_SYS_TIME_H #include <sys/time.h> #endif #include <xercesc/util/XercesDefs.hpp> #include <xercesc/util/XMLNetAccessor.hpp> #include <xercesc/util/NetAccessors/Curl/CurlURLInputStream.hpp> #include <xercesc/util/XMLString.hpp> #include <xercesc/util/XMLExceptMsgs.hpp> #include <xercesc/util/Janitor.hpp> #include <xercesc/util/XMLUniDefs.hpp> #include <xercesc/util/TransService.hpp> #include <xercesc/util/TranscodingException.hpp> #include <xercesc/util/PlatformUtils.hpp> XERCES_CPP_NAMESPACE_BEGIN CurlURLInputStream::CurlURLInputStream(const XMLURL& urlSource, const XMLNetHTTPInfo* httpInfo/*=0*/) : fMulti(0) , fEasy(0) , fMemoryManager(urlSource.getMemoryManager()) , fURLSource(urlSource) , fTotalBytesRead(0) , fWritePtr(0) , fBytesRead(0) , fBytesToRead(0) , fDataAvailable(false) , fBufferHeadPtr(fBuffer) , fBufferTailPtr(fBuffer) , fPayload(0) , fPayloadLen(0) , fContentType(0) { // Allocate the curl multi handle fMulti = curl_multi_init(); // Allocate the curl easy handle fEasy = curl_easy_init(); // Set URL option TranscodeToStr url(fURLSource.getURLText(), "ISO8859-1", fMemoryManager); curl_easy_setopt(fEasy, CURLOPT_URL, (char*)url.str()); // Set up a way to recieve the data curl_easy_setopt(fEasy, CURLOPT_WRITEDATA, this); // Pass this pointer to write function curl_easy_setopt(fEasy, CURLOPT_WRITEFUNCTION, staticWriteCallback); // Our static write function // Do redirects curl_easy_setopt(fEasy, CURLOPT_FOLLOWLOCATION, (long)1); curl_easy_setopt(fEasy, CURLOPT_MAXREDIRS, (long)6); // Add username and password if authentication is required const XMLCh *username = urlSource.getUser(); const XMLCh *password = urlSource.getPassword(); if(username && password) { XMLBuffer userPassBuf(256, fMemoryManager); userPassBuf.append(username); userPassBuf.append(chColon); userPassBuf.append(password); TranscodeToStr userPass(userPassBuf.getRawBuffer(), "ISO8859-1", fMemoryManager); curl_easy_setopt(fEasy, CURLOPT_HTTPAUTH, (long)CURLAUTH_ANY); curl_easy_setopt(fEasy, CURLOPT_USERPWD, (char*)userPass.str()); } if(httpInfo) { // Set the correct HTTP method switch(httpInfo->fHTTPMethod) { case XMLNetHTTPInfo::GET: break; case XMLNetHTTPInfo::PUT: curl_easy_setopt(fEasy, CURLOPT_UPLOAD, (long)1); break; case XMLNetHTTPInfo::POST: curl_easy_setopt(fEasy, CURLOPT_POST, (long)1); break; } // Add custom headers if(httpInfo->fHeaders) { struct curl_slist *headersList = 0; const char *headersBuf = httpInfo->fHeaders; 
const char *headersBufEnd = httpInfo->fHeaders + httpInfo->fHeadersLen; const char *headerStart = headersBuf; while(headersBuf < headersBufEnd) { if(*headersBuf == '\r' && (headersBuf + 1) < headersBufEnd && *(headersBuf + 1) == '\n') { XMLSize_t length = headersBuf - headerStart; ArrayJanitor<char> header((char*)fMemoryManager->allocate((length + 1) * sizeof(char)), fMemoryManager); memcpy(header.get(), headerStart, length); header.get()[length] = 0; headersList = curl_slist_append(headersList, header.get()); headersBuf += 2; headerStart = headersBuf; continue; } ++headersBuf; } curl_easy_setopt(fEasy, CURLOPT_HTTPHEADER, headersList); curl_slist_free_all(headersList); } // Set up the payload if(httpInfo->fPayload) { fPayload = httpInfo->fPayload; fPayloadLen = httpInfo->fPayloadLen; curl_easy_setopt(fEasy, CURLOPT_READDATA, this); curl_easy_setopt(fEasy, CURLOPT_READFUNCTION, staticReadCallback); curl_easy_setopt(fEasy, CURLOPT_INFILESIZE_LARGE, (curl_off_t)fPayloadLen); } } // Add easy handle to the multi stack curl_multi_add_handle(fMulti, fEasy); // Start reading, to get the content type while(fBufferHeadPtr == fBuffer) { int runningHandles = 0; readMore(&runningHandles); if(runningHandles == 0) break; } // Find the content type char *contentType8 = 0; curl_easy_getinfo(fEasy, CURLINFO_CONTENT_TYPE, &contentType8); if(contentType8) fContentType = TranscodeFromStr((XMLByte*)contentType8, XMLString::stringLen(contentType8), "ISO8859-1", fMemoryManager).adopt(); } CurlURLInputStream::~CurlURLInputStream() { // Remove the easy handle from the multi stack curl_multi_remove_handle(fMulti, fEasy); // Cleanup the easy handle curl_easy_cleanup(fEasy); // Cleanup the multi handle curl_multi_cleanup(fMulti); if(fContentType) fMemoryManager->deallocate(fContentType); } size_t CurlURLInputStream::staticWriteCallback(char *buffer, size_t size, size_t nitems, void *outstream) { return ((CurlURLInputStream*)outstream)->writeCallback(buffer, size, nitems); } size_t CurlURLInputStream::staticReadCallback(char *buffer, size_t size, size_t nitems, void *stream) { return ((CurlURLInputStream*)stream)->readCallback(buffer, size, nitems); } size_t CurlURLInputStream::writeCallback(char *buffer, size_t size, size_t nitems) { XMLSize_t cnt = size * nitems; XMLSize_t totalConsumed = 0; // Consume as many bytes as possible immediately into the buffer XMLSize_t consume = (cnt > fBytesToRead) ? fBytesToRead : cnt; memcpy(fWritePtr, buffer, consume); fWritePtr += consume; fBytesRead += consume; fTotalBytesRead += consume; fBytesToRead -= consume; //printf("write callback consuming %d bytes\n", consume); // If bytes remain, rebuffer as many as possible into our holding buffer buffer += consume; totalConsumed += consume; cnt -= consume; if (cnt > 0) { XMLSize_t bufAvail = sizeof(fBuffer) - (fBufferHeadPtr - fBuffer); consume = (cnt > bufAvail) ? bufAvail : cnt; memcpy(fBufferHeadPtr, buffer, consume); fBufferHeadPtr += consume; buffer += consume; totalConsumed += consume; //printf("write callback rebuffering %d bytes\n", consume); } // Return the total amount we've consumed. If we don't consume all the bytes // then an error will be generated. Since our buffer size is equal to the // maximum size that curl will write, this should never happen unless there // is a logic error somewhere here. 
return totalConsumed; } size_t CurlURLInputStream::readCallback(char *buffer, size_t size, size_t nitems) { XMLSize_t len = size * nitems; if(len > fPayloadLen) len = fPayloadLen; memcpy(buffer, fPayload, len); fPayload += len; fPayloadLen -= len; return len; } bool CurlURLInputStream::readMore(int *runningHandles) { // Ask the curl to do some work CURLMcode curlResult = curl_multi_perform(fMulti, runningHandles); // Process messages from curl int msgsInQueue = 0; for (CURLMsg* msg = NULL; (msg = curl_multi_info_read(fMulti, &msgsInQueue)) != NULL; ) { //printf("msg %d, %d from curl\n", msg->msg, msg->data.result); if (msg->msg != CURLMSG_DONE) return true; switch (msg->data.result) { case CURLE_OK: // We completed successfully. runningHandles should have dropped to zero, so we'll bail out below... break; case CURLE_UNSUPPORTED_PROTOCOL: ThrowXMLwithMemMgr(MalformedURLException, XMLExcepts::URL_UnsupportedProto, fMemoryManager); break; case CURLE_COULDNT_RESOLVE_HOST: case CURLE_COULDNT_RESOLVE_PROXY: { if (fURLSource.getHost()) ThrowXMLwithMemMgr1(NetAccessorException, XMLExcepts::NetAcc_TargetResolution, fURLSource.getHost(), fMemoryManager); else ThrowXMLwithMemMgr1(NetAccessorException, XMLExcepts::File_CouldNotOpenFile, fURLSource.getURLText(), fMemoryManager); break; } case CURLE_COULDNT_CONNECT: ThrowXMLwithMemMgr1(NetAccessorException, XMLExcepts::NetAcc_ConnSocket, fURLSource.getURLText(), fMemoryManager); break; case CURLE_RECV_ERROR: ThrowXMLwithMemMgr1(NetAccessorException, XMLExcepts::NetAcc_ReadSocket, fURLSource.getURLText(), fMemoryManager); break; default: ThrowXMLwithMemMgr1(NetAccessorException, XMLExcepts::NetAcc_InternalError, fURLSource.getURLText(), fMemoryManager); break; } } // If nothing is running any longer, bail out if(*runningHandles == 0) return false; // If there is no further data to read, and we haven't // read any yet on this invocation, call select to wait for data if (curlResult != CURLM_CALL_MULTI_PERFORM && fBytesRead == 0) { fd_set readSet; fd_set writeSet; fd_set exceptSet; int fdcnt=0; FD_ZERO(&readSet); FD_ZERO(&writeSet); FD_ZERO(&exceptSet); // Ask curl for the file descriptors to wait on curl_multi_fdset(fMulti, &readSet, &writeSet, &exceptSet, &fdcnt); // Wait on the file descriptors timeval tv; tv.tv_sec = 2; tv.tv_usec = 0; select(fdcnt+1, &readSet, &writeSet, &exceptSet, &tv); } return curlResult == CURLM_CALL_MULTI_PERFORM; } XMLSize_t CurlURLInputStream::readBytes(XMLByte* const toFill , const XMLSize_t maxToRead) { fBytesRead = 0; fBytesToRead = maxToRead; fWritePtr = toFill; for (bool tryAgain = true; fBytesToRead > 0 && (tryAgain || fBytesRead == 0); ) { // First, any buffered data we have available XMLSize_t bufCnt = fBufferHeadPtr - fBufferTailPtr; bufCnt = (bufCnt > fBytesToRead) ? fBytesToRead : bufCnt; if (bufCnt > 0) { memcpy(fWritePtr, fBufferTailPtr, bufCnt); fWritePtr += bufCnt; fBytesRead += bufCnt; fTotalBytesRead += bufCnt; fBytesToRead -= bufCnt; fBufferTailPtr += bufCnt; if (fBufferTailPtr == fBufferHeadPtr) fBufferHeadPtr = fBufferTailPtr = fBuffer; //printf("consuming %d buffered bytes\n", bufCnt); tryAgain = true; continue; } // Ask the curl to do some work int runningHandles = 0; tryAgain = readMore(&runningHandles); // If nothing is running any longer, bail out if (runningHandles == 0) break; } return fBytesRead; } const XMLCh *CurlURLInputStream::getContentType() const { return fContentType; } XERCES_CPP_NAMESPACE_END
gpl-2.0
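The Xerces CurlURLInputStream record above drives libcurl's multi interface and accumulates arriving bytes inside a write callback, returning a short count to make curl abort on error. The reduced C sketch below shows the same callback-buffering pattern using only the easy interface; the membuf struct is invented for the example, the curl calls themselves are standard libcurl API, and this is an illustration rather than the Xerces code.

#include <curl/curl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct membuf {
	char  *data;
	size_t len;
};

static size_t write_cb(char *ptr, size_t size, size_t nmemb, void *userdata)
{
	struct membuf *mb = userdata;
	size_t n = size * nmemb;
	char *p = realloc(mb->data, mb->len + n + 1);

	if (!p)
		return 0;            /* short count => curl aborts the transfer */
	mb->data = p;
	memcpy(mb->data + mb->len, ptr, n);
	mb->len += n;
	mb->data[mb->len] = '\0';
	return n;                    /* must consume everything we were given */
}

int main(void)
{
	struct membuf mb = { NULL, 0 };
	CURL *easy;
	CURLcode rc;

	curl_global_init(CURL_GLOBAL_DEFAULT);
	easy = curl_easy_init();
	curl_easy_setopt(easy, CURLOPT_URL, "http://example.com/");
	curl_easy_setopt(easy, CURLOPT_FOLLOWLOCATION, 1L);
	curl_easy_setopt(easy, CURLOPT_WRITEFUNCTION, write_cb);
	curl_easy_setopt(easy, CURLOPT_WRITEDATA, &mb);
	rc = curl_easy_perform(easy);
	if (rc == CURLE_OK)
		printf("fetched %zu bytes\n", mb.len);
	curl_easy_cleanup(easy);
	curl_global_cleanup();
	free(mb.data);
	return rc == CURLE_OK ? 0 : 1;
}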
stevezilla/amherst-linux-imx6
sound/soc/fsl/imx-ssi.c
43
16108
/* * imx-ssi.c -- ALSA Soc Audio Layer * * Copyright 2009 Sascha Hauer <s.hauer@pengutronix.de> * * This code is based on code copyrighted by Freescale, * Liam Girdwood, Javier Martin and probably others. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * * The i.MX SSI core has some nasty limitations in AC97 mode. While most * sane processor vendors have a FIFO per AC97 slot, the i.MX has only * one FIFO which combines all valid receive slots. We cannot even select * which slots we want to receive. The WM9712 with which this driver * was developed with always sends GPIO status data in slot 12 which * we receive in our (PCM-) data stream. The only chance we have is to * manually skip this data in the FIQ handler. With sampling rates different * from 48000Hz not every frame has valid receive data, so the ratio * between pcm data and GPIO status data changes. Our FIQ handler is not * able to handle this, hence this driver only works with 48000Hz sampling * rate. * Reading and writing AC97 registers is another challenge. The core * provides us status bits when the read register is updated with *another* * value. When we read the same register two times (and the register still * contains the same value) these status bits are not set. We work * around this by not polling these bits but only wait a fixed delay. * */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <linux/platform_data/asoc-imx-ssi.h> #include "imx-ssi.h" #define SSI_SACNT_DEFAULT (SSI_SACNT_AC97EN | SSI_SACNT_FV) /* * SSI Network Mode or TDM slots configuration. * Should only be called when port is inactive (i.e. SSIEN = 0). */ static int imx_ssi_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai, unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width) { struct imx_ssi *ssi = snd_soc_dai_get_drvdata(cpu_dai); u32 sccr; sccr = readl(ssi->base + SSI_STCCR); sccr &= ~SSI_STCCR_DC_MASK; sccr |= SSI_STCCR_DC(slots - 1); writel(sccr, ssi->base + SSI_STCCR); sccr = readl(ssi->base + SSI_SRCCR); sccr &= ~SSI_STCCR_DC_MASK; sccr |= SSI_STCCR_DC(slots - 1); writel(sccr, ssi->base + SSI_SRCCR); writel(tx_mask, ssi->base + SSI_STMSK); writel(rx_mask, ssi->base + SSI_SRMSK); return 0; } /* * SSI DAI format configuration. * Should only be called when port is inactive (i.e. SSIEN = 0). 
*/ static int imx_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) { struct imx_ssi *ssi = snd_soc_dai_get_drvdata(cpu_dai); u32 strcr = 0, scr; scr = readl(ssi->base + SSI_SCR) & ~(SSI_SCR_SYN | SSI_SCR_NET); /* DAI mode */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: /* data on rising edge of bclk, frame low 1clk before data */ strcr |= SSI_STCR_TFSI | SSI_STCR_TEFS | SSI_STCR_TXBIT0; scr |= SSI_SCR_NET; if (ssi->flags & IMX_SSI_USE_I2S_SLAVE) { scr &= ~SSI_I2S_MODE_MASK; scr |= SSI_SCR_I2S_MODE_SLAVE; } break; case SND_SOC_DAIFMT_LEFT_J: /* data on rising edge of bclk, frame high with data */ strcr |= SSI_STCR_TXBIT0; break; case SND_SOC_DAIFMT_DSP_B: /* data on rising edge of bclk, frame high with data */ strcr |= SSI_STCR_TFSL | SSI_STCR_TXBIT0; break; case SND_SOC_DAIFMT_DSP_A: /* data on rising edge of bclk, frame high 1clk before data */ strcr |= SSI_STCR_TFSL | SSI_STCR_TXBIT0 | SSI_STCR_TEFS; break; } /* DAI clock inversion */ switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_IB_IF: strcr |= SSI_STCR_TFSI; strcr &= ~SSI_STCR_TSCKP; break; case SND_SOC_DAIFMT_IB_NF: strcr &= ~(SSI_STCR_TSCKP | SSI_STCR_TFSI); break; case SND_SOC_DAIFMT_NB_IF: strcr |= SSI_STCR_TFSI | SSI_STCR_TSCKP; break; case SND_SOC_DAIFMT_NB_NF: strcr &= ~SSI_STCR_TFSI; strcr |= SSI_STCR_TSCKP; break; } /* DAI clock master masks */ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: break; default: /* Master mode not implemented, needs handling of clocks. */ return -EINVAL; } strcr |= SSI_STCR_TFEN0; if (ssi->flags & IMX_SSI_NET) scr |= SSI_SCR_NET; if (ssi->flags & IMX_SSI_SYN) scr |= SSI_SCR_SYN; writel(strcr, ssi->base + SSI_STCR); writel(strcr, ssi->base + SSI_SRCR); writel(scr, ssi->base + SSI_SCR); return 0; } /* * SSI system clock configuration. * Should only be called when port is inactive (i.e. SSIEN = 0). */ static int imx_ssi_set_dai_sysclk(struct snd_soc_dai *cpu_dai, int clk_id, unsigned int freq, int dir) { struct imx_ssi *ssi = snd_soc_dai_get_drvdata(cpu_dai); u32 scr; scr = readl(ssi->base + SSI_SCR); switch (clk_id) { case IMX_SSP_SYS_CLK: if (dir == SND_SOC_CLOCK_OUT) scr |= SSI_SCR_SYS_CLK_EN; else scr &= ~SSI_SCR_SYS_CLK_EN; break; default: return -EINVAL; } writel(scr, ssi->base + SSI_SCR); return 0; } /* * SSI Clock dividers * Should only be called when port is inactive (i.e. SSIEN = 0). */ static int imx_ssi_set_dai_clkdiv(struct snd_soc_dai *cpu_dai, int div_id, int div) { struct imx_ssi *ssi = snd_soc_dai_get_drvdata(cpu_dai); u32 stccr, srccr; stccr = readl(ssi->base + SSI_STCCR); srccr = readl(ssi->base + SSI_SRCCR); switch (div_id) { case IMX_SSI_TX_DIV_2: stccr &= ~SSI_STCCR_DIV2; stccr |= div; break; case IMX_SSI_TX_DIV_PSR: stccr &= ~SSI_STCCR_PSR; stccr |= div; break; case IMX_SSI_TX_DIV_PM: stccr &= ~0xff; stccr |= SSI_STCCR_PM(div); break; case IMX_SSI_RX_DIV_2: stccr &= ~SSI_STCCR_DIV2; stccr |= div; break; case IMX_SSI_RX_DIV_PSR: stccr &= ~SSI_STCCR_PSR; stccr |= div; break; case IMX_SSI_RX_DIV_PM: stccr &= ~0xff; stccr |= SSI_STCCR_PM(div); break; default: return -EINVAL; } writel(stccr, ssi->base + SSI_STCCR); writel(srccr, ssi->base + SSI_SRCCR); return 0; } /* * Should only be called when port is inactive (i.e. SSIEN = 0), * although can be called multiple times by upper layers. 
*/ static int imx_ssi_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *cpu_dai) { struct imx_ssi *ssi = snd_soc_dai_get_drvdata(cpu_dai); u32 reg, sccr; /* Tx/Rx config */ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) reg = SSI_STCCR; else reg = SSI_SRCCR; if (ssi->flags & IMX_SSI_SYN) reg = SSI_STCCR; sccr = readl(ssi->base + reg) & ~SSI_STCCR_WL_MASK; /* DAI data (word) size */ switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: sccr |= SSI_SRCCR_WL(16); break; case SNDRV_PCM_FORMAT_S20_3LE: sccr |= SSI_SRCCR_WL(20); break; case SNDRV_PCM_FORMAT_S24_LE: sccr |= SSI_SRCCR_WL(24); break; } writel(sccr, ssi->base + reg); return 0; } static int imx_ssi_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { struct imx_ssi *ssi = snd_soc_dai_get_drvdata(dai); unsigned int sier_bits, sier; unsigned int scr; scr = readl(ssi->base + SSI_SCR); sier = readl(ssi->base + SSI_SIER); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { if (ssi->flags & IMX_SSI_DMA) sier_bits = SSI_SIER_TDMAE; else sier_bits = SSI_SIER_TIE | SSI_SIER_TFE0_EN; } else { if (ssi->flags & IMX_SSI_DMA) sier_bits = SSI_SIER_RDMAE; else sier_bits = SSI_SIER_RIE | SSI_SIER_RFF0_EN; } switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) scr |= SSI_SCR_TE; else scr |= SSI_SCR_RE; sier |= sier_bits; if (++ssi->enabled == 1) scr |= SSI_SCR_SSIEN; break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) scr &= ~SSI_SCR_TE; else scr &= ~SSI_SCR_RE; sier &= ~sier_bits; if (--ssi->enabled == 0) scr &= ~SSI_SCR_SSIEN; break; default: return -EINVAL; } if (!(ssi->flags & IMX_SSI_USE_AC97)) /* rx/tx are always enabled to access ac97 registers */ writel(scr, ssi->base + SSI_SCR); writel(sier, ssi->base + SSI_SIER); return 0; } static const struct snd_soc_dai_ops imx_ssi_pcm_dai_ops = { .hw_params = imx_ssi_hw_params, .set_fmt = imx_ssi_set_dai_fmt, .set_clkdiv = imx_ssi_set_dai_clkdiv, .set_sysclk = imx_ssi_set_dai_sysclk, .set_tdm_slot = imx_ssi_set_dai_tdm_slot, .trigger = imx_ssi_trigger, }; static int imx_ssi_dai_probe(struct snd_soc_dai *dai) { struct imx_ssi *ssi = dev_get_drvdata(dai->dev); uint32_t val; snd_soc_dai_set_drvdata(dai, ssi); val = SSI_SFCSR_TFWM0(ssi->dma_params_tx.maxburst) | SSI_SFCSR_RFWM0(ssi->dma_params_rx.maxburst); writel(val, ssi->base + SSI_SFCSR); /* Tx/Rx config */ dai->playback_dma_data = &ssi->dma_params_tx; dai->capture_dma_data = &ssi->dma_params_rx; return 0; } static struct snd_soc_dai_driver imx_ssi_dai = { .probe = imx_ssi_dai_probe, .playback = { .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_96000, .formats = SNDRV_PCM_FMTBIT_S16_LE, }, .capture = { .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_96000, .formats = SNDRV_PCM_FMTBIT_S16_LE, }, .ops = &imx_ssi_pcm_dai_ops, }; static struct snd_soc_dai_driver imx_ac97_dai = { .probe = imx_ssi_dai_probe, .ac97_control = 1, .playback = { .stream_name = "AC97 Playback", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_48000, .formats = SNDRV_PCM_FMTBIT_S16_LE, }, .capture = { .stream_name = "AC97 Capture", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_48000, .formats = SNDRV_PCM_FMTBIT_S16_LE, }, .ops = &imx_ssi_pcm_dai_ops, }; static const struct snd_soc_component_driver 
imx_component = { .name = DRV_NAME, }; static void setup_channel_to_ac97(struct imx_ssi *imx_ssi) { void __iomem *base = imx_ssi->base; writel(0x0, base + SSI_SCR); writel(0x0, base + SSI_STCR); writel(0x0, base + SSI_SRCR); writel(SSI_SCR_SYN | SSI_SCR_NET, base + SSI_SCR); writel(SSI_SFCSR_RFWM0(8) | SSI_SFCSR_TFWM0(8) | SSI_SFCSR_RFWM1(8) | SSI_SFCSR_TFWM1(8), base + SSI_SFCSR); writel(SSI_STCCR_WL(16) | SSI_STCCR_DC(12), base + SSI_STCCR); writel(SSI_STCCR_WL(16) | SSI_STCCR_DC(12), base + SSI_SRCCR); writel(SSI_SCR_SYN | SSI_SCR_NET | SSI_SCR_SSIEN, base + SSI_SCR); writel(SSI_SOR_WAIT(3), base + SSI_SOR); writel(SSI_SCR_SYN | SSI_SCR_NET | SSI_SCR_SSIEN | SSI_SCR_TE | SSI_SCR_RE, base + SSI_SCR); writel(SSI_SACNT_DEFAULT, base + SSI_SACNT); writel(0xff, base + SSI_SACCDIS); writel(0x300, base + SSI_SACCEN); } static struct imx_ssi *ac97_ssi; static void imx_ssi_ac97_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short val) { struct imx_ssi *imx_ssi = ac97_ssi; void __iomem *base = imx_ssi->base; unsigned int lreg; unsigned int lval; if (reg > 0x7f) return; pr_debug("%s: 0x%02x 0x%04x\n", __func__, reg, val); lreg = reg << 12; writel(lreg, base + SSI_SACADD); lval = val << 4; writel(lval , base + SSI_SACDAT); writel(SSI_SACNT_DEFAULT | SSI_SACNT_WR, base + SSI_SACNT); udelay(100); } static unsigned short imx_ssi_ac97_read(struct snd_ac97 *ac97, unsigned short reg) { struct imx_ssi *imx_ssi = ac97_ssi; void __iomem *base = imx_ssi->base; unsigned short val = -1; unsigned int lreg; lreg = (reg & 0x7f) << 12 ; writel(lreg, base + SSI_SACADD); writel(SSI_SACNT_DEFAULT | SSI_SACNT_RD, base + SSI_SACNT); udelay(100); val = (readl(base + SSI_SACDAT) >> 4) & 0xffff; pr_debug("%s: 0x%02x 0x%04x\n", __func__, reg, val); return val; } static void imx_ssi_ac97_reset(struct snd_ac97 *ac97) { struct imx_ssi *imx_ssi = ac97_ssi; if (imx_ssi->ac97_reset) imx_ssi->ac97_reset(ac97); /* First read sometimes fails, do a dummy read */ imx_ssi_ac97_read(ac97, 0); } static void imx_ssi_ac97_warm_reset(struct snd_ac97 *ac97) { struct imx_ssi *imx_ssi = ac97_ssi; if (imx_ssi->ac97_warm_reset) imx_ssi->ac97_warm_reset(ac97); /* First read sometimes fails, do a dummy read */ imx_ssi_ac97_read(ac97, 0); } struct snd_ac97_bus_ops soc_ac97_ops = { .read = imx_ssi_ac97_read, .write = imx_ssi_ac97_write, .reset = imx_ssi_ac97_reset, .warm_reset = imx_ssi_ac97_warm_reset }; EXPORT_SYMBOL_GPL(soc_ac97_ops); static int imx_ssi_probe(struct platform_device *pdev) { struct resource *res; struct imx_ssi *ssi; struct imx_ssi_platform_data *pdata = pdev->dev.platform_data; int ret = 0; struct snd_soc_dai_driver *dai; ssi = devm_kzalloc(&pdev->dev, sizeof(*ssi), GFP_KERNEL); if (!ssi) return -ENOMEM; dev_set_drvdata(&pdev->dev, ssi); if (pdata) { ssi->ac97_reset = pdata->ac97_reset; ssi->ac97_warm_reset = pdata->ac97_warm_reset; ssi->flags = pdata->flags; } ssi->irq = platform_get_irq(pdev, 0); ssi->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(ssi->clk)) { ret = PTR_ERR(ssi->clk); dev_err(&pdev->dev, "Cannot get the clock: %d\n", ret); goto failed_clk; } clk_prepare_enable(ssi->clk); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ssi->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(ssi->base)) { ret = PTR_ERR(ssi->base); goto failed_register; } if (ssi->flags & IMX_SSI_USE_AC97) { if (ac97_ssi) { dev_err(&pdev->dev, "AC'97 SSI already registered\n"); ret = -EBUSY; goto failed_register; } ac97_ssi = ssi; setup_channel_to_ac97(ssi); dai = &imx_ac97_dai; } else dai = &imx_ssi_dai; writel(0x0, 
ssi->base + SSI_SIER); ssi->dma_params_rx.addr = res->start + SSI_SRX0; ssi->dma_params_tx.addr = res->start + SSI_STX0; ssi->dma_params_tx.maxburst = 6; ssi->dma_params_rx.maxburst = 4; ssi->dma_params_tx.filter_data = &ssi->filter_data_tx; ssi->dma_params_rx.filter_data = &ssi->filter_data_rx; res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx0"); if (res) { imx_pcm_dma_params_init_data(&ssi->filter_data_tx, res->start, false); } res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx0"); if (res) { imx_pcm_dma_params_init_data(&ssi->filter_data_rx, res->start, false); } platform_set_drvdata(pdev, ssi); ret = snd_soc_register_component(&pdev->dev, &imx_component, dai, 1); if (ret) { dev_err(&pdev->dev, "register DAI failed\n"); goto failed_register; } ret = imx_pcm_fiq_init(pdev); if (ret) goto failed_pcm_fiq; ret = imx_pcm_dma_init(pdev, SND_DMAENGINE_PCM_FLAG_NO_RESIDUE | SND_DMAENGINE_PCM_FLAG_NO_DT | SND_DMAENGINE_PCM_FLAG_COMPAT, IMX_SSI_DMABUF_SIZE); if (ret) goto failed_pcm_dma; return 0; failed_pcm_dma: imx_pcm_fiq_exit(pdev); failed_pcm_fiq: snd_soc_unregister_component(&pdev->dev); failed_register: release_mem_region(res->start, resource_size(res)); clk_disable_unprepare(ssi->clk); failed_clk: return ret; } static int imx_ssi_remove(struct platform_device *pdev) { struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct imx_ssi *ssi = platform_get_drvdata(pdev); imx_pcm_dma_exit(pdev); imx_pcm_fiq_exit(pdev); snd_soc_unregister_component(&pdev->dev); if (ssi->flags & IMX_SSI_USE_AC97) ac97_ssi = NULL; release_mem_region(res->start, resource_size(res)); clk_disable_unprepare(ssi->clk); return 0; } static struct platform_driver imx_ssi_driver = { .probe = imx_ssi_probe, .remove = imx_ssi_remove, .driver = { .name = "imx-ssi", .owner = THIS_MODULE, }, }; module_platform_driver(imx_ssi_driver); /* Module information */ MODULE_AUTHOR("Sascha Hauer, <s.hauer@pengutronix.de>"); MODULE_DESCRIPTION("i.MX I2S/ac97 SoC Interface"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:imx-ssi");
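/*
 * Illustration only: a minimal sketch of how a machine driver would
 * typically exercise the DAI callbacks above. Machine drivers live in
 * their own files; the function name here is hypothetical, and the
 * format flags simply match the one combination this driver accepts
 * (the codec supplies bit and frame clock, see imx_ssi_set_dai_fmt()).
 */
static int __maybe_unused example_machine_hw_params(
	struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	int ret;

	/* I2S, no clock inversion, codec is bit/frame clock master */
	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
				  SND_SOC_DAIFMT_NB_NF |
				  SND_SOC_DAIFMT_CBM_CFM);
	if (ret < 0)
		return ret;

	/* drive the SSI system clock out, e.g. as the codec master clock */
	return snd_soc_dai_set_sysclk(cpu_dai, IMX_SSP_SYS_CLK, 0,
				      SND_SOC_CLOCK_OUT);
}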
gpl-2.0
glfernando/remoteproc
drivers/char/tpm/tpm_tis.c
43
22619
/* * Copyright (C) 2005, 2006 IBM Corporation * * Authors: * Leendert van Doorn <leendert@watson.ibm.com> * Kylene Hall <kjhall@us.ibm.com> * * Maintained by: <tpmdd-devel@lists.sourceforge.net> * * Device driver for TCG/TCPA TPM (trusted platform module). * Specifications at www.trustedcomputinggroup.org * * This device driver implements the TPM interface as defined in * the TCG TPM Interface Spec version 1.2, revision 1.0. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 of the * License. */ #include <linux/init.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/pnp.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/wait.h> #include <linux/acpi.h> #include <linux/freezer.h> #include "tpm.h" enum tis_access { TPM_ACCESS_VALID = 0x80, TPM_ACCESS_ACTIVE_LOCALITY = 0x20, TPM_ACCESS_REQUEST_PENDING = 0x04, TPM_ACCESS_REQUEST_USE = 0x02, }; enum tis_status { TPM_STS_VALID = 0x80, TPM_STS_COMMAND_READY = 0x40, TPM_STS_GO = 0x20, TPM_STS_DATA_AVAIL = 0x10, TPM_STS_DATA_EXPECT = 0x08, }; enum tis_int_flags { TPM_GLOBAL_INT_ENABLE = 0x80000000, TPM_INTF_BURST_COUNT_STATIC = 0x100, TPM_INTF_CMD_READY_INT = 0x080, TPM_INTF_INT_EDGE_FALLING = 0x040, TPM_INTF_INT_EDGE_RISING = 0x020, TPM_INTF_INT_LEVEL_LOW = 0x010, TPM_INTF_INT_LEVEL_HIGH = 0x008, TPM_INTF_LOCALITY_CHANGE_INT = 0x004, TPM_INTF_STS_VALID_INT = 0x002, TPM_INTF_DATA_AVAIL_INT = 0x001, }; enum tis_defaults { TIS_MEM_BASE = 0xFED40000, TIS_MEM_LEN = 0x5000, TIS_SHORT_TIMEOUT = 750, /* ms */ TIS_LONG_TIMEOUT = 2000, /* 2 sec */ }; #define TPM_ACCESS(l) (0x0000 | ((l) << 12)) #define TPM_INT_ENABLE(l) (0x0008 | ((l) << 12)) #define TPM_INT_VECTOR(l) (0x000C | ((l) << 12)) #define TPM_INT_STATUS(l) (0x0010 | ((l) << 12)) #define TPM_INTF_CAPS(l) (0x0014 | ((l) << 12)) #define TPM_STS(l) (0x0018 | ((l) << 12)) #define TPM_DATA_FIFO(l) (0x0024 | ((l) << 12)) #define TPM_DID_VID(l) (0x0F00 | ((l) << 12)) #define TPM_RID(l) (0x0F04 | ((l) << 12)) static LIST_HEAD(tis_chips); static DEFINE_MUTEX(tis_lock); #if defined(CONFIG_PNP) && defined(CONFIG_ACPI) static int is_itpm(struct pnp_dev *dev) { struct acpi_device *acpi = pnp_acpi_device(dev); struct acpi_hardware_id *id; list_for_each_entry(id, &acpi->pnp.ids, list) { if (!strcmp("INTC0102", id->id)) return 1; } return 0; } #else static inline int is_itpm(struct pnp_dev *dev) { return 0; } #endif static int check_locality(struct tpm_chip *chip, int l) { if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) == (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) return chip->vendor.locality = l; return -1; } static void release_locality(struct tpm_chip *chip, int l, int force) { if (force || (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) & (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) == (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) iowrite8(TPM_ACCESS_ACTIVE_LOCALITY, chip->vendor.iobase + TPM_ACCESS(l)); } static int request_locality(struct tpm_chip *chip, int l) { unsigned long stop, timeout; long rc; if (check_locality(chip, l) >= 0) return l; iowrite8(TPM_ACCESS_REQUEST_USE, chip->vendor.iobase + TPM_ACCESS(l)); stop = jiffies + chip->vendor.timeout_a; if (chip->vendor.irq) { again: timeout = stop - jiffies; if ((long)timeout <= 0) return -1; rc = wait_event_interruptible_timeout(chip->vendor.int_queue, (check_locality (chip, l) >= 0), timeout); if (rc > 0) return l; 
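		/*
		 * -ERESTARTSYS here usually means the freezer interrupted
		 * the wait with a fake signal for suspend; clear it and
		 * redo the wait instead of failing the locality request.
		 */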
if (rc == -ERESTARTSYS && freezing(current)) { clear_thread_flag(TIF_SIGPENDING); goto again; } } else { /* wait for burstcount */ do { if (check_locality(chip, l) >= 0) return l; msleep(TPM_TIMEOUT); } while (time_before(jiffies, stop)); } return -1; } static u8 tpm_tis_status(struct tpm_chip *chip) { return ioread8(chip->vendor.iobase + TPM_STS(chip->vendor.locality)); } static void tpm_tis_ready(struct tpm_chip *chip) { /* this causes the current command to be aborted */ iowrite8(TPM_STS_COMMAND_READY, chip->vendor.iobase + TPM_STS(chip->vendor.locality)); } static int get_burstcount(struct tpm_chip *chip) { unsigned long stop; int burstcnt; /* wait for burstcount */ /* which timeout value, spec has 2 answers (c & d) */ stop = jiffies + chip->vendor.timeout_d; do { burstcnt = ioread8(chip->vendor.iobase + TPM_STS(chip->vendor.locality) + 1); burstcnt += ioread8(chip->vendor.iobase + TPM_STS(chip->vendor.locality) + 2) << 8; if (burstcnt) return burstcnt; msleep(TPM_TIMEOUT); } while (time_before(jiffies, stop)); return -EBUSY; } static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) { int size = 0, burstcnt; while (size < count && wait_for_tpm_stat(chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID, chip->vendor.timeout_c, &chip->vendor.read_queue) == 0) { burstcnt = get_burstcount(chip); for (; burstcnt > 0 && size < count; burstcnt--) buf[size++] = ioread8(chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor. locality)); } return size; } static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) { int size = 0; int expected, status; if (count < TPM_HEADER_SIZE) { size = -EIO; goto out; } /* read first 10 bytes, including tag, paramsize, and result */ if ((size = recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) { dev_err(chip->dev, "Unable to read header\n"); goto out; } expected = be32_to_cpu(*(__be32 *) (buf + 2)); if (expected > count) { size = -EIO; goto out; } if ((size += recv_data(chip, &buf[TPM_HEADER_SIZE], expected - TPM_HEADER_SIZE)) < expected) { dev_err(chip->dev, "Unable to read remainder of result\n"); size = -ETIME; goto out; } wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, &chip->vendor.int_queue); status = tpm_tis_status(chip); if (status & TPM_STS_DATA_AVAIL) { /* retry? 
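 * Data still available after the full 'expected' length has been read
 * means the response is longer than its header claimed; treat it as a
 * fatal I/O error rather than retrying.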
*/ dev_err(chip->dev, "Error left over data\n"); size = -EIO; goto out; } out: tpm_tis_ready(chip); release_locality(chip, chip->vendor.locality, 0); return size; } static bool itpm; module_param(itpm, bool, 0444); MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)"); /* * If interrupts are used (signaled by an irq set in the vendor structure) * tpm.c can skip polling for the data to be available as the interrupt is * waited for here */ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len) { int rc, status, burstcnt; size_t count = 0; if (request_locality(chip, 0) < 0) return -EBUSY; status = tpm_tis_status(chip); if ((status & TPM_STS_COMMAND_READY) == 0) { tpm_tis_ready(chip); if (wait_for_tpm_stat (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b, &chip->vendor.int_queue) < 0) { rc = -ETIME; goto out_err; } } while (count < len - 1) { burstcnt = get_burstcount(chip); for (; burstcnt > 0 && count < len - 1; burstcnt--) { iowrite8(buf[count], chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality)); count++; } wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, &chip->vendor.int_queue); status = tpm_tis_status(chip); if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) { rc = -EIO; goto out_err; } } /* write last byte */ iowrite8(buf[count], chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality)); wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, &chip->vendor.int_queue); status = tpm_tis_status(chip); if ((status & TPM_STS_DATA_EXPECT) != 0) { rc = -EIO; goto out_err; } return 0; out_err: tpm_tis_ready(chip); release_locality(chip, chip->vendor.locality, 0); return rc; } /* * If interrupts are used (signaled by an irq set in the vendor structure) * tpm.c can skip polling for the data to be available as the interrupt is * waited for here */ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) { int rc; u32 ordinal; rc = tpm_tis_send_data(chip, buf, len); if (rc < 0) return rc; /* go and do it */ iowrite8(TPM_STS_GO, chip->vendor.iobase + TPM_STS(chip->vendor.locality)); if (chip->vendor.irq) { ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); if (wait_for_tpm_stat (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID, tpm_calc_ordinal_duration(chip, ordinal), &chip->vendor.read_queue) < 0) { rc = -ETIME; goto out_err; } } return len; out_err: tpm_tis_ready(chip); release_locality(chip, chip->vendor.locality, 0); return rc; } /* * Early probing for iTPM with STS_DATA_EXPECT flaw. * Try sending command without itpm flag set and if that * fails, repeat with itpm flag set. 
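 * The probe payload is a raw TPM_GetTicks request (tag 0x00c1, length 10,
 * ordinal 0xf1); tpm_tis_send_data() only loads it into the FIFO and never
 * sets TPM_STS_GO, and the chip is returned to the ready state after each
 * attempt.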
*/ static int probe_itpm(struct tpm_chip *chip) { int rc = 0; u8 cmd_getticks[] = { 0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, 0xf1 }; size_t len = sizeof(cmd_getticks); bool rem_itpm = itpm; u16 vendor = ioread16(chip->vendor.iobase + TPM_DID_VID(0)); /* probe only iTPMS */ if (vendor != TPM_VID_INTEL) return 0; itpm = 0; rc = tpm_tis_send_data(chip, cmd_getticks, len); if (rc == 0) goto out; tpm_tis_ready(chip); release_locality(chip, chip->vendor.locality, 0); itpm = 1; rc = tpm_tis_send_data(chip, cmd_getticks, len); if (rc == 0) { dev_info(chip->dev, "Detected an iTPM.\n"); rc = 1; } else rc = -EFAULT; out: itpm = rem_itpm; tpm_tis_ready(chip); release_locality(chip, chip->vendor.locality, 0); return rc; } static const struct file_operations tis_ops = { .owner = THIS_MODULE, .llseek = no_llseek, .open = tpm_open, .read = tpm_read, .write = tpm_write, .release = tpm_release, }; static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL); static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL); static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL); static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL); static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL); static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL); static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL); static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL); static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); static struct attribute *tis_attrs[] = { &dev_attr_pubek.attr, &dev_attr_pcrs.attr, &dev_attr_enabled.attr, &dev_attr_active.attr, &dev_attr_owned.attr, &dev_attr_temp_deactivated.attr, &dev_attr_caps.attr, &dev_attr_cancel.attr, &dev_attr_durations.attr, &dev_attr_timeouts.attr, NULL, }; static struct attribute_group tis_attr_grp = { .attrs = tis_attrs }; static struct tpm_vendor_specific tpm_tis = { .status = tpm_tis_status, .recv = tpm_tis_recv, .send = tpm_tis_send, .cancel = tpm_tis_ready, .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID, .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID, .req_canceled = TPM_STS_COMMAND_READY, .attr_group = &tis_attr_grp, .miscdev = { .fops = &tis_ops,}, }; static irqreturn_t tis_int_probe(int irq, void *dev_id) { struct tpm_chip *chip = dev_id; u32 interrupt; interrupt = ioread32(chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality)); if (interrupt == 0) return IRQ_NONE; chip->vendor.probed_irq = irq; /* Clear interrupts handled with TPM_EOI */ iowrite32(interrupt, chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality)); return IRQ_HANDLED; } static irqreturn_t tis_int_handler(int dummy, void *dev_id) { struct tpm_chip *chip = dev_id; u32 interrupt; int i; interrupt = ioread32(chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality)); if (interrupt == 0) return IRQ_NONE; if (interrupt & TPM_INTF_DATA_AVAIL_INT) wake_up_interruptible(&chip->vendor.read_queue); if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT) for (i = 0; i < 5; i++) if (check_locality(chip, i) >= 0) break; if (interrupt & (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT | TPM_INTF_CMD_READY_INT)) wake_up_interruptible(&chip->vendor.int_queue); /* Clear interrupts handled with TPM_EOI */ iowrite32(interrupt, chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality)); ioread32(chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality)); return IRQ_HANDLED; } static bool interrupts = 1; module_param(interrupts, bool, 0444); 
MODULE_PARM_DESC(interrupts, "Enable interrupts"); static int tpm_tis_init(struct device *dev, resource_size_t start, resource_size_t len, unsigned int irq) { u32 vendor, intfcaps, intmask; int rc, i, irq_s, irq_e, probe; struct tpm_chip *chip; if (!(chip = tpm_register_hardware(dev, &tpm_tis))) return -ENODEV; chip->vendor.iobase = ioremap(start, len); if (!chip->vendor.iobase) { rc = -EIO; goto out_err; } /* Default timeouts */ chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT); chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT); chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT); chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT); if (request_locality(chip, 0) != 0) { rc = -ENODEV; goto out_err; } vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0)); dev_info(dev, "1.2 TPM (device-id 0x%X, rev-id %d)\n", vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0))); if (!itpm) { probe = probe_itpm(chip); if (probe < 0) { rc = -ENODEV; goto out_err; } itpm = (probe == 0) ? 0 : 1; } if (itpm) dev_info(dev, "Intel iTPM workaround enabled\n"); /* Figure out the capabilities */ intfcaps = ioread32(chip->vendor.iobase + TPM_INTF_CAPS(chip->vendor.locality)); dev_dbg(dev, "TPM interface capabilities (0x%x):\n", intfcaps); if (intfcaps & TPM_INTF_BURST_COUNT_STATIC) dev_dbg(dev, "\tBurst Count Static\n"); if (intfcaps & TPM_INTF_CMD_READY_INT) dev_dbg(dev, "\tCommand Ready Int Support\n"); if (intfcaps & TPM_INTF_INT_EDGE_FALLING) dev_dbg(dev, "\tInterrupt Edge Falling\n"); if (intfcaps & TPM_INTF_INT_EDGE_RISING) dev_dbg(dev, "\tInterrupt Edge Rising\n"); if (intfcaps & TPM_INTF_INT_LEVEL_LOW) dev_dbg(dev, "\tInterrupt Level Low\n"); if (intfcaps & TPM_INTF_INT_LEVEL_HIGH) dev_dbg(dev, "\tInterrupt Level High\n"); if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT) dev_dbg(dev, "\tLocality Change Int Support\n"); if (intfcaps & TPM_INTF_STS_VALID_INT) dev_dbg(dev, "\tSts Valid Int Support\n"); if (intfcaps & TPM_INTF_DATA_AVAIL_INT) dev_dbg(dev, "\tData Avail Int Support\n"); /* get the timeouts before testing for irqs */ if (tpm_get_timeouts(chip)) { dev_err(dev, "Could not get TPM timeouts and durations\n"); rc = -ENODEV; goto out_err; } if (tpm_do_selftest(chip)) { dev_err(dev, "TPM self test failed\n"); rc = -ENODEV; goto out_err; } /* INTERRUPT Setup */ init_waitqueue_head(&chip->vendor.read_queue); init_waitqueue_head(&chip->vendor.int_queue); intmask = ioread32(chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality)); intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT; iowrite32(intmask, chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality)); if (interrupts) chip->vendor.irq = irq; if (interrupts && !chip->vendor.irq) { irq_s = ioread8(chip->vendor.iobase + TPM_INT_VECTOR(chip->vendor.locality)); if (irq_s) { irq_e = irq_s; } else { irq_s = 3; irq_e = 15; } for (i = irq_s; i <= irq_e && chip->vendor.irq == 0; i++) { iowrite8(i, chip->vendor.iobase + TPM_INT_VECTOR(chip->vendor.locality)); if (request_irq (i, tis_int_probe, IRQF_SHARED, chip->vendor.miscdev.name, chip) != 0) { dev_info(chip->dev, "Unable to request irq: %d for probe\n", i); continue; } /* Clear all existing */ iowrite32(ioread32 (chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality)), chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality)); /* Turn on */ iowrite32(intmask | TPM_GLOBAL_INT_ENABLE, chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality)); chip->vendor.probed_irq = 0; /* Generate 
Interrupts */ tpm_gen_interrupt(chip); chip->vendor.irq = chip->vendor.probed_irq; /* free_irq will call into tis_int_probe; clear all irqs we haven't seen while doing tpm_gen_interrupt */ iowrite32(ioread32 (chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality)), chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality)); /* Turn off */ iowrite32(intmask, chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality)); free_irq(i, chip); } } if (chip->vendor.irq) { iowrite8(chip->vendor.irq, chip->vendor.iobase + TPM_INT_VECTOR(chip->vendor.locality)); if (request_irq (chip->vendor.irq, tis_int_handler, IRQF_SHARED, chip->vendor.miscdev.name, chip) != 0) { dev_info(chip->dev, "Unable to request irq: %d for use\n", chip->vendor.irq); chip->vendor.irq = 0; } else { /* Clear all existing */ iowrite32(ioread32 (chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality)), chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality)); /* Turn on */ iowrite32(intmask | TPM_GLOBAL_INT_ENABLE, chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality)); } } INIT_LIST_HEAD(&chip->vendor.list); mutex_lock(&tis_lock); list_add(&chip->vendor.list, &tis_chips); mutex_unlock(&tis_lock); return 0; out_err: if (chip->vendor.iobase) iounmap(chip->vendor.iobase); tpm_remove_hardware(chip->dev); return rc; } #if defined(CONFIG_PNP) || defined(CONFIG_PM_SLEEP) static void tpm_tis_reenable_interrupts(struct tpm_chip *chip) { u32 intmask; /* reenable interrupts that device may have lost or BIOS/firmware may have disabled */ iowrite8(chip->vendor.irq, chip->vendor.iobase + TPM_INT_VECTOR(chip->vendor.locality)); intmask = ioread32(chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality)); intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE; iowrite32(intmask, chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality)); } #endif #ifdef CONFIG_PNP static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev, const struct pnp_device_id *pnp_id) { resource_size_t start, len; unsigned int irq = 0; start = pnp_mem_start(pnp_dev, 0); len = pnp_mem_len(pnp_dev, 0); if (pnp_irq_valid(pnp_dev, 0)) irq = pnp_irq(pnp_dev, 0); else interrupts = 0; if (is_itpm(pnp_dev)) itpm = 1; return tpm_tis_init(&pnp_dev->dev, start, len, irq); } static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg) { return tpm_pm_suspend(&dev->dev); } static int tpm_tis_pnp_resume(struct pnp_dev *dev) { struct tpm_chip *chip = pnp_get_drvdata(dev); int ret; if (chip->vendor.irq) tpm_tis_reenable_interrupts(chip); ret = tpm_pm_resume(&dev->dev); if (!ret) tpm_do_selftest(chip); return ret; } static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = { {"PNP0C31", 0}, /* TPM */ {"ATM1200", 0}, /* Atmel */ {"IFX0102", 0}, /* Infineon */ {"BCM0101", 0}, /* Broadcom */ {"BCM0102", 0}, /* Broadcom */ {"NSC1200", 0}, /* National */ {"ICO0102", 0}, /* Intel */ /* Add new here */ {"", 0}, /* User Specified */ {"", 0} /* Terminator */ }; MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl); static __devexit void tpm_tis_pnp_remove(struct pnp_dev *dev) { struct tpm_chip *chip = pnp_get_drvdata(dev); tpm_dev_vendor_release(chip); kfree(chip); } static struct pnp_driver tis_pnp_driver = { .name = "tpm_tis", .id_table = tpm_pnp_tbl, .probe = tpm_tis_pnp_init, .suspend = tpm_tis_pnp_suspend, .resume = tpm_tis_pnp_resume, .remove = tpm_tis_pnp_remove, }; #define TIS_HID_USR_IDX sizeof(tpm_pnp_tbl)/sizeof(struct pnp_device_id) -2 module_param_string(hid, 
tpm_pnp_tbl[TIS_HID_USR_IDX].id, sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444); MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe"); #endif #ifdef CONFIG_PM_SLEEP static int tpm_tis_resume(struct device *dev) { struct tpm_chip *chip = dev_get_drvdata(dev); if (chip->vendor.irq) tpm_tis_reenable_interrupts(chip); return tpm_pm_resume(dev); } #endif static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume); static struct platform_driver tis_drv = { .driver = { .name = "tpm_tis", .owner = THIS_MODULE, .pm = &tpm_tis_pm, }, }; static struct platform_device *pdev; static bool force; module_param(force, bool, 0444); MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry"); static int __init init_tis(void) { int rc; #ifdef CONFIG_PNP if (!force) return pnp_register_driver(&tis_pnp_driver); #endif rc = platform_driver_register(&tis_drv); if (rc < 0) return rc; if (IS_ERR(pdev=platform_device_register_simple("tpm_tis", -1, NULL, 0))) return PTR_ERR(pdev); if((rc=tpm_tis_init(&pdev->dev, TIS_MEM_BASE, TIS_MEM_LEN, 0)) != 0) { platform_device_unregister(pdev); platform_driver_unregister(&tis_drv); } return rc; } static void __exit cleanup_tis(void) { struct tpm_vendor_specific *i, *j; struct tpm_chip *chip; mutex_lock(&tis_lock); list_for_each_entry_safe(i, j, &tis_chips, list) { chip = to_tpm_chip(i); tpm_remove_hardware(chip->dev); iowrite32(~TPM_GLOBAL_INT_ENABLE & ioread32(chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor. locality)), chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality)); release_locality(chip, chip->vendor.locality, 1); if (chip->vendor.irq) free_irq(chip->vendor.irq, chip); iounmap(i->iobase); list_del(&i->list); } mutex_unlock(&tis_lock); #ifdef CONFIG_PNP if (!force) { pnp_unregister_driver(&tis_pnp_driver); return; } #endif platform_device_unregister(pdev); platform_driver_unregister(&tis_drv); } module_init(init_tis); module_exit(cleanup_tis); MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)"); MODULE_DESCRIPTION("TPM Driver"); MODULE_VERSION("2.0"); MODULE_LICENSE("GPL");
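/*
 * For reference: a simplified, hypothetical sketch of the polling half of
 * wait_for_tpm_stat() (the real helper lives in tpm.c), showing why the
 * callers above may treat a zero return as "all bits in 'mask' are set":
 * the status register is re-read every TPM_TIMEOUT milliseconds until the
 * mask matches or 'timeout' jiffies elapse. Names and details here are
 * assumptions for illustration, not the canonical implementation.
 */
static int __maybe_unused example_wait_for_stat(struct tpm_chip *chip,
						u8 mask,
						unsigned long timeout)
{
	unsigned long stop = jiffies + timeout;

	do {
		if ((chip->vendor.status(chip) & mask) == mask)
			return 0;
		msleep(TPM_TIMEOUT);
	} while (time_before(jiffies, stop));

	return -ETIME;
}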
gpl-2.0
wsrean/ReanEmu
src/server/scripts/EasternKingdoms/BlackwingLair/boss_ebonroc.cpp
43
3280
/*
 * Copyright (C) 2008-2012 TrinityCore <http://www.trinitycore.org/>
 * Copyright (C) 2006-2009 ScriptDev2 <https://scriptdev2.svn.sourceforge.net/>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/* ScriptData
SDName: Boss_Ebonroc
SD%Complete: 50
SDComment: Shadow of Ebonroc needs core support
SDCategory: Blackwing Lair
EndScriptData */

#include "ScriptPCH.h"

#define SPELL_SHADOWFLAME       22539
#define SPELL_WINGBUFFET        18500
#define SPELL_SHADOWOFEBONROC   23340
#define SPELL_HEAL              41386 //The Heal spell of his Shadow

class boss_ebonroc : public CreatureScript
{
public:
    boss_ebonroc() : CreatureScript("boss_ebonroc") { }

    CreatureAI* GetAI(Creature* creature) const
    {
        return new boss_ebonrocAI (creature);
    }

    struct boss_ebonrocAI : public ScriptedAI
    {
        boss_ebonrocAI(Creature* creature) : ScriptedAI(creature) {}

        uint32 ShadowFlame_Timer;
        uint32 WingBuffet_Timer;
        uint32 ShadowOfEbonroc_Timer;
        uint32 Heal_Timer;

        void Reset()
        {
            ShadowFlame_Timer = 15000;                      //These times are probably wrong
            WingBuffet_Timer = 30000;
            ShadowOfEbonroc_Timer = 45000;
            Heal_Timer = 1000;
        }

        void EnterCombat(Unit* /*who*/)
        {
            DoZoneInCombat();
        }

        void UpdateAI(const uint32 diff)
        {
            if (!UpdateVictim())
                return;

            //Shadowflame Timer
            if (ShadowFlame_Timer <= diff)
            {
                DoCast(me->getVictim(), SPELL_SHADOWFLAME);
                ShadowFlame_Timer = urand(12000, 15000);
            } else ShadowFlame_Timer -= diff;

            //Wing Buffet Timer
            if (WingBuffet_Timer <= diff)
            {
                DoCast(me->getVictim(), SPELL_WINGBUFFET);
                WingBuffet_Timer = 25000;
            } else WingBuffet_Timer -= diff;

            //Shadow of Ebonroc Timer
            if (ShadowOfEbonroc_Timer <= diff)
            {
                DoCast(me->getVictim(), SPELL_SHADOWOFEBONROC);
                ShadowOfEbonroc_Timer = urand(25000, 35000);
            } else ShadowOfEbonroc_Timer -= diff;

            if (me->getVictim()->HasAura(SPELL_SHADOWOFEBONROC))
            {
                if (Heal_Timer <= diff)
                {
                    DoCast(me, SPELL_HEAL);
                    Heal_Timer = urand(1000, 3000);
                } else Heal_Timer -= diff;
            }

            DoMeleeAttackIfReady();
        }
    };
};

void AddSC_boss_ebonroc()
{
    new boss_ebonroc();
}
gpl-2.0
iwinoto/v4l-media_build
media/drivers/spi/spi-fsl-espi.c
43
19522
/* * Freescale eSPI controller driver. * * Copyright 2010 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/module.h> #include <linux/delay.h> #include <linux/irq.h> #include <linux/spi/spi.h> #include <linux/platform_device.h> #include <linux/fsl_devices.h> #include <linux/mm.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/interrupt.h> #include <linux/err.h> #include <sysdev/fsl_soc.h> #include "spi-fsl-lib.h" /* eSPI Controller registers */ struct fsl_espi_reg { __be32 mode; /* 0x000 - eSPI mode register */ __be32 event; /* 0x004 - eSPI event register */ __be32 mask; /* 0x008 - eSPI mask register */ __be32 command; /* 0x00c - eSPI command register */ __be32 transmit; /* 0x010 - eSPI transmit FIFO access register*/ __be32 receive; /* 0x014 - eSPI receive FIFO access register*/ u8 res[8]; /* 0x018 - 0x01c reserved */ __be32 csmode[4]; /* 0x020 - 0x02c eSPI cs mode register */ }; struct fsl_espi_transfer { const void *tx_buf; void *rx_buf; unsigned len; unsigned n_tx; unsigned n_rx; unsigned actual_length; int status; }; /* eSPI Controller mode register definitions */ #define SPMODE_ENABLE (1 << 31) #define SPMODE_LOOP (1 << 30) #define SPMODE_TXTHR(x) ((x) << 8) #define SPMODE_RXTHR(x) ((x) << 0) /* eSPI Controller CS mode register definitions */ #define CSMODE_CI_INACTIVEHIGH (1 << 31) #define CSMODE_CP_BEGIN_EDGECLK (1 << 30) #define CSMODE_REV (1 << 29) #define CSMODE_DIV16 (1 << 28) #define CSMODE_PM(x) ((x) << 24) #define CSMODE_POL_1 (1 << 20) #define CSMODE_LEN(x) ((x) << 16) #define CSMODE_BEF(x) ((x) << 12) #define CSMODE_AFT(x) ((x) << 8) #define CSMODE_CG(x) ((x) << 3) /* Default mode/csmode for eSPI controller */ #define SPMODE_INIT_VAL (SPMODE_TXTHR(4) | SPMODE_RXTHR(3)) #define CSMODE_INIT_VAL (CSMODE_POL_1 | CSMODE_BEF(0) \ | CSMODE_AFT(0) | CSMODE_CG(1)) /* SPIE register values */ #define SPIE_NE 0x00000200 /* Not empty */ #define SPIE_NF 0x00000100 /* Not full */ /* SPIM register values */ #define SPIM_NE 0x00000200 /* Not empty */ #define SPIM_NF 0x00000100 /* Not full */ #define SPIE_RXCNT(reg) ((reg >> 24) & 0x3F) #define SPIE_TXCNT(reg) ((reg >> 16) & 0x3F) /* SPCOM register values */ #define SPCOM_CS(x) ((x) << 30) #define SPCOM_TRANLEN(x) ((x) << 0) #define SPCOM_TRANLEN_MAX 0xFFFF /* Max transaction length */ static void fsl_espi_change_mode(struct spi_device *spi) { struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master); struct spi_mpc8xxx_cs *cs = spi->controller_state; struct fsl_espi_reg *reg_base = mspi->reg_base; __be32 __iomem *mode = &reg_base->csmode[spi->chip_select]; __be32 __iomem *espi_mode = &reg_base->mode; u32 tmp; unsigned long flags; /* Turn off IRQs locally to minimize time that SPI is disabled. 
*/ local_irq_save(flags); /* Turn off SPI unit prior changing mode */ tmp = mpc8xxx_spi_read_reg(espi_mode); mpc8xxx_spi_write_reg(espi_mode, tmp & ~SPMODE_ENABLE); mpc8xxx_spi_write_reg(mode, cs->hw_mode); mpc8xxx_spi_write_reg(espi_mode, tmp); local_irq_restore(flags); } static u32 fsl_espi_tx_buf_lsb(struct mpc8xxx_spi *mpc8xxx_spi) { u32 data; u16 data_h; u16 data_l; const u32 *tx = mpc8xxx_spi->tx; if (!tx) return 0; data = *tx++ << mpc8xxx_spi->tx_shift; data_l = data & 0xffff; data_h = (data >> 16) & 0xffff; swab16s(&data_l); swab16s(&data_h); data = data_h | data_l; mpc8xxx_spi->tx = tx; return data; } static int fsl_espi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) { struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); int bits_per_word = 0; u8 pm; u32 hz = 0; struct spi_mpc8xxx_cs *cs = spi->controller_state; if (t) { bits_per_word = t->bits_per_word; hz = t->speed_hz; } /* spi_transfer level calls that work per-word */ if (!bits_per_word) bits_per_word = spi->bits_per_word; if (!hz) hz = spi->max_speed_hz; cs->rx_shift = 0; cs->tx_shift = 0; cs->get_rx = mpc8xxx_spi_rx_buf_u32; cs->get_tx = mpc8xxx_spi_tx_buf_u32; if (bits_per_word <= 8) { cs->rx_shift = 8 - bits_per_word; } else { cs->rx_shift = 16 - bits_per_word; if (spi->mode & SPI_LSB_FIRST) cs->get_tx = fsl_espi_tx_buf_lsb; } mpc8xxx_spi->rx_shift = cs->rx_shift; mpc8xxx_spi->tx_shift = cs->tx_shift; mpc8xxx_spi->get_rx = cs->get_rx; mpc8xxx_spi->get_tx = cs->get_tx; bits_per_word = bits_per_word - 1; /* mask out bits we are going to set */ cs->hw_mode &= ~(CSMODE_LEN(0xF) | CSMODE_DIV16 | CSMODE_PM(0xF)); cs->hw_mode |= CSMODE_LEN(bits_per_word); if ((mpc8xxx_spi->spibrg / hz) > 64) { cs->hw_mode |= CSMODE_DIV16; pm = DIV_ROUND_UP(mpc8xxx_spi->spibrg, hz * 16 * 4); WARN_ONCE(pm > 33, "%s: Requested speed is too low: %d Hz. 
" "Will use %d Hz instead.\n", dev_name(&spi->dev), hz, mpc8xxx_spi->spibrg / (4 * 16 * (32 + 1))); if (pm > 33) pm = 33; } else { pm = DIV_ROUND_UP(mpc8xxx_spi->spibrg, hz * 4); } if (pm) pm--; if (pm < 2) pm = 2; cs->hw_mode |= CSMODE_PM(pm); fsl_espi_change_mode(spi); return 0; } static int fsl_espi_cpu_bufs(struct mpc8xxx_spi *mspi, struct spi_transfer *t, unsigned int len) { u32 word; struct fsl_espi_reg *reg_base = mspi->reg_base; mspi->count = len; /* enable rx ints */ mpc8xxx_spi_write_reg(&reg_base->mask, SPIM_NE); /* transmit word */ word = mspi->get_tx(mspi); mpc8xxx_spi_write_reg(&reg_base->transmit, word); return 0; } static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t) { struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); struct fsl_espi_reg *reg_base = mpc8xxx_spi->reg_base; unsigned int len = t->len; int ret; mpc8xxx_spi->len = t->len; len = roundup(len, 4) / 4; mpc8xxx_spi->tx = t->tx_buf; mpc8xxx_spi->rx = t->rx_buf; reinit_completion(&mpc8xxx_spi->done); /* Set SPCOM[CS] and SPCOM[TRANLEN] field */ if ((t->len - 1) > SPCOM_TRANLEN_MAX) { dev_err(mpc8xxx_spi->dev, "Transaction length (%d)" " beyond the SPCOM[TRANLEN] field\n", t->len); return -EINVAL; } mpc8xxx_spi_write_reg(&reg_base->command, (SPCOM_CS(spi->chip_select) | SPCOM_TRANLEN(t->len - 1))); ret = fsl_espi_cpu_bufs(mpc8xxx_spi, t, len); if (ret) return ret; wait_for_completion(&mpc8xxx_spi->done); /* disable rx ints */ mpc8xxx_spi_write_reg(&reg_base->mask, 0); return mpc8xxx_spi->count; } static inline void fsl_espi_addr2cmd(unsigned int addr, u8 *cmd) { if (cmd) { cmd[1] = (u8)(addr >> 16); cmd[2] = (u8)(addr >> 8); cmd[3] = (u8)(addr >> 0); } } static inline unsigned int fsl_espi_cmd2addr(u8 *cmd) { if (cmd) return cmd[1] << 16 | cmd[2] << 8 | cmd[3] << 0; return 0; } static void fsl_espi_do_trans(struct spi_message *m, struct fsl_espi_transfer *tr) { struct spi_device *spi = m->spi; struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master); struct fsl_espi_transfer *espi_trans = tr; struct spi_message message; struct spi_transfer *t, *first, trans; int status = 0; spi_message_init(&message); memset(&trans, 0, sizeof(trans)); first = list_first_entry(&m->transfers, struct spi_transfer, transfer_list); list_for_each_entry(t, &m->transfers, transfer_list) { if ((first->bits_per_word != t->bits_per_word) || (first->speed_hz != t->speed_hz)) { espi_trans->status = -EINVAL; dev_err(mspi->dev, "bits_per_word/speed_hz should be same for the same SPI transfer\n"); return; } trans.speed_hz = t->speed_hz; trans.bits_per_word = t->bits_per_word; trans.delay_usecs = max(first->delay_usecs, t->delay_usecs); } trans.len = espi_trans->len; trans.tx_buf = espi_trans->tx_buf; trans.rx_buf = espi_trans->rx_buf; spi_message_add_tail(&trans, &message); list_for_each_entry(t, &message.transfers, transfer_list) { if (t->bits_per_word || t->speed_hz) { status = -EINVAL; status = fsl_espi_setup_transfer(spi, t); if (status < 0) break; } if (t->len) status = fsl_espi_bufs(spi, t); if (status) { status = -EMSGSIZE; break; } if (t->delay_usecs) udelay(t->delay_usecs); } espi_trans->status = status; fsl_espi_setup_transfer(spi, NULL); } static void fsl_espi_cmd_trans(struct spi_message *m, struct fsl_espi_transfer *trans, u8 *rx_buff) { struct spi_transfer *t; u8 *local_buf; int i = 0; struct fsl_espi_transfer *espi_trans = trans; local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL); if (!local_buf) { espi_trans->status = -ENOMEM; return; } list_for_each_entry(t, &m->transfers, transfer_list) { 
if (t->tx_buf) { memcpy(local_buf + i, t->tx_buf, t->len); i += t->len; } } espi_trans->tx_buf = local_buf; espi_trans->rx_buf = local_buf + espi_trans->n_tx; fsl_espi_do_trans(m, espi_trans); espi_trans->actual_length = espi_trans->len; kfree(local_buf); } static void fsl_espi_rw_trans(struct spi_message *m, struct fsl_espi_transfer *trans, u8 *rx_buff) { struct fsl_espi_transfer *espi_trans = trans; unsigned int n_tx = espi_trans->n_tx; unsigned int n_rx = espi_trans->n_rx; struct spi_transfer *t; u8 *local_buf; u8 *rx_buf = rx_buff; unsigned int trans_len; unsigned int addr; int i, pos, loop; local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL); if (!local_buf) { espi_trans->status = -ENOMEM; return; } for (pos = 0, loop = 0; pos < n_rx; pos += trans_len, loop++) { trans_len = n_rx - pos; if (trans_len > SPCOM_TRANLEN_MAX - n_tx) trans_len = SPCOM_TRANLEN_MAX - n_tx; i = 0; list_for_each_entry(t, &m->transfers, transfer_list) { if (t->tx_buf) { memcpy(local_buf + i, t->tx_buf, t->len); i += t->len; } } if (pos > 0) { addr = fsl_espi_cmd2addr(local_buf); addr += pos; fsl_espi_addr2cmd(addr, local_buf); } espi_trans->n_tx = n_tx; espi_trans->n_rx = trans_len; espi_trans->len = trans_len + n_tx; espi_trans->tx_buf = local_buf; espi_trans->rx_buf = local_buf + n_tx; fsl_espi_do_trans(m, espi_trans); memcpy(rx_buf + pos, espi_trans->rx_buf + n_tx, trans_len); if (loop > 0) espi_trans->actual_length += espi_trans->len - n_tx; else espi_trans->actual_length += espi_trans->len; } kfree(local_buf); } static void fsl_espi_do_one_msg(struct spi_message *m) { struct spi_transfer *t; u8 *rx_buf = NULL; unsigned int n_tx = 0; unsigned int n_rx = 0; struct fsl_espi_transfer espi_trans; list_for_each_entry(t, &m->transfers, transfer_list) { if (t->tx_buf) n_tx += t->len; if (t->rx_buf) { n_rx += t->len; rx_buf = t->rx_buf; } } espi_trans.n_tx = n_tx; espi_trans.n_rx = n_rx; espi_trans.len = n_tx + n_rx; espi_trans.actual_length = 0; espi_trans.status = 0; if (!rx_buf) fsl_espi_cmd_trans(m, &espi_trans, NULL); else fsl_espi_rw_trans(m, &espi_trans, rx_buf); m->actual_length = espi_trans.actual_length; m->status = espi_trans.status; if (m->complete) m->complete(m->context); } static int fsl_espi_setup(struct spi_device *spi) { struct mpc8xxx_spi *mpc8xxx_spi; struct fsl_espi_reg *reg_base; int retval; u32 hw_mode; u32 loop_mode; struct spi_mpc8xxx_cs *cs = spi->controller_state; if (!spi->max_speed_hz) return -EINVAL; if (!cs) { cs = kzalloc(sizeof *cs, GFP_KERNEL); if (!cs) return -ENOMEM; spi->controller_state = cs; } mpc8xxx_spi = spi_master_get_devdata(spi->master); reg_base = mpc8xxx_spi->reg_base; hw_mode = cs->hw_mode; /* Save original settings */ cs->hw_mode = mpc8xxx_spi_read_reg( &reg_base->csmode[spi->chip_select]); /* mask out bits we are going to set */ cs->hw_mode &= ~(CSMODE_CP_BEGIN_EDGECLK | CSMODE_CI_INACTIVEHIGH | CSMODE_REV); if (spi->mode & SPI_CPHA) cs->hw_mode |= CSMODE_CP_BEGIN_EDGECLK; if (spi->mode & SPI_CPOL) cs->hw_mode |= CSMODE_CI_INACTIVEHIGH; if (!(spi->mode & SPI_LSB_FIRST)) cs->hw_mode |= CSMODE_REV; /* Handle the loop mode */ loop_mode = mpc8xxx_spi_read_reg(&reg_base->mode); loop_mode &= ~SPMODE_LOOP; if (spi->mode & SPI_LOOP) loop_mode |= SPMODE_LOOP; mpc8xxx_spi_write_reg(&reg_base->mode, loop_mode); retval = fsl_espi_setup_transfer(spi, NULL); if (retval < 0) { cs->hw_mode = hw_mode; /* Restore settings */ return retval; } return 0; } void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) { struct fsl_espi_reg *reg_base = mspi->reg_base; /* We need handle RX 
first */ if (events & SPIE_NE) { u32 rx_data, tmp; u8 rx_data_8; /* Spin until RX is done */ while (SPIE_RXCNT(events) < min(4, mspi->len)) { cpu_relax(); events = mpc8xxx_spi_read_reg(&reg_base->event); } if (mspi->len >= 4) { rx_data = mpc8xxx_spi_read_reg(&reg_base->receive); } else { tmp = mspi->len; rx_data = 0; while (tmp--) { rx_data_8 = in_8((u8 *)&reg_base->receive); rx_data |= (rx_data_8 << (tmp * 8)); } rx_data <<= (4 - mspi->len) * 8; } mspi->len -= 4; if (mspi->rx) mspi->get_rx(rx_data, mspi); } if (!(events & SPIE_NF)) { int ret; /* spin until TX is done */ ret = spin_event_timeout(((events = mpc8xxx_spi_read_reg( &reg_base->event)) & SPIE_NF) == 0, 1000, 0); if (!ret) { dev_err(mspi->dev, "tired waiting for SPIE_NF\n"); return; } } /* Clear the events */ mpc8xxx_spi_write_reg(&reg_base->event, events); mspi->count -= 1; if (mspi->count) { u32 word = mspi->get_tx(mspi); mpc8xxx_spi_write_reg(&reg_base->transmit, word); } else { complete(&mspi->done); } } static irqreturn_t fsl_espi_irq(s32 irq, void *context_data) { struct mpc8xxx_spi *mspi = context_data; struct fsl_espi_reg *reg_base = mspi->reg_base; irqreturn_t ret = IRQ_NONE; u32 events; /* Get interrupt events(tx/rx) */ events = mpc8xxx_spi_read_reg(&reg_base->event); if (events) ret = IRQ_HANDLED; dev_vdbg(mspi->dev, "%s: events %x\n", __func__, events); fsl_espi_cpu_irq(mspi, events); return ret; } static void fsl_espi_remove(struct mpc8xxx_spi *mspi) { iounmap(mspi->reg_base); } static struct spi_master * fsl_espi_probe(struct device *dev, struct resource *mem, unsigned int irq) { struct fsl_spi_platform_data *pdata = dev_get_platdata(dev); struct spi_master *master; struct mpc8xxx_spi *mpc8xxx_spi; struct fsl_espi_reg *reg_base; u32 regval; int i, ret = 0; master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi)); if (!master) { ret = -ENOMEM; goto err; } dev_set_drvdata(dev, master); ret = mpc8xxx_spi_probe(dev, mem, irq); if (ret) goto err_probe; master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16); master->setup = fsl_espi_setup; mpc8xxx_spi = spi_master_get_devdata(master); mpc8xxx_spi->spi_do_one_msg = fsl_espi_do_one_msg; mpc8xxx_spi->spi_remove = fsl_espi_remove; mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem)); if (!mpc8xxx_spi->reg_base) { ret = -ENOMEM; goto err_probe; } reg_base = mpc8xxx_spi->reg_base; /* Register for SPI Interrupt */ ret = request_irq(mpc8xxx_spi->irq, fsl_espi_irq, 0, "fsl_espi", mpc8xxx_spi); if (ret) goto free_irq; if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) { mpc8xxx_spi->rx_shift = 16; mpc8xxx_spi->tx_shift = 24; } /* SPI controller initializations */ mpc8xxx_spi_write_reg(&reg_base->mode, 0); mpc8xxx_spi_write_reg(&reg_base->mask, 0); mpc8xxx_spi_write_reg(&reg_base->command, 0); mpc8xxx_spi_write_reg(&reg_base->event, 0xffffffff); /* Init eSPI CS mode register */ for (i = 0; i < pdata->max_chipselect; i++) mpc8xxx_spi_write_reg(&reg_base->csmode[i], CSMODE_INIT_VAL); /* Enable SPI interface */ regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; mpc8xxx_spi_write_reg(&reg_base->mode, regval); ret = spi_register_master(master); if (ret < 0) goto unreg_master; dev_info(dev, "at 0x%p (irq = %d)\n", reg_base, mpc8xxx_spi->irq); return master; unreg_master: free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); free_irq: iounmap(mpc8xxx_spi->reg_base); err_probe: spi_master_put(master); err: return ERR_PTR(ret); } static int of_fsl_espi_get_chipselects(struct device *dev) { struct device_node *np = dev->of_node; struct fsl_spi_platform_data *pdata = 
dev_get_platdata(dev); const u32 *prop; int len; prop = of_get_property(np, "fsl,espi-num-chipselects", &len); if (!prop || len < sizeof(*prop)) { dev_err(dev, "No 'fsl,espi-num-chipselects' property\n"); return -EINVAL; } pdata->max_chipselect = *prop; pdata->cs_control = NULL; return 0; } static int of_fsl_espi_probe(struct platform_device *ofdev) { struct device *dev = &ofdev->dev; struct device_node *np = ofdev->dev.of_node; struct spi_master *master; struct resource mem; unsigned int irq; int ret = -ENOMEM; ret = of_mpc8xxx_spi_probe(ofdev); if (ret) return ret; ret = of_fsl_espi_get_chipselects(dev); if (ret) goto err; ret = of_address_to_resource(np, 0, &mem); if (ret) goto err; irq = irq_of_parse_and_map(np, 0); if (!irq) { ret = -EINVAL; goto err; } master = fsl_espi_probe(dev, &mem, irq); if (IS_ERR(master)) { ret = PTR_ERR(master); goto err; } return 0; err: return ret; } static int of_fsl_espi_remove(struct platform_device *dev) { return mpc8xxx_spi_remove(&dev->dev); } #ifdef CONFIG_PM_SLEEP static int of_fsl_espi_suspend(struct device *dev) { struct spi_master *master = dev_get_drvdata(dev); struct mpc8xxx_spi *mpc8xxx_spi; struct fsl_espi_reg *reg_base; u32 regval; int ret; mpc8xxx_spi = spi_master_get_devdata(master); reg_base = mpc8xxx_spi->reg_base; ret = spi_master_suspend(master); if (ret) { dev_warn(dev, "cannot suspend master\n"); return ret; } regval = mpc8xxx_spi_read_reg(&reg_base->mode); regval &= ~SPMODE_ENABLE; mpc8xxx_spi_write_reg(&reg_base->mode, regval); return 0; } static int of_fsl_espi_resume(struct device *dev) { struct fsl_spi_platform_data *pdata = dev_get_platdata(dev); struct spi_master *master = dev_get_drvdata(dev); struct mpc8xxx_spi *mpc8xxx_spi; struct fsl_espi_reg *reg_base; u32 regval; int i; mpc8xxx_spi = spi_master_get_devdata(master); reg_base = mpc8xxx_spi->reg_base; /* SPI controller initializations */ mpc8xxx_spi_write_reg(&reg_base->mode, 0); mpc8xxx_spi_write_reg(&reg_base->mask, 0); mpc8xxx_spi_write_reg(&reg_base->command, 0); mpc8xxx_spi_write_reg(&reg_base->event, 0xffffffff); /* Init eSPI CS mode register */ for (i = 0; i < pdata->max_chipselect; i++) mpc8xxx_spi_write_reg(&reg_base->csmode[i], CSMODE_INIT_VAL); /* Enable SPI interface */ regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; mpc8xxx_spi_write_reg(&reg_base->mode, regval); return spi_master_resume(master); } #endif /* CONFIG_PM_SLEEP */ static const struct dev_pm_ops espi_pm = { SET_SYSTEM_SLEEP_PM_OPS(of_fsl_espi_suspend, of_fsl_espi_resume) }; static const struct of_device_id of_fsl_espi_match[] = { { .compatible = "fsl,mpc8536-espi" }, {} }; MODULE_DEVICE_TABLE(of, of_fsl_espi_match); static struct platform_driver fsl_espi_driver = { .driver = { .name = "fsl_espi", .owner = THIS_MODULE, .of_match_table = of_fsl_espi_match, .pm = &espi_pm, }, .probe = of_fsl_espi_probe, .remove = of_fsl_espi_remove, }; module_platform_driver(fsl_espi_driver); MODULE_AUTHOR("Mingkai Hu"); MODULE_DESCRIPTION("Enhanced Freescale SPI Driver"); MODULE_LICENSE("GPL");
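/*
 * Reference sketch (not called anywhere above): the bit clock that
 * fsl_espi_setup_transfer() ends up programming, reconstructed from the
 * CSMODE_PM and CSMODE_DIV16 fields. The formula matches the WARN_ONCE()
 * message in that function: SPI clock = spibrg / (4 * (PM + 1)), divided
 * by a further 16 when DIV16 is set. Worked example: spibrg = 200 MHz,
 * hz = 10 MHz gives a ratio of 20 (<= 64), so DIV16 stays clear and
 * PM = DIV_ROUND_UP(200 MHz, 10 MHz * 4) - 1 = 4, i.e. exactly 10 MHz.
 */
static unsigned int __maybe_unused fsl_espi_effective_hz(unsigned int spibrg,
							 unsigned int pm,
							 bool div16)
{
	return spibrg / ((div16 ? 16u : 1u) * 4u * (pm + 1u));
}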
gpl-2.0
hexianren/linux-3.7-Panda
drivers/staging/vt6655/hostap.c
43
23624
/* * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * File: hostap.c * * Purpose: handle hostap deamon ioctl input/out functions * * Author: Lyndon Chen * * Date: Oct. 20, 2003 * * Functions: * * Revision History: * */ #include "hostap.h" #include "iocmd.h" #include "mac.h" #include "card.h" #include "baseband.h" #include "wpactl.h" #include "key.h" #define VIAWGET_HOSTAPD_MAX_BUF_SIZE 1024 #define HOSTAP_CRYPT_FLAG_SET_TX_KEY BIT0 #define HOSTAP_CRYPT_ERR_UNKNOWN_ADDR 3 #define HOSTAP_CRYPT_ERR_KEY_SET_FAILED 5 /*--------------------- Static Definitions -------------------------*/ /*--------------------- Static Classes ----------------------------*/ /*--------------------- Static Variables --------------------------*/ //static int msglevel =MSG_LEVEL_DEBUG; static int msglevel =MSG_LEVEL_INFO; /*--------------------- Static Functions --------------------------*/ /*--------------------- Export Variables --------------------------*/ /* * Description: * register net_device (AP) for hostap deamon * * Parameters: * In: * pDevice - * rtnl_locked - * Out: * * Return Value: * */ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked) { PSDevice apdev_priv; struct net_device *dev = pDevice->dev; int ret; const struct net_device_ops apdev_netdev_ops = { .ndo_start_xmit = pDevice->tx_80211, }; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name); pDevice->apdev = kzalloc(sizeof(struct net_device), GFP_KERNEL); if (pDevice->apdev == NULL) return -ENOMEM; apdev_priv = netdev_priv(pDevice->apdev); *apdev_priv = *pDevice; memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN); pDevice->apdev->netdev_ops = &apdev_netdev_ops; pDevice->apdev->type = ARPHRD_IEEE80211; pDevice->apdev->base_addr = dev->base_addr; pDevice->apdev->irq = dev->irq; pDevice->apdev->mem_start = dev->mem_start; pDevice->apdev->mem_end = dev->mem_end; sprintf(pDevice->apdev->name, "%sap", dev->name); if (rtnl_locked) ret = register_netdevice(pDevice->apdev); else ret = register_netdev(pDevice->apdev); if (ret) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: register_netdevice(AP) failed!\n", dev->name); return -1; } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Registered netdevice %s for AP management\n", dev->name, pDevice->apdev->name); KeyvInitTable(&pDevice->sKey, pDevice->PortOffset); return 0; } /* * Description: * unregister net_device(AP) * * Parameters: * In: * pDevice - * rtnl_locked - * Out: * * Return Value: * */ static int hostap_disable_hostapd(PSDevice pDevice, int rtnl_locked) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: disabling hostapd mode\n", pDevice->dev->name); if (pDevice->apdev && pDevice->apdev->name && pDevice->apdev->name[0]) { if (rtnl_locked) unregister_netdevice(pDevice->apdev); else unregister_netdev(pDevice->apdev); DBG_PRT(MSG_LEVEL_DEBUG, 
KERN_INFO "%s: Netdevice %s unregistered\n", pDevice->dev->name, pDevice->apdev->name); } kfree(pDevice->apdev); pDevice->apdev = NULL; pDevice->bEnable8021x = false; pDevice->bEnableHostWEP = false; pDevice->bEncryptionEnable = false; //4.2007-0118-03,<Add> by EinsnLiu //execute some clear work pDevice->pMgmt->byCSSPK=KEY_CTL_NONE; pDevice->pMgmt->byCSSGK=KEY_CTL_NONE; KeyvInitTable(&pDevice->sKey,pDevice->PortOffset); return 0; } /* * Description: * Set enable/disable hostapd mode * * Parameters: * In: * pDevice - * rtnl_locked - * Out: * * Return Value: * */ int vt6655_hostap_set_hostapd(PSDevice pDevice, int val, int rtnl_locked) { if (val < 0 || val > 1) return -EINVAL; if (pDevice->bEnableHostapd == val) return 0; pDevice->bEnableHostapd = val; if (val) return hostap_enable_hostapd(pDevice, rtnl_locked); else return hostap_disable_hostapd(pDevice, rtnl_locked); } /* * Description: * remove station function supported for hostap deamon * * Parameters: * In: * pDevice - * param - * Out: * * Return Value: * */ static int hostap_remove_sta(PSDevice pDevice, struct viawget_hostapd_param *param) { unsigned int uNodeIndex; if (BSSDBbIsSTAInNodeDB(pDevice->pMgmt, param->sta_addr, &uNodeIndex)) { BSSvRemoveOneNode(pDevice, uNodeIndex); } else { return -ENOENT; } return 0; } /* * Description: * add a station from hostap deamon * * Parameters: * In: * pDevice - * param - * Out: * * Return Value: * */ static int hostap_add_sta(PSDevice pDevice, struct viawget_hostapd_param *param) { PSMgmtObject pMgmt = pDevice->pMgmt; unsigned int uNodeIndex; if (!BSSDBbIsSTAInNodeDB(pMgmt, param->sta_addr, &uNodeIndex)) { BSSvCreateOneNode((PSDevice)pDevice, &uNodeIndex); } memcpy(pMgmt->sNodeDBTable[uNodeIndex].abyMACAddr, param->sta_addr, WLAN_ADDR_LEN); pMgmt->sNodeDBTable[uNodeIndex].eNodeState = NODE_ASSOC; pMgmt->sNodeDBTable[uNodeIndex].wCapInfo = param->u.add_sta.capability; // TODO listenInterval // pMgmt->sNodeDBTable[uNodeIndex].wListenInterval = 1; pMgmt->sNodeDBTable[uNodeIndex].bPSEnable = false; pMgmt->sNodeDBTable[uNodeIndex].bySuppRate = param->u.add_sta.tx_supp_rates; // set max tx rate pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate = pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate; // set max basic rate pMgmt->sNodeDBTable[uNodeIndex].wMaxBasicRate = RATE_2M; // Todo: check sta preamble, if ap can't support, set status code pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble = WLAN_GET_CAP_INFO_SHORTPREAMBLE(pMgmt->sNodeDBTable[uNodeIndex].wCapInfo); pMgmt->sNodeDBTable[uNodeIndex].wAID = (unsigned short)param->u.add_sta.aid; pMgmt->sNodeDBTable[uNodeIndex].ulLastRxJiffer = jiffies; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Add STA AID= %d \n", pMgmt->sNodeDBTable[uNodeIndex].wAID); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "MAC=%2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X \n", param->sta_addr[0], param->sta_addr[1], param->sta_addr[2], param->sta_addr[3], param->sta_addr[4], param->sta_addr[5] ) ; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Max Support rate = %d \n", pMgmt->sNodeDBTable[uNodeIndex].wMaxSuppRate); return 0; } /* * Description: * get station info * * Parameters: * In: * pDevice - * param - * Out: * * Return Value: * */ static int hostap_get_info_sta(PSDevice pDevice, struct viawget_hostapd_param *param) { PSMgmtObject pMgmt = pDevice->pMgmt; unsigned int uNodeIndex; if (BSSDBbIsSTAInNodeDB(pMgmt, param->sta_addr, &uNodeIndex)) { param->u.get_info_sta.inactive_sec = (jiffies - pMgmt->sNodeDBTable[uNodeIndex].ulLastRxJiffer) / HZ; //param->u.get_info_sta.txexc = pMgmt->sNodeDBTable[uNodeIndex].uTxAttempts; } else { 
return -ENOENT; } return 0; } /* * Description: * reset txexc * * Parameters: * In: * pDevice - * param - * Out: * true, false * * Return Value: * */ /* static int hostap_reset_txexc_sta(PSDevice pDevice, struct viawget_hostapd_param *param) { PSMgmtObject pMgmt = pDevice->pMgmt; unsigned int uNodeIndex; if (BSSDBbIsSTAInNodeDB(pMgmt, param->sta_addr, &uNodeIndex)) { pMgmt->sNodeDBTable[uNodeIndex].uTxAttempts = 0; } else { return -ENOENT; } return 0; } */ /* * Description: * set station flag * * Parameters: * In: * pDevice - * param - * Out: * * Return Value: * */ static int hostap_set_flags_sta(PSDevice pDevice, struct viawget_hostapd_param *param) { PSMgmtObject pMgmt = pDevice->pMgmt; unsigned int uNodeIndex; if (BSSDBbIsSTAInNodeDB(pMgmt, param->sta_addr, &uNodeIndex)) { pMgmt->sNodeDBTable[uNodeIndex].dwFlags |= param->u.set_flags_sta.flags_or; pMgmt->sNodeDBTable[uNodeIndex].dwFlags &= param->u.set_flags_sta.flags_and; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " dwFlags = %x \n", (unsigned int)pMgmt->sNodeDBTable[uNodeIndex].dwFlags); } else { return -ENOENT; } return 0; } /* * Description: * set generic element (wpa ie) * * Parameters: * In: * pDevice - * param - * Out: * * Return Value: * */ static int hostap_set_generic_element(PSDevice pDevice, struct viawget_hostapd_param *param) { PSMgmtObject pMgmt = pDevice->pMgmt; memcpy( pMgmt->abyWPAIE, param->u.generic_elem.data, param->u.generic_elem.len ); pMgmt->wWPAIELen = param->u.generic_elem.len; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pMgmt->wWPAIELen = %d\n", pMgmt->wWPAIELen); // disable wpa if (pMgmt->wWPAIELen == 0) { pMgmt->eAuthenMode = WMAC_AUTH_OPEN; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " No WPAIE, Disable WPA \n"); } else { // enable wpa if ((pMgmt->abyWPAIE[0] == WLAN_EID_RSN_WPA) || (pMgmt->abyWPAIE[0] == WLAN_EID_RSN)) { pMgmt->eAuthenMode = WMAC_AUTH_WPANONE; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Set WPAIE enable WPA\n"); } else return -EINVAL; } return 0; } /* * Description: * flush station nodes table. * * Parameters: * In: * pDevice - * Out: * * Return Value: * */ static void hostap_flush_sta(PSDevice pDevice) { // reserved node index =0 for multicast node.
BSSvClearNodeDBTable(pDevice, 1); pDevice->uAssocCount = 0; return; } /* * Description: * set each station's encryption key * * Parameters: * In: * pDevice - * param - * Out: * * Return Value: * */ static int hostap_set_encryption(PSDevice pDevice, struct viawget_hostapd_param *param, int param_len) { PSMgmtObject pMgmt = pDevice->pMgmt; unsigned long dwKeyIndex = 0; unsigned char abyKey[MAX_KEY_LEN]; unsigned char abySeq[MAX_KEY_LEN]; NDIS_802_11_KEY_RSC KeyRSC; unsigned char byKeyDecMode = KEY_CTL_WEP; int ret = 0; int iNodeIndex = -1; int ii; bool bKeyTableFull = false; unsigned short wKeyCtl = 0; param->u.crypt.err = 0; /* if (param_len != (int) ((char *) param->u.crypt.key - (char *) param) + param->u.crypt.key_len) return -EINVAL; */ if (param->u.crypt.alg > WPA_ALG_CCMP) return -EINVAL; if ((param->u.crypt.idx > 3) || (param->u.crypt.key_len > MAX_KEY_LEN)) { param->u.crypt.err = HOSTAP_CRYPT_ERR_KEY_SET_FAILED; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " HOSTAP_CRYPT_ERR_KEY_SET_FAILED\n"); return -EINVAL; } if (is_broadcast_ether_addr(param->sta_addr)) { if (param->u.crypt.idx >= MAX_GROUP_KEY) return -EINVAL; iNodeIndex = 0; } else { if (BSSDBbIsSTAInNodeDB(pMgmt, param->sta_addr, &iNodeIndex) == false) { param->u.crypt.err = HOSTAP_CRYPT_ERR_UNKNOWN_ADDR; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " HOSTAP_CRYPT_ERR_UNKNOWN_ADDR\n"); return -EINVAL; } } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " hostap_set_encryption: sta_index %d \n", iNodeIndex); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " hostap_set_encryption: alg %d \n", param->u.crypt.alg); if (param->u.crypt.alg == WPA_ALG_NONE) { if (pMgmt->sNodeDBTable[iNodeIndex].bOnFly == true) { if (KeybRemoveKey(&(pDevice->sKey), param->sta_addr, pMgmt->sNodeDBTable[iNodeIndex].dwKeyIndex, pDevice->PortOffset) == false) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "KeybRemoveKey fail \n"); } pMgmt->sNodeDBTable[iNodeIndex].bOnFly = false; } pMgmt->sNodeDBTable[iNodeIndex].byKeyIndex = 0; pMgmt->sNodeDBTable[iNodeIndex].dwKeyIndex = 0; pMgmt->sNodeDBTable[iNodeIndex].uWepKeyLength = 0; pMgmt->sNodeDBTable[iNodeIndex].KeyRSC = 0; pMgmt->sNodeDBTable[iNodeIndex].dwTSC47_16 = 0; pMgmt->sNodeDBTable[iNodeIndex].wTSC15_0 = 0; pMgmt->sNodeDBTable[iNodeIndex].byCipherSuite = 0; memset(&pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[0], 0, MAX_KEY_LEN ); return ret; } memcpy(abyKey, param->u.crypt.key, param->u.crypt.key_len); // copy to node key tbl pMgmt->sNodeDBTable[iNodeIndex].byKeyIndex = param->u.crypt.idx; pMgmt->sNodeDBTable[iNodeIndex].uWepKeyLength = param->u.crypt.key_len; memcpy(&pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[0], param->u.crypt.key, param->u.crypt.key_len ); dwKeyIndex = (unsigned long)(param->u.crypt.idx); if (param->u.crypt.flags & HOSTAP_CRYPT_FLAG_SET_TX_KEY) { pDevice->byKeyIndex = (unsigned char)dwKeyIndex; pDevice->bTransmitKey = true; dwKeyIndex |= (1 << 31); } if (param->u.crypt.alg == WPA_ALG_WEP) { if ((pDevice->bEnable8021x == false) || (iNodeIndex == 0)) { KeybSetDefaultKey(&(pDevice->sKey), dwKeyIndex & ~(BIT30 | USE_KEYRSC), param->u.crypt.key_len, NULL, abyKey, KEY_CTL_WEP, pDevice->PortOffset, pDevice->byLocalID); } else { // 8021x enable, individual key dwKeyIndex |= (1 << 30); // set pairwise key if (KeybSetKey(&(pDevice->sKey), &param->sta_addr[0], dwKeyIndex & ~(USE_KEYRSC), param->u.crypt.key_len, (PQWORD) &(KeyRSC), (unsigned char *)abyKey, KEY_CTL_WEP, pDevice->PortOffset, pDevice->byLocalID) == true) { pMgmt->sNodeDBTable[iNodeIndex].bOnFly = true; } else { // Key Table Full pMgmt->sNodeDBTable[iNodeIndex].bOnFly = false;
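/* Editor's note (added, not in the original source): when no free
 * pairwise slot is left in the on-chip key table, the node is marked
 * !bOnFly and the bKeyTableFull path further down re-points the last
 * table entry at the group cipher via MACvSetDefaultKeyCtl(), so
 * traffic for this station falls back to group-key handling. */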
bKeyTableFull = true; } } pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled; pDevice->bEncryptionEnable = true; pMgmt->byCSSPK = KEY_CTL_WEP; pMgmt->byCSSGK = KEY_CTL_WEP; pMgmt->sNodeDBTable[iNodeIndex].byCipherSuite = KEY_CTL_WEP; pMgmt->sNodeDBTable[iNodeIndex].dwKeyIndex = dwKeyIndex; return ret; } if (param->u.crypt.seq) { memcpy(&abySeq, param->u.crypt.seq, 8); for (ii = 0 ; ii < 8 ; ii++) { KeyRSC |= (abySeq[ii] << (ii * 8)); } dwKeyIndex |= 1 << 29; pMgmt->sNodeDBTable[iNodeIndex].KeyRSC = KeyRSC; } if (param->u.crypt.alg == WPA_ALG_TKIP) { if (param->u.crypt.key_len != MAX_KEY_LEN) return -EINVAL; pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled; byKeyDecMode = KEY_CTL_TKIP; pMgmt->byCSSPK = KEY_CTL_TKIP; pMgmt->byCSSGK = KEY_CTL_TKIP; } if (param->u.crypt.alg == WPA_ALG_CCMP) { if ((param->u.crypt.key_len != AES_KEY_LEN) || (pDevice->byLocalID <= REV_ID_VT3253_A1)) return -EINVAL; pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled; byKeyDecMode = KEY_CTL_CCMP; pMgmt->byCSSPK = KEY_CTL_CCMP; pMgmt->byCSSGK = KEY_CTL_CCMP; } if (iNodeIndex == 0) { KeybSetDefaultKey(&(pDevice->sKey), dwKeyIndex, param->u.crypt.key_len, (PQWORD) &(KeyRSC), abyKey, byKeyDecMode, pDevice->PortOffset, pDevice->byLocalID); pMgmt->sNodeDBTable[iNodeIndex].bOnFly = true; } else { dwKeyIndex |= (1 << 30); // set pairwise key if (KeybSetKey(&(pDevice->sKey), &param->sta_addr[0], dwKeyIndex, param->u.crypt.key_len, (PQWORD) &(KeyRSC), (unsigned char *)abyKey, byKeyDecMode, pDevice->PortOffset, pDevice->byLocalID) == true) { pMgmt->sNodeDBTable[iNodeIndex].bOnFly = true; } else { // Key Table Full pMgmt->sNodeDBTable[iNodeIndex].bOnFly = false; bKeyTableFull = true; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " Key Table Full\n"); } } if (bKeyTableFull == true) { wKeyCtl &= 0x7F00; // clear all key control fields wKeyCtl |= (byKeyDecMode << 4); wKeyCtl |= (byKeyDecMode); wKeyCtl |= 0x0044; // use group key for all address wKeyCtl |= 0x4000; // disable KeyTable[MAX_KEY_TABLE-1] on-fly to generate rx int MACvSetDefaultKeyCtl(pDevice->PortOffset, wKeyCtl, MAX_KEY_TABLE-1, pDevice->byLocalID); } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " Set key sta_index= %d \n", iNodeIndex); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " tx_index=%d len=%d \n", param->u.crypt.idx, param->u.crypt.key_len ); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " key=%x-%x-%x-%x-%x-xxxxx \n", pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[0], pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[1], pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[2], pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[3], pMgmt->sNodeDBTable[iNodeIndex].abyWepKey[4] ); // set wep key pDevice->bEncryptionEnable = true; pMgmt->sNodeDBTable[iNodeIndex].byCipherSuite = byKeyDecMode; pMgmt->sNodeDBTable[iNodeIndex].dwKeyIndex = dwKeyIndex; pMgmt->sNodeDBTable[iNodeIndex].dwTSC47_16 = 0; pMgmt->sNodeDBTable[iNodeIndex].wTSC15_0 = 0; return ret; } /* * Description: * get each station's encryption key * * Parameters: * In: * pDevice - * param - * Out: * * Return Value: * */ static int hostap_get_encryption(PSDevice pDevice, struct viawget_hostapd_param *param, int param_len) { PSMgmtObject pMgmt = pDevice->pMgmt; int ret = 0; int ii; int iNodeIndex =0; param->u.crypt.err = 0; if (is_broadcast_ether_addr(param->sta_addr)) { iNodeIndex = 0; } else { if (BSSDBbIsSTAInNodeDB(pMgmt, param->sta_addr, &iNodeIndex) == false) { param->u.crypt.err = HOSTAP_CRYPT_ERR_UNKNOWN_ADDR; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "hostap_get_encryption: HOSTAP_CRYPT_ERR_UNKNOWN_ADDR\n"); return -EINVAL; } } DBG_PRT(MSG_LEVEL_DEBUG,
KERN_INFO "hostap_get_encryption: %d\n", iNodeIndex); memset(param->u.crypt.seq, 0, 8); for (ii = 0 ; ii < 8 ; ii++) { param->u.crypt.seq[ii] = (unsigned char)pMgmt->sNodeDBTable[iNodeIndex].KeyRSC >> (ii * 8); } return ret; } /* * Description: * vt6655_hostap_ioctl main function supported for hostap deamon. * * Parameters: * In: * pDevice - * iw_point - * Out: * * Return Value: * */ int vt6655_hostap_ioctl(PSDevice pDevice, struct iw_point *p) { struct viawget_hostapd_param *param; int ret = 0; int ap_ioctl = 0; if (p->length < sizeof(struct viawget_hostapd_param) || p->length > VIAWGET_HOSTAPD_MAX_BUF_SIZE || !p->pointer) return -EINVAL; param = kmalloc((int)p->length, (int)GFP_KERNEL); if (param == NULL) return -ENOMEM; if (copy_from_user(param, p->pointer, p->length)) { ret = -EFAULT; goto out; } switch (param->cmd) { case VIAWGET_HOSTAPD_SET_ENCRYPTION: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_SET_ENCRYPTION \n"); spin_lock_irq(&pDevice->lock); ret = hostap_set_encryption(pDevice, param, p->length); spin_unlock_irq(&pDevice->lock); break; case VIAWGET_HOSTAPD_GET_ENCRYPTION: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_GET_ENCRYPTION \n"); spin_lock_irq(&pDevice->lock); ret = hostap_get_encryption(pDevice, param, p->length); spin_unlock_irq(&pDevice->lock); break; case VIAWGET_HOSTAPD_SET_ASSOC_AP_ADDR: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_SET_ASSOC_AP_ADDR \n"); return -EOPNOTSUPP; break; case VIAWGET_HOSTAPD_FLUSH: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_FLUSH \n"); spin_lock_irq(&pDevice->lock); hostap_flush_sta(pDevice); spin_unlock_irq(&pDevice->lock); break; case VIAWGET_HOSTAPD_ADD_STA: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_ADD_STA \n"); spin_lock_irq(&pDevice->lock); ret = hostap_add_sta(pDevice, param); spin_unlock_irq(&pDevice->lock); break; case VIAWGET_HOSTAPD_REMOVE_STA: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_REMOVE_STA \n"); spin_lock_irq(&pDevice->lock); ret = hostap_remove_sta(pDevice, param); spin_unlock_irq(&pDevice->lock); break; case VIAWGET_HOSTAPD_GET_INFO_STA: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_GET_INFO_STA \n"); ret = hostap_get_info_sta(pDevice, param); ap_ioctl = 1; break; /* case VIAWGET_HOSTAPD_RESET_TXEXC_STA: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_RESET_TXEXC_STA \n"); ret = hostap_reset_txexc_sta(pDevice, param); break; */ case VIAWGET_HOSTAPD_SET_FLAGS_STA: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_SET_FLAGS_STA \n"); ret = hostap_set_flags_sta(pDevice, param); break; case VIAWGET_HOSTAPD_MLME: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_MLME \n"); return -EOPNOTSUPP; case VIAWGET_HOSTAPD_SET_GENERIC_ELEMENT: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_SET_GENERIC_ELEMENT \n"); ret = hostap_set_generic_element(pDevice, param); break; case VIAWGET_HOSTAPD_SCAN_REQ: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_SCAN_REQ \n"); return -EOPNOTSUPP; case VIAWGET_HOSTAPD_STA_CLEAR_STATS: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_HOSTAPD_STA_CLEAR_STATS \n"); return -EOPNOTSUPP; default: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "vt6655_hostap_ioctl: unknown cmd=%d\n", (int)param->cmd); return -EOPNOTSUPP; break; } if ((ret == 0) && ap_ioctl) { if (copy_to_user(p->pointer, param, p->length)) { ret = -EFAULT; goto out; } } out: kfree(param); return ret; }
gpl-2.0
u9621071/kernel-uek-UEK3
drivers/ata/sata_highbank.c
43
11873
/* * Calxeda Highbank AHCI SATA platform driver * Copyright 2012 Calxeda, Inc. * * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/err.h> #include <linux/io.h> #include <linux/spinlock.h> #include <linux/device.h> #include <linux/of_device.h> #include <linux/of_address.h> #include <linux/platform_device.h> #include <linux/libata.h> #include <linux/ahci_platform.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/export.h> #include "ahci.h" #define CPHY_MAP(dev, addr) ((((dev) & 0x1f) << 7) | (((addr) >> 9) & 0x7f)) #define CPHY_ADDR(addr) (((addr) & 0x1ff) << 2) #define SERDES_CR_CTL 0x80a0 #define SERDES_CR_ADDR 0x80a1 #define SERDES_CR_DATA 0x80a2 #define CR_BUSY 0x0001 #define CR_START 0x0001 #define CR_WR_RDN 0x0002 #define CPHY_RX_INPUT_STS 0x2002 #define CPHY_SATA_OVERRIDE 0x4000 #define CPHY_OVERRIDE 0x2005 #define SPHY_LANE 0x100 #define SPHY_HALF_RATE 0x0001 #define CPHY_SATA_DPLL_MODE 0x0700 #define CPHY_SATA_DPLL_SHIFT 8 #define CPHY_SATA_DPLL_RESET (1 << 11) #define CPHY_PHY_COUNT 6 #define CPHY_LANE_COUNT 4 #define CPHY_PORT_COUNT (CPHY_PHY_COUNT * CPHY_LANE_COUNT) static DEFINE_SPINLOCK(cphy_lock); /* Each of the 6 phys can have up to 4 sata ports attached to i. 
Map 0-based * sata ports to their phys and then to their lanes within the phys */ struct phy_lane_info { void __iomem *phy_base; u8 lane_mapping; u8 phy_devs; }; static struct phy_lane_info port_data[CPHY_PORT_COUNT]; static u32 __combo_phy_reg_read(u8 sata_port, u32 addr) { u32 data; u8 dev = port_data[sata_port].phy_devs; spin_lock(&cphy_lock); writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800); data = readl(port_data[sata_port].phy_base + CPHY_ADDR(addr)); spin_unlock(&cphy_lock); return data; } static void __combo_phy_reg_write(u8 sata_port, u32 addr, u32 data) { u8 dev = port_data[sata_port].phy_devs; spin_lock(&cphy_lock); writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800); writel(data, port_data[sata_port].phy_base + CPHY_ADDR(addr)); spin_unlock(&cphy_lock); } static void combo_phy_wait_for_ready(u8 sata_port) { while (__combo_phy_reg_read(sata_port, SERDES_CR_CTL) & CR_BUSY) udelay(5); } static u32 combo_phy_read(u8 sata_port, u32 addr) { combo_phy_wait_for_ready(sata_port); __combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr); __combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_START); combo_phy_wait_for_ready(sata_port); return __combo_phy_reg_read(sata_port, SERDES_CR_DATA); } static void combo_phy_write(u8 sata_port, u32 addr, u32 data) { combo_phy_wait_for_ready(sata_port); __combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr); __combo_phy_reg_write(sata_port, SERDES_CR_DATA, data); __combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_WR_RDN | CR_START); } static void highbank_cphy_disable_overrides(u8 sata_port) { u8 lane = port_data[sata_port].lane_mapping; u32 tmp; if (unlikely(port_data[sata_port].phy_base == NULL)) return; tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE); tmp &= ~CPHY_SATA_OVERRIDE; combo_phy_write(sata_port, CPHY_OVERRIDE + lane * SPHY_LANE, tmp); } static void cphy_override_rx_mode(u8 sata_port, u32 val) { u8 lane = port_data[sata_port].lane_mapping; u32 tmp; tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE); tmp &= ~CPHY_SATA_OVERRIDE; combo_phy_write(sata_port, CPHY_OVERRIDE + lane * SPHY_LANE, tmp); tmp |= CPHY_SATA_OVERRIDE; combo_phy_write(sata_port, CPHY_OVERRIDE + lane * SPHY_LANE, tmp); tmp &= ~CPHY_SATA_DPLL_MODE; tmp |= val << CPHY_SATA_DPLL_SHIFT; combo_phy_write(sata_port, CPHY_OVERRIDE + lane * SPHY_LANE, tmp); tmp |= CPHY_SATA_DPLL_RESET; combo_phy_write(sata_port, CPHY_OVERRIDE + lane * SPHY_LANE, tmp); tmp &= ~CPHY_SATA_DPLL_RESET; combo_phy_write(sata_port, CPHY_OVERRIDE + lane * SPHY_LANE, tmp); msleep(15); } static void highbank_cphy_override_lane(u8 sata_port) { u8 lane = port_data[sata_port].lane_mapping; u32 tmp, k = 0; if (unlikely(port_data[sata_port].phy_base == NULL)) return; do { tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE); } while ((tmp & SPHY_HALF_RATE) && (k++ < 1000)); cphy_override_rx_mode(sata_port, 3); } static int highbank_initialize_phys(struct device *dev, void __iomem *addr) { struct device_node *sata_node = dev->of_node; int phy_count = 0, phy, port = 0; void __iomem *cphy_base[CPHY_PHY_COUNT]; struct device_node *phy_nodes[CPHY_PHY_COUNT]; memset(port_data, 0, sizeof(struct phy_lane_info) * CPHY_PORT_COUNT); memset(phy_nodes, 0, sizeof(struct device_node*) * CPHY_PHY_COUNT); do { u32 tmp; struct of_phandle_args phy_data; if (of_parse_phandle_with_args(sata_node, "calxeda,port-phys", "#phy-cells", port, &phy_data)) break; for (phy = 0; phy < phy_count; phy++) { if (phy_nodes[phy] == phy_data.np) break; } if 
(phy_nodes[phy] == NULL) { phy_nodes[phy] = phy_data.np; cphy_base[phy] = of_iomap(phy_nodes[phy], 0); if (cphy_base[phy] == NULL) { return 0; } phy_count += 1; } port_data[port].lane_mapping = phy_data.args[0]; of_property_read_u32(phy_nodes[phy], "phydev", &tmp); port_data[port].phy_devs = tmp; port_data[port].phy_base = cphy_base[phy]; of_node_put(phy_data.np); port += 1; } while (port < CPHY_PORT_COUNT); return 0; } static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline) { const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); struct ata_port *ap = link->ap; struct ahci_port_priv *pp = ap->private_data; u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; struct ata_taskfile tf; bool online; u32 sstatus; int rc; int retry = 10; ahci_stop_engine(ap); /* clear D2H reception area to properly wait for D2H FIS */ ata_tf_init(link->device, &tf); tf.command = ATA_BUSY; ata_tf_to_fis(&tf, 0, 0, d2h_fis); do { highbank_cphy_disable_overrides(link->ap->port_no); rc = sata_link_hardreset(link, timing, deadline, &online, NULL); highbank_cphy_override_lane(link->ap->port_no); /* If the status is 1, we are connected, but the link did not * come up. So retry resetting the link again. */ if (sata_scr_read(link, SCR_STATUS, &sstatus)) break; if (!(sstatus & 0x3)) break; } while (!online && retry--); ahci_start_engine(ap); if (online) *class = ahci_dev_classify(ap); return rc; } static struct ata_port_operations ahci_highbank_ops = { .inherits = &ahci_ops, .hardreset = ahci_highbank_hardreset, }; static const struct ata_port_info ahci_highbank_port_info = { .flags = AHCI_FLAG_COMMON, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &ahci_highbank_ops, }; static struct scsi_host_template ahci_highbank_platform_sht = { AHCI_SHT("sata_highbank"), }; static const struct of_device_id ahci_of_match[] = { { .compatible = "calxeda,hb-ahci" }, {}, }; MODULE_DEVICE_TABLE(of, ahci_of_match); static int ahci_highbank_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct ahci_host_priv *hpriv; struct ata_host *host; struct resource *mem; int irq; int n_ports; int i; int rc; struct ata_port_info pi = ahci_highbank_port_info; const struct ata_port_info *ppi[] = { &pi, NULL }; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) { dev_err(dev, "no mmio space\n"); return -EINVAL; } irq = platform_get_irq(pdev, 0); if (irq <= 0) { dev_err(dev, "no irq\n"); return -EINVAL; } hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); if (!hpriv) { dev_err(dev, "can't alloc ahci_host_priv\n"); return -ENOMEM; } hpriv->flags |= (unsigned long)pi.private_data; hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem)); if (!hpriv->mmio) { dev_err(dev, "can't map %pR\n", mem); return -ENOMEM; } rc = highbank_initialize_phys(dev, hpriv->mmio); if (rc) return rc; ahci_save_initial_config(dev, hpriv, 0, 0); /* prepare host */ if (hpriv->cap & HOST_CAP_NCQ) pi.flags |= ATA_FLAG_NCQ; if (hpriv->cap & HOST_CAP_PMP) pi.flags |= ATA_FLAG_PMP; ahci_set_em_messages(hpriv, &pi); /* CAP.NP sometimes indicate the index of the last enabled * port, at other times, that of the last possible port, so * determining the maximum port number requires looking at * both CAP.NP and port_map. 
*/ n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map)); host = ata_host_alloc_pinfo(dev, ppi, n_ports); if (!host) { rc = -ENOMEM; goto err0; } host->private_data = hpriv; if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss) host->flags |= ATA_HOST_PARALLEL_SCAN; if (pi.flags & ATA_FLAG_EM) ahci_reset_em(host); for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; ata_port_desc(ap, "mmio %pR", mem); ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80); /* set enclosure management message type */ if (ap->flags & ATA_FLAG_EM) ap->em_message_type = hpriv->em_msg_type; /* disabled/not-implemented port */ if (!(hpriv->port_map & (1 << i))) ap->ops = &ata_dummy_port_ops; } rc = ahci_reset_controller(host); if (rc) goto err0; ahci_init_controller(host); ahci_print_info(host, "platform"); rc = ata_host_activate(host, irq, ahci_interrupt, 0, &ahci_highbank_platform_sht); if (rc) goto err0; return 0; err0: return rc; } #ifdef CONFIG_PM_SLEEP static int ahci_highbank_suspend(struct device *dev) { struct ata_host *host = dev_get_drvdata(dev); struct ahci_host_priv *hpriv = host->private_data; void __iomem *mmio = hpriv->mmio; u32 ctl; int rc; if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) { dev_err(dev, "firmware update required for suspend/resume\n"); return -EIO; } /* * AHCI spec rev1.1 section 8.3.3: * Software must disable interrupts prior to requesting a * transition of the HBA to D3 state. */ ctl = readl(mmio + HOST_CTL); ctl &= ~HOST_IRQ_EN; writel(ctl, mmio + HOST_CTL); readl(mmio + HOST_CTL); /* flush */ rc = ata_host_suspend(host, PMSG_SUSPEND); if (rc) return rc; return 0; } static int ahci_highbank_resume(struct device *dev) { struct ata_host *host = dev_get_drvdata(dev); int rc; if (dev->power.power_state.event == PM_EVENT_SUSPEND) { rc = ahci_reset_controller(host); if (rc) return rc; ahci_init_controller(host); } ata_host_resume(host); return 0; } #endif SIMPLE_DEV_PM_OPS(ahci_highbank_pm_ops, ahci_highbank_suspend, ahci_highbank_resume); static struct platform_driver ahci_highbank_driver = { .remove = ata_platform_remove_one, .driver = { .name = "highbank-ahci", .owner = THIS_MODULE, .of_match_table = ahci_of_match, .pm = &ahci_highbank_pm_ops, }, .probe = ahci_highbank_probe, }; module_platform_driver(ahci_highbank_driver); MODULE_DESCRIPTION("Calxeda Highbank AHCI SATA platform driver"); MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("sata:highbank");
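/*
 * Editor's sketch (added for illustration): combo_phy_read()/combo_phy_write()
 * above implement the classic indirect "mailbox" register idiom -- post the
 * target address, kick a start bit, poll a busy flag, then move the data.
 * The MBOX_* names below are invented for the sketch; only the shape matches
 * the SERDES_CR_{ADDR,DATA,CTL}/CR_BUSY protocol used by this driver.
 */
#if 0
static u32 mailbox_read(void __iomem *base, u32 addr)
{
	while (readl(base + MBOX_CTL) & MBOX_BUSY)	/* engine idle? */
		udelay(5);
	writel(addr, base + MBOX_ADDR);			/* post target */
	writel(MBOX_START, base + MBOX_CTL);		/* start a read */
	while (readl(base + MBOX_CTL) & MBOX_BUSY)	/* wait for done */
		udelay(5);
	return readl(base + MBOX_DATA);			/* fetch result */
}
#endif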
gpl-2.0
timmytim/honeybutter_kernel
drivers/net/wireless/ath/ath9k/ar9002_hw.c
43
18482
/* * Copyright (c) 2008-2010 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "hw.h" #include "ar5008_initvals.h" #include "ar9001_initvals.h" #include "ar9002_initvals.h" #include "ar9002_phy.h" int modparam_force_new_ani; module_param_named(force_new_ani, modparam_force_new_ani, int, 0444); MODULE_PARM_DESC(force_new_ani, "Force new ANI for AR5008, AR9001, AR9002"); /* General hardware code for the AR5008/AR9001/AR9002 hardware families */ static bool ar9002_hw_macversion_supported(u32 macversion) { switch (macversion) { case AR_SREV_VERSION_5416_PCI: case AR_SREV_VERSION_5416_PCIE: case AR_SREV_VERSION_9160: case AR_SREV_VERSION_9100: case AR_SREV_VERSION_9280: case AR_SREV_VERSION_9285: case AR_SREV_VERSION_9287: case AR_SREV_VERSION_9271: return true; default: break; } return false; } static void ar9002_hw_init_mode_regs(struct ath_hw *ah) { if (AR_SREV_9271(ah)) { INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271, ARRAY_SIZE(ar9271Modes_9271), 6); INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271, ARRAY_SIZE(ar9271Common_9271), 2); INIT_INI_ARRAY(&ah->iniCommon_normal_cck_fir_coeff_9271, ar9271Common_normal_cck_fir_coeff_9271, ARRAY_SIZE(ar9271Common_normal_cck_fir_coeff_9271), 2); INIT_INI_ARRAY(&ah->iniCommon_japan_2484_cck_fir_coeff_9271, ar9271Common_japan_2484_cck_fir_coeff_9271, ARRAY_SIZE(ar9271Common_japan_2484_cck_fir_coeff_9271), 2); INIT_INI_ARRAY(&ah->iniModes_9271_1_0_only, ar9271Modes_9271_1_0_only, ARRAY_SIZE(ar9271Modes_9271_1_0_only), 6); INIT_INI_ARRAY(&ah->iniModes_9271_ANI_reg, ar9271Modes_9271_ANI_reg, ARRAY_SIZE(ar9271Modes_9271_ANI_reg), 6); INIT_INI_ARRAY(&ah->iniModes_high_power_tx_gain_9271, ar9271Modes_high_power_tx_gain_9271, ARRAY_SIZE(ar9271Modes_high_power_tx_gain_9271), 6); INIT_INI_ARRAY(&ah->iniModes_normal_power_tx_gain_9271, ar9271Modes_normal_power_tx_gain_9271, ARRAY_SIZE(ar9271Modes_normal_power_tx_gain_9271), 6); return; } if (AR_SREV_9287_11_OR_LATER(ah)) { INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1, ARRAY_SIZE(ar9287Modes_9287_1_1), 6); INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_1, ARRAY_SIZE(ar9287Common_9287_1_1), 2); if (ah->config.pcie_clock_req) INIT_INI_ARRAY(&ah->iniPcieSerdes, ar9287PciePhy_clkreq_off_L1_9287_1_1, ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_1), 2); else INIT_INI_ARRAY(&ah->iniPcieSerdes, ar9287PciePhy_clkreq_always_on_L1_9287_1_1, ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_1), 2); } else if (AR_SREV_9285_12_OR_LATER(ah)) { INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285_1_2, ARRAY_SIZE(ar9285Modes_9285_1_2), 6); INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285_1_2, ARRAY_SIZE(ar9285Common_9285_1_2), 2); if (ah->config.pcie_clock_req) { INIT_INI_ARRAY(&ah->iniPcieSerdes, ar9285PciePhy_clkreq_off_L1_9285_1_2, ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285_1_2), 2); } else { INIT_INI_ARRAY(&ah->iniPcieSerdes,
ar9285PciePhy_clkreq_always_on_L1_9285_1_2, ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285_1_2), 2); } } else if (AR_SREV_9280_20_OR_LATER(ah)) { INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280_2, ARRAY_SIZE(ar9280Modes_9280_2), 6); INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280_2, ARRAY_SIZE(ar9280Common_9280_2), 2); if (ah->config.pcie_clock_req) { INIT_INI_ARRAY(&ah->iniPcieSerdes, ar9280PciePhy_clkreq_off_L1_9280, ARRAY_SIZE(ar9280PciePhy_clkreq_off_L1_9280), 2); } else { INIT_INI_ARRAY(&ah->iniPcieSerdes, ar9280PciePhy_clkreq_always_on_L1_9280, ARRAY_SIZE(ar9280PciePhy_clkreq_always_on_L1_9280), 2); } INIT_INI_ARRAY(&ah->iniModesAdditional, ar9280Modes_fast_clock_9280_2, ARRAY_SIZE(ar9280Modes_fast_clock_9280_2), 3); } else if (AR_SREV_9160_10_OR_LATER(ah)) { INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9160, ARRAY_SIZE(ar5416Modes_9160), 6); INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9160, ARRAY_SIZE(ar5416Common_9160), 2); INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9160, ARRAY_SIZE(ar5416Bank0_9160), 2); INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9160, ARRAY_SIZE(ar5416BB_RfGain_9160), 3); INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9160, ARRAY_SIZE(ar5416Bank1_9160), 2); INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9160, ARRAY_SIZE(ar5416Bank2_9160), 2); INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9160, ARRAY_SIZE(ar5416Bank3_9160), 3); INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9160, ARRAY_SIZE(ar5416Bank6_9160), 3); INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9160, ARRAY_SIZE(ar5416Bank6TPC_9160), 3); INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9160, ARRAY_SIZE(ar5416Bank7_9160), 2); if (AR_SREV_9160_11(ah)) { INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9160_1_1, ARRAY_SIZE(ar5416Addac_9160_1_1), 2); } else { INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9160, ARRAY_SIZE(ar5416Addac_9160), 2); } } else if (AR_SREV_9100_OR_LATER(ah)) { INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9100, ARRAY_SIZE(ar5416Modes_9100), 6); INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9100, ARRAY_SIZE(ar5416Common_9100), 2); INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9100, ARRAY_SIZE(ar5416Bank0_9100), 2); INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9100, ARRAY_SIZE(ar5416BB_RfGain_9100), 3); INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9100, ARRAY_SIZE(ar5416Bank1_9100), 2); INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9100, ARRAY_SIZE(ar5416Bank2_9100), 2); INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9100, ARRAY_SIZE(ar5416Bank3_9100), 3); INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9100, ARRAY_SIZE(ar5416Bank6_9100), 3); INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9100, ARRAY_SIZE(ar5416Bank6TPC_9100), 3); INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9100, ARRAY_SIZE(ar5416Bank7_9100), 2); INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9100, ARRAY_SIZE(ar5416Addac_9100), 2); } else { INIT_INI_ARRAY(&ah->iniModes, ar5416Modes, ARRAY_SIZE(ar5416Modes), 6); INIT_INI_ARRAY(&ah->iniCommon, ar5416Common, ARRAY_SIZE(ar5416Common), 2); INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0, ARRAY_SIZE(ar5416Bank0), 2); INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain, ARRAY_SIZE(ar5416BB_RfGain), 3); INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1, ARRAY_SIZE(ar5416Bank1), 2); INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2, ARRAY_SIZE(ar5416Bank2), 2); INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3, ARRAY_SIZE(ar5416Bank3), 3); INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6, ARRAY_SIZE(ar5416Bank6), 3); INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC, ARRAY_SIZE(ar5416Bank6TPC), 3); INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7, ARRAY_SIZE(ar5416Bank7), 2); 
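		/* Editor's note (added): the trailing argument of INIT_INI_ARRAY()
		 * is the column count of the initval table -- 2 appears to mean
		 * plain register/value pairs, 3 adds a second value column
		 * (e.g. the fast-clock variants), and 6 is register plus one
		 * value per supported channel mode. */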
INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac, ARRAY_SIZE(ar5416Addac), 2); } } /* Support for Japan ch.14 (2484) spread */ void ar9002_hw_cck_chan14_spread(struct ath_hw *ah) { if (AR_SREV_9287_11_OR_LATER(ah)) { INIT_INI_ARRAY(&ah->iniCckfirNormal, ar9287Common_normal_cck_fir_coeff_9287_1_1, ARRAY_SIZE(ar9287Common_normal_cck_fir_coeff_9287_1_1), 2); INIT_INI_ARRAY(&ah->iniCckfirJapan2484, ar9287Common_japan_2484_cck_fir_coeff_9287_1_1, ARRAY_SIZE(ar9287Common_japan_2484_cck_fir_coeff_9287_1_1), 2); } } static void ar9280_20_hw_init_rxgain_ini(struct ath_hw *ah) { u32 rxgain_type; if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_17) { rxgain_type = ah->eep_ops->get_eeprom(ah, EEP_RXGAIN_TYPE); if (rxgain_type == AR5416_EEP_RXGAIN_13DB_BACKOFF) INIT_INI_ARRAY(&ah->iniModesRxGain, ar9280Modes_backoff_13db_rxgain_9280_2, ARRAY_SIZE(ar9280Modes_backoff_13db_rxgain_9280_2), 6); else if (rxgain_type == AR5416_EEP_RXGAIN_23DB_BACKOFF) INIT_INI_ARRAY(&ah->iniModesRxGain, ar9280Modes_backoff_23db_rxgain_9280_2, ARRAY_SIZE(ar9280Modes_backoff_23db_rxgain_9280_2), 6); else INIT_INI_ARRAY(&ah->iniModesRxGain, ar9280Modes_original_rxgain_9280_2, ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6); } else { INIT_INI_ARRAY(&ah->iniModesRxGain, ar9280Modes_original_rxgain_9280_2, ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6); } } static void ar9280_20_hw_init_txgain_ini(struct ath_hw *ah) { u32 txgain_type; if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_19) { txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE); if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER) INIT_INI_ARRAY(&ah->iniModesTxGain, ar9280Modes_high_power_tx_gain_9280_2, ARRAY_SIZE(ar9280Modes_high_power_tx_gain_9280_2), 6); else INIT_INI_ARRAY(&ah->iniModesTxGain, ar9280Modes_original_tx_gain_9280_2, ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6); } else { INIT_INI_ARRAY(&ah->iniModesTxGain, ar9280Modes_original_tx_gain_9280_2, ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6); } } static void ar9002_hw_init_mode_gain_regs(struct ath_hw *ah) { if (AR_SREV_9287_11_OR_LATER(ah)) INIT_INI_ARRAY(&ah->iniModesRxGain, ar9287Modes_rx_gain_9287_1_1, ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_1), 6); else if (AR_SREV_9280_20(ah)) ar9280_20_hw_init_rxgain_ini(ah); if (AR_SREV_9287_11_OR_LATER(ah)) { INIT_INI_ARRAY(&ah->iniModesTxGain, ar9287Modes_tx_gain_9287_1_1, ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_1), 6); } else if (AR_SREV_9280_20(ah)) { ar9280_20_hw_init_txgain_ini(ah); } else if (AR_SREV_9285_12_OR_LATER(ah)) { u32 txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE); /* txgain table */ if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER) { if (AR_SREV_9285E_20(ah)) { INIT_INI_ARRAY(&ah->iniModesTxGain, ar9285Modes_XE2_0_high_power, ARRAY_SIZE( ar9285Modes_XE2_0_high_power), 6); } else { INIT_INI_ARRAY(&ah->iniModesTxGain, ar9285Modes_high_power_tx_gain_9285_1_2, ARRAY_SIZE( ar9285Modes_high_power_tx_gain_9285_1_2), 6); } } else { if (AR_SREV_9285E_20(ah)) { INIT_INI_ARRAY(&ah->iniModesTxGain, ar9285Modes_XE2_0_normal_power, ARRAY_SIZE( ar9285Modes_XE2_0_normal_power), 6); } else { INIT_INI_ARRAY(&ah->iniModesTxGain, ar9285Modes_original_tx_gain_9285_1_2, ARRAY_SIZE( ar9285Modes_original_tx_gain_9285_1_2), 6); } } } } /* * Helper for ASPM support. * * Disable PLL when in L0s as well as receiver clock when in L1. * This power saving option must be enabled through the SerDes. * * Programming the SerDes must go through the same 288 bit serial shift * register as the other analog registers. 
Hence the 9 writes. */ static void ar9002_hw_configpcipowersave(struct ath_hw *ah, int restore, int power_off) { u8 i; u32 val; if (ah->is_pciexpress != true) return; /* Do not touch SerDes registers */ if (ah->config.pcie_powersave_enable == 2) return; /* Nothing to do on restore for 11N */ if (!restore) { if (AR_SREV_9280_20_OR_LATER(ah)) { /* * AR9280 2.0 or later chips use SerDes values from the * initvals.h initialized depending on chipset during * __ath9k_hw_init() */ for (i = 0; i < ah->iniPcieSerdes.ia_rows; i++) { REG_WRITE(ah, INI_RA(&ah->iniPcieSerdes, i, 0), INI_RA(&ah->iniPcieSerdes, i, 1)); } } else { ENABLE_REGWRITE_BUFFER(ah); REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00); REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924); /* RX shut off when elecidle is asserted */ REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039); REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824); REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579); /* * Ignore ah->ah_config.pcie_clock_req setting for * pre-AR9280 11n */ REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff); REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40); REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554); REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007); /* Load the new settings */ REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); REGWRITE_BUFFER_FLUSH(ah); DISABLE_REGWRITE_BUFFER(ah); } udelay(1000); } if (power_off) { /* clear bit 19 to disable L1 */ REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA); val = REG_READ(ah, AR_WA); /* * Set PCIe workaround bits * In AR9280 and AR9285, bit 14 in WA register (disable L1) * should only be set when device enters D3 and be * cleared when device comes back to D0. */ if (ah->config.pcie_waen) { if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE) val |= AR_WA_D3_L1_DISABLE; } else { if (((AR_SREV_9285(ah) || AR_SREV_9271(ah) || AR_SREV_9287(ah)) && (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) || (AR_SREV_9280(ah) && (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) { val |= AR_WA_D3_L1_DISABLE; } } if (AR_SREV_9280(ah) || AR_SREV_9285(ah) || AR_SREV_9287(ah)) { /* * Disable bit 6 and 7 before entering D3 to * prevent system hang. */ val &= ~(AR_WA_BIT6 | AR_WA_BIT7); } if (AR_SREV_9280(ah)) val |= AR_WA_BIT22; if (AR_SREV_9285E_20(ah)) val |= AR_WA_BIT23; REG_WRITE(ah, AR_WA, val); } else { if (ah->config.pcie_waen) { val = ah->config.pcie_waen; if (!power_off) val &= (~AR_WA_D3_L1_DISABLE); } else { if (AR_SREV_9285(ah) || AR_SREV_9271(ah) || AR_SREV_9287(ah)) { val = AR9285_WA_DEFAULT; if (!power_off) val &= (~AR_WA_D3_L1_DISABLE); } else if (AR_SREV_9280(ah)) { /* * For AR9280 chips, bit 22 of 0x4004 * needs to be set. 
*/ val = AR9280_WA_DEFAULT; if (!power_off) val &= (~AR_WA_D3_L1_DISABLE); } else { val = AR_WA_DEFAULT; } } /* WAR for ASPM system hang */ if (AR_SREV_9280(ah) || AR_SREV_9285(ah) || AR_SREV_9287(ah)) { val |= (AR_WA_BIT6 | AR_WA_BIT7); } if (AR_SREV_9285E_20(ah)) val |= AR_WA_BIT23; REG_WRITE(ah, AR_WA, val); /* set bit 19 to allow forcing of pcie core into L1 state */ REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA); } } static int ar9002_hw_get_radiorev(struct ath_hw *ah) { u32 val; int i; ENABLE_REGWRITE_BUFFER(ah); REG_WRITE(ah, AR_PHY(0x36), 0x00007058); for (i = 0; i < 8; i++) REG_WRITE(ah, AR_PHY(0x20), 0x00010000); REGWRITE_BUFFER_FLUSH(ah); DISABLE_REGWRITE_BUFFER(ah); val = (REG_READ(ah, AR_PHY(256)) >> 24) & 0xff; val = ((val & 0xf0) >> 4) | ((val & 0x0f) << 4); return ath9k_hw_reverse_bits(val, 8); } int ar9002_hw_rf_claim(struct ath_hw *ah) { u32 val; REG_WRITE(ah, AR_PHY(0), 0x00000007); val = ar9002_hw_get_radiorev(ah); switch (val & AR_RADIO_SREV_MAJOR) { case 0: val = AR_RAD5133_SREV_MAJOR; break; case AR_RAD5133_SREV_MAJOR: case AR_RAD5122_SREV_MAJOR: case AR_RAD2133_SREV_MAJOR: case AR_RAD2122_SREV_MAJOR: break; default: ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, "Radio Chip Rev 0x%02X not supported\n", val & AR_RADIO_SREV_MAJOR); return -EOPNOTSUPP; } ah->hw_version.analog5GhzRev = val; return 0; } void ar9002_hw_enable_async_fifo(struct ath_hw *ah) { if (AR_SREV_9287_13_OR_LATER(ah)) { REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3, AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL); REG_SET_BIT(ah, AR_PHY_MODE, AR_PHY_MODE_ASYNCFIFO); REG_CLR_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3, AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET); REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3, AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET); } } /* * If Async FIFO is enabled, the following counters change as MAC now runs * at 117 Mhz instead of 88/44MHz when async FIFO is disabled. * * The values below tested for ht40 2 chain. * Overwrite the delay/timeouts initialized in process ini. */ void ar9002_hw_update_async_fifo(struct ath_hw *ah) { if (AR_SREV_9287_13_OR_LATER(ah)) { REG_WRITE(ah, AR_D_GBL_IFS_SIFS, AR_D_GBL_IFS_SIFS_ASYNC_FIFO_DUR); REG_WRITE(ah, AR_D_GBL_IFS_SLOT, AR_D_GBL_IFS_SLOT_ASYNC_FIFO_DUR); REG_WRITE(ah, AR_D_GBL_IFS_EIFS, AR_D_GBL_IFS_EIFS_ASYNC_FIFO_DUR); REG_WRITE(ah, AR_TIME_OUT, AR_TIME_OUT_ACK_CTS_ASYNC_FIFO_DUR); REG_WRITE(ah, AR_USEC, AR_USEC_ASYNC_FIFO_DUR); REG_SET_BIT(ah, AR_MAC_PCU_LOGIC_ANALYZER, AR_MAC_PCU_LOGIC_ANALYZER_DISBUG20768); REG_RMW_FIELD(ah, AR_AHB_MODE, AR_AHB_CUSTOM_BURST_EN, AR_AHB_CUSTOM_BURST_ASYNC_FIFO_VAL); } } /* * We don't enable WEP aggregation on mac80211 but we keep this * around for HAL unification purposes. 
*/ void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah) { if (AR_SREV_9287_13_OR_LATER(ah)) { REG_SET_BIT(ah, AR_PCU_MISC_MODE2, AR_PCU_MISC_MODE2_ENABLE_AGGWEP); } } /* Sets up the AR5008/AR9001/AR9002 hardware family callbacks */ void ar9002_hw_attach_ops(struct ath_hw *ah) { struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah); struct ath_hw_ops *ops = ath9k_hw_ops(ah); priv_ops->init_mode_regs = ar9002_hw_init_mode_regs; priv_ops->init_mode_gain_regs = ar9002_hw_init_mode_gain_regs; priv_ops->macversion_supported = ar9002_hw_macversion_supported; ops->config_pci_powersave = ar9002_hw_configpcipowersave; ar5008_hw_attach_phy_ops(ah); if (AR_SREV_9280_10_OR_LATER(ah)) ar9002_hw_attach_phy_ops(ah); ar9002_hw_attach_calib_ops(ah); ar9002_hw_attach_mac_ops(ah); if (modparam_force_new_ani) ath9k_hw_attach_ani_ops_new(ah); else ath9k_hw_attach_ani_ops_old(ah); }
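/*
 * Editor's sketch (not in the original file): the attach routine above is
 * the only place chip generations are tested; everything afterwards goes
 * through the filled-in ops tables. A hedged caller-side illustration --
 * example_pci_suspend_path() is an invented name:
 */
#if 0
static void example_pci_suspend_path(struct ath_hw *ah)
{
	struct ath_hw_ops *ops = ath9k_hw_ops(ah);

	/* On AR9002-family parts this dispatches to
	 * ar9002_hw_configpcipowersave(); another family's attach
	 * routine would have installed its own implementation. */
	ops->config_pci_powersave(ah, 0 /* restore */, 1 /* power_off */);
}
#endif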
gpl-2.0
grancier/linux-3.10.33-chromeos
drivers/s390/cio/device.c
555
55354
/* * bus driver for ccw devices * * Copyright IBM Corp. 2002, 2008 * Author(s): Arnd Bergmann (arndb@de.ibm.com) * Cornelia Huck (cornelia.huck@de.ibm.com) * Martin Schwidefsky (schwidefsky@de.ibm.com) */ #define KMSG_COMPONENT "cio" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/device.h> #include <linux/workqueue.h> #include <linux/delay.h> #include <linux/timer.h> #include <linux/kernel_stat.h> #include <asm/ccwdev.h> #include <asm/cio.h> #include <asm/param.h> /* HZ */ #include <asm/cmb.h> #include <asm/isc.h> #include "chp.h" #include "cio.h" #include "cio_debug.h" #include "css.h" #include "device.h" #include "ioasm.h" #include "io_sch.h" #include "blacklist.h" #include "chsc.h" static struct timer_list recovery_timer; static DEFINE_SPINLOCK(recovery_lock); static int recovery_phase; static const unsigned long recovery_delay[] = { 3, 30, 300 }; static atomic_t ccw_device_init_count = ATOMIC_INIT(0); static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq); static struct bus_type ccw_bus_type; /******************* bus type handling ***********************/ /* The Linux driver model distinguishes between a bus type and * the bus itself. Of course we only have one channel * subsystem driver and one channel system per machine, but * we still use the abstraction. T.R. says it's a good idea. */ static int ccw_bus_match (struct device * dev, struct device_driver * drv) { struct ccw_device *cdev = to_ccwdev(dev); struct ccw_driver *cdrv = to_ccwdrv(drv); const struct ccw_device_id *ids = cdrv->ids, *found; if (!ids) return 0; found = ccw_device_id_match(ids, &cdev->id); if (!found) return 0; cdev->id.driver_info = found->driver_info; return 1; } /* Store modalias string delimited by prefix/suffix string into buffer with * specified size. Return length of resulting string (excluding trailing '\0') * even if string doesn't fit buffer (snprintf semantics). */ static int snprint_alias(char *buf, size_t size, struct ccw_device_id *id, const char *suffix) { int len; len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model); if (len > size) return len; buf += len; size -= len; if (id->dev_type != 0) len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type, id->dev_model, suffix); else len += snprintf(buf, size, "dtdm%s", suffix); return len; } /* Set up environment variables for ccw device uevent. Return 0 on success, * non-zero otherwise. 
*/ static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env) { struct ccw_device *cdev = to_ccwdev(dev); struct ccw_device_id *id = &(cdev->id); int ret; char modalias_buf[30]; /* CU_TYPE= */ ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type); if (ret) return ret; /* CU_MODEL= */ ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model); if (ret) return ret; /* The next two can be zero, that's ok for us */ /* DEV_TYPE= */ ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type); if (ret) return ret; /* DEV_MODEL= */ ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model); if (ret) return ret; /* MODALIAS= */ snprint_alias(modalias_buf, sizeof(modalias_buf), id, ""); ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf); return ret; } static void io_subchannel_irq(struct subchannel *); static int io_subchannel_probe(struct subchannel *); static int io_subchannel_remove(struct subchannel *); static void io_subchannel_shutdown(struct subchannel *); static int io_subchannel_sch_event(struct subchannel *, int); static int io_subchannel_chp_event(struct subchannel *, struct chp_link *, int); static void recovery_func(unsigned long data); static struct css_device_id io_subchannel_ids[] = { { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, }, { /* end of list */ }, }; MODULE_DEVICE_TABLE(css, io_subchannel_ids); static int io_subchannel_prepare(struct subchannel *sch) { struct ccw_device *cdev; /* * Don't allow suspend while a ccw device registration * is still outstanding. */ cdev = sch_get_cdev(sch); if (cdev && !device_is_registered(&cdev->dev)) return -EAGAIN; return 0; } static int io_subchannel_settle(void) { int ret; ret = wait_event_interruptible(ccw_device_init_wq, atomic_read(&ccw_device_init_count) == 0); if (ret) return -EINTR; flush_workqueue(cio_work_q); return 0; } static struct css_driver io_subchannel_driver = { .drv = { .owner = THIS_MODULE, .name = "io_subchannel", }, .subchannel_type = io_subchannel_ids, .irq = io_subchannel_irq, .sch_event = io_subchannel_sch_event, .chp_event = io_subchannel_chp_event, .probe = io_subchannel_probe, .remove = io_subchannel_remove, .shutdown = io_subchannel_shutdown, .prepare = io_subchannel_prepare, .settle = io_subchannel_settle, }; int __init io_subchannel_init(void) { int ret; setup_timer(&recovery_timer, recovery_func, 0); ret = bus_register(&ccw_bus_type); if (ret) return ret; ret = css_driver_register(&io_subchannel_driver); if (ret) bus_unregister(&ccw_bus_type); return ret; } /************************ device handling **************************/ /* * A ccw_device has some interfaces in sysfs in addition to the * standard ones. * The following entries are designed to export the information which * resided in 2.4 in /proc/subchannels. Subchannel and device number * are obvious, so they don't have an entry :) * TODO: Split chpids and pimpampom up? Where is "in use" in the tree? 
*/ static ssize_t chpids_show (struct device * dev, struct device_attribute *attr, char * buf) { struct subchannel *sch = to_subchannel(dev); struct chsc_ssd_info *ssd = &sch->ssd_info; ssize_t ret = 0; int chp; int mask; for (chp = 0; chp < 8; chp++) { mask = 0x80 >> chp; if (ssd->path_mask & mask) ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id); else ret += sprintf(buf + ret, "00 "); } ret += sprintf (buf+ret, "\n"); return min((ssize_t)PAGE_SIZE, ret); } static ssize_t pimpampom_show (struct device * dev, struct device_attribute *attr, char * buf) { struct subchannel *sch = to_subchannel(dev); struct pmcw *pmcw = &sch->schib.pmcw; return sprintf (buf, "%02x %02x %02x\n", pmcw->pim, pmcw->pam, pmcw->pom); } static ssize_t devtype_show (struct device *dev, struct device_attribute *attr, char *buf) { struct ccw_device *cdev = to_ccwdev(dev); struct ccw_device_id *id = &(cdev->id); if (id->dev_type != 0) return sprintf(buf, "%04x/%02x\n", id->dev_type, id->dev_model); else return sprintf(buf, "n/a\n"); } static ssize_t cutype_show (struct device *dev, struct device_attribute *attr, char *buf) { struct ccw_device *cdev = to_ccwdev(dev); struct ccw_device_id *id = &(cdev->id); return sprintf(buf, "%04x/%02x\n", id->cu_type, id->cu_model); } static ssize_t modalias_show (struct device *dev, struct device_attribute *attr, char *buf) { struct ccw_device *cdev = to_ccwdev(dev); struct ccw_device_id *id = &(cdev->id); int len; len = snprint_alias(buf, PAGE_SIZE, id, "\n"); return len > PAGE_SIZE ? PAGE_SIZE : len; } static ssize_t online_show (struct device *dev, struct device_attribute *attr, char *buf) { struct ccw_device *cdev = to_ccwdev(dev); return sprintf(buf, cdev->online ? "1\n" : "0\n"); } int ccw_device_is_orphan(struct ccw_device *cdev) { return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent)); } static void ccw_device_unregister(struct ccw_device *cdev) { if (device_is_registered(&cdev->dev)) { /* Undo device_add(). */ device_del(&cdev->dev); } if (cdev->private->flags.initialized) { cdev->private->flags.initialized = 0; /* Release reference from device_initialize(). */ put_device(&cdev->dev); } } static void io_subchannel_quiesce(struct subchannel *); /** * ccw_device_set_offline() - disable a ccw device for I/O * @cdev: target ccw device * * This function calls the driver's set_offline() function for @cdev, if * given, and then disables @cdev. * Returns: * %0 on success and a negative error value on failure. 
* Context: * enabled, ccw device lock not held */ int ccw_device_set_offline(struct ccw_device *cdev) { struct subchannel *sch; int ret, state; if (!cdev) return -ENODEV; if (!cdev->online || !cdev->drv) return -EINVAL; if (cdev->drv->set_offline) { ret = cdev->drv->set_offline(cdev); if (ret != 0) return ret; } cdev->online = 0; spin_lock_irq(cdev->ccwlock); sch = to_subchannel(cdev->dev.parent); /* Wait until a final state or DISCONNECTED is reached */ while (!dev_fsm_final_state(cdev) && cdev->private->state != DEV_STATE_DISCONNECTED) { spin_unlock_irq(cdev->ccwlock); wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) || cdev->private->state == DEV_STATE_DISCONNECTED)); spin_lock_irq(cdev->ccwlock); } do { ret = ccw_device_offline(cdev); if (!ret) break; CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device " "0.%x.%04x\n", ret, cdev->private->dev_id.ssid, cdev->private->dev_id.devno); if (ret != -EBUSY) goto error; state = cdev->private->state; spin_unlock_irq(cdev->ccwlock); io_subchannel_quiesce(sch); spin_lock_irq(cdev->ccwlock); cdev->private->state = state; } while (ret == -EBUSY); spin_unlock_irq(cdev->ccwlock); wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) || cdev->private->state == DEV_STATE_DISCONNECTED)); /* Inform the user if set offline failed. */ if (cdev->private->state == DEV_STATE_BOXED) { pr_warning("%s: The device entered boxed state while " "being set offline\n", dev_name(&cdev->dev)); } else if (cdev->private->state == DEV_STATE_NOT_OPER) { pr_warning("%s: The device stopped operating while " "being set offline\n", dev_name(&cdev->dev)); } /* Give up reference from ccw_device_set_online(). */ put_device(&cdev->dev); return 0; error: cdev->private->state = DEV_STATE_OFFLINE; dev_fsm_event(cdev, DEV_EVENT_NOTOPER); spin_unlock_irq(cdev->ccwlock); /* Give up reference from ccw_device_set_online(). */ put_device(&cdev->dev); return -ENODEV; } /** * ccw_device_set_online() - enable a ccw device for I/O * @cdev: target ccw device * * This function first enables @cdev and then calls the driver's set_online() * function for @cdev, if given. If set_online() returns an error, @cdev is * disabled again. * Returns: * %0 on success and a negative error value on failure. * Context: * enabled, ccw device lock not held */ int ccw_device_set_online(struct ccw_device *cdev) { int ret; int ret2; if (!cdev) return -ENODEV; if (cdev->online || !cdev->drv) return -EINVAL; /* Hold on to an extra reference while device is online. */ if (!get_device(&cdev->dev)) return -ENODEV; spin_lock_irq(cdev->ccwlock); ret = ccw_device_online(cdev); spin_unlock_irq(cdev->ccwlock); if (ret == 0) wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); else { CIO_MSG_EVENT(0, "ccw_device_online returned %d, " "device 0.%x.%04x\n", ret, cdev->private->dev_id.ssid, cdev->private->dev_id.devno); /* Give up online reference since onlining failed. */ put_device(&cdev->dev); return ret; } spin_lock_irq(cdev->ccwlock); /* Check if online processing was successful */ if ((cdev->private->state != DEV_STATE_ONLINE) && (cdev->private->state != DEV_STATE_W4SENSE)) { spin_unlock_irq(cdev->ccwlock); /* Inform the user that set online failed. 
*/ if (cdev->private->state == DEV_STATE_BOXED) { pr_warning("%s: Setting the device online failed " "because it is boxed\n", dev_name(&cdev->dev)); } else if (cdev->private->state == DEV_STATE_NOT_OPER) { pr_warning("%s: Setting the device online failed " "because it is not operational\n", dev_name(&cdev->dev)); } /* Give up online reference since onlining failed. */ put_device(&cdev->dev); return -ENODEV; } spin_unlock_irq(cdev->ccwlock); if (cdev->drv->set_online) ret = cdev->drv->set_online(cdev); if (ret) goto rollback; cdev->online = 1; return 0; rollback: spin_lock_irq(cdev->ccwlock); /* Wait until a final state or DISCONNECTED is reached */ while (!dev_fsm_final_state(cdev) && cdev->private->state != DEV_STATE_DISCONNECTED) { spin_unlock_irq(cdev->ccwlock); wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) || cdev->private->state == DEV_STATE_DISCONNECTED)); spin_lock_irq(cdev->ccwlock); } ret2 = ccw_device_offline(cdev); if (ret2) goto error; spin_unlock_irq(cdev->ccwlock); wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) || cdev->private->state == DEV_STATE_DISCONNECTED)); /* Give up online reference since onlining failed. */ put_device(&cdev->dev); return ret; error: CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, " "device 0.%x.%04x\n", ret2, cdev->private->dev_id.ssid, cdev->private->dev_id.devno); cdev->private->state = DEV_STATE_OFFLINE; spin_unlock_irq(cdev->ccwlock); /* Give up online reference since onlining failed. */ put_device(&cdev->dev); return ret; } static int online_store_handle_offline(struct ccw_device *cdev) { if (cdev->private->state == DEV_STATE_DISCONNECTED) { spin_lock_irq(cdev->ccwlock); ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL); spin_unlock_irq(cdev->ccwlock); return 0; } if (cdev->drv && cdev->drv->set_offline) return ccw_device_set_offline(cdev); return -EINVAL; } static int online_store_recog_and_online(struct ccw_device *cdev) { /* Do device recognition, if needed. */ if (cdev->private->state == DEV_STATE_BOXED) { spin_lock_irq(cdev->ccwlock); ccw_device_recognition(cdev); spin_unlock_irq(cdev->ccwlock); wait_event(cdev->private->wait_q, cdev->private->flags.recog_done); if (cdev->private->state != DEV_STATE_OFFLINE) /* recognition failed */ return -EAGAIN; } if (cdev->drv && cdev->drv->set_online) return ccw_device_set_online(cdev); return -EINVAL; } static int online_store_handle_online(struct ccw_device *cdev, int force) { int ret; ret = online_store_recog_and_online(cdev); if (ret && !force) return ret; if (force && cdev->private->state == DEV_STATE_BOXED) { ret = ccw_device_stlck(cdev); if (ret) return ret; if (cdev->id.cu_type == 0) cdev->private->state = DEV_STATE_NOT_OPER; ret = online_store_recog_and_online(cdev); if (ret) return ret; } return 0; } static ssize_t online_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ccw_device *cdev = to_ccwdev(dev); int force, ret; unsigned long i; /* Prevent conflict between multiple on-/offline processing requests. */ if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0) return -EAGAIN; /* Prevent conflict between internal I/Os and on-/offline processing. 
*/ if (!dev_fsm_final_state(cdev) && cdev->private->state != DEV_STATE_DISCONNECTED) { ret = -EAGAIN; goto out_onoff; } /* Prevent conflict between pending work and on-/offline processing.*/ if (work_pending(&cdev->private->todo_work)) { ret = -EAGAIN; goto out_onoff; } if (cdev->drv && !try_module_get(cdev->drv->driver.owner)) { ret = -EINVAL; goto out_onoff; } if (!strncmp(buf, "force\n", count)) { force = 1; i = 1; ret = 0; } else { force = 0; ret = strict_strtoul(buf, 16, &i); } if (ret) goto out; switch (i) { case 0: ret = online_store_handle_offline(cdev); break; case 1: ret = online_store_handle_online(cdev, force); break; default: ret = -EINVAL; } out: if (cdev->drv) module_put(cdev->drv->driver.owner); out_onoff: atomic_set(&cdev->private->onoff, 0); return (ret < 0) ? ret : count; } static ssize_t available_show (struct device *dev, struct device_attribute *attr, char *buf) { struct ccw_device *cdev = to_ccwdev(dev); struct subchannel *sch; if (ccw_device_is_orphan(cdev)) return sprintf(buf, "no device\n"); switch (cdev->private->state) { case DEV_STATE_BOXED: return sprintf(buf, "boxed\n"); case DEV_STATE_DISCONNECTED: case DEV_STATE_DISCONNECTED_SENSE_ID: case DEV_STATE_NOT_OPER: sch = to_subchannel(dev->parent); if (!sch->lpm) return sprintf(buf, "no path\n"); else return sprintf(buf, "no device\n"); default: /* All other states considered fine. */ return sprintf(buf, "good\n"); } } static ssize_t initiate_logging(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct subchannel *sch = to_subchannel(dev); int rc; rc = chsc_siosl(sch->schid); if (rc < 0) { pr_warning("Logging for subchannel 0.%x.%04x failed with " "errno=%d\n", sch->schid.ssid, sch->schid.sch_no, rc); return rc; } pr_notice("Logging for subchannel 0.%x.%04x was triggered\n", sch->schid.ssid, sch->schid.sch_no); return count; } static ssize_t vpm_show(struct device *dev, struct device_attribute *attr, char *buf) { struct subchannel *sch = to_subchannel(dev); return sprintf(buf, "%02x\n", sch->vpm); } static DEVICE_ATTR(chpids, 0444, chpids_show, NULL); static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL); static DEVICE_ATTR(devtype, 0444, devtype_show, NULL); static DEVICE_ATTR(cutype, 0444, cutype_show, NULL); static DEVICE_ATTR(modalias, 0444, modalias_show, NULL); static DEVICE_ATTR(online, 0644, online_show, online_store); static DEVICE_ATTR(availability, 0444, available_show, NULL); static DEVICE_ATTR(logging, 0200, NULL, initiate_logging); static DEVICE_ATTR(vpm, 0444, vpm_show, NULL); static struct attribute *io_subchannel_attrs[] = { &dev_attr_chpids.attr, &dev_attr_pimpampom.attr, &dev_attr_logging.attr, &dev_attr_vpm.attr, NULL, }; static struct attribute_group io_subchannel_attr_group = { .attrs = io_subchannel_attrs, }; static struct attribute * ccwdev_attrs[] = { &dev_attr_devtype.attr, &dev_attr_cutype.attr, &dev_attr_modalias.attr, &dev_attr_online.attr, &dev_attr_cmb_enable.attr, &dev_attr_availability.attr, NULL, }; static struct attribute_group ccwdev_attr_group = { .attrs = ccwdev_attrs, }; static const struct attribute_group *ccwdev_attr_groups[] = { &ccwdev_attr_group, NULL, }; /* this is a simple abstraction for device_register that sets the * correct bus type and adds the bus specific files */ static int ccw_device_register(struct ccw_device *cdev) { struct device *dev = &cdev->dev; int ret; dev->bus = &ccw_bus_type; ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid, cdev->private->dev_id.devno); if (ret) return ret; return 
device_add(dev); } static int match_dev_id(struct device *dev, void *data) { struct ccw_device *cdev = to_ccwdev(dev); struct ccw_dev_id *dev_id = data; return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id); } /** * get_ccwdev_by_dev_id() - obtain device from a ccw device id * @dev_id: id of the device to be searched * * This function searches all devices attached to the ccw bus for a device * matching @dev_id. * Returns: * If a device is found its reference count is increased and returned; * else %NULL is returned. */ struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id) { struct device *dev; dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id); return dev ? to_ccwdev(dev) : NULL; } EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id); static void ccw_device_do_unbind_bind(struct ccw_device *cdev) { int ret; if (device_is_registered(&cdev->dev)) { device_release_driver(&cdev->dev); ret = device_attach(&cdev->dev); WARN_ON(ret == -ENODEV); } } static void ccw_device_release(struct device *dev) { struct ccw_device *cdev; cdev = to_ccwdev(dev); /* Release reference of parent subchannel. */ put_device(cdev->dev.parent); kfree(cdev->private); kfree(cdev); } static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch) { struct ccw_device *cdev; cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); if (cdev) { cdev->private = kzalloc(sizeof(struct ccw_device_private), GFP_KERNEL | GFP_DMA); if (cdev->private) return cdev; } kfree(cdev); return ERR_PTR(-ENOMEM); } static void ccw_device_todo(struct work_struct *work); static int io_subchannel_initialize_dev(struct subchannel *sch, struct ccw_device *cdev) { cdev->private->cdev = cdev; cdev->private->int_class = IRQIO_CIO; atomic_set(&cdev->private->onoff, 0); cdev->dev.parent = &sch->dev; cdev->dev.release = ccw_device_release; INIT_WORK(&cdev->private->todo_work, ccw_device_todo); cdev->dev.groups = ccwdev_attr_groups; /* Do first half of device_register. */ device_initialize(&cdev->dev); if (!get_device(&sch->dev)) { /* Release reference from device_initialize(). */ put_device(&cdev->dev); return -ENODEV; } cdev->private->flags.initialized = 1; return 0; } static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch) { struct ccw_device *cdev; int ret; cdev = io_subchannel_allocate_dev(sch); if (!IS_ERR(cdev)) { ret = io_subchannel_initialize_dev(sch, cdev); if (ret) cdev = ERR_PTR(ret); } return cdev; } static void io_subchannel_recog(struct ccw_device *, struct subchannel *); static void sch_create_and_recog_new_device(struct subchannel *sch) { struct ccw_device *cdev; /* Need to allocate a new ccw device. */ cdev = io_subchannel_create_ccwdev(sch); if (IS_ERR(cdev)) { /* OK, we did everything we could... */ css_sch_device_unregister(sch); return; } /* Start recognition for the new ccw device. */ io_subchannel_recog(cdev, sch); } /* * Register recognized device. */ static void io_subchannel_register(struct ccw_device *cdev) { struct subchannel *sch; int ret, adjust_init_count = 1; unsigned long flags; sch = to_subchannel(cdev->dev.parent); /* * Check if subchannel is still registered. It may have become * unregistered if a machine check hit us after finishing * device recognition but before the register work could be * queued. */ if (!device_is_registered(&sch->dev)) goto out_err; css_update_ssd_info(sch); /* * io_subchannel_register() will also be called after device * recognition has been done for a boxed device (which will already * be registered). 
We need to reprobe since we may now have sense id * information. */ if (device_is_registered(&cdev->dev)) { if (!cdev->drv) { ret = device_reprobe(&cdev->dev); if (ret) /* We can't do much here. */ CIO_MSG_EVENT(0, "device_reprobe() returned" " %d for 0.%x.%04x\n", ret, cdev->private->dev_id.ssid, cdev->private->dev_id.devno); } adjust_init_count = 0; goto out; } /* * Now we know this subchannel will stay, we can throw * our delayed uevent. */ dev_set_uevent_suppress(&sch->dev, 0); kobject_uevent(&sch->dev.kobj, KOBJ_ADD); /* make it known to the system */ ret = ccw_device_register(cdev); if (ret) { CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n", cdev->private->dev_id.ssid, cdev->private->dev_id.devno, ret); spin_lock_irqsave(sch->lock, flags); sch_set_cdev(sch, NULL); spin_unlock_irqrestore(sch->lock, flags); /* Release initial device reference. */ put_device(&cdev->dev); goto out_err; } out: cdev->private->flags.recog_done = 1; wake_up(&cdev->private->wait_q); out_err: if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count)) wake_up(&ccw_device_init_wq); } static void ccw_device_call_sch_unregister(struct ccw_device *cdev) { struct subchannel *sch; /* Get subchannel reference for local processing. */ if (!get_device(cdev->dev.parent)) return; sch = to_subchannel(cdev->dev.parent); css_sch_device_unregister(sch); /* Release subchannel reference for local processing. */ put_device(&sch->dev); } /* * subchannel recognition done. Called from the state machine. */ void io_subchannel_recog_done(struct ccw_device *cdev) { if (css_init_done == 0) { cdev->private->flags.recog_done = 1; return; } switch (cdev->private->state) { case DEV_STATE_BOXED: /* Device did not respond in time. */ case DEV_STATE_NOT_OPER: cdev->private->flags.recog_done = 1; /* Remove device found not operational. */ ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); if (atomic_dec_and_test(&ccw_device_init_count)) wake_up(&ccw_device_init_wq); break; case DEV_STATE_OFFLINE: /* * We can't register the device in interrupt context so * we schedule a work item. */ ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER); break; } } static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch) { struct ccw_device_private *priv; cdev->ccwlock = sch->lock; /* Init private data. */ priv = cdev->private; priv->dev_id.devno = sch->schib.pmcw.dev; priv->dev_id.ssid = sch->schid.ssid; priv->schid = sch->schid; priv->state = DEV_STATE_NOT_OPER; INIT_LIST_HEAD(&priv->cmb_list); init_waitqueue_head(&priv->wait_q); init_timer(&priv->timer); /* Increase counter of devices currently in recognition. */ atomic_inc(&ccw_device_init_count); /* Start async. device sensing. */ spin_lock_irq(sch->lock); sch_set_cdev(sch, cdev); ccw_device_recognition(cdev); spin_unlock_irq(sch->lock); } static int ccw_device_move_to_sch(struct ccw_device *cdev, struct subchannel *sch) { struct subchannel *old_sch; int rc, old_enabled = 0; old_sch = to_subchannel(cdev->dev.parent); /* Obtain child reference for new parent. */ if (!get_device(&sch->dev)) return -ENODEV; if (!sch_is_pseudo_sch(old_sch)) { spin_lock_irq(old_sch->lock); old_enabled = old_sch->schib.pmcw.ena; rc = 0; if (old_enabled) rc = cio_disable_subchannel(old_sch); spin_unlock_irq(old_sch->lock); if (rc == -EBUSY) { /* Release child reference for new parent. 
*/ put_device(&sch->dev); return rc; } } mutex_lock(&sch->reg_mutex); rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV); mutex_unlock(&sch->reg_mutex); if (rc) { CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n", cdev->private->dev_id.ssid, cdev->private->dev_id.devno, sch->schid.ssid, sch->schib.pmcw.dev, rc); if (old_enabled) { /* Try to reenable the old subchannel. */ spin_lock_irq(old_sch->lock); cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch); spin_unlock_irq(old_sch->lock); } /* Release child reference for new parent. */ put_device(&sch->dev); return rc; } /* Clean up old subchannel. */ if (!sch_is_pseudo_sch(old_sch)) { spin_lock_irq(old_sch->lock); sch_set_cdev(old_sch, NULL); spin_unlock_irq(old_sch->lock); css_schedule_eval(old_sch->schid); } /* Release child reference for old parent. */ put_device(&old_sch->dev); /* Initialize new subchannel. */ spin_lock_irq(sch->lock); cdev->private->schid = sch->schid; cdev->ccwlock = sch->lock; if (!sch_is_pseudo_sch(sch)) sch_set_cdev(sch, cdev); spin_unlock_irq(sch->lock); if (!sch_is_pseudo_sch(sch)) css_update_ssd_info(sch); return 0; } static int ccw_device_move_to_orph(struct ccw_device *cdev) { struct subchannel *sch = to_subchannel(cdev->dev.parent); struct channel_subsystem *css = to_css(sch->dev.parent); return ccw_device_move_to_sch(cdev, css->pseudo_subchannel); } static void io_subchannel_irq(struct subchannel *sch) { struct ccw_device *cdev; cdev = sch_get_cdev(sch); CIO_TRACE_EVENT(6, "IRQ"); CIO_TRACE_EVENT(6, dev_name(&sch->dev)); if (cdev) dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); else inc_irq_stat(IRQIO_CIO); } void io_subchannel_init_config(struct subchannel *sch) { memset(&sch->config, 0, sizeof(sch->config)); sch->config.csense = 1; } static void io_subchannel_init_fields(struct subchannel *sch) { if (cio_is_console(sch->schid)) sch->opm = 0xff; else sch->opm = chp_get_sch_opm(sch); sch->lpm = sch->schib.pmcw.pam & sch->opm; sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC; CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X" " - PIM = %02X, PAM = %02X, POM = %02X\n", sch->schib.pmcw.dev, sch->schid.ssid, sch->schid.sch_no, sch->schib.pmcw.pim, sch->schib.pmcw.pam, sch->schib.pmcw.pom); io_subchannel_init_config(sch); } /* * Note: We always return 0 so that we bind to the device even on error. * This is needed so that our remove function is called on unregister. */ static int io_subchannel_probe(struct subchannel *sch) { struct io_subchannel_private *io_priv; struct ccw_device *cdev; int rc; if (cio_is_console(sch->schid)) { rc = sysfs_create_group(&sch->dev.kobj, &io_subchannel_attr_group); if (rc) CIO_MSG_EVENT(0, "Failed to create io subchannel " "attributes for subchannel " "0.%x.%04x (rc=%d)\n", sch->schid.ssid, sch->schid.sch_no, rc); /* * The console subchannel already has an associated ccw_device. * Throw the delayed uevent for the subchannel, register * the ccw_device and exit. */ dev_set_uevent_suppress(&sch->dev, 0); kobject_uevent(&sch->dev.kobj, KOBJ_ADD); cdev = sch_get_cdev(sch); rc = ccw_device_register(cdev); if (rc) { /* Release online reference. */ put_device(&cdev->dev); goto out_schedule; } if (atomic_dec_and_test(&ccw_device_init_count)) wake_up(&ccw_device_init_wq); return 0; } io_subchannel_init_fields(sch); rc = cio_commit_config(sch); if (rc) goto out_schedule; rc = sysfs_create_group(&sch->dev.kobj, &io_subchannel_attr_group); if (rc) goto out_schedule; /* Allocate I/O subchannel private data. 
*/ io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA); if (!io_priv) goto out_schedule; set_io_private(sch, io_priv); css_schedule_eval(sch->schid); return 0; out_schedule: spin_lock_irq(sch->lock); css_sched_sch_todo(sch, SCH_TODO_UNREG); spin_unlock_irq(sch->lock); return 0; } static int io_subchannel_remove (struct subchannel *sch) { struct io_subchannel_private *io_priv = to_io_private(sch); struct ccw_device *cdev; cdev = sch_get_cdev(sch); if (!cdev) goto out_free; io_subchannel_quiesce(sch); /* Set ccw device to not operational and drop reference. */ spin_lock_irq(cdev->ccwlock); sch_set_cdev(sch, NULL); set_io_private(sch, NULL); cdev->private->state = DEV_STATE_NOT_OPER; spin_unlock_irq(cdev->ccwlock); ccw_device_unregister(cdev); out_free: kfree(io_priv); sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); return 0; } static void io_subchannel_verify(struct subchannel *sch) { struct ccw_device *cdev; cdev = sch_get_cdev(sch); if (cdev) dev_fsm_event(cdev, DEV_EVENT_VERIFY); } static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask) { struct ccw_device *cdev; cdev = sch_get_cdev(sch); if (!cdev) return; if (cio_update_schib(sch)) goto err; /* Check for I/O on path. */ if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask) goto out; if (cdev->private->state == DEV_STATE_ONLINE) { ccw_device_kill_io(cdev); goto out; } if (cio_clear(sch)) goto err; out: /* Trigger path verification. */ dev_fsm_event(cdev, DEV_EVENT_VERIFY); return; err: dev_fsm_event(cdev, DEV_EVENT_NOTOPER); } static int io_subchannel_chp_event(struct subchannel *sch, struct chp_link *link, int event) { struct ccw_device *cdev = sch_get_cdev(sch); int mask; mask = chp_ssd_get_mask(&sch->ssd_info, link); if (!mask) return 0; switch (event) { case CHP_VARY_OFF: sch->opm &= ~mask; sch->lpm &= ~mask; if (cdev) cdev->private->path_gone_mask |= mask; io_subchannel_terminate_path(sch, mask); break; case CHP_VARY_ON: sch->opm |= mask; sch->lpm |= mask; if (cdev) cdev->private->path_new_mask |= mask; io_subchannel_verify(sch); break; case CHP_OFFLINE: if (cio_update_schib(sch)) return -ENODEV; if (cdev) cdev->private->path_gone_mask |= mask; io_subchannel_terminate_path(sch, mask); break; case CHP_ONLINE: if (cio_update_schib(sch)) return -ENODEV; sch->lpm |= mask & sch->opm; if (cdev) cdev->private->path_new_mask |= mask; io_subchannel_verify(sch); break; } return 0; } static void io_subchannel_quiesce(struct subchannel *sch) { struct ccw_device *cdev; int ret; spin_lock_irq(sch->lock); cdev = sch_get_cdev(sch); if (cio_is_console(sch->schid)) goto out_unlock; if (!sch->schib.pmcw.ena) goto out_unlock; ret = cio_disable_subchannel(sch); if (ret != -EBUSY) goto out_unlock; if (cdev->handler) cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO)); while (ret == -EBUSY) { cdev->private->state = DEV_STATE_QUIESCE; cdev->private->iretry = 255; ret = ccw_device_cancel_halt_clear(cdev); if (ret == -EBUSY) { ccw_device_set_timeout(cdev, HZ/10); spin_unlock_irq(sch->lock); wait_event(cdev->private->wait_q, cdev->private->state != DEV_STATE_QUIESCE); spin_lock_irq(sch->lock); } ret = cio_disable_subchannel(sch); } out_unlock: spin_unlock_irq(sch->lock); } static void io_subchannel_shutdown(struct subchannel *sch) { io_subchannel_quiesce(sch); } static int device_is_disconnected(struct ccw_device *cdev) { if (!cdev) return 0; return (cdev->private->state == DEV_STATE_DISCONNECTED || cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID); } static int recovery_check(struct device 
*dev, void *data) { struct ccw_device *cdev = to_ccwdev(dev); int *redo = data; spin_lock_irq(cdev->ccwlock); switch (cdev->private->state) { case DEV_STATE_DISCONNECTED: CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n", cdev->private->dev_id.ssid, cdev->private->dev_id.devno); dev_fsm_event(cdev, DEV_EVENT_VERIFY); *redo = 1; break; case DEV_STATE_DISCONNECTED_SENSE_ID: *redo = 1; break; } spin_unlock_irq(cdev->ccwlock); return 0; } static void recovery_work_func(struct work_struct *unused) { int redo = 0; bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check); if (redo) { spin_lock_irq(&recovery_lock); if (!timer_pending(&recovery_timer)) { if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1) recovery_phase++; mod_timer(&recovery_timer, jiffies + recovery_delay[recovery_phase] * HZ); } spin_unlock_irq(&recovery_lock); } else CIO_MSG_EVENT(4, "recovery: end\n"); } static DECLARE_WORK(recovery_work, recovery_work_func); static void recovery_func(unsigned long data) { /* * We can't do our recovery in softirq context and it's not * performance critical, so we schedule it. */ schedule_work(&recovery_work); } static void ccw_device_schedule_recovery(void) { unsigned long flags; CIO_MSG_EVENT(4, "recovery: schedule\n"); spin_lock_irqsave(&recovery_lock, flags); if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) { recovery_phase = 0; mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ); } spin_unlock_irqrestore(&recovery_lock, flags); } static int purge_fn(struct device *dev, void *data) { struct ccw_device *cdev = to_ccwdev(dev); struct ccw_dev_id *id = &cdev->private->dev_id; spin_lock_irq(cdev->ccwlock); if (is_blacklisted(id->ssid, id->devno) && (cdev->private->state == DEV_STATE_OFFLINE) && (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) { CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid, id->devno); ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); atomic_set(&cdev->private->onoff, 0); } spin_unlock_irq(cdev->ccwlock); /* Abort loop in case of pending signal. */ if (signal_pending(current)) return -EINTR; return 0; } /** * ccw_purge_blacklisted - purge unused, blacklisted devices * * Unregister all ccw devices that are offline and on the blacklist. */ int ccw_purge_blacklisted(void) { CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n"); bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn); return 0; } void ccw_device_set_disconnected(struct ccw_device *cdev) { if (!cdev) return; ccw_device_set_timeout(cdev, 0); cdev->private->flags.fake_irb = 0; cdev->private->state = DEV_STATE_DISCONNECTED; if (cdev->online) ccw_device_schedule_recovery(); } void ccw_device_set_notoper(struct ccw_device *cdev) { struct subchannel *sch = to_subchannel(cdev->dev.parent); CIO_TRACE_EVENT(2, "notoper"); CIO_TRACE_EVENT(2, dev_name(&sch->dev)); ccw_device_set_timeout(cdev, 0); cio_disable_subchannel(sch); cdev->private->state = DEV_STATE_NOT_OPER; } enum io_sch_action { IO_SCH_UNREG, IO_SCH_ORPH_UNREG, IO_SCH_ATTACH, IO_SCH_UNREG_ATTACH, IO_SCH_ORPH_ATTACH, IO_SCH_REPROBE, IO_SCH_VERIFY, IO_SCH_DISC, IO_SCH_NOP, }; static enum io_sch_action sch_get_action(struct subchannel *sch) { struct ccw_device *cdev; cdev = sch_get_cdev(sch); if (cio_update_schib(sch)) { /* Not operational. */ if (!cdev) return IO_SCH_UNREG; if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK) return IO_SCH_UNREG; return IO_SCH_ORPH_UNREG; } /* Operational. 
*/ if (!cdev) return IO_SCH_ATTACH; if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK) return IO_SCH_UNREG_ATTACH; return IO_SCH_ORPH_ATTACH; } if ((sch->schib.pmcw.pam & sch->opm) == 0) { if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) return IO_SCH_UNREG; return IO_SCH_DISC; } if (device_is_disconnected(cdev)) return IO_SCH_REPROBE; if (cdev->online && !cdev->private->flags.resuming) return IO_SCH_VERIFY; if (cdev->private->state == DEV_STATE_NOT_OPER) return IO_SCH_UNREG_ATTACH; return IO_SCH_NOP; } /** * io_subchannel_sch_event - process subchannel event * @sch: subchannel * @process: non-zero if function is called in process context * * An unspecified event occurred for this subchannel. Adjust data according * to the current operational state of the subchannel and device. Return * zero when the event has been handled sufficiently or -EAGAIN when this * function should be called again in process context. */ static int io_subchannel_sch_event(struct subchannel *sch, int process) { unsigned long flags; struct ccw_device *cdev; struct ccw_dev_id dev_id; enum io_sch_action action; int rc = -EAGAIN; spin_lock_irqsave(sch->lock, flags); if (!device_is_registered(&sch->dev)) goto out_unlock; if (work_pending(&sch->todo_work)) goto out_unlock; cdev = sch_get_cdev(sch); if (cdev && work_pending(&cdev->private->todo_work)) goto out_unlock; action = sch_get_action(sch); CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n", sch->schid.ssid, sch->schid.sch_no, process, action); /* Perform immediate actions while holding the lock. */ switch (action) { case IO_SCH_REPROBE: /* Trigger device recognition. */ ccw_device_trigger_reprobe(cdev); rc = 0; goto out_unlock; case IO_SCH_VERIFY: /* Trigger path verification. */ io_subchannel_verify(sch); rc = 0; goto out_unlock; case IO_SCH_DISC: ccw_device_set_disconnected(cdev); rc = 0; goto out_unlock; case IO_SCH_ORPH_UNREG: case IO_SCH_ORPH_ATTACH: ccw_device_set_disconnected(cdev); break; case IO_SCH_UNREG_ATTACH: case IO_SCH_UNREG: if (!cdev) break; if (cdev->private->state == DEV_STATE_SENSE_ID) { /* * Note: delayed work triggered by this event * and repeated calls to sch_event are synchronized * by the above check for work_pending(cdev). */ dev_fsm_event(cdev, DEV_EVENT_NOTOPER); } else ccw_device_set_notoper(cdev); break; case IO_SCH_NOP: rc = 0; goto out_unlock; default: break; } spin_unlock_irqrestore(sch->lock, flags); /* All other actions require process context. */ if (!process) goto out; /* Handle attached ccw device. */ switch (action) { case IO_SCH_ORPH_UNREG: case IO_SCH_ORPH_ATTACH: /* Move ccw device to orphanage. */ rc = ccw_device_move_to_orph(cdev); if (rc) goto out; break; case IO_SCH_UNREG_ATTACH: spin_lock_irqsave(sch->lock, flags); if (cdev->private->flags.resuming) { /* Device will be handled later. */ rc = 0; goto out_unlock; } sch_set_cdev(sch, NULL); spin_unlock_irqrestore(sch->lock, flags); /* Unregister ccw device. */ ccw_device_unregister(cdev); break; default: break; } /* Handle subchannel. 
*/ switch (action) { case IO_SCH_ORPH_UNREG: case IO_SCH_UNREG: if (!cdev || !cdev->private->flags.resuming) css_sch_device_unregister(sch); break; case IO_SCH_ORPH_ATTACH: case IO_SCH_UNREG_ATTACH: case IO_SCH_ATTACH: dev_id.ssid = sch->schid.ssid; dev_id.devno = sch->schib.pmcw.dev; cdev = get_ccwdev_by_dev_id(&dev_id); if (!cdev) { sch_create_and_recog_new_device(sch); break; } rc = ccw_device_move_to_sch(cdev, sch); if (rc) { /* Release reference from get_ccwdev_by_dev_id() */ put_device(&cdev->dev); goto out; } spin_lock_irqsave(sch->lock, flags); ccw_device_trigger_reprobe(cdev); spin_unlock_irqrestore(sch->lock, flags); /* Release reference from get_ccwdev_by_dev_id() */ put_device(&cdev->dev); break; default: break; } return 0; out_unlock: spin_unlock_irqrestore(sch->lock, flags); out: return rc; } #ifdef CONFIG_CCW_CONSOLE static int ccw_device_console_enable(struct ccw_device *cdev, struct subchannel *sch) { int rc; io_subchannel_init_fields(sch); rc = cio_commit_config(sch); if (rc) return rc; sch->driver = &io_subchannel_driver; sch_set_cdev(sch, cdev); io_subchannel_recog(cdev, sch); /* Now wait for the async. recognition to come to an end. */ spin_lock_irq(cdev->ccwlock); while (!dev_fsm_final_state(cdev)) ccw_device_wait_idle(cdev); /* Hold on to an extra reference while device is online. */ get_device(&cdev->dev); rc = ccw_device_online(cdev); if (rc) goto out_unlock; while (!dev_fsm_final_state(cdev)) ccw_device_wait_idle(cdev); if (cdev->private->state == DEV_STATE_ONLINE) cdev->online = 1; else rc = -EIO; out_unlock: spin_unlock_irq(cdev->ccwlock); if (rc) /* Give up online reference since onlining failed. */ put_device(&cdev->dev); return rc; } struct ccw_device *ccw_device_probe_console(void) { struct io_subchannel_private *io_priv; struct ccw_device *cdev; struct subchannel *sch; int ret; sch = cio_probe_console(); if (IS_ERR(sch)) return ERR_CAST(sch); io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA); if (!io_priv) { put_device(&sch->dev); return ERR_PTR(-ENOMEM); } cdev = io_subchannel_create_ccwdev(sch); if (IS_ERR(cdev)) { put_device(&sch->dev); kfree(io_priv); return cdev; } set_io_private(sch, io_priv); ret = ccw_device_console_enable(cdev, sch); if (ret) { set_io_private(sch, NULL); put_device(&sch->dev); put_device(&cdev->dev); kfree(io_priv); return ERR_PTR(ret); } return cdev; } /** * ccw_device_wait_idle() - busy wait for device to become idle * @cdev: ccw device * * Poll until activity control is zero, that is, no function or data * transfer is pending/active. * Called with device lock being held. */ void ccw_device_wait_idle(struct ccw_device *cdev) { struct subchannel *sch = to_subchannel(cdev->dev.parent); while (1) { cio_tsch(sch); if (sch->schib.scsw.cmd.actl == 0) break; udelay_simple(100); } } static int ccw_device_pm_restore(struct device *dev); int ccw_device_force_console(struct ccw_device *cdev) { return ccw_device_pm_restore(&cdev->dev); } EXPORT_SYMBOL_GPL(ccw_device_force_console); #endif /* * get ccw_device matching the busid, but only if owned by cdrv */ static int __ccwdev_check_busid(struct device *dev, void *id) { char *bus_id; bus_id = id; return (strcmp(bus_id, dev_name(dev)) == 0); } /** * get_ccwdev_by_busid() - obtain device from a bus id * @cdrv: driver the device is owned by * @bus_id: bus id of the device to be searched * * This function searches all devices owned by @cdrv for a device with a bus * id matching @bus_id. 
* Returns: * If a match is found, the reference count of the found device is increased * and it is returned; else %NULL is returned. */ struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv, const char *bus_id) { struct device *dev; dev = driver_find_device(&cdrv->driver, NULL, (void *)bus_id, __ccwdev_check_busid); return dev ? to_ccwdev(dev) : NULL; } /************************** device driver handling ************************/ /* This is the implementation of the ccw_driver class. The probe, remove * and release methods are initially very similar to the device_driver * implementations, with the difference that they have ccw_device * arguments. * * A ccw driver also contains the information that is needed for * device matching. */ static int ccw_device_probe (struct device *dev) { struct ccw_device *cdev = to_ccwdev(dev); struct ccw_driver *cdrv = to_ccwdrv(dev->driver); int ret; cdev->drv = cdrv; /* to let the driver call _set_online */ /* Note: we interpret class 0 in this context as an uninitialized * field since it translates to a non-I/O interrupt class. */ if (cdrv->int_class != 0) cdev->private->int_class = cdrv->int_class; else cdev->private->int_class = IRQIO_CIO; ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV; if (ret) { cdev->drv = NULL; cdev->private->int_class = IRQIO_CIO; return ret; } return 0; } static int ccw_device_remove (struct device *dev) { struct ccw_device *cdev = to_ccwdev(dev); struct ccw_driver *cdrv = cdev->drv; int ret; if (cdrv->remove) cdrv->remove(cdev); if (cdev->online) { cdev->online = 0; spin_lock_irq(cdev->ccwlock); ret = ccw_device_offline(cdev); spin_unlock_irq(cdev->ccwlock); if (ret == 0) wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); else CIO_MSG_EVENT(0, "ccw_device_offline returned %d, " "device 0.%x.%04x\n", ret, cdev->private->dev_id.ssid, cdev->private->dev_id.devno); /* Give up reference obtained in ccw_device_set_online(). */ put_device(&cdev->dev); } ccw_device_set_timeout(cdev, 0); cdev->drv = NULL; cdev->private->int_class = IRQIO_CIO; return 0; } static void ccw_device_shutdown(struct device *dev) { struct ccw_device *cdev; cdev = to_ccwdev(dev); if (cdev->drv && cdev->drv->shutdown) cdev->drv->shutdown(cdev); disable_cmf(cdev); } static int ccw_device_pm_prepare(struct device *dev) { struct ccw_device *cdev = to_ccwdev(dev); if (work_pending(&cdev->private->todo_work)) return -EAGAIN; /* Fail while device is being set online/offline. */ if (atomic_read(&cdev->private->onoff)) return -EAGAIN; if (cdev->online && cdev->drv && cdev->drv->prepare) return cdev->drv->prepare(cdev); return 0; } static void ccw_device_pm_complete(struct device *dev) { struct ccw_device *cdev = to_ccwdev(dev); if (cdev->online && cdev->drv && cdev->drv->complete) cdev->drv->complete(cdev); } static int ccw_device_pm_freeze(struct device *dev) { struct ccw_device *cdev = to_ccwdev(dev); struct subchannel *sch = to_subchannel(cdev->dev.parent); int ret, cm_enabled; /* Fail suspend while device is in transitional state. */ if (!dev_fsm_final_state(cdev)) return -EAGAIN; if (!cdev->online) return 0; if (cdev->drv && cdev->drv->freeze) { ret = cdev->drv->freeze(cdev); if (ret) return ret; } spin_lock_irq(sch->lock); cm_enabled = cdev->private->cmb != NULL; spin_unlock_irq(sch->lock); if (cm_enabled) { /* Don't let the css write to memory. */ ret = ccw_set_cmf(cdev, 0); if (ret) return ret; } /* From here on, disallow device driver I/O.
*/ spin_lock_irq(sch->lock); ret = cio_disable_subchannel(sch); spin_unlock_irq(sch->lock); return ret; } static int ccw_device_pm_thaw(struct device *dev) { struct ccw_device *cdev = to_ccwdev(dev); struct subchannel *sch = to_subchannel(cdev->dev.parent); int ret, cm_enabled; if (!cdev->online) return 0; spin_lock_irq(sch->lock); /* Allow device driver I/O again. */ ret = cio_enable_subchannel(sch, (u32)(addr_t)sch); cm_enabled = cdev->private->cmb != NULL; spin_unlock_irq(sch->lock); if (ret) return ret; if (cm_enabled) { ret = ccw_set_cmf(cdev, 1); if (ret) return ret; } if (cdev->drv && cdev->drv->thaw) ret = cdev->drv->thaw(cdev); return ret; } static void __ccw_device_pm_restore(struct ccw_device *cdev) { struct subchannel *sch = to_subchannel(cdev->dev.parent); spin_lock_irq(sch->lock); if (cio_is_console(sch->schid)) { cio_enable_subchannel(sch, (u32)(addr_t)sch); goto out_unlock; } /* * While we were sleeping, devices may have gone or become * available again. Kick re-detection. */ cdev->private->flags.resuming = 1; cdev->private->path_new_mask = LPM_ANYPATH; css_sched_sch_todo(sch, SCH_TODO_EVAL); spin_unlock_irq(sch->lock); css_wait_for_slow_path(); /* cdev may have been moved to a different subchannel. */ sch = to_subchannel(cdev->dev.parent); spin_lock_irq(sch->lock); if (cdev->private->state != DEV_STATE_ONLINE && cdev->private->state != DEV_STATE_OFFLINE) goto out_unlock; ccw_device_recognition(cdev); spin_unlock_irq(sch->lock); wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) || cdev->private->state == DEV_STATE_DISCONNECTED); spin_lock_irq(sch->lock); out_unlock: cdev->private->flags.resuming = 0; spin_unlock_irq(sch->lock); } static int resume_handle_boxed(struct ccw_device *cdev) { cdev->private->state = DEV_STATE_BOXED; if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK) return 0; ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); return -ENODEV; } static int resume_handle_disc(struct ccw_device *cdev) { cdev->private->state = DEV_STATE_DISCONNECTED; if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK) return 0; ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); return -ENODEV; } static int ccw_device_pm_restore(struct device *dev) { struct ccw_device *cdev = to_ccwdev(dev); struct subchannel *sch; int ret = 0; __ccw_device_pm_restore(cdev); sch = to_subchannel(cdev->dev.parent); spin_lock_irq(sch->lock); if (cio_is_console(sch->schid)) goto out_restore; /* check recognition results */ switch (cdev->private->state) { case DEV_STATE_OFFLINE: case DEV_STATE_ONLINE: cdev->private->flags.donotify = 0; break; case DEV_STATE_BOXED: ret = resume_handle_boxed(cdev); if (ret) goto out_unlock; goto out_restore; default: ret = resume_handle_disc(cdev); if (ret) goto out_unlock; goto out_restore; } /* check if the device type has changed */ if (!ccw_device_test_sense_data(cdev)) { ccw_device_update_sense_data(cdev); ccw_device_sched_todo(cdev, CDEV_TODO_REBIND); ret = -ENODEV; goto out_unlock; } if (!cdev->online) goto out_unlock; if (ccw_device_online(cdev)) { ret = resume_handle_disc(cdev); if (ret) goto out_unlock; goto out_restore; } spin_unlock_irq(sch->lock); wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); spin_lock_irq(sch->lock); if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) { ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); ret = -ENODEV; goto out_unlock; } /* reenable cmf, if needed */ if (cdev->private->cmb) { spin_unlock_irq(sch->lock); ret = ccw_set_cmf(cdev, 1); spin_lock_irq(sch->lock); if (ret) { CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf 
failed " "(rc=%d)\n", cdev->private->dev_id.ssid, cdev->private->dev_id.devno, ret); ret = 0; } } out_restore: spin_unlock_irq(sch->lock); if (cdev->online && cdev->drv && cdev->drv->restore) ret = cdev->drv->restore(cdev); return ret; out_unlock: spin_unlock_irq(sch->lock); return ret; } static const struct dev_pm_ops ccw_pm_ops = { .prepare = ccw_device_pm_prepare, .complete = ccw_device_pm_complete, .freeze = ccw_device_pm_freeze, .thaw = ccw_device_pm_thaw, .restore = ccw_device_pm_restore, }; static struct bus_type ccw_bus_type = { .name = "ccw", .match = ccw_bus_match, .uevent = ccw_uevent, .probe = ccw_device_probe, .remove = ccw_device_remove, .shutdown = ccw_device_shutdown, .pm = &ccw_pm_ops, }; /** * ccw_driver_register() - register a ccw driver * @cdriver: driver to be registered * * This function is mainly a wrapper around driver_register(). * Returns: * %0 on success and a negative error value on failure. */ int ccw_driver_register(struct ccw_driver *cdriver) { struct device_driver *drv = &cdriver->driver; drv->bus = &ccw_bus_type; return driver_register(drv); } /** * ccw_driver_unregister() - deregister a ccw driver * @cdriver: driver to be deregistered * * This function is mainly a wrapper around driver_unregister(). */ void ccw_driver_unregister(struct ccw_driver *cdriver) { driver_unregister(&cdriver->driver); } static void ccw_device_todo(struct work_struct *work) { struct ccw_device_private *priv; struct ccw_device *cdev; struct subchannel *sch; enum cdev_todo todo; priv = container_of(work, struct ccw_device_private, todo_work); cdev = priv->cdev; sch = to_subchannel(cdev->dev.parent); /* Find out todo. */ spin_lock_irq(cdev->ccwlock); todo = priv->todo; priv->todo = CDEV_TODO_NOTHING; CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n", priv->dev_id.ssid, priv->dev_id.devno, todo); spin_unlock_irq(cdev->ccwlock); /* Perform todo. */ switch (todo) { case CDEV_TODO_ENABLE_CMF: cmf_reenable(cdev); break; case CDEV_TODO_REBIND: ccw_device_do_unbind_bind(cdev); break; case CDEV_TODO_REGISTER: io_subchannel_register(cdev); break; case CDEV_TODO_UNREG_EVAL: if (!sch_is_pseudo_sch(sch)) css_schedule_eval(sch->schid); /* fall-through */ case CDEV_TODO_UNREG: if (sch_is_pseudo_sch(sch)) ccw_device_unregister(cdev); else ccw_device_call_sch_unregister(cdev); break; default: break; } /* Release workqueue ref. */ put_device(&cdev->dev); } /** * ccw_device_sched_todo - schedule ccw device operation * @cdev: ccw device * @todo: todo * * Schedule the operation identified by @todo to be performed on the slow path * workqueue. Do nothing if another operation with higher priority is already * scheduled. Needs to be called with ccwdev lock held. */ void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo) { CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n", cdev->private->dev_id.ssid, cdev->private->dev_id.devno, todo); if (cdev->private->todo >= todo) return; cdev->private->todo = todo; /* Get workqueue ref. */ if (!get_device(&cdev->dev)) return; if (!queue_work(cio_work_q, &cdev->private->todo_work)) { /* Already queued, release workqueue ref. */ put_device(&cdev->dev); } } /** * ccw_device_siosl() - initiate logging * @cdev: ccw device * * This function is used to invoke model-dependent logging within the channel * subsystem. 
*/ int ccw_device_siosl(struct ccw_device *cdev) { struct subchannel *sch = to_subchannel(cdev->dev.parent); return chsc_siosl(sch->schid); } EXPORT_SYMBOL_GPL(ccw_device_siosl); MODULE_LICENSE("GPL"); EXPORT_SYMBOL(ccw_device_set_online); EXPORT_SYMBOL(ccw_device_set_offline); EXPORT_SYMBOL(ccw_driver_register); EXPORT_SYMBOL(ccw_driver_unregister); EXPORT_SYMBOL(get_ccwdev_by_busid);
gpl-2.0
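A quick illustration of the serialization trick used by online_store() in the drivers/s390/cio/device.c entry above: one atomic compare-and-swap on a 0/1 flag rejects concurrent on-/offline requests with -EAGAIN instead of blocking. The sketch below is a minimal, self-contained userspace analogue using C11 atomics; the function name and the bare -11 error value are hypothetical stand-ins, not kernel API.

#include <stdatomic.h>
#include <stdio.h>

/* 0 = idle, 1 = an on-/offline request is in flight
 * (plays the role of cdev->private->onoff). */
static atomic_int onoff;

/* Hypothetical stand-in for the guard at the top of online_store(). */
static int online_store_guarded(int want_online)
{
	int expected = 0;

	/* Same effect as atomic_cmpxchg(&onoff, 0, 1) != 0 in the driver. */
	if (!atomic_compare_exchange_strong(&onoff, &expected, 1))
		return -11; /* -EAGAIN: another request is already running */

	printf("setting device %s\n", want_online ? "online" : "offline");
	/* ... the driver dispatches to online_store_handle_online()/_offline()
	 * here, possibly sleeping, without holding any lock ... */

	atomic_store(&onoff, 0); /* like atomic_set(&cdev->private->onoff, 0) */
	return 0;
}

int main(void)
{
	online_store_guarded(1);
	online_store_guarded(0);
	return 0;
}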
playfulgod/kernel-LG-Marquee-LS855
arch/arm/mach-s3c64xx/mach-hmt.c
811
6322
/* mach-hmt.c - Platform code for Airgoo HMT * * Copyright 2009 Peter Korsgaard <jacmet@sunsite.dk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/serial_core.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/i2c.h> #include <linux/fb.h> #include <linux/gpio.h> #include <linux/delay.h> #include <linux/leds.h> #include <linux/pwm_backlight.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/hardware.h> #include <mach/regs-fb.h> #include <mach/map.h> #include <asm/irq.h> #include <asm/mach-types.h> #include <plat/regs-serial.h> #include <plat/iic.h> #include <plat/fb.h> #include <plat/nand.h> #include <mach/s3c6410.h> #include <plat/clock.h> #include <plat/devs.h> #include <plat/cpu.h> #define UCON S3C2410_UCON_DEFAULT #define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE) #define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE) static struct s3c2410_uartcfg hmt_uartcfgs[] __initdata = { [0] = { .hwport = 0, .flags = 0, .ucon = UCON, .ulcon = ULCON, .ufcon = UFCON, }, [1] = { .hwport = 1, .flags = 0, .ucon = UCON, .ulcon = ULCON, .ufcon = UFCON, }, [2] = { .hwport = 2, .flags = 0, .ucon = UCON, .ulcon = ULCON, .ufcon = UFCON, }, }; static int hmt_bl_init(struct device *dev) { int ret; ret = gpio_request(S3C64XX_GPB(4), "lcd backlight enable"); if (!ret) ret = gpio_direction_output(S3C64XX_GPB(4), 0); return ret; } static int hmt_bl_notify(struct device *dev, int brightness) { /* * translate from CIELUV/CIELAB L*->brightness, E.G. from * perceived luminance to light output. 
Assumes range 0..25600 */ if (brightness < 0x800) { /* Y = Yn * L / 903.3 */ brightness = (100*256 * brightness + 231245/2) / 231245; } else { /* Y = Yn * ((L + 16) / 116 )^3 */ int t = (brightness*4 + 16*1024 + 58)/116; brightness = 25 * ((t * t * t + 0x100000/2) / 0x100000); } gpio_set_value(S3C64XX_GPB(4), brightness); return brightness; } static void hmt_bl_exit(struct device *dev) { gpio_free(S3C64XX_GPB(4)); } static struct platform_pwm_backlight_data hmt_backlight_data = { .pwm_id = 1, .max_brightness = 100 * 256, .dft_brightness = 40 * 256, .pwm_period_ns = 1000000000 / (100 * 256 * 20), .init = hmt_bl_init, .notify = hmt_bl_notify, .exit = hmt_bl_exit, }; static struct platform_device hmt_backlight_device = { .name = "pwm-backlight", .dev = { .parent = &s3c_device_timer[1].dev, .platform_data = &hmt_backlight_data, }, }; static struct s3c_fb_pd_win hmt_fb_win0 = { .win_mode = { .pixclock = 41094, .left_margin = 8, .right_margin = 13, .upper_margin = 7, .lower_margin = 5, .hsync_len = 3, .vsync_len = 1, .xres = 800, .yres = 480, }, .max_bpp = 32, .default_bpp = 16, }; /* 405566 clocks per frame => 60Hz refresh requires 24333960Hz clock */ static struct s3c_fb_platdata hmt_lcd_pdata __initdata = { .setup_gpio = s3c64xx_fb_gpio_setup_24bpp, .win[0] = &hmt_fb_win0, .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, }; static struct mtd_partition hmt_nand_part[] = { [0] = { .name = "uboot", .size = SZ_512K, .offset = 0, }, [1] = { .name = "uboot-env1", .size = SZ_256K, .offset = SZ_512K, }, [2] = { .name = "uboot-env2", .size = SZ_256K, .offset = SZ_512K + SZ_256K, }, [3] = { .name = "kernel", .size = SZ_2M, .offset = SZ_1M, }, [4] = { .name = "rootfs", .size = MTDPART_SIZ_FULL, .offset = SZ_1M + SZ_2M, }, }; static struct s3c2410_nand_set hmt_nand_sets[] = { [0] = { .name = "nand", .nr_chips = 1, .nr_partitions = ARRAY_SIZE(hmt_nand_part), .partitions = hmt_nand_part, }, }; static struct s3c2410_platform_nand hmt_nand_info = { .tacls = 25, .twrph0 = 55, .twrph1 = 40, .nr_sets = ARRAY_SIZE(hmt_nand_sets), .sets = hmt_nand_sets, }; static struct gpio_led hmt_leds[] = { { /* left function keys */ .name = "left:blue", .gpio = S3C64XX_GPO(12), .default_trigger = "default-on", }, { /* right function keys - red */ .name = "right:red", .gpio = S3C64XX_GPO(13), }, { /* right function keys - green */ .name = "right:green", .gpio = S3C64XX_GPO(14), }, { /* right function keys - blue */ .name = "right:blue", .gpio = S3C64XX_GPO(15), .default_trigger = "default-on", }, }; static struct gpio_led_platform_data hmt_led_data = { .num_leds = ARRAY_SIZE(hmt_leds), .leds = hmt_leds, }; static struct platform_device hmt_leds_device = { .name = "leds-gpio", .id = -1, .dev.platform_data = &hmt_led_data, }; static struct map_desc hmt_iodesc[] = {}; static struct platform_device *hmt_devices[] __initdata = { &s3c_device_i2c0, &s3c_device_nand, &s3c_device_fb, &s3c_device_ohci, &s3c_device_timer[1], &hmt_backlight_device, &hmt_leds_device, }; static void __init hmt_map_io(void) { s3c64xx_init_io(hmt_iodesc, ARRAY_SIZE(hmt_iodesc)); s3c24xx_init_clocks(12000000); s3c24xx_init_uarts(hmt_uartcfgs, ARRAY_SIZE(hmt_uartcfgs)); } static void __init hmt_machine_init(void) { s3c_i2c0_set_platdata(NULL); s3c_fb_set_platdata(&hmt_lcd_pdata); s3c_nand_set_platdata(&hmt_nand_info); gpio_request(S3C64XX_GPC(7), "usb power"); gpio_direction_output(S3C64XX_GPC(7), 0); gpio_request(S3C64XX_GPM(0), "usb power"); gpio_direction_output(S3C64XX_GPM(0), 1); 
gpio_request(S3C64XX_GPK(7), "usb power"); gpio_direction_output(S3C64XX_GPK(7), 1); gpio_request(S3C64XX_GPF(13), "usb power"); gpio_direction_output(S3C64XX_GPF(13), 1); platform_add_devices(hmt_devices, ARRAY_SIZE(hmt_devices)); } MACHINE_START(HMT, "Airgoo-HMT") /* Maintainer: Peter Korsgaard <jacmet@sunsite.dk> */ .phys_io = S3C_PA_UART & 0xfff00000, .io_pg_offst = (((u32)S3C_VA_UART) >> 18) & 0xfffc, .boot_params = S3C64XX_PA_SDRAM + 0x100, .init_irq = s3c6410_init_irq, .map_io = hmt_map_io, .init_machine = hmt_machine_init, .timer = &s3c24xx_timer, MACHINE_END
gpl-2.0
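The hmt_bl_notify() callback in the mach-hmt.c entry above converts a perceptual CIE L* brightness (scaled by 256, so 0..25600 covers L* 0..100) into linear light output with integer-only math. The standalone sketch below lifts that exact arithmetic out of the driver so the two branches and their meeting point at 0x800 (L* = 8) can be inspected; it is illustrative only and assumes nothing beyond the constants visible in the driver.

#include <stdio.h>

/* Same fixed-point math as hmt_bl_notify() above.
 * Input: perceptual brightness L* scaled by 256 (0..25600).
 * Output: linear light output, also scaled so 25600 = full on. */
static int l_to_luminance(int brightness)
{
	if (brightness < 0x800) {
		/* Y = Yn * L / 903.3, with 231245 ~= 903.3 * 256 */
		return (100 * 256 * brightness + 231245 / 2) / 231245;
	} else {
		/* Y = Yn * ((L + 16) / 116)^3, with t ~= ((L + 16) / 116) * 1024 */
		int t = (brightness * 4 + 16 * 1024 + 58) / 116;
		return 25 * ((t * t * t + 0x100000 / 2) / 0x100000);
	}
}

int main(void)
{
	/* The two branches should nearly agree at the L* = 8 breakpoint:
	 * 227 (linear side) vs 225 (cubic side). */
	printf("L*= 7.99 -> %d\n", l_to_luminance(0x7ff));
	printf("L*= 8.00 -> %d\n", l_to_luminance(0x800));
	printf("L*=50    -> %d\n", l_to_luminance(50 * 256));  /* ~18%% Y */
	printf("L*=100   -> %d\n", l_to_luminance(100 * 256)); /* 25600 */
	return 0;
}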
playfulgod/tiamat-8x60-kernel
arch/arm/mm/mmap.c
1323
3463
/* * linux/arch/arm/mm/mmap.c */ #include <linux/fs.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/shm.h> #include <linux/sched.h> #include <linux/io.h> #include <asm/cputype.h> #include <asm/system.h> #define COLOUR_ALIGN(addr,pgoff) \ ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \ (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1))) /* * We need to ensure that shared mappings are correctly aligned to * avoid aliasing issues with VIPT caches. We need to ensure that * a specific page of an object is always mapped at a multiple of * SHMLBA bytes. * * We unconditionally provide this function for all cases, however * in the VIVT case, we optimise out the alignment rules. */ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long start_addr; #ifdef CONFIG_CPU_V6 unsigned int cache_type; int do_align = 0, aliasing = 0; /* * We only need to do colour alignment if either the I or D * caches alias. This is indicated by bits 9 and 21 of the * cache type register. */ cache_type = read_cpuid_cachetype(); if (cache_type != read_cpuid_id()) { aliasing = (cache_type | cache_type >> 12) & (1 << 11); if (aliasing) do_align = filp || flags & MAP_SHARED; } #else #define do_align 0 #define aliasing 0 #endif /* * We enforce the MAP_FIXED case. */ if (flags & MAP_FIXED) { if (aliasing && flags & MAP_SHARED && (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) return -EINVAL; return addr; } if (len > TASK_SIZE) return -ENOMEM; if (addr) { if (do_align) addr = COLOUR_ALIGN(addr, pgoff); else addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && (!vma || addr + len <= vma->vm_start)) return addr; } if (len > mm->cached_hole_size) { start_addr = addr = mm->free_area_cache; } else { start_addr = addr = TASK_UNMAPPED_BASE; mm->cached_hole_size = 0; } full_search: if (do_align) addr = COLOUR_ALIGN(addr, pgoff); else addr = PAGE_ALIGN(addr); for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { /* At this point: (!vma || addr < vma->vm_end). */ if (TASK_SIZE - len < addr) { /* * Start a new search - just in case we missed * some holes. */ if (start_addr != TASK_UNMAPPED_BASE) { start_addr = addr = TASK_UNMAPPED_BASE; mm->cached_hole_size = 0; goto full_search; } return -ENOMEM; } if (!vma || addr + len <= vma->vm_start) { /* * Remember the place where we stopped the search: */ mm->free_area_cache = addr + len; return addr; } if (addr + mm->cached_hole_size < vma->vm_start) mm->cached_hole_size = vma->vm_start - addr; addr = vma->vm_end; if (do_align) addr = COLOUR_ALIGN(addr, pgoff); } } /* * You really shouldn't be using read() or write() on /dev/mem. This * might go away in the future. */ int valid_phys_addr_range(unsigned long addr, size_t size) { if (addr < PHYS_OFFSET) return 0; if (addr + size > __pa(high_memory - 1) + 1) return 0; return 1; } /* * We don't use supersection mappings for mmap() on /dev/mem, which * means that we can't map the memory area above the 4G barrier into * userspace. */ int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) { return !(pfn + (size >> PAGE_SHIFT) > 0x00100000); }
gpl-2.0
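COLOUR_ALIGN() in the arch/arm/mm/mmap.c entry above rounds an address up to an SHMLBA boundary and then adds the colour bits of the file offset, so that a given page of a shared object always maps at the same cache colour. Below is a small self-contained check of that invariant; the SHMLBA value of four pages is an assumption chosen for illustration (common on ARM VIPT parts, but not taken from this file).

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define SHMLBA     (4 * (1UL << PAGE_SHIFT)) /* assumed: 4 pages */

/* Same expression as the kernel macro above. */
#define COLOUR_ALIGN(addr, pgoff) \
	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

int main(void)
{
	unsigned long addr = 0x40001234, pgoff = 5;
	unsigned long aligned = COLOUR_ALIGN(addr, pgoff);

	/* The MAP_FIXED branch of arch_get_unmapped_area() enforces exactly
	 * this property: offset-in-file and address agree modulo SHMLBA. */
	assert(((aligned - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) == 0);
	assert(aligned >= addr);

	printf("addr=%#lx pgoff=%lu -> %#lx\n", addr, pgoff, aligned);
	return 0;
}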
M8s-dev/kernel_htc_msm8939
drivers/watchdog/s3c2410_wdt.c
1835
12441
/* linux/drivers/char/watchdog/s3c2410_wdt.c * * Copyright (c) 2004 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * S3C2410 Watchdog Timer Support * * Based on, softdog.c by Alan Cox, * (c) Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/timer.h> #include <linux/miscdevice.h> /* for MODULE_ALIAS_MISCDEV */ #include <linux/watchdog.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/clk.h> #include <linux/uaccess.h> #include <linux/io.h> #include <linux/cpufreq.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/of.h> #include <mach/map.h> #undef S3C_VA_WATCHDOG #define S3C_VA_WATCHDOG (0) #include <plat/regs-watchdog.h> #define CONFIG_S3C2410_WATCHDOG_ATBOOT (0) #define CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME (15) static bool nowayout = WATCHDOG_NOWAYOUT; static int tmr_margin; static int tmr_atboot = CONFIG_S3C2410_WATCHDOG_ATBOOT; static int soft_noboot; static int debug; module_param(tmr_margin, int, 0); module_param(tmr_atboot, int, 0); module_param(nowayout, bool, 0); module_param(soft_noboot, int, 0); module_param(debug, int, 0); MODULE_PARM_DESC(tmr_margin, "Watchdog tmr_margin in seconds. (default=" __MODULE_STRING(CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME) ")"); MODULE_PARM_DESC(tmr_atboot, "Watchdog is started at boot time if set to 1, default=" __MODULE_STRING(CONFIG_S3C2410_WATCHDOG_ATBOOT)); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); MODULE_PARM_DESC(soft_noboot, "Watchdog action, set to 1 to ignore reboots, " "0 to reboot (default 0)"); MODULE_PARM_DESC(debug, "Watchdog debug, set to >1 for debug (default 0)"); static struct device *wdt_dev; /* platform device attached to */ static struct resource *wdt_mem; static struct resource *wdt_irq; static struct clk *wdt_clock; static void __iomem *wdt_base; static unsigned int wdt_count; static DEFINE_SPINLOCK(wdt_lock); /* watchdog control routines */ #define DBG(fmt, ...) 
\ do { \ if (debug) \ pr_info(fmt, ##__VA_ARGS__); \ } while (0) /* functions */ static int s3c2410wdt_keepalive(struct watchdog_device *wdd) { spin_lock(&wdt_lock); writel(wdt_count, wdt_base + S3C2410_WTCNT); spin_unlock(&wdt_lock); return 0; } static void __s3c2410wdt_stop(void) { unsigned long wtcon; wtcon = readl(wdt_base + S3C2410_WTCON); wtcon &= ~(S3C2410_WTCON_ENABLE | S3C2410_WTCON_RSTEN); writel(wtcon, wdt_base + S3C2410_WTCON); } static int s3c2410wdt_stop(struct watchdog_device *wdd) { spin_lock(&wdt_lock); __s3c2410wdt_stop(); spin_unlock(&wdt_lock); return 0; } static int s3c2410wdt_start(struct watchdog_device *wdd) { unsigned long wtcon; spin_lock(&wdt_lock); __s3c2410wdt_stop(); wtcon = readl(wdt_base + S3C2410_WTCON); wtcon |= S3C2410_WTCON_ENABLE | S3C2410_WTCON_DIV128; if (soft_noboot) { wtcon |= S3C2410_WTCON_INTEN; wtcon &= ~S3C2410_WTCON_RSTEN; } else { wtcon &= ~S3C2410_WTCON_INTEN; wtcon |= S3C2410_WTCON_RSTEN; } DBG("%s: wdt_count=0x%08x, wtcon=%08lx\n", __func__, wdt_count, wtcon); writel(wdt_count, wdt_base + S3C2410_WTDAT); writel(wdt_count, wdt_base + S3C2410_WTCNT); writel(wtcon, wdt_base + S3C2410_WTCON); spin_unlock(&wdt_lock); return 0; } static inline int s3c2410wdt_is_running(void) { return readl(wdt_base + S3C2410_WTCON) & S3C2410_WTCON_ENABLE; } static int s3c2410wdt_set_heartbeat(struct watchdog_device *wdd, unsigned timeout) { unsigned long freq = clk_get_rate(wdt_clock); unsigned int count; unsigned int divisor = 1; unsigned long wtcon; if (timeout < 1) return -EINVAL; freq /= 128; count = timeout * freq; DBG("%s: count=%d, timeout=%d, freq=%lu\n", __func__, count, timeout, freq); /* if the count is bigger than the watchdog register, then work out what we need to do (and whether we can actually make this value) */ if (count >= 0x10000) { for (divisor = 1; divisor <= 0x100; divisor++) { if ((count / divisor) < 0x10000) break; } if ((count / divisor) >= 0x10000) { dev_err(wdt_dev, "timeout %d too big\n", timeout); return -EINVAL; } } DBG("%s: timeout=%d, divisor=%d, count=%d (%08x)\n", __func__, timeout, divisor, count, count/divisor); count /= divisor; wdt_count = count; /* update the pre-scaler */ wtcon = readl(wdt_base + S3C2410_WTCON); wtcon &= ~S3C2410_WTCON_PRESCALE_MASK; wtcon |= S3C2410_WTCON_PRESCALE(divisor-1); writel(count, wdt_base + S3C2410_WTDAT); writel(wtcon, wdt_base + S3C2410_WTCON); wdd->timeout = (count * divisor) / freq; return 0; } #define OPTIONS (WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE) static const struct watchdog_info s3c2410_wdt_ident = { .options = OPTIONS, .firmware_version = 0, .identity = "S3C2410 Watchdog", }; static struct watchdog_ops s3c2410wdt_ops = { .owner = THIS_MODULE, .start = s3c2410wdt_start, .stop = s3c2410wdt_stop, .ping = s3c2410wdt_keepalive, .set_timeout = s3c2410wdt_set_heartbeat, }; static struct watchdog_device s3c2410_wdd = { .info = &s3c2410_wdt_ident, .ops = &s3c2410wdt_ops, .timeout = CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME, }; /* interrupt handler code */ static irqreturn_t s3c2410wdt_irq(int irqno, void *param) { dev_info(wdt_dev, "watchdog timer expired (irq)\n"); s3c2410wdt_keepalive(&s3c2410_wdd); return IRQ_HANDLED; } #ifdef CONFIG_CPU_FREQ static int s3c2410wdt_cpufreq_transition(struct notifier_block *nb, unsigned long val, void *data) { int ret; if (!s3c2410wdt_is_running()) goto done; if (val == CPUFREQ_PRECHANGE) { /* To ensure that over the change we don't cause the * watchdog to trigger, we perform a keep-alive if * the watchdog is running.
*/ s3c2410wdt_keepalive(&s3c2410_wdd); } else if (val == CPUFREQ_POSTCHANGE) { s3c2410wdt_stop(&s3c2410_wdd); ret = s3c2410wdt_set_heartbeat(&s3c2410_wdd, s3c2410_wdd.timeout); if (ret >= 0) s3c2410wdt_start(&s3c2410_wdd); else goto err; } done: return 0; err: dev_err(wdt_dev, "cannot set new value for timeout %d\n", s3c2410_wdd.timeout); return ret; } static struct notifier_block s3c2410wdt_cpufreq_transition_nb = { .notifier_call = s3c2410wdt_cpufreq_transition, }; static inline int s3c2410wdt_cpufreq_register(void) { return cpufreq_register_notifier(&s3c2410wdt_cpufreq_transition_nb, CPUFREQ_TRANSITION_NOTIFIER); } static inline void s3c2410wdt_cpufreq_deregister(void) { cpufreq_unregister_notifier(&s3c2410wdt_cpufreq_transition_nb, CPUFREQ_TRANSITION_NOTIFIER); } #else static inline int s3c2410wdt_cpufreq_register(void) { return 0; } static inline void s3c2410wdt_cpufreq_deregister(void) { } #endif static int s3c2410wdt_probe(struct platform_device *pdev) { struct device *dev; unsigned int wtcon; int started = 0; int ret; DBG("%s: probe=%p\n", __func__, pdev); dev = &pdev->dev; wdt_dev = &pdev->dev; wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (wdt_mem == NULL) { dev_err(dev, "no memory resource specified\n"); return -ENOENT; } wdt_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (wdt_irq == NULL) { dev_err(dev, "no irq resource specified\n"); ret = -ENOENT; goto err; } /* get the memory region for the watchdog timer */ wdt_base = devm_ioremap_resource(dev, wdt_mem); if (IS_ERR(wdt_base)) { ret = PTR_ERR(wdt_base); goto err; } DBG("probe: mapped wdt_base=%p\n", wdt_base); wdt_clock = devm_clk_get(dev, "watchdog"); if (IS_ERR(wdt_clock)) { dev_err(dev, "failed to find watchdog clock source\n"); ret = PTR_ERR(wdt_clock); goto err; } clk_prepare_enable(wdt_clock); ret = s3c2410wdt_cpufreq_register(); if (ret < 0) { pr_err("failed to register cpufreq\n"); goto err_clk; } /* see if we can actually set the requested timer margin, and if * not, try the default value */ watchdog_init_timeout(&s3c2410_wdd, tmr_margin, &pdev->dev); if (s3c2410wdt_set_heartbeat(&s3c2410_wdd, s3c2410_wdd.timeout)) { started = s3c2410wdt_set_heartbeat(&s3c2410_wdd, CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME); if (started == 0) dev_info(dev, "tmr_margin value out of range, default %d used\n", CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME); else dev_info(dev, "default timer value is out of range, " "cannot start\n"); } ret = devm_request_irq(dev, wdt_irq->start, s3c2410wdt_irq, 0, pdev->name, pdev); if (ret != 0) { dev_err(dev, "failed to install irq (%d)\n", ret); goto err_cpufreq; } watchdog_set_nowayout(&s3c2410_wdd, nowayout); ret = watchdog_register_device(&s3c2410_wdd); if (ret) { dev_err(dev, "cannot register watchdog (%d)\n", ret); goto err_cpufreq; } if (tmr_atboot && started == 0) { dev_info(dev, "starting watchdog timer\n"); s3c2410wdt_start(&s3c2410_wdd); } else if (!tmr_atboot) { /* if we're not enabling the watchdog, then ensure it is * disabled if it has been left running from the bootloader * or other source */ s3c2410wdt_stop(&s3c2410_wdd); } /* print out a statement of readiness */ wtcon = readl(wdt_base + S3C2410_WTCON); dev_info(dev, "watchdog %sactive, reset %sabled, irq %sabled\n", (wtcon & S3C2410_WTCON_ENABLE) ? "" : "in", (wtcon & S3C2410_WTCON_RSTEN) ? "en" : "dis", (wtcon & S3C2410_WTCON_INTEN) ? 
"en" : "dis"); return 0; err_cpufreq: s3c2410wdt_cpufreq_deregister(); err_clk: clk_disable_unprepare(wdt_clock); wdt_clock = NULL; err: wdt_irq = NULL; wdt_mem = NULL; return ret; } static int s3c2410wdt_remove(struct platform_device *dev) { watchdog_unregister_device(&s3c2410_wdd); s3c2410wdt_cpufreq_deregister(); clk_disable_unprepare(wdt_clock); wdt_clock = NULL; wdt_irq = NULL; wdt_mem = NULL; return 0; } static void s3c2410wdt_shutdown(struct platform_device *dev) { s3c2410wdt_stop(&s3c2410_wdd); } #ifdef CONFIG_PM static unsigned long wtcon_save; static unsigned long wtdat_save; static int s3c2410wdt_suspend(struct platform_device *dev, pm_message_t state) { /* Save watchdog state, and turn it off. */ wtcon_save = readl(wdt_base + S3C2410_WTCON); wtdat_save = readl(wdt_base + S3C2410_WTDAT); /* Note that WTCNT doesn't need to be saved. */ s3c2410wdt_stop(&s3c2410_wdd); return 0; } static int s3c2410wdt_resume(struct platform_device *dev) { /* Restore watchdog state. */ writel(wtdat_save, wdt_base + S3C2410_WTDAT); writel(wtdat_save, wdt_base + S3C2410_WTCNT); /* Reset count */ writel(wtcon_save, wdt_base + S3C2410_WTCON); pr_info("watchdog %sabled\n", (wtcon_save & S3C2410_WTCON_ENABLE) ? "en" : "dis"); return 0; } #else #define s3c2410wdt_suspend NULL #define s3c2410wdt_resume NULL #endif /* CONFIG_PM */ #ifdef CONFIG_OF static const struct of_device_id s3c2410_wdt_match[] = { { .compatible = "samsung,s3c2410-wdt" }, {}, }; MODULE_DEVICE_TABLE(of, s3c2410_wdt_match); #endif static struct platform_driver s3c2410wdt_driver = { .probe = s3c2410wdt_probe, .remove = s3c2410wdt_remove, .shutdown = s3c2410wdt_shutdown, .suspend = s3c2410wdt_suspend, .resume = s3c2410wdt_resume, .driver = { .owner = THIS_MODULE, .name = "s3c2410-wdt", .of_match_table = of_match_ptr(s3c2410_wdt_match), }, }; module_platform_driver(s3c2410wdt_driver); MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>, " "Dimitry Andric <dimitry.andric@tomtom.com>"); MODULE_DESCRIPTION("S3C2410 Watchdog Device Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:s3c2410-wdt");
gpl-2.0
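s3c2410wdt_set_heartbeat() in the watchdog entry above has to squeeze timeout * (PCLK/128) into a 16-bit count register, so it linearly searches for the smallest 8-bit prescaler that makes the count fit and then reports the rounded effective timeout. The standalone replica below walks through that arithmetic; the 66 MHz clock rate is an assumed example value, not something the driver dictates.

#include <stdio.h>

/* Mirrors the divisor search in s3c2410wdt_set_heartbeat() above.
 * 66 MHz is an illustrative clock rate assumption. */
int main(void)
{
	unsigned long freq = 66000000 / 128; /* fixed /128 divider (WTCON_DIV128) */
	unsigned int timeout = 15;           /* seconds, the driver default */
	unsigned int count = timeout * freq;
	unsigned int divisor = 1;

	if (count >= 0x10000) {              /* WTCNT is only 16 bits wide */
		for (divisor = 1; divisor <= 0x100; divisor++)
			if ((count / divisor) < 0x10000)
				break;
		if ((count / divisor) >= 0x10000) {
			printf("timeout %u too big\n", timeout);
			return 1;
		}
	}
	count /= divisor;

	/* For 66 MHz / 15 s this finds divisor 119, count 64994. */
	printf("prescaler=%u (WTCON field %u), WTCNT=%u\n",
	       divisor, divisor - 1, count);
	/* Integer truncation can shave the reported timeout slightly: */
	printf("effective timeout = %lu s\n",
	       (count * (unsigned long)divisor) / freq);
	return 0;
}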
Blechd0se/kernel-moto-g
arch/arm/kernel/sched_clock.c
1835
4732
/* * sched_clock.c: support for extending counters to full 64-bit ns counter * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/clocksource.h> #include <linux/init.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/syscore_ops.h> #include <linux/timer.h> #include <asm/sched_clock.h> struct clock_data { u64 epoch_ns; u32 epoch_cyc; u32 epoch_cyc_copy; u32 mult; u32 shift; bool suspended; bool needs_suspend; }; static void sched_clock_poll(unsigned long wrap_ticks); static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0); static struct clock_data cd = { .mult = NSEC_PER_SEC / HZ, }; static u32 __read_mostly sched_clock_mask = 0xffffffff; static u32 notrace jiffy_sched_clock_read(void) { return (u32)(jiffies - INITIAL_JIFFIES); } static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read; static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift) { return (cyc * mult) >> shift; } static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask) { u64 epoch_ns; u32 epoch_cyc; if (cd.suspended) return cd.epoch_ns; /* * Load the epoch_cyc and epoch_ns atomically. We do this by * ensuring that we always write epoch_cyc, epoch_ns and * epoch_cyc_copy in strict order, and read them in strict order. * If epoch_cyc and epoch_cyc_copy are not equal, then we're in * the middle of an update, and we should repeat the load. */ do { epoch_cyc = cd.epoch_cyc; smp_rmb(); epoch_ns = cd.epoch_ns; smp_rmb(); } while (epoch_cyc != cd.epoch_cyc_copy); cyc = read_sched_clock(); cyc = (cyc - epoch_cyc) & sched_clock_mask; return epoch_ns + cyc_to_ns(cyc, cd.mult, cd.shift); } /* * Atomically update the sched_clock epoch. */ static void notrace update_sched_clock(void) { unsigned long flags; u32 cyc; u64 ns; cyc = read_sched_clock(); ns = cd.epoch_ns + cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask, cd.mult, cd.shift); /* * Write epoch_cyc and epoch_ns in a way that the update is * detectable in cyc_to_fixed_sched_clock(). */ raw_local_irq_save(flags); cd.epoch_cyc_copy = cyc; smp_wmb(); cd.epoch_ns = ns; smp_wmb(); cd.epoch_cyc = cyc; raw_local_irq_restore(flags); } static void sched_clock_poll(unsigned long wrap_ticks) { mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks)); update_sched_clock(); } void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate) { unsigned long r, w; u64 res, wrap; char r_unit; BUG_ON(bits > 32); WARN_ON(!irqs_disabled()); WARN_ON(read_sched_clock != jiffy_sched_clock_read); read_sched_clock = read; sched_clock_mask = (1 << bits) - 1; /* calculate the mult/shift to convert counter ticks to ns. */ clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0); r = rate; if (r >= 4000000) { r /= 1000000; r_unit = 'M'; } else if (r >= 1000) { r /= 1000; r_unit = 'k'; } else r_unit = ' '; /* calculate how many ns until we wrap */ wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift); do_div(wrap, NSEC_PER_MSEC); w = wrap; /* calculate the ns resolution of this counter */ res = cyc_to_ns(1ULL, cd.mult, cd.shift); pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n", bits, r, r_unit, res, w); /* * Start the timer to keep sched_clock() properly updated and * sets the initial epoch. 
*/ sched_clock_timer.data = msecs_to_jiffies(w - (w / 10)); update_sched_clock(); /* * Ensure that sched_clock() starts off at 0ns */ cd.epoch_ns = 0; pr_debug("Registered %pF as sched_clock source\n", read); } unsigned long long notrace sched_clock(void) { u32 cyc = read_sched_clock(); return cyc_to_sched_clock(cyc, sched_clock_mask); } void __init sched_clock_postinit(void) { /* * If no sched_clock function has been provided at that point, * make it the final one. */ if (read_sched_clock == jiffy_sched_clock_read) setup_sched_clock(jiffy_sched_clock_read, 32, HZ); sched_clock_poll(sched_clock_timer.data); } static int sched_clock_suspend(void) { sched_clock_poll(sched_clock_timer.data); cd.suspended = true; return 0; } static void sched_clock_resume(void) { cd.epoch_cyc = read_sched_clock(); cd.epoch_cyc_copy = cd.epoch_cyc; cd.suspended = false; } static struct syscore_ops sched_clock_ops = { .suspend = sched_clock_suspend, .resume = sched_clock_resume, }; static int __init sched_clock_syscore_init(void) { register_syscore_ops(&sched_clock_ops); return 0; } device_initcall(sched_clock_syscore_init);
gpl-2.0
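The cyc_to_sched_clock()/update_sched_clock() pair above relies on a small lock-free protocol: the writer stores epoch_cyc_copy, then epoch_ns, then epoch_cyc with write barriers in between, and a reader that observes epoch_cyc == epoch_cyc_copy knows its epoch_ns load did not race with an update. A minimal user-space sketch of the same protocol, using C11 atomics in place of the kernel's smp_wmb()/smp_rmb(); an illustration, not the kernel implementation:
/*
 * Seqlock-style epoch snapshot: the reader retries until the two
 * copies of the cycle count agree, proving a consistent (cyc, ns)
 * pair was read.
 */
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint32_t epoch_cyc, epoch_cyc_copy;
static _Atomic uint64_t epoch_ns;

void writer_update(uint32_t cyc, uint64_t ns)
{
	/* strict store order: copy, ns, then cyc */
	atomic_store_explicit(&epoch_cyc_copy, cyc, memory_order_release);
	atomic_store_explicit(&epoch_ns, ns, memory_order_release);
	atomic_store_explicit(&epoch_cyc, cyc, memory_order_release);
}

void reader_snapshot(uint32_t *cyc, uint64_t *ns)
{
	uint32_t c;
	do {	/* strict load order, retry on a torn update */
		c = atomic_load_explicit(&epoch_cyc, memory_order_acquire);
		*ns = atomic_load_explicit(&epoch_ns, memory_order_acquire);
	} while (c != atomic_load_explicit(&epoch_cyc_copy,
					   memory_order_acquire));
	*cyc = c;
}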
AndroidOpenDevelopment-Devices/android_kernel_moto_shamu
drivers/staging/comedi/drivers/addi_watchdog.c
2091
4446
/* * COMEDI driver for the watchdog subdevice found on some addi-data boards * Copyright (c) 2013 H Hartley Sweeten <hsweeten@visionengravers.com> * * Based on implementations in various addi-data COMEDI drivers. * * COMEDI - Linux Control and Measurement Device Interface * Copyright (C) 1998 David A. Schleef <ds@schleef.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "../comedidev.h" #include "addi_watchdog.h" /* * Register offsets/defines for the addi-data watchdog */ #define ADDI_WDOG_REG 0x00 #define ADDI_WDOG_RELOAD_REG 0x04 #define ADDI_WDOG_TIMEBASE 0x08 #define ADDI_WDOG_CTRL_REG 0x0c #define ADDI_WDOG_CTRL_ENABLE (1 << 0) #define ADDI_WDOG_CTRL_SW_TRIG (1 << 9) #define ADDI_WDOG_STATUS_REG 0x10 #define ADDI_WDOG_STATUS_ENABLED (1 << 0) #define ADDI_WDOG_STATUS_SW_TRIG (1 << 1) struct addi_watchdog_private { unsigned long iobase; unsigned int wdog_ctrl; }; /* * The watchdog subdevice is configured with two INSN_CONFIG instructions: * * Enable the watchdog and set the reload timeout: * data[0] = INSN_CONFIG_ARM * data[1] = timeout reload value * * Disable the watchdog: * data[0] = INSN_CONFIG_DISARM */ static int addi_watchdog_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct addi_watchdog_private *spriv = s->private; unsigned int reload; switch (data[0]) { case INSN_CONFIG_ARM: spriv->wdog_ctrl = ADDI_WDOG_CTRL_ENABLE; reload = data[1] & s->maxdata; outl(reload, spriv->iobase + ADDI_WDOG_RELOAD_REG); /* Time base is 20ms, let the user know the timeout */ dev_info(dev->class_dev, "watchdog enabled, timeout:%dms\n", 20 * reload + 20); break; case INSN_CONFIG_DISARM: spriv->wdog_ctrl = 0; break; default: return -EINVAL; } outl(spriv->wdog_ctrl, spriv->iobase + ADDI_WDOG_CTRL_REG); return insn->n; } static int addi_watchdog_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct addi_watchdog_private *spriv = s->private; int i; for (i = 0; i < insn->n; i++) data[i] = inl(spriv->iobase + ADDI_WDOG_STATUS_REG); return insn->n; } static int addi_watchdog_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct addi_watchdog_private *spriv = s->private; int i; if (spriv->wdog_ctrl == 0) { dev_warn(dev->class_dev, "watchdog is disabled\n"); return -EINVAL; } /* "ping" the watchdog */ for (i = 0; i < insn->n; i++) { outl(spriv->wdog_ctrl | ADDI_WDOG_CTRL_SW_TRIG, spriv->iobase + ADDI_WDOG_CTRL_REG); } return insn->n; } void addi_watchdog_reset(unsigned long iobase) { outl(0x0, iobase + ADDI_WDOG_CTRL_REG); outl(0x0, iobase + ADDI_WDOG_RELOAD_REG); } EXPORT_SYMBOL_GPL(addi_watchdog_reset); int addi_watchdog_init(struct comedi_subdevice *s, unsigned long iobase) { struct addi_watchdog_private *spriv; spriv = kzalloc(sizeof(*spriv), GFP_KERNEL); if (!spriv) 
return -ENOMEM; spriv->iobase = iobase; s->private = spriv; s->type = COMEDI_SUBD_TIMER; s->subdev_flags = SDF_WRITEABLE; s->n_chan = 1; s->maxdata = 0xff; s->insn_config = addi_watchdog_insn_config; s->insn_read = addi_watchdog_insn_read; s->insn_write = addi_watchdog_insn_write; return 0; } EXPORT_SYMBOL_GPL(addi_watchdog_init); static int __init addi_watchdog_module_init(void) { return 0; } module_init(addi_watchdog_module_init); static void __exit addi_watchdog_module_exit(void) { } module_exit(addi_watchdog_module_exit); MODULE_DESCRIPTION("ADDI-DATA Watchdog subdevice"); MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>"); MODULE_LICENSE("GPL");
gpl-2.0
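The addi_watchdog driver above arms the watchdog with an 8-bit reload value on a 20 ms time base and reports the resulting timeout as 20 * reload + 20 ms. A small, purely illustrative helper pair (hypothetical names, not part of the driver) converts between milliseconds and reload values under that formula:
/*
 * Timeout <-> reload conversion for a 20 ms time-base watchdog,
 * following the arithmetic printed by addi_watchdog_insn_config().
 */
#include <stdio.h>

/* reload register value -> effective timeout in milliseconds */
static unsigned int addi_reload_to_ms(unsigned int reload)
{
	return 20 * reload + 20;
}

/* desired timeout in ms -> closest reload value, clamped to 8 bits */
static unsigned int addi_ms_to_reload(unsigned int ms)
{
	unsigned int reload = (ms >= 20) ? (ms - 20) / 20 : 0;
	return reload > 0xff ? 0xff : reload;	/* s->maxdata is 0xff */
}

int main(void)
{
	/* e.g. a requested 1 s timeout maps to reload 49 -> 1000 ms */
	unsigned int reload = addi_ms_to_reload(1000);
	printf("reload=%u -> %u ms\n", reload, addi_reload_to_ms(reload));
	return 0;
}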
Pafcholini/emotion_tw_COI3
drivers/mfd/t7l66xb.c
2347
11187
/* * * Toshiba T7L66XB core mfd support * * Copyright (c) 2005, 2007, 2008 Ian Molton * Copyright (c) 2008 Dmitry Baryshkov * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * T7L66 features: * * Supported in this driver: * SD/MMC * SM/NAND flash controller * * As yet not supported * GPIO interface (on NAND pins) * Serial interface * TFT 'interface converter' * PCMCIA interface logic */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/err.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/irq.h> #include <linux/clk.h> #include <linux/platform_device.h> #include <linux/mfd/core.h> #include <linux/mfd/tmio.h> #include <linux/mfd/t7l66xb.h> enum { T7L66XB_CELL_NAND, T7L66XB_CELL_MMC, }; static const struct resource t7l66xb_mmc_resources[] = { { .start = 0x800, .end = 0x9ff, .flags = IORESOURCE_MEM, }, { .start = IRQ_T7L66XB_MMC, .end = IRQ_T7L66XB_MMC, .flags = IORESOURCE_IRQ, }, }; #define SCR_REVID 0x08 /* b Revision ID */ #define SCR_IMR 0x42 /* b Interrupt Mask */ #define SCR_DEV_CTL 0xe0 /* b Device control */ #define SCR_ISR 0xe1 /* b Interrupt Status */ #define SCR_GPO_OC 0xf0 /* b GPO output control */ #define SCR_GPO_OS 0xf1 /* b GPO output enable */ #define SCR_GPI_S 0xf2 /* w GPI status */ #define SCR_APDC 0xf8 /* b Active pullup down ctrl */ #define SCR_DEV_CTL_USB BIT(0) /* USB enable */ #define SCR_DEV_CTL_MMC BIT(1) /* MMC enable */ /*--------------------------------------------------------------------------*/ struct t7l66xb { void __iomem *scr; /* Lock to protect registers requiring read/modify/write ops. */ spinlock_t lock; struct resource rscr; struct clk *clk48m; struct clk *clk32k; int irq; int irq_base; }; /*--------------------------------------------------------------------------*/ static int t7l66xb_mmc_enable(struct platform_device *mmc) { struct platform_device *dev = to_platform_device(mmc->dev.parent); struct t7l66xb *t7l66xb = platform_get_drvdata(dev); unsigned long flags; u8 dev_ctl; clk_enable(t7l66xb->clk32k); spin_lock_irqsave(&t7l66xb->lock, flags); dev_ctl = tmio_ioread8(t7l66xb->scr + SCR_DEV_CTL); dev_ctl |= SCR_DEV_CTL_MMC; tmio_iowrite8(dev_ctl, t7l66xb->scr + SCR_DEV_CTL); spin_unlock_irqrestore(&t7l66xb->lock, flags); tmio_core_mmc_enable(t7l66xb->scr + 0x200, 0, t7l66xb_mmc_resources[0].start & 0xfffe); return 0; } static int t7l66xb_mmc_disable(struct platform_device *mmc) { struct platform_device *dev = to_platform_device(mmc->dev.parent); struct t7l66xb *t7l66xb = platform_get_drvdata(dev); unsigned long flags; u8 dev_ctl; spin_lock_irqsave(&t7l66xb->lock, flags); dev_ctl = tmio_ioread8(t7l66xb->scr + SCR_DEV_CTL); dev_ctl &= ~SCR_DEV_CTL_MMC; tmio_iowrite8(dev_ctl, t7l66xb->scr + SCR_DEV_CTL); spin_unlock_irqrestore(&t7l66xb->lock, flags); clk_disable(t7l66xb->clk32k); return 0; } static void t7l66xb_mmc_pwr(struct platform_device *mmc, int state) { struct platform_device *dev = to_platform_device(mmc->dev.parent); struct t7l66xb *t7l66xb = platform_get_drvdata(dev); tmio_core_mmc_pwr(t7l66xb->scr + 0x200, 0, state); } static void t7l66xb_mmc_clk_div(struct platform_device *mmc, int state) { struct platform_device *dev = to_platform_device(mmc->dev.parent); struct t7l66xb *t7l66xb = platform_get_drvdata(dev); tmio_core_mmc_clk_div(t7l66xb->scr + 0x200, 0, state); } /*--------------------------------------------------------------------------*/ static struct tmio_mmc_data 
t7166xb_mmc_data = { .hclk = 24000000, .set_pwr = t7l66xb_mmc_pwr, .set_clk_div = t7l66xb_mmc_clk_div, }; static const struct resource t7l66xb_nand_resources[] = { { .start = 0xc00, .end = 0xc07, .flags = IORESOURCE_MEM, }, { .start = 0x0100, .end = 0x01ff, .flags = IORESOURCE_MEM, }, { .start = IRQ_T7L66XB_NAND, .end = IRQ_T7L66XB_NAND, .flags = IORESOURCE_IRQ, }, }; static struct mfd_cell t7l66xb_cells[] = { [T7L66XB_CELL_MMC] = { .name = "tmio-mmc", .enable = t7l66xb_mmc_enable, .disable = t7l66xb_mmc_disable, .platform_data = &t7166xb_mmc_data, .pdata_size = sizeof(t7166xb_mmc_data), .num_resources = ARRAY_SIZE(t7l66xb_mmc_resources), .resources = t7l66xb_mmc_resources, }, [T7L66XB_CELL_NAND] = { .name = "tmio-nand", .num_resources = ARRAY_SIZE(t7l66xb_nand_resources), .resources = t7l66xb_nand_resources, }, }; /*--------------------------------------------------------------------------*/ /* Handle the T7L66XB interrupt mux */ static void t7l66xb_irq(unsigned int irq, struct irq_desc *desc) { struct t7l66xb *t7l66xb = irq_get_handler_data(irq); unsigned int isr; unsigned int i, irq_base; irq_base = t7l66xb->irq_base; while ((isr = tmio_ioread8(t7l66xb->scr + SCR_ISR) & ~tmio_ioread8(t7l66xb->scr + SCR_IMR))) for (i = 0; i < T7L66XB_NR_IRQS; i++) if (isr & (1 << i)) generic_handle_irq(irq_base + i); } static void t7l66xb_irq_mask(struct irq_data *data) { struct t7l66xb *t7l66xb = irq_data_get_irq_chip_data(data); unsigned long flags; u8 imr; spin_lock_irqsave(&t7l66xb->lock, flags); imr = tmio_ioread8(t7l66xb->scr + SCR_IMR); imr |= 1 << (data->irq - t7l66xb->irq_base); tmio_iowrite8(imr, t7l66xb->scr + SCR_IMR); spin_unlock_irqrestore(&t7l66xb->lock, flags); } static void t7l66xb_irq_unmask(struct irq_data *data) { struct t7l66xb *t7l66xb = irq_data_get_irq_chip_data(data); unsigned long flags; u8 imr; spin_lock_irqsave(&t7l66xb->lock, flags); imr = tmio_ioread8(t7l66xb->scr + SCR_IMR); imr &= ~(1 << (data->irq - t7l66xb->irq_base)); tmio_iowrite8(imr, t7l66xb->scr + SCR_IMR); spin_unlock_irqrestore(&t7l66xb->lock, flags); } static struct irq_chip t7l66xb_chip = { .name = "t7l66xb", .irq_ack = t7l66xb_irq_mask, .irq_mask = t7l66xb_irq_mask, .irq_unmask = t7l66xb_irq_unmask, }; /*--------------------------------------------------------------------------*/ /* Install the IRQ handler */ static void t7l66xb_attach_irq(struct platform_device *dev) { struct t7l66xb *t7l66xb = platform_get_drvdata(dev); unsigned int irq, irq_base; irq_base = t7l66xb->irq_base; for (irq = irq_base; irq < irq_base + T7L66XB_NR_IRQS; irq++) { irq_set_chip_and_handler(irq, &t7l66xb_chip, handle_level_irq); irq_set_chip_data(irq, t7l66xb); #ifdef CONFIG_ARM set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); #endif } irq_set_irq_type(t7l66xb->irq, IRQ_TYPE_EDGE_FALLING); irq_set_handler_data(t7l66xb->irq, t7l66xb); irq_set_chained_handler(t7l66xb->irq, t7l66xb_irq); } static void t7l66xb_detach_irq(struct platform_device *dev) { struct t7l66xb *t7l66xb = platform_get_drvdata(dev); unsigned int irq, irq_base; irq_base = t7l66xb->irq_base; irq_set_chained_handler(t7l66xb->irq, NULL); irq_set_handler_data(t7l66xb->irq, NULL); for (irq = irq_base; irq < irq_base + T7L66XB_NR_IRQS; irq++) { #ifdef CONFIG_ARM set_irq_flags(irq, 0); #endif irq_set_chip(irq, NULL); irq_set_chip_data(irq, NULL); } } /*--------------------------------------------------------------------------*/ #ifdef CONFIG_PM static int t7l66xb_suspend(struct platform_device *dev, pm_message_t state) { struct t7l66xb *t7l66xb = platform_get_drvdata(dev); struct 
t7l66xb_platform_data *pdata = dev->dev.platform_data; if (pdata && pdata->suspend) pdata->suspend(dev); clk_disable(t7l66xb->clk48m); return 0; } static int t7l66xb_resume(struct platform_device *dev) { struct t7l66xb *t7l66xb = platform_get_drvdata(dev); struct t7l66xb_platform_data *pdata = dev->dev.platform_data; clk_enable(t7l66xb->clk48m); if (pdata && pdata->resume) pdata->resume(dev); tmio_core_mmc_enable(t7l66xb->scr + 0x200, 0, t7l66xb_mmc_resources[0].start & 0xfffe); return 0; } #else #define t7l66xb_suspend NULL #define t7l66xb_resume NULL #endif /*--------------------------------------------------------------------------*/ static int t7l66xb_probe(struct platform_device *dev) { struct t7l66xb_platform_data *pdata = dev->dev.platform_data; struct t7l66xb *t7l66xb; struct resource *iomem, *rscr; int ret; if (pdata == NULL) return -EINVAL; iomem = platform_get_resource(dev, IORESOURCE_MEM, 0); if (!iomem) return -EINVAL; t7l66xb = kzalloc(sizeof *t7l66xb, GFP_KERNEL); if (!t7l66xb) return -ENOMEM; spin_lock_init(&t7l66xb->lock); platform_set_drvdata(dev, t7l66xb); ret = platform_get_irq(dev, 0); if (ret >= 0) t7l66xb->irq = ret; else goto err_noirq; t7l66xb->irq_base = pdata->irq_base; t7l66xb->clk32k = clk_get(&dev->dev, "CLK_CK32K"); if (IS_ERR(t7l66xb->clk32k)) { ret = PTR_ERR(t7l66xb->clk32k); goto err_clk32k_get; } t7l66xb->clk48m = clk_get(&dev->dev, "CLK_CK48M"); if (IS_ERR(t7l66xb->clk48m)) { ret = PTR_ERR(t7l66xb->clk48m); goto err_clk48m_get; } rscr = &t7l66xb->rscr; rscr->name = "t7l66xb-core"; rscr->start = iomem->start; rscr->end = iomem->start + 0xff; rscr->flags = IORESOURCE_MEM; ret = request_resource(iomem, rscr); if (ret) goto err_request_scr; t7l66xb->scr = ioremap(rscr->start, resource_size(rscr)); if (!t7l66xb->scr) { ret = -ENOMEM; goto err_ioremap; } clk_enable(t7l66xb->clk48m); if (pdata && pdata->enable) pdata->enable(dev); /* Mask all interrupts */ tmio_iowrite8(0xbf, t7l66xb->scr + SCR_IMR); printk(KERN_INFO "%s rev %d @ 0x%08lx, irq %d\n", dev->name, tmio_ioread8(t7l66xb->scr + SCR_REVID), (unsigned long)iomem->start, t7l66xb->irq); t7l66xb_attach_irq(dev); t7l66xb_cells[T7L66XB_CELL_NAND].platform_data = pdata->nand_data; t7l66xb_cells[T7L66XB_CELL_NAND].pdata_size = sizeof(*pdata->nand_data); ret = mfd_add_devices(&dev->dev, dev->id, t7l66xb_cells, ARRAY_SIZE(t7l66xb_cells), iomem, t7l66xb->irq_base, NULL); if (!ret) return 0; t7l66xb_detach_irq(dev); iounmap(t7l66xb->scr); err_ioremap: release_resource(&t7l66xb->rscr); err_request_scr: clk_put(t7l66xb->clk48m); err_clk48m_get: clk_put(t7l66xb->clk32k); err_clk32k_get: err_noirq: kfree(t7l66xb); return ret; } static int t7l66xb_remove(struct platform_device *dev) { struct t7l66xb_platform_data *pdata = dev->dev.platform_data; struct t7l66xb *t7l66xb = platform_get_drvdata(dev); int ret; ret = pdata->disable(dev); clk_disable(t7l66xb->clk48m); clk_put(t7l66xb->clk48m); clk_disable(t7l66xb->clk32k); clk_put(t7l66xb->clk32k); t7l66xb_detach_irq(dev); iounmap(t7l66xb->scr); release_resource(&t7l66xb->rscr); mfd_remove_devices(&dev->dev); platform_set_drvdata(dev, NULL); kfree(t7l66xb); return ret; } static struct platform_driver t7l66xb_platform_driver = { .driver = { .name = "t7l66xb", .owner = THIS_MODULE, }, .suspend = t7l66xb_suspend, .resume = t7l66xb_resume, .probe = t7l66xb_probe, .remove = t7l66xb_remove, }; /*--------------------------------------------------------------------------*/ module_platform_driver(t7l66xb_platform_driver); MODULE_DESCRIPTION("Toshiba T7L66XB core driver"); 
MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Ian Molton"); MODULE_ALIAS("platform:t7l66xb");
gpl-2.0
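t7l66xb_irq() above demultiplexes one chained interrupt line into per-source sub-interrupts: it loops while any unmasked status bit (ISR & ~IMR) is set and dispatches each set bit to its handler. A minimal sketch of that loop, with read_isr()/read_imr()/dispatch() as hypothetical stand-ins for the tmio register accessors and generic_handle_irq():
/*
 * Cascaded interrupt demux: keep servicing until no unmasked source
 * is still pending, so a source that re-asserts during dispatch is
 * not lost.
 */
#include <stdint.h>

#define NR_SUB_IRQS 8

extern uint8_t read_isr(void);		/* assumed status accessor */
extern uint8_t read_imr(void);		/* assumed mask accessor */
extern void dispatch(unsigned int subirq);

void demux_irq(void)
{
	uint8_t pending;
	unsigned int i;

	while ((pending = read_isr() & (uint8_t)~read_imr()) != 0)
		for (i = 0; i < NR_SUB_IRQS; i++)
			if (pending & (1u << i))
				dispatch(i);
}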
sktjdgns1189/android_kernel_samsung_smdk4412
drivers/watchdog/pcwd_usb.c
4139
22745
/* * Berkshire USB-PC Watchdog Card Driver * * (c) Copyright 2004-2007 Wim Van Sebroeck <wim@iguana.be>. * * Based on source code of the following authors: * Ken Hollis <kenji@bitgate.com>, * Alan Cox <alan@lxorguk.ukuu.org.uk>, * Matt Domsch <Matt_Domsch@dell.com>, * Rob Radez <rob@osinvestor.com>, * Greg Kroah-Hartman <greg@kroah.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Neither Wim Van Sebroeck nor Iguana vzw. admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * * Thanks also to Simon Machell at Berkshire Products Inc. for * providing the test hardware. More info is available at * http://www.berkprod.com/ or http://www.pcwatchdog.com/ */ #include <linux/module.h> /* For module specific items */ #include <linux/moduleparam.h> /* For new module parameters */ #include <linux/types.h> /* For standard types (like size_t) */ #include <linux/errno.h> /* For the -ENODEV/... values */ #include <linux/kernel.h> /* For printk/panic/... */ #include <linux/delay.h> /* For mdelay function */ #include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR) */ #include <linux/watchdog.h> /* For the watchdog specific items */ #include <linux/notifier.h> /* For notifier support */ #include <linux/reboot.h> /* For reboot_notifier stuff */ #include <linux/init.h> /* For __init/__exit/... */ #include <linux/fs.h> /* For file operations */ #include <linux/usb.h> /* For USB functions */ #include <linux/slab.h> /* For kmalloc, ... */ #include <linux/mutex.h> /* For mutex locking */ #include <linux/hid.h> /* For HID_REQ_SET_REPORT & HID_DT_REPORT */ #include <linux/uaccess.h> /* For copy_to_user/put_user/... */ #ifdef CONFIG_USB_DEBUG static int debug = 1; #else static int debug; #endif /* Use our own dbg macro */ #undef dbg #define dbg(format, arg...) \ do { if (debug) printk(KERN_DEBUG PFX format "\n" , ## arg); } while (0) /* Module and Version Information */ #define DRIVER_VERSION "1.02" #define DRIVER_AUTHOR "Wim Van Sebroeck <wim@iguana.be>" #define DRIVER_DESC "Berkshire USB-PC Watchdog driver" #define DRIVER_LICENSE "GPL" #define DRIVER_NAME "pcwd_usb" #define PFX DRIVER_NAME ": " MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE(DRIVER_LICENSE); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS_MISCDEV(TEMP_MINOR); /* Module Parameters */ module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug enabled or not"); #define WATCHDOG_HEARTBEAT 0 /* default heartbeat = delay-time from dip-switches */ static int heartbeat = WATCHDOG_HEARTBEAT; module_param(heartbeat, int, 0); MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. 
" "(0<heartbeat<65536 or 0=delay-time from dip-switches, default=" __MODULE_STRING(WATCHDOG_HEARTBEAT) ")"); static int nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, int, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); /* The vendor and product id's for the USB-PC Watchdog card */ #define USB_PCWD_VENDOR_ID 0x0c98 #define USB_PCWD_PRODUCT_ID 0x1140 /* table of devices that work with this driver */ static struct usb_device_id usb_pcwd_table[] = { { USB_DEVICE(USB_PCWD_VENDOR_ID, USB_PCWD_PRODUCT_ID) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, usb_pcwd_table); /* according to documentation max. time to process a command for the USB * watchdog card is 100 or 200 ms, so we give it 250 ms to do its job */ #define USB_COMMAND_TIMEOUT 250 /* Watchdog's internal commands */ #define CMD_READ_TEMP 0x02 /* Read Temperature; Re-trigger Watchdog */ #define CMD_TRIGGER CMD_READ_TEMP #define CMD_GET_STATUS 0x04 /* Get Status Information */ #define CMD_GET_FIRMWARE_VERSION 0x08 /* Get Firmware Version */ #define CMD_GET_DIP_SWITCH_SETTINGS 0x0c /* Get Dip Switch Settings */ #define CMD_READ_WATCHDOG_TIMEOUT 0x18 /* Read Current Watchdog Time */ #define CMD_WRITE_WATCHDOG_TIMEOUT 0x19 /* Write Current Watchdog Time */ #define CMD_ENABLE_WATCHDOG 0x30 /* Enable / Disable Watchdog */ #define CMD_DISABLE_WATCHDOG CMD_ENABLE_WATCHDOG /* Watchdog's Dip Switch heartbeat values */ static const int heartbeat_tbl[] = { 5, /* OFF-OFF-OFF = 5 Sec */ 10, /* OFF-OFF-ON = 10 Sec */ 30, /* OFF-ON-OFF = 30 Sec */ 60, /* OFF-ON-ON = 1 Min */ 300, /* ON-OFF-OFF = 5 Min */ 600, /* ON-OFF-ON = 10 Min */ 1800, /* ON-ON-OFF = 30 Min */ 3600, /* ON-ON-ON = 1 hour */ }; /* We can only use 1 card due to the /dev/watchdog restriction */ static int cards_found; /* some internal variables */ static unsigned long is_active; static char expect_release; /* Structure to hold all of our device specific stuff */ struct usb_pcwd_private { /* save off the usb device pointer */ struct usb_device *udev; /* the interface for this device */ struct usb_interface *interface; /* the interface number used for cmd's */ unsigned int interface_number; /* the buffer to intr data */ unsigned char *intr_buffer; /* the dma address for the intr buffer */ dma_addr_t intr_dma; /* the size of the intr buffer */ size_t intr_size; /* the urb used for the intr pipe */ struct urb *intr_urb; /* The command that is reported back */ unsigned char cmd_command; /* The data MSB that is reported back */ unsigned char cmd_data_msb; /* The data LSB that is reported back */ unsigned char cmd_data_lsb; /* true if we received a report after a command */ atomic_t cmd_received; /* Whether or not the device exists */ int exists; /* locks this structure */ struct mutex mtx; }; static struct usb_pcwd_private *usb_pcwd_device; /* prevent races between open() and disconnect() */ static DEFINE_MUTEX(disconnect_mutex); /* local function prototypes */ static int usb_pcwd_probe(struct usb_interface *interface, const struct usb_device_id *id); static void usb_pcwd_disconnect(struct usb_interface *interface); /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver usb_pcwd_driver = { .name = DRIVER_NAME, .probe = usb_pcwd_probe, .disconnect = usb_pcwd_disconnect, .id_table = usb_pcwd_table, }; static void usb_pcwd_intr_done(struct urb *urb) { struct usb_pcwd_private *usb_pcwd = (struct usb_pcwd_private *)urb->context; unsigned char *data = 
usb_pcwd->intr_buffer; int retval; switch (urb->status) { case 0: /* success */ break; case -ECONNRESET: /* unlink */ case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dbg("%s - urb shutting down with status: %d", __func__, urb->status); return; /* -EPIPE: should clear the halt */ default: /* error */ dbg("%s - nonzero urb status received: %d", __func__, urb->status); goto resubmit; } dbg("received following data cmd=0x%02x msb=0x%02x lsb=0x%02x", data[0], data[1], data[2]); usb_pcwd->cmd_command = data[0]; usb_pcwd->cmd_data_msb = data[1]; usb_pcwd->cmd_data_lsb = data[2]; /* notify anyone waiting that the cmd has finished */ atomic_set(&usb_pcwd->cmd_received, 1); resubmit: retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) printk(KERN_ERR PFX "can't resubmit intr, " "usb_submit_urb failed with result %d\n", retval); } static int usb_pcwd_send_command(struct usb_pcwd_private *usb_pcwd, unsigned char cmd, unsigned char *msb, unsigned char *lsb) { int got_response, count; unsigned char buf[6]; /* We will not send any commands if the USB PCWD device does * not exist */ if ((!usb_pcwd) || (!usb_pcwd->exists)) return -1; /* The USB PC Watchdog uses a 6 byte report format. * The board currently uses only 3 of the six bytes of the report. */ buf[0] = cmd; /* Byte 0 = CMD */ buf[1] = *msb; /* Byte 1 = Data MSB */ buf[2] = *lsb; /* Byte 2 = Data LSB */ buf[3] = buf[4] = buf[5] = 0; /* All other bytes not used */ dbg("sending following data cmd=0x%02x msb=0x%02x lsb=0x%02x", buf[0], buf[1], buf[2]); atomic_set(&usb_pcwd->cmd_received, 0); if (usb_control_msg(usb_pcwd->udev, usb_sndctrlpipe(usb_pcwd->udev, 0), HID_REQ_SET_REPORT, HID_DT_REPORT, 0x0200, usb_pcwd->interface_number, buf, sizeof(buf), USB_COMMAND_TIMEOUT) != sizeof(buf)) { dbg("usb_pcwd_send_command: error in usb_control_msg for " "cmd 0x%x 0x%x 0x%x\n", cmd, *msb, *lsb); } /* wait till the usb card processed the command, * with a max. 
timeout of USB_COMMAND_TIMEOUT */ got_response = 0; for (count = 0; (count < USB_COMMAND_TIMEOUT) && (!got_response); count++) { mdelay(1); if (atomic_read(&usb_pcwd->cmd_received)) got_response = 1; } if ((got_response) && (cmd == usb_pcwd->cmd_command)) { /* read back response */ *msb = usb_pcwd->cmd_data_msb; *lsb = usb_pcwd->cmd_data_lsb; } return got_response; } static int usb_pcwd_start(struct usb_pcwd_private *usb_pcwd) { unsigned char msb = 0x00; unsigned char lsb = 0x00; int retval; /* Enable Watchdog */ retval = usb_pcwd_send_command(usb_pcwd, CMD_ENABLE_WATCHDOG, &msb, &lsb); if ((retval == 0) || (lsb == 0)) { printk(KERN_ERR PFX "Card did not acknowledge enable attempt\n"); return -1; } return 0; } static int usb_pcwd_stop(struct usb_pcwd_private *usb_pcwd) { unsigned char msb = 0xA5; unsigned char lsb = 0xC3; int retval; /* Disable Watchdog */ retval = usb_pcwd_send_command(usb_pcwd, CMD_DISABLE_WATCHDOG, &msb, &lsb); if ((retval == 0) || (lsb != 0)) { printk(KERN_ERR PFX "Card did not acknowledge disable attempt\n"); return -1; } return 0; } static int usb_pcwd_keepalive(struct usb_pcwd_private *usb_pcwd) { unsigned char dummy; /* Re-trigger Watchdog */ usb_pcwd_send_command(usb_pcwd, CMD_TRIGGER, &dummy, &dummy); return 0; } static int usb_pcwd_set_heartbeat(struct usb_pcwd_private *usb_pcwd, int t) { unsigned char msb = t / 256; unsigned char lsb = t % 256; if ((t < 0x0001) || (t > 0xFFFF)) return -EINVAL; /* Write new heartbeat to watchdog */ usb_pcwd_send_command(usb_pcwd, CMD_WRITE_WATCHDOG_TIMEOUT, &msb, &lsb); heartbeat = t; return 0; } static int usb_pcwd_get_temperature(struct usb_pcwd_private *usb_pcwd, int *temperature) { unsigned char msb, lsb; usb_pcwd_send_command(usb_pcwd, CMD_READ_TEMP, &msb, &lsb); /* * Convert celsius to fahrenheit, since this was * the decided 'standard' for this return value. */ *temperature = (lsb * 9 / 5) + 32; return 0; } static int usb_pcwd_get_timeleft(struct usb_pcwd_private *usb_pcwd, int *time_left) { unsigned char msb, lsb; /* Read the time that's left before rebooting */ /* Note: if the board is not yet armed then we will read 0xFFFF */ usb_pcwd_send_command(usb_pcwd, CMD_READ_WATCHDOG_TIMEOUT, &msb, &lsb); *time_left = (msb << 8) + lsb; return 0; } /* * /dev/watchdog handling */ static ssize_t usb_pcwd_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) { /* See if we got the magic character 'V' and reload the timer */ if (len) { if (!nowayout) { size_t i; /* note: just in case someone wrote the magic character * five months ago... */ expect_release = 0; /* scan to see whether or not we got the * magic character */ for (i = 0; i != len; i++) { char c; if (get_user(c, data + i)) return -EFAULT; if (c == 'V') expect_release = 42; } } /* someone wrote to us, we should reload the timer */ usb_pcwd_keepalive(usb_pcwd_device); } return len; } static long usb_pcwd_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; int __user *p = argp; static const struct watchdog_info ident = { .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, .firmware_version = 1, .identity = DRIVER_NAME, }; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(argp, &ident, sizeof(ident)) ? 
-EFAULT : 0; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: return put_user(0, p); case WDIOC_GETTEMP: { int temperature; if (usb_pcwd_get_temperature(usb_pcwd_device, &temperature)) return -EFAULT; return put_user(temperature, p); } case WDIOC_SETOPTIONS: { int new_options, retval = -EINVAL; if (get_user(new_options, p)) return -EFAULT; if (new_options & WDIOS_DISABLECARD) { usb_pcwd_stop(usb_pcwd_device); retval = 0; } if (new_options & WDIOS_ENABLECARD) { usb_pcwd_start(usb_pcwd_device); retval = 0; } return retval; } case WDIOC_KEEPALIVE: usb_pcwd_keepalive(usb_pcwd_device); return 0; case WDIOC_SETTIMEOUT: { int new_heartbeat; if (get_user(new_heartbeat, p)) return -EFAULT; if (usb_pcwd_set_heartbeat(usb_pcwd_device, new_heartbeat)) return -EINVAL; usb_pcwd_keepalive(usb_pcwd_device); /* Fall */ } case WDIOC_GETTIMEOUT: return put_user(heartbeat, p); case WDIOC_GETTIMELEFT: { int time_left; if (usb_pcwd_get_timeleft(usb_pcwd_device, &time_left)) return -EFAULT; return put_user(time_left, p); } default: return -ENOTTY; } } static int usb_pcwd_open(struct inode *inode, struct file *file) { /* /dev/watchdog can only be opened once */ if (test_and_set_bit(0, &is_active)) return -EBUSY; /* Activate */ usb_pcwd_start(usb_pcwd_device); usb_pcwd_keepalive(usb_pcwd_device); return nonseekable_open(inode, file); } static int usb_pcwd_release(struct inode *inode, struct file *file) { /* * Shut off the timer. */ if (expect_release == 42) { usb_pcwd_stop(usb_pcwd_device); } else { printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n"); usb_pcwd_keepalive(usb_pcwd_device); } expect_release = 0; clear_bit(0, &is_active); return 0; } /* * /dev/temperature handling */ static ssize_t usb_pcwd_temperature_read(struct file *file, char __user *data, size_t len, loff_t *ppos) { int temperature; if (usb_pcwd_get_temperature(usb_pcwd_device, &temperature)) return -EFAULT; if (copy_to_user(data, &temperature, 1)) return -EFAULT; return 1; } static int usb_pcwd_temperature_open(struct inode *inode, struct file *file) { return nonseekable_open(inode, file); } static int usb_pcwd_temperature_release(struct inode *inode, struct file *file) { return 0; } /* * Notify system */ static int usb_pcwd_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { if (code == SYS_DOWN || code == SYS_HALT) usb_pcwd_stop(usb_pcwd_device); /* Turn the WDT off */ return NOTIFY_DONE; } /* * Kernel Interfaces */ static const struct file_operations usb_pcwd_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = usb_pcwd_write, .unlocked_ioctl = usb_pcwd_ioctl, .open = usb_pcwd_open, .release = usb_pcwd_release, }; static struct miscdevice usb_pcwd_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &usb_pcwd_fops, }; static const struct file_operations usb_pcwd_temperature_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = usb_pcwd_temperature_read, .open = usb_pcwd_temperature_open, .release = usb_pcwd_temperature_release, }; static struct miscdevice usb_pcwd_temperature_miscdev = { .minor = TEMP_MINOR, .name = "temperature", .fops = &usb_pcwd_temperature_fops, }; static struct notifier_block usb_pcwd_notifier = { .notifier_call = usb_pcwd_notify_sys, }; /** * usb_pcwd_delete */ static inline void usb_pcwd_delete(struct usb_pcwd_private *usb_pcwd) { usb_free_urb(usb_pcwd->intr_urb); if (usb_pcwd->intr_buffer != NULL) usb_free_coherent(usb_pcwd->udev, usb_pcwd->intr_size, usb_pcwd->intr_buffer, usb_pcwd->intr_dma); kfree(usb_pcwd); } /** * usb_pcwd_probe * * Called by the 
usb core when a new device is connected that it thinks * this driver might be interested in. */ static int usb_pcwd_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(interface); struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; struct usb_pcwd_private *usb_pcwd = NULL; int pipe, maxp; int retval = -ENOMEM; int got_fw_rev; unsigned char fw_rev_major, fw_rev_minor; char fw_ver_str[20]; unsigned char option_switches, dummy; cards_found++; if (cards_found > 1) { printk(KERN_ERR PFX "This driver only supports 1 device\n"); return -ENODEV; } /* get the active interface descriptor */ iface_desc = interface->cur_altsetting; /* check out that we have a HID device */ if (!(iface_desc->desc.bInterfaceClass == USB_CLASS_HID)) { printk(KERN_ERR PFX "The device isn't a Human Interface Device\n"); return -ENODEV; } /* check out the endpoint: it has to be Interrupt & IN */ endpoint = &iface_desc->endpoint[0].desc; if (!usb_endpoint_is_int_in(endpoint)) { /* we didn't find an Interrupt endpoint with direction IN */ printk(KERN_ERR PFX "Couldn't find an INTR & IN endpoint\n"); return -ENODEV; } /* get a handle to the interrupt data pipe */ pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress); maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe)); /* allocate memory for our device and initialize it */ usb_pcwd = kzalloc(sizeof(struct usb_pcwd_private), GFP_KERNEL); if (usb_pcwd == NULL) { printk(KERN_ERR PFX "Out of memory\n"); goto error; } usb_pcwd_device = usb_pcwd; mutex_init(&usb_pcwd->mtx); usb_pcwd->udev = udev; usb_pcwd->interface = interface; usb_pcwd->interface_number = iface_desc->desc.bInterfaceNumber; usb_pcwd->intr_size = (le16_to_cpu(endpoint->wMaxPacketSize) > 8 ? le16_to_cpu(endpoint->wMaxPacketSize) : 8); /* set up the memory buffers */ usb_pcwd->intr_buffer = usb_alloc_coherent(udev, usb_pcwd->intr_size, GFP_ATOMIC, &usb_pcwd->intr_dma); if (!usb_pcwd->intr_buffer) { printk(KERN_ERR PFX "Out of memory\n"); goto error; } /* allocate the urbs */ usb_pcwd->intr_urb = usb_alloc_urb(0, GFP_KERNEL); if (!usb_pcwd->intr_urb) { printk(KERN_ERR PFX "Out of memory\n"); goto error; } /* initialise the intr urbs */ usb_fill_int_urb(usb_pcwd->intr_urb, udev, pipe, usb_pcwd->intr_buffer, usb_pcwd->intr_size, usb_pcwd_intr_done, usb_pcwd, endpoint->bInterval); usb_pcwd->intr_urb->transfer_dma = usb_pcwd->intr_dma; usb_pcwd->intr_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; /* register our interrupt URB with the USB system */ if (usb_submit_urb(usb_pcwd->intr_urb, GFP_KERNEL)) { printk(KERN_ERR PFX "Problem registering interrupt URB\n"); retval = -EIO; /* failure */ goto error; } /* The device exists and can be communicated with */ usb_pcwd->exists = 1; /* disable card */ usb_pcwd_stop(usb_pcwd); /* Get the Firmware Version */ got_fw_rev = usb_pcwd_send_command(usb_pcwd, CMD_GET_FIRMWARE_VERSION, &fw_rev_major, &fw_rev_minor); if (got_fw_rev) sprintf(fw_ver_str, "%u.%02u", fw_rev_major, fw_rev_minor); else sprintf(fw_ver_str, "<card no answer>"); printk(KERN_INFO PFX "Found card (Firmware: %s) with temp option\n", fw_ver_str); /* Get switch settings */ usb_pcwd_send_command(usb_pcwd, CMD_GET_DIP_SWITCH_SETTINGS, &dummy, &option_switches); printk(KERN_INFO PFX "Option switches (0x%02x): " "Temperature Reset Enable=%s, Power On Delay=%s\n", option_switches, ((option_switches & 0x10) ? "ON" : "OFF"), ((option_switches & 0x08) ? 
"ON" : "OFF")); /* If heartbeat = 0 then we use the heartbeat from the dip-switches */ if (heartbeat == 0) heartbeat = heartbeat_tbl[(option_switches & 0x07)]; /* Check that the heartbeat value is within its range; * if not reset to the default */ if (usb_pcwd_set_heartbeat(usb_pcwd, heartbeat)) { usb_pcwd_set_heartbeat(usb_pcwd, WATCHDOG_HEARTBEAT); printk(KERN_INFO PFX "heartbeat value must be 0<heartbeat<65536, using %d\n", WATCHDOG_HEARTBEAT); } retval = register_reboot_notifier(&usb_pcwd_notifier); if (retval != 0) { printk(KERN_ERR PFX "cannot register reboot notifier (err=%d)\n", retval); goto error; } retval = misc_register(&usb_pcwd_temperature_miscdev); if (retval != 0) { printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n", TEMP_MINOR, retval); goto err_out_unregister_reboot; } retval = misc_register(&usb_pcwd_miscdev); if (retval != 0) { printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n", WATCHDOG_MINOR, retval); goto err_out_misc_deregister; } /* we can register the device now, as it is ready */ usb_set_intfdata(interface, usb_pcwd); printk(KERN_INFO PFX "initialized. heartbeat=%d sec (nowayout=%d)\n", heartbeat, nowayout); return 0; err_out_misc_deregister: misc_deregister(&usb_pcwd_temperature_miscdev); err_out_unregister_reboot: unregister_reboot_notifier(&usb_pcwd_notifier); error: if (usb_pcwd) usb_pcwd_delete(usb_pcwd); usb_pcwd_device = NULL; return retval; } /** * usb_pcwd_disconnect * * Called by the usb core when the device is removed from the system. * * This routine guarantees that the driver will not submit any more urbs * by clearing dev->udev. */ static void usb_pcwd_disconnect(struct usb_interface *interface) { struct usb_pcwd_private *usb_pcwd; /* prevent races with open() */ mutex_lock(&disconnect_mutex); usb_pcwd = usb_get_intfdata(interface); usb_set_intfdata(interface, NULL); mutex_lock(&usb_pcwd->mtx); /* Stop the timer before we leave */ if (!nowayout) usb_pcwd_stop(usb_pcwd); /* We should now stop communicating with the USB PCWD device */ usb_pcwd->exists = 0; /* Deregister */ misc_deregister(&usb_pcwd_miscdev); misc_deregister(&usb_pcwd_temperature_miscdev); unregister_reboot_notifier(&usb_pcwd_notifier); mutex_unlock(&usb_pcwd->mtx); /* Delete the USB PCWD device */ usb_pcwd_delete(usb_pcwd); cards_found--; mutex_unlock(&disconnect_mutex); printk(KERN_INFO PFX "USB PC Watchdog disconnected\n"); } /** * usb_pcwd_init */ static int __init usb_pcwd_init(void) { int result; /* register this driver with the USB subsystem */ result = usb_register(&usb_pcwd_driver); if (result) { printk(KERN_ERR PFX "usb_register failed. Error number %d\n", result); return result; } printk(KERN_INFO PFX DRIVER_DESC " v" DRIVER_VERSION "\n"); return 0; } /** * usb_pcwd_exit */ static void __exit usb_pcwd_exit(void) { /* deregister this driver with the USB subsystem */ usb_deregister(&usb_pcwd_driver); } module_init(usb_pcwd_init); module_exit(usb_pcwd_exit);
gpl-2.0
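usb_pcwd_send_command() above talks to the card through a 6-byte HID report of which only bytes 0..2 are used: command, data MSB, data LSB; 16-bit replies such as the time-left value come back split the same way and are recombined as (msb << 8) + lsb. A self-contained encoder/decoder sketch of that framing (illustrative only; the real driver sends the buffer with usb_control_msg()):
/*
 * Berkshire USB-PC Watchdog report framing: cmd, MSB, LSB in the
 * first three bytes of a zeroed 6-byte buffer.
 */
#include <stdint.h>
#include <string.h>

/* build the 6-byte report for a command carrying a 16-bit argument */
static void pcwd_encode(uint8_t buf[6], uint8_t cmd, uint16_t arg)
{
	memset(buf, 0, 6);
	buf[0] = cmd;			/* Byte 0 = CMD */
	buf[1] = (uint8_t)(arg >> 8);	/* Byte 1 = Data MSB */
	buf[2] = (uint8_t)(arg & 0xff);	/* Byte 2 = Data LSB */
}

/* recombine the MSB/LSB bytes of a reply, e.g. the time-left value */
static uint16_t pcwd_decode(uint8_t msb, uint8_t lsb)
{
	return (uint16_t)((msb << 8) | lsb);
}

int main(void)
{
	uint8_t buf[6];

	/* round-trip a 300 s heartbeat through the framing */
	pcwd_encode(buf, 0x19 /* CMD_WRITE_WATCHDOG_TIMEOUT */, 300);
	return pcwd_decode(buf[1], buf[2]) == 300 ? 0 : 1;
}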
KylinMod/android_kernel_lge_gproj
drivers/media/video/m5mols/m5mols_core.c
4651
27439
/* * Driver for M-5MOLS 8M Pixel camera sensor with ISP * * Copyright (C) 2011 Samsung Electronics Co., Ltd. * Author: HeungJun Kim <riverful.kim@samsung.com> * * Copyright (C) 2009 Samsung Electronics Co., Ltd. * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/i2c.h> #include <linux/slab.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/regulator/consumer.h> #include <linux/videodev2.h> #include <linux/module.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-subdev.h> #include <media/m5mols.h> #include "m5mols.h" #include "m5mols_reg.h" int m5mols_debug; module_param(m5mols_debug, int, 0644); #define MODULE_NAME "M5MOLS" #define M5MOLS_I2C_CHECK_RETRY 500 /* The regulator consumer names for external voltage regulators */ static struct regulator_bulk_data supplies[] = { { .supply = "core", /* ARM core power, 1.2V */ }, { .supply = "dig_18", /* digital power 1, 1.8V */ }, { .supply = "d_sensor", /* sensor power 1, 1.8V */ }, { .supply = "dig_28", /* digital power 2, 2.8V */ }, { .supply = "a_sensor", /* analog power */ }, { .supply = "dig_12", /* digital power 3, 1.2V */ }, }; static struct v4l2_mbus_framefmt m5mols_default_ffmt[M5MOLS_RESTYPE_MAX] = { [M5MOLS_RESTYPE_MONITOR] = { .width = 1920, .height = 1080, .code = V4L2_MBUS_FMT_VYUY8_2X8, .field = V4L2_FIELD_NONE, .colorspace = V4L2_COLORSPACE_JPEG, }, [M5MOLS_RESTYPE_CAPTURE] = { .width = 1920, .height = 1080, .code = V4L2_MBUS_FMT_JPEG_1X8, .field = V4L2_FIELD_NONE, .colorspace = V4L2_COLORSPACE_JPEG, }, }; #define SIZE_DEFAULT_FFMT ARRAY_SIZE(m5mols_default_ffmt) static const struct m5mols_resolution m5mols_reg_res[] = { { 0x01, M5MOLS_RESTYPE_MONITOR, 128, 96 }, /* SUB-QCIF */ { 0x03, M5MOLS_RESTYPE_MONITOR, 160, 120 }, /* QQVGA */ { 0x05, M5MOLS_RESTYPE_MONITOR, 176, 144 }, /* QCIF */ { 0x06, M5MOLS_RESTYPE_MONITOR, 176, 176 }, { 0x08, M5MOLS_RESTYPE_MONITOR, 240, 320 }, /* QVGA */ { 0x09, M5MOLS_RESTYPE_MONITOR, 320, 240 }, /* QVGA */ { 0x0c, M5MOLS_RESTYPE_MONITOR, 240, 400 }, /* WQVGA */ { 0x0d, M5MOLS_RESTYPE_MONITOR, 400, 240 }, /* WQVGA */ { 0x0e, M5MOLS_RESTYPE_MONITOR, 352, 288 }, /* CIF */ { 0x13, M5MOLS_RESTYPE_MONITOR, 480, 360 }, { 0x15, M5MOLS_RESTYPE_MONITOR, 640, 360 }, /* qHD */ { 0x17, M5MOLS_RESTYPE_MONITOR, 640, 480 }, /* VGA */ { 0x18, M5MOLS_RESTYPE_MONITOR, 720, 480 }, { 0x1a, M5MOLS_RESTYPE_MONITOR, 800, 480 }, /* WVGA */ { 0x1f, M5MOLS_RESTYPE_MONITOR, 800, 600 }, /* SVGA */ { 0x21, M5MOLS_RESTYPE_MONITOR, 1280, 720 }, /* HD */ { 0x25, M5MOLS_RESTYPE_MONITOR, 1920, 1080 }, /* 1080p */ { 0x29, M5MOLS_RESTYPE_MONITOR, 3264, 2448 }, /* 2.63fps 8M */ { 0x39, M5MOLS_RESTYPE_MONITOR, 800, 602 }, /* AHS_MON debug */ { 0x02, M5MOLS_RESTYPE_CAPTURE, 320, 240 }, /* QVGA */ { 0x04, M5MOLS_RESTYPE_CAPTURE, 400, 240 }, /* WQVGA */ { 0x07, M5MOLS_RESTYPE_CAPTURE, 480, 360 }, { 0x08, M5MOLS_RESTYPE_CAPTURE, 640, 360 }, /* qHD */ { 0x09, M5MOLS_RESTYPE_CAPTURE, 640, 480 }, /* VGA */ { 0x0a, M5MOLS_RESTYPE_CAPTURE, 800, 480 }, /* WVGA */ { 0x10, M5MOLS_RESTYPE_CAPTURE, 1280, 720 }, /* HD */ { 0x14, M5MOLS_RESTYPE_CAPTURE, 1280, 960 }, /* 1M */ { 0x17, M5MOLS_RESTYPE_CAPTURE, 1600, 1200 }, /* 2M */ { 0x19, M5MOLS_RESTYPE_CAPTURE, 1920, 1080 }, /* 
Full-HD */ { 0x1a, M5MOLS_RESTYPE_CAPTURE, 2048, 1152 }, /* 3Mega */ { 0x1b, M5MOLS_RESTYPE_CAPTURE, 2048, 1536 }, { 0x1c, M5MOLS_RESTYPE_CAPTURE, 2560, 1440 }, /* 4Mega */ { 0x1d, M5MOLS_RESTYPE_CAPTURE, 2560, 1536 }, { 0x1f, M5MOLS_RESTYPE_CAPTURE, 2560, 1920 }, /* 5Mega */ { 0x21, M5MOLS_RESTYPE_CAPTURE, 3264, 1836 }, /* 6Mega */ { 0x22, M5MOLS_RESTYPE_CAPTURE, 3264, 1960 }, { 0x25, M5MOLS_RESTYPE_CAPTURE, 3264, 2448 }, /* 8Mega */ }; /** * m5mols_swap_byte - a byte array to integer conversion function * @length: size in bytes of I2C packet defined in the M-5MOLS datasheet * * Convert an I2C data byte array, performing any required byte * reordering to assure proper values for each data type, regardless * of the architecture endianness. */ static u32 m5mols_swap_byte(u8 *data, u8 length) { if (length == 1) return *data; else if (length == 2) return be16_to_cpu(*((u16 *)data)); else return be32_to_cpu(*((u32 *)data)); } /** * m5mols_read - I2C read function * @reg: combination of size, category and command for the I2C packet * @size: desired size of I2C packet * @val: read value * * Returns 0 on success, or else negative errno. */ static int m5mols_read(struct v4l2_subdev *sd, u32 size, u32 reg, u32 *val) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct m5mols_info *info = to_m5mols(sd); u8 rbuf[M5MOLS_I2C_MAX_SIZE + 1]; u8 category = I2C_CATEGORY(reg); u8 cmd = I2C_COMMAND(reg); struct i2c_msg msg[2]; u8 wbuf[5]; int ret; if (!client->adapter) return -ENODEV; msg[0].addr = client->addr; msg[0].flags = 0; msg[0].len = 5; msg[0].buf = wbuf; wbuf[0] = 5; wbuf[1] = M5MOLS_BYTE_READ; wbuf[2] = category; wbuf[3] = cmd; wbuf[4] = size; msg[1].addr = client->addr; msg[1].flags = I2C_M_RD; msg[1].len = size + 1; msg[1].buf = rbuf; /* minimum stabilization time */ usleep_range(200, 200); ret = i2c_transfer(client->adapter, msg, 2); if (ret == 2) { *val = m5mols_swap_byte(&rbuf[1], size); return 0; } if (info->isp_ready) v4l2_err(sd, "read failed: size:%d cat:%02x cmd:%02x. %d\n", size, category, cmd, ret); return ret < 0 ? ret : -EIO; } int m5mols_read_u8(struct v4l2_subdev *sd, u32 reg, u8 *val) { u32 val_32; int ret; if (I2C_SIZE(reg) != 1) { v4l2_err(sd, "Wrong data size\n"); return -EINVAL; } ret = m5mols_read(sd, I2C_SIZE(reg), reg, &val_32); if (ret) return ret; *val = (u8)val_32; return ret; } int m5mols_read_u16(struct v4l2_subdev *sd, u32 reg, u16 *val) { u32 val_32; int ret; if (I2C_SIZE(reg) != 2) { v4l2_err(sd, "Wrong data size\n"); return -EINVAL; } ret = m5mols_read(sd, I2C_SIZE(reg), reg, &val_32); if (ret) return ret; *val = (u16)val_32; return ret; } int m5mols_read_u32(struct v4l2_subdev *sd, u32 reg, u32 *val) { if (I2C_SIZE(reg) != 4) { v4l2_err(sd, "Wrong data size\n"); return -EINVAL; } return m5mols_read(sd, I2C_SIZE(reg), reg, val); } /** * m5mols_write - I2C command write function * @reg: combination of size, category and command for the I2C packet * @val: value to write * * Returns 0 on success, or else negative errno. 
*/ int m5mols_write(struct v4l2_subdev *sd, u32 reg, u32 val) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct m5mols_info *info = to_m5mols(sd); u8 wbuf[M5MOLS_I2C_MAX_SIZE + 4]; u8 category = I2C_CATEGORY(reg); u8 cmd = I2C_COMMAND(reg); u8 size = I2C_SIZE(reg); u32 *buf = (u32 *)&wbuf[4]; struct i2c_msg msg[1]; int ret; if (!client->adapter) return -ENODEV; if (size != 1 && size != 2 && size != 4) { v4l2_err(sd, "Wrong data size\n"); return -EINVAL; } msg->addr = client->addr; msg->flags = 0; msg->len = (u16)size + 4; msg->buf = wbuf; wbuf[0] = size + 4; wbuf[1] = M5MOLS_BYTE_WRITE; wbuf[2] = category; wbuf[3] = cmd; *buf = m5mols_swap_byte((u8 *)&val, size); usleep_range(200, 200); ret = i2c_transfer(client->adapter, msg, 1); if (ret == 1) return 0; if (info->isp_ready) v4l2_err(sd, "write failed: cat:%02x cmd:%02x ret:%d\n", category, cmd, ret); return ret < 0 ? ret : -EIO; } /** * m5mols_busy_wait - Busy waiting with I2C register polling * @reg: the I2C_REG() address of an 8-bit status register to check * @value: expected status register value * @mask: bit mask for the read status register value * @timeout: timeout in milliseconds, or -1 for default timeout * * The @reg register value is ANDed with @mask before comparing with @value. * * Return: 0 if the requested condition became true within less than * @timeout ms, or else negative errno. */ int m5mols_busy_wait(struct v4l2_subdev *sd, u32 reg, u32 value, u32 mask, int timeout) { int ms = timeout < 0 ? M5MOLS_BUSY_WAIT_DEF_TIMEOUT : timeout; unsigned long end = jiffies + msecs_to_jiffies(ms); u8 status; do { int ret = m5mols_read_u8(sd, reg, &status); if (ret < 0 && !(mask & M5MOLS_I2C_RDY_WAIT_FL)) return ret; if (!ret && (status & mask & 0xff) == (value & 0xff)) return 0; usleep_range(100, 250); } while (ms > 0 && time_is_after_jiffies(end)); return -EBUSY; } /** * m5mols_enable_interrupt - Clear interrupt pending bits and unmask interrupts * * Before writing desired interrupt value the INT_FACTOR register should * be read to clear pending interrupts. */ int m5mols_enable_interrupt(struct v4l2_subdev *sd, u8 reg) { struct m5mols_info *info = to_m5mols(sd); u8 mask = is_available_af(info) ? REG_INT_AF : 0; u8 dummy; int ret; ret = m5mols_read_u8(sd, SYSTEM_INT_FACTOR, &dummy); if (!ret) ret = m5mols_write(sd, SYSTEM_INT_ENABLE, reg & ~mask); return ret; } int m5mols_wait_interrupt(struct v4l2_subdev *sd, u8 irq_mask, u32 timeout) { struct m5mols_info *info = to_m5mols(sd); int ret = wait_event_interruptible_timeout(info->irq_waitq, atomic_add_unless(&info->irq_done, -1, 0), msecs_to_jiffies(timeout)); if (ret <= 0) return ret ? ret : -ETIMEDOUT; return m5mols_busy_wait(sd, SYSTEM_INT_FACTOR, irq_mask, M5MOLS_I2C_RDY_WAIT_FL | irq_mask, -1); } /** * m5mols_reg_mode - Write the mode and check busy status * * Changing the M-5MOLS mode always incurs a small delay, so the busy * status needs to be checked to guarantee that the right mode is reached. */ static int m5mols_reg_mode(struct v4l2_subdev *sd, u8 mode) { int ret = m5mols_write(sd, SYSTEM_SYSMODE, mode); if (ret < 0) return ret; return m5mols_busy_wait(sd, SYSTEM_SYSMODE, mode, 0xff, M5MOLS_MODE_CHANGE_TIMEOUT); } /** * m5mols_mode - manage the M-5MOLS's mode * @mode: the required operation mode * * The commands of M-5MOLS are grouped into specific modes. Each functionality * can be guaranteed only when the sensor is operating in the mode to which * a command belongs. 
*/ int m5mols_mode(struct m5mols_info *info, u8 mode) { struct v4l2_subdev *sd = &info->sd; int ret = -EINVAL; u8 reg; if (mode < REG_PARAMETER || mode > REG_CAPTURE) return ret; ret = m5mols_read_u8(sd, SYSTEM_SYSMODE, &reg); if (ret || reg == mode) return ret; switch (reg) { case REG_PARAMETER: ret = m5mols_reg_mode(sd, REG_MONITOR); if (mode == REG_MONITOR) break; if (!ret) ret = m5mols_reg_mode(sd, REG_CAPTURE); break; case REG_MONITOR: if (mode == REG_PARAMETER) { ret = m5mols_reg_mode(sd, REG_PARAMETER); break; } ret = m5mols_reg_mode(sd, REG_CAPTURE); break; case REG_CAPTURE: ret = m5mols_reg_mode(sd, REG_MONITOR); if (mode == REG_MONITOR) break; if (!ret) ret = m5mols_reg_mode(sd, REG_PARAMETER); break; default: v4l2_warn(sd, "Wrong mode: %d\n", mode); } if (!ret) info->mode = mode; return ret; } /** * m5mols_get_version - retrieve full revision information of M-5MOLS * * The version information includes revisions of hardware and firmware, * AutoFocus algorithm version and the version string. */ static int m5mols_get_version(struct v4l2_subdev *sd) { struct m5mols_info *info = to_m5mols(sd); struct m5mols_version *ver = &info->ver; u8 *str = ver->str; int i; int ret; ret = m5mols_read_u8(sd, SYSTEM_VER_CUSTOMER, &ver->customer); if (!ret) ret = m5mols_read_u8(sd, SYSTEM_VER_PROJECT, &ver->project); if (!ret) ret = m5mols_read_u16(sd, SYSTEM_VER_FIRMWARE, &ver->fw); if (!ret) ret = m5mols_read_u16(sd, SYSTEM_VER_HARDWARE, &ver->hw); if (!ret) ret = m5mols_read_u16(sd, SYSTEM_VER_PARAMETER, &ver->param); if (!ret) ret = m5mols_read_u16(sd, SYSTEM_VER_AWB, &ver->awb); if (!ret) ret = m5mols_read_u8(sd, AF_VERSION, &ver->af); if (ret) return ret; for (i = 0; i < VERSION_STRING_SIZE; i++) { ret = m5mols_read_u8(sd, SYSTEM_VER_STRING, &str[i]); if (ret) return ret; } ver->fw = be16_to_cpu(ver->fw); ver->hw = be16_to_cpu(ver->hw); ver->param = be16_to_cpu(ver->param); ver->awb = be16_to_cpu(ver->awb); v4l2_info(sd, "Manufacturer\t[%s]\n", is_manufacturer(info, REG_SAMSUNG_ELECTRO) ? "Samsung Electro-Mechanics" : is_manufacturer(info, REG_SAMSUNG_OPTICS) ? "Samsung Fiber-Optics" : is_manufacturer(info, REG_SAMSUNG_TECHWIN) ? "Samsung Techwin" : "None"); v4l2_info(sd, "Customer/Project\t[0x%02x/0x%02x]\n", info->ver.customer, info->ver.project); if (!is_available_af(info)) v4l2_info(sd, "No Auto Focus support on this firmware\n"); return ret; } /** * __find_restype - Lookup M-5MOLS resolution type according to pixel code * @code: pixel code */ static enum m5mols_restype __find_restype(enum v4l2_mbus_pixelcode code) { enum m5mols_restype type = M5MOLS_RESTYPE_MONITOR; do { if (code == m5mols_default_ffmt[type].code) return type; } while (type++ != SIZE_DEFAULT_FFMT); return 0; } /** * __find_resolution - Lookup preset and type of M-5MOLS's resolution * @mf: pixel format to find/negotiate the resolution preset for * @type: M-5MOLS resolution type * @resolution: M-5MOLS resolution preset register value * * Find nearest resolution matching resolution preset and adjust mf * to supported values. 
*/ static int __find_resolution(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf, enum m5mols_restype *type, u32 *resolution) { const struct m5mols_resolution *fsize = &m5mols_reg_res[0]; const struct m5mols_resolution *match = NULL; enum m5mols_restype stype = __find_restype(mf->code); int i = ARRAY_SIZE(m5mols_reg_res); unsigned int min_err = ~0; while (i--) { int err; if (stype == fsize->type) { err = abs(fsize->width - mf->width) + abs(fsize->height - mf->height); if (err < min_err) { min_err = err; match = fsize; } } fsize++; } if (match) { mf->width = match->width; mf->height = match->height; *resolution = match->reg; *type = stype; return 0; } return -EINVAL; } static struct v4l2_mbus_framefmt *__find_format(struct m5mols_info *info, struct v4l2_subdev_fh *fh, enum v4l2_subdev_format_whence which, enum m5mols_restype type) { if (which == V4L2_SUBDEV_FORMAT_TRY) return fh ? v4l2_subdev_get_try_format(fh, 0) : NULL; return &info->ffmt[type]; } static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh, struct v4l2_subdev_format *fmt) { struct m5mols_info *info = to_m5mols(sd); struct v4l2_mbus_framefmt *format; format = __find_format(info, fh, fmt->which, info->res_type); if (!format) return -EINVAL; fmt->format = *format; return 0; } static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh, struct v4l2_subdev_format *fmt) { struct m5mols_info *info = to_m5mols(sd); struct v4l2_mbus_framefmt *format = &fmt->format; struct v4l2_mbus_framefmt *sfmt; enum m5mols_restype type; u32 resolution = 0; int ret; ret = __find_resolution(sd, format, &type, &resolution); if (ret < 0) return ret; sfmt = __find_format(info, fh, fmt->which, type); if (!sfmt) return 0; format->code = m5mols_default_ffmt[type].code; format->colorspace = V4L2_COLORSPACE_JPEG; format->field = V4L2_FIELD_NONE; if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) { *sfmt = *format; info->resolution = resolution; info->res_type = type; } return 0; } static int m5mols_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh, struct v4l2_subdev_mbus_code_enum *code) { if (!code || code->index >= SIZE_DEFAULT_FFMT) return -EINVAL; code->code = m5mols_default_ffmt[code->index].code; return 0; } static struct v4l2_subdev_pad_ops m5mols_pad_ops = { .enum_mbus_code = m5mols_enum_mbus_code, .get_fmt = m5mols_get_fmt, .set_fmt = m5mols_set_fmt, }; /** * m5mols_restore_controls - Apply current control values to the registers * * m5mols_do_scenemode() handles all parameters for which there is yet no * individual control. It should be replaced at some point by setting each * control individually, in required register set up order. */ int m5mols_restore_controls(struct m5mols_info *info) { int ret; if (info->ctrl_sync) return 0; ret = m5mols_do_scenemode(info, REG_SCENE_NORMAL); if (ret) return ret; ret = v4l2_ctrl_handler_setup(&info->handle); info->ctrl_sync = !ret; return ret; } /** * m5mols_start_monitor - Start the monitor mode * * Before applying the controls setup the resolution and frame rate * in PARAMETER mode, and then switch over to MONITOR mode. 
*/ static int m5mols_start_monitor(struct m5mols_info *info) { struct v4l2_subdev *sd = &info->sd; int ret; ret = m5mols_mode(info, REG_PARAMETER); if (!ret) ret = m5mols_write(sd, PARM_MON_SIZE, info->resolution); if (!ret) ret = m5mols_write(sd, PARM_MON_FPS, REG_FPS_30); if (!ret) ret = m5mols_mode(info, REG_MONITOR); if (!ret) ret = m5mols_restore_controls(info); return ret; } static int m5mols_s_stream(struct v4l2_subdev *sd, int enable) { struct m5mols_info *info = to_m5mols(sd); u32 code = info->ffmt[info->res_type].code; if (enable) { int ret = -EINVAL; if (is_code(code, M5MOLS_RESTYPE_MONITOR)) ret = m5mols_start_monitor(info); if (is_code(code, M5MOLS_RESTYPE_CAPTURE)) ret = m5mols_start_capture(info); return ret; } return m5mols_mode(info, REG_PARAMETER); } static const struct v4l2_subdev_video_ops m5mols_video_ops = { .s_stream = m5mols_s_stream, }; static int m5mols_s_ctrl(struct v4l2_ctrl *ctrl) { struct v4l2_subdev *sd = to_sd(ctrl); struct m5mols_info *info = to_m5mols(sd); int ispstate = info->mode; int ret; /* * If needed, defer restoring the controls until * the device is fully initialized. */ if (!info->isp_ready) { info->ctrl_sync = 0; return 0; } ret = m5mols_mode(info, REG_PARAMETER); if (ret < 0) return ret; ret = m5mols_set_ctrl(ctrl); if (ret < 0) return ret; return m5mols_mode(info, ispstate); } static const struct v4l2_ctrl_ops m5mols_ctrl_ops = { .s_ctrl = m5mols_s_ctrl, }; static int m5mols_sensor_power(struct m5mols_info *info, bool enable) { struct v4l2_subdev *sd = &info->sd; struct i2c_client *client = v4l2_get_subdevdata(sd); const struct m5mols_platform_data *pdata = info->pdata; int ret; if (info->power == enable) return 0; if (enable) { if (info->set_power) { ret = info->set_power(&client->dev, 1); if (ret) return ret; } ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies); if (ret) { if (info->set_power) info->set_power(&client->dev, 0); return ret; } gpio_set_value(pdata->gpio_reset, !pdata->reset_polarity); info->power = 1; return ret; } ret = regulator_bulk_disable(ARRAY_SIZE(supplies), supplies); if (ret) return ret; if (info->set_power) info->set_power(&client->dev, 0); gpio_set_value(pdata->gpio_reset, pdata->reset_polarity); info->isp_ready = 0; info->power = 0; return ret; } /* m5mols_update_fw - optional firmware update routine */ int __attribute__ ((weak)) m5mols_update_fw(struct v4l2_subdev *sd, int (*set_power)(struct m5mols_info *, bool)) { return 0; } /** * m5mols_fw_start - M-5MOLS internal ARM controller initialization * * Execute the M-5MOLS internal ARM controller initialization sequence. * This function should be called after the supply voltage has been * applied and before any requests to the device are made. 
*/ static int m5mols_fw_start(struct v4l2_subdev *sd) { struct m5mols_info *info = to_m5mols(sd); int ret; atomic_set(&info->irq_done, 0); /* Wait until I2C slave is initialized in Flash Writer mode */ ret = m5mols_busy_wait(sd, FLASH_CAM_START, REG_IN_FLASH_MODE, M5MOLS_I2C_RDY_WAIT_FL | 0xff, -1); if (!ret) ret = m5mols_write(sd, FLASH_CAM_START, REG_START_ARM_BOOT); if (!ret) ret = m5mols_wait_interrupt(sd, REG_INT_MODE, 2000); if (ret < 0) return ret; info->isp_ready = 1; ret = m5mols_get_version(sd); if (!ret) ret = m5mols_update_fw(sd, m5mols_sensor_power); if (ret) return ret; v4l2_dbg(1, m5mols_debug, sd, "ARM boot succeeded\n"); ret = m5mols_write(sd, PARM_INTERFACE, REG_INTERFACE_MIPI); if (!ret) ret = m5mols_enable_interrupt(sd, REG_INT_AF | REG_INT_CAPTURE); return ret; } static int m5mols_init_controls(struct m5mols_info *info) { struct v4l2_subdev *sd = &info->sd; u16 max_exposure; u16 step_zoom; int ret; /* The valid range and step of the controls vary with the firmware version */ ret = m5mols_read_u16(sd, AE_MAX_GAIN_MON, &max_exposure); if (ret) return ret; step_zoom = is_manufacturer(info, REG_SAMSUNG_OPTICS) ? 31 : 1; v4l2_ctrl_handler_init(&info->handle, 6); info->autowb = v4l2_ctrl_new_std(&info->handle, &m5mols_ctrl_ops, V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 0); info->saturation = v4l2_ctrl_new_std(&info->handle, &m5mols_ctrl_ops, V4L2_CID_SATURATION, 1, 5, 1, 3); info->zoom = v4l2_ctrl_new_std(&info->handle, &m5mols_ctrl_ops, V4L2_CID_ZOOM_ABSOLUTE, 1, 70, step_zoom, 1); info->exposure = v4l2_ctrl_new_std(&info->handle, &m5mols_ctrl_ops, V4L2_CID_EXPOSURE, 0, max_exposure, 1, (int)max_exposure/2); info->colorfx = v4l2_ctrl_new_std_menu(&info->handle, &m5mols_ctrl_ops, V4L2_CID_COLORFX, 4, (1 << V4L2_COLORFX_BW), V4L2_COLORFX_NONE); info->autoexposure = v4l2_ctrl_new_std_menu(&info->handle, &m5mols_ctrl_ops, V4L2_CID_EXPOSURE_AUTO, 1, 0, V4L2_EXPOSURE_AUTO); sd->ctrl_handler = &info->handle; if (info->handle.error) { ret = info->handle.error; v4l2_err(sd, "Failed to initialize controls: %d\n", ret); v4l2_ctrl_handler_free(&info->handle); return ret; } v4l2_ctrl_cluster(2, &info->autoexposure); return 0; } /** * m5mols_s_power - Main sensor power control function * * To prevent breaking the lens when the sensor is powered off, the Soft-Landing * algorithm is called where available. The Soft-Landing algorithm's availability * depends on the firmware provider. 
*/ static int m5mols_s_power(struct v4l2_subdev *sd, int on) { struct m5mols_info *info = to_m5mols(sd); int ret; if (on) { ret = m5mols_sensor_power(info, true); if (!ret) ret = m5mols_fw_start(sd); return ret; } if (is_manufacturer(info, REG_SAMSUNG_TECHWIN)) { ret = m5mols_mode(info, REG_MONITOR); if (!ret) ret = m5mols_write(sd, AF_EXECUTE, REG_AF_STOP); if (!ret) ret = m5mols_write(sd, AF_MODE, REG_AF_POWEROFF); if (!ret) ret = m5mols_busy_wait(sd, SYSTEM_STATUS, REG_AF_IDLE, 0xff, -1); if (ret < 0) v4l2_warn(sd, "Soft landing lens failed\n"); } ret = m5mols_sensor_power(info, false); info->ctrl_sync = 0; return ret; } static int m5mols_log_status(struct v4l2_subdev *sd) { struct m5mols_info *info = to_m5mols(sd); v4l2_ctrl_handler_log_status(&info->handle, sd->name); return 0; } static const struct v4l2_subdev_core_ops m5mols_core_ops = { .s_power = m5mols_s_power, .g_ctrl = v4l2_subdev_g_ctrl, .s_ctrl = v4l2_subdev_s_ctrl, .queryctrl = v4l2_subdev_queryctrl, .querymenu = v4l2_subdev_querymenu, .g_ext_ctrls = v4l2_subdev_g_ext_ctrls, .try_ext_ctrls = v4l2_subdev_try_ext_ctrls, .s_ext_ctrls = v4l2_subdev_s_ext_ctrls, .log_status = m5mols_log_status, }; /* * V4L2 subdev internal operations */ static int m5mols_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(fh, 0); *format = m5mols_default_ffmt[0]; return 0; } static const struct v4l2_subdev_internal_ops m5mols_subdev_internal_ops = { .open = m5mols_open, }; static const struct v4l2_subdev_ops m5mols_ops = { .core = &m5mols_core_ops, .pad = &m5mols_pad_ops, .video = &m5mols_video_ops, }; static irqreturn_t m5mols_irq_handler(int irq, void *data) { struct m5mols_info *info = to_m5mols(data); atomic_set(&info->irq_done, 1); wake_up_interruptible(&info->irq_waitq); return IRQ_HANDLED; } static int __devinit m5mols_probe(struct i2c_client *client, const struct i2c_device_id *id) { const struct m5mols_platform_data *pdata = client->dev.platform_data; struct m5mols_info *info; struct v4l2_subdev *sd; int ret; if (pdata == NULL) { dev_err(&client->dev, "No platform data\n"); return -EINVAL; } if (!gpio_is_valid(pdata->gpio_reset)) { dev_err(&client->dev, "No valid RESET GPIO specified\n"); return -EINVAL; } if (!client->irq) { dev_err(&client->dev, "Interrupt not assigned\n"); return -EINVAL; } info = kzalloc(sizeof(struct m5mols_info), GFP_KERNEL); if (!info) return -ENOMEM; info->pdata = pdata; info->set_power = pdata->set_power; ret = gpio_request(pdata->gpio_reset, "M5MOLS_NRST"); if (ret) { dev_err(&client->dev, "Failed to request gpio: %d\n", ret); goto out_free; } gpio_direction_output(pdata->gpio_reset, pdata->reset_polarity); ret = regulator_bulk_get(&client->dev, ARRAY_SIZE(supplies), supplies); if (ret) { dev_err(&client->dev, "Failed to get regulators: %d\n", ret); goto out_gpio; } sd = &info->sd; v4l2_i2c_subdev_init(sd, client, &m5mols_ops); strlcpy(sd->name, MODULE_NAME, sizeof(sd->name)); sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; sd->internal_ops = &m5mols_subdev_internal_ops; info->pad.flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_init(&sd->entity, 1, &info->pad, 0); if (ret < 0) goto out_reg; sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR; init_waitqueue_head(&info->irq_waitq); ret = request_irq(client->irq, m5mols_irq_handler, IRQF_TRIGGER_RISING, MODULE_NAME, sd); if (ret) { dev_err(&client->dev, "Interrupt request failed: %d\n", ret); goto out_me; } info->res_type = M5MOLS_RESTYPE_MONITOR; info->ffmt[0] = m5mols_default_ffmt[0]; info->ffmt[1] = 
m5mols_default_ffmt[1]; ret = m5mols_sensor_power(info, true); if (ret) goto out_me; ret = m5mols_fw_start(sd); if (!ret) ret = m5mols_init_controls(info); m5mols_sensor_power(info, false); if (!ret) return 0; out_me: media_entity_cleanup(&sd->entity); out_reg: regulator_bulk_free(ARRAY_SIZE(supplies), supplies); out_gpio: gpio_free(pdata->gpio_reset); out_free: kfree(info); return ret; } static int __devexit m5mols_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct m5mols_info *info = to_m5mols(sd); v4l2_device_unregister_subdev(sd); v4l2_ctrl_handler_free(sd->ctrl_handler); free_irq(client->irq, sd); regulator_bulk_free(ARRAY_SIZE(supplies), supplies); gpio_free(info->pdata->gpio_reset); media_entity_cleanup(&sd->entity); kfree(info); return 0; } static const struct i2c_device_id m5mols_id[] = { { MODULE_NAME, 0 }, { }, }; MODULE_DEVICE_TABLE(i2c, m5mols_id); static struct i2c_driver m5mols_i2c_driver = { .driver = { .name = MODULE_NAME, }, .probe = m5mols_probe, .remove = __devexit_p(m5mols_remove), .id_table = m5mols_id, }; module_i2c_driver(m5mols_i2c_driver); MODULE_AUTHOR("HeungJun Kim <riverful.kim@samsung.com>"); MODULE_AUTHOR("Dongsoo Kim <dongsoo45.kim@samsung.com>"); MODULE_DESCRIPTION("Fujitsu M-5MOLS 8M Pixel camera driver"); MODULE_LICENSE("GPL");
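/*
 * Added illustration, not part of the original driver: a sketch of how a
 * host (bridge) driver might exercise the subdev ops implemented above to
 * start a preview stream.  The function name is hypothetical, "sd" is
 * assumed to have been obtained during board/async setup, and the monitor
 * pixel code is assumed to be V4L2_MBUS_FMT_VYUY8_2X8 as in
 * m5mols_default_ffmt[]; error unwinding is abbreviated.
 */
#include <media/v4l2-subdev.h>

static int example_start_preview(struct v4l2_subdev *sd)
{
	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.pad = 0,
	};
	int ret;

	fmt.format.width = 1920;	/* rounded to the nearest preset */
	fmt.format.height = 1080;
	fmt.format.code = V4L2_MBUS_FMT_VYUY8_2X8;

	ret = v4l2_subdev_call(sd, core, s_power, 1);	/* power up + fw start */
	if (ret)
		return ret;
	ret = v4l2_subdev_call(sd, pad, set_fmt, NULL, &fmt);
	if (!ret)	/* PARAMETER -> MONITOR transition via m5mols_s_stream() */
		ret = v4l2_subdev_call(sd, video, s_stream, 1);
	return ret;
}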
gpl-2.0
boa19861105/android_422_kernel_htc_dlxub1
fs/omfs/dir.c
4907
10284
/* * OMFS (as used by RIO Karma) directory operations. * Copyright (C) 2005 Bob Copeland <me@bobcopeland.com> * Released under GPL v2. */ #include <linux/fs.h> #include <linux/ctype.h> #include <linux/buffer_head.h> #include "omfs.h" static int omfs_hash(const char *name, int namelen, int mod) { int i, hash = 0; for (i = 0; i < namelen; i++) hash ^= tolower(name[i]) << (i % 24); return hash % mod; } /* * Finds the bucket for a given name and reads the containing block; * *ofs is set to the offset of the first list entry. */ static struct buffer_head *omfs_get_bucket(struct inode *dir, const char *name, int namelen, int *ofs) { int nbuckets = (dir->i_size - OMFS_DIR_START)/8; int bucket = omfs_hash(name, namelen, nbuckets); *ofs = OMFS_DIR_START + bucket * 8; return omfs_bread(dir->i_sb, dir->i_ino); } static struct buffer_head *omfs_scan_list(struct inode *dir, u64 block, const char *name, int namelen, u64 *prev_block) { struct buffer_head *bh; struct omfs_inode *oi; int err = -ENOENT; *prev_block = ~0; while (block != ~0) { bh = omfs_bread(dir->i_sb, block); if (!bh) { err = -EIO; goto err; } oi = (struct omfs_inode *) bh->b_data; if (omfs_is_bad(OMFS_SB(dir->i_sb), &oi->i_head, block)) { brelse(bh); goto err; } if (strncmp(oi->i_name, name, namelen) == 0) return bh; *prev_block = block; block = be64_to_cpu(oi->i_sibling); brelse(bh); } err: return ERR_PTR(err); } static struct buffer_head *omfs_find_entry(struct inode *dir, const char *name, int namelen) { struct buffer_head *bh; int ofs; u64 block, dummy; bh = omfs_get_bucket(dir, name, namelen, &ofs); if (!bh) return ERR_PTR(-EIO); block = be64_to_cpu(*((__be64 *) &bh->b_data[ofs])); brelse(bh); return omfs_scan_list(dir, block, name, namelen, &dummy); } int omfs_make_empty(struct inode *inode, struct super_block *sb) { struct omfs_sb_info *sbi = OMFS_SB(sb); struct buffer_head *bh; struct omfs_inode *oi; bh = omfs_bread(sb, inode->i_ino); if (!bh) return -ENOMEM; memset(bh->b_data, 0, sizeof(struct omfs_inode)); if (S_ISDIR(inode->i_mode)) { memset(&bh->b_data[OMFS_DIR_START], 0xff, sbi->s_sys_blocksize - OMFS_DIR_START); } else omfs_make_empty_table(bh, OMFS_EXTENT_START); oi = (struct omfs_inode *) bh->b_data; oi->i_head.h_self = cpu_to_be64(inode->i_ino); oi->i_sibling = ~cpu_to_be64(0ULL); mark_buffer_dirty(bh); brelse(bh); return 0; } static int omfs_add_link(struct dentry *dentry, struct inode *inode) { struct inode *dir = dentry->d_parent->d_inode; const char *name = dentry->d_name.name; int namelen = dentry->d_name.len; struct omfs_inode *oi; struct buffer_head *bh; u64 block; __be64 *entry; int ofs; /* just prepend to head of queue in proper bucket */ bh = omfs_get_bucket(dir, name, namelen, &ofs); if (!bh) goto out; entry = (__be64 *) &bh->b_data[ofs]; block = be64_to_cpu(*entry); *entry = cpu_to_be64(inode->i_ino); mark_buffer_dirty(bh); brelse(bh); /* now set the sibling and parent pointers on the new inode */ bh = omfs_bread(dir->i_sb, inode->i_ino); if (!bh) goto out; oi = (struct omfs_inode *) bh->b_data; memcpy(oi->i_name, name, namelen); memset(oi->i_name + namelen, 0, OMFS_NAMELEN - namelen); oi->i_sibling = cpu_to_be64(block); oi->i_parent = cpu_to_be64(dir->i_ino); mark_buffer_dirty(bh); brelse(bh); dir->i_ctime = CURRENT_TIME_SEC; /* mark affected inodes dirty to rebuild checksums */ mark_inode_dirty(dir); mark_inode_dirty(inode); return 0; out: return -ENOMEM; } static int omfs_delete_entry(struct dentry *dentry) { struct inode *dir = dentry->d_parent->d_inode; struct inode *dirty; const char *name = 
dentry->d_name.name; int namelen = dentry->d_name.len; struct omfs_inode *oi; struct buffer_head *bh, *bh2; __be64 *entry, next; u64 block, prev; int ofs; int err = -ENOMEM; /* delete the proper node in the bucket's linked list */ bh = omfs_get_bucket(dir, name, namelen, &ofs); if (!bh) goto out; entry = (__be64 *) &bh->b_data[ofs]; block = be64_to_cpu(*entry); bh2 = omfs_scan_list(dir, block, name, namelen, &prev); if (IS_ERR(bh2)) { err = PTR_ERR(bh2); goto out_free_bh; } oi = (struct omfs_inode *) bh2->b_data; next = oi->i_sibling; brelse(bh2); if (prev != ~0) { /* found in middle of list, get list ptr */ brelse(bh); bh = omfs_bread(dir->i_sb, prev); if (!bh) goto out; oi = (struct omfs_inode *) bh->b_data; entry = &oi->i_sibling; } *entry = next; mark_buffer_dirty(bh); if (prev != ~0) { dirty = omfs_iget(dir->i_sb, prev); if (!IS_ERR(dirty)) { mark_inode_dirty(dirty); iput(dirty); } } err = 0; out_free_bh: brelse(bh); out: return err; } static int omfs_dir_is_empty(struct inode *inode) { int nbuckets = (inode->i_size - OMFS_DIR_START) / 8; struct buffer_head *bh; u64 *ptr; int i; bh = omfs_bread(inode->i_sb, inode->i_ino); if (!bh) return 0; ptr = (u64 *) &bh->b_data[OMFS_DIR_START]; for (i = 0; i < nbuckets; i++, ptr++) if (*ptr != ~0) break; brelse(bh); return *ptr != ~0; } static int omfs_remove(struct inode *dir, struct dentry *dentry) { struct inode *inode = dentry->d_inode; int ret; if (S_ISDIR(inode->i_mode) && !omfs_dir_is_empty(inode)) return -ENOTEMPTY; ret = omfs_delete_entry(dentry); if (ret) return ret; clear_nlink(inode); mark_inode_dirty(inode); mark_inode_dirty(dir); return 0; } static int omfs_add_node(struct inode *dir, struct dentry *dentry, umode_t mode) { int err; struct inode *inode = omfs_new_inode(dir, mode); if (IS_ERR(inode)) return PTR_ERR(inode); err = omfs_make_empty(inode, dir->i_sb); if (err) goto out_free_inode; err = omfs_add_link(dentry, inode); if (err) goto out_free_inode; d_instantiate(dentry, inode); return 0; out_free_inode: iput(inode); return err; } static int omfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { return omfs_add_node(dir, dentry, mode | S_IFDIR); } static int omfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd) { return omfs_add_node(dir, dentry, mode | S_IFREG); } static struct dentry *omfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct buffer_head *bh; struct inode *inode = NULL; if (dentry->d_name.len > OMFS_NAMELEN) return ERR_PTR(-ENAMETOOLONG); bh = omfs_find_entry(dir, dentry->d_name.name, dentry->d_name.len); if (!IS_ERR(bh)) { struct omfs_inode *oi = (struct omfs_inode *)bh->b_data; ino_t ino = be64_to_cpu(oi->i_head.h_self); brelse(bh); inode = omfs_iget(dir->i_sb, ino); if (IS_ERR(inode)) return ERR_CAST(inode); } d_add(dentry, inode); return NULL; } /* sanity check block's self pointer */ int omfs_is_bad(struct omfs_sb_info *sbi, struct omfs_header *header, u64 fsblock) { int is_bad; u64 ino = be64_to_cpu(header->h_self); is_bad = ((ino != fsblock) || (ino < sbi->s_root_ino) || (ino > sbi->s_num_blocks)); if (is_bad) printk(KERN_WARNING "omfs: bad hash chain detected\n"); return is_bad; } static int omfs_fill_chain(struct file *filp, void *dirent, filldir_t filldir, u64 fsblock, int hindex) { struct inode *dir = filp->f_dentry->d_inode; struct buffer_head *bh; struct omfs_inode *oi; u64 self; int res = 0; unsigned char d_type; /* follow chain in this bucket */ while (fsblock != ~0) { bh = omfs_bread(dir->i_sb, fsblock); if (!bh) goto 
out; oi = (struct omfs_inode *) bh->b_data; if (omfs_is_bad(OMFS_SB(dir->i_sb), &oi->i_head, fsblock)) { brelse(bh); goto out; } self = fsblock; fsblock = be64_to_cpu(oi->i_sibling); /* skip visited nodes */ if (hindex) { hindex--; brelse(bh); continue; } d_type = (oi->i_type == OMFS_DIR) ? DT_DIR : DT_REG; res = filldir(dirent, oi->i_name, strnlen(oi->i_name, OMFS_NAMELEN), filp->f_pos, self, d_type); brelse(bh); if (res < 0) break; filp->f_pos++; } out: return res; } static int omfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct inode *new_inode = new_dentry->d_inode; struct inode *old_inode = old_dentry->d_inode; int err; if (new_inode) { /* overwriting existing file/dir */ err = omfs_remove(new_dir, new_dentry); if (err) goto out; } /* since omfs locates files by name, we need to unlink _before_ * adding the new link or we won't find the old one */ err = omfs_delete_entry(old_dentry); if (err) goto out; mark_inode_dirty(old_dir); err = omfs_add_link(new_dentry, old_inode); if (err) goto out; old_inode->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(old_inode); out: return err; } static int omfs_readdir(struct file *filp, void *dirent, filldir_t filldir) { struct inode *dir = filp->f_dentry->d_inode; struct buffer_head *bh; loff_t offset, res; unsigned int hchain, hindex; int nbuckets; u64 fsblock; int ret = -EINVAL; if (filp->f_pos >> 32) goto success; switch ((unsigned long) filp->f_pos) { case 0: if (filldir(dirent, ".", 1, 0, dir->i_ino, DT_DIR) < 0) goto success; filp->f_pos++; /* fall through */ case 1: if (filldir(dirent, "..", 2, 1, parent_ino(filp->f_dentry), DT_DIR) < 0) goto success; filp->f_pos = 1 << 20; /* fall through */ } nbuckets = (dir->i_size - OMFS_DIR_START) / 8; /* high 12 bits store bucket + 1 and low 20 bits store hash index */ hchain = (filp->f_pos >> 20) - 1; hindex = filp->f_pos & 0xfffff; bh = omfs_bread(dir->i_sb, dir->i_ino); if (!bh) goto out; offset = OMFS_DIR_START + hchain * 8; for (; hchain < nbuckets; hchain++, offset += 8) { fsblock = be64_to_cpu(*((__be64 *) &bh->b_data[offset])); res = omfs_fill_chain(filp, dirent, filldir, fsblock, hindex); hindex = 0; if (res < 0) break; filp->f_pos = (hchain+2) << 20; } brelse(bh); success: ret = 0; out: return ret; } const struct inode_operations omfs_dir_inops = { .lookup = omfs_lookup, .mkdir = omfs_mkdir, .rename = omfs_rename, .create = omfs_create, .unlink = omfs_remove, .rmdir = omfs_remove, }; const struct file_operations omfs_dir_operations = { .read = generic_read_dir, .readdir = omfs_readdir, .llseek = generic_file_llseek, };
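/*
 * Added illustration, not part of the original file: the bucket computation
 * performed by omfs_hash()/omfs_get_bucket() above, reduced to a standalone
 * userspace program.  OMFS_DIR_START is assumed to be 0x1b8 as in omfs.h;
 * the directory size and file name are made up for the demo.
 */
#include <stdio.h>
#include <string.h>
#include <ctype.h>

#define OMFS_DIR_START 0x1b8

static int omfs_hash(const char *name, int namelen, int mod)
{
	int i, hash = 0;

	for (i = 0; i < namelen; i++)
		hash ^= tolower(name[i]) << (i % 24);
	return hash % mod;
}

int main(void)
{
	long long dir_size = 4096;	/* hypothetical directory inode size */
	int nbuckets = (dir_size - OMFS_DIR_START) / 8;
	const char *name = "example.txt";
	int bucket = omfs_hash(name, strlen(name), nbuckets);

	/* Each bucket slot holds a big-endian 64-bit head-of-chain block. */
	printf("\"%s\" -> bucket %d of %d, list head at offset %d\n",
	       name, bucket, nbuckets, OMFS_DIR_START + bucket * 8);
	return 0;
}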
gpl-2.0
gasseluk/htc-vision-kernel-ics
net/ax25/ax25_std_subr.c
5163
2329
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <net/sock.h> #include <asm/uaccess.h> #include <asm/system.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> /* * The following routines are taken from page 170 of the 7th ARRL Computer * Networking Conference paper, as is the whole state machine. */ void ax25_std_nr_error_recovery(ax25_cb *ax25) { ax25_std_establish_data_link(ax25); } void ax25_std_establish_data_link(ax25_cb *ax25) { ax25->condition = 0x00; ax25->n2count = 0; if (ax25->modulus == AX25_MODULUS) ax25_send_control(ax25, AX25_SABM, AX25_POLLON, AX25_COMMAND); else ax25_send_control(ax25, AX25_SABME, AX25_POLLON, AX25_COMMAND); ax25_calculate_t1(ax25); ax25_stop_idletimer(ax25); ax25_stop_t3timer(ax25); ax25_stop_t2timer(ax25); ax25_start_t1timer(ax25); } void ax25_std_transmit_enquiry(ax25_cb *ax25) { if (ax25->condition & AX25_COND_OWN_RX_BUSY) ax25_send_control(ax25, AX25_RNR, AX25_POLLON, AX25_COMMAND); else ax25_send_control(ax25, AX25_RR, AX25_POLLON, AX25_COMMAND); ax25->condition &= ~AX25_COND_ACK_PENDING; ax25_calculate_t1(ax25); ax25_start_t1timer(ax25); } void ax25_std_enquiry_response(ax25_cb *ax25) { if (ax25->condition & AX25_COND_OWN_RX_BUSY) ax25_send_control(ax25, AX25_RNR, AX25_POLLON, AX25_RESPONSE); else ax25_send_control(ax25, AX25_RR, AX25_POLLON, AX25_RESPONSE); ax25->condition &= ~AX25_COND_ACK_PENDING; } void ax25_std_timeout_response(ax25_cb *ax25) { if (ax25->condition & AX25_COND_OWN_RX_BUSY) ax25_send_control(ax25, AX25_RNR, AX25_POLLOFF, AX25_RESPONSE); else ax25_send_control(ax25, AX25_RR, AX25_POLLOFF, AX25_RESPONSE); ax25->condition &= ~AX25_COND_ACK_PENDING; }
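/*
 * Added illustration, not part of the original file: the decision shared by
 * the enquiry/response helpers above, reduced to a standalone function --
 * send RNR when our own receiver is busy, RR otherwise, and clear the
 * pending-ACK condition either way.  The COND_* values are stand-ins, not
 * the real AX25_COND_* bits from net/ax25.h.
 */
#include <stdio.h>

#define COND_ACK_PENDING 0x01
#define COND_OWN_RX_BUSY 0x02

enum sframe { FRAME_RR, FRAME_RNR };

static enum sframe pick_supervisory(unsigned char *condition)
{
	enum sframe f = (*condition & COND_OWN_RX_BUSY) ? FRAME_RNR : FRAME_RR;

	*condition &= ~COND_ACK_PENDING;
	return f;
}

int main(void)
{
	unsigned char cond = COND_ACK_PENDING | COND_OWN_RX_BUSY;

	printf("send %s, condition now 0x%02x\n",
	       pick_supervisory(&cond) == FRAME_RNR ? "RNR" : "RR",
	       (unsigned)cond);
	return 0;
}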
gpl-2.0
jollaman999/jolla-kernel_G_v30a-Stock
net/unix/garbage.c
7723
10621
/* * NET3: Garbage Collector For AF_UNIX sockets * * Garbage Collector: * Copyright (C) Barak A. Pearlmutter. * Released under the GPL version 2 or later. * * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem. * If it doesn't work blame me, it worked when Barak sent it. * * Assumptions: * * - object w/ a bit * - free list * * Current optimizations: * * - explicit stack instead of recursion * - tail recurse on first born instead of immediate push/pop * - we gather the stuff that should not be killed into tree * and stack is just a path from root to the current pointer. * * Future optimizations: * * - don't just push entire root set; process in place * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Fixes: * Alan Cox 07 Sept 1997 Vmalloc internal stack as needed. * Cope with changing max_files. * Al Viro 11 Oct 1998 * Graph may have cycles. That is, we can send the descriptor * of foo to bar and vice versa. Current code chokes on that. * Fix: move SCM_RIGHTS ones into the separate list and then * skb_free() them all instead of doing explicit fput's. * Another problem: since fput() may block somebody may * create a new unix_socket when we are in the middle of sweep * phase. Fix: revert the logic wrt MARKED. Mark everything * upon the beginning and unmark non-junk ones. * * [12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS * sent to connect()'ed but still not accept()'ed sockets. * Fixed. Old code had slightly different problem here: * extra fput() in situation when we passed the descriptor via * such socket and closed it (descriptor). That would happen on * each unix_gc() until the accept(). Since the struct file in * question would go to the free list and might be reused... * That might be the reason of random oopses on filp_close() * in unrelated processes. * * AV 28 Feb 1999 * Kill the explicit allocation of stack. Now we keep the tree * with root in dummy + pointer (gc_current) to one of the nodes. * Stack is represented as path from gc_current to dummy. Unmark * now means "add to tree". Push == "make it a son of gc_current". * Pop == "move gc_current to parent". We keep only pointers to * parents (->gc_tree). * AV 1 Mar 1999 * Damn. Added missing check for ->dead in listen queues scanning. * * Miklos Szeredi 25 Jun 2007 * Reimplement with a cycle collecting algorithm. This should * solve several problems with the previous code, like being racy * wrt receive and holding up unrelated socket operations. */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/socket.h> #include <linux/un.h> #include <linux/net.h> #include <linux/fs.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/file.h> #include <linux/proc_fs.h> #include <linux/mutex.h> #include <linux/wait.h> #include <net/sock.h> #include <net/af_unix.h> #include <net/scm.h> #include <net/tcp_states.h> /* Internal data structures and random procedures: */ static LIST_HEAD(gc_inflight_list); static LIST_HEAD(gc_candidates); static DEFINE_SPINLOCK(unix_gc_lock); static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait); unsigned int unix_tot_inflight; struct sock *unix_get_socket(struct file *filp) { struct sock *u_sock = NULL; struct inode *inode = filp->f_path.dentry->d_inode; /* * Socket ? 
*/ if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) { struct socket *sock = SOCKET_I(inode); struct sock *s = sock->sk; /* * PF_UNIX ? */ if (s && sock->ops && sock->ops->family == PF_UNIX) u_sock = s; } return u_sock; } /* * Keep the number of times in flight count for the file * descriptor if it is for an AF_UNIX socket. */ void unix_inflight(struct file *fp) { struct sock *s = unix_get_socket(fp); if (s) { struct unix_sock *u = unix_sk(s); spin_lock(&unix_gc_lock); if (atomic_long_inc_return(&u->inflight) == 1) { BUG_ON(!list_empty(&u->link)); list_add_tail(&u->link, &gc_inflight_list); } else { BUG_ON(list_empty(&u->link)); } unix_tot_inflight++; spin_unlock(&unix_gc_lock); } } void unix_notinflight(struct file *fp) { struct sock *s = unix_get_socket(fp); if (s) { struct unix_sock *u = unix_sk(s); spin_lock(&unix_gc_lock); BUG_ON(list_empty(&u->link)); if (atomic_long_dec_and_test(&u->inflight)) list_del_init(&u->link); unix_tot_inflight--; spin_unlock(&unix_gc_lock); } } static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *), struct sk_buff_head *hitlist) { struct sk_buff *skb; struct sk_buff *next; spin_lock(&x->sk_receive_queue.lock); skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { /* * Do we have file descriptors ? */ if (UNIXCB(skb).fp) { bool hit = false; /* * Process the descriptors of this socket */ int nfd = UNIXCB(skb).fp->count; struct file **fp = UNIXCB(skb).fp->fp; while (nfd--) { /* * Get the socket the fd matches * if it indeed does so */ struct sock *sk = unix_get_socket(*fp++); if (sk) { struct unix_sock *u = unix_sk(sk); /* * Ignore non-candidates, they could * have been added to the queues after * starting the garbage collection */ if (u->gc_candidate) { hit = true; func(u); } } } if (hit && hitlist != NULL) { __skb_unlink(skb, &x->sk_receive_queue); __skb_queue_tail(hitlist, skb); } } } spin_unlock(&x->sk_receive_queue.lock); } static void scan_children(struct sock *x, void (*func)(struct unix_sock *), struct sk_buff_head *hitlist) { if (x->sk_state != TCP_LISTEN) scan_inflight(x, func, hitlist); else { struct sk_buff *skb; struct sk_buff *next; struct unix_sock *u; LIST_HEAD(embryos); /* * For a listening socket collect the queued embryos * and perform a scan on them as well. */ spin_lock(&x->sk_receive_queue.lock); skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { u = unix_sk(skb->sk); /* * An embryo cannot be in-flight, so it's safe * to use the list link. */ BUG_ON(!list_empty(&u->link)); list_add_tail(&u->link, &embryos); } spin_unlock(&x->sk_receive_queue.lock); while (!list_empty(&embryos)) { u = list_entry(embryos.next, struct unix_sock, link); scan_inflight(&u->sk, func, hitlist); list_del_init(&u->link); } } } static void dec_inflight(struct unix_sock *usk) { atomic_long_dec(&usk->inflight); } static void inc_inflight(struct unix_sock *usk) { atomic_long_inc(&usk->inflight); } static void inc_inflight_move_tail(struct unix_sock *u) { atomic_long_inc(&u->inflight); /* * If this still might be part of a cycle, move it to the end * of the list, so that it's checked even if it was already * passed over */ if (u->gc_maybe_cycle) list_move_tail(&u->link, &gc_candidates); } static bool gc_in_progress = false; #define UNIX_INFLIGHT_TRIGGER_GC 16000 void wait_for_unix_gc(void) { /* * If number of inflight sockets is insane, * force a garbage collect right now. 
*/ if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress) unix_gc(); wait_event(unix_gc_wait, gc_in_progress == false); } /* The external entry point: unix_gc() */ void unix_gc(void) { struct unix_sock *u; struct unix_sock *next; struct sk_buff_head hitlist; struct list_head cursor; LIST_HEAD(not_cycle_list); spin_lock(&unix_gc_lock); /* Avoid a recursive GC. */ if (gc_in_progress) goto out; gc_in_progress = true; /* * First, select candidates for garbage collection. Only * in-flight sockets are considered, and from those only ones * which don't have any external reference. * * Holding unix_gc_lock will protect these candidates from * being detached, and hence from gaining an external * reference. Since there are no possible receivers, all * buffers currently on the candidates' queues stay there * during the garbage collection. * * We also know that no new candidate can be added onto the * receive queues. Other, non candidate sockets _can_ be * added to queue, so we must make sure only to touch * candidates. */ list_for_each_entry_safe(u, next, &gc_inflight_list, link) { long total_refs; long inflight_refs; total_refs = file_count(u->sk.sk_socket->file); inflight_refs = atomic_long_read(&u->inflight); BUG_ON(inflight_refs < 1); BUG_ON(total_refs < inflight_refs); if (total_refs == inflight_refs) { list_move_tail(&u->link, &gc_candidates); u->gc_candidate = 1; u->gc_maybe_cycle = 1; } } /* * Now remove all internal in-flight reference to children of * the candidates. */ list_for_each_entry(u, &gc_candidates, link) scan_children(&u->sk, dec_inflight, NULL); /* * Restore the references for children of all candidates, * which have remaining references. Do this recursively, so * only those remain, which form cyclic references. * * Use a "cursor" link, to make the list traversal safe, even * though elements might be moved about. */ list_add(&cursor, &gc_candidates); while (cursor.next != &gc_candidates) { u = list_entry(cursor.next, struct unix_sock, link); /* Move cursor to after the current position. */ list_move(&cursor, &u->link); if (atomic_long_read(&u->inflight) > 0) { list_move_tail(&u->link, &not_cycle_list); u->gc_maybe_cycle = 0; scan_children(&u->sk, inc_inflight_move_tail, NULL); } } list_del(&cursor); /* * not_cycle_list contains those sockets which do not make up a * cycle. Restore these to the inflight list. */ while (!list_empty(&not_cycle_list)) { u = list_entry(not_cycle_list.next, struct unix_sock, link); u->gc_candidate = 0; list_move_tail(&u->link, &gc_inflight_list); } /* * Now gc_candidates contains only garbage. Restore original * inflight counters for these as well, and remove the skbuffs * which are creating the cycle(s). */ skb_queue_head_init(&hitlist); list_for_each_entry(u, &gc_candidates, link) scan_children(&u->sk, inc_inflight, &hitlist); spin_unlock(&unix_gc_lock); /* Here we are. Hitlist is filled. Die. */ __skb_queue_purge(&hitlist); spin_lock(&unix_gc_lock); /* All candidates should have been detached by now. */ BUG_ON(!list_empty(&gc_candidates)); gc_in_progress = false; wake_up(&unix_gc_wait); out: spin_unlock(&unix_gc_lock); }
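/*
 * Added illustration, not part of the original file: a userspace model of
 * the candidate selection unix_gc() performs above.  Subtract the in-flight
 * (internal) references contributed by the candidates' queues; any socket
 * whose total reference count still exceeds its in-flight count is
 * externally reachable and "rescues" everything reachable from it, and
 * whatever is never rescued forms the garbage cycles.  The kernel version
 * avoids recursion by rescanning a cursor-based candidate list; plain
 * recursion is used here only to keep the model short.  All numbers below
 * are made up.
 */
#include <stdio.h>

#define N 4

static int refs[N];		/* total refcount per socket (file count) */
static int edges[N][N];		/* edges[i][j]: i's queue carries an fd of j */

static void rescue(int i, int *keep)
{
	int j;

	if (keep[i])
		return;
	keep[i] = 1;
	for (j = 0; j < N; j++)
		if (edges[i][j])
			rescue(j, keep);
}

int main(void)
{
	int keep[N] = { 0 }, inflight[N] = { 0 };
	int i, j;

	/* Sockets 0 and 1 reference each other and nothing else holds them;
	 * socket 2 is held by a user and its queue carries socket 3. */
	refs[0] = refs[1] = refs[2] = refs[3] = 1;
	edges[0][1] = edges[1][0] = 1;
	edges[2][3] = 1;

	for (i = 0; i < N; i++)
		for (j = 0; j < N; j++)
			inflight[j] += edges[i][j];

	for (i = 0; i < N; i++)
		if (refs[i] > inflight[i])	/* external reference exists */
			rescue(i, keep);

	for (i = 0; i < N; i++)
		printf("socket %d: %s\n", i, keep[i] ? "keep" : "garbage");
	return 0;
}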
gpl-2.0
syhost/android_kernel_pantech_ef50l
drivers/video/jz4740_fb.c
8235
19873
/* * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de> * JZ4740 SoC LCD framebuffer driver * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/console.h> #include <linux/fb.h> #include <linux/dma-mapping.h> #include <asm/mach-jz4740/jz4740_fb.h> #include <asm/mach-jz4740/gpio.h> #define JZ_REG_LCD_CFG 0x00 #define JZ_REG_LCD_VSYNC 0x04 #define JZ_REG_LCD_HSYNC 0x08 #define JZ_REG_LCD_VAT 0x0C #define JZ_REG_LCD_DAH 0x10 #define JZ_REG_LCD_DAV 0x14 #define JZ_REG_LCD_PS 0x18 #define JZ_REG_LCD_CLS 0x1C #define JZ_REG_LCD_SPL 0x20 #define JZ_REG_LCD_REV 0x24 #define JZ_REG_LCD_CTRL 0x30 #define JZ_REG_LCD_STATE 0x34 #define JZ_REG_LCD_IID 0x38 #define JZ_REG_LCD_DA0 0x40 #define JZ_REG_LCD_SA0 0x44 #define JZ_REG_LCD_FID0 0x48 #define JZ_REG_LCD_CMD0 0x4C #define JZ_REG_LCD_DA1 0x50 #define JZ_REG_LCD_SA1 0x54 #define JZ_REG_LCD_FID1 0x58 #define JZ_REG_LCD_CMD1 0x5C #define JZ_LCD_CFG_SLCD BIT(31) #define JZ_LCD_CFG_PS_DISABLE BIT(23) #define JZ_LCD_CFG_CLS_DISABLE BIT(22) #define JZ_LCD_CFG_SPL_DISABLE BIT(21) #define JZ_LCD_CFG_REV_DISABLE BIT(20) #define JZ_LCD_CFG_HSYNCM BIT(19) #define JZ_LCD_CFG_PCLKM BIT(18) #define JZ_LCD_CFG_INV BIT(17) #define JZ_LCD_CFG_SYNC_DIR BIT(16) #define JZ_LCD_CFG_PS_POLARITY BIT(15) #define JZ_LCD_CFG_CLS_POLARITY BIT(14) #define JZ_LCD_CFG_SPL_POLARITY BIT(13) #define JZ_LCD_CFG_REV_POLARITY BIT(12) #define JZ_LCD_CFG_HSYNC_ACTIVE_LOW BIT(11) #define JZ_LCD_CFG_PCLK_FALLING_EDGE BIT(10) #define JZ_LCD_CFG_DE_ACTIVE_LOW BIT(9) #define JZ_LCD_CFG_VSYNC_ACTIVE_LOW BIT(8) #define JZ_LCD_CFG_18_BIT BIT(7) #define JZ_LCD_CFG_PDW (BIT(5) | BIT(4)) #define JZ_LCD_CFG_MODE_MASK 0xf #define JZ_LCD_CTRL_BURST_4 (0x0 << 28) #define JZ_LCD_CTRL_BURST_8 (0x1 << 28) #define JZ_LCD_CTRL_BURST_16 (0x2 << 28) #define JZ_LCD_CTRL_RGB555 BIT(27) #define JZ_LCD_CTRL_OFUP BIT(26) #define JZ_LCD_CTRL_FRC_GRAYSCALE_16 (0x0 << 24) #define JZ_LCD_CTRL_FRC_GRAYSCALE_4 (0x1 << 24) #define JZ_LCD_CTRL_FRC_GRAYSCALE_2 (0x2 << 24) #define JZ_LCD_CTRL_PDD_MASK (0xff << 16) #define JZ_LCD_CTRL_EOF_IRQ BIT(13) #define JZ_LCD_CTRL_SOF_IRQ BIT(12) #define JZ_LCD_CTRL_OFU_IRQ BIT(11) #define JZ_LCD_CTRL_IFU0_IRQ BIT(10) #define JZ_LCD_CTRL_IFU1_IRQ BIT(9) #define JZ_LCD_CTRL_DD_IRQ BIT(8) #define JZ_LCD_CTRL_QDD_IRQ BIT(7) #define JZ_LCD_CTRL_REVERSE_ENDIAN BIT(6) #define JZ_LCD_CTRL_LSB_FISRT BIT(5) #define JZ_LCD_CTRL_DISABLE BIT(4) #define JZ_LCD_CTRL_ENABLE BIT(3) #define JZ_LCD_CTRL_BPP_1 0x0 #define JZ_LCD_CTRL_BPP_2 0x1 #define JZ_LCD_CTRL_BPP_4 0x2 #define JZ_LCD_CTRL_BPP_8 0x3 #define JZ_LCD_CTRL_BPP_15_16 0x4 #define JZ_LCD_CTRL_BPP_18_24 0x5 #define JZ_LCD_CMD_SOF_IRQ BIT(15) #define JZ_LCD_CMD_EOF_IRQ BIT(16) #define JZ_LCD_CMD_ENABLE_PAL BIT(12) #define JZ_LCD_SYNC_MASK 0x3ff #define JZ_LCD_STATE_DISABLED BIT(0) struct jzfb_framedesc { uint32_t next; uint32_t addr; uint32_t id; uint32_t cmd; } __packed; struct jzfb { struct fb_info *fb; struct platform_device *pdev; void __iomem *base; struct resource 
*mem; struct jz4740_fb_platform_data *pdata; size_t vidmem_size; void *vidmem; dma_addr_t vidmem_phys; struct jzfb_framedesc *framedesc; dma_addr_t framedesc_phys; struct clk *ldclk; struct clk *lpclk; unsigned is_enabled:1; struct mutex lock; uint32_t pseudo_palette[16]; }; static const struct fb_fix_screeninfo jzfb_fix __devinitdata = { .id = "JZ4740 FB", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_TRUECOLOR, .xpanstep = 0, .ypanstep = 0, .ywrapstep = 0, .accel = FB_ACCEL_NONE, }; static const struct jz_gpio_bulk_request jz_lcd_ctrl_pins[] = { JZ_GPIO_BULK_PIN(LCD_PCLK), JZ_GPIO_BULK_PIN(LCD_HSYNC), JZ_GPIO_BULK_PIN(LCD_VSYNC), JZ_GPIO_BULK_PIN(LCD_DE), JZ_GPIO_BULK_PIN(LCD_PS), JZ_GPIO_BULK_PIN(LCD_REV), JZ_GPIO_BULK_PIN(LCD_CLS), JZ_GPIO_BULK_PIN(LCD_SPL), }; static const struct jz_gpio_bulk_request jz_lcd_data_pins[] = { JZ_GPIO_BULK_PIN(LCD_DATA0), JZ_GPIO_BULK_PIN(LCD_DATA1), JZ_GPIO_BULK_PIN(LCD_DATA2), JZ_GPIO_BULK_PIN(LCD_DATA3), JZ_GPIO_BULK_PIN(LCD_DATA4), JZ_GPIO_BULK_PIN(LCD_DATA5), JZ_GPIO_BULK_PIN(LCD_DATA6), JZ_GPIO_BULK_PIN(LCD_DATA7), JZ_GPIO_BULK_PIN(LCD_DATA8), JZ_GPIO_BULK_PIN(LCD_DATA9), JZ_GPIO_BULK_PIN(LCD_DATA10), JZ_GPIO_BULK_PIN(LCD_DATA11), JZ_GPIO_BULK_PIN(LCD_DATA12), JZ_GPIO_BULK_PIN(LCD_DATA13), JZ_GPIO_BULK_PIN(LCD_DATA14), JZ_GPIO_BULK_PIN(LCD_DATA15), JZ_GPIO_BULK_PIN(LCD_DATA16), JZ_GPIO_BULK_PIN(LCD_DATA17), }; static unsigned int jzfb_num_ctrl_pins(struct jzfb *jzfb) { unsigned int num; switch (jzfb->pdata->lcd_type) { case JZ_LCD_TYPE_GENERIC_16_BIT: num = 4; break; case JZ_LCD_TYPE_GENERIC_18_BIT: num = 4; break; case JZ_LCD_TYPE_8BIT_SERIAL: num = 3; break; case JZ_LCD_TYPE_SPECIAL_TFT_1: case JZ_LCD_TYPE_SPECIAL_TFT_2: case JZ_LCD_TYPE_SPECIAL_TFT_3: num = 8; break; default: num = 0; break; } return num; } static unsigned int jzfb_num_data_pins(struct jzfb *jzfb) { unsigned int num; switch (jzfb->pdata->lcd_type) { case JZ_LCD_TYPE_GENERIC_16_BIT: num = 16; break; case JZ_LCD_TYPE_GENERIC_18_BIT: num = 18; break; case JZ_LCD_TYPE_8BIT_SERIAL: num = 8; break; case JZ_LCD_TYPE_SPECIAL_TFT_1: case JZ_LCD_TYPE_SPECIAL_TFT_2: case JZ_LCD_TYPE_SPECIAL_TFT_3: if (jzfb->pdata->bpp == 18) num = 18; else num = 16; break; default: num = 0; break; } return num; } /* Based on CNVT_TOHW macro from skeletonfb.c */ static inline uint32_t jzfb_convert_color_to_hw(unsigned val, struct fb_bitfield *bf) { return (((val << bf->length) + 0x7FFF - val) >> 16) << bf->offset; } static int jzfb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *fb) { uint32_t color; if (regno >= 16) return -EINVAL; color = jzfb_convert_color_to_hw(red, &fb->var.red); color |= jzfb_convert_color_to_hw(green, &fb->var.green); color |= jzfb_convert_color_to_hw(blue, &fb->var.blue); color |= jzfb_convert_color_to_hw(transp, &fb->var.transp); ((uint32_t *)(fb->pseudo_palette))[regno] = color; return 0; } static int jzfb_get_controller_bpp(struct jzfb *jzfb) { switch (jzfb->pdata->bpp) { case 18: case 24: return 32; case 15: return 16; default: return jzfb->pdata->bpp; } } static struct fb_videomode *jzfb_get_mode(struct jzfb *jzfb, struct fb_var_screeninfo *var) { size_t i; struct fb_videomode *mode = jzfb->pdata->modes; for (i = 0; i < jzfb->pdata->num_modes; ++i, ++mode) { if (mode->xres == var->xres && mode->yres == var->yres) return mode; } return NULL; } static int jzfb_check_var(struct fb_var_screeninfo *var, struct fb_info *fb) { struct jzfb *jzfb = fb->par; struct fb_videomode *mode; if (var->bits_per_pixel != 
jzfb_get_controller_bpp(jzfb) && var->bits_per_pixel != jzfb->pdata->bpp) return -EINVAL; mode = jzfb_get_mode(jzfb, var); if (mode == NULL) return -EINVAL; fb_videomode_to_var(var, mode); switch (jzfb->pdata->bpp) { case 8: break; case 15: var->red.offset = 10; var->red.length = 5; var->green.offset = 6; var->green.length = 5; var->blue.offset = 0; var->blue.length = 5; break; case 16: var->red.offset = 11; var->red.length = 5; var->green.offset = 5; var->green.length = 6; var->blue.offset = 0; var->blue.length = 5; break; case 18: var->red.offset = 16; var->red.length = 6; var->green.offset = 8; var->green.length = 6; var->blue.offset = 0; var->blue.length = 6; var->bits_per_pixel = 32; break; case 32: case 24: var->transp.offset = 24; var->transp.length = 8; var->red.offset = 16; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; var->bits_per_pixel = 32; break; default: break; } return 0; } static int jzfb_set_par(struct fb_info *info) { struct jzfb *jzfb = info->par; struct jz4740_fb_platform_data *pdata = jzfb->pdata; struct fb_var_screeninfo *var = &info->var; struct fb_videomode *mode; uint16_t hds, vds; uint16_t hde, vde; uint16_t ht, vt; uint32_t ctrl; uint32_t cfg; unsigned long rate; mode = jzfb_get_mode(jzfb, var); if (mode == NULL) return -EINVAL; if (mode == info->mode) return 0; info->mode = mode; hds = mode->hsync_len + mode->left_margin; hde = hds + mode->xres; ht = hde + mode->right_margin; vds = mode->vsync_len + mode->upper_margin; vde = vds + mode->yres; vt = vde + mode->lower_margin; ctrl = JZ_LCD_CTRL_OFUP | JZ_LCD_CTRL_BURST_16; switch (pdata->bpp) { case 1: ctrl |= JZ_LCD_CTRL_BPP_1; break; case 2: ctrl |= JZ_LCD_CTRL_BPP_2; break; case 4: ctrl |= JZ_LCD_CTRL_BPP_4; break; case 8: ctrl |= JZ_LCD_CTRL_BPP_8; break; case 15: ctrl |= JZ_LCD_CTRL_RGB555; /* Fall through */ case 16: ctrl |= JZ_LCD_CTRL_BPP_15_16; break; case 18: case 24: case 32: ctrl |= JZ_LCD_CTRL_BPP_18_24; break; default: break; } cfg = pdata->lcd_type & 0xf; if (!(mode->sync & FB_SYNC_HOR_HIGH_ACT)) cfg |= JZ_LCD_CFG_HSYNC_ACTIVE_LOW; if (!(mode->sync & FB_SYNC_VERT_HIGH_ACT)) cfg |= JZ_LCD_CFG_VSYNC_ACTIVE_LOW; if (pdata->pixclk_falling_edge) cfg |= JZ_LCD_CFG_PCLK_FALLING_EDGE; if (pdata->date_enable_active_low) cfg |= JZ_LCD_CFG_DE_ACTIVE_LOW; if (pdata->lcd_type == JZ_LCD_TYPE_GENERIC_18_BIT) cfg |= JZ_LCD_CFG_18_BIT; if (mode->pixclock) { rate = PICOS2KHZ(mode->pixclock) * 1000; mode->refresh = rate / vt / ht; } else { if (pdata->lcd_type == JZ_LCD_TYPE_8BIT_SERIAL) rate = mode->refresh * (vt + 2 * mode->xres) * ht; else rate = mode->refresh * vt * ht; mode->pixclock = KHZ2PICOS(rate / 1000); } mutex_lock(&jzfb->lock); if (!jzfb->is_enabled) clk_enable(jzfb->ldclk); else ctrl |= JZ_LCD_CTRL_ENABLE; switch (pdata->lcd_type) { case JZ_LCD_TYPE_SPECIAL_TFT_1: case JZ_LCD_TYPE_SPECIAL_TFT_2: case JZ_LCD_TYPE_SPECIAL_TFT_3: writel(pdata->special_tft_config.spl, jzfb->base + JZ_REG_LCD_SPL); writel(pdata->special_tft_config.cls, jzfb->base + JZ_REG_LCD_CLS); writel(pdata->special_tft_config.ps, jzfb->base + JZ_REG_LCD_PS); writel(pdata->special_tft_config.rev, jzfb->base + JZ_REG_LCD_REV); break; default: cfg |= JZ_LCD_CFG_PS_DISABLE; cfg |= JZ_LCD_CFG_CLS_DISABLE; cfg |= JZ_LCD_CFG_SPL_DISABLE; cfg |= JZ_LCD_CFG_REV_DISABLE; break; } writel(mode->hsync_len, jzfb->base + JZ_REG_LCD_HSYNC); writel(mode->vsync_len, jzfb->base + JZ_REG_LCD_VSYNC); writel((ht << 16) | vt, jzfb->base + JZ_REG_LCD_VAT); writel((hds << 16) | hde, jzfb->base + 
JZ_REG_LCD_DAH); writel((vds << 16) | vde, jzfb->base + JZ_REG_LCD_DAV); writel(cfg, jzfb->base + JZ_REG_LCD_CFG); writel(ctrl, jzfb->base + JZ_REG_LCD_CTRL); if (!jzfb->is_enabled) clk_disable(jzfb->ldclk); mutex_unlock(&jzfb->lock); clk_set_rate(jzfb->lpclk, rate); clk_set_rate(jzfb->ldclk, rate * 3); return 0; } static void jzfb_enable(struct jzfb *jzfb) { uint32_t ctrl; clk_enable(jzfb->ldclk); jz_gpio_bulk_resume(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb)); jz_gpio_bulk_resume(jz_lcd_data_pins, jzfb_num_data_pins(jzfb)); writel(0, jzfb->base + JZ_REG_LCD_STATE); writel(jzfb->framedesc->next, jzfb->base + JZ_REG_LCD_DA0); ctrl = readl(jzfb->base + JZ_REG_LCD_CTRL); ctrl |= JZ_LCD_CTRL_ENABLE; ctrl &= ~JZ_LCD_CTRL_DISABLE; writel(ctrl, jzfb->base + JZ_REG_LCD_CTRL); } static void jzfb_disable(struct jzfb *jzfb) { uint32_t ctrl; ctrl = readl(jzfb->base + JZ_REG_LCD_CTRL); ctrl |= JZ_LCD_CTRL_DISABLE; writel(ctrl, jzfb->base + JZ_REG_LCD_CTRL); do { ctrl = readl(jzfb->base + JZ_REG_LCD_STATE); } while (!(ctrl & JZ_LCD_STATE_DISABLED)); jz_gpio_bulk_suspend(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb)); jz_gpio_bulk_suspend(jz_lcd_data_pins, jzfb_num_data_pins(jzfb)); clk_disable(jzfb->ldclk); } static int jzfb_blank(int blank_mode, struct fb_info *info) { struct jzfb *jzfb = info->par; switch (blank_mode) { case FB_BLANK_UNBLANK: mutex_lock(&jzfb->lock); if (jzfb->is_enabled) { mutex_unlock(&jzfb->lock); return 0; } jzfb_enable(jzfb); jzfb->is_enabled = 1; mutex_unlock(&jzfb->lock); break; default: mutex_lock(&jzfb->lock); if (!jzfb->is_enabled) { mutex_unlock(&jzfb->lock); return 0; } jzfb_disable(jzfb); jzfb->is_enabled = 0; mutex_unlock(&jzfb->lock); break; } return 0; } static int jzfb_alloc_devmem(struct jzfb *jzfb) { int max_videosize = 0; struct fb_videomode *mode = jzfb->pdata->modes; void *page; int i; for (i = 0; i < jzfb->pdata->num_modes; ++mode, ++i) { if (max_videosize < mode->xres * mode->yres) max_videosize = mode->xres * mode->yres; } max_videosize *= jzfb_get_controller_bpp(jzfb) >> 3; jzfb->framedesc = dma_alloc_coherent(&jzfb->pdev->dev, sizeof(*jzfb->framedesc), &jzfb->framedesc_phys, GFP_KERNEL); if (!jzfb->framedesc) return -ENOMEM; jzfb->vidmem_size = PAGE_ALIGN(max_videosize); jzfb->vidmem = dma_alloc_coherent(&jzfb->pdev->dev, jzfb->vidmem_size, &jzfb->vidmem_phys, GFP_KERNEL); if (!jzfb->vidmem) goto err_free_framedesc; for (page = jzfb->vidmem; page < jzfb->vidmem + PAGE_ALIGN(jzfb->vidmem_size); page += PAGE_SIZE) { SetPageReserved(virt_to_page(page)); } jzfb->framedesc->next = jzfb->framedesc_phys; jzfb->framedesc->addr = jzfb->vidmem_phys; jzfb->framedesc->id = 0xdeafbead; jzfb->framedesc->cmd = 0; jzfb->framedesc->cmd |= max_videosize / 4; return 0; err_free_framedesc: dma_free_coherent(&jzfb->pdev->dev, sizeof(*jzfb->framedesc), jzfb->framedesc, jzfb->framedesc_phys); return -ENOMEM; } static void jzfb_free_devmem(struct jzfb *jzfb) { dma_free_coherent(&jzfb->pdev->dev, jzfb->vidmem_size, jzfb->vidmem, jzfb->vidmem_phys); dma_free_coherent(&jzfb->pdev->dev, sizeof(*jzfb->framedesc), jzfb->framedesc, jzfb->framedesc_phys); } static struct fb_ops jzfb_ops = { .owner = THIS_MODULE, .fb_check_var = jzfb_check_var, .fb_set_par = jzfb_set_par, .fb_blank = jzfb_blank, .fb_fillrect = sys_fillrect, .fb_copyarea = sys_copyarea, .fb_imageblit = sys_imageblit, .fb_setcolreg = jzfb_setcolreg, }; static int __devinit jzfb_probe(struct platform_device *pdev) { int ret; struct jzfb *jzfb; struct fb_info *fb; struct jz4740_fb_platform_data *pdata = pdev->dev.platform_data; 
struct resource *mem; if (!pdata) { dev_err(&pdev->dev, "Missing platform data\n"); return -ENXIO; } mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) { dev_err(&pdev->dev, "Failed to get register memory resource\n"); return -ENXIO; } mem = request_mem_region(mem->start, resource_size(mem), pdev->name); if (!mem) { dev_err(&pdev->dev, "Failed to request register memory region\n"); return -EBUSY; } fb = framebuffer_alloc(sizeof(struct jzfb), &pdev->dev); if (!fb) { dev_err(&pdev->dev, "Failed to allocate framebuffer device\n"); ret = -ENOMEM; goto err_release_mem_region; } fb->fbops = &jzfb_ops; fb->flags = FBINFO_DEFAULT; jzfb = fb->par; jzfb->pdev = pdev; jzfb->pdata = pdata; jzfb->mem = mem; jzfb->ldclk = clk_get(&pdev->dev, "lcd"); if (IS_ERR(jzfb->ldclk)) { ret = PTR_ERR(jzfb->ldclk); dev_err(&pdev->dev, "Failed to get lcd clock: %d\n", ret); goto err_framebuffer_release; } jzfb->lpclk = clk_get(&pdev->dev, "lcd_pclk"); if (IS_ERR(jzfb->lpclk)) { ret = PTR_ERR(jzfb->lpclk); dev_err(&pdev->dev, "Failed to get lcd pixel clock: %d\n", ret); goto err_put_ldclk; } jzfb->base = ioremap(mem->start, resource_size(mem)); if (!jzfb->base) { dev_err(&pdev->dev, "Failed to ioremap register memory region\n"); ret = -EBUSY; goto err_put_lpclk; } platform_set_drvdata(pdev, jzfb); mutex_init(&jzfb->lock); fb_videomode_to_modelist(pdata->modes, pdata->num_modes, &fb->modelist); fb_videomode_to_var(&fb->var, pdata->modes); fb->var.bits_per_pixel = pdata->bpp; jzfb_check_var(&fb->var, fb); ret = jzfb_alloc_devmem(jzfb); if (ret) { dev_err(&pdev->dev, "Failed to allocate video memory\n"); goto err_iounmap; } fb->fix = jzfb_fix; fb->fix.line_length = fb->var.bits_per_pixel * fb->var.xres / 8; fb->fix.mmio_start = mem->start; fb->fix.mmio_len = resource_size(mem); fb->fix.smem_start = jzfb->vidmem_phys; fb->fix.smem_len = fb->fix.line_length * fb->var.yres; fb->screen_base = jzfb->vidmem; fb->pseudo_palette = jzfb->pseudo_palette; fb_alloc_cmap(&fb->cmap, 256, 0); clk_enable(jzfb->ldclk); jzfb->is_enabled = 1; writel(jzfb->framedesc->next, jzfb->base + JZ_REG_LCD_DA0); fb->mode = NULL; jzfb_set_par(fb); jz_gpio_bulk_request(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb)); jz_gpio_bulk_request(jz_lcd_data_pins, jzfb_num_data_pins(jzfb)); ret = register_framebuffer(fb); if (ret) { dev_err(&pdev->dev, "Failed to register framebuffer: %d\n", ret); goto err_free_devmem; } jzfb->fb = fb; return 0; err_free_devmem: jz_gpio_bulk_free(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb)); jz_gpio_bulk_free(jz_lcd_data_pins, jzfb_num_data_pins(jzfb)); fb_dealloc_cmap(&fb->cmap); jzfb_free_devmem(jzfb); err_iounmap: iounmap(jzfb->base); err_put_lpclk: clk_put(jzfb->lpclk); err_put_ldclk: clk_put(jzfb->ldclk); err_framebuffer_release: framebuffer_release(fb); err_release_mem_region: release_mem_region(mem->start, resource_size(mem)); return ret; } static int __devexit jzfb_remove(struct platform_device *pdev) { struct jzfb *jzfb = platform_get_drvdata(pdev); jzfb_blank(FB_BLANK_POWERDOWN, jzfb->fb); jz_gpio_bulk_free(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb)); jz_gpio_bulk_free(jz_lcd_data_pins, jzfb_num_data_pins(jzfb)); iounmap(jzfb->base); release_mem_region(jzfb->mem->start, resource_size(jzfb->mem)); fb_dealloc_cmap(&jzfb->fb->cmap); jzfb_free_devmem(jzfb); platform_set_drvdata(pdev, NULL); clk_put(jzfb->lpclk); clk_put(jzfb->ldclk); framebuffer_release(jzfb->fb); return 0; } #ifdef CONFIG_PM static int jzfb_suspend(struct device *dev) { struct jzfb *jzfb = dev_get_drvdata(dev); console_lock(); 
fb_set_suspend(jzfb->fb, 1); console_unlock(); mutex_lock(&jzfb->lock); if (jzfb->is_enabled) jzfb_disable(jzfb); mutex_unlock(&jzfb->lock); return 0; } static int jzfb_resume(struct device *dev) { struct jzfb *jzfb = dev_get_drvdata(dev); clk_enable(jzfb->ldclk); mutex_lock(&jzfb->lock); if (jzfb->is_enabled) jzfb_enable(jzfb); mutex_unlock(&jzfb->lock); console_lock(); fb_set_suspend(jzfb->fb, 0); console_unlock(); return 0; } static const struct dev_pm_ops jzfb_pm_ops = { .suspend = jzfb_suspend, .resume = jzfb_resume, .poweroff = jzfb_suspend, .restore = jzfb_resume, }; #define JZFB_PM_OPS (&jzfb_pm_ops) #else #define JZFB_PM_OPS NULL #endif static struct platform_driver jzfb_driver = { .probe = jzfb_probe, .remove = __devexit_p(jzfb_remove), .driver = { .name = "jz4740-fb", .pm = JZFB_PM_OPS, }, }; static int __init jzfb_init(void) { return platform_driver_register(&jzfb_driver); } module_init(jzfb_init); static void __exit jzfb_exit(void) { platform_driver_unregister(&jzfb_driver); } module_exit(jzfb_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); MODULE_DESCRIPTION("JZ4740 SoC LCD framebuffer driver"); MODULE_ALIAS("platform:jz4740-fb");
gpl-2.0
struct resource *mem; if (!pdata) { dev_err(&pdev->dev, "Missing platform data\n"); return -ENXIO; } mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) { dev_err(&pdev->dev, "Failed to get register memory resource\n"); return -ENXIO; } mem = request_mem_region(mem->start, resource_size(mem), pdev->name); if (!mem) { dev_err(&pdev->dev, "Failed to request register memory region\n"); return -EBUSY; } fb = framebuffer_alloc(sizeof(struct jzfb), &pdev->dev); if (!fb) { dev_err(&pdev->dev, "Failed to allocate framebuffer device\n"); ret = -ENOMEM; goto err_release_mem_region; } fb->fbops = &jzfb_ops; fb->flags = FBINFO_DEFAULT; jzfb = fb->par; jzfb->pdev = pdev; jzfb->pdata = pdata; jzfb->mem = mem; jzfb->ldclk = clk_get(&pdev->dev, "lcd"); if (IS_ERR(jzfb->ldclk)) { ret = PTR_ERR(jzfb->ldclk); dev_err(&pdev->dev, "Failed to get lcd clock: %d\n", ret); goto err_framebuffer_release; } jzfb->lpclk = clk_get(&pdev->dev, "lcd_pclk"); if (IS_ERR(jzfb->lpclk)) { ret = PTR_ERR(jzfb->lpclk); dev_err(&pdev->dev, "Failed to get lcd pixel clock: %d\n", ret); goto err_put_ldclk; } jzfb->base = ioremap(mem->start, resource_size(mem)); if (!jzfb->base) { dev_err(&pdev->dev, "Failed to ioremap register memory region\n"); ret = -EBUSY; goto err_put_lpclk; } platform_set_drvdata(pdev, jzfb); mutex_init(&jzfb->lock); fb_videomode_to_modelist(pdata->modes, pdata->num_modes, &fb->modelist); fb_videomode_to_var(&fb->var, pdata->modes); fb->var.bits_per_pixel = pdata->bpp; jzfb_check_var(&fb->var, fb); ret = jzfb_alloc_devmem(jzfb); if (ret) { dev_err(&pdev->dev, "Failed to allocate video memory\n"); goto err_iounmap; } fb->fix = jzfb_fix; fb->fix.line_length = fb->var.bits_per_pixel * fb->var.xres / 8; fb->fix.mmio_start = mem->start; fb->fix.mmio_len = resource_size(mem); fb->fix.smem_start = jzfb->vidmem_phys; fb->fix.smem_len = fb->fix.line_length * fb->var.yres; fb->screen_base = jzfb->vidmem; fb->pseudo_palette = jzfb->pseudo_palette; fb_alloc_cmap(&fb->cmap, 256, 0); clk_enable(jzfb->ldclk); jzfb->is_enabled = 1; writel(jzfb->framedesc->next, jzfb->base + JZ_REG_LCD_DA0); fb->mode = NULL; jzfb_set_par(fb); jz_gpio_bulk_request(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb)); jz_gpio_bulk_request(jz_lcd_data_pins, jzfb_num_data_pins(jzfb)); ret = register_framebuffer(fb); if (ret) { dev_err(&pdev->dev, "Failed to register framebuffer: %d\n", ret); goto err_free_devmem; } jzfb->fb = fb; return 0; err_free_devmem: jz_gpio_bulk_free(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb)); jz_gpio_bulk_free(jz_lcd_data_pins, jzfb_num_data_pins(jzfb)); fb_dealloc_cmap(&fb->cmap); jzfb_free_devmem(jzfb); err_iounmap: iounmap(jzfb->base); err_put_lpclk: clk_put(jzfb->lpclk); err_put_ldclk: clk_put(jzfb->ldclk); err_framebuffer_release: framebuffer_release(fb); err_release_mem_region: release_mem_region(mem->start, resource_size(mem)); return ret; } static int __devexit jzfb_remove(struct platform_device *pdev) { struct jzfb *jzfb = platform_get_drvdata(pdev); jzfb_blank(FB_BLANK_POWERDOWN, jzfb->fb); jz_gpio_bulk_free(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb)); jz_gpio_bulk_free(jz_lcd_data_pins, jzfb_num_data_pins(jzfb)); iounmap(jzfb->base); release_mem_region(jzfb->mem->start, resource_size(jzfb->mem)); fb_dealloc_cmap(&jzfb->fb->cmap); jzfb_free_devmem(jzfb); platform_set_drvdata(pdev, NULL); clk_put(jzfb->lpclk); clk_put(jzfb->ldclk); framebuffer_release(jzfb->fb); return 0; } #ifdef CONFIG_PM static int jzfb_suspend(struct device *dev) { struct jzfb *jzfb = dev_get_drvdata(dev); console_lock(); 
fb_set_suspend(jzfb->fb, 1); console_unlock(); mutex_lock(&jzfb->lock); if (jzfb->is_enabled) jzfb_disable(jzfb); mutex_unlock(&jzfb->lock); return 0; } static int jzfb_resume(struct device *dev) { struct jzfb *jzfb = dev_get_drvdata(dev); clk_enable(jzfb->ldclk); mutex_lock(&jzfb->lock); if (jzfb->is_enabled) jzfb_enable(jzfb); mutex_unlock(&jzfb->lock); console_lock(); fb_set_suspend(jzfb->fb, 0); console_unlock(); return 0; } static const struct dev_pm_ops jzfb_pm_ops = { .suspend = jzfb_suspend, .resume = jzfb_resume, .poweroff = jzfb_suspend, .restore = jzfb_resume, }; #define JZFB_PM_OPS (&jzfb_pm_ops) #else #define JZFB_PM_OPS NULL #endif static struct platform_driver jzfb_driver = { .probe = jzfb_probe, .remove = __devexit_p(jzfb_remove), .driver = { .name = "jz4740-fb", .pm = JZFB_PM_OPS, }, }; static int __init jzfb_init(void) { return platform_driver_register(&jzfb_driver); } module_init(jzfb_init); static void __exit jzfb_exit(void) { platform_driver_unregister(&jzfb_driver); } module_exit(jzfb_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); MODULE_DESCRIPTION("JZ4740 SoC LCD framebuffer driver"); MODULE_ALIAS("platform:jz4740-fb");
gpl-2.0
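
The jzfb_setcolreg() path above packs 16-bit fbdev color components into the hardware pixel format via jzfb_convert_color_to_hw(). A minimal standalone sketch of that scaling-and-packing math, assuming the RGB565 field layout from the driver's 16 bpp case (the sample component values are made up):

#include <stdint.h>
#include <stdio.h>

/* Scale a 16-bit fbdev component down to `length` bits (with rounding)
 * and place it at `offset`, as jzfb_convert_color_to_hw() does. */
static uint32_t convert_color_to_hw(unsigned val, unsigned length,
				    unsigned offset)
{
	return (((val << length) + 0x7FFF - val) >> 16) << offset;
}

int main(void)
{
	/* RGB565 layout from the driver's 16 bpp case:
	 * red 5 bits @ 11, green 6 bits @ 5, blue 5 bits @ 0 */
	uint32_t pixel = convert_color_to_hw(0xffff, 5, 11) | /* full red  */
			 convert_color_to_hw(0x8000, 6, 5)  | /* mid green */
			 convert_color_to_hw(0x0000, 5, 0);   /* no blue   */

	printf("pixel = 0x%04x\n", (unsigned)pixel); /* prints 0xfbe0 */
	return 0;
}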
Abhinav1997/android_kernel_sony_ste
arch/powerpc/boot/mpc52xx-psc.c
13867
1467
/* * MPC5200 PSC serial console support. * * Author: Grant Likely <grant.likely@secretlab.ca> * * Copyright (c) 2007 Secret Lab Technologies Ltd. * Copyright (c) 2007 Freescale Semiconductor, Inc. * * It is assumed that the firmware (or the platform file) has already set * up the port. */ #include "types.h" #include "io.h" #include "ops.h" /* Programmable Serial Controller (PSC) status register bits */ #define MPC52xx_PSC_SR 0x04 #define MPC52xx_PSC_SR_RXRDY 0x0100 #define MPC52xx_PSC_SR_RXFULL 0x0200 #define MPC52xx_PSC_SR_TXRDY 0x0400 #define MPC52xx_PSC_SR_TXEMP 0x0800 #define MPC52xx_PSC_BUFFER 0x0C static void *psc; static int psc_open(void) { /* Assume the firmware has already configured the PSC into * uart mode */ return 0; } static void psc_putc(unsigned char c) { while (!(in_be16(psc + MPC52xx_PSC_SR) & MPC52xx_PSC_SR_TXRDY)) ; out_8(psc + MPC52xx_PSC_BUFFER, c); } static unsigned char psc_tstc(void) { return (in_be16(psc + MPC52xx_PSC_SR) & MPC52xx_PSC_SR_RXRDY) != 0; } static unsigned char psc_getc(void) { while (!(in_be16(psc + MPC52xx_PSC_SR) & MPC52xx_PSC_SR_RXRDY)) ; return in_8(psc + MPC52xx_PSC_BUFFER); } int mpc5200_psc_console_init(void *devp, struct serial_console_data *scdp) { /* Get the base address of the psc registers */ if (dt_get_virtual_reg(devp, &psc, 1) < 1) return -1; scdp->open = psc_open; scdp->putc = psc_putc; scdp->getc = psc_getc; scdp->tstc = psc_tstc; return 0; }
gpl-2.0
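
psc_putc() and psc_getc() above implement the classic polled-UART pattern: spin on a status bit, then touch the data register. A hedged standalone sketch of the same pattern using raw volatile MMIO instead of the bootwrapper's in_be16()/out_8() helpers (the base pointer is assumed to have been mapped elsewhere; this is an illustration, not the bootwrapper code):

#include <stdint.h>

#define PSC_SR       0x04
#define PSC_SR_RXRDY 0x0100
#define PSC_SR_TXRDY 0x0400
#define PSC_BUFFER   0x0C

static volatile uint8_t *psc_base; /* assumed mapped by platform code */

/* Big-endian 16-bit read of the status register. */
static uint16_t psc_status(void)
{
	return (uint16_t)((psc_base[PSC_SR] << 8) | psc_base[PSC_SR + 1]);
}

static void psc_putc(char c)
{
	while (!(psc_status() & PSC_SR_TXRDY))
		; /* spin until the transmitter can take a byte */
	psc_base[PSC_BUFFER] = (uint8_t)c;
}

static char psc_getc(void)
{
	while (!(psc_status() & PSC_SR_RXRDY))
		; /* spin until a byte has been received */
	return (char)psc_base[PSC_BUFFER];
}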
jumpnow/linux
drivers/usb/dwc3/dwc3-st.c
44
10313
/** * dwc3-st.c Support for dwc3 platform devices on ST Microelectronics platforms * * This is a small driver for the dwc3 to provide the glue logic * to configure the controller. Tested on STi platforms. * * Copyright (C) 2014 Stmicroelectronics * * Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> * Contributors: Aymen Bouattay <aymen.bouattay@st.com> * Peter Griffin <peter.griffin@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Inspired by dwc3-omap.c and dwc3-exynos.c. */ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/regmap.h> #include <linux/reset.h> #include <linux/pinctrl/consumer.h> #include <linux/usb/of.h> #include "core.h" #include "io.h" /* glue registers */ #define CLKRST_CTRL 0x00 #define AUX_CLK_EN BIT(0) #define SW_PIPEW_RESET_N BIT(4) #define EXT_CFG_RESET_N BIT(8) /* * 1'b0 : The host controller complies with the xHCI revision 0.96 * 1'b1 : The host controller complies with the xHCI revision 1.0 */ #define XHCI_REVISION BIT(12) #define USB2_VBUS_MNGMNT_SEL1 0x2C /* * For all fields in USB2_VBUS_MNGMNT_SEL1 * 2’b00 : Override value from Reg 0x30 is selected * 2’b01 : utmiotg_<signal_name> from usb3_top is selected * 2’b10 : pipew_<signal_name> from PIPEW instance is selected * 2’b11 : value is 1'b0 */ #define USB2_VBUS_REG30 0x0 #define USB2_VBUS_UTMIOTG 0x1 #define USB2_VBUS_PIPEW 0x2 #define USB2_VBUS_ZERO 0x3 #define SEL_OVERRIDE_VBUSVALID(n) (n << 0) #define SEL_OVERRIDE_POWERPRESENT(n) (n << 4) #define SEL_OVERRIDE_BVALID(n) (n << 8) /* Static DRD configuration */ #define USB3_CONTROL_MASK 0xf77 #define USB3_DEVICE_NOT_HOST BIT(0) #define USB3_FORCE_VBUSVALID BIT(1) #define USB3_DELAY_VBUSVALID BIT(2) #define USB3_SEL_FORCE_OPMODE BIT(4) #define USB3_FORCE_OPMODE(n) (n << 5) #define USB3_SEL_FORCE_DPPULLDOWN2 BIT(8) #define USB3_FORCE_DPPULLDOWN2 BIT(9) #define USB3_SEL_FORCE_DMPULLDOWN2 BIT(10) #define USB3_FORCE_DMPULLDOWN2 BIT(11) /** * struct st_dwc3 - dwc3-st driver private structure * @dev: device pointer * @glue_base: ioaddr for the glue registers * @regmap: regmap pointer for getting syscfg * @syscfg_reg_off: usb syscfg control offset * @dr_mode: drd static host/device config * @rstc_pwrdn: rest controller for powerdown signal * @rstc_rst: reset controller for softreset signal */ struct st_dwc3 { struct device *dev; void __iomem *glue_base; struct regmap *regmap; int syscfg_reg_off; enum usb_dr_mode dr_mode; struct reset_control *rstc_pwrdn; struct reset_control *rstc_rst; }; static inline u32 st_dwc3_readl(void __iomem *base, u32 offset) { return readl_relaxed(base + offset); } static inline void st_dwc3_writel(void __iomem *base, u32 offset, u32 value) { writel_relaxed(value, base + offset); } /** * st_dwc3_drd_init: program the port * @dwc3_data: driver private structure * Description: this function is to program the port as either host or device * according to the static configuration passed from devicetree. * OTG and dual role are not yet supported! 
*/ static int st_dwc3_drd_init(struct st_dwc3 *dwc3_data) { u32 val; int err; err = regmap_read(dwc3_data->regmap, dwc3_data->syscfg_reg_off, &val); if (err) return err; val &= USB3_CONTROL_MASK; switch (dwc3_data->dr_mode) { case USB_DR_MODE_PERIPHERAL: val &= ~(USB3_DELAY_VBUSVALID | USB3_SEL_FORCE_OPMODE | USB3_FORCE_OPMODE(0x3) | USB3_SEL_FORCE_DPPULLDOWN2 | USB3_FORCE_DPPULLDOWN2 | USB3_SEL_FORCE_DMPULLDOWN2 | USB3_FORCE_DMPULLDOWN2); /* * USB3_PORT2_FORCE_VBUSVALID When '1' and when * USB3_PORT2_DEVICE_NOT_HOST = 1, forces VBUSVLDEXT2 input * of the pico PHY to 1. */ val |= USB3_DEVICE_NOT_HOST | USB3_FORCE_VBUSVALID; break; case USB_DR_MODE_HOST: val &= ~(USB3_DEVICE_NOT_HOST | USB3_FORCE_VBUSVALID | USB3_SEL_FORCE_OPMODE | USB3_FORCE_OPMODE(0x3) | USB3_SEL_FORCE_DPPULLDOWN2 | USB3_FORCE_DPPULLDOWN2 | USB3_SEL_FORCE_DMPULLDOWN2 | USB3_FORCE_DMPULLDOWN2); /* * USB3_DELAY_VBUSVALID is ANDed with USB_C_VBUSVALID. Thus, * when set to ‘0‘, it can delay the arrival of VBUSVALID * information to VBUSVLDEXT2 input of the pico PHY. * We don't want to do that so we set the bit to '1'. */ val |= USB3_DELAY_VBUSVALID; break; default: dev_err(dwc3_data->dev, "Unsupported mode of operation %d\n", dwc3_data->dr_mode); return -EINVAL; } return regmap_write(dwc3_data->regmap, dwc3_data->syscfg_reg_off, val); } /** * st_dwc3_init: init the controller via glue logic * @dwc3_data: driver private structure */ static void st_dwc3_init(struct st_dwc3 *dwc3_data) { u32 reg = st_dwc3_readl(dwc3_data->glue_base, CLKRST_CTRL); reg |= AUX_CLK_EN | EXT_CFG_RESET_N | XHCI_REVISION; reg &= ~SW_PIPEW_RESET_N; st_dwc3_writel(dwc3_data->glue_base, CLKRST_CTRL, reg); /* configure mux for vbus, powerpresent and bvalid signals */ reg = st_dwc3_readl(dwc3_data->glue_base, USB2_VBUS_MNGMNT_SEL1); reg |= SEL_OVERRIDE_VBUSVALID(USB2_VBUS_UTMIOTG) | SEL_OVERRIDE_POWERPRESENT(USB2_VBUS_UTMIOTG) | SEL_OVERRIDE_BVALID(USB2_VBUS_UTMIOTG); st_dwc3_writel(dwc3_data->glue_base, USB2_VBUS_MNGMNT_SEL1, reg); reg = st_dwc3_readl(dwc3_data->glue_base, CLKRST_CTRL); reg |= SW_PIPEW_RESET_N; st_dwc3_writel(dwc3_data->glue_base, CLKRST_CTRL, reg); } static int st_dwc3_probe(struct platform_device *pdev) { struct st_dwc3 *dwc3_data; struct resource *res; struct device *dev = &pdev->dev; struct device_node *node = dev->of_node, *child; struct platform_device *child_pdev; struct regmap *regmap; int ret; dwc3_data = devm_kzalloc(dev, sizeof(*dwc3_data), GFP_KERNEL); if (!dwc3_data) return -ENOMEM; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg-glue"); dwc3_data->glue_base = devm_ioremap_resource(dev, res); if (IS_ERR(dwc3_data->glue_base)) return PTR_ERR(dwc3_data->glue_base); regmap = syscon_regmap_lookup_by_phandle(node, "st,syscfg"); if (IS_ERR(regmap)) return PTR_ERR(regmap); dma_set_coherent_mask(dev, dev->coherent_dma_mask); dwc3_data->dev = dev; dwc3_data->regmap = regmap; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "syscfg-reg"); if (!res) { ret = -ENXIO; goto undo_platform_dev_alloc; } dwc3_data->syscfg_reg_off = res->start; dev_vdbg(&pdev->dev, "glue-logic addr 0x%p, syscfg-reg offset 0x%x\n", dwc3_data->glue_base, dwc3_data->syscfg_reg_off); dwc3_data->rstc_pwrdn = devm_reset_control_get_exclusive(dev, "powerdown"); if (IS_ERR(dwc3_data->rstc_pwrdn)) { dev_err(&pdev->dev, "could not get power controller\n"); ret = PTR_ERR(dwc3_data->rstc_pwrdn); goto undo_platform_dev_alloc; } /* Manage PowerDown */ reset_control_deassert(dwc3_data->rstc_pwrdn); dwc3_data->rstc_rst = devm_reset_control_get_shared(dev, 
"softreset"); if (IS_ERR(dwc3_data->rstc_rst)) { dev_err(&pdev->dev, "could not get reset controller\n"); ret = PTR_ERR(dwc3_data->rstc_rst); goto undo_powerdown; } /* Manage SoftReset */ reset_control_deassert(dwc3_data->rstc_rst); child = of_get_child_by_name(node, "dwc3"); if (!child) { dev_err(&pdev->dev, "failed to find dwc3 core node\n"); ret = -ENODEV; goto undo_softreset; } /* Allocate and initialize the core */ ret = of_platform_populate(node, NULL, NULL, dev); if (ret) { dev_err(dev, "failed to add dwc3 core\n"); goto undo_softreset; } child_pdev = of_find_device_by_node(child); if (!child_pdev) { dev_err(dev, "failed to find dwc3 core device\n"); ret = -ENODEV; goto undo_softreset; } dwc3_data->dr_mode = usb_get_dr_mode(&child_pdev->dev); /* * Configure the USB port as device or host according to the static * configuration passed from DT. * DRD is the only mode currently supported so this will be enhanced * as soon as OTG is available. */ ret = st_dwc3_drd_init(dwc3_data); if (ret) { dev_err(dev, "drd initialisation failed\n"); goto undo_softreset; } /* ST glue logic init */ st_dwc3_init(dwc3_data); platform_set_drvdata(pdev, dwc3_data); return 0; undo_softreset: reset_control_assert(dwc3_data->rstc_rst); undo_powerdown: reset_control_assert(dwc3_data->rstc_pwrdn); undo_platform_dev_alloc: platform_device_put(pdev); return ret; } static int st_dwc3_remove(struct platform_device *pdev) { struct st_dwc3 *dwc3_data = platform_get_drvdata(pdev); of_platform_depopulate(&pdev->dev); reset_control_assert(dwc3_data->rstc_pwrdn); reset_control_assert(dwc3_data->rstc_rst); return 0; } #ifdef CONFIG_PM_SLEEP static int st_dwc3_suspend(struct device *dev) { struct st_dwc3 *dwc3_data = dev_get_drvdata(dev); reset_control_assert(dwc3_data->rstc_pwrdn); reset_control_assert(dwc3_data->rstc_rst); pinctrl_pm_select_sleep_state(dev); return 0; } static int st_dwc3_resume(struct device *dev) { struct st_dwc3 *dwc3_data = dev_get_drvdata(dev); int ret; pinctrl_pm_select_default_state(dev); reset_control_deassert(dwc3_data->rstc_pwrdn); reset_control_deassert(dwc3_data->rstc_rst); ret = st_dwc3_drd_init(dwc3_data); if (ret) { dev_err(dev, "drd initialisation failed\n"); return ret; } /* ST glue logic init */ st_dwc3_init(dwc3_data); return 0; } #endif /* CONFIG_PM_SLEEP */ static SIMPLE_DEV_PM_OPS(st_dwc3_dev_pm_ops, st_dwc3_suspend, st_dwc3_resume); static const struct of_device_id st_dwc3_match[] = { { .compatible = "st,stih407-dwc3" }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, st_dwc3_match); static struct platform_driver st_dwc3_driver = { .probe = st_dwc3_probe, .remove = st_dwc3_remove, .driver = { .name = "usb-st-dwc3", .of_match_table = st_dwc3_match, .pm = &st_dwc3_dev_pm_ops, }, }; module_platform_driver(st_dwc3_driver); MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); MODULE_DESCRIPTION("DesignWare USB3 STi Glue Layer"); MODULE_LICENSE("GPL v2");
gpl-2.0
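
st_dwc3_init() above toggles glue-register bits with explicit readl_relaxed()/writel_relaxed() read-modify-write sequences. A small sketch of that pattern factored into a helper; the helper name is hypothetical and not part of the driver, and the commented usage line mirrors the driver's first CLKRST_CTRL step:

#include <linux/io.h>

/* Set `set` and clear `clear` in one glue-register read-modify-write,
 * as st_dwc3_init() does inline for CLKRST_CTRL. */
static void st_glue_rmw(void __iomem *base, u32 offset, u32 set, u32 clear)
{
	u32 reg = readl_relaxed(base + offset);

	reg |= set;
	reg &= ~clear;
	writel_relaxed(reg, base + offset);
}

/* e.g. the first step of st_dwc3_init():
 * st_glue_rmw(glue_base, CLKRST_CTRL,
 *             AUX_CLK_EN | EXT_CFG_RESET_N | XHCI_REVISION,
 *             SW_PIPEW_RESET_N);
 */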
allangj/linux-2.6.32.2_mini2440
drivers/hwmon/coretemp.c
44
13003
/* * coretemp.c - Linux kernel module for hardware monitoring * * Copyright (C) 2007 Rudolf Marek <r.marek@assembler.cz> * * Inspired from many hwmon drivers * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301 USA. */ #include <linux/module.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/hwmon.h> #include <linux/sysfs.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/list.h> #include <linux/platform_device.h> #include <linux/cpu.h> #include <asm/msr.h> #include <asm/processor.h> #define DRVNAME "coretemp" typedef enum { SHOW_TEMP, SHOW_TJMAX, SHOW_TTARGET, SHOW_LABEL, SHOW_NAME } SHOW; /* * Functions declaration */ static struct coretemp_data *coretemp_update_device(struct device *dev); struct coretemp_data { struct device *hwmon_dev; struct mutex update_lock; const char *name; u32 id; char valid; /* zero until following fields are valid */ unsigned long last_updated; /* in jiffies */ int temp; int tjmax; int ttarget; u8 alarm; }; /* * Sysfs stuff */ static ssize_t show_name(struct device *dev, struct device_attribute *devattr, char *buf) { int ret; struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct coretemp_data *data = dev_get_drvdata(dev); if (attr->index == SHOW_NAME) ret = sprintf(buf, "%s\n", data->name); else /* show label */ ret = sprintf(buf, "Core %d\n", data->id); return ret; } static ssize_t show_alarm(struct device *dev, struct device_attribute *devattr, char *buf) { struct coretemp_data *data = coretemp_update_device(dev); /* read the Out-of-spec log, never clear */ return sprintf(buf, "%d\n", data->alarm); } static ssize_t show_temp(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct coretemp_data *data = coretemp_update_device(dev); int err; if (attr->index == SHOW_TEMP) err = data->valid ? 
sprintf(buf, "%d\n", data->temp) : -EAGAIN; else if (attr->index == SHOW_TJMAX) err = sprintf(buf, "%d\n", data->tjmax); else err = sprintf(buf, "%d\n", data->ttarget); return err; } static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, SHOW_TEMP); static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp, NULL, SHOW_TJMAX); static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_temp, NULL, SHOW_TTARGET); static DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL); static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_name, NULL, SHOW_LABEL); static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, SHOW_NAME); static struct attribute *coretemp_attributes[] = { &sensor_dev_attr_name.dev_attr.attr, &sensor_dev_attr_temp1_label.dev_attr.attr, &dev_attr_temp1_crit_alarm.attr, &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_crit.dev_attr.attr, NULL }; static const struct attribute_group coretemp_group = { .attrs = coretemp_attributes, }; static struct coretemp_data *coretemp_update_device(struct device *dev) { struct coretemp_data *data = dev_get_drvdata(dev); mutex_lock(&data->update_lock); if (!data->valid || time_after(jiffies, data->last_updated + HZ)) { u32 eax, edx; data->valid = 0; rdmsr_on_cpu(data->id, MSR_IA32_THERM_STATUS, &eax, &edx); data->alarm = (eax >> 5) & 1; /* update only if data has been valid */ if (eax & 0x80000000) { data->temp = data->tjmax - (((eax >> 16) & 0x7f) * 1000); data->valid = 1; } else { dev_dbg(dev, "Temperature data invalid (0x%x)\n", eax); } data->last_updated = jiffies; } mutex_unlock(&data->update_lock); return data; } static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) { /* The 100C is default for both mobile and non mobile CPUs */ int tjmax = 100000; int tjmax_ee = 85000; int usemsr_ee = 1; int err; u32 eax, edx; /* Early chips have no MSR for TjMax */ if ((c->x86_model == 0xf) && (c->x86_mask < 4)) { usemsr_ee = 0; } /* Atoms seems to have TjMax at 90C */ if (c->x86_model == 0x1c) { usemsr_ee = 0; tjmax = 90000; } if ((c->x86_model > 0xe) && (usemsr_ee)) { u8 platform_id; /* Now we can detect the mobile CPU using Intel provided table http://softwarecommunity.intel.com/Wiki/Mobility/720.htm For Core2 cores, check MSR 0x17, bit 28 1 = Mobile CPU */ err = rdmsr_safe_on_cpu(id, 0x17, &eax, &edx); if (err) { dev_warn(dev, "Unable to access MSR 0x17, assuming desktop" " CPU\n"); usemsr_ee = 0; } else if (c->x86_model < 0x17 && !(eax & 0x10000000)) { /* Trust bit 28 up to Penryn, I could not find any documentation on that; if you happen to know someone at Intel please ask */ usemsr_ee = 0; } else { /* Platform ID bits 52:50 (EDX starts at bit 32) */ platform_id = (edx >> 18) & 0x7; /* Mobile Penryn CPU seems to be platform ID 7 or 5 (guesswork) */ if ((c->x86_model == 0x17) && ((platform_id == 5) || (platform_id == 7))) { /* If MSR EE bit is set, set it to 90 degrees C, otherwise 105 degrees C */ tjmax_ee = 90000; tjmax = 105000; } } } if (usemsr_ee) { err = rdmsr_safe_on_cpu(id, 0xee, &eax, &edx); if (err) { dev_warn(dev, "Unable to access MSR 0xEE, for Tjmax, left" " at default"); } else if (eax & 0x40000000) { tjmax = tjmax_ee; } /* if we dont use msr EE it means we are desktop CPU (with exeception of Atom) */ } else if (tjmax == 100000) { dev_warn(dev, "Using relative temperature scale!\n"); } return tjmax; } static int __devinit coretemp_probe(struct platform_device *pdev) { struct coretemp_data *data; struct cpuinfo_x86 *c = &cpu_data(pdev->id); int err; u32 eax, edx; if (!(data = 
kzalloc(sizeof(struct coretemp_data), GFP_KERNEL))) { err = -ENOMEM; dev_err(&pdev->dev, "Out of memory\n"); goto exit; } data->id = pdev->id; data->name = "coretemp"; mutex_init(&data->update_lock); /* test if we can access the THERM_STATUS MSR */ err = rdmsr_safe_on_cpu(data->id, MSR_IA32_THERM_STATUS, &eax, &edx); if (err) { dev_err(&pdev->dev, "Unable to access THERM_STATUS MSR, giving up\n"); goto exit_free; } /* Check if we have problem with errata AE18 of Core processors: Readings might stop update when processor visited too deep sleep, fixed for stepping D0 (6EC). */ if ((c->x86_model == 0xe) && (c->x86_mask < 0xc)) { /* check for microcode update */ rdmsr_on_cpu(data->id, MSR_IA32_UCODE_REV, &eax, &edx); if (edx < 0x39) { err = -ENODEV; dev_err(&pdev->dev, "Errata AE18 not fixed, update BIOS or " "microcode of the CPU!\n"); goto exit_free; } } data->tjmax = adjust_tjmax(c, data->id, &pdev->dev); platform_set_drvdata(pdev, data); /* read the still undocumented IA32_TEMPERATURE_TARGET it exists on older CPUs but not in this register, Atoms don't have it either */ if ((c->x86_model > 0xe) && (c->x86_model != 0x1c)) { err = rdmsr_safe_on_cpu(data->id, 0x1a2, &eax, &edx); if (err) { dev_warn(&pdev->dev, "Unable to read" " IA32_TEMPERATURE_TARGET MSR\n"); } else { data->ttarget = data->tjmax - (((eax >> 8) & 0xff) * 1000); err = device_create_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr); if (err) goto exit_free; } } if ((err = sysfs_create_group(&pdev->dev.kobj, &coretemp_group))) goto exit_dev; data->hwmon_dev = hwmon_device_register(&pdev->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); dev_err(&pdev->dev, "Class registration failed (%d)\n", err); goto exit_class; } return 0; exit_class: sysfs_remove_group(&pdev->dev.kobj, &coretemp_group); exit_dev: device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr); exit_free: kfree(data); exit: return err; } static int __devexit coretemp_remove(struct platform_device *pdev) { struct coretemp_data *data = platform_get_drvdata(pdev); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&pdev->dev.kobj, &coretemp_group); device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr); platform_set_drvdata(pdev, NULL); kfree(data); return 0; } static struct platform_driver coretemp_driver = { .driver = { .owner = THIS_MODULE, .name = DRVNAME, }, .probe = coretemp_probe, .remove = __devexit_p(coretemp_remove), }; struct pdev_entry { struct list_head list; struct platform_device *pdev; unsigned int cpu; }; static LIST_HEAD(pdev_list); static DEFINE_MUTEX(pdev_list_mutex); static int __cpuinit coretemp_device_add(unsigned int cpu) { int err; struct platform_device *pdev; struct pdev_entry *pdev_entry; pdev = platform_device_alloc(DRVNAME, cpu); if (!pdev) { err = -ENOMEM; printk(KERN_ERR DRVNAME ": Device allocation failed\n"); goto exit; } pdev_entry = kzalloc(sizeof(struct pdev_entry), GFP_KERNEL); if (!pdev_entry) { err = -ENOMEM; goto exit_device_put; } err = platform_device_add(pdev); if (err) { printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n", err); goto exit_device_free; } pdev_entry->pdev = pdev; pdev_entry->cpu = cpu; mutex_lock(&pdev_list_mutex); list_add_tail(&pdev_entry->list, &pdev_list); mutex_unlock(&pdev_list_mutex); return 0; exit_device_free: kfree(pdev_entry); exit_device_put: platform_device_put(pdev); exit: return err; } #ifdef CONFIG_HOTPLUG_CPU static void coretemp_device_remove(unsigned int cpu) { struct pdev_entry *p, *n; mutex_lock(&pdev_list_mutex); 
list_for_each_entry_safe(p, n, &pdev_list, list) { if (p->cpu == cpu) { platform_device_unregister(p->pdev); list_del(&p->list); kfree(p); } } mutex_unlock(&pdev_list_mutex); } static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long) hcpu; switch (action) { case CPU_ONLINE: case CPU_DOWN_FAILED: coretemp_device_add(cpu); break; case CPU_DOWN_PREPARE: coretemp_device_remove(cpu); break; } return NOTIFY_OK; } static struct notifier_block coretemp_cpu_notifier __refdata = { .notifier_call = coretemp_cpu_callback, }; #endif /* !CONFIG_HOTPLUG_CPU */ static int __init coretemp_init(void) { int i, err = -ENODEV; struct pdev_entry *p, *n; /* quick check if we run Intel */ if (cpu_data(0).x86_vendor != X86_VENDOR_INTEL) goto exit; err = platform_driver_register(&coretemp_driver); if (err) goto exit; for_each_online_cpu(i) { struct cpuinfo_x86 *c = &cpu_data(i); /* check if family 6, models 0xe (Pentium M DC), 0xf (Core 2 DC 65nm), 0x16 (Core 2 SC 65nm), 0x17 (Penryn 45nm), 0x1a (Nehalem), 0x1c (Atom), 0x1e (Lynnfield) */ if ((c->cpuid_level < 0) || (c->x86 != 0x6) || !((c->x86_model == 0xe) || (c->x86_model == 0xf) || (c->x86_model == 0x16) || (c->x86_model == 0x17) || (c->x86_model == 0x1a) || (c->x86_model == 0x1c) || (c->x86_model == 0x1e))) { /* supported CPU not found, but report the unknown family 6 CPU */ if ((c->x86 == 0x6) && (c->x86_model > 0xf)) printk(KERN_WARNING DRVNAME ": Unknown CPU " "model %x\n", c->x86_model); continue; } err = coretemp_device_add(i); if (err) goto exit_devices_unreg; } if (list_empty(&pdev_list)) { err = -ENODEV; goto exit_driver_unreg; } #ifdef CONFIG_HOTPLUG_CPU register_hotcpu_notifier(&coretemp_cpu_notifier); #endif return 0; exit_devices_unreg: mutex_lock(&pdev_list_mutex); list_for_each_entry_safe(p, n, &pdev_list, list) { platform_device_unregister(p->pdev); list_del(&p->list); kfree(p); } mutex_unlock(&pdev_list_mutex); exit_driver_unreg: platform_driver_unregister(&coretemp_driver); exit: return err; } static void __exit coretemp_exit(void) { struct pdev_entry *p, *n; #ifdef CONFIG_HOTPLUG_CPU unregister_hotcpu_notifier(&coretemp_cpu_notifier); #endif mutex_lock(&pdev_list_mutex); list_for_each_entry_safe(p, n, &pdev_list, list) { platform_device_unregister(p->pdev); list_del(&p->list); kfree(p); } mutex_unlock(&pdev_list_mutex); platform_driver_unregister(&coretemp_driver); } MODULE_AUTHOR("Rudolf Marek <r.marek@assembler.cz>"); MODULE_DESCRIPTION("Intel Core temperature monitor"); MODULE_LICENSE("GPL"); module_init(coretemp_init) module_exit(coretemp_exit)
gpl-2.0
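
coretemp_update_device() above derives the temperature from the raw IA32_THERM_STATUS low word: bit 31 flags a valid reading, bits 22:16 give degrees below TjMax, and bit 5 is the out-of-spec log. A standalone sketch decoding a sample value the same way (the MSR value and the TjMax of 100 C are illustrative, not read from hardware):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t eax = 0x88430000; /* hypothetical MSR low word */
	int tjmax = 100000;        /* millidegrees C, driver default */

	if (eax & 0x80000000) {                    /* reading-valid bit  */
		int readout = (eax >> 16) & 0x7f;  /* degrees below TjMax */
		int temp = tjmax - readout * 1000; /* millidegrees C      */
		int alarm = (eax >> 5) & 1;        /* out-of-spec log     */

		printf("temp=%d mC alarm=%d\n", temp, alarm); /* 33000, 0 */
	} else {
		printf("reading not valid\n");
	}
	return 0;
}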
smartassfox/rk-29
drivers/ata/pata_it8213.c
556
8423
/* * pata_it8213.c - iTE Tech. Inc. IT8213 PATA driver * * The IT8213 is a very Intel ICH like device for timing purposes, having * a similar register layout and the same split clock arrangement. Cable * detection is different, and it does not have slave channels or all the * clutter of later ICH/SATA setups. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/device.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #include <linux/ata.h> #define DRV_NAME "pata_it8213" #define DRV_VERSION "0.0.3" /** * it8213_pre_reset - check for 40/80 pin * @link: link * @deadline: deadline jiffies for the operation * * Filter out ports by the enable bits before doing the normal reset * and probe. */ static int it8213_pre_reset(struct ata_link *link, unsigned long deadline) { static const struct pci_bits it8213_enable_bits[] = { { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */ }; struct ata_port *ap = link->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); if (!pci_test_config_bits(pdev, &it8213_enable_bits[ap->port_no])) return -ENOENT; return ata_sff_prereset(link, deadline); } /** * it8213_cable_detect - check for 40/80 pin * @ap: Port * * Perform cable detection for the 8213 ATA interface. This is * different to the PIIX arrangement */ static int it8213_cable_detect(struct ata_port *ap) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); u8 tmp; pci_read_config_byte(pdev, 0x42, &tmp); if (tmp & 2) /* The initial docs are incorrect */ return ATA_CBL_PATA40; return ATA_CBL_PATA80; } /** * it8213_set_piomode - Initialize host controller PATA PIO timings * @ap: Port whose timings we are configuring * @adev: Device whose timings we are configuring * * Set PIO mode for device, in host controller PCI config space. * * LOCKING: * None (inherited from caller). */ static void it8213_set_piomode (struct ata_port *ap, struct ata_device *adev) { unsigned int pio = adev->pio_mode - XFER_PIO_0; struct pci_dev *dev = to_pci_dev(ap->host->dev); unsigned int idetm_port= ap->port_no ? 0x42 : 0x40; u16 idetm_data; int control = 0; /* * See Intel Document 298600-004 for the timing programing rules * for PIIX/ICH. The 8213 is a clone so very similar */ static const /* ISP RTC */ u8 timings[][2] = { { 0, 0 }, { 0, 0 }, { 1, 0 }, { 2, 1 }, { 2, 3 }, }; if (pio > 2) control |= 1; /* TIME1 enable */ if (ata_pio_need_iordy(adev)) /* PIO 3/4 require IORDY */ control |= 2; /* IORDY enable */ /* Bit 2 is set for ATAPI on the IT8213 - reverse of ICH/PIIX */ if (adev->class != ATA_DEV_ATA) control |= 4; pci_read_config_word(dev, idetm_port, &idetm_data); /* Enable PPE, IE and TIME as appropriate */ if (adev->devno == 0) { idetm_data &= 0xCCF0; idetm_data |= control; idetm_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8); } else { u8 slave_data; idetm_data &= 0xCC0F; idetm_data |= (control << 4); /* Slave timing in separate register */ pci_read_config_byte(dev, 0x44, &slave_data); slave_data &= 0xF0; slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << 4; pci_write_config_byte(dev, 0x44, slave_data); } idetm_data |= 0x4000; /* Ensure SITRE is enabled */ pci_write_config_word(dev, idetm_port, idetm_data); } /** * it8213_set_dmamode - Initialize host controller PATA DMA timings * @ap: Port whose timings we are configuring * @adev: Device to program * * Set UDMA/MWDMA mode for device, in host controller PCI config space. * This device is basically an ICH alike. 
* * LOCKING: * None (inherited from caller). */ static void it8213_set_dmamode (struct ata_port *ap, struct ata_device *adev) { struct pci_dev *dev = to_pci_dev(ap->host->dev); u16 master_data; u8 speed = adev->dma_mode; int devid = adev->devno; u8 udma_enable; static const /* ISP RTC */ u8 timings[][2] = { { 0, 0 }, { 0, 0 }, { 1, 0 }, { 2, 1 }, { 2, 3 }, }; pci_read_config_word(dev, 0x40, &master_data); pci_read_config_byte(dev, 0x48, &udma_enable); if (speed >= XFER_UDMA_0) { unsigned int udma = adev->dma_mode - XFER_UDMA_0; u16 udma_timing; u16 ideconf; int u_clock, u_speed; /* Clocks follow the PIIX style */ u_speed = min(2 - (udma & 1), udma); if (udma == 5) u_clock = 0x1000; /* 100Mhz */ else if (udma > 2) u_clock = 1; /* 66Mhz */ else u_clock = 0; /* 33Mhz */ udma_enable |= (1 << devid); /* Load the UDMA mode number */ pci_read_config_word(dev, 0x4A, &udma_timing); udma_timing &= ~(3 << (4 * devid)); udma_timing |= (udma & 3) << (4 * devid); pci_write_config_word(dev, 0x4A, udma_timing); /* Load the clock selection */ pci_read_config_word(dev, 0x54, &ideconf); ideconf &= ~(0x1001 << devid); ideconf |= u_clock << devid; pci_write_config_word(dev, 0x54, ideconf); } else { /* * MWDMA is driven by the PIO timings. We must also enable * IORDY unconditionally along with TIME1. PPE has already * been set when the PIO timing was set. */ unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0; unsigned int control; u8 slave_data; static const unsigned int needed_pio[3] = { XFER_PIO_0, XFER_PIO_3, XFER_PIO_4 }; int pio = needed_pio[mwdma] - XFER_PIO_0; control = 3; /* IORDY|TIME1 */ /* If the drive MWDMA is faster than it can do PIO then we must force PIO into PIO0 */ if (adev->pio_mode < needed_pio[mwdma]) /* Enable DMA timing only */ control |= 8; /* PIO cycles in PIO0 */ if (devid) { /* Slave */ master_data &= 0xFF4F; /* Mask out IORDY|TIME1|DMAONLY */ master_data |= control << 4; pci_read_config_byte(dev, 0x44, &slave_data); slave_data &= (0x0F + 0xE1 * ap->port_no); /* Load the matching timing */ slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0); pci_write_config_byte(dev, 0x44, slave_data); } else { /* Master */ master_data &= 0xCCF4; /* Mask out IORDY|TIME1|DMAONLY and master timing bits */ master_data |= control; master_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8); } udma_enable &= ~(1 << devid); pci_write_config_word(dev, 0x40, master_data); } pci_write_config_byte(dev, 0x48, udma_enable); } static struct scsi_host_template it8213_sht = { ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations it8213_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = it8213_cable_detect, .set_piomode = it8213_set_piomode, .set_dmamode = it8213_set_dmamode, .prereset = it8213_pre_reset, }; /** * it8213_init_one - Register 8213 ATA PCI device with kernel services * @pdev: PCI device to register * @ent: Entry in it8213_pci_tbl matching with @pdev * * Called from kernel PCI layer. * * LOCKING: * Inherited from PCI layer (may sleep). * * RETURNS: * Zero on success, or -ERRNO value. */ static int it8213_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) { static int printed_version; static const struct ata_port_info info = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA4, /* FIXME: want UDMA 100? 
*/ .port_ops = &it8213_ops, }; /* Current IT8213 stuff is single port */ const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info }; if (!printed_version++) dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); return ata_pci_sff_init_one(pdev, ppi, &it8213_sht, NULL); } static const struct pci_device_id it8213_pci_tbl[] = { { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8213), }, { } /* terminate list */ }; static struct pci_driver it8213_pci_driver = { .name = DRV_NAME, .id_table = it8213_pci_tbl, .probe = it8213_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM .suspend = ata_pci_device_suspend, .resume = ata_pci_device_resume, #endif }; static int __init it8213_init(void) { return pci_register_driver(&it8213_pci_driver); } static void __exit it8213_exit(void) { pci_unregister_driver(&it8213_pci_driver); } module_init(it8213_init); module_exit(it8213_exit); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("SCSI low-level driver for the ITE 8213"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, it8213_pci_tbl); MODULE_VERSION(DRV_VERSION);
gpl-2.0
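
it8213_set_piomode() above builds the master IDETIM word from a control nibble plus the ISP/RTC clock counts in bits 15:8. A standalone sketch assembling that word for PIO4 (the starting register value is made up; the timing table and bit layout follow the driver):

#include <stdint.h>
#include <stdio.h>

static const uint8_t timings[][2] = { /* ISP, RTC per PIO mode */
	{ 0, 0 }, { 0, 0 }, { 1, 0 }, { 2, 1 }, { 2, 3 },
};

int main(void)
{
	int pio = 4;             /* PIO4 */
	uint16_t idetm = 0x0000; /* pretend prior register contents */
	int control = 0;

	if (pio > 2)
		control |= 1;    /* TIME1 enable */
	control |= 2;            /* IORDY, required for PIO3/4 */

	idetm &= 0xCCF0;         /* keep slave fields, clear master's */
	idetm |= control;
	idetm |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
	idetm |= 0x4000;         /* ensure SITRE is enabled */

	printf("IDETIM = 0x%04x\n", (unsigned)idetm); /* prints 0x6303 */
	return 0;
}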
tobetter/hardkernel-linux
mm/page-writeback.c
1068
69011
/* * mm/page-writeback.c * * Copyright (C) 2002, Linus Torvalds. * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> * * Contains functions related to writing back dirty pages at the * address_space level. * * 10Apr2002 Andrew Morton * Initial version */ #include <linux/kernel.h> #include <linux/export.h> #include <linux/spinlock.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/slab.h> #include <linux/pagemap.h> #include <linux/writeback.h> #include <linux/init.h> #include <linux/backing-dev.h> #include <linux/task_io_accounting_ops.h> #include <linux/blkdev.h> #include <linux/mpage.h> #include <linux/rmap.h> #include <linux/percpu.h> #include <linux/notifier.h> #include <linux/smp.h> #include <linux/sysctl.h> #include <linux/cpu.h> #include <linux/syscalls.h> #include <linux/buffer_head.h> /* __set_page_dirty_buffers */ #include <linux/pagevec.h> #include <trace/events/writeback.h> /* * Sleep at most 200ms at a time in balance_dirty_pages(). */ #define MAX_PAUSE max(HZ/5, 1) /* * Try to keep balance_dirty_pages() call intervals higher than this many pages * by raising pause time to max_pause when falls below it. */ #define DIRTY_POLL_THRESH (128 >> (PAGE_SHIFT - 10)) /* * Estimate write bandwidth at 200ms intervals. */ #define BANDWIDTH_INTERVAL max(HZ/5, 1) #define RATELIMIT_CALC_SHIFT 10 /* * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited * will look to see if it needs to force writeback or throttling. */ static long ratelimit_pages = 32; /* The following parameters are exported via /proc/sys/vm */ /* * Start background writeback (via writeback threads) at this percentage */ int dirty_background_ratio = 10; /* * dirty_background_bytes starts at 0 (disabled) so that it is a function of * dirty_background_ratio * the amount of dirtyable memory */ unsigned long dirty_background_bytes; /* * free highmem will not be subtracted from the total free memory * for calculating free ratios if vm_highmem_is_dirtyable is true */ int vm_highmem_is_dirtyable; /* * The generator of dirty data starts writeback at this percentage */ int vm_dirty_ratio = 20; /* * vm_dirty_bytes starts at 0 (disabled) so that it is a function of * vm_dirty_ratio * the amount of dirtyable memory */ unsigned long vm_dirty_bytes; /* * The interval between `kupdate'-style writebacks */ unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */ EXPORT_SYMBOL_GPL(dirty_writeback_interval); /* * The longest time for which data is allowed to remain dirty */ unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */ /* * Flag that makes the machine dump writes/reads and block dirtyings. */ int block_dump; /* * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies: * a full sync is triggered after this time elapses without any disk activity. */ int laptop_mode; EXPORT_SYMBOL(laptop_mode); /* End of sysctl-exported parameters */ unsigned long global_dirty_limit; /* * Scale the writeback cache size proportional to the relative writeout speeds. * * We do this by keeping a floating proportion between BDIs, based on page * writeback completions [end_page_writeback()]. Those devices that write out * pages fastest will get the larger share, while the slower will get a smaller * share. * * We use page writeout completions because we are interested in getting rid of * dirty pages. Having them written out is the primary goal. 
* * We introduce a concept of time, a period over which we measure these events, * because demand can/will vary over time. The length of this period itself is * measured in page writeback completions. * */ static struct prop_descriptor vm_completions; /* * Work out the current dirty-memory clamping and background writeout * thresholds. * * The main aim here is to lower them aggressively if there is a lot of mapped * memory around. To avoid stressing page reclaim with lots of unreclaimable * pages. It is better to clamp down on writers than to start swapping, and * performing lots of scanning. * * We only allow 1/2 of the currently-unmapped memory to be dirtied. * * We don't permit the clamping level to fall below 5% - that is getting rather * excessive. * * We make sure that the background writeout level is below the adjusted * clamping level. */ /* * In a memory zone, there is a certain amount of pages we consider * available for the page cache, which is essentially the number of * free and reclaimable pages, minus some zone reserves to protect * lowmem and the ability to uphold the zone's watermarks without * requiring writeback. * * This number of dirtyable pages is the base value of which the * user-configurable dirty ratio is the effictive number of pages that * are allowed to be actually dirtied. Per individual zone, or * globally by using the sum of dirtyable pages over all zones. * * Because the user is allowed to specify the dirty limit globally as * absolute number of bytes, calculating the per-zone dirty limit can * require translating the configured limit into a percentage of * global dirtyable memory first. */ static unsigned long highmem_dirtyable_memory(unsigned long total) { #ifdef CONFIG_HIGHMEM int node; unsigned long x = 0; for_each_node_state(node, N_HIGH_MEMORY) { struct zone *z = &NODE_DATA(node)->node_zones[ZONE_HIGHMEM]; x += zone_page_state(z, NR_FREE_PAGES) + zone_reclaimable_pages(z) - z->dirty_balance_reserve; } /* * Make sure that the number of highmem pages is never larger * than the number of the total dirtyable memory. This can only * occur in very strange VM situations but we want to make sure * that this does not occur. */ return min(x, total); #else return 0; #endif } /** * global_dirtyable_memory - number of globally dirtyable pages * * Returns the global number of pages potentially available for dirty * page cache. This is the base value for the global dirty limits. */ unsigned long global_dirtyable_memory(void) { unsigned long x; x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages() - dirty_balance_reserve; if (!vm_highmem_is_dirtyable) x -= highmem_dirtyable_memory(x); return x + 1; /* Ensure that we never return 0 */ } /* * global_dirty_limits - background-writeback and dirty-throttling thresholds * * Calculate the dirty thresholds based on sysctl parameters * - vm.dirty_background_ratio or vm.dirty_background_bytes * - vm.dirty_ratio or vm.dirty_bytes * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and * real-time tasks. 
*/ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty) { unsigned long background; unsigned long dirty; unsigned long uninitialized_var(available_memory); struct task_struct *tsk; if (!vm_dirty_bytes || !dirty_background_bytes) available_memory = global_dirtyable_memory(); if (vm_dirty_bytes) dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE); else dirty = (vm_dirty_ratio * available_memory) / 100; if (dirty_background_bytes) background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE); else background = (dirty_background_ratio * available_memory) / 100; if (background >= dirty) background = dirty / 2; tsk = current; if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) { background += background / 4; dirty += dirty / 4; } *pbackground = background; *pdirty = dirty; trace_global_dirty_state(background, dirty); } /** * zone_dirtyable_memory - number of dirtyable pages in a zone * @zone: the zone * * Returns the zone's number of pages potentially available for dirty * page cache. This is the base value for the per-zone dirty limits. */ static unsigned long zone_dirtyable_memory(struct zone *zone) { /* * The effective global number of dirtyable pages may exclude * highmem as a big-picture measure to keep the ratio between * dirty memory and lowmem reasonable. * * But this function is purely about the individual zone and a * highmem zone can hold its share of dirty pages, so we don't * care about vm_highmem_is_dirtyable here. */ return zone_page_state(zone, NR_FREE_PAGES) + zone_reclaimable_pages(zone) - zone->dirty_balance_reserve; } /** * zone_dirty_limit - maximum number of dirty pages allowed in a zone * @zone: the zone * * Returns the maximum number of dirty pages allowed in a zone, based * on the zone's dirtyable memory. */ static unsigned long zone_dirty_limit(struct zone *zone) { unsigned long zone_memory = zone_dirtyable_memory(zone); struct task_struct *tsk = current; unsigned long dirty; if (vm_dirty_bytes) dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) * zone_memory / global_dirtyable_memory(); else dirty = vm_dirty_ratio * zone_memory / 100; if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) dirty += dirty / 4; return dirty; } /** * zone_dirty_ok - tells whether a zone is within its dirty limits * @zone: the zone to check * * Returns %true when the dirty pages in @zone are within the zone's * dirty limit, %false if the limit is exceeded. */ bool zone_dirty_ok(struct zone *zone) { unsigned long limit = zone_dirty_limit(zone); return zone_page_state(zone, NR_FILE_DIRTY) + zone_page_state(zone, NR_UNSTABLE_NFS) + zone_page_state(zone, NR_WRITEBACK) <= limit; } /* * couple the period to the dirty_ratio: * * period/2 ~ roundup_pow_of_two(dirty limit) */ static int calc_period_shift(void) { unsigned long dirty_total; if (vm_dirty_bytes) dirty_total = vm_dirty_bytes / PAGE_SIZE; else dirty_total = (vm_dirty_ratio * global_dirtyable_memory()) / 100; return 2 + ilog2(dirty_total - 1); } /* * update the period when the dirty threshold changes. 
*/ static void update_completion_period(void) { int shift = calc_period_shift(); prop_change_shift(&vm_completions, shift); writeback_set_ratelimit(); } int dirty_background_ratio_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int ret; ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); if (ret == 0 && write) dirty_background_bytes = 0; return ret; } int dirty_background_bytes_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int ret; ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); if (ret == 0 && write) dirty_background_ratio = 0; return ret; } int dirty_ratio_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int old_ratio = vm_dirty_ratio; int ret; ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); if (ret == 0 && write && vm_dirty_ratio != old_ratio) { update_completion_period(); vm_dirty_bytes = 0; } return ret; } int dirty_bytes_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { unsigned long old_bytes = vm_dirty_bytes; int ret; ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); if (ret == 0 && write && vm_dirty_bytes != old_bytes) { update_completion_period(); vm_dirty_ratio = 0; } return ret; } /* * Increment the BDI's writeout completion count and the global writeout * completion count. Called from test_clear_page_writeback(). */ static inline void __bdi_writeout_inc(struct backing_dev_info *bdi) { __inc_bdi_stat(bdi, BDI_WRITTEN); __prop_inc_percpu_max(&vm_completions, &bdi->completions, bdi->max_prop_frac); } void bdi_writeout_inc(struct backing_dev_info *bdi) { unsigned long flags; local_irq_save(flags); __bdi_writeout_inc(bdi); local_irq_restore(flags); } EXPORT_SYMBOL_GPL(bdi_writeout_inc); /* * Obtain an accurate fraction of the BDI's portion. */ static void bdi_writeout_fraction(struct backing_dev_info *bdi, long *numerator, long *denominator) { prop_fraction_percpu(&vm_completions, &bdi->completions, numerator, denominator); } /* * bdi_min_ratio keeps the sum of the minimum dirty shares of all * registered backing devices, which, for obvious reasons, can not * exceed 100%. */ static unsigned int bdi_min_ratio; int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio) { int ret = 0; spin_lock_bh(&bdi_lock); if (min_ratio > bdi->max_ratio) { ret = -EINVAL; } else { min_ratio -= bdi->min_ratio; if (bdi_min_ratio + min_ratio < 100) { bdi_min_ratio += min_ratio; bdi->min_ratio += min_ratio; } else { ret = -EINVAL; } } spin_unlock_bh(&bdi_lock); return ret; } int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio) { int ret = 0; if (max_ratio > 100) return -EINVAL; spin_lock_bh(&bdi_lock); if (bdi->min_ratio > max_ratio) { ret = -EINVAL; } else { bdi->max_ratio = max_ratio; bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100; } spin_unlock_bh(&bdi_lock); return ret; } EXPORT_SYMBOL(bdi_set_max_ratio); static unsigned long dirty_freerun_ceiling(unsigned long thresh, unsigned long bg_thresh) { return (thresh + bg_thresh) / 2; } static unsigned long hard_dirty_limit(unsigned long thresh) { return max(thresh, global_dirty_limit); } /** * bdi_dirty_limit - @bdi's share of dirty throttling threshold * @bdi: the backing_dev_info to query * @dirty: global dirty limit in pages * * Returns @bdi's dirty limit in pages. 
The term "dirty" in the context of * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages. * * Note that balance_dirty_pages() will only seriously take it as a hard limit * when sleeping max_pause per page is not enough to keep the dirty pages under * control. For example, when the device is completely stalled due to some error * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key. * In the other normal situations, it acts more gently by throttling the tasks * more (rather than completely block them) when the bdi dirty pages go high. * * It allocates high/low dirty limits to fast/slow devices, in order to prevent * - starving fast devices * - piling up dirty pages (that will take long time to sync) on slow devices * * The bdi's share of dirty limit will be adapting to its throughput and * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set. */ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty) { u64 bdi_dirty; long numerator, denominator; /* * Calculate this BDI's share of the dirty ratio. */ bdi_writeout_fraction(bdi, &numerator, &denominator); bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100; bdi_dirty *= numerator; do_div(bdi_dirty, denominator); bdi_dirty += (dirty * bdi->min_ratio) / 100; if (bdi_dirty > (dirty * bdi->max_ratio) / 100) bdi_dirty = dirty * bdi->max_ratio / 100; return bdi_dirty; } /* * Dirty position control. * * (o) global/bdi setpoints * * We want the dirty pages be balanced around the global/bdi setpoints. * When the number of dirty pages is higher/lower than the setpoint, the * dirty position control ratio (and hence task dirty ratelimit) will be * decreased/increased to bring the dirty pages back to the setpoint. * * pos_ratio = 1 << RATELIMIT_CALC_SHIFT * * if (dirty < setpoint) scale up pos_ratio * if (dirty > setpoint) scale down pos_ratio * * if (bdi_dirty < bdi_setpoint) scale up pos_ratio * if (bdi_dirty > bdi_setpoint) scale down pos_ratio * * task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT * * (o) global control line * * ^ pos_ratio * | * | |<===== global dirty control scope ======>| * 2.0 .............* * | .* * | . * * | . * * | . * * | . * * | . * * 1.0 ................................* * | . . * * | . . * * | . . * * | . . * * | . . * * 0 +------------.------------------.----------------------*-------------> * freerun^ setpoint^ limit^ dirty pages * * (o) bdi control line * * ^ pos_ratio * | * | * * | * * | * * | * * | * |<=========== span ============>| * 1.0 .......................* * | . * * | . * * | . * * | . * * | . * * | . * * | . * * | . * * | . * * | . * * | . * * 1/4 ...............................................* * * * * * * * * * * * * | . . * | . . * | . . * 0 +----------------------.-------------------------------.-------------> * bdi_setpoint^ x_intercept^ * * The bdi control line won't drop below pos_ratio=1/4, so that bdi_dirty can * be smoothly throttled down to normal if it starts high in situations like * - start writing to a slow SD card and a fast disk at the same time. The SD * card's bdi_dirty may rush to many times higher than bdi_setpoint. 
* - the bdi dirty thresh drops quickly due to change of JBOD workload */ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi, unsigned long thresh, unsigned long bg_thresh, unsigned long dirty, unsigned long bdi_thresh, unsigned long bdi_dirty) { unsigned long write_bw = bdi->avg_write_bandwidth; unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh); unsigned long limit = hard_dirty_limit(thresh); unsigned long x_intercept; unsigned long setpoint; /* dirty pages' target balance point */ unsigned long bdi_setpoint; unsigned long span; long long pos_ratio; /* for scaling up/down the rate limit */ long x; if (unlikely(dirty >= limit)) return 0; /* * global setpoint * * setpoint - dirty 3 * f(dirty) := 1.0 + (----------------) * limit - setpoint * * it's a 3rd order polynomial that subjects to * * (1) f(freerun) = 2.0 => rampup dirty_ratelimit reasonably fast * (2) f(setpoint) = 1.0 => the balance point * (3) f(limit) = 0 => the hard limit * (4) df/dx <= 0 => negative feedback control * (5) the closer to setpoint, the smaller |df/dx| (and the reverse) * => fast response on large errors; small oscillation near setpoint */ setpoint = (freerun + limit) / 2; x = div_s64((setpoint - dirty) << RATELIMIT_CALC_SHIFT, limit - setpoint + 1); pos_ratio = x; pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; pos_ratio += 1 << RATELIMIT_CALC_SHIFT; /* * We have computed basic pos_ratio above based on global situation. If * the bdi is over/under its share of dirty pages, we want to scale * pos_ratio further down/up. That is done by the following mechanism. */ /* * bdi setpoint * * f(bdi_dirty) := 1.0 + k * (bdi_dirty - bdi_setpoint) * * x_intercept - bdi_dirty * := -------------------------- * x_intercept - bdi_setpoint * * The main bdi control line is a linear function that subjects to * * (1) f(bdi_setpoint) = 1.0 * (2) k = - 1 / (8 * write_bw) (in single bdi case) * or equally: x_intercept = bdi_setpoint + 8 * write_bw * * For single bdi case, the dirty pages are observed to fluctuate * regularly within range * [bdi_setpoint - write_bw/2, bdi_setpoint + write_bw/2] * for various filesystems, where (2) can yield in a reasonable 12.5% * fluctuation range for pos_ratio. * * For JBOD case, bdi_thresh (not bdi_dirty!) could fluctuate up to its * own size, so move the slope over accordingly and choose a slope that * yields 100% pos_ratio fluctuation on suddenly doubled bdi_thresh. */ if (unlikely(bdi_thresh > thresh)) bdi_thresh = thresh; /* * It's very possible that bdi_thresh is close to 0 not because the * device is slow, but that it has remained inactive for long time. * Honour such devices a reasonable good (hopefully IO efficient) * threshold, so that the occasional writes won't be blocked and active * writes can rampup the threshold quickly. */ bdi_thresh = max(bdi_thresh, (limit - dirty) / 8); /* * scale global setpoint to bdi's: * bdi_setpoint = setpoint * bdi_thresh / thresh */ x = div_u64((u64)bdi_thresh << 16, thresh + 1); bdi_setpoint = setpoint * (u64)x >> 16; /* * Use span=(8*write_bw) in single bdi case as indicated by * (thresh - bdi_thresh ~= 0) and transit to bdi_thresh in JBOD case. 
* * bdi_thresh thresh - bdi_thresh * span = ---------- * (8 * write_bw) + ------------------- * bdi_thresh * thresh thresh */ span = (thresh - bdi_thresh + 8 * write_bw) * (u64)x >> 16; x_intercept = bdi_setpoint + span; if (bdi_dirty < x_intercept - span / 4) { pos_ratio = div_u64(pos_ratio * (x_intercept - bdi_dirty), x_intercept - bdi_setpoint + 1); } else pos_ratio /= 4; /* * bdi reserve area, safeguard against dirty pool underrun and disk idle * It may push the desired control point of global dirty pages higher * than setpoint. */ x_intercept = bdi_thresh / 2; if (bdi_dirty < x_intercept) { if (bdi_dirty > x_intercept / 8) pos_ratio = div_u64(pos_ratio * x_intercept, bdi_dirty); else pos_ratio *= 8; } return pos_ratio; } static void bdi_update_write_bandwidth(struct backing_dev_info *bdi, unsigned long elapsed, unsigned long written) { const unsigned long period = roundup_pow_of_two(3 * HZ); unsigned long avg = bdi->avg_write_bandwidth; unsigned long old = bdi->write_bandwidth; u64 bw; /* * bw = written * HZ / elapsed * * bw * elapsed + write_bandwidth * (period - elapsed) * write_bandwidth = --------------------------------------------------- * period */ bw = written - bdi->written_stamp; bw *= HZ; if (unlikely(elapsed > period)) { do_div(bw, elapsed); avg = bw; goto out; } bw += (u64)bdi->write_bandwidth * (period - elapsed); bw >>= ilog2(period); /* * one more level of smoothing, for filtering out sudden spikes */ if (avg > old && old >= (unsigned long)bw) avg -= (avg - old) >> 3; if (avg < old && old <= (unsigned long)bw) avg += (old - avg) >> 3; out: bdi->write_bandwidth = bw; bdi->avg_write_bandwidth = avg; } /* * The global dirtyable memory and dirty threshold could be suddenly knocked * down by a large amount (eg. on the startup of KVM in a swapless system). * This may throw the system into deep dirty exceeded state and throttle * heavy/light dirtiers alike. To retain good responsiveness, maintain * global_dirty_limit for tracking slowly down to the knocked down dirty * threshold. */ static void update_dirty_limit(unsigned long thresh, unsigned long dirty) { unsigned long limit = global_dirty_limit; /* * Follow up in one step. */ if (limit < thresh) { limit = thresh; goto update; } /* * Follow down slowly. Use the higher one as the target, because thresh * may drop below dirty. This is exactly the reason to introduce * global_dirty_limit which is guaranteed to lie above the dirty pages. */ thresh = max(thresh, dirty); if (limit > thresh) { limit -= (limit - thresh) >> 5; goto update; } return; update: global_dirty_limit = limit; } static void global_update_bandwidth(unsigned long thresh, unsigned long dirty, unsigned long now) { static DEFINE_SPINLOCK(dirty_lock); static unsigned long update_time; /* * check locklessly first to optimize away locking for the most time */ if (time_before(now, update_time + BANDWIDTH_INTERVAL)) return; spin_lock(&dirty_lock); if (time_after_eq(now, update_time + BANDWIDTH_INTERVAL)) { update_dirty_limit(thresh, dirty); update_time = now; } spin_unlock(&dirty_lock); } /* * Maintain bdi->dirty_ratelimit, the base dirty throttle rate. * * Normal bdi tasks will be curbed at or below it in long term. * Obviously it should be around (write_bw / N) when there are N dd tasks. 
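 *
 * A hedged numeric sketch (made-up numbers): with write_bw ~= 100 MB/s
 * and N = 4 tasks dirtying flat out, the measured dirty_rate settles
 * near 100 MB/s, so the feedback below drives
 *
 *	dirty_ratelimit ~= write_bw / N = 25 MB/s
 *
 * and each task ends up throttled near that rate in steady state.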
 */
static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi,
				       unsigned long thresh,
				       unsigned long bg_thresh,
				       unsigned long dirty,
				       unsigned long bdi_thresh,
				       unsigned long bdi_dirty,
				       unsigned long dirtied,
				       unsigned long elapsed)
{
	unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh);
	unsigned long limit = hard_dirty_limit(thresh);
	unsigned long setpoint = (freerun + limit) / 2;
	unsigned long write_bw = bdi->avg_write_bandwidth;
	unsigned long dirty_ratelimit = bdi->dirty_ratelimit;
	unsigned long dirty_rate;
	unsigned long task_ratelimit;
	unsigned long balanced_dirty_ratelimit;
	unsigned long pos_ratio;
	unsigned long step;
	unsigned long x;

	/*
	 * The dirty rate will match the writeout rate in long term, except
	 * when dirty pages are truncated by userspace or re-dirtied by FS.
	 */
	dirty_rate = (dirtied - bdi->dirtied_stamp) * HZ / elapsed;

	pos_ratio = bdi_position_ratio(bdi, thresh, bg_thresh, dirty,
				       bdi_thresh, bdi_dirty);
	/*
	 * task_ratelimit reflects each dd's dirty rate for the past 200ms.
	 */
	task_ratelimit = (u64)dirty_ratelimit *
					pos_ratio >> RATELIMIT_CALC_SHIFT;
	task_ratelimit++; /* it helps rampup dirty_ratelimit from tiny values */

	/*
	 * A linear estimation of the "balanced" throttle rate. The theory is,
	 * if there are N dd tasks, each throttled at task_ratelimit, the bdi's
	 * dirty_rate will be measured to be (N * task_ratelimit). So the below
	 * formula will yield the balanced rate limit (write_bw / N).
	 *
	 * Note that the expanded form is not a pure rate feedback:
	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate)		     (1)
	 * but also takes pos_ratio into account:
	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio  (2)
	 *
	 * (1) is not realistic because pos_ratio also takes part in balancing
	 * the dirty rate.  Consider the state
	 *	pos_ratio = 0.5						     (3)
	 *	rate = 2 * (write_bw / N)				     (4)
	 * If (1) is used, it will get stuck in that state! Because each dd
	 * will be throttled at
	 *	task_ratelimit = pos_ratio * rate = (write_bw / N)	     (5)
	 * yielding
	 *	dirty_rate = N * task_ratelimit = write_bw		     (6)
	 * putting (6) into (1) we get
	 *	rate_(i+1) = rate_(i)					     (7)
	 *
	 * So we end up using (2) to always keep
	 *	rate_(i+1) ~= (write_bw / N)				     (8)
	 * regardless of the value of pos_ratio. As long as (8) is satisfied,
	 * pos_ratio is able to drive itself to 1.0, which is not only where
	 * the dirty count meets the setpoint, but also where the slope of
	 * pos_ratio is most flat and hence task_ratelimit is least fluctuated.
	 */
	balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
					   dirty_rate | 1);
	/*
	 * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw
	 */
	if (unlikely(balanced_dirty_ratelimit > write_bw))
		balanced_dirty_ratelimit = write_bw;

	/*
	 * We could safely do this and return immediately:
	 *
	 *	bdi->dirty_ratelimit = balanced_dirty_ratelimit;
	 *
	 * However, to get a more stable dirty_ratelimit, the more elaborate
	 * code below makes use of task_ratelimit to filter out singular
	 * points and limit the step size.
	 *
	 * The below code essentially only uses the relative value of
	 *
	 *	task_ratelimit - dirty_ratelimit
	 *	= (pos_ratio - 1) * dirty_ratelimit
	 *
	 * which reflects the direction and size of dirty position error.
	 */

	/*
	 * dirty_ratelimit will follow balanced_dirty_ratelimit iff
	 * task_ratelimit is on the same side of dirty_ratelimit, too.
	 * For example, when
	 * - dirty_ratelimit > balanced_dirty_ratelimit
	 * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
	 * lowering dirty_ratelimit will help meet both the position and rate
	 * control targets. Otherwise, don't update dirty_ratelimit if it will
	 * only help meet the rate target. After all, what the users ultimately
	 * feel and care about are a stable dirty rate and a small position
	 * error.
	 *
	 * |task_ratelimit - dirty_ratelimit| is used to limit the step size
	 * and filter out the singular points of balanced_dirty_ratelimit,
	 * which keeps jumping around randomly and can even leap far away at
	 * times due to the small 200ms estimation period of dirty_rate (we
	 * want to keep that period small to reduce time lags).
	 */
	step = 0;
	if (dirty < setpoint) {
		x = min(bdi->balanced_dirty_ratelimit,
			min(balanced_dirty_ratelimit, task_ratelimit));
		if (dirty_ratelimit < x)
			step = x - dirty_ratelimit;
	} else {
		x = max(bdi->balanced_dirty_ratelimit,
			max(balanced_dirty_ratelimit, task_ratelimit));
		if (dirty_ratelimit > x)
			step = dirty_ratelimit - x;
	}

	/*
	 * Don't pursue 100% rate matching. It's impossible since the balanced
	 * rate itself is constantly fluctuating. So decrease the track speed
	 * when it gets close to the target. Helps eliminate pointless tremors.
	 */
	step >>= dirty_ratelimit / (2 * step + 1);
	/*
	 * Limit the tracking speed to avoid overshooting.
	 */
	step = (step + 7) / 8;

	if (dirty_ratelimit < balanced_dirty_ratelimit)
		dirty_ratelimit += step;
	else
		dirty_ratelimit -= step;

	bdi->dirty_ratelimit = max(dirty_ratelimit, 1UL);
	bdi->balanced_dirty_ratelimit = balanced_dirty_ratelimit;

	trace_bdi_dirty_ratelimit(bdi, dirty_rate, task_ratelimit);
}

void __bdi_update_bandwidth(struct backing_dev_info *bdi,
			    unsigned long thresh,
			    unsigned long bg_thresh,
			    unsigned long dirty,
			    unsigned long bdi_thresh,
			    unsigned long bdi_dirty,
			    unsigned long start_time)
{
	unsigned long now = jiffies;
	unsigned long elapsed = now - bdi->bw_time_stamp;
	unsigned long dirtied;
	unsigned long written;

	/*
	 * rate-limit, only update once every 200ms.
	 */
	if (elapsed < BANDWIDTH_INTERVAL)
		return;

	dirtied = percpu_counter_read(&bdi->bdi_stat[BDI_DIRTIED]);
	written = percpu_counter_read(&bdi->bdi_stat[BDI_WRITTEN]);

	/*
	 * Skip quiet periods when disk bandwidth is under-utilized.
	 * (at least 1s idle time between two flusher runs)
	 */
	if (elapsed > HZ && time_before(bdi->bw_time_stamp, start_time))
		goto snapshot;

	if (thresh) {
		global_update_bandwidth(thresh, dirty, now);
		bdi_update_dirty_ratelimit(bdi, thresh, bg_thresh, dirty,
					   bdi_thresh, bdi_dirty,
					   dirtied, elapsed);
	}
	bdi_update_write_bandwidth(bdi, elapsed, written);

snapshot:
	bdi->dirtied_stamp = dirtied;
	bdi->written_stamp = written;
	bdi->bw_time_stamp = now;
}

static void bdi_update_bandwidth(struct backing_dev_info *bdi,
				 unsigned long thresh,
				 unsigned long bg_thresh,
				 unsigned long dirty,
				 unsigned long bdi_thresh,
				 unsigned long bdi_dirty,
				 unsigned long start_time)
{
	if (time_is_after_eq_jiffies(bdi->bw_time_stamp + BANDWIDTH_INTERVAL))
		return;
	spin_lock(&bdi->wb.list_lock);
	__bdi_update_bandwidth(bdi, thresh, bg_thresh, dirty,
			       bdi_thresh, bdi_dirty, start_time);
	spin_unlock(&bdi->wb.list_lock);
}

/*
 * After a task dirtied this many pages, balance_dirty_pages_ratelimited_nr()
 * will look to see if it needs to start dirty throttling.
 *
 * If dirty_poll_interval is too low, big NUMA machines will call the expensive
 * global_page_state() too often. So scale it near-sqrt to the safety margin
 * (the number of pages we may dirty without exceeding the dirty limits).
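 *
 * For example (illustrative numbers): with a safety margin of
 * thresh - dirty = 10000 pages, the poll interval below evaluates to
 *
 *	1 << (ilog2(10000) >> 1) = 1 << (13 >> 1) = 1 << 6 = 64 pages
 *
 * so a task revisits balance_dirty_pages() after dirtying 64 more pages.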
*/ static unsigned long dirty_poll_interval(unsigned long dirty, unsigned long thresh) { if (thresh > dirty) return 1UL << (ilog2(thresh - dirty) >> 1); return 1; } static long bdi_max_pause(struct backing_dev_info *bdi, unsigned long bdi_dirty) { long bw = bdi->avg_write_bandwidth; long t; /* * Limit pause time for small memory systems. If sleeping for too long * time, a small pool of dirty/writeback pages may go empty and disk go * idle. * * 8 serves as the safety ratio. */ t = bdi_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8)); t++; return min_t(long, t, MAX_PAUSE); } static long bdi_min_pause(struct backing_dev_info *bdi, long max_pause, unsigned long task_ratelimit, unsigned long dirty_ratelimit, int *nr_dirtied_pause) { long hi = ilog2(bdi->avg_write_bandwidth); long lo = ilog2(bdi->dirty_ratelimit); long t; /* target pause */ long pause; /* estimated next pause */ int pages; /* target nr_dirtied_pause */ /* target for 10ms pause on 1-dd case */ t = max(1, HZ / 100); /* * Scale up pause time for concurrent dirtiers in order to reduce CPU * overheads. * * (N * 10ms) on 2^N concurrent tasks. */ if (hi > lo) t += (hi - lo) * (10 * HZ) / 1024; /* * This is a bit convoluted. We try to base the next nr_dirtied_pause * on the much more stable dirty_ratelimit. However the next pause time * will be computed based on task_ratelimit and the two rate limits may * depart considerably at some time. Especially if task_ratelimit goes * below dirty_ratelimit/2 and the target pause is max_pause, the next * pause time will be max_pause*2 _trimmed down_ to max_pause. As a * result task_ratelimit won't be executed faithfully, which could * eventually bring down dirty_ratelimit. * * We apply two rules to fix it up: * 1) try to estimate the next pause time and if necessary, use a lower * nr_dirtied_pause so as not to exceed max_pause. When this happens, * nr_dirtied_pause will be "dancing" with task_ratelimit. * 2) limit the target pause time to max_pause/2, so that the normal * small fluctuations of task_ratelimit won't trigger rule (1) and * nr_dirtied_pause will remain as stable as dirty_ratelimit. */ t = min(t, 1 + max_pause / 2); pages = dirty_ratelimit * t / roundup_pow_of_two(HZ); /* * Tiny nr_dirtied_pause is found to hurt I/O performance in the test * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}. * When the 16 consecutive reads are often interrupted by some dirty * throttling pause during the async writes, cfq will go into idles * (deadline is fine). So push nr_dirtied_pause as high as possible * until reaches DIRTY_POLL_THRESH=32 pages. */ if (pages < DIRTY_POLL_THRESH) { t = max_pause; pages = dirty_ratelimit * t / roundup_pow_of_two(HZ); if (pages > DIRTY_POLL_THRESH) { pages = DIRTY_POLL_THRESH; t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit; } } pause = HZ * pages / (task_ratelimit + 1); if (pause > max_pause) { t = max_pause; pages = task_ratelimit * t / roundup_pow_of_two(HZ); } *nr_dirtied_pause = pages; /* * The minimal pause time will normally be half the target pause time. */ return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t; } /* * balance_dirty_pages() must be called by processes which are generating dirty * data. It looks at the number of dirty pages in the machine and will force * the caller to wait once crossing the (background_thresh + dirty_thresh) / 2. * If we're over `background_thresh' then the writeback threads are woken to * perform some writeout. 
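 *
 * As a rough illustration (assumed numbers): a task that just dirtied
 * pages_dirtied = 32 pages while its task_ratelimit works out to 1600
 * pages/s will be asked to sleep about
 *
 *	period = HZ * 32 / 1600 = HZ / 50	(~20ms with HZ=1000)
 *
 * minus whatever "think time" it already spent since its last pause.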
*/ static void balance_dirty_pages(struct address_space *mapping, unsigned long pages_dirtied) { unsigned long nr_reclaimable; /* = file_dirty + unstable_nfs */ unsigned long bdi_reclaimable; unsigned long nr_dirty; /* = file_dirty + writeback + unstable_nfs */ unsigned long bdi_dirty; unsigned long freerun; unsigned long background_thresh; unsigned long dirty_thresh; unsigned long bdi_thresh; long period; long pause; long max_pause; long min_pause; int nr_dirtied_pause; bool dirty_exceeded = false; unsigned long task_ratelimit; unsigned long dirty_ratelimit; unsigned long pos_ratio; struct backing_dev_info *bdi = mapping->backing_dev_info; unsigned long start_time = jiffies; for (;;) { unsigned long now = jiffies; /* * Unstable writes are a feature of certain networked * filesystems (i.e. NFS) in which data may have been * written to the server's write cache, but has not yet * been flushed to permanent storage. */ nr_reclaimable = global_page_state(NR_FILE_DIRTY) + global_page_state(NR_UNSTABLE_NFS); nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK); global_dirty_limits(&background_thresh, &dirty_thresh); /* * Throttle it only when the background writeback cannot * catch-up. This avoids (excessively) small writeouts * when the bdi limits are ramping up. */ freerun = dirty_freerun_ceiling(dirty_thresh, background_thresh); if (nr_dirty <= freerun) { current->dirty_paused_when = now; current->nr_dirtied = 0; current->nr_dirtied_pause = dirty_poll_interval(nr_dirty, dirty_thresh); break; } if (unlikely(!writeback_in_progress(bdi))) bdi_start_background_writeback(bdi); /* * bdi_thresh is not treated as some limiting factor as * dirty_thresh, due to reasons * - in JBOD setup, bdi_thresh can fluctuate a lot * - in a system with HDD and USB key, the USB key may somehow * go into state (bdi_dirty >> bdi_thresh) either because * bdi_dirty starts high, or because bdi_thresh drops low. * In this case we don't want to hard throttle the USB key * dirtiers for 100 seconds until bdi_dirty drops under * bdi_thresh. Instead the auxiliary bdi control line in * bdi_position_ratio() will let the dirtier task progress * at some rate <= (write_bw / 2) for bringing down bdi_dirty. */ bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh); /* * In order to avoid the stacked BDI deadlock we need * to ensure we accurately count the 'dirty' pages when * the threshold is low. * * Otherwise it would be possible to get thresh+n pages * reported dirty, even though there are thresh-m pages * actually dirty; with m+n sitting in the percpu * deltas. 
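 *
 * As an illustration (assuming bdi_stat_error() is roughly
 * nr_cpus * counter batch): with 8 CPUs and a batch of 32, the per-bdi
 * counters may be off by ~256 pages, so for bdi_thresh below ~512 pages
 * we pay for the exact (but expensive) bdi_stat_sum() below.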
 */
		if (bdi_thresh < 2 * bdi_stat_error(bdi)) {
			bdi_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
			bdi_dirty = bdi_reclaimable +
				    bdi_stat_sum(bdi, BDI_WRITEBACK);
		} else {
			bdi_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
			bdi_dirty = bdi_reclaimable +
				    bdi_stat(bdi, BDI_WRITEBACK);
		}

		dirty_exceeded = (bdi_dirty > bdi_thresh) &&
				 (nr_dirty > dirty_thresh);
		if (dirty_exceeded && !bdi->dirty_exceeded)
			bdi->dirty_exceeded = 1;

		bdi_update_bandwidth(bdi, dirty_thresh, background_thresh,
				     nr_dirty, bdi_thresh, bdi_dirty,
				     start_time);

		dirty_ratelimit = bdi->dirty_ratelimit;
		pos_ratio = bdi_position_ratio(bdi, dirty_thresh,
					       background_thresh, nr_dirty,
					       bdi_thresh, bdi_dirty);
		task_ratelimit = ((u64)dirty_ratelimit * pos_ratio) >>
							RATELIMIT_CALC_SHIFT;
		max_pause = bdi_max_pause(bdi, bdi_dirty);
		min_pause = bdi_min_pause(bdi, max_pause,
					  task_ratelimit, dirty_ratelimit,
					  &nr_dirtied_pause);

		if (unlikely(task_ratelimit == 0)) {
			period = max_pause;
			pause = max_pause;
			goto pause;
		}
		period = HZ * pages_dirtied / task_ratelimit;
		pause = period;
		if (current->dirty_paused_when)
			pause -= now - current->dirty_paused_when;
		/*
		 * For less than 1s think time (ext3/4 may block the dirtier
		 * for up to 800ms from time to time on 1-HDD; so does xfs,
		 * however at much less frequency), try to compensate it in
		 * future periods by updating the virtual time; otherwise just
		 * do a reset, as it may be a light dirtier.
		 */
		if (pause < min_pause) {
			trace_balance_dirty_pages(bdi,
						  dirty_thresh,
						  background_thresh,
						  nr_dirty,
						  bdi_thresh,
						  bdi_dirty,
						  dirty_ratelimit,
						  task_ratelimit,
						  pages_dirtied,
						  period,
						  min(pause, 0L),
						  start_time);
			if (pause < -HZ) {
				current->dirty_paused_when = now;
				current->nr_dirtied = 0;
			} else if (period) {
				current->dirty_paused_when += period;
				current->nr_dirtied = 0;
			} else if (current->nr_dirtied_pause <= pages_dirtied)
				current->nr_dirtied_pause += pages_dirtied;
			break;
		}
		if (unlikely(pause > max_pause)) {
			/* for occasional dropped task_ratelimit */
			now += min(pause - max_pause, max_pause);
			pause = max_pause;
		}

pause:
		trace_balance_dirty_pages(bdi,
					  dirty_thresh,
					  background_thresh,
					  nr_dirty,
					  bdi_thresh,
					  bdi_dirty,
					  dirty_ratelimit,
					  task_ratelimit,
					  pages_dirtied,
					  period,
					  pause,
					  start_time);
		__set_current_state(TASK_KILLABLE);
		io_schedule_timeout(pause);

		current->dirty_paused_when = now + pause;
		current->nr_dirtied = 0;
		current->nr_dirtied_pause = nr_dirtied_pause;

		/*
		 * This is typically equal to (nr_dirty < dirty_thresh) and can
		 * also keep "1000+ dd on a slow USB stick" under control.
		 */
		if (task_ratelimit)
			break;

		/*
		 * In the case of an unresponsive NFS server whose dirty
		 * pages exceed dirty_thresh, give the other good bdi's a
		 * pipe to go through, so that tasks on them still remain
		 * responsive.
		 *
		 * In theory 1 page is enough to keep the consumer-producer
		 * pipe going: the flusher cleans 1 page => the task dirties 1
		 * more page. However bdi_dirty has accounting errors. So use
		 * the larger and more IO friendly bdi_stat_error.
		 */
		if (bdi_dirty <= bdi_stat_error(bdi))
			break;

		if (fatal_signal_pending(current))
			break;
	}

	if (!dirty_exceeded && bdi->dirty_exceeded)
		bdi->dirty_exceeded = 0;

	if (writeback_in_progress(bdi))
		return;

	/*
	 * In laptop mode, we wait until hitting the higher threshold before
	 * starting background writeout, and then write out all the way down
	 * to the lower threshold. So slow writers cause minimal disk activity.
	 *
	 * In normal mode, we start background writeout at the lower
	 * background_thresh, to keep the amount of dirty memory low.
*/ if (laptop_mode) return; if (nr_reclaimable > background_thresh) bdi_start_background_writeback(bdi); } void set_page_dirty_balance(struct page *page, int page_mkwrite) { if (set_page_dirty(page) || page_mkwrite) { struct address_space *mapping = page_mapping(page); if (mapping) balance_dirty_pages_ratelimited(mapping); } } static DEFINE_PER_CPU(int, bdp_ratelimits); /* * Normal tasks are throttled by * loop { * dirty tsk->nr_dirtied_pause pages; * take a snap in balance_dirty_pages(); * } * However there is a worst case. If every task exit immediately when dirtied * (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will never be * called to throttle the page dirties. The solution is to save the not yet * throttled page dirties in dirty_throttle_leaks on task exit and charge them * randomly into the running tasks. This works well for the above worst case, * as the new task will pick up and accumulate the old task's leaked dirty * count and eventually get throttled. */ DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0; /** * balance_dirty_pages_ratelimited_nr - balance dirty memory state * @mapping: address_space which was dirtied * @nr_pages_dirtied: number of pages which the caller has just dirtied * * Processes which are dirtying memory should call in here once for each page * which was newly dirtied. The function will periodically check the system's * dirty state and will initiate writeback if needed. * * On really big machines, get_writeback_state is expensive, so try to avoid * calling it too often (ratelimiting). But once we're over the dirty memory * limit we decrease the ratelimiting by a lot, to prevent individual processes * from overshooting the limit by (ratelimit_pages) each. */ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping, unsigned long nr_pages_dirtied) { struct backing_dev_info *bdi = mapping->backing_dev_info; int ratelimit; int *p; if (!bdi_cap_account_dirty(bdi)) return; ratelimit = current->nr_dirtied_pause; if (bdi->dirty_exceeded) ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10)); preempt_disable(); /* * This prevents one CPU to accumulate too many dirtied pages without * calling into balance_dirty_pages(), which can happen when there are * 1000+ tasks, all of them start dirtying pages at exactly the same * time, hence all honoured too large initial task->nr_dirtied_pause. */ p = &__get_cpu_var(bdp_ratelimits); if (unlikely(current->nr_dirtied >= ratelimit)) *p = 0; else if (unlikely(*p >= ratelimit_pages)) { *p = 0; ratelimit = 0; } /* * Pick up the dirtied pages by the exited tasks. This avoids lots of * short-lived tasks (eg. gcc invocations in a kernel build) escaping * the dirty throttling and livelock other long-run dirtiers. */ p = &__get_cpu_var(dirty_throttle_leaks); if (*p > 0 && current->nr_dirtied < ratelimit) { nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied); *p -= nr_pages_dirtied; current->nr_dirtied += nr_pages_dirtied; } preempt_enable(); if (unlikely(current->nr_dirtied >= ratelimit)) balance_dirty_pages(mapping, current->nr_dirtied); } EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr); void throttle_vm_writeout(gfp_t gfp_mask) { unsigned long background_thresh; unsigned long dirty_thresh; for ( ; ; ) { global_dirty_limits(&background_thresh, &dirty_thresh); dirty_thresh = hard_dirty_limit(dirty_thresh); /* * Boost the allowable dirty threshold a bit for page * allocators so they don't get DoS'ed by heavy writers */ dirty_thresh += dirty_thresh / 10; /* wheeee... 
*/ if (global_page_state(NR_UNSTABLE_NFS) + global_page_state(NR_WRITEBACK) <= dirty_thresh) break; congestion_wait(BLK_RW_ASYNC, HZ/10); /* * The caller might hold locks which can prevent IO completion * or progress in the filesystem. So we cannot just sit here * waiting for IO to complete. */ if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO)) break; } } /* * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs */ int dirty_writeback_centisecs_handler(ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { proc_dointvec(table, write, buffer, length, ppos); bdi_arm_supers_timer(); return 0; } #ifdef CONFIG_BLOCK void laptop_mode_timer_fn(unsigned long data) { struct request_queue *q = (struct request_queue *)data; int nr_pages = global_page_state(NR_FILE_DIRTY) + global_page_state(NR_UNSTABLE_NFS); /* * We want to write everything out, not just down to the dirty * threshold */ if (bdi_has_dirty_io(&q->backing_dev_info)) bdi_start_writeback(&q->backing_dev_info, nr_pages, WB_REASON_LAPTOP_TIMER); } /* * We've spun up the disk and we're in laptop mode: schedule writeback * of all dirty data a few seconds from now. If the flush is already scheduled * then push it back - the user is still using the disk. */ void laptop_io_completion(struct backing_dev_info *info) { mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode); } /* * We're in laptop mode and we've just synced. The sync's writes will have * caused another writeback to be scheduled by laptop_io_completion. * Nothing needs to be written back anymore, so we unschedule the writeback. */ void laptop_sync_completion(void) { struct backing_dev_info *bdi; rcu_read_lock(); list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) del_timer(&bdi->laptop_mode_wb_timer); rcu_read_unlock(); } #endif /* * If ratelimit_pages is too high then we can get into dirty-data overload * if a large number of processes all perform writes at the same time. * If it is too low then SMP machines will call the (expensive) * get_writeback_state too often. * * Here we set ratelimit_pages to a level which ensures that when all CPUs are * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory * thresholds. */ void writeback_set_ratelimit(void) { unsigned long background_thresh; unsigned long dirty_thresh; global_dirty_limits(&background_thresh, &dirty_thresh); ratelimit_pages = dirty_thresh / (num_online_cpus() * 32); if (ratelimit_pages < 16) ratelimit_pages = 16; } static int __cpuinit ratelimit_handler(struct notifier_block *self, unsigned long u, void *v) { writeback_set_ratelimit(); return NOTIFY_DONE; } static struct notifier_block __cpuinitdata ratelimit_nb = { .notifier_call = ratelimit_handler, .next = NULL, }; /* * Called early on to tune the page writeback dirty limits. * * We used to scale dirty pages according to how total memory * related to pages that could be allocated for buffers (by * comparing nr_free_buffer_pages() to vm_total_pages. * * However, that was when we used "dirty_ratio" to scale with * all memory, and we don't do that any more. "dirty_ratio" * is now applied to total non-HIGHPAGE memory (by subtracting * totalhigh_pages from vm_total_pages), and as such we can't * get into the old insane situation any more where we had * large amounts of dirty pages compared to a small amount of * non-HIGHMEM memory. * * But we might still want to scale the dirty_ratio by how * much memory the box has.. 
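 *
 * For a concrete feel (illustrative numbers): with dirty_thresh =
 * 200,000 pages on a 4-CPU box, writeback_set_ratelimit() above picks
 *
 *	ratelimit_pages = 200000 / (4 * 32) = 1562 pages
 *
 * so all CPUs dirtying at once overshoot by at most ~4*1562 pages,
 * i.e. about 1/32 (~3%) of the dirty threshold.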
*/ void __init page_writeback_init(void) { int shift; writeback_set_ratelimit(); register_cpu_notifier(&ratelimit_nb); shift = calc_period_shift(); prop_descriptor_init(&vm_completions, shift); } /** * tag_pages_for_writeback - tag pages to be written by write_cache_pages * @mapping: address space structure to write * @start: starting page index * @end: ending page index (inclusive) * * This function scans the page range from @start to @end (inclusive) and tags * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is * that write_cache_pages (or whoever calls this function) will then use * TOWRITE tag to identify pages eligible for writeback. This mechanism is * used to avoid livelocking of writeback by a process steadily creating new * dirty pages in the file (thus it is important for this function to be quick * so that it can tag pages faster than a dirtying process can create them). */ /* * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency. */ void tag_pages_for_writeback(struct address_space *mapping, pgoff_t start, pgoff_t end) { #define WRITEBACK_TAG_BATCH 4096 unsigned long tagged; do { spin_lock_irq(&mapping->tree_lock); tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree, &start, end, WRITEBACK_TAG_BATCH, PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE); spin_unlock_irq(&mapping->tree_lock); WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH); cond_resched(); /* We check 'start' to handle wrapping when end == ~0UL */ } while (tagged >= WRITEBACK_TAG_BATCH && start); } EXPORT_SYMBOL(tag_pages_for_writeback); /** * write_cache_pages - walk the list of dirty pages of the given address space and write all of them. * @mapping: address space structure to write * @wbc: subtract the number of written pages from *@wbc->nr_to_write * @writepage: function called for each page * @data: data passed to writepage function * * If a page is already under I/O, write_cache_pages() skips it, even * if it's dirty. This is desirable behaviour for memory-cleaning writeback, * but it is INCORRECT for data-integrity system calls such as fsync(). fsync() * and msync() need to guarantee that all the data which was dirty at the time * the call was made get new I/O started against them. If wbc->sync_mode is * WB_SYNC_ALL then we were called for data integrity and we must wait for * existing IO to complete. * * To avoid livelocks (when other process dirties new pages), we first tag * pages which should be written back with TOWRITE tag and only then start * writing them. For data-integrity sync we have to be careful so that we do * not miss some pages (e.g., because some other process has cleared TOWRITE * tag we set). The rule we follow is that TOWRITE tag can be cleared only * by the process clearing the DIRTY tag (and submitting the page for IO). 
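 *
 * A minimal (hypothetical) caller sketch, for orientation only:
 *
 *	static int my_writepage(struct page *page,
 *				struct writeback_control *wbc, void *data)
 *	{
 *		struct address_space *mapping = data;
 *		return mapping->a_ops->writepage(page, wbc);
 *	}
 *
 *	ret = write_cache_pages(mapping, wbc, my_writepage, mapping);
 *
 * which is essentially what __writepage()/generic_writepages() below do,
 * plus error propagation via mapping_set_error().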
*/ int write_cache_pages(struct address_space *mapping, struct writeback_control *wbc, writepage_t writepage, void *data) { int ret = 0; int done = 0; struct pagevec pvec; int nr_pages; pgoff_t uninitialized_var(writeback_index); pgoff_t index; pgoff_t end; /* Inclusive */ pgoff_t done_index; int cycled; int range_whole = 0; int tag; pagevec_init(&pvec, 0); if (wbc->range_cyclic) { writeback_index = mapping->writeback_index; /* prev offset */ index = writeback_index; if (index == 0) cycled = 1; else cycled = 0; end = -1; } else { index = wbc->range_start >> PAGE_CACHE_SHIFT; end = wbc->range_end >> PAGE_CACHE_SHIFT; if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) range_whole = 1; cycled = 1; /* ignore range_cyclic tests */ } if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) tag = PAGECACHE_TAG_TOWRITE; else tag = PAGECACHE_TAG_DIRTY; retry: if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) tag_pages_for_writeback(mapping, index, end); done_index = index; while (!done && (index <= end)) { int i; nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); if (nr_pages == 0) break; for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; /* * At this point, the page may be truncated or * invalidated (changing page->mapping to NULL), or * even swizzled back from swapper_space to tmpfs file * mapping. However, page->index will not change * because we have a reference on the page. */ if (page->index > end) { /* * can't be range_cyclic (1st pass) because * end == -1 in that case. */ done = 1; break; } done_index = page->index; lock_page(page); /* * Page truncated or invalidated. We can freely skip it * then, even for data integrity operations: the page * has disappeared concurrently, so there could be no * real expectation of this data interity operation * even if there is now a new, dirty page at the same * pagecache address. */ if (unlikely(page->mapping != mapping)) { continue_unlock: unlock_page(page); continue; } if (!PageDirty(page)) { /* someone wrote it for us */ goto continue_unlock; } if (PageWriteback(page)) { if (wbc->sync_mode != WB_SYNC_NONE) wait_on_page_writeback(page); else goto continue_unlock; } BUG_ON(PageWriteback(page)); if (!clear_page_dirty_for_io(page)) goto continue_unlock; trace_wbc_writepage(wbc, mapping->backing_dev_info); ret = (*writepage)(page, wbc, data); if (unlikely(ret)) { if (ret == AOP_WRITEPAGE_ACTIVATE) { unlock_page(page); ret = 0; } else { /* * done_index is set past this page, * so media errors will not choke * background writeout for the entire * file. This has consequences for * range_cyclic semantics (ie. it may * not be suitable for data integrity * writeout). */ done_index = page->index + 1; done = 1; break; } } /* * We stop writing back only if we are not doing * integrity sync. In case of integrity sync we have to * keep going until we have written all the pages * we tagged for writeback prior to entering this loop. 
*/ if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) { done = 1; break; } } pagevec_release(&pvec); cond_resched(); } if (!cycled && !done) { /* * range_cyclic: * We hit the last page and there is more work to be done: wrap * back to the start of the file */ cycled = 1; index = 0; end = writeback_index - 1; goto retry; } if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) mapping->writeback_index = done_index; return ret; } EXPORT_SYMBOL(write_cache_pages); /* * Function used by generic_writepages to call the real writepage * function and set the mapping flags on error */ static int __writepage(struct page *page, struct writeback_control *wbc, void *data) { struct address_space *mapping = data; int ret = mapping->a_ops->writepage(page, wbc); mapping_set_error(mapping, ret); return ret; } /** * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them. * @mapping: address space structure to write * @wbc: subtract the number of written pages from *@wbc->nr_to_write * * This is a library function, which implements the writepages() * address_space_operation. */ int generic_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct blk_plug plug; int ret; /* deal with chardevs and other special file */ if (!mapping->a_ops->writepage) return 0; blk_start_plug(&plug); ret = write_cache_pages(mapping, wbc, __writepage, mapping); blk_finish_plug(&plug); return ret; } EXPORT_SYMBOL(generic_writepages); int do_writepages(struct address_space *mapping, struct writeback_control *wbc) { int ret; if (wbc->nr_to_write <= 0) return 0; if (mapping->a_ops->writepages) ret = mapping->a_ops->writepages(mapping, wbc); else ret = generic_writepages(mapping, wbc); return ret; } /** * write_one_page - write out a single page and optionally wait on I/O * @page: the page to write * @wait: if true, wait on writeout * * The page must be locked by the caller and will be unlocked upon return. * * write_one_page() returns a negative error code if I/O failed. */ int write_one_page(struct page *page, int wait) { struct address_space *mapping = page->mapping; int ret = 0; struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL, .nr_to_write = 1, }; BUG_ON(!PageLocked(page)); if (wait) wait_on_page_writeback(page); if (clear_page_dirty_for_io(page)) { page_cache_get(page); ret = mapping->a_ops->writepage(page, &wbc); if (ret == 0 && wait) { wait_on_page_writeback(page); if (PageError(page)) ret = -EIO; } page_cache_release(page); } else { unlock_page(page); } return ret; } EXPORT_SYMBOL(write_one_page); /* * For address_spaces which do not use buffers nor write back. */ int __set_page_dirty_no_writeback(struct page *page) { if (!PageDirty(page)) return !TestSetPageDirty(page); return 0; } /* * Helper function for set_page_dirty family. * NOTE: This relies on being atomic wrt interrupts. */ void account_page_dirtied(struct page *page, struct address_space *mapping) { if (mapping_cap_account_dirty(mapping)) { __inc_zone_page_state(page, NR_FILE_DIRTY); __inc_zone_page_state(page, NR_DIRTIED); __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); __inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED); task_io_account_write(PAGE_CACHE_SIZE); current->nr_dirtied++; this_cpu_inc(bdp_ratelimits); } } EXPORT_SYMBOL(account_page_dirtied); /* * Helper function for set_page_writeback family. * NOTE: Unlike account_page_dirtied this does not rely on being atomic * wrt interrupts. 
*/ void account_page_writeback(struct page *page) { inc_zone_page_state(page, NR_WRITEBACK); } EXPORT_SYMBOL(account_page_writeback); /* * For address_spaces which do not use buffers. Just tag the page as dirty in * its radix tree. * * This is also used when a single buffer is being dirtied: we want to set the * page dirty in that case, but not all the buffers. This is a "bottom-up" * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying. * * Most callers have locked the page, which pins the address_space in memory. * But zap_pte_range() does not lock the page, however in that case the * mapping is pinned by the vma's ->vm_file reference. * * We take care to handle the case where the page was truncated from the * mapping by re-checking page_mapping() inside tree_lock. */ int __set_page_dirty_nobuffers(struct page *page) { if (!TestSetPageDirty(page)) { struct address_space *mapping = page_mapping(page); struct address_space *mapping2; if (!mapping) return 1; spin_lock_irq(&mapping->tree_lock); mapping2 = page_mapping(page); if (mapping2) { /* Race with truncate? */ BUG_ON(mapping2 != mapping); WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page)); account_page_dirtied(page, mapping); radix_tree_tag_set(&mapping->page_tree, page_index(page), PAGECACHE_TAG_DIRTY); } spin_unlock_irq(&mapping->tree_lock); if (mapping->host) { /* !PageAnon && !swapper_space */ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); } return 1; } return 0; } EXPORT_SYMBOL(__set_page_dirty_nobuffers); /* * Call this whenever redirtying a page, to de-account the dirty counters * (NR_DIRTIED, BDI_DIRTIED, tsk->nr_dirtied), so that they match the written * counters (NR_WRITTEN, BDI_WRITTEN) in long term. The mismatches will lead to * systematic errors in balanced_dirty_ratelimit and the dirty pages position * control. */ void account_page_redirty(struct page *page) { struct address_space *mapping = page->mapping; if (mapping && mapping_cap_account_dirty(mapping)) { current->nr_dirtied--; dec_zone_page_state(page, NR_DIRTIED); dec_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED); } } EXPORT_SYMBOL(account_page_redirty); /* * When a writepage implementation decides that it doesn't want to write this * page for some reason, it should redirty the locked page via * redirty_page_for_writepage() and it should then unlock the page and return 0 */ int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page) { wbc->pages_skipped++; account_page_redirty(page); return __set_page_dirty_nobuffers(page); } EXPORT_SYMBOL(redirty_page_for_writepage); /* * Dirty a page. * * For pages with a mapping this should be done under the page lock * for the benefit of asynchronous memory errors who prefer a consistent * dirty state. This rule can be broken in some special cases, * but should be better not to. * * If the mapping doesn't provide a set_page_dirty a_op, then * just fall through and assume that it wants buffer_heads. */ int set_page_dirty(struct page *page) { struct address_space *mapping = page_mapping(page); if (likely(mapping)) { int (*spd)(struct page *) = mapping->a_ops->set_page_dirty; /* * readahead/lru_deactivate_page could remain * PG_readahead/PG_reclaim due to race with end_page_writeback * About readahead, if the page is written, the flags would be * reset. So no problem. * About lru_deactivate_page, if the page is redirty, the flag * will be reset. So no problem. but if the page is used by readahead * it will confuse readahead and make it restart the size rampup * process. 
But it's a trivial problem. */ ClearPageReclaim(page); #ifdef CONFIG_BLOCK if (!spd) spd = __set_page_dirty_buffers; #endif return (*spd)(page); } if (!PageDirty(page)) { if (!TestSetPageDirty(page)) return 1; } return 0; } EXPORT_SYMBOL(set_page_dirty); /* * set_page_dirty() is racy if the caller has no reference against * page->mapping->host, and if the page is unlocked. This is because another * CPU could truncate the page off the mapping and then free the mapping. * * Usually, the page _is_ locked, or the caller is a user-space process which * holds a reference on the inode by having an open file. * * In other cases, the page should be locked before running set_page_dirty(). */ int set_page_dirty_lock(struct page *page) { int ret; lock_page(page); ret = set_page_dirty(page); unlock_page(page); return ret; } EXPORT_SYMBOL(set_page_dirty_lock); /* * Clear a page's dirty flag, while caring for dirty memory accounting. * Returns true if the page was previously dirty. * * This is for preparing to put the page under writeout. We leave the page * tagged as dirty in the radix tree so that a concurrent write-for-sync * can discover it via a PAGECACHE_TAG_DIRTY walk. The ->writepage * implementation will run either set_page_writeback() or set_page_dirty(), * at which stage we bring the page's dirty flag and radix-tree dirty tag * back into sync. * * This incoherency between the page's dirty flag and radix-tree tag is * unfortunate, but it only exists while the page is locked. */ int clear_page_dirty_for_io(struct page *page) { struct address_space *mapping = page_mapping(page); BUG_ON(!PageLocked(page)); if (mapping && mapping_cap_account_dirty(mapping)) { /* * Yes, Virginia, this is indeed insane. * * We use this sequence to make sure that * (a) we account for dirty stats properly * (b) we tell the low-level filesystem to * mark the whole page dirty if it was * dirty in a pagetable. Only to then * (c) clean the page again and return 1 to * cause the writeback. * * This way we avoid all nasty races with the * dirty bit in multiple places and clearing * them concurrently from different threads. * * Note! Normally the "set_page_dirty(page)" * has no effect on the actual dirty bit - since * that will already usually be set. But we * need the side effects, and it can help us * avoid races. * * We basically use the page "master dirty bit" * as a serialization point for all the different * threads doing their things. */ if (page_mkclean(page)) set_page_dirty(page); /* * We carefully synchronise fault handlers against * installing a dirty pte and marking the page dirty * at this point. We do this by having them hold the * page lock at some point after installing their * pte, but before marking the page dirty. * Pages are always locked coming in here, so we get * the desired exclusion. See mm/memory.c:do_wp_page() * for more comments. 
*/ if (TestClearPageDirty(page)) { dec_zone_page_state(page, NR_FILE_DIRTY); dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); return 1; } return 0; } return TestClearPageDirty(page); } EXPORT_SYMBOL(clear_page_dirty_for_io); int test_clear_page_writeback(struct page *page) { struct address_space *mapping = page_mapping(page); int ret; if (mapping) { struct backing_dev_info *bdi = mapping->backing_dev_info; unsigned long flags; spin_lock_irqsave(&mapping->tree_lock, flags); ret = TestClearPageWriteback(page); if (ret) { radix_tree_tag_clear(&mapping->page_tree, page_index(page), PAGECACHE_TAG_WRITEBACK); if (bdi_cap_account_writeback(bdi)) { __dec_bdi_stat(bdi, BDI_WRITEBACK); __bdi_writeout_inc(bdi); } } spin_unlock_irqrestore(&mapping->tree_lock, flags); } else { ret = TestClearPageWriteback(page); } if (ret) { dec_zone_page_state(page, NR_WRITEBACK); inc_zone_page_state(page, NR_WRITTEN); } return ret; } int test_set_page_writeback(struct page *page) { struct address_space *mapping = page_mapping(page); int ret; if (mapping) { struct backing_dev_info *bdi = mapping->backing_dev_info; unsigned long flags; spin_lock_irqsave(&mapping->tree_lock, flags); ret = TestSetPageWriteback(page); if (!ret) { radix_tree_tag_set(&mapping->page_tree, page_index(page), PAGECACHE_TAG_WRITEBACK); if (bdi_cap_account_writeback(bdi)) __inc_bdi_stat(bdi, BDI_WRITEBACK); } if (!PageDirty(page)) radix_tree_tag_clear(&mapping->page_tree, page_index(page), PAGECACHE_TAG_DIRTY); radix_tree_tag_clear(&mapping->page_tree, page_index(page), PAGECACHE_TAG_TOWRITE); spin_unlock_irqrestore(&mapping->tree_lock, flags); } else { ret = TestSetPageWriteback(page); } if (!ret) account_page_writeback(page); return ret; } EXPORT_SYMBOL(test_set_page_writeback); /* * Return true if any of the pages in the mapping are marked with the * passed tag. */ int mapping_tagged(struct address_space *mapping, int tag) { return radix_tree_tagged(&mapping->page_tree, tag); } EXPORT_SYMBOL(mapping_tagged);
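/*
 * Usage sketch (hypothetical caller): a filesystem can cheaply test for
 * pending work before deciding to kick writeback, e.g.
 *
 *	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
 *		do_writepages(mapping, &wbc);
 *
 * This only inspects the radix tree tags and takes no page references.
 */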
gpl-2.0
PatrikKT/android_kernel_htc_a31ul
drivers/regulator/mc13892-regulator.c
2092
20761
/* * Regulator Driver for Freescale MC13892 PMIC * * Copyright 2010 Yong Shen <yong.shen@linaro.org> * * Based on draft driver from Arnaud Patard <arnaud.patard@rtp-net.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/mfd/mc13892.h> #include <linux/regulator/machine.h> #include <linux/regulator/driver.h> #include <linux/platform_device.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/err.h> #include <linux/module.h> #include "mc13xxx.h" #define MC13892_REVISION 7 #define MC13892_POWERCTL0 13 #define MC13892_POWERCTL0_USEROFFSPI 3 #define MC13892_POWERCTL0_VCOINCELLVSEL 20 #define MC13892_POWERCTL0_VCOINCELLVSEL_M (7<<20) #define MC13892_POWERCTL0_VCOINCELLEN (1<<23) #define MC13892_SWITCHERS0_SWxHI (1<<23) #define MC13892_SWITCHERS0 24 #define MC13892_SWITCHERS0_SW1VSEL 0 #define MC13892_SWITCHERS0_SW1VSEL_M (0x1f<<0) #define MC13892_SWITCHERS0_SW1HI (1<<23) #define MC13892_SWITCHERS0_SW1EN 0 #define MC13892_SWITCHERS1 25 #define MC13892_SWITCHERS1_SW2VSEL 0 #define MC13892_SWITCHERS1_SW2VSEL_M (0x1f<<0) #define MC13892_SWITCHERS1_SW2HI (1<<23) #define MC13892_SWITCHERS1_SW2EN 0 #define MC13892_SWITCHERS2 26 #define MC13892_SWITCHERS2_SW3VSEL 0 #define MC13892_SWITCHERS2_SW3VSEL_M (0x1f<<0) #define MC13892_SWITCHERS2_SW3HI (1<<23) #define MC13892_SWITCHERS2_SW3EN 0 #define MC13892_SWITCHERS3 27 #define MC13892_SWITCHERS3_SW4VSEL 0 #define MC13892_SWITCHERS3_SW4VSEL_M (0x1f<<0) #define MC13892_SWITCHERS3_SW4HI (1<<23) #define MC13892_SWITCHERS3_SW4EN 0 #define MC13892_SWITCHERS4 28 #define MC13892_SWITCHERS4_SW1MODE 0 #define MC13892_SWITCHERS4_SW1MODE_AUTO (8<<0) #define MC13892_SWITCHERS4_SW1MODE_M (0xf<<0) #define MC13892_SWITCHERS4_SW2MODE 10 #define MC13892_SWITCHERS4_SW2MODE_AUTO (8<<10) #define MC13892_SWITCHERS4_SW2MODE_M (0xf<<10) #define MC13892_SWITCHERS5 29 #define MC13892_SWITCHERS5_SW3MODE 0 #define MC13892_SWITCHERS5_SW3MODE_AUTO (8<<0) #define MC13892_SWITCHERS5_SW3MODE_M (0xf<<0) #define MC13892_SWITCHERS5_SW4MODE 8 #define MC13892_SWITCHERS5_SW4MODE_AUTO (8<<8) #define MC13892_SWITCHERS5_SW4MODE_M (0xf<<8) #define MC13892_SWITCHERS5_SWBSTEN (1<<20) #define MC13892_REGULATORSETTING0 30 #define MC13892_REGULATORSETTING0_VGEN1VSEL 0 #define MC13892_REGULATORSETTING0_VDIGVSEL 4 #define MC13892_REGULATORSETTING0_VGEN2VSEL 6 #define MC13892_REGULATORSETTING0_VPLLVSEL 9 #define MC13892_REGULATORSETTING0_VUSB2VSEL 11 #define MC13892_REGULATORSETTING0_VGEN3VSEL 14 #define MC13892_REGULATORSETTING0_VCAMVSEL 16 #define MC13892_REGULATORSETTING0_VGEN1VSEL_M (3<<0) #define MC13892_REGULATORSETTING0_VDIGVSEL_M (3<<4) #define MC13892_REGULATORSETTING0_VGEN2VSEL_M (7<<6) #define MC13892_REGULATORSETTING0_VPLLVSEL_M (3<<9) #define MC13892_REGULATORSETTING0_VUSB2VSEL_M (3<<11) #define MC13892_REGULATORSETTING0_VGEN3VSEL_M (1<<14) #define MC13892_REGULATORSETTING0_VCAMVSEL_M (3<<16) #define MC13892_REGULATORSETTING1 31 #define MC13892_REGULATORSETTING1_VVIDEOVSEL 2 #define MC13892_REGULATORSETTING1_VAUDIOVSEL 4 #define MC13892_REGULATORSETTING1_VSDVSEL 6 #define MC13892_REGULATORSETTING1_VVIDEOVSEL_M (3<<2) #define MC13892_REGULATORSETTING1_VAUDIOVSEL_M (3<<4) #define MC13892_REGULATORSETTING1_VSDVSEL_M (7<<6) #define MC13892_REGULATORMODE0 32 #define MC13892_REGULATORMODE0_VGEN1EN (1<<0) #define MC13892_REGULATORMODE0_VGEN1STDBY (1<<1) #define MC13892_REGULATORMODE0_VGEN1MODE (1<<2) 
#define MC13892_REGULATORMODE0_VIOHIEN (1<<3) #define MC13892_REGULATORMODE0_VIOHISTDBY (1<<4) #define MC13892_REGULATORMODE0_VIOHIMODE (1<<5) #define MC13892_REGULATORMODE0_VDIGEN (1<<9) #define MC13892_REGULATORMODE0_VDIGSTDBY (1<<10) #define MC13892_REGULATORMODE0_VDIGMODE (1<<11) #define MC13892_REGULATORMODE0_VGEN2EN (1<<12) #define MC13892_REGULATORMODE0_VGEN2STDBY (1<<13) #define MC13892_REGULATORMODE0_VGEN2MODE (1<<14) #define MC13892_REGULATORMODE0_VPLLEN (1<<15) #define MC13892_REGULATORMODE0_VPLLSTDBY (1<<16) #define MC13892_REGULATORMODE0_VPLLMODE (1<<17) #define MC13892_REGULATORMODE0_VUSB2EN (1<<18) #define MC13892_REGULATORMODE0_VUSB2STDBY (1<<19) #define MC13892_REGULATORMODE0_VUSB2MODE (1<<20) #define MC13892_REGULATORMODE1 33 #define MC13892_REGULATORMODE1_VGEN3EN (1<<0) #define MC13892_REGULATORMODE1_VGEN3STDBY (1<<1) #define MC13892_REGULATORMODE1_VGEN3MODE (1<<2) #define MC13892_REGULATORMODE1_VCAMEN (1<<6) #define MC13892_REGULATORMODE1_VCAMSTDBY (1<<7) #define MC13892_REGULATORMODE1_VCAMMODE (1<<8) #define MC13892_REGULATORMODE1_VCAMCONFIGEN (1<<9) #define MC13892_REGULATORMODE1_VVIDEOEN (1<<12) #define MC13892_REGULATORMODE1_VVIDEOSTDBY (1<<13) #define MC13892_REGULATORMODE1_VVIDEOMODE (1<<14) #define MC13892_REGULATORMODE1_VAUDIOEN (1<<15) #define MC13892_REGULATORMODE1_VAUDIOSTDBY (1<<16) #define MC13892_REGULATORMODE1_VAUDIOMODE (1<<17) #define MC13892_REGULATORMODE1_VSDEN (1<<18) #define MC13892_REGULATORMODE1_VSDSTDBY (1<<19) #define MC13892_REGULATORMODE1_VSDMODE (1<<20) #define MC13892_POWERMISC 34 #define MC13892_POWERMISC_GPO1EN (1<<6) #define MC13892_POWERMISC_GPO2EN (1<<8) #define MC13892_POWERMISC_GPO3EN (1<<10) #define MC13892_POWERMISC_GPO4EN (1<<12) #define MC13892_POWERMISC_PWGT1SPIEN (1<<15) #define MC13892_POWERMISC_PWGT2SPIEN (1<<16) #define MC13892_POWERMISC_GPO4ADINEN (1<<21) #define MC13892_POWERMISC_PWGTSPI_M (3 << 15) #define MC13892_USB1 50 #define MC13892_USB1_VUSBEN (1<<3) static const unsigned int mc13892_vcoincell[] = { 2500000, 2700000, 2800000, 2900000, 3000000, 3100000, 3200000, 3300000, }; static const unsigned int mc13892_sw1[] = { 600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000, 800000, 825000, 850000, 875000, 900000, 925000, 950000, 975000, 1000000, 1025000, 1050000, 1075000, 1100000, 1125000, 1150000, 1175000, 1200000, 1225000, 1250000, 1275000, 1300000, 1325000, 1350000, 1375000 }; /* * Note: this table is used to derive SWxVSEL by index into * the array. Offset the values by the index of 1100000uV * to get the actual register value for that voltage selector * if the HI bit is to be set as well. 
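 *
 * Worked example (assuming the mc13892_sw[] table below): 1200000uV sits
 * at index 24; with the HI bit set the register selector becomes
 *
 *	24 - MC13892_SWxHI_SEL_OFFSET = 24 - 20 = 4
 *
 * i.e. SWxVSEL = 4 programmed together with MC13892_SWITCHERS0_SWxHI.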
*/ #define MC13892_SWxHI_SEL_OFFSET 20 static const unsigned int mc13892_sw[] = { 600000, 625000, 650000, 675000, 700000, 725000, 750000, 775000, 800000, 825000, 850000, 875000, 900000, 925000, 950000, 975000, 1000000, 1025000, 1050000, 1075000, 1100000, 1125000, 1150000, 1175000, 1200000, 1225000, 1250000, 1275000, 1300000, 1325000, 1350000, 1375000, 1400000, 1425000, 1450000, 1475000, 1500000, 1525000, 1550000, 1575000, 1600000, 1625000, 1650000, 1675000, 1700000, 1725000, 1750000, 1775000, 1800000, 1825000, 1850000, 1875000 }; static const unsigned int mc13892_swbst[] = { 5000000, }; static const unsigned int mc13892_viohi[] = { 2775000, }; static const unsigned int mc13892_vpll[] = { 1050000, 1250000, 1650000, 1800000, }; static const unsigned int mc13892_vdig[] = { 1050000, 1250000, 1650000, 1800000, }; static const unsigned int mc13892_vsd[] = { 1800000, 2000000, 2600000, 2700000, 2800000, 2900000, 3000000, 3150000, }; static const unsigned int mc13892_vusb2[] = { 2400000, 2600000, 2700000, 2775000, }; static const unsigned int mc13892_vvideo[] = { 2700000, 2775000, 2500000, 2600000, }; static const unsigned int mc13892_vaudio[] = { 2300000, 2500000, 2775000, 3000000, }; static const unsigned int mc13892_vcam[] = { 2500000, 2600000, 2750000, 3000000, }; static const unsigned int mc13892_vgen1[] = { 1200000, 1500000, 2775000, 3150000, }; static const unsigned int mc13892_vgen2[] = { 1200000, 1500000, 1600000, 1800000, 2700000, 2800000, 3000000, 3150000, }; static const unsigned int mc13892_vgen3[] = { 1800000, 2900000, }; static const unsigned int mc13892_vusb[] = { 3300000, }; static const unsigned int mc13892_gpo[] = { 2750000, }; static const unsigned int mc13892_pwgtdrv[] = { 5000000, }; static struct regulator_ops mc13892_gpo_regulator_ops; static struct regulator_ops mc13892_sw_regulator_ops; #define MC13892_FIXED_DEFINE(name, reg, voltages) \ MC13xxx_FIXED_DEFINE(MC13892_, name, reg, voltages, \ mc13xxx_fixed_regulator_ops) #define MC13892_GPO_DEFINE(name, reg, voltages) \ MC13xxx_GPO_DEFINE(MC13892_, name, reg, voltages, \ mc13892_gpo_regulator_ops) #define MC13892_SW_DEFINE(name, reg, vsel_reg, voltages) \ MC13xxx_DEFINE(MC13892_, name, reg, vsel_reg, voltages, \ mc13892_sw_regulator_ops) #define MC13892_DEFINE_REGU(name, reg, vsel_reg, voltages) \ MC13xxx_DEFINE(MC13892_, name, reg, vsel_reg, voltages, \ mc13xxx_regulator_ops) static struct mc13xxx_regulator mc13892_regulators[] = { MC13892_DEFINE_REGU(VCOINCELL, POWERCTL0, POWERCTL0, mc13892_vcoincell), MC13892_SW_DEFINE(SW1, SWITCHERS0, SWITCHERS0, mc13892_sw1), MC13892_SW_DEFINE(SW2, SWITCHERS1, SWITCHERS1, mc13892_sw), MC13892_SW_DEFINE(SW3, SWITCHERS2, SWITCHERS2, mc13892_sw), MC13892_SW_DEFINE(SW4, SWITCHERS3, SWITCHERS3, mc13892_sw), MC13892_FIXED_DEFINE(SWBST, SWITCHERS5, mc13892_swbst), MC13892_FIXED_DEFINE(VIOHI, REGULATORMODE0, mc13892_viohi), MC13892_DEFINE_REGU(VPLL, REGULATORMODE0, REGULATORSETTING0, \ mc13892_vpll), MC13892_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0, \ mc13892_vdig), MC13892_DEFINE_REGU(VSD, REGULATORMODE1, REGULATORSETTING1, \ mc13892_vsd), MC13892_DEFINE_REGU(VUSB2, REGULATORMODE0, REGULATORSETTING0, \ mc13892_vusb2), MC13892_DEFINE_REGU(VVIDEO, REGULATORMODE1, REGULATORSETTING1, \ mc13892_vvideo), MC13892_DEFINE_REGU(VAUDIO, REGULATORMODE1, REGULATORSETTING1, \ mc13892_vaudio), MC13892_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0, \ mc13892_vcam), MC13892_DEFINE_REGU(VGEN1, REGULATORMODE0, REGULATORSETTING0, \ mc13892_vgen1), MC13892_DEFINE_REGU(VGEN2, REGULATORMODE0, 
REGULATORSETTING0, \ mc13892_vgen2), MC13892_DEFINE_REGU(VGEN3, REGULATORMODE1, REGULATORSETTING0, \ mc13892_vgen3), MC13892_FIXED_DEFINE(VUSB, USB1, mc13892_vusb), MC13892_GPO_DEFINE(GPO1, POWERMISC, mc13892_gpo), MC13892_GPO_DEFINE(GPO2, POWERMISC, mc13892_gpo), MC13892_GPO_DEFINE(GPO3, POWERMISC, mc13892_gpo), MC13892_GPO_DEFINE(GPO4, POWERMISC, mc13892_gpo), MC13892_GPO_DEFINE(PWGT1SPI, POWERMISC, mc13892_pwgtdrv), MC13892_GPO_DEFINE(PWGT2SPI, POWERMISC, mc13892_pwgtdrv), }; static int mc13892_powermisc_rmw(struct mc13xxx_regulator_priv *priv, u32 mask, u32 val) { struct mc13xxx *mc13892 = priv->mc13xxx; int ret; u32 valread; BUG_ON(val & ~mask); mc13xxx_lock(priv->mc13xxx); ret = mc13xxx_reg_read(mc13892, MC13892_POWERMISC, &valread); if (ret) goto out; /* Update the stored state for Power Gates. */ priv->powermisc_pwgt_state = (priv->powermisc_pwgt_state & ~mask) | val; priv->powermisc_pwgt_state &= MC13892_POWERMISC_PWGTSPI_M; /* Construct the new register value */ valread = (valread & ~mask) | val; /* Overwrite the PWGTxEN with the stored version */ valread = (valread & ~MC13892_POWERMISC_PWGTSPI_M) | priv->powermisc_pwgt_state; ret = mc13xxx_reg_write(mc13892, MC13892_POWERMISC, valread); out: mc13xxx_unlock(priv->mc13xxx); return ret; } static int mc13892_gpo_regulator_enable(struct regulator_dev *rdev) { struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); int id = rdev_get_id(rdev); u32 en_val = mc13892_regulators[id].enable_bit; u32 mask = mc13892_regulators[id].enable_bit; dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id); /* Power Gate enable value is 0 */ if (id == MC13892_PWGT1SPI || id == MC13892_PWGT2SPI) en_val = 0; if (id == MC13892_GPO4) mask |= MC13892_POWERMISC_GPO4ADINEN; return mc13892_powermisc_rmw(priv, mask, en_val); } static int mc13892_gpo_regulator_disable(struct regulator_dev *rdev) { struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); int id = rdev_get_id(rdev); u32 dis_val = 0; dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id); /* Power Gate disable value is 1 */ if (id == MC13892_PWGT1SPI || id == MC13892_PWGT2SPI) dis_val = mc13892_regulators[id].enable_bit; return mc13892_powermisc_rmw(priv, mc13892_regulators[id].enable_bit, dis_val); } static int mc13892_gpo_regulator_is_enabled(struct regulator_dev *rdev) { struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); int ret, id = rdev_get_id(rdev); unsigned int val; mc13xxx_lock(priv->mc13xxx); ret = mc13xxx_reg_read(priv->mc13xxx, mc13892_regulators[id].reg, &val); mc13xxx_unlock(priv->mc13xxx); if (ret) return ret; /* Power Gates state is stored in powermisc_pwgt_state * where the meaning of bits is negated */ val = (val & ~MC13892_POWERMISC_PWGTSPI_M) | (priv->powermisc_pwgt_state ^ MC13892_POWERMISC_PWGTSPI_M); return (val & mc13892_regulators[id].enable_bit) != 0; } static struct regulator_ops mc13892_gpo_regulator_ops = { .enable = mc13892_gpo_regulator_enable, .disable = mc13892_gpo_regulator_disable, .is_enabled = mc13892_gpo_regulator_is_enabled, .list_voltage = regulator_list_voltage_table, .set_voltage = mc13xxx_fixed_regulator_set_voltage, }; static int mc13892_sw_regulator_get_voltage_sel(struct regulator_dev *rdev) { struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); int ret, id = rdev_get_id(rdev); unsigned int val, selector; dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id); mc13xxx_lock(priv->mc13xxx); ret = mc13xxx_reg_read(priv->mc13xxx, mc13892_regulators[id].vsel_reg, &val); mc13xxx_unlock(priv->mc13xxx); if (ret) return ret; 
/* * Figure out if the HI bit is set inside the switcher mode register * since this means the selector value we return is at a different * offset into the selector table. * * According to the MC13892 documentation note 59 (Table 47) the SW1 * buck switcher does not support output range programming therefore * the HI bit must always remain 0. So do not do anything strange if * our register is MC13892_SWITCHERS0. */ selector = val & mc13892_regulators[id].vsel_mask; if ((mc13892_regulators[id].vsel_reg != MC13892_SWITCHERS0) && (val & MC13892_SWITCHERS0_SWxHI)) { selector += MC13892_SWxHI_SEL_OFFSET; } dev_dbg(rdev_get_dev(rdev), "%s id: %d val: 0x%08x selector: %d\n", __func__, id, val, selector); return selector; } static int mc13892_sw_regulator_set_voltage_sel(struct regulator_dev *rdev, unsigned selector) { struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); int volt, mask, id = rdev_get_id(rdev); u32 reg_value; int ret; volt = rdev->desc->volt_table[selector]; mask = mc13892_regulators[id].vsel_mask; reg_value = selector; /* * Don't mess with the HI bit or support HI voltage offsets for SW1. * * Since the get_voltage_sel callback has given a fudged value for * the selector offset, we need to back out that offset if HI is * to be set so we write the correct value to the register. * * The HI bit addition and selector offset handling COULD be more * complicated by shifting and masking off the voltage selector part * of the register then logical OR it back in, but since the selector * is at bits 4:0 there is very little point. This makes the whole * thing more readable and we do far less work. */ if (mc13892_regulators[id].vsel_reg != MC13892_SWITCHERS0) { mask |= MC13892_SWITCHERS0_SWxHI; if (volt > 1375000) { reg_value -= MC13892_SWxHI_SEL_OFFSET; reg_value |= MC13892_SWITCHERS0_SWxHI; } else { reg_value &= ~MC13892_SWITCHERS0_SWxHI; } } mc13xxx_lock(priv->mc13xxx); ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].vsel_reg, mask, reg_value); mc13xxx_unlock(priv->mc13xxx); return ret; } static struct regulator_ops mc13892_sw_regulator_ops = { .list_voltage = regulator_list_voltage_table, .map_voltage = regulator_map_voltage_ascend, .set_voltage_sel = mc13892_sw_regulator_set_voltage_sel, .get_voltage_sel = mc13892_sw_regulator_get_voltage_sel, }; static int mc13892_vcam_set_mode(struct regulator_dev *rdev, unsigned int mode) { unsigned int en_val = 0; struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); int ret, id = rdev_get_id(rdev); if (mode == REGULATOR_MODE_FAST) en_val = MC13892_REGULATORMODE1_VCAMCONFIGEN; mc13xxx_lock(priv->mc13xxx); ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].reg, MC13892_REGULATORMODE1_VCAMCONFIGEN, en_val); mc13xxx_unlock(priv->mc13xxx); return ret; } static unsigned int mc13892_vcam_get_mode(struct regulator_dev *rdev) { struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); int ret, id = rdev_get_id(rdev); unsigned int val; mc13xxx_lock(priv->mc13xxx); ret = mc13xxx_reg_read(priv->mc13xxx, mc13892_regulators[id].reg, &val); mc13xxx_unlock(priv->mc13xxx); if (ret) return ret; if (val & MC13892_REGULATORMODE1_VCAMCONFIGEN) return REGULATOR_MODE_FAST; return REGULATOR_MODE_NORMAL; } static int mc13892_regulator_probe(struct platform_device *pdev) { struct mc13xxx_regulator_priv *priv; struct mc13xxx *mc13892 = dev_get_drvdata(pdev->dev.parent); struct mc13xxx_regulator_platform_data *pdata = dev_get_platdata(&pdev->dev); struct mc13xxx_regulator_init_data *mc13xxx_data; struct regulator_config config = { }; 
	int i, ret;
	int num_regulators = 0;
	u32 val;

	num_regulators = mc13xxx_get_num_regulators_dt(pdev);
	if (num_regulators <= 0 && pdata)
		num_regulators = pdata->num_regulators;
	if (num_regulators <= 0)
		return -EINVAL;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv) +
		num_regulators * sizeof(priv->regulators[0]),
		GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->num_regulators = num_regulators;
	priv->mc13xxx_regulators = mc13892_regulators;
	priv->mc13xxx = mc13892;
	platform_set_drvdata(pdev, priv);

	mc13xxx_lock(mc13892);
	ret = mc13xxx_reg_read(mc13892, MC13892_REVISION, &val);
	if (ret)
		goto err_unlock;

	/* enable switch auto mode (on 2.0A silicon only) */
	if ((val & 0x0000FFFF) == 0x45d0) {
		ret = mc13xxx_reg_rmw(mc13892, MC13892_SWITCHERS4,
			MC13892_SWITCHERS4_SW1MODE_M |
			MC13892_SWITCHERS4_SW2MODE_M,
			MC13892_SWITCHERS4_SW1MODE_AUTO |
			MC13892_SWITCHERS4_SW2MODE_AUTO);
		if (ret)
			goto err_unlock;

		ret = mc13xxx_reg_rmw(mc13892, MC13892_SWITCHERS5,
			MC13892_SWITCHERS5_SW3MODE_M |
			MC13892_SWITCHERS5_SW4MODE_M,
			MC13892_SWITCHERS5_SW3MODE_AUTO |
			MC13892_SWITCHERS5_SW4MODE_AUTO);
		if (ret)
			goto err_unlock;
	}
	mc13xxx_unlock(mc13892);

	mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
		= mc13892_vcam_set_mode;
	mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
		= mc13892_vcam_get_mode;

	mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
					ARRAY_SIZE(mc13892_regulators));

	for (i = 0; i < priv->num_regulators; i++) {
		struct regulator_init_data *init_data;
		struct regulator_desc *desc;
		struct device_node *node = NULL;
		int id;

		if (mc13xxx_data) {
			id = mc13xxx_data[i].id;
			init_data = mc13xxx_data[i].init_data;
			node = mc13xxx_data[i].node;
		} else {
			id = pdata->regulators[i].id;
			init_data = pdata->regulators[i].init_data;
		}
		desc = &mc13892_regulators[id].desc;

		config.dev = &pdev->dev;
		config.init_data = init_data;
		config.driver_data = priv;
		config.of_node = node;

		priv->regulators[i] = regulator_register(desc, &config);
		if (IS_ERR(priv->regulators[i])) {
			dev_err(&pdev->dev, "failed to register regulator %s\n",
				mc13892_regulators[id].desc.name);
			ret = PTR_ERR(priv->regulators[i]);
			goto err;
		}
	}

	return 0;
err:
	while (--i >= 0)
		regulator_unregister(priv->regulators[i]);
	return ret;

err_unlock:
	mc13xxx_unlock(mc13892);
	return ret;
}

static int mc13892_regulator_remove(struct platform_device *pdev)
{
	struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
	int i;

	platform_set_drvdata(pdev, NULL);

	for (i = 0; i < priv->num_regulators; i++)
		regulator_unregister(priv->regulators[i]);

	return 0;
}

static struct platform_driver mc13892_regulator_driver = {
	.driver = {
		.name	= "mc13892-regulator",
		.owner	= THIS_MODULE,
	},
	.remove	= mc13892_regulator_remove,
	.probe	= mc13892_regulator_probe,
};

static int __init mc13892_regulator_init(void)
{
	return platform_driver_register(&mc13892_regulator_driver);
}
subsys_initcall(mc13892_regulator_init);

static void __exit mc13892_regulator_exit(void)
{
	platform_driver_unregister(&mc13892_regulator_driver);
}
module_exit(mc13892_regulator_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yong Shen <yong.shen@linaro.org>");
MODULE_DESCRIPTION("Regulator Driver for Freescale MC13892 PMIC");
MODULE_ALIAS("platform:mc13892-regulator");
gpl-2.0
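For context, a minimal board-file sketch showing how regulator constraints reach the mc13892-regulator driver above when devicetree is not used: probe reads the platform data through dev_get_platdata() and walks pdata->regulators[]. The myboard_* names and the SW1 constraints are illustrative assumptions; only the mc13xxx_regulator_* structures and the MC13892_SW1 id come from the driver.

/*
 * Illustrative sketch, not part of the driver source.  All myboard_*
 * names are hypothetical; MC13892_SW1 and the mc13xxx_regulator_*
 * types are the ones consumed by mc13892_regulator_probe() above.
 */
static struct regulator_init_data myboard_sw1_init = {
	.constraints = {
		.name		= "SW1",
		.min_uV		= 600000,	/* first entry of mc13892_sw1[] */
		.max_uV		= 1375000,	/* last entry of mc13892_sw1[] */
		.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE,
		.always_on	= 1,
	},
};

static struct mc13xxx_regulator_init_data myboard_regulators[] = {
	{ .id = MC13892_SW1, .init_data = &myboard_sw1_init },
};

static struct mc13xxx_regulator_platform_data myboard_regulator_pdata = {
	.num_regulators	= ARRAY_SIZE(myboard_regulators),
	.regulators	= myboard_regulators,
};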
Dees-Troy/android_kernel_samsung_coreprimelte
arch/powerpc/platforms/pseries/pci_dlpar.c
2092
6433
/*
 * PCI Dynamic LPAR, PCI Hot Plug and PCI EEH recovery code
 * for RPA-compliant PPC64 platform.
 * Copyright (C) 2003 Linda Xie <lxie@us.ibm.com>
 * Copyright (C) 2005 International Business Machines
 *
 * Updates, 2005, John Rose <johnrose@austin.ibm.com>
 * Updates, 2005, Linas Vepstas <linas@austin.ibm.com>
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/pci.h>
#include <linux/export.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/firmware.h>
#include <asm/eeh.h>

static struct pci_bus *
find_bus_among_children(struct pci_bus *bus, struct device_node *dn)
{
	struct pci_bus *child = NULL;
	struct list_head *tmp;
	struct device_node *busdn;

	busdn = pci_bus_to_OF_node(bus);
	if (busdn == dn)
		return bus;

	list_for_each(tmp, &bus->children) {
		child = find_bus_among_children(pci_bus_b(tmp), dn);
		if (child)
			break;
	}

	return child;
}

struct pci_bus *
pcibios_find_pci_bus(struct device_node *dn)
{
	struct pci_dn *pdn = dn->data;

	if (!pdn || !pdn->phb || !pdn->phb->bus)
		return NULL;

	return find_bus_among_children(pdn->phb->bus, dn);
}
EXPORT_SYMBOL_GPL(pcibios_find_pci_bus);

/**
 * __pcibios_remove_pci_devices - remove all devices under this bus
 * @bus: the indicated PCI bus
 * @purge_pe: destroy the PE on removal of PCI devices
 *
 * Remove all of the PCI devices under this bus both from the
 * linux pci device tree, and from the powerpc EEH address cache.
 * By default, the corresponding PE will be destroyed during the
 * normal PCI hotplug path. For PCI hotplug during EEH recovery,
 * the corresponding PE won't be destroyed and deallocated.
 */
void __pcibios_remove_pci_devices(struct pci_bus *bus, int purge_pe)
{
	struct pci_dev *dev, *tmp;
	struct pci_bus *child_bus;

	/* First go down child busses */
	list_for_each_entry(child_bus, &bus->children, node)
		__pcibios_remove_pci_devices(child_bus, purge_pe);

	pr_debug("PCI: Removing devices on bus %04x:%02x\n",
		 pci_domain_nr(bus), bus->number);
	list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) {
		pr_debug(" * Removing %s...\n", pci_name(dev));
		eeh_remove_bus_device(dev, purge_pe);
		pci_stop_and_remove_bus_device(dev);
	}
}

/**
 * pcibios_remove_pci_devices - remove all devices under this bus
 *
 * Remove all of the PCI devices under this bus both from the
 * linux pci device tree, and from the powerpc EEH address cache.
 */
void pcibios_remove_pci_devices(struct pci_bus *bus)
{
	__pcibios_remove_pci_devices(bus, 1);
}
EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices);

/**
 * pcibios_add_pci_devices - adds new pci devices to bus
 *
 * This routine will find and fixup new pci devices under
 * the indicated bus. This routine presumes that there
 * might already be some devices under this bridge, so
 * it carefully tries to add only new devices.  (And that
 * is how this routine differs from other, similar pcibios
 * routines.)
 */
void pcibios_add_pci_devices(struct pci_bus * bus)
{
	int slotno, num, mode, pass, max;
	struct pci_dev *dev;
	struct device_node *dn = pci_bus_to_OF_node(bus);

	eeh_add_device_tree_early(dn);

	mode = PCI_PROBE_NORMAL;
	if (ppc_md.pci_probe_mode)
		mode = ppc_md.pci_probe_mode(bus);

	if (mode == PCI_PROBE_DEVTREE) {
		/* use ofdt-based probe */
		of_rescan_bus(dn, bus);
	} else if (mode == PCI_PROBE_NORMAL) {
		/* use legacy probe */
		slotno = PCI_SLOT(PCI_DN(dn->child)->devfn);
		num = pci_scan_slot(bus, PCI_DEVFN(slotno, 0));
		if (!num)
			return;
		pcibios_setup_bus_devices(bus);
		max = bus->busn_res.start;
		for (pass = 0; pass < 2; pass++)
			list_for_each_entry(dev, &bus->devices, bus_list) {
				if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
				    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
					max = pci_scan_bridge(bus, dev,
							      max, pass);
			}
	}
	pcibios_finish_adding_to_bus(bus);
}
EXPORT_SYMBOL_GPL(pcibios_add_pci_devices);

struct pci_controller *init_phb_dynamic(struct device_node *dn)
{
	struct pci_controller *phb;

	pr_debug("PCI: Initializing new hotplug PHB %s\n", dn->full_name);

	phb = pcibios_alloc_controller(dn);
	if (!phb)
		return NULL;
	rtas_setup_phb(phb);
	pci_process_bridge_OF_ranges(phb, dn, 0);

	pci_devs_phb_init_dynamic(phb);

	/* Create EEH devices for the PHB */
	eeh_dev_phb_init_dynamic(phb);

	if (dn->child)
		eeh_add_device_tree_early(dn);

	pcibios_scan_phb(phb);
	pcibios_finish_adding_to_bus(phb->bus);

	return phb;
}
EXPORT_SYMBOL_GPL(init_phb_dynamic);

/* RPA-specific bits for removing PHBs */
int remove_phb_dynamic(struct pci_controller *phb)
{
	struct pci_bus *b = phb->bus;
	struct resource *res;
	int rc, i;

	pr_debug("PCI: Removing PHB %04x:%02x...\n",
		 pci_domain_nr(b), b->number);

	/* We cannot remove a root bus that has children */
	if (!(list_empty(&b->children) && list_empty(&b->devices)))
		return -EBUSY;

	/* We -know- there aren't any child devices anymore at this stage
	 * and thus, we can safely unmap the IO space as it's not in use
	 */
	res = &phb->io_resource;
	if (res->flags & IORESOURCE_IO) {
		rc = pcibios_unmap_io_space(b);
		if (rc) {
			printk(KERN_ERR "%s: failed to unmap IO on bus %s\n",
			       __func__, b->name);
			return 1;
		}
	}

	/* Unregister the bridge device from sysfs and remove the PCI bus */
	device_unregister(b->bridge);
	phb->bus = NULL;
	pci_remove_bus(b);

	/* Now release the IO resource */
	if (res->flags & IORESOURCE_IO)
		release_resource(res);

	/* Release memory resources */
	for (i = 0; i < 3; ++i) {
		res = &phb->mem_resources[i];
		if (!(res->flags & IORESOURCE_MEM))
			continue;
		release_resource(res);
	}

	/* Free pci_controller data structure */
	pcibios_free_controller(phb);

	return 0;
}
EXPORT_SYMBOL_GPL(remove_phb_dynamic);
gpl-2.0
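To make the exported entry points above concrete, here is a short, hypothetical sketch of how a DLPAR/hotplug caller might drive them. Only init_phb_dynamic(), remove_phb_dynamic() and pcibios_remove_pci_devices() come from the file above; the example_* wrappers and the overall flow are assumptions, not code from the kernel tree.

/*
 * Illustrative sketch only: a hypothetical DLPAR caller.  The exported
 * helpers are real (see above); the example_* wrappers are not.
 */
static int example_dlpar_add_phb(struct device_node *dn)
{
	struct pci_controller *phb = init_phb_dynamic(dn);

	return phb ? 0 : -ENODEV;
}

static int example_dlpar_remove_phb(struct pci_controller *phb)
{
	/* remove_phb_dynamic() refuses a bus with children, so empty it first */
	if (phb->bus)
		pcibios_remove_pci_devices(phb->bus);

	return remove_phb_dynamic(phb);
}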
kurainooni/rk30-kernel
arch/arm/plat-omap/devices.c
2092
7653
/* * linux/arch/arm/plat-omap/devices.c * * Common platform device setup/initialization for OMAP1 and OMAP2 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/memblock.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/mach/map.h> #include <plat/tc.h> #include <plat/board.h> #include <plat/mmc.h> #include <mach/gpio.h> #include <plat/menelaus.h> #include <plat/mcbsp.h> #include <plat/omap44xx.h> /*-------------------------------------------------------------------------*/ #if defined(CONFIG_OMAP_MCBSP) || defined(CONFIG_OMAP_MCBSP_MODULE) static struct platform_device **omap_mcbsp_devices; void omap_mcbsp_register_board_cfg(struct resource *res, int res_count, struct omap_mcbsp_platform_data *config, int size) { int i; omap_mcbsp_devices = kzalloc(size * sizeof(struct platform_device *), GFP_KERNEL); if (!omap_mcbsp_devices) { printk(KERN_ERR "Could not register McBSP devices\n"); return; } for (i = 0; i < size; i++) { struct platform_device *new_mcbsp; int ret; new_mcbsp = platform_device_alloc("omap-mcbsp", i + 1); if (!new_mcbsp) continue; platform_device_add_resources(new_mcbsp, &res[i * res_count], res_count); new_mcbsp->dev.platform_data = &config[i]; ret = platform_device_add(new_mcbsp); if (ret) { platform_device_put(new_mcbsp); continue; } omap_mcbsp_devices[i] = new_mcbsp; } } #else void omap_mcbsp_register_board_cfg(struct resource *res, int res_count, struct omap_mcbsp_platform_data *config, int size) { } #endif /*-------------------------------------------------------------------------*/ #if defined(CONFIG_SND_OMAP_SOC_MCPDM) || \ defined(CONFIG_SND_OMAP_SOC_MCPDM_MODULE) static struct resource mcpdm_resources[] = { { .name = "mcpdm_mem", .start = OMAP44XX_MCPDM_BASE, .end = OMAP44XX_MCPDM_BASE + SZ_4K, .flags = IORESOURCE_MEM, }, { .name = "mcpdm_irq", .start = OMAP44XX_IRQ_MCPDM, .end = OMAP44XX_IRQ_MCPDM, .flags = IORESOURCE_IRQ, }, }; static struct platform_device omap_mcpdm_device = { .name = "omap-mcpdm", .id = -1, .num_resources = ARRAY_SIZE(mcpdm_resources), .resource = mcpdm_resources, }; static void omap_init_mcpdm(void) { (void) platform_device_register(&omap_mcpdm_device); } #else static inline void omap_init_mcpdm(void) {} #endif /*-------------------------------------------------------------------------*/ #if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE) || \ defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE) #define OMAP_MMC_NR_RES 2 /* * Register MMC devices. Called from mach-omap1 and mach-omap2 device init. 
*/ int __init omap_mmc_add(const char *name, int id, unsigned long base, unsigned long size, unsigned int irq, struct omap_mmc_platform_data *data) { struct platform_device *pdev; struct resource res[OMAP_MMC_NR_RES]; int ret; pdev = platform_device_alloc(name, id); if (!pdev) return -ENOMEM; memset(res, 0, OMAP_MMC_NR_RES * sizeof(struct resource)); res[0].start = base; res[0].end = base + size - 1; res[0].flags = IORESOURCE_MEM; res[1].start = res[1].end = irq; res[1].flags = IORESOURCE_IRQ; ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res)); if (ret == 0) ret = platform_device_add_data(pdev, data, sizeof(*data)); if (ret) goto fail; ret = platform_device_add(pdev); if (ret) goto fail; /* return device handle to board setup code */ data->dev = &pdev->dev; return 0; fail: platform_device_put(pdev); return ret; } #endif /*-------------------------------------------------------------------------*/ #if defined(CONFIG_HW_RANDOM_OMAP) || defined(CONFIG_HW_RANDOM_OMAP_MODULE) #ifdef CONFIG_ARCH_OMAP2 #define OMAP_RNG_BASE 0x480A0000 #else #define OMAP_RNG_BASE 0xfffe5000 #endif static struct resource rng_resources[] = { { .start = OMAP_RNG_BASE, .end = OMAP_RNG_BASE + 0x4f, .flags = IORESOURCE_MEM, }, }; static struct platform_device omap_rng_device = { .name = "omap_rng", .id = -1, .num_resources = ARRAY_SIZE(rng_resources), .resource = rng_resources, }; static void omap_init_rng(void) { (void) platform_device_register(&omap_rng_device); } #else static inline void omap_init_rng(void) {} #endif /*-------------------------------------------------------------------------*/ /* Numbering for the SPI-capable controllers when used for SPI: * spi = 1 * uwire = 2 * mmc1..2 = 3..4 * mcbsp1..3 = 5..7 */ #if defined(CONFIG_SPI_OMAP_UWIRE) || defined(CONFIG_SPI_OMAP_UWIRE_MODULE) #define OMAP_UWIRE_BASE 0xfffb3000 static struct resource uwire_resources[] = { { .start = OMAP_UWIRE_BASE, .end = OMAP_UWIRE_BASE + 0x20, .flags = IORESOURCE_MEM, }, }; static struct platform_device omap_uwire_device = { .name = "omap_uwire", .id = -1, .num_resources = ARRAY_SIZE(uwire_resources), .resource = uwire_resources, }; static void omap_init_uwire(void) { /* FIXME define and use a boot tag; not all boards will be hooking * up devices to the microwire controller, and multi-board configs * mean that CONFIG_SPI_OMAP_UWIRE may be configured anyway... */ /* board-specific code must configure chipselects (only a few * are normally used) and SCLK/SDI/SDO (each has two choices). */ (void) platform_device_register(&omap_uwire_device); } #else static inline void omap_init_uwire(void) {} #endif #if defined(CONFIG_TIDSPBRIDGE) || defined(CONFIG_TIDSPBRIDGE_MODULE) static phys_addr_t omap_dsp_phys_mempool_base; void __init omap_dsp_reserve_sdram_memblock(void) { phys_addr_t size = CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE; phys_addr_t paddr; if (!size) return; paddr = memblock_alloc(size, SZ_1M); if (!paddr) { pr_err("%s: failed to reserve %x bytes\n", __func__, size); return; } memblock_free(paddr, size); memblock_remove(paddr, size); omap_dsp_phys_mempool_base = paddr; } phys_addr_t omap_dsp_get_mempool_base(void) { return omap_dsp_phys_mempool_base; } EXPORT_SYMBOL(omap_dsp_get_mempool_base); #endif /* * This gets called after board-specific INIT_MACHINE, and initializes most * on-chip peripherals accessible on this board (except for few like USB): * * (a) Does any "standard config" pin muxing needed. Board-specific * code will have muxed GPIO pins and done "nonstandard" setup; * that code could live in the boot loader. 
* (b) Populating board-specific platform_data with the data drivers * rely on to handle wiring variations. * (c) Creating platform devices as meaningful on this board and * with this kernel configuration. * * Claiming GPIOs, and setting their direction and initial values, is the * responsibility of the device drivers. So is responding to probe(). * * Board-specific knowledge like creating devices or pin setup is to be * kept out of drivers as much as possible. In particular, pin setup * may be handled by the boot loader, and drivers should expect it will * normally have been done by the time they're probed. */ static int __init omap_init_devices(void) { /* please keep these calls, and their implementations above, * in alphabetical order so they're easier to sort through. */ omap_init_rng(); omap_init_mcpdm(); omap_init_uwire(); return 0; } arch_initcall(omap_init_devices);
gpl-2.0
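A brief, hypothetical sketch of the board-side call into omap_mmc_add() above. The EXAMPLE_* values and the wrapper name are placeholders, not real OMAP addresses or IRQ numbers, and "mmci-omap" is used only as a plausible driver-name argument; the function signature itself comes from the file.

/*
 * Illustrative sketch only: board init code registering one MMC
 * controller through omap_mmc_add() above.  EXAMPLE_* values are
 * placeholders.
 */
#define EXAMPLE_MMC_BASE	0x48000000	/* placeholder base address */
#define EXAMPLE_MMC_SIZE	SZ_4K		/* placeholder region size */
#define EXAMPLE_MMC_IRQ		83		/* placeholder interrupt */

static int __init example_board_mmc_init(struct omap_mmc_platform_data *pdata)
{
	/* on success, omap_mmc_add() stores the device handle in pdata->dev */
	return omap_mmc_add("mmci-omap", 0, EXAMPLE_MMC_BASE,
			    EXAMPLE_MMC_SIZE, EXAMPLE_MMC_IRQ, pdata);
}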
putuotingchan/kernel_goldfish
drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
2092
64111
/* * V4L2 Driver for SuperH Mobile CEU interface * * Copyright (C) 2008 Magnus Damm * * Based on V4L2 Driver for PXA camera host - "pxa_camera.c", * * Copyright (C) 2006, Sascha Hauer, Pengutronix * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/init.h> #include <linux/module.h> #include <linux/io.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/moduleparam.h> #include <linux/time.h> #include <linux/slab.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/videodev2.h> #include <linux/pm_runtime.h> #include <linux/sched.h> #include <media/v4l2-common.h> #include <media/v4l2-dev.h> #include <media/soc_camera.h> #include <media/sh_mobile_ceu.h> #include <media/sh_mobile_csi2.h> #include <media/videobuf2-dma-contig.h> #include <media/v4l2-mediabus.h> #include <media/soc_mediabus.h> /* register offsets for sh7722 / sh7723 */ #define CAPSR 0x00 /* Capture start register */ #define CAPCR 0x04 /* Capture control register */ #define CAMCR 0x08 /* Capture interface control register */ #define CMCYR 0x0c /* Capture interface cycle register */ #define CAMOR 0x10 /* Capture interface offset register */ #define CAPWR 0x14 /* Capture interface width register */ #define CAIFR 0x18 /* Capture interface input format register */ #define CSTCR 0x20 /* Camera strobe control register (<= sh7722) */ #define CSECR 0x24 /* Camera strobe emission count register (<= sh7722) */ #define CRCNTR 0x28 /* CEU register control register */ #define CRCMPR 0x2c /* CEU register forcible control register */ #define CFLCR 0x30 /* Capture filter control register */ #define CFSZR 0x34 /* Capture filter size clip register */ #define CDWDR 0x38 /* Capture destination width register */ #define CDAYR 0x3c /* Capture data address Y register */ #define CDACR 0x40 /* Capture data address C register */ #define CDBYR 0x44 /* Capture data bottom-field address Y register */ #define CDBCR 0x48 /* Capture data bottom-field address C register */ #define CBDSR 0x4c /* Capture bundle destination size register */ #define CFWCR 0x5c /* Firewall operation control register */ #define CLFCR 0x60 /* Capture low-pass filter control register */ #define CDOCR 0x64 /* Capture data output control register */ #define CDDCR 0x68 /* Capture data complexity level register */ #define CDDAR 0x6c /* Capture data complexity level address register */ #define CEIER 0x70 /* Capture event interrupt enable register */ #define CETCR 0x74 /* Capture event flag clear register */ #define CSTSR 0x7c /* Capture status register */ #define CSRTR 0x80 /* Capture software reset register */ #define CDSSR 0x84 /* Capture data size register */ #define CDAYR2 0x90 /* Capture data address Y register 2 */ #define CDACR2 0x94 /* Capture data address C register 2 */ #define CDBYR2 0x98 /* Capture data bottom-field address Y register 2 */ #define CDBCR2 0x9c /* Capture data bottom-field address C register 2 */ #undef DEBUG_GEOMETRY #ifdef DEBUG_GEOMETRY #define dev_geo dev_info #else #define dev_geo dev_dbg #endif /* per video frame buffer */ struct sh_mobile_ceu_buffer { struct 
vb2_buffer vb; /* v4l buffer must be first */ struct list_head queue; }; struct sh_mobile_ceu_dev { struct soc_camera_host ici; struct soc_camera_device *icd; struct platform_device *csi2_pdev; unsigned int irq; void __iomem *base; size_t video_limit; size_t buf_total; spinlock_t lock; /* Protects video buffer lists */ struct list_head capture; struct vb2_buffer *active; struct vb2_alloc_ctx *alloc_ctx; struct sh_mobile_ceu_info *pdata; struct completion complete; u32 cflcr; /* static max sizes either from platform data or default */ int max_width; int max_height; enum v4l2_field field; int sequence; unsigned int image_mode:1; unsigned int is_16bit:1; unsigned int frozen:1; }; struct sh_mobile_ceu_cam { /* CEU offsets within the camera output, before the CEU scaler */ unsigned int ceu_left; unsigned int ceu_top; /* Client output, as seen by the CEU */ unsigned int width; unsigned int height; /* * User window from S_CROP / G_CROP, produced by client cropping and * scaling, CEU scaling and CEU cropping, mapped back onto the client * input window */ struct v4l2_rect subrect; /* Camera cropping rectangle */ struct v4l2_rect rect; const struct soc_mbus_pixelfmt *extra_fmt; enum v4l2_mbus_pixelcode code; }; static struct sh_mobile_ceu_buffer *to_ceu_vb(struct vb2_buffer *vb) { return container_of(vb, struct sh_mobile_ceu_buffer, vb); } static void ceu_write(struct sh_mobile_ceu_dev *priv, unsigned long reg_offs, u32 data) { iowrite32(data, priv->base + reg_offs); } static u32 ceu_read(struct sh_mobile_ceu_dev *priv, unsigned long reg_offs) { return ioread32(priv->base + reg_offs); } static int sh_mobile_ceu_soft_reset(struct sh_mobile_ceu_dev *pcdev) { int i, success = 0; struct soc_camera_device *icd = pcdev->icd; ceu_write(pcdev, CAPSR, 1 << 16); /* reset */ /* wait CSTSR.CPTON bit */ for (i = 0; i < 1000; i++) { if (!(ceu_read(pcdev, CSTSR) & 1)) { success++; break; } udelay(1); } /* wait CAPSR.CPKIL bit */ for (i = 0; i < 1000; i++) { if (!(ceu_read(pcdev, CAPSR) & (1 << 16))) { success++; break; } udelay(1); } if (2 != success) { dev_warn(icd->pdev, "soft reset time out\n"); return -EIO; } return 0; } /* * Videobuf operations */ /* * .queue_setup() is called to check, whether the driver can accept the * requested number of buffers and to fill in plane sizes * for the current frame format if required */ static int sh_mobile_ceu_videobuf_setup(struct vb2_queue *vq, const struct v4l2_format *fmt, unsigned int *count, unsigned int *num_planes, unsigned int sizes[], void *alloc_ctxs[]) { struct soc_camera_device *icd = container_of(vq, struct soc_camera_device, vb2_vidq); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; if (fmt) { const struct soc_camera_format_xlate *xlate = soc_camera_xlate_by_fourcc(icd, fmt->fmt.pix.pixelformat); unsigned int bytes_per_line; int ret; if (!xlate) return -EINVAL; ret = soc_mbus_bytes_per_line(fmt->fmt.pix.width, xlate->host_fmt); if (ret < 0) return ret; bytes_per_line = max_t(u32, fmt->fmt.pix.bytesperline, ret); ret = soc_mbus_image_size(xlate->host_fmt, bytes_per_line, fmt->fmt.pix.height); if (ret < 0) return ret; sizes[0] = max_t(u32, fmt->fmt.pix.sizeimage, ret); } else { /* Called from VIDIOC_REQBUFS or in compatibility mode */ sizes[0] = icd->sizeimage; } alloc_ctxs[0] = pcdev->alloc_ctx; if (!vq->num_buffers) pcdev->sequence = 0; if (!*count) *count = 2; /* If *num_planes != 0, we have already verified *count. 
 */
	if (pcdev->video_limit && !*num_planes) {
		size_t size = PAGE_ALIGN(sizes[0]) * *count;

		if (size + pcdev->buf_total > pcdev->video_limit)
			*count = (pcdev->video_limit - pcdev->buf_total) /
				PAGE_ALIGN(sizes[0]);
	}

	*num_planes = 1;

	dev_dbg(icd->parent, "count=%d, size=%u\n", *count, sizes[0]);

	return 0;
}

#define CEU_CETCR_MAGIC 0x0317f313 /* acknowledge magical interrupt sources */
#define CEU_CETCR_IGRW (1 << 4) /* prohibited register access interrupt bit */
#define CEU_CEIER_CPEIE (1 << 0) /* one-frame capture end interrupt */
#define CEU_CEIER_VBP (1 << 20) /* vbp error */
#define CEU_CAPCR_CTNCP (1 << 16) /* continuous capture mode (if set) */
#define CEU_CEIER_MASK (CEU_CEIER_CPEIE | CEU_CEIER_VBP)

/*
 * return value doesn't reflect the success/failure to queue the new buffer,
 * but rather the status of the previous buffer.
 */
static int sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev)
{
	struct soc_camera_device *icd = pcdev->icd;
	dma_addr_t phys_addr_top, phys_addr_bottom;
	unsigned long top1, top2;
	unsigned long bottom1, bottom2;
	u32 status;
	bool planar;
	int ret = 0;

	/*
	 * The hardware is _very_ picky about this sequence. Especially
	 * the CEU_CETCR_MAGIC value. It seems like we need to acknowledge
	 * several not-so-well documented interrupt sources in CETCR.
	 */
	ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) & ~CEU_CEIER_MASK);
	status = ceu_read(pcdev, CETCR);
	ceu_write(pcdev, CETCR, ~status & CEU_CETCR_MAGIC);
	if (!pcdev->frozen)
		ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) | CEU_CEIER_MASK);
	ceu_write(pcdev, CAPCR, ceu_read(pcdev, CAPCR) & ~CEU_CAPCR_CTNCP);
	ceu_write(pcdev, CETCR, CEU_CETCR_MAGIC ^ CEU_CETCR_IGRW);

	/*
	 * When a VBP interrupt occurs, a capture end interrupt does not occur
	 * and the image of that frame is not captured correctly. So, soft reset
	 * is needed here.
	 */
	if (status & CEU_CEIER_VBP) {
		sh_mobile_ceu_soft_reset(pcdev);
		ret = -EIO;
	}

	if (pcdev->frozen) {
		complete(&pcdev->complete);
		return ret;
	}

	if (!pcdev->active)
		return ret;

	if (V4L2_FIELD_INTERLACED_BT == pcdev->field) {
		top1	= CDBYR;
		top2	= CDBCR;
		bottom1	= CDAYR;
		bottom2	= CDACR;
	} else {
		top1	= CDAYR;
		top2	= CDACR;
		bottom1	= CDBYR;
		bottom2	= CDBCR;
	}

	phys_addr_top = vb2_dma_contig_plane_dma_addr(pcdev->active, 0);

	switch (icd->current_fmt->host_fmt->fourcc) {
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
		planar = true;
		break;
	default:
		planar = false;
	}

	ceu_write(pcdev, top1, phys_addr_top);
	if (V4L2_FIELD_NONE != pcdev->field) {
		phys_addr_bottom = phys_addr_top + icd->bytesperline;
		ceu_write(pcdev, bottom1, phys_addr_bottom);
	}

	if (planar) {
		phys_addr_top += icd->bytesperline * icd->user_height;
		ceu_write(pcdev, top2, phys_addr_top);
		if (V4L2_FIELD_NONE != pcdev->field) {
			phys_addr_bottom = phys_addr_top + icd->bytesperline;
			ceu_write(pcdev, bottom2, phys_addr_bottom);
		}
	}

	ceu_write(pcdev, CAPSR, 0x1); /* start capture */

	return ret;
}

static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb)
{
	struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb);

	/* Added list head initialization on alloc */
	WARN(!list_empty(&buf->queue), "Buffer %p on queue!\n", vb);

	return 0;
}

static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb)
{
	struct soc_camera_device *icd = container_of(vb->vb2_queue,
			struct soc_camera_device, vb2_vidq);
	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
	struct sh_mobile_ceu_dev *pcdev = ici->priv;
	struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb);
	unsigned long size;

	size = icd->sizeimage;

	if (vb2_plane_size(vb, 0) < size) {
		dev_err(icd->parent, "Buffer #%d too small (%lu < %lu)\n",
			vb->v4l2_buf.index, vb2_plane_size(vb, 0), size);
		goto error;
	}

	vb2_set_plane_payload(vb, 0, size);

	dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
		vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));

#ifdef DEBUG
	/*
	 * This can be useful if you want to see if we actually fill
	 * the buffer with something
	 */
	if (vb2_plane_vaddr(vb, 0))
		memset(vb2_plane_vaddr(vb, 0), 0xaa, vb2_get_plane_payload(vb, 0));
#endif

	spin_lock_irq(&pcdev->lock);
	list_add_tail(&buf->queue, &pcdev->capture);

	if (!pcdev->active) {
		/*
		 * Because there was no active buffer at this moment,
		 * we are not interested in the return value of
		 * sh_mobile_ceu_capture here.
*/ pcdev->active = vb; sh_mobile_ceu_capture(pcdev); } spin_unlock_irq(&pcdev->lock); return; error: vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); } static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb) { struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb); struct sh_mobile_ceu_dev *pcdev = ici->priv; spin_lock_irq(&pcdev->lock); if (pcdev->active == vb) { /* disable capture (release DMA buffer), reset */ ceu_write(pcdev, CAPSR, 1 << 16); pcdev->active = NULL; } /* * Doesn't hurt also if the list is empty, but it hurts, if queuing the * buffer failed, and .buf_init() hasn't been called */ if (buf->queue.next) list_del_init(&buf->queue); pcdev->buf_total -= PAGE_ALIGN(vb2_plane_size(vb, 0)); dev_dbg(icd->parent, "%s() %zu bytes buffers\n", __func__, pcdev->buf_total); spin_unlock_irq(&pcdev->lock); } static int sh_mobile_ceu_videobuf_init(struct vb2_buffer *vb) { struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; pcdev->buf_total += PAGE_ALIGN(vb2_plane_size(vb, 0)); dev_dbg(icd->parent, "%s() %zu bytes buffers\n", __func__, pcdev->buf_total); /* This is for locking debugging only */ INIT_LIST_HEAD(&to_ceu_vb(vb)->queue); return 0; } static int sh_mobile_ceu_stop_streaming(struct vb2_queue *q) { struct soc_camera_device *icd = container_of(q, struct soc_camera_device, vb2_vidq); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; struct list_head *buf_head, *tmp; spin_lock_irq(&pcdev->lock); pcdev->active = NULL; list_for_each_safe(buf_head, tmp, &pcdev->capture) list_del_init(buf_head); spin_unlock_irq(&pcdev->lock); return sh_mobile_ceu_soft_reset(pcdev); } static struct vb2_ops sh_mobile_ceu_videobuf_ops = { .queue_setup = sh_mobile_ceu_videobuf_setup, .buf_prepare = sh_mobile_ceu_videobuf_prepare, .buf_queue = sh_mobile_ceu_videobuf_queue, .buf_cleanup = sh_mobile_ceu_videobuf_release, .buf_init = sh_mobile_ceu_videobuf_init, .wait_prepare = soc_camera_unlock, .wait_finish = soc_camera_lock, .stop_streaming = sh_mobile_ceu_stop_streaming, }; static irqreturn_t sh_mobile_ceu_irq(int irq, void *data) { struct sh_mobile_ceu_dev *pcdev = data; struct vb2_buffer *vb; int ret; spin_lock(&pcdev->lock); vb = pcdev->active; if (!vb) /* Stale interrupt from a released buffer */ goto out; list_del_init(&to_ceu_vb(vb)->queue); if (!list_empty(&pcdev->capture)) pcdev->active = &list_entry(pcdev->capture.next, struct sh_mobile_ceu_buffer, queue)->vb; else pcdev->active = NULL; ret = sh_mobile_ceu_capture(pcdev); v4l2_get_timestamp(&vb->v4l2_buf.timestamp); if (!ret) { vb->v4l2_buf.field = pcdev->field; vb->v4l2_buf.sequence = pcdev->sequence++; } vb2_buffer_done(vb, ret < 0 ? 
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); out: spin_unlock(&pcdev->lock); return IRQ_HANDLED; } static struct v4l2_subdev *find_csi2(struct sh_mobile_ceu_dev *pcdev) { struct v4l2_subdev *sd; if (!pcdev->csi2_pdev) return NULL; v4l2_device_for_each_subdev(sd, &pcdev->ici.v4l2_dev) if (&pcdev->csi2_pdev->dev == v4l2_get_subdevdata(sd)) return sd; return NULL; } /* Called with .host_lock held */ static int sh_mobile_ceu_add_device(struct soc_camera_device *icd) { struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; struct v4l2_subdev *csi2_sd; int ret; if (pcdev->icd) return -EBUSY; dev_info(icd->parent, "SuperH Mobile CEU driver attached to camera %d\n", icd->devnum); pm_runtime_get_sync(ici->v4l2_dev.dev); pcdev->buf_total = 0; ret = sh_mobile_ceu_soft_reset(pcdev); csi2_sd = find_csi2(pcdev); if (csi2_sd) { csi2_sd->grp_id = soc_camera_grp_id(icd); v4l2_set_subdev_hostdata(csi2_sd, icd); } ret = v4l2_subdev_call(csi2_sd, core, s_power, 1); if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) { pm_runtime_put(ici->v4l2_dev.dev); return ret; } /* * -ENODEV is special: either csi2_sd == NULL or the CSI-2 driver * has not found this soc-camera device among its clients */ if (ret == -ENODEV && csi2_sd) csi2_sd->grp_id = 0; pcdev->icd = icd; return 0; } /* Called with .host_lock held */ static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd) { struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; struct v4l2_subdev *csi2_sd = find_csi2(pcdev); BUG_ON(icd != pcdev->icd); v4l2_subdev_call(csi2_sd, core, s_power, 0); if (csi2_sd) csi2_sd->grp_id = 0; /* disable capture, disable interrupts */ ceu_write(pcdev, CEIER, 0); sh_mobile_ceu_soft_reset(pcdev); /* make sure active buffer is canceled */ spin_lock_irq(&pcdev->lock); if (pcdev->active) { list_del_init(&to_ceu_vb(pcdev->active)->queue); vb2_buffer_done(pcdev->active, VB2_BUF_STATE_ERROR); pcdev->active = NULL; } spin_unlock_irq(&pcdev->lock); pm_runtime_put(ici->v4l2_dev.dev); dev_info(icd->parent, "SuperH Mobile CEU driver detached from camera %d\n", icd->devnum); pcdev->icd = NULL; } /* * See chapter 29.4.12 "Capture Filter Control Register (CFLCR)" * in SH7722 Hardware Manual */ static unsigned int size_dst(unsigned int src, unsigned int scale) { unsigned int mant_pre = scale >> 12; if (!src || !scale) return src; return ((mant_pre + 2 * (src - 1)) / (2 * mant_pre) - 1) * mant_pre * 4096 / scale + 1; } static u16 calc_scale(unsigned int src, unsigned int *dst) { u16 scale; if (src == *dst) return 0; scale = (src * 4096 / *dst) & ~7; while (scale > 4096 && size_dst(src, scale) < *dst) scale -= 8; *dst = size_dst(src, scale); return scale; } /* rect is guaranteed to not exceed the scaled camera rectangle */ static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd) { struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_cam *cam = icd->host_priv; struct sh_mobile_ceu_dev *pcdev = ici->priv; unsigned int height, width, cdwdr_width, in_width, in_height; unsigned int left_offset, top_offset; u32 camor; dev_geo(icd->parent, "Crop %ux%u@%u:%u\n", icd->user_width, icd->user_height, cam->ceu_left, cam->ceu_top); left_offset = cam->ceu_left; top_offset = cam->ceu_top; WARN_ON(icd->user_width & 3 || icd->user_height & 3); width = icd->user_width; if (pcdev->image_mode) { in_width = cam->width; if (!pcdev->is_16bit) { in_width *= 2; left_offset *= 2; } } else { unsigned int w_factor; switch 
(icd->current_fmt->host_fmt->packing) {
		case SOC_MBUS_PACKING_2X8_PADHI:
			w_factor = 2;
			break;
		default:
			w_factor = 1;
		}

		in_width = cam->width * w_factor;
		left_offset *= w_factor;
	}

	cdwdr_width = icd->bytesperline;

	height = icd->user_height;
	in_height = cam->height;
	if (V4L2_FIELD_NONE != pcdev->field) {
		height = (height / 2) & ~3;
		in_height /= 2;
		top_offset /= 2;
		cdwdr_width *= 2;
	}

	/* CSI2 special configuration */
	if (pcdev->pdata->csi2) {
		in_width = ((in_width - 2) * 2);
		left_offset *= 2;
	}

	/* Set CAMOR, CAPWR, CFSZR, take care of CDWDR */
	camor = left_offset | (top_offset << 16);

	dev_geo(icd->parent,
		"CAMOR 0x%x, CAPWR 0x%x, CFSZR 0x%x, CDWDR 0x%x\n", camor,
		(in_height << 16) | in_width, (height << 16) | width,
		cdwdr_width);

	ceu_write(pcdev, CAMOR, camor);
	ceu_write(pcdev, CAPWR, (in_height << 16) | in_width);
	/* CFSZR clipping is applied _after_ the scaling filter (CFLCR) */
	ceu_write(pcdev, CFSZR, (height << 16) | width);
	ceu_write(pcdev, CDWDR, cdwdr_width);
}

static u32 capture_save_reset(struct sh_mobile_ceu_dev *pcdev)
{
	u32 capsr = ceu_read(pcdev, CAPSR);
	ceu_write(pcdev, CAPSR, 1 << 16); /* reset, stop capture */
	return capsr;
}

static void capture_restore(struct sh_mobile_ceu_dev *pcdev, u32 capsr)
{
	unsigned long timeout = jiffies + 10 * HZ;

	/*
	 * Wait until the end of the current frame. It can take a long time,
	 * but if it has been aborted by a CAPSR reset, it should exit sooner.
	 */
	while ((ceu_read(pcdev, CSTSR) & 1) && time_before(jiffies, timeout))
		msleep(1);

	if (time_after(jiffies, timeout)) {
		dev_err(pcdev->ici.v4l2_dev.dev,
			"Timeout waiting for frame end! Interface problem?\n");
		return;
	}

	/* Wait until reset clears, this shall not hang... */
	while (ceu_read(pcdev, CAPSR) & (1 << 16))
		udelay(10);

	/* Anything to restore? */
	if (capsr & ~(1 << 16))
		ceu_write(pcdev, CAPSR, capsr);
}

/* Find the bus subdevice driver, e.g., CSI2 */
static struct v4l2_subdev *find_bus_subdev(struct sh_mobile_ceu_dev *pcdev,
					   struct soc_camera_device *icd)
{
	if (pcdev->csi2_pdev) {
		struct v4l2_subdev *csi2_sd = find_csi2(pcdev);

		if (csi2_sd && csi2_sd->grp_id == soc_camera_grp_id(icd))
			return csi2_sd;
	}

	return soc_camera_to_subdev(icd);
}

#define CEU_BUS_FLAGS (V4L2_MBUS_MASTER |	\
		V4L2_MBUS_PCLK_SAMPLE_RISING |	\
		V4L2_MBUS_HSYNC_ACTIVE_HIGH |	\
		V4L2_MBUS_HSYNC_ACTIVE_LOW |	\
		V4L2_MBUS_VSYNC_ACTIVE_HIGH |	\
		V4L2_MBUS_VSYNC_ACTIVE_LOW |	\
		V4L2_MBUS_DATA_ACTIVE_HIGH)

/* Capture is not running, no interrupts, no locking needed */
static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd)
{
	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
	struct sh_mobile_ceu_dev *pcdev = ici->priv;
	struct v4l2_subdev *sd = find_bus_subdev(pcdev, icd);
	struct sh_mobile_ceu_cam *cam = icd->host_priv;
	struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
	unsigned long value, common_flags = CEU_BUS_FLAGS;
	u32 capsr = capture_save_reset(pcdev);
	unsigned int yuv_lineskip;
	int ret;

	/*
	 * If the client doesn't implement g_mbus_config, we just use our
	 * platform data
	 */
	ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
	if (!ret) {
		common_flags = soc_mbus_config_compatible(&cfg,
							  common_flags);
		if (!common_flags)
			return -EINVAL;
	} else if (ret != -ENOIOCTLCMD) {
		return ret;
	}

	/* Make choices, based on platform preferences */
	if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
	    (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
		if (pcdev->pdata->flags & SH_CEU_FLAG_HSYNC_LOW)
			common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH;
		else
			common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW;
	}

	if ((common_flags &
V4L2_MBUS_VSYNC_ACTIVE_HIGH) && (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) { if (pcdev->pdata->flags & SH_CEU_FLAG_VSYNC_LOW) common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH; else common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW; } cfg.flags = common_flags; ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg); if (ret < 0 && ret != -ENOIOCTLCMD) return ret; if (icd->current_fmt->host_fmt->bits_per_sample > 8) pcdev->is_16bit = 1; else pcdev->is_16bit = 0; ceu_write(pcdev, CRCNTR, 0); ceu_write(pcdev, CRCMPR, 0); value = 0x00000010; /* data fetch by default */ yuv_lineskip = 0x10; switch (icd->current_fmt->host_fmt->fourcc) { case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV21: /* convert 4:2:2 -> 4:2:0 */ yuv_lineskip = 0; /* skip for NV12/21, no skip for NV16/61 */ /* fall-through */ case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: switch (cam->code) { case V4L2_MBUS_FMT_UYVY8_2X8: value = 0x00000000; /* Cb0, Y0, Cr0, Y1 */ break; case V4L2_MBUS_FMT_VYUY8_2X8: value = 0x00000100; /* Cr0, Y0, Cb0, Y1 */ break; case V4L2_MBUS_FMT_YUYV8_2X8: value = 0x00000200; /* Y0, Cb0, Y1, Cr0 */ break; case V4L2_MBUS_FMT_YVYU8_2X8: value = 0x00000300; /* Y0, Cr0, Y1, Cb0 */ break; default: BUG(); } } if (icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV21 || icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV61) value ^= 0x00000100; /* swap U, V to change from NV1x->NVx1 */ value |= common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW ? 1 << 1 : 0; value |= common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW ? 1 << 0 : 0; if (pcdev->pdata->csi2) /* CSI2 mode */ value |= 3 << 12; else if (pcdev->is_16bit) value |= 1 << 12; else if (pcdev->pdata->flags & SH_CEU_FLAG_LOWER_8BIT) value |= 2 << 12; ceu_write(pcdev, CAMCR, value); ceu_write(pcdev, CAPCR, 0x00300000); switch (pcdev->field) { case V4L2_FIELD_INTERLACED_TB: value = 0x101; break; case V4L2_FIELD_INTERLACED_BT: value = 0x102; break; default: value = 0; break; } ceu_write(pcdev, CAIFR, value); sh_mobile_ceu_set_rect(icd); mdelay(1); dev_geo(icd->parent, "CFLCR 0x%x\n", pcdev->cflcr); ceu_write(pcdev, CFLCR, pcdev->cflcr); /* * A few words about byte order (observed in Big Endian mode) * * In data fetch mode bytes are received in chunks of 8 bytes. 
* D0, D1, D2, D3, D4, D5, D6, D7 (D0 received first) * * The data is however by default written to memory in reverse order: * D7, D6, D5, D4, D3, D2, D1, D0 (D7 written to lowest byte) * * The lowest three bits of CDOCR allows us to do swapping, * using 7 we swap the data bytes to match the incoming order: * D0, D1, D2, D3, D4, D5, D6, D7 */ value = 0x00000007 | yuv_lineskip; ceu_write(pcdev, CDOCR, value); ceu_write(pcdev, CFWCR, 0); /* keep "datafetch firewall" disabled */ capture_restore(pcdev, capsr); /* not in bundle mode: skip CBDSR, CDAYR2, CDACR2, CDBYR2, CDBCR2 */ return 0; } static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd, unsigned char buswidth) { struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; struct v4l2_subdev *sd = find_bus_subdev(pcdev, icd); unsigned long common_flags = CEU_BUS_FLAGS; struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,}; int ret; ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg); if (!ret) common_flags = soc_mbus_config_compatible(&cfg, common_flags); else if (ret != -ENOIOCTLCMD) return ret; if (!common_flags || buswidth > 16) return -EINVAL; return 0; } static const struct soc_mbus_pixelfmt sh_mobile_ceu_formats[] = { { .fourcc = V4L2_PIX_FMT_NV12, .name = "NV12", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_1_5X8, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PLANAR_2Y_C, }, { .fourcc = V4L2_PIX_FMT_NV21, .name = "NV21", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_1_5X8, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PLANAR_2Y_C, }, { .fourcc = V4L2_PIX_FMT_NV16, .name = "NV16", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PLANAR_Y_C, }, { .fourcc = V4L2_PIX_FMT_NV61, .name = "NV61", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PLANAR_Y_C, }, }; /* This will be corrected as we get more formats */ static bool sh_mobile_ceu_packing_supported(const struct soc_mbus_pixelfmt *fmt) { return fmt->packing == SOC_MBUS_PACKING_NONE || (fmt->bits_per_sample == 8 && fmt->packing == SOC_MBUS_PACKING_1_5X8) || (fmt->bits_per_sample == 8 && fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) || (fmt->bits_per_sample > 8 && fmt->packing == SOC_MBUS_PACKING_EXTEND16); } static int client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect); static struct soc_camera_device *ctrl_to_icd(struct v4l2_ctrl *ctrl) { return container_of(ctrl->handler, struct soc_camera_device, ctrl_handler); } static int sh_mobile_ceu_s_ctrl(struct v4l2_ctrl *ctrl) { struct soc_camera_device *icd = ctrl_to_icd(ctrl); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; switch (ctrl->id) { case V4L2_CID_SHARPNESS: switch (icd->current_fmt->host_fmt->fourcc) { case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV21: case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: ceu_write(pcdev, CLFCR, !ctrl->val); return 0; } break; } return -EINVAL; } static const struct v4l2_ctrl_ops sh_mobile_ceu_ctrl_ops = { .s_ctrl = sh_mobile_ceu_s_ctrl, }; static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int idx, struct soc_camera_format_xlate *xlate) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct device *dev = icd->parent; struct soc_camera_host *ici = to_soc_camera_host(dev); struct sh_mobile_ceu_dev *pcdev = ici->priv; int ret, k, n; int formats = 0; struct sh_mobile_ceu_cam *cam; 
	enum v4l2_mbus_pixelcode code;
	const struct soc_mbus_pixelfmt *fmt;

	ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
	if (ret < 0)
		/* No more formats */
		return 0;

	fmt = soc_mbus_get_fmtdesc(code);
	if (!fmt) {
		dev_warn(dev, "unsupported format code #%u: %d\n", idx, code);
		return 0;
	}

	if (!pcdev->pdata->csi2) {
		/* Are there any restrictions in the CSI-2 case? */
		ret = sh_mobile_ceu_try_bus_param(icd, fmt->bits_per_sample);
		if (ret < 0)
			return 0;
	}

	if (!icd->host_priv) {
		struct v4l2_mbus_framefmt mf;
		struct v4l2_rect rect;
		int shift = 0;

		/* Add our control */
		v4l2_ctrl_new_std(&icd->ctrl_handler, &sh_mobile_ceu_ctrl_ops,
				  V4L2_CID_SHARPNESS, 0, 1, 1, 1);
		if (icd->ctrl_handler.error)
			return icd->ctrl_handler.error;

		/* FIXME: subwindow is lost between close / open */

		/* Cache current client geometry */
		ret = client_g_rect(sd, &rect);
		if (ret < 0)
			return ret;

		/* First time */
		ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
		if (ret < 0)
			return ret;

		/*
		 * All currently existing CEU implementations support 2560x1920
		 * or larger frames. If the sensor is proposing too big a
		 * frame, don't bother with larger sizes possibly supported by
		 * the CEU, just try VGA multiples. If needed, this can be
		 * adjusted in the future.
		 */
		while ((mf.width > pcdev->max_width ||
			mf.height > pcdev->max_height) && shift < 4) {
			/* Try 2560x1920, 1280x960, 640x480, 320x240 */
			mf.width	= 2560 >> shift;
			mf.height	= 1920 >> shift;
			ret = v4l2_device_call_until_err(sd->v4l2_dev,
					soc_camera_grp_id(icd), video,
					s_mbus_fmt, &mf);
			if (ret < 0)
				return ret;
			shift++;
		}

		if (shift == 4) {
			dev_err(dev, "Failed to configure the client below %ux%u\n",
				mf.width, mf.height);
			return -EIO;
		}

		dev_geo(dev, "camera fmt %ux%u\n", mf.width, mf.height);

		cam = kzalloc(sizeof(*cam), GFP_KERNEL);
		if (!cam)
			return -ENOMEM;

		/* We are called with current camera crop,
		 * initialise subrect with it */
		cam->rect	= rect;
		cam->subrect	= rect;

		cam->width	= mf.width;
		cam->height	= mf.height;

		icd->host_priv = cam;
	} else {
		cam = icd->host_priv;
	}

	/* Beginning of a pass */
	if (!idx)
		cam->extra_fmt = NULL;

	switch (code) {
	case V4L2_MBUS_FMT_UYVY8_2X8:
	case V4L2_MBUS_FMT_VYUY8_2X8:
	case V4L2_MBUS_FMT_YUYV8_2X8:
	case V4L2_MBUS_FMT_YVYU8_2X8:
		if (cam->extra_fmt)
			break;

		/*
		 * Our case is simple so far: for any of the above four camera
		 * formats we add all our four synthesized NV* formats, so,
		 * just marking the device with a single flag suffices. If
		 * the format generation rules are more complex, you would have
		 * to actually hang your already added / counted formats onto
		 * the host_priv pointer and check whether the format you're
		 * going to add now is already there.
*/ cam->extra_fmt = sh_mobile_ceu_formats; n = ARRAY_SIZE(sh_mobile_ceu_formats); formats += n; for (k = 0; xlate && k < n; k++) { xlate->host_fmt = &sh_mobile_ceu_formats[k]; xlate->code = code; xlate++; dev_dbg(dev, "Providing format %s using code %d\n", sh_mobile_ceu_formats[k].name, code); } break; default: if (!sh_mobile_ceu_packing_supported(fmt)) return 0; } /* Generic pass-through */ formats++; if (xlate) { xlate->host_fmt = fmt; xlate->code = code; xlate++; dev_dbg(dev, "Providing format %s in pass-through mode\n", fmt->name); } return formats; } static void sh_mobile_ceu_put_formats(struct soc_camera_device *icd) { kfree(icd->host_priv); icd->host_priv = NULL; } /* Check if any dimension of r1 is smaller than the respective one of r2 */ static bool is_smaller(const struct v4l2_rect *r1, const struct v4l2_rect *r2) { return r1->width < r2->width || r1->height < r2->height; } /* Check if r1 fails to cover r2 */ static bool is_inside(const struct v4l2_rect *r1, const struct v4l2_rect *r2) { return r1->left > r2->left || r1->top > r2->top || r1->left + r1->width < r2->left + r2->width || r1->top + r1->height < r2->top + r2->height; } /* Scales are 12-bit fixed point ratios: 4096 represents 1:1 */ static unsigned int scale_down(unsigned int size, unsigned int scale) { return (size * 4096 + scale / 2) / scale; } static unsigned int calc_generic_scale(unsigned int input, unsigned int output) { return (input * 4096 + output / 2) / output; } /* Get and store current client crop */ static int client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect) { struct v4l2_crop crop; struct v4l2_cropcap cap; int ret; crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; ret = v4l2_subdev_call(sd, video, g_crop, &crop); if (!ret) { *rect = crop.c; return ret; } /* Camera driver doesn't support .g_crop(), assume default rectangle */ cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; ret = v4l2_subdev_call(sd, video, cropcap, &cap); if (!ret) *rect = cap.defrect; return ret; } /* Client crop has changed, update our sub-rectangle to remain within the area */ static void update_subrect(struct sh_mobile_ceu_cam *cam) { struct v4l2_rect *rect = &cam->rect, *subrect = &cam->subrect; if (rect->width < subrect->width) subrect->width = rect->width; if (rect->height < subrect->height) subrect->height = rect->height; if (rect->left > subrect->left) subrect->left = rect->left; else if (rect->left + rect->width > subrect->left + subrect->width) subrect->left = rect->left + rect->width - subrect->width; if (rect->top > subrect->top) subrect->top = rect->top; else if (rect->top + rect->height > subrect->top + subrect->height) subrect->top = rect->top + rect->height - subrect->height; } /* * The iterative approach, common to both scaling and cropping, is: * 1. try if the client can produce exactly what is requested by the user * 2. if (1) failed, try to double the client image until we get one big enough * 3.
if (2) failed, try to request the maximum image */ static int client_s_crop(struct soc_camera_device *icd, struct v4l2_crop *crop, struct v4l2_crop *cam_crop) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct v4l2_rect *rect = &crop->c, *cam_rect = &cam_crop->c; struct device *dev = sd->v4l2_dev->dev; struct sh_mobile_ceu_cam *cam = icd->host_priv; struct v4l2_cropcap cap; int ret; unsigned int width, height; v4l2_subdev_call(sd, video, s_crop, crop); ret = client_g_rect(sd, cam_rect); if (ret < 0) return ret; /* * Now cam_crop contains the current camera input rectangle, and it must * be within camera cropcap bounds */ if (!memcmp(rect, cam_rect, sizeof(*rect))) { /* Even if camera S_CROP failed, the camera rectangle matches the request */ dev_dbg(dev, "Camera S_CROP successful for %dx%d@%d:%d\n", rect->width, rect->height, rect->left, rect->top); cam->rect = *cam_rect; return 0; } /* Try to fix the cropping that the camera hasn't managed to set */ dev_geo(dev, "Fix camera S_CROP for %dx%d@%d:%d to %dx%d@%d:%d\n", cam_rect->width, cam_rect->height, cam_rect->left, cam_rect->top, rect->width, rect->height, rect->left, rect->top); /* We need sensor maximum rectangle */ ret = v4l2_subdev_call(sd, video, cropcap, &cap); if (ret < 0) return ret; /* Put user requested rectangle within sensor bounds */ soc_camera_limit_side(&rect->left, &rect->width, cap.bounds.left, 2, cap.bounds.width); soc_camera_limit_side(&rect->top, &rect->height, cap.bounds.top, 4, cap.bounds.height); /* * Popular special case - some cameras can only handle fixed sizes like * QVGA, VGA,... Take care to avoid an infinite loop. */ width = max(cam_rect->width, 2); height = max(cam_rect->height, 2); /* * Loop as long as the sensor is not covering the requested rectangle and * is still within its bounds */ while (!ret && (is_smaller(cam_rect, rect) || is_inside(cam_rect, rect)) && (cap.bounds.width > width || cap.bounds.height > height)) { width *= 2; height *= 2; cam_rect->width = width; cam_rect->height = height; /* * We do not know what capabilities the camera has to set up * left and top borders. We could try to be smarter in iterating * them, e.g., if the camera's current left is to the right of the * target left, set it to the middle point between the current * left and the minimum left. But that would add too much * complexity: we would have to iterate each border separately. * Instead we just drop to the left and top bounds.
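* (An illustration with hypothetical numbers: asked to cover 800x600 while the camera currently provides 640x480, the first pass doubles to 1280x960, which covers the request; the border handling below then pulls left and top back within bounds.)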
*/ if (cam_rect->left > rect->left) cam_rect->left = cap.bounds.left; if (cam_rect->left + cam_rect->width < rect->left + rect->width) cam_rect->width = rect->left + rect->width - cam_rect->left; if (cam_rect->top > rect->top) cam_rect->top = cap.bounds.top; if (cam_rect->top + cam_rect->height < rect->top + rect->height) cam_rect->height = rect->top + rect->height - cam_rect->top; v4l2_subdev_call(sd, video, s_crop, cam_crop); ret = client_g_rect(sd, cam_rect); dev_geo(dev, "Camera S_CROP %d for %dx%d@%d:%d\n", ret, cam_rect->width, cam_rect->height, cam_rect->left, cam_rect->top); } /* S_CROP must not modify the rectangle */ if (is_smaller(cam_rect, rect) || is_inside(cam_rect, rect)) { /* * The camera failed to configure a suitable cropping, * we cannot use the current rectangle, set to max */ *cam_rect = cap.bounds; v4l2_subdev_call(sd, video, s_crop, cam_crop); ret = client_g_rect(sd, cam_rect); dev_geo(dev, "Camera S_CROP %d for max %dx%d@%d:%d\n", ret, cam_rect->width, cam_rect->height, cam_rect->left, cam_rect->top); } if (!ret) { cam->rect = *cam_rect; update_subrect(cam); } return ret; } /* Iterative s_mbus_fmt, also updates cached client crop on success */ static int client_s_fmt(struct soc_camera_device *icd, struct v4l2_mbus_framefmt *mf, bool ceu_can_scale) { struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; struct sh_mobile_ceu_cam *cam = icd->host_priv; struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct device *dev = icd->parent; unsigned int width = mf->width, height = mf->height, tmp_w, tmp_h; unsigned int max_width, max_height; struct v4l2_cropcap cap; bool ceu_1to1; int ret; ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd), video, s_mbus_fmt, mf); if (ret < 0) return ret; dev_geo(dev, "camera scaled to %ux%u\n", mf->width, mf->height); if (width == mf->width && height == mf->height) { /* Perfect! The client has done it all. 
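* No scaling is then left for the CEU to do, so the cached sub-rectangle below simply mirrors the full client rectangle.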
*/ ceu_1to1 = true; goto update_cache; } ceu_1to1 = false; if (!ceu_can_scale) goto update_cache; cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; ret = v4l2_subdev_call(sd, video, cropcap, &cap); if (ret < 0) return ret; max_width = min(cap.bounds.width, pcdev->max_width); max_height = min(cap.bounds.height, pcdev->max_height); /* The camera set a format, but the geometry is not precise, try to improve */ tmp_w = mf->width; tmp_h = mf->height; /* width <= max_width && height <= max_height - guaranteed by try_fmt */ while ((width > tmp_w || height > tmp_h) && tmp_w < max_width && tmp_h < max_height) { tmp_w = min(2 * tmp_w, max_width); tmp_h = min(2 * tmp_h, max_height); mf->width = tmp_w; mf->height = tmp_h; ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd), video, s_mbus_fmt, mf); dev_geo(dev, "Camera scaled to %ux%u\n", mf->width, mf->height); if (ret < 0) { /* This shouldn't happen */ dev_err(dev, "Client failed to set format: %d\n", ret); return ret; } } update_cache: /* Update cache */ ret = client_g_rect(sd, &cam->rect); if (ret < 0) return ret; if (ceu_1to1) cam->subrect = cam->rect; else update_subrect(cam); return 0; } /** * client_scale - iteratively set up the client (camera) output window * @icd - soc-camera device * @mf - in- / output camera output window * @width - on output: user width, mapped back to input * @height - on output: user height, mapped back to input * @ceu_can_scale - whether the CEU may scale this format */ static int client_scale(struct soc_camera_device *icd, struct v4l2_mbus_framefmt *mf, unsigned int *width, unsigned int *height, bool ceu_can_scale) { struct sh_mobile_ceu_cam *cam = icd->host_priv; struct device *dev = icd->parent; struct v4l2_mbus_framefmt mf_tmp = *mf; unsigned int scale_h, scale_v; int ret; /* * 5. Apply iterative camera S_FMT for camera user window (also updates * client crop cache and the imaginary sub-rectangle). */ ret = client_s_fmt(icd, &mf_tmp, ceu_can_scale); if (ret < 0) return ret; dev_geo(dev, "5: camera scaled to %ux%u\n", mf_tmp.width, mf_tmp.height); /* 6. Retrieve camera output window (g_fmt) */ /* unneeded - it is already in "mf_tmp" */ /* 7. Calculate new client scales. */ scale_h = calc_generic_scale(cam->rect.width, mf_tmp.width); scale_v = calc_generic_scale(cam->rect.height, mf_tmp.height); mf->width = mf_tmp.width; mf->height = mf_tmp.height; mf->colorspace = mf_tmp.colorspace; /* * 8. Calculate new CEU crop - apply camera scales to previously * updated "effective" crop. */ *width = scale_down(cam->subrect.width, scale_h); *height = scale_down(cam->subrect.height, scale_v); dev_geo(dev, "8: new client sub-window %ux%u\n", *width, *height); return 0; } /* * CEU can scale and crop, but we don't want to waste bandwidth and kill the * framerate by always requesting the maximum image from the client. See * Documentation/video4linux/sh_mobile_ceu_camera.txt for a description of * scaling and cropping algorithms and for the meaning of the steps referenced here.
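* For example (hypothetical numbers): a 1280-pixel wide client rectangle mapped onto a 640-pixel wide output gives calc_generic_scale(1280, 640) = (1280 * 4096 + 320) / 640 = 8192, i.e. a 2:1 ratio, and scale_down(1280, 8192) maps it back to 640.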
*/ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd, const struct v4l2_crop *a) { struct v4l2_crop a_writable = *a; const struct v4l2_rect *rect = &a_writable.c; struct device *dev = icd->parent; struct soc_camera_host *ici = to_soc_camera_host(dev); struct sh_mobile_ceu_dev *pcdev = ici->priv; struct v4l2_crop cam_crop; struct sh_mobile_ceu_cam *cam = icd->host_priv; struct v4l2_rect *cam_rect = &cam_crop.c; struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct v4l2_mbus_framefmt mf; unsigned int scale_cam_h, scale_cam_v, scale_ceu_h, scale_ceu_v, out_width, out_height; int interm_width, interm_height; u32 capsr, cflcr; int ret; dev_geo(dev, "S_CROP(%ux%u@%u:%u)\n", rect->width, rect->height, rect->left, rect->top); /* During camera cropping its output window can change too, stop CEU */ capsr = capture_save_reset(pcdev); dev_dbg(dev, "CAPSR 0x%x, CFLCR 0x%x\n", capsr, pcdev->cflcr); /* * 1. - 2. Apply iterative camera S_CROP for new input window, read back * actual camera rectangle. */ ret = client_s_crop(icd, &a_writable, &cam_crop); if (ret < 0) return ret; dev_geo(dev, "1-2: camera cropped to %ux%u@%u:%u\n", cam_rect->width, cam_rect->height, cam_rect->left, cam_rect->top); /* On success cam_crop contains current camera crop */ /* 3. Retrieve camera output window */ ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf); if (ret < 0) return ret; if (mf.width > pcdev->max_width || mf.height > pcdev->max_height) return -EINVAL; /* 4. Calculate camera scales */ scale_cam_h = calc_generic_scale(cam_rect->width, mf.width); scale_cam_v = calc_generic_scale(cam_rect->height, mf.height); /* Calculate intermediate window */ interm_width = scale_down(rect->width, scale_cam_h); interm_height = scale_down(rect->height, scale_cam_v); if (interm_width < icd->user_width) { u32 new_scale_h; new_scale_h = calc_generic_scale(rect->width, icd->user_width); mf.width = scale_down(cam_rect->width, new_scale_h); } if (interm_height < icd->user_height) { u32 new_scale_v; new_scale_v = calc_generic_scale(rect->height, icd->user_height); mf.height = scale_down(cam_rect->height, new_scale_v); } if (interm_width < icd->user_width || interm_height < icd->user_height) { ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd), video, s_mbus_fmt, &mf); if (ret < 0) return ret; dev_geo(dev, "New camera output %ux%u\n", mf.width, mf.height); scale_cam_h = calc_generic_scale(cam_rect->width, mf.width); scale_cam_v = calc_generic_scale(cam_rect->height, mf.height); interm_width = scale_down(rect->width, scale_cam_h); interm_height = scale_down(rect->height, scale_cam_v); } /* Cache camera output window */ cam->width = mf.width; cam->height = mf.height; if (pcdev->image_mode) { out_width = min(interm_width, icd->user_width); out_height = min(interm_height, icd->user_height); } else { out_width = interm_width; out_height = interm_height; } /* * 5. Calculate CEU scales from the camera scales of (4) and * the user window */ scale_ceu_h = calc_scale(interm_width, &out_width); scale_ceu_v = calc_scale(interm_height, &out_height); dev_geo(dev, "5: CEU scales %u:%u\n", scale_ceu_h, scale_ceu_v); /* Apply CEU scales.
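* The horizontal ratio is packed into the low 16 bits of CFLCR and the vertical ratio into the high 16 bits; the register is only rewritten below when the value actually changes.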
*/ cflcr = scale_ceu_h | (scale_ceu_v << 16); if (cflcr != pcdev->cflcr) { pcdev->cflcr = cflcr; ceu_write(pcdev, CFLCR, cflcr); } icd->user_width = out_width & ~3; icd->user_height = out_height & ~3; /* Offsets are applied at the CEU scaling filter input */ cam->ceu_left = scale_down(rect->left - cam_rect->left, scale_cam_h) & ~1; cam->ceu_top = scale_down(rect->top - cam_rect->top, scale_cam_v) & ~1; /* 6. Use CEU cropping to crop to the new window. */ sh_mobile_ceu_set_rect(icd); cam->subrect = *rect; dev_geo(dev, "6: CEU cropped to %ux%u@%u:%u\n", icd->user_width, icd->user_height, cam->ceu_left, cam->ceu_top); /* Restore capture. The CE bit can be cleared by the hardware */ if (pcdev->active) capsr |= 1; capture_restore(pcdev, capsr); /* Even if only camera cropping succeeded */ return ret; } static int sh_mobile_ceu_get_crop(struct soc_camera_device *icd, struct v4l2_crop *a) { struct sh_mobile_ceu_cam *cam = icd->host_priv; a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; a->c = cam->subrect; return 0; } /* * Calculate real client output window by applying new scales to the current * client crop. New scales are calculated from the requested output format and * CEU crop, mapped back onto the client input (subrect). */ static void calculate_client_output(struct soc_camera_device *icd, const struct v4l2_pix_format *pix, struct v4l2_mbus_framefmt *mf) { struct sh_mobile_ceu_cam *cam = icd->host_priv; struct device *dev = icd->parent; struct v4l2_rect *cam_subrect = &cam->subrect; unsigned int scale_v, scale_h; if (cam_subrect->width == cam->rect.width && cam_subrect->height == cam->rect.height) { /* No sub-cropping */ mf->width = pix->width; mf->height = pix->height; return; } /* 1.-2. Current camera scales and subwin - cached. */ dev_geo(dev, "2: subwin %ux%u@%u:%u\n", cam_subrect->width, cam_subrect->height, cam_subrect->left, cam_subrect->top); /* * 3. Calculate new combined scales from input sub-window to requested * user window. */ /* * TODO: CEU cannot scale images larger than VGA to smaller than SubQCIF * (128x96) or larger than VGA */ scale_h = calc_generic_scale(cam_subrect->width, pix->width); scale_v = calc_generic_scale(cam_subrect->height, pix->height); dev_geo(dev, "3: scales %u:%u\n", scale_h, scale_v); /* * 4. Calculate desired client output window by applying combined scales * to client (real) input window. */ mf->width = scale_down(cam->rect.width, scale_h); mf->height = scale_down(cam->rect.height, scale_v); } /* Similar to the set_crop multistage iterative algorithm */ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd, struct v4l2_format *f) { struct device *dev = icd->parent; struct soc_camera_host *ici = to_soc_camera_host(dev); struct sh_mobile_ceu_dev *pcdev = ici->priv; struct sh_mobile_ceu_cam *cam = icd->host_priv; struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_mbus_framefmt mf; __u32 pixfmt = pix->pixelformat; const struct soc_camera_format_xlate *xlate; /* Keep the compiler happy */ unsigned int ceu_sub_width = 0, ceu_sub_height = 0; u16 scale_v, scale_h; int ret; bool image_mode; enum v4l2_field field; switch (pix->field) { default: pix->field = V4L2_FIELD_NONE; /* fall-through */ case V4L2_FIELD_INTERLACED_TB: case V4L2_FIELD_INTERLACED_BT: case V4L2_FIELD_NONE: field = pix->field; break; case V4L2_FIELD_INTERLACED: field = V4L2_FIELD_INTERLACED_TB; break; } xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); if (!xlate) { dev_warn(dev, "Format %x not found\n", pixfmt); return -EINVAL; } /* 1.-4.
Calculate desired client output geometry */ calculate_client_output(icd, pix, &mf); mf.field = pix->field; mf.colorspace = pix->colorspace; mf.code = xlate->code; switch (pixfmt) { case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV21: case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: image_mode = true; break; default: image_mode = false; } dev_geo(dev, "S_FMT(pix=0x%x, fld 0x%x, code 0x%x, %ux%u)\n", pixfmt, mf.field, mf.code, pix->width, pix->height); dev_geo(dev, "4: request camera output %ux%u\n", mf.width, mf.height); /* 5. - 9. */ ret = client_scale(icd, &mf, &ceu_sub_width, &ceu_sub_height, image_mode && V4L2_FIELD_NONE == field); dev_geo(dev, "5-9: client scale return %d\n", ret); /* Done with the camera. Now see if we can improve the result */ dev_geo(dev, "fmt %ux%u, requested %ux%u\n", mf.width, mf.height, pix->width, pix->height); if (ret < 0) return ret; if (mf.code != xlate->code) return -EINVAL; /* 9. Prepare CEU crop */ cam->width = mf.width; cam->height = mf.height; /* 10. Use CEU scaling to scale to the requested user window. */ /* We cannot scale up */ if (pix->width > ceu_sub_width) ceu_sub_width = pix->width; if (pix->height > ceu_sub_height) ceu_sub_height = pix->height; pix->colorspace = mf.colorspace; if (image_mode) { /* Scale pix->{width x height} down to width x height */ scale_h = calc_scale(ceu_sub_width, &pix->width); scale_v = calc_scale(ceu_sub_height, &pix->height); } else { pix->width = ceu_sub_width; pix->height = ceu_sub_height; scale_h = 0; scale_v = 0; } pcdev->cflcr = scale_h | (scale_v << 16); /* * We have calculated CFLCR, the actual configuration will be performed * in sh_mobile_ceu_set_bus_param() */ dev_geo(dev, "10: W: %u : 0x%x = %u, H: %u : 0x%x = %u\n", ceu_sub_width, scale_h, pix->width, ceu_sub_height, scale_v, pix->height); cam->code = xlate->code; icd->current_fmt = xlate; pcdev->field = field; pcdev->image_mode = image_mode; /* CFSZR requirement */ pix->width &= ~3; pix->height &= ~3; return 0; } #define CEU_CHDW_MAX 8188U /* Maximum line stride */ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd, struct v4l2_format *f) { struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; const struct soc_camera_format_xlate *xlate; struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct v4l2_mbus_framefmt mf; __u32 pixfmt = pix->pixelformat; int width, height; int ret; dev_geo(icd->parent, "TRY_FMT(pix=0x%x, %ux%u)\n", pixfmt, pix->width, pix->height); xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); if (!xlate) { xlate = icd->current_fmt; dev_dbg(icd->parent, "Format %x not found, keeping %x\n", pixfmt, xlate->host_fmt->fourcc); pixfmt = xlate->host_fmt->fourcc; pix->pixelformat = pixfmt; pix->colorspace = icd->colorspace; } /* FIXME: calculate using depth and bus width */ /* CFSZR requires height and width to be 4-pixel aligned */ v4l_bound_align_image(&pix->width, 2, pcdev->max_width, 2, &pix->height, 4, pcdev->max_height, 2, 0); width = pix->width; height = pix->height; /* limit to sensor capabilities */ mf.width = pix->width; mf.height = pix->height; mf.field = pix->field; mf.code = xlate->code; mf.colorspace = pix->colorspace; ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd), video, try_mbus_fmt, &mf); if (ret < 0) return ret; pix->width = mf.width; pix->height = mf.height; pix->field = mf.field; pix->colorspace = mf.colorspace; switch (pixfmt) { case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV21: case V4L2_PIX_FMT_NV16: 
case V4L2_PIX_FMT_NV61: /* FIXME: check against rect_max after converting soc-camera */ /* We can scale precisely, so we need a bigger image from the camera */ if (pix->width < width || pix->height < height) { /* * We presume the sensor behaves sanely, i.e., if we * requested a bigger rectangle, it will not return a * smaller one. */ mf.width = pcdev->max_width; mf.height = pcdev->max_height; ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd), video, try_mbus_fmt, &mf); if (ret < 0) { /* Shouldn't actually happen... */ dev_err(icd->parent, "FIXME: client try_fmt() = %d\n", ret); return ret; } } /* We will scale exactly */ if (mf.width > width) pix->width = width; if (mf.height > height) pix->height = height; pix->bytesperline = max(pix->bytesperline, pix->width); pix->bytesperline = min(pix->bytesperline, CEU_CHDW_MAX); pix->bytesperline &= ~3; break; default: /* Configurable stride isn't supported in pass-through mode. */ pix->bytesperline = 0; } pix->width &= ~3; pix->height &= ~3; pix->sizeimage = 0; dev_geo(icd->parent, "%s(): return %d, fmt 0x%x, %ux%u\n", __func__, ret, pix->pixelformat, pix->width, pix->height); return ret; } static int sh_mobile_ceu_set_livecrop(struct soc_camera_device *icd, const struct v4l2_crop *a) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; u32 out_width = icd->user_width, out_height = icd->user_height; int ret; /* Freeze queue */ pcdev->frozen = 1; /* Wait for frame */ ret = wait_for_completion_interruptible(&pcdev->complete); /* Stop the client */ ret = v4l2_subdev_call(sd, video, s_stream, 0); if (ret < 0) dev_warn(icd->parent, "Client failed to stop the stream: %d\n", ret); else /* Do the crop; if it fails, there's nothing more we can do */ sh_mobile_ceu_set_crop(icd, a); dev_geo(icd->parent, "Output after crop: %ux%u\n", icd->user_width, icd->user_height); if (icd->user_width != out_width || icd->user_height != out_height) { struct v4l2_format f = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .fmt.pix = { .width = out_width, .height = out_height, .pixelformat = icd->current_fmt->host_fmt->fourcc, .field = pcdev->field, .colorspace = icd->colorspace, }, }; ret = sh_mobile_ceu_set_fmt(icd, &f); if (!ret && (out_width != f.fmt.pix.width || out_height != f.fmt.pix.height)) ret = -EINVAL; if (!ret) { icd->user_width = out_width & ~3; icd->user_height = out_height & ~3; ret = sh_mobile_ceu_set_bus_param(icd); } } /* Thaw the queue */ pcdev->frozen = 0; spin_lock_irq(&pcdev->lock); sh_mobile_ceu_capture(pcdev); spin_unlock_irq(&pcdev->lock); /* Start the client */ ret = v4l2_subdev_call(sd, video, s_stream, 1); return ret; } static unsigned int sh_mobile_ceu_poll(struct file *file, poll_table *pt) { struct soc_camera_device *icd = file->private_data; return vb2_poll(&icd->vb2_vidq, file, pt); } static int sh_mobile_ceu_querycap(struct soc_camera_host *ici, struct v4l2_capability *cap) { strlcpy(cap->card, "SuperH_Mobile_CEU", sizeof(cap->card)); cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; return 0; } static int sh_mobile_ceu_init_videobuf(struct vb2_queue *q, struct soc_camera_device *icd) { q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; q->io_modes = VB2_MMAP | VB2_USERPTR; q->drv_priv = icd; q->ops = &sh_mobile_ceu_videobuf_ops; q->mem_ops = &vb2_dma_contig_memops; q->buf_struct_size = sizeof(struct sh_mobile_ceu_buffer); q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; return vb2_queue_init(q); } static struct
soc_camera_host_ops sh_mobile_ceu_host_ops = { .owner = THIS_MODULE, .add = sh_mobile_ceu_add_device, .remove = sh_mobile_ceu_remove_device, .get_formats = sh_mobile_ceu_get_formats, .put_formats = sh_mobile_ceu_put_formats, .get_crop = sh_mobile_ceu_get_crop, .set_crop = sh_mobile_ceu_set_crop, .set_livecrop = sh_mobile_ceu_set_livecrop, .set_fmt = sh_mobile_ceu_set_fmt, .try_fmt = sh_mobile_ceu_try_fmt, .poll = sh_mobile_ceu_poll, .querycap = sh_mobile_ceu_querycap, .set_bus_param = sh_mobile_ceu_set_bus_param, .init_videobuf2 = sh_mobile_ceu_init_videobuf, }; struct bus_wait { struct notifier_block notifier; struct completion completion; struct device *dev; }; static int bus_notify(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; struct bus_wait *wait = container_of(nb, struct bus_wait, notifier); if (wait->dev != dev) return NOTIFY_DONE; switch (action) { case BUS_NOTIFY_UNBOUND_DRIVER: /* Protect from module unloading */ wait_for_completion(&wait->completion); return NOTIFY_OK; } return NOTIFY_DONE; } static int sh_mobile_ceu_probe(struct platform_device *pdev) { struct sh_mobile_ceu_dev *pcdev; struct resource *res; void __iomem *base; unsigned int irq; int err = 0; struct bus_wait wait = { .completion = COMPLETION_INITIALIZER_ONSTACK(wait.completion), .notifier.notifier_call = bus_notify, }; struct sh_mobile_ceu_companion *csi2; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); if (!res || (int)irq <= 0) { dev_err(&pdev->dev, "Not enough CEU platform resources.\n"); return -ENODEV; } pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev), GFP_KERNEL); if (!pcdev) { dev_err(&pdev->dev, "Could not allocate pcdev\n"); return -ENOMEM; } INIT_LIST_HEAD(&pcdev->capture); spin_lock_init(&pcdev->lock); init_completion(&pcdev->complete); pcdev->pdata = pdev->dev.platform_data; if (!pcdev->pdata) { dev_err(&pdev->dev, "CEU platform data not set.\n"); return -EINVAL; } pcdev->max_width = pcdev->pdata->max_width ? : 2560; pcdev->max_height = pcdev->pdata->max_height ? 
: 1920; base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) return PTR_ERR(base); pcdev->irq = irq; pcdev->base = base; pcdev->video_limit = 0; /* only enabled if second resource exists */ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (res) { err = dma_declare_coherent_memory(&pdev->dev, res->start, res->start, resource_size(res), DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE); if (!err) { dev_err(&pdev->dev, "Unable to declare CEU memory.\n"); return -ENXIO; } pcdev->video_limit = resource_size(res); } /* request irq */ err = devm_request_irq(&pdev->dev, pcdev->irq, sh_mobile_ceu_irq, IRQF_DISABLED, dev_name(&pdev->dev), pcdev); if (err) { dev_err(&pdev->dev, "Unable to register CEU interrupt.\n"); goto exit_release_mem; } pm_suspend_ignore_children(&pdev->dev, true); pm_runtime_enable(&pdev->dev); pm_runtime_resume(&pdev->dev); pcdev->ici.priv = pcdev; pcdev->ici.v4l2_dev.dev = &pdev->dev; pcdev->ici.nr = pdev->id; pcdev->ici.drv_name = dev_name(&pdev->dev); pcdev->ici.ops = &sh_mobile_ceu_host_ops; pcdev->ici.capabilities = SOCAM_HOST_CAP_STRIDE; pcdev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev); if (IS_ERR(pcdev->alloc_ctx)) { err = PTR_ERR(pcdev->alloc_ctx); goto exit_free_clk; } err = soc_camera_host_register(&pcdev->ici); if (err) goto exit_free_ctx; /* CSI2 interfacing */ csi2 = pcdev->pdata->csi2; if (csi2) { struct platform_device *csi2_pdev = platform_device_alloc("sh-mobile-csi2", csi2->id); struct sh_csi2_pdata *csi2_pdata = csi2->platform_data; if (!csi2_pdev) { err = -ENOMEM; goto exit_host_unregister; } pcdev->csi2_pdev = csi2_pdev; err = platform_device_add_data(csi2_pdev, csi2_pdata, sizeof(*csi2_pdata)); if (err < 0) goto exit_pdev_put; csi2_pdata = csi2_pdev->dev.platform_data; csi2_pdata->v4l2_dev = &pcdev->ici.v4l2_dev; csi2_pdev->resource = csi2->resource; csi2_pdev->num_resources = csi2->num_resources; err = platform_device_add(csi2_pdev); if (err < 0) goto exit_pdev_put; wait.dev = &csi2_pdev->dev; err = bus_register_notifier(&platform_bus_type, &wait.notifier); if (err < 0) goto exit_pdev_unregister; /* * From this point the driver module will not unload, until * we complete the completion. */ if (!csi2_pdev->dev.driver) { complete(&wait.completion); /* Either too late, or probing failed */ bus_unregister_notifier(&platform_bus_type, &wait.notifier); err = -ENXIO; goto exit_pdev_unregister; } /* * The module is still loaded, in the worst case it is hanging * in device release on our completion. So, _now_ dereferencing * the "owner" is safe! 
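* try_module_get() below then pins the CSI-2 driver module; the reference is dropped again with module_put() in sh_mobile_ceu_remove().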
*/ err = try_module_get(csi2_pdev->dev.driver->owner); /* Let notifier complete, if it has been locked */ complete(&wait.completion); bus_unregister_notifier(&platform_bus_type, &wait.notifier); if (!err) { err = -ENODEV; goto exit_pdev_unregister; } } return 0; exit_pdev_unregister: platform_device_del(pcdev->csi2_pdev); exit_pdev_put: pcdev->csi2_pdev->resource = NULL; platform_device_put(pcdev->csi2_pdev); exit_host_unregister: soc_camera_host_unregister(&pcdev->ici); exit_free_ctx: vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx); exit_free_clk: pm_runtime_disable(&pdev->dev); exit_release_mem: if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) dma_release_declared_memory(&pdev->dev); return err; } static int sh_mobile_ceu_remove(struct platform_device *pdev) { struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev); struct sh_mobile_ceu_dev *pcdev = container_of(soc_host, struct sh_mobile_ceu_dev, ici); struct platform_device *csi2_pdev = pcdev->csi2_pdev; soc_camera_host_unregister(soc_host); pm_runtime_disable(&pdev->dev); if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) dma_release_declared_memory(&pdev->dev); vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx); if (csi2_pdev && csi2_pdev->dev.driver) { struct module *csi2_drv = csi2_pdev->dev.driver->owner; platform_device_del(csi2_pdev); csi2_pdev->resource = NULL; platform_device_put(csi2_pdev); module_put(csi2_drv); } return 0; } static int sh_mobile_ceu_runtime_nop(struct device *dev) { /* Runtime PM callback shared between ->runtime_suspend() * and ->runtime_resume(). Simply returns success. * * This driver re-initializes all registers after * pm_runtime_get_sync() anyway so there is no need * to save and restore registers here. */ return 0; } static const struct dev_pm_ops sh_mobile_ceu_dev_pm_ops = { .runtime_suspend = sh_mobile_ceu_runtime_nop, .runtime_resume = sh_mobile_ceu_runtime_nop, }; static struct platform_driver sh_mobile_ceu_driver = { .driver = { .name = "sh_mobile_ceu", .pm = &sh_mobile_ceu_dev_pm_ops, }, .probe = sh_mobile_ceu_probe, .remove = sh_mobile_ceu_remove, }; static int __init sh_mobile_ceu_init(void) { /* Whatever return code */ request_module("sh_mobile_csi2"); return platform_driver_register(&sh_mobile_ceu_driver); } static void __exit sh_mobile_ceu_exit(void) { platform_driver_unregister(&sh_mobile_ceu_driver); } module_init(sh_mobile_ceu_init); module_exit(sh_mobile_ceu_exit); MODULE_DESCRIPTION("SuperH Mobile CEU driver"); MODULE_AUTHOR("Magnus Damm"); MODULE_LICENSE("GPL"); MODULE_VERSION("0.0.6"); MODULE_ALIAS("platform:sh_mobile_ceu");
gpl-2.0
javelinanddart/bricked-flo
arch/arm/mach-omap2/omap_hwmod_44xx_data.c
3372
148450
/* * Hardware modules present on the OMAP44xx chips * * Copyright (C) 2009-2011 Texas Instruments, Inc. * Copyright (C) 2009-2010 Nokia Corporation * * Paul Walmsley * Benoit Cousson * * This file is automatically generated from the OMAP hardware databases. * We respectfully ask that any modifications to this file be coordinated * with the public linux-omap@vger.kernel.org mailing list and the * authors above to ensure that the autogeneration scripts are kept * up-to-date with the file contents. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/io.h> #include <plat/omap_hwmod.h> #include <plat/cpu.h> #include <plat/i2c.h> #include <plat/gpio.h> #include <plat/dma.h> #include <plat/mcspi.h> #include <plat/mcbsp.h> #include <plat/mmc.h> #include <plat/dmtimer.h> #include <plat/common.h> #include "omap_hwmod_common_data.h" #include "smartreflex.h" #include "cm1_44xx.h" #include "cm2_44xx.h" #include "prm44xx.h" #include "prm-regbits-44xx.h" #include "wd_timer.h" /* Base offset for all OMAP4 interrupts external to MPUSS */ #define OMAP44XX_IRQ_GIC_START 32 /* Base offset for all OMAP4 dma requests */ #define OMAP44XX_DMA_REQ_START 1 /* Backward references (IPs with Bus Master capability) */ static struct omap_hwmod omap44xx_aess_hwmod; static struct omap_hwmod omap44xx_dma_system_hwmod; static struct omap_hwmod omap44xx_dmm_hwmod; static struct omap_hwmod omap44xx_dsp_hwmod; static struct omap_hwmod omap44xx_dss_hwmod; static struct omap_hwmod omap44xx_emif_fw_hwmod; static struct omap_hwmod omap44xx_hsi_hwmod; static struct omap_hwmod omap44xx_ipu_hwmod; static struct omap_hwmod omap44xx_iss_hwmod; static struct omap_hwmod omap44xx_iva_hwmod; static struct omap_hwmod omap44xx_l3_instr_hwmod; static struct omap_hwmod omap44xx_l3_main_1_hwmod; static struct omap_hwmod omap44xx_l3_main_2_hwmod; static struct omap_hwmod omap44xx_l3_main_3_hwmod; static struct omap_hwmod omap44xx_l4_abe_hwmod; static struct omap_hwmod omap44xx_l4_cfg_hwmod; static struct omap_hwmod omap44xx_l4_per_hwmod; static struct omap_hwmod omap44xx_l4_wkup_hwmod; static struct omap_hwmod omap44xx_mmc1_hwmod; static struct omap_hwmod omap44xx_mmc2_hwmod; static struct omap_hwmod omap44xx_mpu_hwmod; static struct omap_hwmod omap44xx_mpu_private_hwmod; static struct omap_hwmod omap44xx_usb_otg_hs_hwmod; static struct omap_hwmod omap44xx_usb_host_hs_hwmod; static struct omap_hwmod omap44xx_usb_tll_hs_hwmod; /* * Interconnects omap_hwmod structures * hwmods that compose the global OMAP interconnect */ /* * 'dmm' class * instance(s): dmm */ static struct omap_hwmod_class omap44xx_dmm_hwmod_class = { .name = "dmm", }; /* dmm */ static struct omap_hwmod_irq_info omap44xx_dmm_irqs[] = { { .irq = 113 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; /* l3_main_1 -> dmm */ static struct omap_hwmod_ocp_if omap44xx_l3_main_1__dmm = { .master = &omap44xx_l3_main_1_hwmod, .slave = &omap44xx_dmm_hwmod, .clk = "l3_div_ck", .user = OCP_USER_SDMA, }; static struct omap_hwmod_addr_space omap44xx_dmm_addrs[] = { { .pa_start = 0x4e000000, .pa_end = 0x4e0007ff, .flags = ADDR_TYPE_RT }, { } }; /* mpu -> dmm */ static struct omap_hwmod_ocp_if omap44xx_mpu__dmm = { .master = &omap44xx_mpu_hwmod, .slave = &omap44xx_dmm_hwmod, .clk = "l3_div_ck", .addr = omap44xx_dmm_addrs, .user = OCP_USER_MPU, }; /* dmm slave ports */ static struct omap_hwmod_ocp_if *omap44xx_dmm_slaves[] = { &omap44xx_l3_main_1__dmm, 
&omap44xx_mpu__dmm, }; static struct omap_hwmod omap44xx_dmm_hwmod = { .name = "dmm", .class = &omap44xx_dmm_hwmod_class, .clkdm_name = "l3_emif_clkdm", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_MEMIF_DMM_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_MEMIF_DMM_CONTEXT_OFFSET, }, }, .slaves = omap44xx_dmm_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_dmm_slaves), .mpu_irqs = omap44xx_dmm_irqs, }; /* * 'emif_fw' class * instance(s): emif_fw */ static struct omap_hwmod_class omap44xx_emif_fw_hwmod_class = { .name = "emif_fw", }; /* emif_fw */ /* dmm -> emif_fw */ static struct omap_hwmod_ocp_if omap44xx_dmm__emif_fw = { .master = &omap44xx_dmm_hwmod, .slave = &omap44xx_emif_fw_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; static struct omap_hwmod_addr_space omap44xx_emif_fw_addrs[] = { { .pa_start = 0x4a20c000, .pa_end = 0x4a20c0ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_cfg -> emif_fw */ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__emif_fw = { .master = &omap44xx_l4_cfg_hwmod, .slave = &omap44xx_emif_fw_hwmod, .clk = "l4_div_ck", .addr = omap44xx_emif_fw_addrs, .user = OCP_USER_MPU, }; /* emif_fw slave ports */ static struct omap_hwmod_ocp_if *omap44xx_emif_fw_slaves[] = { &omap44xx_dmm__emif_fw, &omap44xx_l4_cfg__emif_fw, }; static struct omap_hwmod omap44xx_emif_fw_hwmod = { .name = "emif_fw", .class = &omap44xx_emif_fw_hwmod_class, .clkdm_name = "l3_emif_clkdm", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_MEMIF_EMIF_FW_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_MEMIF_EMIF_FW_CONTEXT_OFFSET, }, }, .slaves = omap44xx_emif_fw_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_emif_fw_slaves), }; /* * 'l3' class * instance(s): l3_instr, l3_main_1, l3_main_2, l3_main_3 */ static struct omap_hwmod_class omap44xx_l3_hwmod_class = { .name = "l3", }; /* l3_instr */ /* iva -> l3_instr */ static struct omap_hwmod_ocp_if omap44xx_iva__l3_instr = { .master = &omap44xx_iva_hwmod, .slave = &omap44xx_l3_instr_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* l3_main_3 -> l3_instr */ static struct omap_hwmod_ocp_if omap44xx_l3_main_3__l3_instr = { .master = &omap44xx_l3_main_3_hwmod, .slave = &omap44xx_l3_instr_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* l3_instr slave ports */ static struct omap_hwmod_ocp_if *omap44xx_l3_instr_slaves[] = { &omap44xx_iva__l3_instr, &omap44xx_l3_main_3__l3_instr, }; static struct omap_hwmod omap44xx_l3_instr_hwmod = { .name = "l3_instr", .class = &omap44xx_l3_hwmod_class, .clkdm_name = "l3_instr_clkdm", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L3INSTR_L3_INSTR_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L3INSTR_L3_INSTR_CONTEXT_OFFSET, .modulemode = MODULEMODE_HWCTRL, }, }, .slaves = omap44xx_l3_instr_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_l3_instr_slaves), }; /* l3_main_1 */ static struct omap_hwmod_irq_info omap44xx_l3_main_1_irqs[] = { { .name = "dbg_err", .irq = 9 + OMAP44XX_IRQ_GIC_START }, { .name = "app_err", .irq = 10 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; /* dsp -> l3_main_1 */ static struct omap_hwmod_ocp_if omap44xx_dsp__l3_main_1 = { .master = &omap44xx_dsp_hwmod, .slave = &omap44xx_l3_main_1_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* dss -> l3_main_1 */ static struct omap_hwmod_ocp_if omap44xx_dss__l3_main_1 = { .master = &omap44xx_dss_hwmod, .slave = &omap44xx_l3_main_1_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* l3_main_2 -> l3_main_1 */ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__l3_main_1 = { .master = 
&omap44xx_l3_main_2_hwmod, .slave = &omap44xx_l3_main_1_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* l4_cfg -> l3_main_1 */ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_1 = { .master = &omap44xx_l4_cfg_hwmod, .slave = &omap44xx_l3_main_1_hwmod, .clk = "l4_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* mmc1 -> l3_main_1 */ static struct omap_hwmod_ocp_if omap44xx_mmc1__l3_main_1 = { .master = &omap44xx_mmc1_hwmod, .slave = &omap44xx_l3_main_1_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* mmc2 -> l3_main_1 */ static struct omap_hwmod_ocp_if omap44xx_mmc2__l3_main_1 = { .master = &omap44xx_mmc2_hwmod, .slave = &omap44xx_l3_main_1_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; static struct omap_hwmod_addr_space omap44xx_l3_main_1_addrs[] = { { .pa_start = 0x44000000, .pa_end = 0x44000fff, .flags = ADDR_TYPE_RT }, { } }; /* mpu -> l3_main_1 */ static struct omap_hwmod_ocp_if omap44xx_mpu__l3_main_1 = { .master = &omap44xx_mpu_hwmod, .slave = &omap44xx_l3_main_1_hwmod, .clk = "l3_div_ck", .addr = omap44xx_l3_main_1_addrs, .user = OCP_USER_MPU, }; /* l3_main_1 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_l3_main_1_slaves[] = { &omap44xx_dsp__l3_main_1, &omap44xx_dss__l3_main_1, &omap44xx_l3_main_2__l3_main_1, &omap44xx_l4_cfg__l3_main_1, &omap44xx_mmc1__l3_main_1, &omap44xx_mmc2__l3_main_1, &omap44xx_mpu__l3_main_1, }; static struct omap_hwmod omap44xx_l3_main_1_hwmod = { .name = "l3_main_1", .class = &omap44xx_l3_hwmod_class, .clkdm_name = "l3_1_clkdm", .mpu_irqs = omap44xx_l3_main_1_irqs, .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L3_1_L3_1_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L3_1_L3_1_CONTEXT_OFFSET, }, }, .slaves = omap44xx_l3_main_1_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_l3_main_1_slaves), }; /* l3_main_2 */ /* dma_system -> l3_main_2 */ static struct omap_hwmod_ocp_if omap44xx_dma_system__l3_main_2 = { .master = &omap44xx_dma_system_hwmod, .slave = &omap44xx_l3_main_2_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* hsi -> l3_main_2 */ static struct omap_hwmod_ocp_if omap44xx_hsi__l3_main_2 = { .master = &omap44xx_hsi_hwmod, .slave = &omap44xx_l3_main_2_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* ipu -> l3_main_2 */ static struct omap_hwmod_ocp_if omap44xx_ipu__l3_main_2 = { .master = &omap44xx_ipu_hwmod, .slave = &omap44xx_l3_main_2_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* iss -> l3_main_2 */ static struct omap_hwmod_ocp_if omap44xx_iss__l3_main_2 = { .master = &omap44xx_iss_hwmod, .slave = &omap44xx_l3_main_2_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* iva -> l3_main_2 */ static struct omap_hwmod_ocp_if omap44xx_iva__l3_main_2 = { .master = &omap44xx_iva_hwmod, .slave = &omap44xx_l3_main_2_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; static struct omap_hwmod_addr_space omap44xx_l3_main_2_addrs[] = { { .pa_start = 0x44800000, .pa_end = 0x44801fff, .flags = ADDR_TYPE_RT }, { } }; /* l3_main_1 -> l3_main_2 */ static struct omap_hwmod_ocp_if omap44xx_l3_main_1__l3_main_2 = { .master = &omap44xx_l3_main_1_hwmod, .slave = &omap44xx_l3_main_2_hwmod, .clk = "l3_div_ck", .addr = omap44xx_l3_main_2_addrs, .user = OCP_USER_MPU, }; /* l4_cfg -> l3_main_2 */ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_2 = { .master = &omap44xx_l4_cfg_hwmod, .slave = &omap44xx_l3_main_2_hwmod, .clk = "l4_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* usb_otg_hs 
-> l3_main_2 */ static struct omap_hwmod_ocp_if omap44xx_usb_otg_hs__l3_main_2 = { .master = &omap44xx_usb_otg_hs_hwmod, .slave = &omap44xx_l3_main_2_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* l3_main_2 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_l3_main_2_slaves[] = { &omap44xx_dma_system__l3_main_2, &omap44xx_hsi__l3_main_2, &omap44xx_ipu__l3_main_2, &omap44xx_iss__l3_main_2, &omap44xx_iva__l3_main_2, &omap44xx_l3_main_1__l3_main_2, &omap44xx_l4_cfg__l3_main_2, &omap44xx_usb_otg_hs__l3_main_2, }; static struct omap_hwmod omap44xx_l3_main_2_hwmod = { .name = "l3_main_2", .class = &omap44xx_l3_hwmod_class, .clkdm_name = "l3_2_clkdm", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L3_2_L3_2_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L3_2_L3_2_CONTEXT_OFFSET, }, }, .slaves = omap44xx_l3_main_2_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_l3_main_2_slaves), }; /* l3_main_3 */ static struct omap_hwmod_addr_space omap44xx_l3_main_3_addrs[] = { { .pa_start = 0x45000000, .pa_end = 0x45000fff, .flags = ADDR_TYPE_RT }, { } }; /* l3_main_1 -> l3_main_3 */ static struct omap_hwmod_ocp_if omap44xx_l3_main_1__l3_main_3 = { .master = &omap44xx_l3_main_1_hwmod, .slave = &omap44xx_l3_main_3_hwmod, .clk = "l3_div_ck", .addr = omap44xx_l3_main_3_addrs, .user = OCP_USER_MPU, }; /* l3_main_2 -> l3_main_3 */ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__l3_main_3 = { .master = &omap44xx_l3_main_2_hwmod, .slave = &omap44xx_l3_main_3_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* l4_cfg -> l3_main_3 */ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_3 = { .master = &omap44xx_l4_cfg_hwmod, .slave = &omap44xx_l3_main_3_hwmod, .clk = "l4_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* l3_main_3 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_l3_main_3_slaves[] = { &omap44xx_l3_main_1__l3_main_3, &omap44xx_l3_main_2__l3_main_3, &omap44xx_l4_cfg__l3_main_3, }; static struct omap_hwmod omap44xx_l3_main_3_hwmod = { .name = "l3_main_3", .class = &omap44xx_l3_hwmod_class, .clkdm_name = "l3_instr_clkdm", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L3INSTR_L3_3_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L3INSTR_L3_3_CONTEXT_OFFSET, .modulemode = MODULEMODE_HWCTRL, }, }, .slaves = omap44xx_l3_main_3_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_l3_main_3_slaves), }; /* * 'l4' class * instance(s): l4_abe, l4_cfg, l4_per, l4_wkup */ static struct omap_hwmod_class omap44xx_l4_hwmod_class = { .name = "l4", }; /* l4_abe */ /* aess -> l4_abe */ static struct omap_hwmod_ocp_if omap44xx_aess__l4_abe = { .master = &omap44xx_aess_hwmod, .slave = &omap44xx_l4_abe_hwmod, .clk = "ocp_abe_iclk", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* dsp -> l4_abe */ static struct omap_hwmod_ocp_if omap44xx_dsp__l4_abe = { .master = &omap44xx_dsp_hwmod, .slave = &omap44xx_l4_abe_hwmod, .clk = "ocp_abe_iclk", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* l3_main_1 -> l4_abe */ static struct omap_hwmod_ocp_if omap44xx_l3_main_1__l4_abe = { .master = &omap44xx_l3_main_1_hwmod, .slave = &omap44xx_l4_abe_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* mpu -> l4_abe */ static struct omap_hwmod_ocp_if omap44xx_mpu__l4_abe = { .master = &omap44xx_mpu_hwmod, .slave = &omap44xx_l4_abe_hwmod, .clk = "ocp_abe_iclk", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* l4_abe slave ports */ static struct omap_hwmod_ocp_if *omap44xx_l4_abe_slaves[] = { &omap44xx_aess__l4_abe, &omap44xx_dsp__l4_abe, &omap44xx_l3_main_1__l4_abe, &omap44xx_mpu__l4_abe, }; static struct 
omap_hwmod omap44xx_l4_abe_hwmod = { .name = "l4_abe", .class = &omap44xx_l4_hwmod_class, .clkdm_name = "abe_clkdm", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM1_ABE_L4ABE_CLKCTRL_OFFSET, }, }, .slaves = omap44xx_l4_abe_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_l4_abe_slaves), }; /* l4_cfg */ /* l3_main_1 -> l4_cfg */ static struct omap_hwmod_ocp_if omap44xx_l3_main_1__l4_cfg = { .master = &omap44xx_l3_main_1_hwmod, .slave = &omap44xx_l4_cfg_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* l4_cfg slave ports */ static struct omap_hwmod_ocp_if *omap44xx_l4_cfg_slaves[] = { &omap44xx_l3_main_1__l4_cfg, }; static struct omap_hwmod omap44xx_l4_cfg_hwmod = { .name = "l4_cfg", .class = &omap44xx_l4_hwmod_class, .clkdm_name = "l4_cfg_clkdm", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4CFG_L4_CFG_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4CFG_L4_CFG_CONTEXT_OFFSET, }, }, .slaves = omap44xx_l4_cfg_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_l4_cfg_slaves), }; /* l4_per */ /* l3_main_2 -> l4_per */ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__l4_per = { .master = &omap44xx_l3_main_2_hwmod, .slave = &omap44xx_l4_per_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* l4_per slave ports */ static struct omap_hwmod_ocp_if *omap44xx_l4_per_slaves[] = { &omap44xx_l3_main_2__l4_per, }; static struct omap_hwmod omap44xx_l4_per_hwmod = { .name = "l4_per", .class = &omap44xx_l4_hwmod_class, .clkdm_name = "l4_per_clkdm", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_L4PER_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_L4_PER_CONTEXT_OFFSET, }, }, .slaves = omap44xx_l4_per_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_l4_per_slaves), }; /* l4_wkup */ /* l4_cfg -> l4_wkup */ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l4_wkup = { .master = &omap44xx_l4_cfg_hwmod, .slave = &omap44xx_l4_wkup_hwmod, .clk = "l4_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* l4_wkup slave ports */ static struct omap_hwmod_ocp_if *omap44xx_l4_wkup_slaves[] = { &omap44xx_l4_cfg__l4_wkup, }; static struct omap_hwmod omap44xx_l4_wkup_hwmod = { .name = "l4_wkup", .class = &omap44xx_l4_hwmod_class, .clkdm_name = "l4_wkup_clkdm", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_WKUP_L4WKUP_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_WKUP_L4WKUP_CONTEXT_OFFSET, }, }, .slaves = omap44xx_l4_wkup_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_l4_wkup_slaves), }; /* * 'mpu_bus' class * instance(s): mpu_private */ static struct omap_hwmod_class omap44xx_mpu_bus_hwmod_class = { .name = "mpu_bus", }; /* mpu_private */ /* mpu -> mpu_private */ static struct omap_hwmod_ocp_if omap44xx_mpu__mpu_private = { .master = &omap44xx_mpu_hwmod, .slave = &omap44xx_mpu_private_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* mpu_private slave ports */ static struct omap_hwmod_ocp_if *omap44xx_mpu_private_slaves[] = { &omap44xx_mpu__mpu_private, }; static struct omap_hwmod omap44xx_mpu_private_hwmod = { .name = "mpu_private", .class = &omap44xx_mpu_bus_hwmod_class, .clkdm_name = "mpuss_clkdm", .slaves = omap44xx_mpu_private_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_mpu_private_slaves), }; /* * Modules omap_hwmod structures * * The following IPs are excluded for the moment because: * - They do not need an explicit SW control using omap_hwmod API. 
* - They still need to be validated with the driver * properly adapted to omap_hwmod / omap_device * * c2c * c2c_target_fw * cm_core * cm_core_aon * ctrl_module_core * ctrl_module_pad_core * ctrl_module_pad_wkup * ctrl_module_wkup * debugss * efuse_ctrl_cust * efuse_ctrl_std * elm * emif1 * emif2 * fdif * gpmc * gpu * hdq1w * mcasp * mpu_c0 * mpu_c1 * ocmc_ram * ocp2scp_usb_phy * ocp_wp_noc * prcm_mpu * prm * scrm * sl2if * slimbus1 * slimbus2 * usb_host_fs * usb_host_hs * usb_phy_cm * usb_tll_hs * usim */ /* * 'aess' class * audio engine sub-system */ static struct omap_hwmod_class_sysconfig omap44xx_aess_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART | MSTANDBY_SMART_WKUP), .sysc_fields = &omap_hwmod_sysc_type2, }; static struct omap_hwmod_class omap44xx_aess_hwmod_class = { .name = "aess", .sysc = &omap44xx_aess_sysc, }; /* aess */ static struct omap_hwmod_irq_info omap44xx_aess_irqs[] = { { .irq = 99 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_aess_sdma_reqs[] = { { .name = "fifo0", .dma_req = 100 + OMAP44XX_DMA_REQ_START }, { .name = "fifo1", .dma_req = 101 + OMAP44XX_DMA_REQ_START }, { .name = "fifo2", .dma_req = 102 + OMAP44XX_DMA_REQ_START }, { .name = "fifo3", .dma_req = 103 + OMAP44XX_DMA_REQ_START }, { .name = "fifo4", .dma_req = 104 + OMAP44XX_DMA_REQ_START }, { .name = "fifo5", .dma_req = 105 + OMAP44XX_DMA_REQ_START }, { .name = "fifo6", .dma_req = 106 + OMAP44XX_DMA_REQ_START }, { .name = "fifo7", .dma_req = 107 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; /* aess master ports */ static struct omap_hwmod_ocp_if *omap44xx_aess_masters[] = { &omap44xx_aess__l4_abe, }; static struct omap_hwmod_addr_space omap44xx_aess_addrs[] = { { .pa_start = 0x401f1000, .pa_end = 0x401f13ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_abe -> aess */ static struct omap_hwmod_ocp_if omap44xx_l4_abe__aess = { .master = &omap44xx_l4_abe_hwmod, .slave = &omap44xx_aess_hwmod, .clk = "ocp_abe_iclk", .addr = omap44xx_aess_addrs, .user = OCP_USER_MPU, }; static struct omap_hwmod_addr_space omap44xx_aess_dma_addrs[] = { { .pa_start = 0x490f1000, .pa_end = 0x490f13ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_abe -> aess (dma) */ static struct omap_hwmod_ocp_if omap44xx_l4_abe__aess_dma = { .master = &omap44xx_l4_abe_hwmod, .slave = &omap44xx_aess_hwmod, .clk = "ocp_abe_iclk", .addr = omap44xx_aess_dma_addrs, .user = OCP_USER_SDMA, }; /* aess slave ports */ static struct omap_hwmod_ocp_if *omap44xx_aess_slaves[] = { &omap44xx_l4_abe__aess, &omap44xx_l4_abe__aess_dma, }; static struct omap_hwmod omap44xx_aess_hwmod = { .name = "aess", .class = &omap44xx_aess_hwmod_class, .clkdm_name = "abe_clkdm", .mpu_irqs = omap44xx_aess_irqs, .sdma_reqs = omap44xx_aess_sdma_reqs, .main_clk = "aess_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM1_ABE_AESS_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_ABE_AESS_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .slaves = omap44xx_aess_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_aess_slaves), .masters = omap44xx_aess_masters, .masters_cnt = ARRAY_SIZE(omap44xx_aess_masters), }; /* * 'bandgap' class * bandgap reference for LDO regulators */ static struct omap_hwmod_class omap44xx_bandgap_hwmod_class = { .name = "bandgap", }; /* bandgap */ static struct omap_hwmod_opt_clk bandgap_opt_clks[] = { { .role = "fclk", .clk = "bandgap_fclk" }, }; static struct omap_hwmod
omap44xx_bandgap_hwmod = { .name = "bandgap", .class = &omap44xx_bandgap_hwmod_class, .clkdm_name = "l4_wkup_clkdm", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_WKUP_BANDGAP_CLKCTRL_OFFSET, }, }, .opt_clks = bandgap_opt_clks, .opt_clks_cnt = ARRAY_SIZE(bandgap_opt_clks), }; /* * 'counter' class * 32-bit ordinary counter, clocked by the falling edge of the 32 kHz clock */ static struct omap_hwmod_class_sysconfig omap44xx_counter_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0004, .sysc_flags = SYSC_HAS_SIDLEMODE, .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | SIDLE_SMART_WKUP), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap44xx_counter_hwmod_class = { .name = "counter", .sysc = &omap44xx_counter_sysc, }; /* counter_32k */ static struct omap_hwmod omap44xx_counter_32k_hwmod; static struct omap_hwmod_addr_space omap44xx_counter_32k_addrs[] = { { .pa_start = 0x4a304000, .pa_end = 0x4a30401f, .flags = ADDR_TYPE_RT }, { } }; /* l4_wkup -> counter_32k */ static struct omap_hwmod_ocp_if omap44xx_l4_wkup__counter_32k = { .master = &omap44xx_l4_wkup_hwmod, .slave = &omap44xx_counter_32k_hwmod, .clk = "l4_wkup_clk_mux_ck", .addr = omap44xx_counter_32k_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* counter_32k slave ports */ static struct omap_hwmod_ocp_if *omap44xx_counter_32k_slaves[] = { &omap44xx_l4_wkup__counter_32k, }; static struct omap_hwmod omap44xx_counter_32k_hwmod = { .name = "counter_32k", .class = &omap44xx_counter_hwmod_class, .clkdm_name = "l4_wkup_clkdm", .flags = HWMOD_SWSUP_SIDLE, .main_clk = "sys_32k_ck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_WKUP_SYNCTIMER_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_WKUP_SYNCTIMER_CONTEXT_OFFSET, }, }, .slaves = omap44xx_counter_32k_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_counter_32k_slaves), }; /* * 'dma' class * dma controller for data exchange between memory and memory (i.e.
internal or * external memory) and gp peripherals to memory or memory to gp peripherals */ static struct omap_hwmod_class_sysconfig omap44xx_dma_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x002c, .syss_offs = 0x0028, .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_EMUFREE | SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap44xx_dma_hwmod_class = { .name = "dma", .sysc = &omap44xx_dma_sysc, }; /* dma dev_attr */ static struct omap_dma_dev_attr dma_dev_attr = { .dev_caps = RESERVE_CHANNEL | DMA_LINKED_LCH | GLOBAL_PRIORITY | IS_CSSA_32 | IS_CDSA_32 | IS_RW_PRIORITY, .lch_count = 32, }; /* dma_system */ static struct omap_hwmod_irq_info omap44xx_dma_system_irqs[] = { { .name = "0", .irq = 12 + OMAP44XX_IRQ_GIC_START }, { .name = "1", .irq = 13 + OMAP44XX_IRQ_GIC_START }, { .name = "2", .irq = 14 + OMAP44XX_IRQ_GIC_START }, { .name = "3", .irq = 15 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; /* dma_system master ports */ static struct omap_hwmod_ocp_if *omap44xx_dma_system_masters[] = { &omap44xx_dma_system__l3_main_2, }; static struct omap_hwmod_addr_space omap44xx_dma_system_addrs[] = { { .pa_start = 0x4a056000, .pa_end = 0x4a056fff, .flags = ADDR_TYPE_RT }, { } }; /* l4_cfg -> dma_system */ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__dma_system = { .master = &omap44xx_l4_cfg_hwmod, .slave = &omap44xx_dma_system_hwmod, .clk = "l4_div_ck", .addr = omap44xx_dma_system_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* dma_system slave ports */ static struct omap_hwmod_ocp_if *omap44xx_dma_system_slaves[] = { &omap44xx_l4_cfg__dma_system, }; static struct omap_hwmod omap44xx_dma_system_hwmod = { .name = "dma_system", .class = &omap44xx_dma_hwmod_class, .clkdm_name = "l3_dma_clkdm", .mpu_irqs = omap44xx_dma_system_irqs, .main_clk = "l3_div_ck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_SDMA_SDMA_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_SDMA_SDMA_CONTEXT_OFFSET, }, }, .dev_attr = &dma_dev_attr, .slaves = omap44xx_dma_system_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_dma_system_slaves), .masters = omap44xx_dma_system_masters, .masters_cnt = ARRAY_SIZE(omap44xx_dma_system_masters), }; /* * 'dmic' class * digital microphone controller */ static struct omap_hwmod_class_sysconfig omap44xx_dmic_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_RESET_STATUS | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | SIDLE_SMART_WKUP), .sysc_fields = &omap_hwmod_sysc_type2, }; static struct omap_hwmod_class omap44xx_dmic_hwmod_class = { .name = "dmic", .sysc = &omap44xx_dmic_sysc, }; /* dmic */ static struct omap_hwmod omap44xx_dmic_hwmod; static struct omap_hwmod_irq_info omap44xx_dmic_irqs[] = { { .irq = 114 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_dmic_sdma_reqs[] = { { .dma_req = 66 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_dmic_addrs[] = { { .name = "mpu", .pa_start = 0x4012e000, .pa_end = 0x4012e07f, .flags = ADDR_TYPE_RT }, { } }; /* l4_abe -> dmic */ static struct omap_hwmod_ocp_if omap44xx_l4_abe__dmic = { .master = &omap44xx_l4_abe_hwmod, .slave = &omap44xx_dmic_hwmod, .clk = "ocp_abe_iclk", .addr = omap44xx_dmic_addrs, .user = OCP_USER_MPU, }; static struct omap_hwmod_addr_space 
omap44xx_dmic_dma_addrs[] = {
	{
		.name		= "dma",
		.pa_start	= 0x4902e000,
		.pa_end		= 0x4902e07f,
		.flags		= ADDR_TYPE_RT
	},
	{ }
};

/* l4_abe -> dmic (dma) */
static struct omap_hwmod_ocp_if omap44xx_l4_abe__dmic_dma = {
	.master		= &omap44xx_l4_abe_hwmod,
	.slave		= &omap44xx_dmic_hwmod,
	.clk		= "ocp_abe_iclk",
	.addr		= omap44xx_dmic_dma_addrs,
	.user		= OCP_USER_SDMA,
};

/* dmic slave ports */
static struct omap_hwmod_ocp_if *omap44xx_dmic_slaves[] = {
	&omap44xx_l4_abe__dmic,
	&omap44xx_l4_abe__dmic_dma,
};

static struct omap_hwmod omap44xx_dmic_hwmod = {
	.name		= "dmic",
	.class		= &omap44xx_dmic_hwmod_class,
	.clkdm_name	= "abe_clkdm",
	.mpu_irqs	= omap44xx_dmic_irqs,
	.sdma_reqs	= omap44xx_dmic_sdma_reqs,
	.main_clk	= "dmic_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM1_ABE_DMIC_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_ABE_DMIC_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.slaves		= omap44xx_dmic_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_dmic_slaves),
};

/*
 * 'dsp' class
 * dsp sub-system
 */

static struct omap_hwmod_class omap44xx_dsp_hwmod_class = {
	.name	= "dsp",
};

/* dsp */
static struct omap_hwmod_irq_info omap44xx_dsp_irqs[] = {
	{ .irq = 28 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_rst_info omap44xx_dsp_resets[] = {
	{ .name = "mmu_cache", .rst_shift = 1 },
};

static struct omap_hwmod_rst_info omap44xx_dsp_c0_resets[] = {
	{ .name = "dsp", .rst_shift = 0 },
};

/* dsp -> iva */
static struct omap_hwmod_ocp_if omap44xx_dsp__iva = {
	.master		= &omap44xx_dsp_hwmod,
	.slave		= &omap44xx_iva_hwmod,
	.clk		= "dpll_iva_m5x2_ck",
};

/* dsp master ports */
static struct omap_hwmod_ocp_if *omap44xx_dsp_masters[] = {
	&omap44xx_dsp__l3_main_1,
	&omap44xx_dsp__l4_abe,
	&omap44xx_dsp__iva,
};

/* l4_cfg -> dsp */
static struct omap_hwmod_ocp_if omap44xx_l4_cfg__dsp = {
	.master		= &omap44xx_l4_cfg_hwmod,
	.slave		= &omap44xx_dsp_hwmod,
	.clk		= "l4_div_ck",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* dsp slave ports */
static struct omap_hwmod_ocp_if *omap44xx_dsp_slaves[] = {
	&omap44xx_l4_cfg__dsp,
};

/* Pseudo hwmod for reset control purpose only */
static struct omap_hwmod omap44xx_dsp_c0_hwmod = {
	.name		= "dsp_c0",
	.class		= &omap44xx_dsp_hwmod_class,
	.clkdm_name	= "tesla_clkdm",
	.flags		= HWMOD_INIT_NO_RESET,
	.rst_lines	= omap44xx_dsp_c0_resets,
	.rst_lines_cnt	= ARRAY_SIZE(omap44xx_dsp_c0_resets),
	.prcm = {
		.omap4 = {
			.rstctrl_offs = OMAP4_RM_TESLA_RSTCTRL_OFFSET,
		},
	},
};

static struct omap_hwmod omap44xx_dsp_hwmod = {
	.name		= "dsp",
	.class		= &omap44xx_dsp_hwmod_class,
	.clkdm_name	= "tesla_clkdm",
	.mpu_irqs	= omap44xx_dsp_irqs,
	.rst_lines	= omap44xx_dsp_resets,
	.rst_lines_cnt	= ARRAY_SIZE(omap44xx_dsp_resets),
	.main_clk	= "dsp_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_TESLA_TESLA_CLKCTRL_OFFSET,
			.rstctrl_offs = OMAP4_RM_TESLA_RSTCTRL_OFFSET,
			.context_offs = OMAP4_RM_TESLA_TESLA_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_HWCTRL,
		},
	},
	.slaves		= omap44xx_dsp_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_dsp_slaves),
	.masters	= omap44xx_dsp_masters,
	.masters_cnt	= ARRAY_SIZE(omap44xx_dsp_masters),
};

/*
 * 'dss' class
 * display sub-system
 */

static struct omap_hwmod_class_sysconfig omap44xx_dss_sysc = {
	.rev_offs	= 0x0000,
	.syss_offs	= 0x0014,
	.sysc_flags	= SYSS_HAS_RESET_STATUS,
};

static struct omap_hwmod_class omap44xx_dss_hwmod_class = {
	.name	= "dss",
	.sysc	= &omap44xx_dss_sysc,
	.reset	= omap_dss_reset,
};

/* dss */
/* dss master ports */
static struct omap_hwmod_ocp_if *omap44xx_dss_masters[] = {
	&omap44xx_dss__l3_main_1,
};
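/*
 * dss_core exposes only a reset-status bit (SYSS_HAS_RESET_STATUS above,
 * with no SYSC_HAS_SOFTRESET), which is presumably why the class uses the
 * custom omap_dss_reset() hook instead of the generic OCP softreset path.
 */
static struct omap_hwmod_addr_space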
omap44xx_dss_dma_addrs[] = {
	{
		.pa_start	= 0x58000000,
		.pa_end		= 0x5800007f,
		.flags		= ADDR_TYPE_RT
	},
	{ }
};

/* l3_main_2 -> dss */
static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss = {
	.master		= &omap44xx_l3_main_2_hwmod,
	.slave		= &omap44xx_dss_hwmod,
	.clk		= "dss_fck",
	.addr		= omap44xx_dss_dma_addrs,
	.user		= OCP_USER_SDMA,
};

static struct omap_hwmod_addr_space omap44xx_dss_addrs[] = {
	{
		.pa_start	= 0x48040000,
		.pa_end		= 0x4804007f,
		.flags		= ADDR_TYPE_RT
	},
	{ }
};

/* l4_per -> dss */
static struct omap_hwmod_ocp_if omap44xx_l4_per__dss = {
	.master		= &omap44xx_l4_per_hwmod,
	.slave		= &omap44xx_dss_hwmod,
	.clk		= "l4_div_ck",
	.addr		= omap44xx_dss_addrs,
	.user		= OCP_USER_MPU,
};

/* dss slave ports */
static struct omap_hwmod_ocp_if *omap44xx_dss_slaves[] = {
	&omap44xx_l3_main_2__dss,
	&omap44xx_l4_per__dss,
};

static struct omap_hwmod_opt_clk dss_opt_clks[] = {
	{ .role = "sys_clk", .clk = "dss_sys_clk" },
	{ .role = "tv_clk", .clk = "dss_tv_clk" },
	{ .role = "hdmi_clk", .clk = "dss_48mhz_clk" },
};

static struct omap_hwmod omap44xx_dss_hwmod = {
	.name		= "dss_core",
	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
	.class		= &omap44xx_dss_hwmod_class,
	.clkdm_name	= "l3_dss_clkdm",
	.main_clk	= "dss_dss_clk",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET,
		},
	},
	.opt_clks	= dss_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(dss_opt_clks),
	.slaves		= omap44xx_dss_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_dss_slaves),
	.masters	= omap44xx_dss_masters,
	.masters_cnt	= ARRAY_SIZE(omap44xx_dss_masters),
};

/*
 * 'dispc' class
 * display controller
 */

static struct omap_hwmod_class_sysconfig omap44xx_dispc_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	.syss_offs	= 0x0014,
	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
			   SYSC_HAS_ENAWAKEUP | SYSC_HAS_MIDLEMODE |
			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
			   SYSS_HAS_RESET_STATUS),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
			   MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class omap44xx_dispc_hwmod_class = {
	.name	= "dispc",
	.sysc	= &omap44xx_dispc_sysc,
};

/* dss_dispc */
static struct omap_hwmod omap44xx_dss_dispc_hwmod;
static struct omap_hwmod_irq_info omap44xx_dss_dispc_irqs[] = {
	{ .irq = 25 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_dma_info omap44xx_dss_dispc_sdma_reqs[] = {
	{ .dma_req = 5 + OMAP44XX_DMA_REQ_START },
	{ .dma_req = -1 }
};

static struct omap_hwmod_addr_space omap44xx_dss_dispc_dma_addrs[] = {
	{
		.pa_start	= 0x58001000,
		.pa_end		= 0x58001fff,
		.flags		= ADDR_TYPE_RT
	},
	{ }
};

/* l3_main_2 -> dss_dispc */
static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_dispc = {
	.master		= &omap44xx_l3_main_2_hwmod,
	.slave		= &omap44xx_dss_dispc_hwmod,
	.clk		= "dss_fck",
	.addr		= omap44xx_dss_dispc_dma_addrs,
	.user		= OCP_USER_SDMA,
};

static struct omap_hwmod_addr_space omap44xx_dss_dispc_addrs[] = {
	{
		.pa_start	= 0x48041000,
		.pa_end		= 0x48041fff,
		.flags		= ADDR_TYPE_RT
	},
	{ }
};

static struct omap_dss_dispc_dev_attr omap44xx_dss_dispc_dev_attr = {
	.manager_count		= 3,
	.has_framedonetv_irq	= 1
};

/* l4_per -> dss_dispc */
static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_dispc = {
	.master		= &omap44xx_l4_per_hwmod,
	.slave		= &omap44xx_dss_dispc_hwmod,
	.clk		= "l4_div_ck",
	.addr		= omap44xx_dss_dispc_addrs,
	.user		= OCP_USER_MPU,
};

/* dss_dispc slave ports */
static struct omap_hwmod_ocp_if *omap44xx_dss_dispc_slaves[] = {
	&omap44xx_l3_main_2__dss_dispc,
	&omap44xx_l4_per__dss_dispc,
};
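/*
 * The dev_attr above (three overlay managers, TV FRAMEDONE IRQ support)
 * reaches the display driver through the hwmod's dev_attr pointer. A
 * minimal sketch of the consumer side, with a hypothetical helper name
 * used purely for illustration:
 *
 *	struct omap_dss_dispc_dev_attr *da = oh->dev_attr;
 *	if (da->has_framedonetv_irq)
 *		setup_framedonetv_handler();	(hypothetical helper)
 */
static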
struct omap_hwmod omap44xx_dss_dispc_hwmod = { .name = "dss_dispc", .class = &omap44xx_dispc_hwmod_class, .clkdm_name = "l3_dss_clkdm", .mpu_irqs = omap44xx_dss_dispc_irqs, .sdma_reqs = omap44xx_dss_dispc_sdma_reqs, .main_clk = "dss_dss_clk", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET, }, }, .slaves = omap44xx_dss_dispc_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_dss_dispc_slaves), .dev_attr = &omap44xx_dss_dispc_dev_attr }; /* * 'dsi' class * display serial interface controller */ static struct omap_hwmod_class_sysconfig omap44xx_dsi_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap44xx_dsi_hwmod_class = { .name = "dsi", .sysc = &omap44xx_dsi_sysc, }; /* dss_dsi1 */ static struct omap_hwmod omap44xx_dss_dsi1_hwmod; static struct omap_hwmod_irq_info omap44xx_dss_dsi1_irqs[] = { { .irq = 53 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_dss_dsi1_sdma_reqs[] = { { .dma_req = 74 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_dss_dsi1_dma_addrs[] = { { .pa_start = 0x58004000, .pa_end = 0x580041ff, .flags = ADDR_TYPE_RT }, { } }; /* l3_main_2 -> dss_dsi1 */ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_dsi1 = { .master = &omap44xx_l3_main_2_hwmod, .slave = &omap44xx_dss_dsi1_hwmod, .clk = "dss_fck", .addr = omap44xx_dss_dsi1_dma_addrs, .user = OCP_USER_SDMA, }; static struct omap_hwmod_addr_space omap44xx_dss_dsi1_addrs[] = { { .pa_start = 0x48044000, .pa_end = 0x480441ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> dss_dsi1 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_dsi1 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_dss_dsi1_hwmod, .clk = "l4_div_ck", .addr = omap44xx_dss_dsi1_addrs, .user = OCP_USER_MPU, }; /* dss_dsi1 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_dss_dsi1_slaves[] = { &omap44xx_l3_main_2__dss_dsi1, &omap44xx_l4_per__dss_dsi1, }; static struct omap_hwmod_opt_clk dss_dsi1_opt_clks[] = { { .role = "sys_clk", .clk = "dss_sys_clk" }, }; static struct omap_hwmod omap44xx_dss_dsi1_hwmod = { .name = "dss_dsi1", .class = &omap44xx_dsi_hwmod_class, .clkdm_name = "l3_dss_clkdm", .mpu_irqs = omap44xx_dss_dsi1_irqs, .sdma_reqs = omap44xx_dss_dsi1_sdma_reqs, .main_clk = "dss_dss_clk", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET, }, }, .opt_clks = dss_dsi1_opt_clks, .opt_clks_cnt = ARRAY_SIZE(dss_dsi1_opt_clks), .slaves = omap44xx_dss_dsi1_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_dss_dsi1_slaves), }; /* dss_dsi2 */ static struct omap_hwmod omap44xx_dss_dsi2_hwmod; static struct omap_hwmod_irq_info omap44xx_dss_dsi2_irqs[] = { { .irq = 84 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_dss_dsi2_sdma_reqs[] = { { .dma_req = 83 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_dss_dsi2_dma_addrs[] = { { .pa_start = 0x58005000, .pa_end = 0x580051ff, .flags = ADDR_TYPE_RT }, { } }; /* l3_main_2 -> dss_dsi2 */ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_dsi2 = { .master = &omap44xx_l3_main_2_hwmod, .slave = 
&omap44xx_dss_dsi2_hwmod, .clk = "dss_fck", .addr = omap44xx_dss_dsi2_dma_addrs, .user = OCP_USER_SDMA, }; static struct omap_hwmod_addr_space omap44xx_dss_dsi2_addrs[] = { { .pa_start = 0x48045000, .pa_end = 0x480451ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> dss_dsi2 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_dsi2 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_dss_dsi2_hwmod, .clk = "l4_div_ck", .addr = omap44xx_dss_dsi2_addrs, .user = OCP_USER_MPU, }; /* dss_dsi2 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_dss_dsi2_slaves[] = { &omap44xx_l3_main_2__dss_dsi2, &omap44xx_l4_per__dss_dsi2, }; static struct omap_hwmod_opt_clk dss_dsi2_opt_clks[] = { { .role = "sys_clk", .clk = "dss_sys_clk" }, }; static struct omap_hwmod omap44xx_dss_dsi2_hwmod = { .name = "dss_dsi2", .class = &omap44xx_dsi_hwmod_class, .clkdm_name = "l3_dss_clkdm", .mpu_irqs = omap44xx_dss_dsi2_irqs, .sdma_reqs = omap44xx_dss_dsi2_sdma_reqs, .main_clk = "dss_dss_clk", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET, }, }, .opt_clks = dss_dsi2_opt_clks, .opt_clks_cnt = ARRAY_SIZE(dss_dsi2_opt_clks), .slaves = omap44xx_dss_dsi2_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_dss_dsi2_slaves), }; /* * 'hdmi' class * hdmi controller */ static struct omap_hwmod_class_sysconfig omap44xx_hdmi_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .sysc_flags = (SYSC_HAS_RESET_STATUS | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | SIDLE_SMART_WKUP), .sysc_fields = &omap_hwmod_sysc_type2, }; static struct omap_hwmod_class omap44xx_hdmi_hwmod_class = { .name = "hdmi", .sysc = &omap44xx_hdmi_sysc, }; /* dss_hdmi */ static struct omap_hwmod omap44xx_dss_hdmi_hwmod; static struct omap_hwmod_irq_info omap44xx_dss_hdmi_irqs[] = { { .irq = 101 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_dss_hdmi_sdma_reqs[] = { { .dma_req = 75 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_dss_hdmi_dma_addrs[] = { { .pa_start = 0x58006000, .pa_end = 0x58006fff, .flags = ADDR_TYPE_RT }, { } }; /* l3_main_2 -> dss_hdmi */ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_hdmi = { .master = &omap44xx_l3_main_2_hwmod, .slave = &omap44xx_dss_hdmi_hwmod, .clk = "dss_fck", .addr = omap44xx_dss_hdmi_dma_addrs, .user = OCP_USER_SDMA, }; static struct omap_hwmod_addr_space omap44xx_dss_hdmi_addrs[] = { { .pa_start = 0x48046000, .pa_end = 0x48046fff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> dss_hdmi */ static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_hdmi = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_dss_hdmi_hwmod, .clk = "l4_div_ck", .addr = omap44xx_dss_hdmi_addrs, .user = OCP_USER_MPU, }; /* dss_hdmi slave ports */ static struct omap_hwmod_ocp_if *omap44xx_dss_hdmi_slaves[] = { &omap44xx_l3_main_2__dss_hdmi, &omap44xx_l4_per__dss_hdmi, }; static struct omap_hwmod_opt_clk dss_hdmi_opt_clks[] = { { .role = "sys_clk", .clk = "dss_sys_clk" }, }; static struct omap_hwmod omap44xx_dss_hdmi_hwmod = { .name = "dss_hdmi", .class = &omap44xx_hdmi_hwmod_class, .clkdm_name = "l3_dss_clkdm", .mpu_irqs = omap44xx_dss_hdmi_irqs, .sdma_reqs = omap44xx_dss_hdmi_sdma_reqs, .main_clk = "dss_48mhz_clk", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET, }, }, .opt_clks = dss_hdmi_opt_clks, .opt_clks_cnt = ARRAY_SIZE(dss_hdmi_opt_clks), .slaves = 
omap44xx_dss_hdmi_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_dss_hdmi_slaves), }; /* * 'rfbi' class * remote frame buffer interface */ static struct omap_hwmod_class_sysconfig omap44xx_rfbi_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap44xx_rfbi_hwmod_class = { .name = "rfbi", .sysc = &omap44xx_rfbi_sysc, }; /* dss_rfbi */ static struct omap_hwmod omap44xx_dss_rfbi_hwmod; static struct omap_hwmod_dma_info omap44xx_dss_rfbi_sdma_reqs[] = { { .dma_req = 13 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_dss_rfbi_dma_addrs[] = { { .pa_start = 0x58002000, .pa_end = 0x580020ff, .flags = ADDR_TYPE_RT }, { } }; /* l3_main_2 -> dss_rfbi */ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_rfbi = { .master = &omap44xx_l3_main_2_hwmod, .slave = &omap44xx_dss_rfbi_hwmod, .clk = "dss_fck", .addr = omap44xx_dss_rfbi_dma_addrs, .user = OCP_USER_SDMA, }; static struct omap_hwmod_addr_space omap44xx_dss_rfbi_addrs[] = { { .pa_start = 0x48042000, .pa_end = 0x480420ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> dss_rfbi */ static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_rfbi = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_dss_rfbi_hwmod, .clk = "l4_div_ck", .addr = omap44xx_dss_rfbi_addrs, .user = OCP_USER_MPU, }; /* dss_rfbi slave ports */ static struct omap_hwmod_ocp_if *omap44xx_dss_rfbi_slaves[] = { &omap44xx_l3_main_2__dss_rfbi, &omap44xx_l4_per__dss_rfbi, }; static struct omap_hwmod_opt_clk dss_rfbi_opt_clks[] = { { .role = "ick", .clk = "dss_fck" }, }; static struct omap_hwmod omap44xx_dss_rfbi_hwmod = { .name = "dss_rfbi", .class = &omap44xx_rfbi_hwmod_class, .clkdm_name = "l3_dss_clkdm", .sdma_reqs = omap44xx_dss_rfbi_sdma_reqs, .main_clk = "dss_dss_clk", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET, }, }, .opt_clks = dss_rfbi_opt_clks, .opt_clks_cnt = ARRAY_SIZE(dss_rfbi_opt_clks), .slaves = omap44xx_dss_rfbi_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_dss_rfbi_slaves), }; /* * 'venc' class * video encoder */ static struct omap_hwmod_class omap44xx_venc_hwmod_class = { .name = "venc", }; /* dss_venc */ static struct omap_hwmod omap44xx_dss_venc_hwmod; static struct omap_hwmod_addr_space omap44xx_dss_venc_dma_addrs[] = { { .pa_start = 0x58003000, .pa_end = 0x580030ff, .flags = ADDR_TYPE_RT }, { } }; /* l3_main_2 -> dss_venc */ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__dss_venc = { .master = &omap44xx_l3_main_2_hwmod, .slave = &omap44xx_dss_venc_hwmod, .clk = "dss_fck", .addr = omap44xx_dss_venc_dma_addrs, .user = OCP_USER_SDMA, }; static struct omap_hwmod_addr_space omap44xx_dss_venc_addrs[] = { { .pa_start = 0x48043000, .pa_end = 0x480430ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> dss_venc */ static struct omap_hwmod_ocp_if omap44xx_l4_per__dss_venc = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_dss_venc_hwmod, .clk = "l4_div_ck", .addr = omap44xx_dss_venc_addrs, .user = OCP_USER_MPU, }; /* dss_venc slave ports */ static struct omap_hwmod_ocp_if *omap44xx_dss_venc_slaves[] = { &omap44xx_l3_main_2__dss_venc, &omap44xx_l4_per__dss_venc, }; static struct omap_hwmod omap44xx_dss_venc_hwmod = { .name = "dss_venc", .class = &omap44xx_venc_hwmod_class, .clkdm_name = "l3_dss_clkdm", .main_clk = 
"dss_tv_clk", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_DSS_DSS_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_DSS_DSS_CONTEXT_OFFSET, }, }, .slaves = omap44xx_dss_venc_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_dss_venc_slaves), }; /* * 'gpio' class * general purpose io module */ static struct omap_hwmod_class_sysconfig omap44xx_gpio_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0114, .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | SIDLE_SMART_WKUP), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap44xx_gpio_hwmod_class = { .name = "gpio", .sysc = &omap44xx_gpio_sysc, .rev = 2, }; /* gpio dev_attr */ static struct omap_gpio_dev_attr gpio_dev_attr = { .bank_width = 32, .dbck_flag = true, }; /* gpio1 */ static struct omap_hwmod omap44xx_gpio1_hwmod; static struct omap_hwmod_irq_info omap44xx_gpio1_irqs[] = { { .irq = 29 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap44xx_gpio1_addrs[] = { { .pa_start = 0x4a310000, .pa_end = 0x4a3101ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_wkup -> gpio1 */ static struct omap_hwmod_ocp_if omap44xx_l4_wkup__gpio1 = { .master = &omap44xx_l4_wkup_hwmod, .slave = &omap44xx_gpio1_hwmod, .clk = "l4_wkup_clk_mux_ck", .addr = omap44xx_gpio1_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* gpio1 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_gpio1_slaves[] = { &omap44xx_l4_wkup__gpio1, }; static struct omap_hwmod_opt_clk gpio1_opt_clks[] = { { .role = "dbclk", .clk = "gpio1_dbclk" }, }; static struct omap_hwmod omap44xx_gpio1_hwmod = { .name = "gpio1", .class = &omap44xx_gpio_hwmod_class, .clkdm_name = "l4_wkup_clkdm", .mpu_irqs = omap44xx_gpio1_irqs, .main_clk = "gpio1_ick", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_WKUP_GPIO1_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_WKUP_GPIO1_CONTEXT_OFFSET, .modulemode = MODULEMODE_HWCTRL, }, }, .opt_clks = gpio1_opt_clks, .opt_clks_cnt = ARRAY_SIZE(gpio1_opt_clks), .dev_attr = &gpio_dev_attr, .slaves = omap44xx_gpio1_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_gpio1_slaves), }; /* gpio2 */ static struct omap_hwmod omap44xx_gpio2_hwmod; static struct omap_hwmod_irq_info omap44xx_gpio2_irqs[] = { { .irq = 30 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap44xx_gpio2_addrs[] = { { .pa_start = 0x48055000, .pa_end = 0x480551ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> gpio2 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__gpio2 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_gpio2_hwmod, .clk = "l4_div_ck", .addr = omap44xx_gpio2_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* gpio2 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_gpio2_slaves[] = { &omap44xx_l4_per__gpio2, }; static struct omap_hwmod_opt_clk gpio2_opt_clks[] = { { .role = "dbclk", .clk = "gpio2_dbclk" }, }; static struct omap_hwmod omap44xx_gpio2_hwmod = { .name = "gpio2", .class = &omap44xx_gpio_hwmod_class, .clkdm_name = "l4_per_clkdm", .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap44xx_gpio2_irqs, .main_clk = "gpio2_ick", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_GPIO2_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_GPIO2_CONTEXT_OFFSET, .modulemode = MODULEMODE_HWCTRL, }, }, .opt_clks = gpio2_opt_clks, .opt_clks_cnt = ARRAY_SIZE(gpio2_opt_clks), .dev_attr = &gpio_dev_attr, .slaves = omap44xx_gpio2_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_gpio2_slaves), }; /* 
gpio3 */ static struct omap_hwmod omap44xx_gpio3_hwmod; static struct omap_hwmod_irq_info omap44xx_gpio3_irqs[] = { { .irq = 31 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap44xx_gpio3_addrs[] = { { .pa_start = 0x48057000, .pa_end = 0x480571ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> gpio3 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__gpio3 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_gpio3_hwmod, .clk = "l4_div_ck", .addr = omap44xx_gpio3_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* gpio3 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_gpio3_slaves[] = { &omap44xx_l4_per__gpio3, }; static struct omap_hwmod_opt_clk gpio3_opt_clks[] = { { .role = "dbclk", .clk = "gpio3_dbclk" }, }; static struct omap_hwmod omap44xx_gpio3_hwmod = { .name = "gpio3", .class = &omap44xx_gpio_hwmod_class, .clkdm_name = "l4_per_clkdm", .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap44xx_gpio3_irqs, .main_clk = "gpio3_ick", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_GPIO3_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_GPIO3_CONTEXT_OFFSET, .modulemode = MODULEMODE_HWCTRL, }, }, .opt_clks = gpio3_opt_clks, .opt_clks_cnt = ARRAY_SIZE(gpio3_opt_clks), .dev_attr = &gpio_dev_attr, .slaves = omap44xx_gpio3_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_gpio3_slaves), }; /* gpio4 */ static struct omap_hwmod omap44xx_gpio4_hwmod; static struct omap_hwmod_irq_info omap44xx_gpio4_irqs[] = { { .irq = 32 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap44xx_gpio4_addrs[] = { { .pa_start = 0x48059000, .pa_end = 0x480591ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> gpio4 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__gpio4 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_gpio4_hwmod, .clk = "l4_div_ck", .addr = omap44xx_gpio4_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* gpio4 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_gpio4_slaves[] = { &omap44xx_l4_per__gpio4, }; static struct omap_hwmod_opt_clk gpio4_opt_clks[] = { { .role = "dbclk", .clk = "gpio4_dbclk" }, }; static struct omap_hwmod omap44xx_gpio4_hwmod = { .name = "gpio4", .class = &omap44xx_gpio_hwmod_class, .clkdm_name = "l4_per_clkdm", .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap44xx_gpio4_irqs, .main_clk = "gpio4_ick", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_GPIO4_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_GPIO4_CONTEXT_OFFSET, .modulemode = MODULEMODE_HWCTRL, }, }, .opt_clks = gpio4_opt_clks, .opt_clks_cnt = ARRAY_SIZE(gpio4_opt_clks), .dev_attr = &gpio_dev_attr, .slaves = omap44xx_gpio4_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_gpio4_slaves), }; /* gpio5 */ static struct omap_hwmod omap44xx_gpio5_hwmod; static struct omap_hwmod_irq_info omap44xx_gpio5_irqs[] = { { .irq = 33 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap44xx_gpio5_addrs[] = { { .pa_start = 0x4805b000, .pa_end = 0x4805b1ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> gpio5 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__gpio5 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_gpio5_hwmod, .clk = "l4_div_ck", .addr = omap44xx_gpio5_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* gpio5 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_gpio5_slaves[] = { &omap44xx_l4_per__gpio5, }; static struct omap_hwmod_opt_clk gpio5_opt_clks[] = { { .role = "dbclk", .clk = "gpio5_dbclk" }, }; static struct omap_hwmod omap44xx_gpio5_hwmod = { .name = "gpio5", .class = 
&omap44xx_gpio_hwmod_class, .clkdm_name = "l4_per_clkdm", .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap44xx_gpio5_irqs, .main_clk = "gpio5_ick", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_GPIO5_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_GPIO5_CONTEXT_OFFSET, .modulemode = MODULEMODE_HWCTRL, }, }, .opt_clks = gpio5_opt_clks, .opt_clks_cnt = ARRAY_SIZE(gpio5_opt_clks), .dev_attr = &gpio_dev_attr, .slaves = omap44xx_gpio5_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_gpio5_slaves), }; /* gpio6 */ static struct omap_hwmod omap44xx_gpio6_hwmod; static struct omap_hwmod_irq_info omap44xx_gpio6_irqs[] = { { .irq = 34 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap44xx_gpio6_addrs[] = { { .pa_start = 0x4805d000, .pa_end = 0x4805d1ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> gpio6 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__gpio6 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_gpio6_hwmod, .clk = "l4_div_ck", .addr = omap44xx_gpio6_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* gpio6 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_gpio6_slaves[] = { &omap44xx_l4_per__gpio6, }; static struct omap_hwmod_opt_clk gpio6_opt_clks[] = { { .role = "dbclk", .clk = "gpio6_dbclk" }, }; static struct omap_hwmod omap44xx_gpio6_hwmod = { .name = "gpio6", .class = &omap44xx_gpio_hwmod_class, .clkdm_name = "l4_per_clkdm", .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, .mpu_irqs = omap44xx_gpio6_irqs, .main_clk = "gpio6_ick", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_GPIO6_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_GPIO6_CONTEXT_OFFSET, .modulemode = MODULEMODE_HWCTRL, }, }, .opt_clks = gpio6_opt_clks, .opt_clks_cnt = ARRAY_SIZE(gpio6_opt_clks), .dev_attr = &gpio_dev_attr, .slaves = omap44xx_gpio6_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_gpio6_slaves), }; /* * 'hsi' class * mipi high-speed synchronous serial interface (multichannel and full-duplex * serial if) */ static struct omap_hwmod_class_sysconfig omap44xx_hsi_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_EMUFREE | SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART | MSTANDBY_SMART_WKUP), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap44xx_hsi_hwmod_class = { .name = "hsi", .sysc = &omap44xx_hsi_sysc, }; /* hsi */ static struct omap_hwmod_irq_info omap44xx_hsi_irqs[] = { { .name = "mpu_p1", .irq = 67 + OMAP44XX_IRQ_GIC_START }, { .name = "mpu_p2", .irq = 68 + OMAP44XX_IRQ_GIC_START }, { .name = "mpu_dma", .irq = 71 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; /* hsi master ports */ static struct omap_hwmod_ocp_if *omap44xx_hsi_masters[] = { &omap44xx_hsi__l3_main_2, }; static struct omap_hwmod_addr_space omap44xx_hsi_addrs[] = { { .pa_start = 0x4a058000, .pa_end = 0x4a05bfff, .flags = ADDR_TYPE_RT }, { } }; /* l4_cfg -> hsi */ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__hsi = { .master = &omap44xx_l4_cfg_hwmod, .slave = &omap44xx_hsi_hwmod, .clk = "l4_div_ck", .addr = omap44xx_hsi_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* hsi slave ports */ static struct omap_hwmod_ocp_if *omap44xx_hsi_slaves[] = { &omap44xx_l4_cfg__hsi, }; static struct omap_hwmod omap44xx_hsi_hwmod = { .name = "hsi", .class = &omap44xx_hsi_hwmod_class, .clkdm_name = "l3_init_clkdm", .mpu_irqs = omap44xx_hsi_irqs, .main_clk = 
"hsi_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L3INIT_HSI_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L3INIT_HSI_CONTEXT_OFFSET, .modulemode = MODULEMODE_HWCTRL, }, }, .slaves = omap44xx_hsi_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_hsi_slaves), .masters = omap44xx_hsi_masters, .masters_cnt = ARRAY_SIZE(omap44xx_hsi_masters), }; /* * 'i2c' class * multimaster high-speed i2c controller */ static struct omap_hwmod_class_sysconfig omap44xx_i2c_sysc = { .sysc_offs = 0x0010, .syss_offs = 0x0090, .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | SIDLE_SMART_WKUP), .clockact = CLOCKACT_TEST_ICLK, .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap44xx_i2c_hwmod_class = { .name = "i2c", .sysc = &omap44xx_i2c_sysc, .rev = OMAP_I2C_IP_VERSION_2, .reset = &omap_i2c_reset, }; static struct omap_i2c_dev_attr i2c_dev_attr = { .flags = OMAP_I2C_FLAG_BUS_SHIFT_NONE, }; /* i2c1 */ static struct omap_hwmod omap44xx_i2c1_hwmod; static struct omap_hwmod_irq_info omap44xx_i2c1_irqs[] = { { .irq = 56 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_i2c1_sdma_reqs[] = { { .name = "tx", .dma_req = 26 + OMAP44XX_DMA_REQ_START }, { .name = "rx", .dma_req = 27 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_i2c1_addrs[] = { { .pa_start = 0x48070000, .pa_end = 0x480700ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> i2c1 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__i2c1 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_i2c1_hwmod, .clk = "l4_div_ck", .addr = omap44xx_i2c1_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* i2c1 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_i2c1_slaves[] = { &omap44xx_l4_per__i2c1, }; static struct omap_hwmod omap44xx_i2c1_hwmod = { .name = "i2c1", .class = &omap44xx_i2c_hwmod_class, .clkdm_name = "l4_per_clkdm", .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT, .mpu_irqs = omap44xx_i2c1_irqs, .sdma_reqs = omap44xx_i2c1_sdma_reqs, .main_clk = "i2c1_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_I2C1_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_I2C1_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .slaves = omap44xx_i2c1_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_i2c1_slaves), .dev_attr = &i2c_dev_attr, }; /* i2c2 */ static struct omap_hwmod omap44xx_i2c2_hwmod; static struct omap_hwmod_irq_info omap44xx_i2c2_irqs[] = { { .irq = 57 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_i2c2_sdma_reqs[] = { { .name = "tx", .dma_req = 28 + OMAP44XX_DMA_REQ_START }, { .name = "rx", .dma_req = 29 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_i2c2_addrs[] = { { .pa_start = 0x48072000, .pa_end = 0x480720ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> i2c2 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__i2c2 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_i2c2_hwmod, .clk = "l4_div_ck", .addr = omap44xx_i2c2_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* i2c2 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_i2c2_slaves[] = { &omap44xx_l4_per__i2c2, }; static struct omap_hwmod omap44xx_i2c2_hwmod = { .name = "i2c2", .class = &omap44xx_i2c_hwmod_class, .clkdm_name = "l4_per_clkdm", .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT, .mpu_irqs = omap44xx_i2c2_irqs, .sdma_reqs = 
omap44xx_i2c2_sdma_reqs, .main_clk = "i2c2_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_I2C2_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_I2C2_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .slaves = omap44xx_i2c2_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_i2c2_slaves), .dev_attr = &i2c_dev_attr, }; /* i2c3 */ static struct omap_hwmod omap44xx_i2c3_hwmod; static struct omap_hwmod_irq_info omap44xx_i2c3_irqs[] = { { .irq = 61 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_i2c3_sdma_reqs[] = { { .name = "tx", .dma_req = 24 + OMAP44XX_DMA_REQ_START }, { .name = "rx", .dma_req = 25 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_i2c3_addrs[] = { { .pa_start = 0x48060000, .pa_end = 0x480600ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> i2c3 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__i2c3 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_i2c3_hwmod, .clk = "l4_div_ck", .addr = omap44xx_i2c3_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* i2c3 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_i2c3_slaves[] = { &omap44xx_l4_per__i2c3, }; static struct omap_hwmod omap44xx_i2c3_hwmod = { .name = "i2c3", .class = &omap44xx_i2c_hwmod_class, .clkdm_name = "l4_per_clkdm", .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT, .mpu_irqs = omap44xx_i2c3_irqs, .sdma_reqs = omap44xx_i2c3_sdma_reqs, .main_clk = "i2c3_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_I2C3_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_I2C3_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .slaves = omap44xx_i2c3_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_i2c3_slaves), .dev_attr = &i2c_dev_attr, }; /* i2c4 */ static struct omap_hwmod omap44xx_i2c4_hwmod; static struct omap_hwmod_irq_info omap44xx_i2c4_irqs[] = { { .irq = 62 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_i2c4_sdma_reqs[] = { { .name = "tx", .dma_req = 123 + OMAP44XX_DMA_REQ_START }, { .name = "rx", .dma_req = 124 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_i2c4_addrs[] = { { .pa_start = 0x48350000, .pa_end = 0x483500ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> i2c4 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__i2c4 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_i2c4_hwmod, .clk = "l4_div_ck", .addr = omap44xx_i2c4_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* i2c4 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_i2c4_slaves[] = { &omap44xx_l4_per__i2c4, }; static struct omap_hwmod omap44xx_i2c4_hwmod = { .name = "i2c4", .class = &omap44xx_i2c_hwmod_class, .clkdm_name = "l4_per_clkdm", .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT, .mpu_irqs = omap44xx_i2c4_irqs, .sdma_reqs = omap44xx_i2c4_sdma_reqs, .main_clk = "i2c4_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_I2C4_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_I2C4_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .slaves = omap44xx_i2c4_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_i2c4_slaves), .dev_attr = &i2c_dev_attr, }; /* * 'ipu' class * imaging processor unit */ static struct omap_hwmod_class omap44xx_ipu_hwmod_class = { .name = "ipu", }; /* ipu */ static struct omap_hwmod_irq_info omap44xx_ipu_irqs[] = { { .irq = 100 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_rst_info omap44xx_ipu_c0_resets[] = { { .name = "cpu0", .rst_shift = 0 }, }; static struct omap_hwmod_rst_info 
omap44xx_ipu_c1_resets[] = {
	{ .name = "cpu1", .rst_shift = 1 },
};

static struct omap_hwmod_rst_info omap44xx_ipu_resets[] = {
	{ .name = "mmu_cache", .rst_shift = 2 },
};

/* ipu master ports */
static struct omap_hwmod_ocp_if *omap44xx_ipu_masters[] = {
	&omap44xx_ipu__l3_main_2,
};

/* l3_main_2 -> ipu */
static struct omap_hwmod_ocp_if omap44xx_l3_main_2__ipu = {
	.master		= &omap44xx_l3_main_2_hwmod,
	.slave		= &omap44xx_ipu_hwmod,
	.clk		= "l3_div_ck",
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* ipu slave ports */
static struct omap_hwmod_ocp_if *omap44xx_ipu_slaves[] = {
	&omap44xx_l3_main_2__ipu,
};

/* Pseudo hwmod for reset control purpose only */
static struct omap_hwmod omap44xx_ipu_c0_hwmod = {
	.name		= "ipu_c0",
	.class		= &omap44xx_ipu_hwmod_class,
	.clkdm_name	= "ducati_clkdm",
	.flags		= HWMOD_INIT_NO_RESET,
	.rst_lines	= omap44xx_ipu_c0_resets,
	.rst_lines_cnt	= ARRAY_SIZE(omap44xx_ipu_c0_resets),
	.prcm = {
		.omap4 = {
			.rstctrl_offs = OMAP4_RM_DUCATI_RSTCTRL_OFFSET,
		},
	},
};

/* Pseudo hwmod for reset control purpose only */
static struct omap_hwmod omap44xx_ipu_c1_hwmod = {
	.name		= "ipu_c1",
	.class		= &omap44xx_ipu_hwmod_class,
	.clkdm_name	= "ducati_clkdm",
	.flags		= HWMOD_INIT_NO_RESET,
	.rst_lines	= omap44xx_ipu_c1_resets,
	.rst_lines_cnt	= ARRAY_SIZE(omap44xx_ipu_c1_resets),
	.prcm = {
		.omap4 = {
			.rstctrl_offs = OMAP4_RM_DUCATI_RSTCTRL_OFFSET,
		},
	},
};

static struct omap_hwmod omap44xx_ipu_hwmod = {
	.name		= "ipu",
	.class		= &omap44xx_ipu_hwmod_class,
	.clkdm_name	= "ducati_clkdm",
	.mpu_irqs	= omap44xx_ipu_irqs,
	.rst_lines	= omap44xx_ipu_resets,
	.rst_lines_cnt	= ARRAY_SIZE(omap44xx_ipu_resets),
	.main_clk	= "ipu_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_DUCATI_DUCATI_CLKCTRL_OFFSET,
			.rstctrl_offs = OMAP4_RM_DUCATI_RSTCTRL_OFFSET,
			.context_offs = OMAP4_RM_DUCATI_DUCATI_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_HWCTRL,
		},
	},
	.slaves		= omap44xx_ipu_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_ipu_slaves),
	.masters	= omap44xx_ipu_masters,
	.masters_cnt	= ARRAY_SIZE(omap44xx_ipu_masters),
};

/*
 * 'iss' class
 * external image sensor pixel data processor
 */

static struct omap_hwmod_class_sysconfig omap44xx_iss_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	/*
	 * ISS needs a delay of 100 OCP clk cycles after a softreset before
	 * accessing sysconfig again.
	 * The lowest frequency at the moment for the L3 bus is 100 MHz, so
	 * a 1 usec delay is needed. Add an x2 margin to be safe (2 usecs).
	 *
	 * TODO: Indicate errata when available.
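	 *
	 * (That is: 100 cycles / 100 MHz = 1 usec, doubled for margin,
	 * hence the srst_udelay value of 2 below.)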
*/ .srst_udelay = 2, .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_RESET_STATUS | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART | MSTANDBY_SMART_WKUP), .sysc_fields = &omap_hwmod_sysc_type2, }; static struct omap_hwmod_class omap44xx_iss_hwmod_class = { .name = "iss", .sysc = &omap44xx_iss_sysc, }; /* iss */ static struct omap_hwmod_irq_info omap44xx_iss_irqs[] = { { .irq = 24 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_iss_sdma_reqs[] = { { .name = "1", .dma_req = 8 + OMAP44XX_DMA_REQ_START }, { .name = "2", .dma_req = 9 + OMAP44XX_DMA_REQ_START }, { .name = "3", .dma_req = 11 + OMAP44XX_DMA_REQ_START }, { .name = "4", .dma_req = 12 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; /* iss master ports */ static struct omap_hwmod_ocp_if *omap44xx_iss_masters[] = { &omap44xx_iss__l3_main_2, }; static struct omap_hwmod_addr_space omap44xx_iss_addrs[] = { { .pa_start = 0x52000000, .pa_end = 0x520000ff, .flags = ADDR_TYPE_RT }, { } }; /* l3_main_2 -> iss */ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__iss = { .master = &omap44xx_l3_main_2_hwmod, .slave = &omap44xx_iss_hwmod, .clk = "l3_div_ck", .addr = omap44xx_iss_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* iss slave ports */ static struct omap_hwmod_ocp_if *omap44xx_iss_slaves[] = { &omap44xx_l3_main_2__iss, }; static struct omap_hwmod_opt_clk iss_opt_clks[] = { { .role = "ctrlclk", .clk = "iss_ctrlclk" }, }; static struct omap_hwmod omap44xx_iss_hwmod = { .name = "iss", .class = &omap44xx_iss_hwmod_class, .clkdm_name = "iss_clkdm", .mpu_irqs = omap44xx_iss_irqs, .sdma_reqs = omap44xx_iss_sdma_reqs, .main_clk = "iss_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_CAM_ISS_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_CAM_ISS_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .opt_clks = iss_opt_clks, .opt_clks_cnt = ARRAY_SIZE(iss_opt_clks), .slaves = omap44xx_iss_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_iss_slaves), .masters = omap44xx_iss_masters, .masters_cnt = ARRAY_SIZE(omap44xx_iss_masters), }; /* * 'iva' class * multi-standard video encoder/decoder hardware accelerator */ static struct omap_hwmod_class omap44xx_iva_hwmod_class = { .name = "iva", }; /* iva */ static struct omap_hwmod_irq_info omap44xx_iva_irqs[] = { { .name = "sync_1", .irq = 103 + OMAP44XX_IRQ_GIC_START }, { .name = "sync_0", .irq = 104 + OMAP44XX_IRQ_GIC_START }, { .name = "mailbox_0", .irq = 107 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_rst_info omap44xx_iva_resets[] = { { .name = "logic", .rst_shift = 2 }, }; static struct omap_hwmod_rst_info omap44xx_iva_seq0_resets[] = { { .name = "seq0", .rst_shift = 0 }, }; static struct omap_hwmod_rst_info omap44xx_iva_seq1_resets[] = { { .name = "seq1", .rst_shift = 1 }, }; /* iva master ports */ static struct omap_hwmod_ocp_if *omap44xx_iva_masters[] = { &omap44xx_iva__l3_main_2, &omap44xx_iva__l3_instr, }; static struct omap_hwmod_addr_space omap44xx_iva_addrs[] = { { .pa_start = 0x5a000000, .pa_end = 0x5a07ffff, .flags = ADDR_TYPE_RT }, { } }; /* l3_main_2 -> iva */ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__iva = { .master = &omap44xx_l3_main_2_hwmod, .slave = &omap44xx_iva_hwmod, .clk = "l3_div_ck", .addr = omap44xx_iva_addrs, .user = OCP_USER_MPU, }; /* iva slave ports */ static struct omap_hwmod_ocp_if *omap44xx_iva_slaves[] = { &omap44xx_dsp__iva, &omap44xx_l3_main_2__iva, }; /* Pseudo hwmod for reset control 
purpose only */
static struct omap_hwmod omap44xx_iva_seq0_hwmod = {
	.name		= "iva_seq0",
	.class		= &omap44xx_iva_hwmod_class,
	.clkdm_name	= "ivahd_clkdm",
	.flags		= HWMOD_INIT_NO_RESET,
	.rst_lines	= omap44xx_iva_seq0_resets,
	.rst_lines_cnt	= ARRAY_SIZE(omap44xx_iva_seq0_resets),
	.prcm = {
		.omap4 = {
			.rstctrl_offs = OMAP4_RM_IVAHD_RSTCTRL_OFFSET,
		},
	},
};

/* Pseudo hwmod for reset control purpose only */
static struct omap_hwmod omap44xx_iva_seq1_hwmod = {
	.name		= "iva_seq1",
	.class		= &omap44xx_iva_hwmod_class,
	.clkdm_name	= "ivahd_clkdm",
	.flags		= HWMOD_INIT_NO_RESET,
	.rst_lines	= omap44xx_iva_seq1_resets,
	.rst_lines_cnt	= ARRAY_SIZE(omap44xx_iva_seq1_resets),
	.prcm = {
		.omap4 = {
			.rstctrl_offs = OMAP4_RM_IVAHD_RSTCTRL_OFFSET,
		},
	},
};

static struct omap_hwmod omap44xx_iva_hwmod = {
	.name		= "iva",
	.class		= &omap44xx_iva_hwmod_class,
	.clkdm_name	= "ivahd_clkdm",
	.mpu_irqs	= omap44xx_iva_irqs,
	.rst_lines	= omap44xx_iva_resets,
	.rst_lines_cnt	= ARRAY_SIZE(omap44xx_iva_resets),
	.main_clk	= "iva_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_IVAHD_IVAHD_CLKCTRL_OFFSET,
			.rstctrl_offs = OMAP4_RM_IVAHD_RSTCTRL_OFFSET,
			.context_offs = OMAP4_RM_IVAHD_IVAHD_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_HWCTRL,
		},
	},
	.slaves		= omap44xx_iva_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_iva_slaves),
	.masters	= omap44xx_iva_masters,
	.masters_cnt	= ARRAY_SIZE(omap44xx_iva_masters),
};

/*
 * 'kbd' class
 * keyboard controller
 */

static struct omap_hwmod_class_sysconfig omap44xx_kbd_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	.syss_offs	= 0x0014,
	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
			   SYSC_HAS_EMUFREE | SYSC_HAS_ENAWAKEUP |
			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
			   SYSS_HAS_RESET_STATUS),
	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class omap44xx_kbd_hwmod_class = {
	.name	= "kbd",
	.sysc	= &omap44xx_kbd_sysc,
};

/* kbd */
static struct omap_hwmod omap44xx_kbd_hwmod;
static struct omap_hwmod_irq_info omap44xx_kbd_irqs[] = {
	{ .irq = 120 + OMAP44XX_IRQ_GIC_START },
	{ .irq = -1 }
};

static struct omap_hwmod_addr_space omap44xx_kbd_addrs[] = {
	{
		.pa_start	= 0x4a31c000,
		.pa_end		= 0x4a31c07f,
		.flags		= ADDR_TYPE_RT
	},
	{ }
};

/* l4_wkup -> kbd */
static struct omap_hwmod_ocp_if omap44xx_l4_wkup__kbd = {
	.master		= &omap44xx_l4_wkup_hwmod,
	.slave		= &omap44xx_kbd_hwmod,
	.clk		= "l4_wkup_clk_mux_ck",
	.addr		= omap44xx_kbd_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

/* kbd slave ports */
static struct omap_hwmod_ocp_if *omap44xx_kbd_slaves[] = {
	&omap44xx_l4_wkup__kbd,
};

static struct omap_hwmod omap44xx_kbd_hwmod = {
	.name		= "kbd",
	.class		= &omap44xx_kbd_hwmod_class,
	.clkdm_name	= "l4_wkup_clkdm",
	.mpu_irqs	= omap44xx_kbd_irqs,
	.main_clk	= "kbd_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_WKUP_KEYBOARD_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_WKUP_KEYBOARD_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.slaves		= omap44xx_kbd_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_kbd_slaves),
};

/*
 * 'mailbox' class
 * mailbox module allowing communication between the on-chip processors
 * using a queued mailbox-interrupt mechanism.
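 *
 * These records are resolved by name at runtime; a minimal sketch,
 * assuming the standard hwmod lookup API:
 *
 *	struct omap_hwmod *oh = omap_hwmod_lookup("mailbox");
 *	if (oh)
 *		pr_info("mailbox main_clk: %s\n", oh->main_clk);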
*/ static struct omap_hwmod_class_sysconfig omap44xx_mailbox_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .sysc_flags = (SYSC_HAS_RESET_STATUS | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type2, }; static struct omap_hwmod_class omap44xx_mailbox_hwmod_class = { .name = "mailbox", .sysc = &omap44xx_mailbox_sysc, }; /* mailbox */ static struct omap_hwmod omap44xx_mailbox_hwmod; static struct omap_hwmod_irq_info omap44xx_mailbox_irqs[] = { { .irq = 26 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap44xx_mailbox_addrs[] = { { .pa_start = 0x4a0f4000, .pa_end = 0x4a0f41ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_cfg -> mailbox */ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__mailbox = { .master = &omap44xx_l4_cfg_hwmod, .slave = &omap44xx_mailbox_hwmod, .clk = "l4_div_ck", .addr = omap44xx_mailbox_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* mailbox slave ports */ static struct omap_hwmod_ocp_if *omap44xx_mailbox_slaves[] = { &omap44xx_l4_cfg__mailbox, }; static struct omap_hwmod omap44xx_mailbox_hwmod = { .name = "mailbox", .class = &omap44xx_mailbox_hwmod_class, .clkdm_name = "l4_cfg_clkdm", .mpu_irqs = omap44xx_mailbox_irqs, .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4CFG_MAILBOX_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4CFG_MAILBOX_CONTEXT_OFFSET, }, }, .slaves = omap44xx_mailbox_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_mailbox_slaves), }; /* * 'mcbsp' class * multi channel buffered serial port controller */ static struct omap_hwmod_class_sysconfig omap44xx_mcbsp_sysc = { .sysc_offs = 0x008c, .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap44xx_mcbsp_hwmod_class = { .name = "mcbsp", .sysc = &omap44xx_mcbsp_sysc, .rev = MCBSP_CONFIG_TYPE4, }; /* mcbsp1 */ static struct omap_hwmod omap44xx_mcbsp1_hwmod; static struct omap_hwmod_irq_info omap44xx_mcbsp1_irqs[] = { { .irq = 17 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_mcbsp1_sdma_reqs[] = { { .name = "tx", .dma_req = 32 + OMAP44XX_DMA_REQ_START }, { .name = "rx", .dma_req = 33 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_mcbsp1_addrs[] = { { .name = "mpu", .pa_start = 0x40122000, .pa_end = 0x401220ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_abe -> mcbsp1 */ static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcbsp1 = { .master = &omap44xx_l4_abe_hwmod, .slave = &omap44xx_mcbsp1_hwmod, .clk = "ocp_abe_iclk", .addr = omap44xx_mcbsp1_addrs, .user = OCP_USER_MPU, }; static struct omap_hwmod_addr_space omap44xx_mcbsp1_dma_addrs[] = { { .name = "dma", .pa_start = 0x49022000, .pa_end = 0x490220ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_abe -> mcbsp1 (dma) */ static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcbsp1_dma = { .master = &omap44xx_l4_abe_hwmod, .slave = &omap44xx_mcbsp1_hwmod, .clk = "ocp_abe_iclk", .addr = omap44xx_mcbsp1_dma_addrs, .user = OCP_USER_SDMA, }; /* mcbsp1 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_mcbsp1_slaves[] = { &omap44xx_l4_abe__mcbsp1, &omap44xx_l4_abe__mcbsp1_dma, }; static struct omap_hwmod_opt_clk mcbsp1_opt_clks[] = { { .role = "pad_fck", .clk = "pad_clks_ck" }, { .role = "prcm_clk", .clk = "mcbsp1_sync_mux_ck" }, }; static struct omap_hwmod omap44xx_mcbsp1_hwmod = { .name = "mcbsp1", .class = 
&omap44xx_mcbsp_hwmod_class, .clkdm_name = "abe_clkdm", .mpu_irqs = omap44xx_mcbsp1_irqs, .sdma_reqs = omap44xx_mcbsp1_sdma_reqs, .main_clk = "mcbsp1_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM1_ABE_MCBSP1_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_ABE_MCBSP1_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .slaves = omap44xx_mcbsp1_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_mcbsp1_slaves), .opt_clks = mcbsp1_opt_clks, .opt_clks_cnt = ARRAY_SIZE(mcbsp1_opt_clks), }; /* mcbsp2 */ static struct omap_hwmod omap44xx_mcbsp2_hwmod; static struct omap_hwmod_irq_info omap44xx_mcbsp2_irqs[] = { { .irq = 22 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_mcbsp2_sdma_reqs[] = { { .name = "tx", .dma_req = 16 + OMAP44XX_DMA_REQ_START }, { .name = "rx", .dma_req = 17 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_mcbsp2_addrs[] = { { .name = "mpu", .pa_start = 0x40124000, .pa_end = 0x401240ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_abe -> mcbsp2 */ static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcbsp2 = { .master = &omap44xx_l4_abe_hwmod, .slave = &omap44xx_mcbsp2_hwmod, .clk = "ocp_abe_iclk", .addr = omap44xx_mcbsp2_addrs, .user = OCP_USER_MPU, }; static struct omap_hwmod_addr_space omap44xx_mcbsp2_dma_addrs[] = { { .name = "dma", .pa_start = 0x49024000, .pa_end = 0x490240ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_abe -> mcbsp2 (dma) */ static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcbsp2_dma = { .master = &omap44xx_l4_abe_hwmod, .slave = &omap44xx_mcbsp2_hwmod, .clk = "ocp_abe_iclk", .addr = omap44xx_mcbsp2_dma_addrs, .user = OCP_USER_SDMA, }; /* mcbsp2 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_mcbsp2_slaves[] = { &omap44xx_l4_abe__mcbsp2, &omap44xx_l4_abe__mcbsp2_dma, }; static struct omap_hwmod_opt_clk mcbsp2_opt_clks[] = { { .role = "pad_fck", .clk = "pad_clks_ck" }, { .role = "prcm_clk", .clk = "mcbsp2_sync_mux_ck" }, }; static struct omap_hwmod omap44xx_mcbsp2_hwmod = { .name = "mcbsp2", .class = &omap44xx_mcbsp_hwmod_class, .clkdm_name = "abe_clkdm", .mpu_irqs = omap44xx_mcbsp2_irqs, .sdma_reqs = omap44xx_mcbsp2_sdma_reqs, .main_clk = "mcbsp2_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM1_ABE_MCBSP2_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_ABE_MCBSP2_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .slaves = omap44xx_mcbsp2_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_mcbsp2_slaves), .opt_clks = mcbsp2_opt_clks, .opt_clks_cnt = ARRAY_SIZE(mcbsp2_opt_clks), }; /* mcbsp3 */ static struct omap_hwmod omap44xx_mcbsp3_hwmod; static struct omap_hwmod_irq_info omap44xx_mcbsp3_irqs[] = { { .irq = 23 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_mcbsp3_sdma_reqs[] = { { .name = "tx", .dma_req = 18 + OMAP44XX_DMA_REQ_START }, { .name = "rx", .dma_req = 19 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_mcbsp3_addrs[] = { { .name = "mpu", .pa_start = 0x40126000, .pa_end = 0x401260ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_abe -> mcbsp3 */ static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcbsp3 = { .master = &omap44xx_l4_abe_hwmod, .slave = &omap44xx_mcbsp3_hwmod, .clk = "ocp_abe_iclk", .addr = omap44xx_mcbsp3_addrs, .user = OCP_USER_MPU, }; static struct omap_hwmod_addr_space omap44xx_mcbsp3_dma_addrs[] = { { .name = "dma", .pa_start = 0x49026000, .pa_end = 0x490260ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_abe -> mcbsp3 (dma) */ static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcbsp3_dma = { .master 
= &omap44xx_l4_abe_hwmod, .slave = &omap44xx_mcbsp3_hwmod, .clk = "ocp_abe_iclk", .addr = omap44xx_mcbsp3_dma_addrs, .user = OCP_USER_SDMA, }; /* mcbsp3 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_mcbsp3_slaves[] = { &omap44xx_l4_abe__mcbsp3, &omap44xx_l4_abe__mcbsp3_dma, }; static struct omap_hwmod_opt_clk mcbsp3_opt_clks[] = { { .role = "pad_fck", .clk = "pad_clks_ck" }, { .role = "prcm_clk", .clk = "mcbsp3_sync_mux_ck" }, }; static struct omap_hwmod omap44xx_mcbsp3_hwmod = { .name = "mcbsp3", .class = &omap44xx_mcbsp_hwmod_class, .clkdm_name = "abe_clkdm", .mpu_irqs = omap44xx_mcbsp3_irqs, .sdma_reqs = omap44xx_mcbsp3_sdma_reqs, .main_clk = "mcbsp3_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM1_ABE_MCBSP3_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_ABE_MCBSP3_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .slaves = omap44xx_mcbsp3_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_mcbsp3_slaves), .opt_clks = mcbsp3_opt_clks, .opt_clks_cnt = ARRAY_SIZE(mcbsp3_opt_clks), }; /* mcbsp4 */ static struct omap_hwmod omap44xx_mcbsp4_hwmod; static struct omap_hwmod_irq_info omap44xx_mcbsp4_irqs[] = { { .irq = 16 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_mcbsp4_sdma_reqs[] = { { .name = "tx", .dma_req = 30 + OMAP44XX_DMA_REQ_START }, { .name = "rx", .dma_req = 31 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_mcbsp4_addrs[] = { { .pa_start = 0x48096000, .pa_end = 0x480960ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> mcbsp4 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__mcbsp4 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_mcbsp4_hwmod, .clk = "l4_div_ck", .addr = omap44xx_mcbsp4_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* mcbsp4 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_mcbsp4_slaves[] = { &omap44xx_l4_per__mcbsp4, }; static struct omap_hwmod_opt_clk mcbsp4_opt_clks[] = { { .role = "pad_fck", .clk = "pad_clks_ck" }, { .role = "prcm_clk", .clk = "mcbsp4_sync_mux_ck" }, }; static struct omap_hwmod omap44xx_mcbsp4_hwmod = { .name = "mcbsp4", .class = &omap44xx_mcbsp_hwmod_class, .clkdm_name = "l4_per_clkdm", .mpu_irqs = omap44xx_mcbsp4_irqs, .sdma_reqs = omap44xx_mcbsp4_sdma_reqs, .main_clk = "mcbsp4_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_MCBSP4_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_MCBSP4_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .slaves = omap44xx_mcbsp4_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_mcbsp4_slaves), .opt_clks = mcbsp4_opt_clks, .opt_clks_cnt = ARRAY_SIZE(mcbsp4_opt_clks), }; /* * 'mcpdm' class * multi channel pdm controller (proprietary interface with phoenix power * ic) */ static struct omap_hwmod_class_sysconfig omap44xx_mcpdm_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_RESET_STATUS | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | SIDLE_SMART_WKUP), .sysc_fields = &omap_hwmod_sysc_type2, }; static struct omap_hwmod_class omap44xx_mcpdm_hwmod_class = { .name = "mcpdm", .sysc = &omap44xx_mcpdm_sysc, }; /* mcpdm */ static struct omap_hwmod omap44xx_mcpdm_hwmod; static struct omap_hwmod_irq_info omap44xx_mcpdm_irqs[] = { { .irq = 112 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_mcpdm_sdma_reqs[] = { { .name = "up_link", .dma_req = 64 + OMAP44XX_DMA_REQ_START }, { .name = "dn_link", .dma_req = 65 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct 
omap_hwmod_addr_space omap44xx_mcpdm_addrs[] = { { .pa_start = 0x40132000, .pa_end = 0x4013207f, .flags = ADDR_TYPE_RT }, { } }; /* l4_abe -> mcpdm */ static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcpdm = { .master = &omap44xx_l4_abe_hwmod, .slave = &omap44xx_mcpdm_hwmod, .clk = "ocp_abe_iclk", .addr = omap44xx_mcpdm_addrs, .user = OCP_USER_MPU, }; static struct omap_hwmod_addr_space omap44xx_mcpdm_dma_addrs[] = { { .pa_start = 0x49032000, .pa_end = 0x4903207f, .flags = ADDR_TYPE_RT }, { } }; /* l4_abe -> mcpdm (dma) */ static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcpdm_dma = { .master = &omap44xx_l4_abe_hwmod, .slave = &omap44xx_mcpdm_hwmod, .clk = "ocp_abe_iclk", .addr = omap44xx_mcpdm_dma_addrs, .user = OCP_USER_SDMA, }; /* mcpdm slave ports */ static struct omap_hwmod_ocp_if *omap44xx_mcpdm_slaves[] = { &omap44xx_l4_abe__mcpdm, &omap44xx_l4_abe__mcpdm_dma, }; static struct omap_hwmod omap44xx_mcpdm_hwmod = { .name = "mcpdm", .class = &omap44xx_mcpdm_hwmod_class, .clkdm_name = "abe_clkdm", .mpu_irqs = omap44xx_mcpdm_irqs, .sdma_reqs = omap44xx_mcpdm_sdma_reqs, .main_clk = "mcpdm_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM1_ABE_PDM_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_ABE_PDM_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .slaves = omap44xx_mcpdm_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_mcpdm_slaves), }; /* * 'mcspi' class * multichannel serial port interface (mcspi) / master/slave synchronous serial * bus */ static struct omap_hwmod_class_sysconfig omap44xx_mcspi_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_RESET_STATUS | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | SIDLE_SMART_WKUP), .sysc_fields = &omap_hwmod_sysc_type2, }; static struct omap_hwmod_class omap44xx_mcspi_hwmod_class = { .name = "mcspi", .sysc = &omap44xx_mcspi_sysc, .rev = OMAP4_MCSPI_REV, }; /* mcspi1 */ static struct omap_hwmod omap44xx_mcspi1_hwmod; static struct omap_hwmod_irq_info omap44xx_mcspi1_irqs[] = { { .irq = 65 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_mcspi1_sdma_reqs[] = { { .name = "tx0", .dma_req = 34 + OMAP44XX_DMA_REQ_START }, { .name = "rx0", .dma_req = 35 + OMAP44XX_DMA_REQ_START }, { .name = "tx1", .dma_req = 36 + OMAP44XX_DMA_REQ_START }, { .name = "rx1", .dma_req = 37 + OMAP44XX_DMA_REQ_START }, { .name = "tx2", .dma_req = 38 + OMAP44XX_DMA_REQ_START }, { .name = "rx2", .dma_req = 39 + OMAP44XX_DMA_REQ_START }, { .name = "tx3", .dma_req = 40 + OMAP44XX_DMA_REQ_START }, { .name = "rx3", .dma_req = 41 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_mcspi1_addrs[] = { { .pa_start = 0x48098000, .pa_end = 0x480981ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> mcspi1 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__mcspi1 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_mcspi1_hwmod, .clk = "l4_div_ck", .addr = omap44xx_mcspi1_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* mcspi1 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_mcspi1_slaves[] = { &omap44xx_l4_per__mcspi1, }; /* mcspi1 dev_attr */ static struct omap2_mcspi_dev_attr mcspi1_dev_attr = { .num_chipselect = 4, }; static struct omap_hwmod omap44xx_mcspi1_hwmod = { .name = "mcspi1", .class = &omap44xx_mcspi_hwmod_class, .clkdm_name = "l4_per_clkdm", .mpu_irqs = omap44xx_mcspi1_irqs, .sdma_reqs = omap44xx_mcspi1_sdma_reqs, .main_clk = "mcspi1_fck", .prcm = { .omap4 = { .clkctrl_offs = 
OMAP4_CM_L4PER_MCSPI1_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_MCSPI1_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .dev_attr = &mcspi1_dev_attr, .slaves = omap44xx_mcspi1_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_mcspi1_slaves), }; /* mcspi2 */ static struct omap_hwmod omap44xx_mcspi2_hwmod; static struct omap_hwmod_irq_info omap44xx_mcspi2_irqs[] = { { .irq = 66 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_mcspi2_sdma_reqs[] = { { .name = "tx0", .dma_req = 42 + OMAP44XX_DMA_REQ_START }, { .name = "rx0", .dma_req = 43 + OMAP44XX_DMA_REQ_START }, { .name = "tx1", .dma_req = 44 + OMAP44XX_DMA_REQ_START }, { .name = "rx1", .dma_req = 45 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_mcspi2_addrs[] = { { .pa_start = 0x4809a000, .pa_end = 0x4809a1ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> mcspi2 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__mcspi2 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_mcspi2_hwmod, .clk = "l4_div_ck", .addr = omap44xx_mcspi2_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* mcspi2 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_mcspi2_slaves[] = { &omap44xx_l4_per__mcspi2, }; /* mcspi2 dev_attr */ static struct omap2_mcspi_dev_attr mcspi2_dev_attr = { .num_chipselect = 2, }; static struct omap_hwmod omap44xx_mcspi2_hwmod = { .name = "mcspi2", .class = &omap44xx_mcspi_hwmod_class, .clkdm_name = "l4_per_clkdm", .mpu_irqs = omap44xx_mcspi2_irqs, .sdma_reqs = omap44xx_mcspi2_sdma_reqs, .main_clk = "mcspi2_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_MCSPI2_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_MCSPI2_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .dev_attr = &mcspi2_dev_attr, .slaves = omap44xx_mcspi2_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_mcspi2_slaves), }; /* mcspi3 */ static struct omap_hwmod omap44xx_mcspi3_hwmod; static struct omap_hwmod_irq_info omap44xx_mcspi3_irqs[] = { { .irq = 91 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_mcspi3_sdma_reqs[] = { { .name = "tx0", .dma_req = 14 + OMAP44XX_DMA_REQ_START }, { .name = "rx0", .dma_req = 15 + OMAP44XX_DMA_REQ_START }, { .name = "tx1", .dma_req = 22 + OMAP44XX_DMA_REQ_START }, { .name = "rx1", .dma_req = 23 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_mcspi3_addrs[] = { { .pa_start = 0x480b8000, .pa_end = 0x480b81ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> mcspi3 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__mcspi3 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_mcspi3_hwmod, .clk = "l4_div_ck", .addr = omap44xx_mcspi3_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* mcspi3 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_mcspi3_slaves[] = { &omap44xx_l4_per__mcspi3, }; /* mcspi3 dev_attr */ static struct omap2_mcspi_dev_attr mcspi3_dev_attr = { .num_chipselect = 2, }; static struct omap_hwmod omap44xx_mcspi3_hwmod = { .name = "mcspi3", .class = &omap44xx_mcspi_hwmod_class, .clkdm_name = "l4_per_clkdm", .mpu_irqs = omap44xx_mcspi3_irqs, .sdma_reqs = omap44xx_mcspi3_sdma_reqs, .main_clk = "mcspi3_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_MCSPI3_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_MCSPI3_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .dev_attr = &mcspi3_dev_attr, .slaves = omap44xx_mcspi3_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_mcspi3_slaves), }; /* mcspi4 */ static struct omap_hwmod 
omap44xx_mcspi4_hwmod; static struct omap_hwmod_irq_info omap44xx_mcspi4_irqs[] = { { .irq = 48 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_mcspi4_sdma_reqs[] = { { .name = "tx0", .dma_req = 69 + OMAP44XX_DMA_REQ_START }, { .name = "rx0", .dma_req = 70 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_mcspi4_addrs[] = { { .pa_start = 0x480ba000, .pa_end = 0x480ba1ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> mcspi4 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__mcspi4 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_mcspi4_hwmod, .clk = "l4_div_ck", .addr = omap44xx_mcspi4_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* mcspi4 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_mcspi4_slaves[] = { &omap44xx_l4_per__mcspi4, }; /* mcspi4 dev_attr */ static struct omap2_mcspi_dev_attr mcspi4_dev_attr = { .num_chipselect = 1, }; static struct omap_hwmod omap44xx_mcspi4_hwmod = { .name = "mcspi4", .class = &omap44xx_mcspi_hwmod_class, .clkdm_name = "l4_per_clkdm", .mpu_irqs = omap44xx_mcspi4_irqs, .sdma_reqs = omap44xx_mcspi4_sdma_reqs, .main_clk = "mcspi4_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_MCSPI4_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_MCSPI4_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .dev_attr = &mcspi4_dev_attr, .slaves = omap44xx_mcspi4_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_mcspi4_slaves), }; /* * 'mmc' class * multimedia card high-speed/sd/sdio (mmc/sd/sdio) host controller */ static struct omap_hwmod_class_sysconfig omap44xx_mmc_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_MIDLEMODE | SYSC_HAS_RESET_STATUS | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART | MSTANDBY_SMART_WKUP), .sysc_fields = &omap_hwmod_sysc_type2, }; static struct omap_hwmod_class omap44xx_mmc_hwmod_class = { .name = "mmc", .sysc = &omap44xx_mmc_sysc, }; /* mmc1 */ static struct omap_hwmod_irq_info omap44xx_mmc1_irqs[] = { { .irq = 83 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_mmc1_sdma_reqs[] = { { .name = "tx", .dma_req = 60 + OMAP44XX_DMA_REQ_START }, { .name = "rx", .dma_req = 61 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; /* mmc1 master ports */ static struct omap_hwmod_ocp_if *omap44xx_mmc1_masters[] = { &omap44xx_mmc1__l3_main_1, }; static struct omap_hwmod_addr_space omap44xx_mmc1_addrs[] = { { .pa_start = 0x4809c000, .pa_end = 0x4809c3ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> mmc1 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__mmc1 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_mmc1_hwmod, .clk = "l4_div_ck", .addr = omap44xx_mmc1_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* mmc1 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_mmc1_slaves[] = { &omap44xx_l4_per__mmc1, }; /* mmc1 dev_attr */ static struct omap_mmc_dev_attr mmc1_dev_attr = { .flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT, }; static struct omap_hwmod omap44xx_mmc1_hwmod = { .name = "mmc1", .class = &omap44xx_mmc_hwmod_class, .clkdm_name = "l3_init_clkdm", .mpu_irqs = omap44xx_mmc1_irqs, .sdma_reqs = omap44xx_mmc1_sdma_reqs, .main_clk = "mmc1_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L3INIT_MMC1_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L3INIT_MMC1_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .dev_attr = &mmc1_dev_attr, .slaves = 
omap44xx_mmc1_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_mmc1_slaves), .masters = omap44xx_mmc1_masters, .masters_cnt = ARRAY_SIZE(omap44xx_mmc1_masters), }; /* mmc2 */ static struct omap_hwmod_irq_info omap44xx_mmc2_irqs[] = { { .irq = 86 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_mmc2_sdma_reqs[] = { { .name = "tx", .dma_req = 46 + OMAP44XX_DMA_REQ_START }, { .name = "rx", .dma_req = 47 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; /* mmc2 master ports */ static struct omap_hwmod_ocp_if *omap44xx_mmc2_masters[] = { &omap44xx_mmc2__l3_main_1, }; static struct omap_hwmod_addr_space omap44xx_mmc2_addrs[] = { { .pa_start = 0x480b4000, .pa_end = 0x480b43ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> mmc2 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__mmc2 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_mmc2_hwmod, .clk = "l4_div_ck", .addr = omap44xx_mmc2_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* mmc2 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_mmc2_slaves[] = { &omap44xx_l4_per__mmc2, }; static struct omap_hwmod omap44xx_mmc2_hwmod = { .name = "mmc2", .class = &omap44xx_mmc_hwmod_class, .clkdm_name = "l3_init_clkdm", .mpu_irqs = omap44xx_mmc2_irqs, .sdma_reqs = omap44xx_mmc2_sdma_reqs, .main_clk = "mmc2_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L3INIT_MMC2_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L3INIT_MMC2_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .slaves = omap44xx_mmc2_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_mmc2_slaves), .masters = omap44xx_mmc2_masters, .masters_cnt = ARRAY_SIZE(omap44xx_mmc2_masters), }; /* mmc3 */ static struct omap_hwmod omap44xx_mmc3_hwmod; static struct omap_hwmod_irq_info omap44xx_mmc3_irqs[] = { { .irq = 94 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_mmc3_sdma_reqs[] = { { .name = "tx", .dma_req = 76 + OMAP44XX_DMA_REQ_START }, { .name = "rx", .dma_req = 77 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_mmc3_addrs[] = { { .pa_start = 0x480ad000, .pa_end = 0x480ad3ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> mmc3 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__mmc3 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_mmc3_hwmod, .clk = "l4_div_ck", .addr = omap44xx_mmc3_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* mmc3 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_mmc3_slaves[] = { &omap44xx_l4_per__mmc3, }; static struct omap_hwmod omap44xx_mmc3_hwmod = { .name = "mmc3", .class = &omap44xx_mmc_hwmod_class, .clkdm_name = "l4_per_clkdm", .mpu_irqs = omap44xx_mmc3_irqs, .sdma_reqs = omap44xx_mmc3_sdma_reqs, .main_clk = "mmc3_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_MMCSD3_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_MMCSD3_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .slaves = omap44xx_mmc3_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_mmc3_slaves), }; /* mmc4 */ static struct omap_hwmod omap44xx_mmc4_hwmod; static struct omap_hwmod_irq_info omap44xx_mmc4_irqs[] = { { .irq = 96 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_mmc4_sdma_reqs[] = { { .name = "tx", .dma_req = 56 + OMAP44XX_DMA_REQ_START }, { .name = "rx", .dma_req = 57 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_mmc4_addrs[] = { { .pa_start = 0x480d1000, .pa_end = 0x480d13ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> mmc4 */ static struct omap_hwmod_ocp_if 
omap44xx_l4_per__mmc4 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_mmc4_hwmod, .clk = "l4_div_ck", .addr = omap44xx_mmc4_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* mmc4 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_mmc4_slaves[] = { &omap44xx_l4_per__mmc4, }; static struct omap_hwmod omap44xx_mmc4_hwmod = { .name = "mmc4", .class = &omap44xx_mmc_hwmod_class, .clkdm_name = "l4_per_clkdm", .mpu_irqs = omap44xx_mmc4_irqs, .sdma_reqs = omap44xx_mmc4_sdma_reqs, .main_clk = "mmc4_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_MMCSD4_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_MMCSD4_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .slaves = omap44xx_mmc4_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_mmc4_slaves), }; /* mmc5 */ static struct omap_hwmod omap44xx_mmc5_hwmod; static struct omap_hwmod_irq_info omap44xx_mmc5_irqs[] = { { .irq = 59 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_mmc5_sdma_reqs[] = { { .name = "tx", .dma_req = 58 + OMAP44XX_DMA_REQ_START }, { .name = "rx", .dma_req = 59 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_mmc5_addrs[] = { { .pa_start = 0x480d5000, .pa_end = 0x480d53ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> mmc5 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__mmc5 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_mmc5_hwmod, .clk = "l4_div_ck", .addr = omap44xx_mmc5_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* mmc5 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_mmc5_slaves[] = { &omap44xx_l4_per__mmc5, }; static struct omap_hwmod omap44xx_mmc5_hwmod = { .name = "mmc5", .class = &omap44xx_mmc_hwmod_class, .clkdm_name = "l4_per_clkdm", .mpu_irqs = omap44xx_mmc5_irqs, .sdma_reqs = omap44xx_mmc5_sdma_reqs, .main_clk = "mmc5_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_MMCSD5_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_MMCSD5_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .slaves = omap44xx_mmc5_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_mmc5_slaves), }; /* * 'mpu' class * mpu sub-system */ static struct omap_hwmod_class omap44xx_mpu_hwmod_class = { .name = "mpu", }; /* mpu */ static struct omap_hwmod_irq_info omap44xx_mpu_irqs[] = { { .name = "pl310", .irq = 0 + OMAP44XX_IRQ_GIC_START }, { .name = "cti0", .irq = 1 + OMAP44XX_IRQ_GIC_START }, { .name = "cti1", .irq = 2 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; /* mpu master ports */ static struct omap_hwmod_ocp_if *omap44xx_mpu_masters[] = { &omap44xx_mpu__l3_main_1, &omap44xx_mpu__l4_abe, &omap44xx_mpu__dmm, }; static struct omap_hwmod omap44xx_mpu_hwmod = { .name = "mpu", .class = &omap44xx_mpu_hwmod_class, .clkdm_name = "mpuss_clkdm", .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET, .mpu_irqs = omap44xx_mpu_irqs, .main_clk = "dpll_mpu_m2_ck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_MPU_MPU_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_MPU_MPU_CONTEXT_OFFSET, }, }, .masters = omap44xx_mpu_masters, .masters_cnt = ARRAY_SIZE(omap44xx_mpu_masters), }; /* * 'smartreflex' class * smartreflex module (monitor silicon performance and outputs a measure of * performance error) */ /* The IP is not compliant to type1 / type2 scheme */ static struct omap_hwmod_sysc_fields omap_hwmod_sysc_type_smartreflex = { .sidle_shift = 24, .enwkup_shift = 26, }; static struct omap_hwmod_class_sysconfig omap44xx_smartreflex_sysc = { .sysc_offs = 0x0038, .sysc_flags = (SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE), .idlemodes = (SIDLE_FORCE | SIDLE_NO 
| SIDLE_SMART | SIDLE_SMART_WKUP), .sysc_fields = &omap_hwmod_sysc_type_smartreflex, }; static struct omap_hwmod_class omap44xx_smartreflex_hwmod_class = { .name = "smartreflex", .sysc = &omap44xx_smartreflex_sysc, .rev = 2, }; /* smartreflex_core */ static struct omap_smartreflex_dev_attr smartreflex_core_dev_attr = { .sensor_voltdm_name = "core", }; static struct omap_hwmod omap44xx_smartreflex_core_hwmod; static struct omap_hwmod_irq_info omap44xx_smartreflex_core_irqs[] = { { .irq = 19 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap44xx_smartreflex_core_addrs[] = { { .pa_start = 0x4a0dd000, .pa_end = 0x4a0dd03f, .flags = ADDR_TYPE_RT }, { } }; /* l4_cfg -> smartreflex_core */ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__smartreflex_core = { .master = &omap44xx_l4_cfg_hwmod, .slave = &omap44xx_smartreflex_core_hwmod, .clk = "l4_div_ck", .addr = omap44xx_smartreflex_core_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* smartreflex_core slave ports */ static struct omap_hwmod_ocp_if *omap44xx_smartreflex_core_slaves[] = { &omap44xx_l4_cfg__smartreflex_core, }; static struct omap_hwmod omap44xx_smartreflex_core_hwmod = { .name = "smartreflex_core", .class = &omap44xx_smartreflex_hwmod_class, .clkdm_name = "l4_ao_clkdm", .mpu_irqs = omap44xx_smartreflex_core_irqs, .main_clk = "smartreflex_core_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_ALWON_SR_CORE_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_ALWON_SR_CORE_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .slaves = omap44xx_smartreflex_core_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_smartreflex_core_slaves), .dev_attr = &smartreflex_core_dev_attr, }; /* smartreflex_iva */ static struct omap_smartreflex_dev_attr smartreflex_iva_dev_attr = { .sensor_voltdm_name = "iva", }; static struct omap_hwmod omap44xx_smartreflex_iva_hwmod; static struct omap_hwmod_irq_info omap44xx_smartreflex_iva_irqs[] = { { .irq = 102 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap44xx_smartreflex_iva_addrs[] = { { .pa_start = 0x4a0db000, .pa_end = 0x4a0db03f, .flags = ADDR_TYPE_RT }, { } }; /* l4_cfg -> smartreflex_iva */ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__smartreflex_iva = { .master = &omap44xx_l4_cfg_hwmod, .slave = &omap44xx_smartreflex_iva_hwmod, .clk = "l4_div_ck", .addr = omap44xx_smartreflex_iva_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* smartreflex_iva slave ports */ static struct omap_hwmod_ocp_if *omap44xx_smartreflex_iva_slaves[] = { &omap44xx_l4_cfg__smartreflex_iva, }; static struct omap_hwmod omap44xx_smartreflex_iva_hwmod = { .name = "smartreflex_iva", .class = &omap44xx_smartreflex_hwmod_class, .clkdm_name = "l4_ao_clkdm", .mpu_irqs = omap44xx_smartreflex_iva_irqs, .main_clk = "smartreflex_iva_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_ALWON_SR_IVA_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_ALWON_SR_IVA_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .slaves = omap44xx_smartreflex_iva_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_smartreflex_iva_slaves), .dev_attr = &smartreflex_iva_dev_attr, }; /* smartreflex_mpu */ static struct omap_smartreflex_dev_attr smartreflex_mpu_dev_attr = { .sensor_voltdm_name = "mpu", }; static struct omap_hwmod omap44xx_smartreflex_mpu_hwmod; static struct omap_hwmod_irq_info omap44xx_smartreflex_mpu_irqs[] = { { .irq = 18 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap44xx_smartreflex_mpu_addrs[] = { { .pa_start = 0x4a0d9000, .pa_end = 0x4a0d903f, .flags = 
ADDR_TYPE_RT }, { } }; /* l4_cfg -> smartreflex_mpu */ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__smartreflex_mpu = { .master = &omap44xx_l4_cfg_hwmod, .slave = &omap44xx_smartreflex_mpu_hwmod, .clk = "l4_div_ck", .addr = omap44xx_smartreflex_mpu_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* smartreflex_mpu slave ports */ static struct omap_hwmod_ocp_if *omap44xx_smartreflex_mpu_slaves[] = { &omap44xx_l4_cfg__smartreflex_mpu, }; static struct omap_hwmod omap44xx_smartreflex_mpu_hwmod = { .name = "smartreflex_mpu", .class = &omap44xx_smartreflex_hwmod_class, .clkdm_name = "l4_ao_clkdm", .mpu_irqs = omap44xx_smartreflex_mpu_irqs, .main_clk = "smartreflex_mpu_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_ALWON_SR_MPU_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_ALWON_SR_MPU_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .slaves = omap44xx_smartreflex_mpu_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_smartreflex_mpu_slaves), .dev_attr = &smartreflex_mpu_dev_attr, }; /* * 'spinlock' class * spinlock provides hardware assistance for synchronizing the processes * running on multiple processors */ static struct omap_hwmod_class_sysconfig omap44xx_spinlock_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | SIDLE_SMART_WKUP), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap44xx_spinlock_hwmod_class = { .name = "spinlock", .sysc = &omap44xx_spinlock_sysc, }; /* spinlock */ static struct omap_hwmod omap44xx_spinlock_hwmod; static struct omap_hwmod_addr_space omap44xx_spinlock_addrs[] = { { .pa_start = 0x4a0f6000, .pa_end = 0x4a0f6fff, .flags = ADDR_TYPE_RT }, { } }; /* l4_cfg -> spinlock */ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__spinlock = { .master = &omap44xx_l4_cfg_hwmod, .slave = &omap44xx_spinlock_hwmod, .clk = "l4_div_ck", .addr = omap44xx_spinlock_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* spinlock slave ports */ static struct omap_hwmod_ocp_if *omap44xx_spinlock_slaves[] = { &omap44xx_l4_cfg__spinlock, }; static struct omap_hwmod omap44xx_spinlock_hwmod = { .name = "spinlock", .class = &omap44xx_spinlock_hwmod_class, .clkdm_name = "l4_cfg_clkdm", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4CFG_HW_SEM_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4CFG_HW_SEM_CONTEXT_OFFSET, }, }, .slaves = omap44xx_spinlock_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_spinlock_slaves), }; /* * 'timer' class * general purpose timer module with accurate 1ms tick * This class contains several variants: ['timer_1ms', 'timer'] */ static struct omap_hwmod_class_sysconfig omap44xx_timer_1ms_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_EMUFREE | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap44xx_timer_1ms_hwmod_class = { .name = "timer", .sysc = &omap44xx_timer_1ms_sysc, }; static struct omap_hwmod_class_sysconfig omap44xx_timer_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_RESET_STATUS | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | SIDLE_SMART_WKUP), .sysc_fields = 
&omap_hwmod_sysc_type2, }; static struct omap_hwmod_class omap44xx_timer_hwmod_class = { .name = "timer", .sysc = &omap44xx_timer_sysc, }; /* always-on timers dev attribute */ static struct omap_timer_capability_dev_attr capability_alwon_dev_attr = { .timer_capability = OMAP_TIMER_ALWON, }; /* pwm timers dev attribute */ static struct omap_timer_capability_dev_attr capability_pwm_dev_attr = { .timer_capability = OMAP_TIMER_HAS_PWM, }; /* timer1 */ static struct omap_hwmod omap44xx_timer1_hwmod; static struct omap_hwmod_irq_info omap44xx_timer1_irqs[] = { { .irq = 37 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap44xx_timer1_addrs[] = { { .pa_start = 0x4a318000, .pa_end = 0x4a31807f, .flags = ADDR_TYPE_RT }, { } }; /* l4_wkup -> timer1 */ static struct omap_hwmod_ocp_if omap44xx_l4_wkup__timer1 = { .master = &omap44xx_l4_wkup_hwmod, .slave = &omap44xx_timer1_hwmod, .clk = "l4_wkup_clk_mux_ck", .addr = omap44xx_timer1_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* timer1 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_timer1_slaves[] = { &omap44xx_l4_wkup__timer1, }; static struct omap_hwmod omap44xx_timer1_hwmod = { .name = "timer1", .class = &omap44xx_timer_1ms_hwmod_class, .clkdm_name = "l4_wkup_clkdm", .mpu_irqs = omap44xx_timer1_irqs, .main_clk = "timer1_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_WKUP_TIMER1_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_WKUP_TIMER1_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .dev_attr = &capability_alwon_dev_attr, .slaves = omap44xx_timer1_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_timer1_slaves), }; /* timer2 */ static struct omap_hwmod omap44xx_timer2_hwmod; static struct omap_hwmod_irq_info omap44xx_timer2_irqs[] = { { .irq = 38 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap44xx_timer2_addrs[] = { { .pa_start = 0x48032000, .pa_end = 0x4803207f, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> timer2 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__timer2 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_timer2_hwmod, .clk = "l4_div_ck", .addr = omap44xx_timer2_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* timer2 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_timer2_slaves[] = { &omap44xx_l4_per__timer2, }; static struct omap_hwmod omap44xx_timer2_hwmod = { .name = "timer2", .class = &omap44xx_timer_1ms_hwmod_class, .clkdm_name = "l4_per_clkdm", .mpu_irqs = omap44xx_timer2_irqs, .main_clk = "timer2_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_DMTIMER2_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_DMTIMER2_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .dev_attr = &capability_alwon_dev_attr, .slaves = omap44xx_timer2_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_timer2_slaves), }; /* timer3 */ static struct omap_hwmod omap44xx_timer3_hwmod; static struct omap_hwmod_irq_info omap44xx_timer3_irqs[] = { { .irq = 39 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap44xx_timer3_addrs[] = { { .pa_start = 0x48034000, .pa_end = 0x4803407f, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> timer3 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__timer3 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_timer3_hwmod, .clk = "l4_div_ck", .addr = omap44xx_timer3_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* timer3 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_timer3_slaves[] = { &omap44xx_l4_per__timer3, }; static struct omap_hwmod omap44xx_timer3_hwmod = { .name = 
"timer3", .class = &omap44xx_timer_hwmod_class, .clkdm_name = "l4_per_clkdm", .mpu_irqs = omap44xx_timer3_irqs, .main_clk = "timer3_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_DMTIMER3_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_DMTIMER3_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .dev_attr = &capability_alwon_dev_attr, .slaves = omap44xx_timer3_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_timer3_slaves), }; /* timer4 */ static struct omap_hwmod omap44xx_timer4_hwmod; static struct omap_hwmod_irq_info omap44xx_timer4_irqs[] = { { .irq = 40 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap44xx_timer4_addrs[] = { { .pa_start = 0x48036000, .pa_end = 0x4803607f, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> timer4 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__timer4 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_timer4_hwmod, .clk = "l4_div_ck", .addr = omap44xx_timer4_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* timer4 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_timer4_slaves[] = { &omap44xx_l4_per__timer4, }; static struct omap_hwmod omap44xx_timer4_hwmod = { .name = "timer4", .class = &omap44xx_timer_hwmod_class, .clkdm_name = "l4_per_clkdm", .mpu_irqs = omap44xx_timer4_irqs, .main_clk = "timer4_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_DMTIMER4_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_DMTIMER4_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .dev_attr = &capability_alwon_dev_attr, .slaves = omap44xx_timer4_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_timer4_slaves), }; /* timer5 */ static struct omap_hwmod omap44xx_timer5_hwmod; static struct omap_hwmod_irq_info omap44xx_timer5_irqs[] = { { .irq = 41 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap44xx_timer5_addrs[] = { { .pa_start = 0x40138000, .pa_end = 0x4013807f, .flags = ADDR_TYPE_RT }, { } }; /* l4_abe -> timer5 */ static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer5 = { .master = &omap44xx_l4_abe_hwmod, .slave = &omap44xx_timer5_hwmod, .clk = "ocp_abe_iclk", .addr = omap44xx_timer5_addrs, .user = OCP_USER_MPU, }; static struct omap_hwmod_addr_space omap44xx_timer5_dma_addrs[] = { { .pa_start = 0x49038000, .pa_end = 0x4903807f, .flags = ADDR_TYPE_RT }, { } }; /* l4_abe -> timer5 (dma) */ static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer5_dma = { .master = &omap44xx_l4_abe_hwmod, .slave = &omap44xx_timer5_hwmod, .clk = "ocp_abe_iclk", .addr = omap44xx_timer5_dma_addrs, .user = OCP_USER_SDMA, }; /* timer5 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_timer5_slaves[] = { &omap44xx_l4_abe__timer5, &omap44xx_l4_abe__timer5_dma, }; static struct omap_hwmod omap44xx_timer5_hwmod = { .name = "timer5", .class = &omap44xx_timer_hwmod_class, .clkdm_name = "abe_clkdm", .mpu_irqs = omap44xx_timer5_irqs, .main_clk = "timer5_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM1_ABE_TIMER5_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_ABE_TIMER5_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .dev_attr = &capability_alwon_dev_attr, .slaves = omap44xx_timer5_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_timer5_slaves), }; /* timer6 */ static struct omap_hwmod omap44xx_timer6_hwmod; static struct omap_hwmod_irq_info omap44xx_timer6_irqs[] = { { .irq = 42 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap44xx_timer6_addrs[] = { { .pa_start = 0x4013a000, .pa_end = 0x4013a07f, .flags = ADDR_TYPE_RT }, { } }; /* l4_abe -> timer6 */ static struct 
omap_hwmod_ocp_if omap44xx_l4_abe__timer6 = { .master = &omap44xx_l4_abe_hwmod, .slave = &omap44xx_timer6_hwmod, .clk = "ocp_abe_iclk", .addr = omap44xx_timer6_addrs, .user = OCP_USER_MPU, }; static struct omap_hwmod_addr_space omap44xx_timer6_dma_addrs[] = { { .pa_start = 0x4903a000, .pa_end = 0x4903a07f, .flags = ADDR_TYPE_RT }, { } }; /* l4_abe -> timer6 (dma) */ static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer6_dma = { .master = &omap44xx_l4_abe_hwmod, .slave = &omap44xx_timer6_hwmod, .clk = "ocp_abe_iclk", .addr = omap44xx_timer6_dma_addrs, .user = OCP_USER_SDMA, }; /* timer6 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_timer6_slaves[] = { &omap44xx_l4_abe__timer6, &omap44xx_l4_abe__timer6_dma, }; static struct omap_hwmod omap44xx_timer6_hwmod = { .name = "timer6", .class = &omap44xx_timer_hwmod_class, .clkdm_name = "abe_clkdm", .mpu_irqs = omap44xx_timer6_irqs, .main_clk = "timer6_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM1_ABE_TIMER6_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_ABE_TIMER6_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .dev_attr = &capability_alwon_dev_attr, .slaves = omap44xx_timer6_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_timer6_slaves), }; /* timer7 */ static struct omap_hwmod omap44xx_timer7_hwmod; static struct omap_hwmod_irq_info omap44xx_timer7_irqs[] = { { .irq = 43 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap44xx_timer7_addrs[] = { { .pa_start = 0x4013c000, .pa_end = 0x4013c07f, .flags = ADDR_TYPE_RT }, { } }; /* l4_abe -> timer7 */ static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer7 = { .master = &omap44xx_l4_abe_hwmod, .slave = &omap44xx_timer7_hwmod, .clk = "ocp_abe_iclk", .addr = omap44xx_timer7_addrs, .user = OCP_USER_MPU, }; static struct omap_hwmod_addr_space omap44xx_timer7_dma_addrs[] = { { .pa_start = 0x4903c000, .pa_end = 0x4903c07f, .flags = ADDR_TYPE_RT }, { } }; /* l4_abe -> timer7 (dma) */ static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer7_dma = { .master = &omap44xx_l4_abe_hwmod, .slave = &omap44xx_timer7_hwmod, .clk = "ocp_abe_iclk", .addr = omap44xx_timer7_dma_addrs, .user = OCP_USER_SDMA, }; /* timer7 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_timer7_slaves[] = { &omap44xx_l4_abe__timer7, &omap44xx_l4_abe__timer7_dma, }; static struct omap_hwmod omap44xx_timer7_hwmod = { .name = "timer7", .class = &omap44xx_timer_hwmod_class, .clkdm_name = "abe_clkdm", .mpu_irqs = omap44xx_timer7_irqs, .main_clk = "timer7_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM1_ABE_TIMER7_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_ABE_TIMER7_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .dev_attr = &capability_alwon_dev_attr, .slaves = omap44xx_timer7_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_timer7_slaves), }; /* timer8 */ static struct omap_hwmod omap44xx_timer8_hwmod; static struct omap_hwmod_irq_info omap44xx_timer8_irqs[] = { { .irq = 44 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap44xx_timer8_addrs[] = { { .pa_start = 0x4013e000, .pa_end = 0x4013e07f, .flags = ADDR_TYPE_RT }, { } }; /* l4_abe -> timer8 */ static struct omap_hwmod_ocp_if omap44xx_l4_abe__timer8 = { .master = &omap44xx_l4_abe_hwmod, .slave = &omap44xx_timer8_hwmod, .clk = "ocp_abe_iclk", .addr = omap44xx_timer8_addrs, .user = OCP_USER_MPU, }; static struct omap_hwmod_addr_space omap44xx_timer8_dma_addrs[] = { { .pa_start = 0x4903e000, .pa_end = 0x4903e07f, .flags = ADDR_TYPE_RT }, { } }; /* l4_abe -> timer8 (dma) */ static struct omap_hwmod_ocp_if 
omap44xx_l4_abe__timer8_dma = { .master = &omap44xx_l4_abe_hwmod, .slave = &omap44xx_timer8_hwmod, .clk = "ocp_abe_iclk", .addr = omap44xx_timer8_dma_addrs, .user = OCP_USER_SDMA, }; /* timer8 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_timer8_slaves[] = { &omap44xx_l4_abe__timer8, &omap44xx_l4_abe__timer8_dma, }; static struct omap_hwmod omap44xx_timer8_hwmod = { .name = "timer8", .class = &omap44xx_timer_hwmod_class, .clkdm_name = "abe_clkdm", .mpu_irqs = omap44xx_timer8_irqs, .main_clk = "timer8_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM1_ABE_TIMER8_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_ABE_TIMER8_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .dev_attr = &capability_pwm_dev_attr, .slaves = omap44xx_timer8_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_timer8_slaves), }; /* timer9 */ static struct omap_hwmod omap44xx_timer9_hwmod; static struct omap_hwmod_irq_info omap44xx_timer9_irqs[] = { { .irq = 45 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap44xx_timer9_addrs[] = { { .pa_start = 0x4803e000, .pa_end = 0x4803e07f, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> timer9 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__timer9 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_timer9_hwmod, .clk = "l4_div_ck", .addr = omap44xx_timer9_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* timer9 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_timer9_slaves[] = { &omap44xx_l4_per__timer9, }; static struct omap_hwmod omap44xx_timer9_hwmod = { .name = "timer9", .class = &omap44xx_timer_hwmod_class, .clkdm_name = "l4_per_clkdm", .mpu_irqs = omap44xx_timer9_irqs, .main_clk = "timer9_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_DMTIMER9_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_DMTIMER9_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .dev_attr = &capability_pwm_dev_attr, .slaves = omap44xx_timer9_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_timer9_slaves), }; /* timer10 */ static struct omap_hwmod omap44xx_timer10_hwmod; static struct omap_hwmod_irq_info omap44xx_timer10_irqs[] = { { .irq = 46 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap44xx_timer10_addrs[] = { { .pa_start = 0x48086000, .pa_end = 0x4808607f, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> timer10 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__timer10 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_timer10_hwmod, .clk = "l4_div_ck", .addr = omap44xx_timer10_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* timer10 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_timer10_slaves[] = { &omap44xx_l4_per__timer10, }; static struct omap_hwmod omap44xx_timer10_hwmod = { .name = "timer10", .class = &omap44xx_timer_1ms_hwmod_class, .clkdm_name = "l4_per_clkdm", .mpu_irqs = omap44xx_timer10_irqs, .main_clk = "timer10_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_DMTIMER10_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_DMTIMER10_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .dev_attr = &capability_pwm_dev_attr, .slaves = omap44xx_timer10_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_timer10_slaves), }; /* timer11 */ static struct omap_hwmod omap44xx_timer11_hwmod; static struct omap_hwmod_irq_info omap44xx_timer11_irqs[] = { { .irq = 47 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap44xx_timer11_addrs[] = { { .pa_start = 0x48088000, .pa_end = 0x4808807f, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> timer11 */ static struct 
omap_hwmod_ocp_if omap44xx_l4_per__timer11 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_timer11_hwmod, .clk = "l4_div_ck", .addr = omap44xx_timer11_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* timer11 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_timer11_slaves[] = { &omap44xx_l4_per__timer11, }; static struct omap_hwmod omap44xx_timer11_hwmod = { .name = "timer11", .class = &omap44xx_timer_hwmod_class, .clkdm_name = "l4_per_clkdm", .mpu_irqs = omap44xx_timer11_irqs, .main_clk = "timer11_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_DMTIMER11_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_DMTIMER11_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .dev_attr = &capability_pwm_dev_attr, .slaves = omap44xx_timer11_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_timer11_slaves), }; /* * 'uart' class * universal asynchronous receiver/transmitter (uart) */ static struct omap_hwmod_class_sysconfig omap44xx_uart_sysc = { .rev_offs = 0x0050, .sysc_offs = 0x0054, .syss_offs = 0x0058, .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | SIDLE_SMART_WKUP), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap44xx_uart_hwmod_class = { .name = "uart", .sysc = &omap44xx_uart_sysc, }; /* uart1 */ static struct omap_hwmod omap44xx_uart1_hwmod; static struct omap_hwmod_irq_info omap44xx_uart1_irqs[] = { { .irq = 72 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_uart1_sdma_reqs[] = { { .name = "tx", .dma_req = 48 + OMAP44XX_DMA_REQ_START }, { .name = "rx", .dma_req = 49 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_uart1_addrs[] = { { .pa_start = 0x4806a000, .pa_end = 0x4806a0ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> uart1 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__uart1 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_uart1_hwmod, .clk = "l4_div_ck", .addr = omap44xx_uart1_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* uart1 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_uart1_slaves[] = { &omap44xx_l4_per__uart1, }; static struct omap_hwmod omap44xx_uart1_hwmod = { .name = "uart1", .class = &omap44xx_uart_hwmod_class, .clkdm_name = "l4_per_clkdm", .mpu_irqs = omap44xx_uart1_irqs, .sdma_reqs = omap44xx_uart1_sdma_reqs, .main_clk = "uart1_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_UART1_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_UART1_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .slaves = omap44xx_uart1_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_uart1_slaves), }; /* uart2 */ static struct omap_hwmod omap44xx_uart2_hwmod; static struct omap_hwmod_irq_info omap44xx_uart2_irqs[] = { { .irq = 73 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_uart2_sdma_reqs[] = { { .name = "tx", .dma_req = 50 + OMAP44XX_DMA_REQ_START }, { .name = "rx", .dma_req = 51 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_uart2_addrs[] = { { .pa_start = 0x4806c000, .pa_end = 0x4806c0ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> uart2 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__uart2 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_uart2_hwmod, .clk = "l4_div_ck", .addr = omap44xx_uart2_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* uart2 slave ports */ static struct omap_hwmod_ocp_if 
*omap44xx_uart2_slaves[] = { &omap44xx_l4_per__uart2, }; static struct omap_hwmod omap44xx_uart2_hwmod = { .name = "uart2", .class = &omap44xx_uart_hwmod_class, .clkdm_name = "l4_per_clkdm", .mpu_irqs = omap44xx_uart2_irqs, .sdma_reqs = omap44xx_uart2_sdma_reqs, .main_clk = "uart2_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_UART2_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_UART2_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .slaves = omap44xx_uart2_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_uart2_slaves), }; /* uart3 */ static struct omap_hwmod omap44xx_uart3_hwmod; static struct omap_hwmod_irq_info omap44xx_uart3_irqs[] = { { .irq = 74 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_uart3_sdma_reqs[] = { { .name = "tx", .dma_req = 52 + OMAP44XX_DMA_REQ_START }, { .name = "rx", .dma_req = 53 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_uart3_addrs[] = { { .pa_start = 0x48020000, .pa_end = 0x480200ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> uart3 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__uart3 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_uart3_hwmod, .clk = "l4_div_ck", .addr = omap44xx_uart3_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* uart3 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_uart3_slaves[] = { &omap44xx_l4_per__uart3, }; static struct omap_hwmod omap44xx_uart3_hwmod = { .name = "uart3", .class = &omap44xx_uart_hwmod_class, .clkdm_name = "l4_per_clkdm", .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET, .mpu_irqs = omap44xx_uart3_irqs, .sdma_reqs = omap44xx_uart3_sdma_reqs, .main_clk = "uart3_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_UART3_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_UART3_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .slaves = omap44xx_uart3_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_uart3_slaves), }; /* uart4 */ static struct omap_hwmod omap44xx_uart4_hwmod; static struct omap_hwmod_irq_info omap44xx_uart4_irqs[] = { { .irq = 70 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_dma_info omap44xx_uart4_sdma_reqs[] = { { .name = "tx", .dma_req = 54 + OMAP44XX_DMA_REQ_START }, { .name = "rx", .dma_req = 55 + OMAP44XX_DMA_REQ_START }, { .dma_req = -1 } }; static struct omap_hwmod_addr_space omap44xx_uart4_addrs[] = { { .pa_start = 0x4806e000, .pa_end = 0x4806e0ff, .flags = ADDR_TYPE_RT }, { } }; /* l4_per -> uart4 */ static struct omap_hwmod_ocp_if omap44xx_l4_per__uart4 = { .master = &omap44xx_l4_per_hwmod, .slave = &omap44xx_uart4_hwmod, .clk = "l4_div_ck", .addr = omap44xx_uart4_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* uart4 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_uart4_slaves[] = { &omap44xx_l4_per__uart4, }; static struct omap_hwmod omap44xx_uart4_hwmod = { .name = "uart4", .class = &omap44xx_uart_hwmod_class, .clkdm_name = "l4_per_clkdm", .mpu_irqs = omap44xx_uart4_irqs, .sdma_reqs = omap44xx_uart4_sdma_reqs, .main_clk = "uart4_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L4PER_UART4_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L4PER_UART4_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .slaves = omap44xx_uart4_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_uart4_slaves), }; /* * 'usb_otg_hs' class * high-speed on-the-go universal serial bus (usb_otg_hs) controller */ static struct omap_hwmod_class_sysconfig omap44xx_usb_otg_hs_sysc = { .rev_offs = 0x0400, .sysc_offs = 0x0404, .syss_offs = 0x0408, .sysc_flags = 
(SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP | SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap44xx_usb_otg_hs_hwmod_class = { .name = "usb_otg_hs", .sysc = &omap44xx_usb_otg_hs_sysc, }; /* usb_otg_hs */ static struct omap_hwmod_irq_info omap44xx_usb_otg_hs_irqs[] = { { .name = "mc", .irq = 92 + OMAP44XX_IRQ_GIC_START }, { .name = "dma", .irq = 93 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; /* usb_otg_hs master ports */ static struct omap_hwmod_ocp_if *omap44xx_usb_otg_hs_masters[] = { &omap44xx_usb_otg_hs__l3_main_2, }; static struct omap_hwmod_addr_space omap44xx_usb_otg_hs_addrs[] = { { .pa_start = 0x4a0ab000, .pa_end = 0x4a0ab003, .flags = ADDR_TYPE_RT }, { } }; /* l4_cfg -> usb_otg_hs */ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__usb_otg_hs = { .master = &omap44xx_l4_cfg_hwmod, .slave = &omap44xx_usb_otg_hs_hwmod, .clk = "l4_div_ck", .addr = omap44xx_usb_otg_hs_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* usb_otg_hs slave ports */ static struct omap_hwmod_ocp_if *omap44xx_usb_otg_hs_slaves[] = { &omap44xx_l4_cfg__usb_otg_hs, }; static struct omap_hwmod_opt_clk usb_otg_hs_opt_clks[] = { { .role = "xclk", .clk = "usb_otg_hs_xclk" }, }; static struct omap_hwmod omap44xx_usb_otg_hs_hwmod = { .name = "usb_otg_hs", .class = &omap44xx_usb_otg_hs_hwmod_class, .clkdm_name = "l3_init_clkdm", .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY, .mpu_irqs = omap44xx_usb_otg_hs_irqs, .main_clk = "usb_otg_hs_ick", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L3INIT_USB_OTG_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L3INIT_USB_OTG_CONTEXT_OFFSET, .modulemode = MODULEMODE_HWCTRL, }, }, .opt_clks = usb_otg_hs_opt_clks, .opt_clks_cnt = ARRAY_SIZE(usb_otg_hs_opt_clks), .slaves = omap44xx_usb_otg_hs_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_usb_otg_hs_slaves), .masters = omap44xx_usb_otg_hs_masters, .masters_cnt = ARRAY_SIZE(omap44xx_usb_otg_hs_masters), }; /* * 'wd_timer' class * 32-bit watchdog upward counter that generates a pulse on the reset pin on * overflow condition */ static struct omap_hwmod_class_sysconfig omap44xx_wd_timer_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | SIDLE_SMART_WKUP), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap44xx_wd_timer_hwmod_class = { .name = "wd_timer", .sysc = &omap44xx_wd_timer_sysc, .pre_shutdown = &omap2_wd_timer_disable, }; /* wd_timer2 */ static struct omap_hwmod omap44xx_wd_timer2_hwmod; static struct omap_hwmod_irq_info omap44xx_wd_timer2_irqs[] = { { .irq = 80 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap44xx_wd_timer2_addrs[] = { { .pa_start = 0x4a314000, .pa_end = 0x4a31407f, .flags = ADDR_TYPE_RT }, { } }; /* l4_wkup -> wd_timer2 */ static struct omap_hwmod_ocp_if omap44xx_l4_wkup__wd_timer2 = { .master = &omap44xx_l4_wkup_hwmod, .slave = &omap44xx_wd_timer2_hwmod, .clk = "l4_wkup_clk_mux_ck", .addr = omap44xx_wd_timer2_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; /* wd_timer2 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_wd_timer2_slaves[] = { &omap44xx_l4_wkup__wd_timer2, }; static struct omap_hwmod omap44xx_wd_timer2_hwmod = { .name = "wd_timer2", .class = 
&omap44xx_wd_timer_hwmod_class, .clkdm_name = "l4_wkup_clkdm", .mpu_irqs = omap44xx_wd_timer2_irqs, .main_clk = "wd_timer2_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_WKUP_WDT2_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_WKUP_WDT2_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .slaves = omap44xx_wd_timer2_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_wd_timer2_slaves), }; /* wd_timer3 */ static struct omap_hwmod omap44xx_wd_timer3_hwmod; static struct omap_hwmod_irq_info omap44xx_wd_timer3_irqs[] = { { .irq = 36 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap44xx_wd_timer3_addrs[] = { { .pa_start = 0x40130000, .pa_end = 0x4013007f, .flags = ADDR_TYPE_RT }, { } }; /* l4_abe -> wd_timer3 */ static struct omap_hwmod_ocp_if omap44xx_l4_abe__wd_timer3 = { .master = &omap44xx_l4_abe_hwmod, .slave = &omap44xx_wd_timer3_hwmod, .clk = "ocp_abe_iclk", .addr = omap44xx_wd_timer3_addrs, .user = OCP_USER_MPU, }; static struct omap_hwmod_addr_space omap44xx_wd_timer3_dma_addrs[] = { { .pa_start = 0x49030000, .pa_end = 0x4903007f, .flags = ADDR_TYPE_RT }, { } }; /* l4_abe -> wd_timer3 (dma) */ static struct omap_hwmod_ocp_if omap44xx_l4_abe__wd_timer3_dma = { .master = &omap44xx_l4_abe_hwmod, .slave = &omap44xx_wd_timer3_hwmod, .clk = "ocp_abe_iclk", .addr = omap44xx_wd_timer3_dma_addrs, .user = OCP_USER_SDMA, }; /* wd_timer3 slave ports */ static struct omap_hwmod_ocp_if *omap44xx_wd_timer3_slaves[] = { &omap44xx_l4_abe__wd_timer3, &omap44xx_l4_abe__wd_timer3_dma, }; static struct omap_hwmod omap44xx_wd_timer3_hwmod = { .name = "wd_timer3", .class = &omap44xx_wd_timer_hwmod_class, .clkdm_name = "abe_clkdm", .mpu_irqs = omap44xx_wd_timer3_irqs, .main_clk = "wd_timer3_fck", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM1_ABE_WDT3_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_ABE_WDT3_CONTEXT_OFFSET, .modulemode = MODULEMODE_SWCTRL, }, }, .slaves = omap44xx_wd_timer3_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_wd_timer3_slaves), }; /* * 'usb_host_hs' class * high-speed multi-port usb host controller */ static struct omap_hwmod_ocp_if omap44xx_usb_host_hs__l3_main_2 = { .master = &omap44xx_usb_host_hs_hwmod, .slave = &omap44xx_l3_main_2_hwmod, .clk = "l3_div_ck", .user = OCP_USER_MPU | OCP_USER_SDMA, }; static struct omap_hwmod_class_sysconfig omap44xx_usb_host_hs_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART | MSTANDBY_SMART_WKUP), .sysc_fields = &omap_hwmod_sysc_type2, }; static struct omap_hwmod_class omap44xx_usb_host_hs_hwmod_class = { .name = "usb_host_hs", .sysc = &omap44xx_usb_host_hs_sysc, }; static struct omap_hwmod_ocp_if *omap44xx_usb_host_hs_masters[] = { &omap44xx_usb_host_hs__l3_main_2, }; static struct omap_hwmod_addr_space omap44xx_usb_host_hs_addrs[] = { { .name = "uhh", .pa_start = 0x4a064000, .pa_end = 0x4a0647ff, .flags = ADDR_TYPE_RT }, { .name = "ohci", .pa_start = 0x4a064800, .pa_end = 0x4a064bff, }, { .name = "ehci", .pa_start = 0x4a064c00, .pa_end = 0x4a064fff, }, {} }; static struct omap_hwmod_irq_info omap44xx_usb_host_hs_irqs[] = { { .name = "ohci-irq", .irq = 76 + OMAP44XX_IRQ_GIC_START }, { .name = "ehci-irq", .irq = 77 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_ocp_if omap44xx_l4_cfg__usb_host_hs = { .master = &omap44xx_l4_cfg_hwmod, .slave = &omap44xx_usb_host_hs_hwmod, .clk = "l4_div_ck", 
	.addr		= omap44xx_usb_host_hs_addrs,
	.user		= OCP_USER_MPU | OCP_USER_SDMA,
};

static struct omap_hwmod_ocp_if *omap44xx_usb_host_hs_slaves[] = {
	&omap44xx_l4_cfg__usb_host_hs,
};

static struct omap_hwmod omap44xx_usb_host_hs_hwmod = {
	.name		= "usb_host_hs",
	.class		= &omap44xx_usb_host_hs_hwmod_class,
	.clkdm_name	= "l3_init_clkdm",
	.main_clk	= "usb_host_hs_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = OMAP4_CM_L3INIT_USB_HOST_CLKCTRL_OFFSET,
			.context_offs = OMAP4_RM_L3INIT_USB_HOST_CONTEXT_OFFSET,
			.modulemode   = MODULEMODE_SWCTRL,
		},
	},
	.mpu_irqs	= omap44xx_usb_host_hs_irqs,
	.slaves		= omap44xx_usb_host_hs_slaves,
	.slaves_cnt	= ARRAY_SIZE(omap44xx_usb_host_hs_slaves),
	.masters	= omap44xx_usb_host_hs_masters,
	.masters_cnt	= ARRAY_SIZE(omap44xx_usb_host_hs_masters),

	/*
	 * Errata: USBHOST Configured In Smart-Idle Can Lead To a Deadlock
	 * Id: i660
	 *
	 * Description:
	 * In the following configuration:
	 * - the USBHOST module is set to smart-idle mode
	 * - the PRCM asserts idle_req to the USBHOST module (this typically
	 *   happens when the system is going to a low-power mode: all ports
	 *   have been suspended, the master part of the USBHOST module has
	 *   entered the standby state, and SW has cut the functional clocks)
	 * - a USBHOST interrupt occurs before the module is able to answer
	 *   idle_ack, typically a remote wakeup IRQ.
	 * Then the USB HOST module will enter a deadlock situation where it
	 * is no longer accessible or functional.
	 *
	 * Workaround:
	 * Don't use smart idle; use only force idle, hence HWMOD_SWSUP_SIDLE
	 */

	/*
	 * Errata: USB host EHCI may stall when entering smart-standby mode
	 * Id: i571
	 *
	 * Description:
	 * When the USBHOST module is set to smart-standby mode, and when it
	 * is ready to enter the standby state (i.e. all ports are suspended
	 * and all attached devices are in suspend mode), it can wrongly
	 * assert the Mstandby signal too early, while there are still some
	 * residual OCP transactions ongoing. If this condition occurs, the
	 * internal state machine may go to an undefined state and the USB
	 * link may be stuck upon the next resume.
	 *
	 * Workaround:
	 * Don't use smart standby; use only force standby,
	 * hence HWMOD_SWSUP_MSTANDBY
	 */

	/*
	 * During system boot, if the hwmod framework resets the module,
	 * the module will come up with smart-idle settings, which can lead
	 * to a deadlock (Errata Id: i660 above); so don't reset the module
	 * during boot; use HWMOD_INIT_NO_RESET.
*/ .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY | HWMOD_INIT_NO_RESET, }; /* * 'usb_tll_hs' class * usb_tll_hs module is the adapter on the usb_host_hs ports */ static struct omap_hwmod_class_sysconfig omap44xx_usb_tll_hs_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; static struct omap_hwmod_class omap44xx_usb_tll_hs_hwmod_class = { .name = "usb_tll_hs", .sysc = &omap44xx_usb_tll_hs_sysc, }; static struct omap_hwmod_irq_info omap44xx_usb_tll_hs_irqs[] = { { .name = "tll-irq", .irq = 78 + OMAP44XX_IRQ_GIC_START }, { .irq = -1 } }; static struct omap_hwmod_addr_space omap44xx_usb_tll_hs_addrs[] = { { .name = "tll", .pa_start = 0x4a062000, .pa_end = 0x4a063fff, .flags = ADDR_TYPE_RT }, {} }; static struct omap_hwmod_ocp_if omap44xx_l4_cfg__usb_tll_hs = { .master = &omap44xx_l4_cfg_hwmod, .slave = &omap44xx_usb_tll_hs_hwmod, .clk = "l4_div_ck", .addr = omap44xx_usb_tll_hs_addrs, .user = OCP_USER_MPU | OCP_USER_SDMA, }; static struct omap_hwmod_ocp_if *omap44xx_usb_tll_hs_slaves[] = { &omap44xx_l4_cfg__usb_tll_hs, }; static struct omap_hwmod omap44xx_usb_tll_hs_hwmod = { .name = "usb_tll_hs", .class = &omap44xx_usb_tll_hs_hwmod_class, .clkdm_name = "l3_init_clkdm", .main_clk = "usb_tll_hs_ick", .prcm = { .omap4 = { .clkctrl_offs = OMAP4_CM_L3INIT_USB_TLL_CLKCTRL_OFFSET, .context_offs = OMAP4_RM_L3INIT_USB_TLL_CONTEXT_OFFSET, .modulemode = MODULEMODE_HWCTRL, }, }, .mpu_irqs = omap44xx_usb_tll_hs_irqs, .slaves = omap44xx_usb_tll_hs_slaves, .slaves_cnt = ARRAY_SIZE(omap44xx_usb_tll_hs_slaves), }; static __initdata struct omap_hwmod *omap44xx_hwmods[] = { /* dmm class */ &omap44xx_dmm_hwmod, /* emif_fw class */ &omap44xx_emif_fw_hwmod, /* l3 class */ &omap44xx_l3_instr_hwmod, &omap44xx_l3_main_1_hwmod, &omap44xx_l3_main_2_hwmod, &omap44xx_l3_main_3_hwmod, /* l4 class */ &omap44xx_l4_abe_hwmod, &omap44xx_l4_cfg_hwmod, &omap44xx_l4_per_hwmod, &omap44xx_l4_wkup_hwmod, /* mpu_bus class */ &omap44xx_mpu_private_hwmod, /* aess class */ /* &omap44xx_aess_hwmod, */ /* bandgap class */ &omap44xx_bandgap_hwmod, /* counter class */ /* &omap44xx_counter_32k_hwmod, */ /* dma class */ &omap44xx_dma_system_hwmod, /* dmic class */ &omap44xx_dmic_hwmod, /* dsp class */ &omap44xx_dsp_hwmod, &omap44xx_dsp_c0_hwmod, /* dss class */ &omap44xx_dss_hwmod, &omap44xx_dss_dispc_hwmod, &omap44xx_dss_dsi1_hwmod, &omap44xx_dss_dsi2_hwmod, &omap44xx_dss_hdmi_hwmod, &omap44xx_dss_rfbi_hwmod, &omap44xx_dss_venc_hwmod, /* gpio class */ &omap44xx_gpio1_hwmod, &omap44xx_gpio2_hwmod, &omap44xx_gpio3_hwmod, &omap44xx_gpio4_hwmod, &omap44xx_gpio5_hwmod, &omap44xx_gpio6_hwmod, /* hsi class */ /* &omap44xx_hsi_hwmod, */ /* i2c class */ &omap44xx_i2c1_hwmod, &omap44xx_i2c2_hwmod, &omap44xx_i2c3_hwmod, &omap44xx_i2c4_hwmod, /* ipu class */ &omap44xx_ipu_hwmod, &omap44xx_ipu_c0_hwmod, &omap44xx_ipu_c1_hwmod, /* iss class */ /* &omap44xx_iss_hwmod, */ /* iva class */ &omap44xx_iva_hwmod, &omap44xx_iva_seq0_hwmod, &omap44xx_iva_seq1_hwmod, /* kbd class */ &omap44xx_kbd_hwmod, /* mailbox class */ &omap44xx_mailbox_hwmod, /* mcbsp class */ &omap44xx_mcbsp1_hwmod, &omap44xx_mcbsp2_hwmod, &omap44xx_mcbsp3_hwmod, &omap44xx_mcbsp4_hwmod, /* mcpdm class */ &omap44xx_mcpdm_hwmod, /* mcspi class */ &omap44xx_mcspi1_hwmod, &omap44xx_mcspi2_hwmod, &omap44xx_mcspi3_hwmod, &omap44xx_mcspi4_hwmod, /* mmc class */ 
&omap44xx_mmc1_hwmod, &omap44xx_mmc2_hwmod, &omap44xx_mmc3_hwmod, &omap44xx_mmc4_hwmod, &omap44xx_mmc5_hwmod, /* mpu class */ &omap44xx_mpu_hwmod, /* smartreflex class */ &omap44xx_smartreflex_core_hwmod, &omap44xx_smartreflex_iva_hwmod, &omap44xx_smartreflex_mpu_hwmod, /* spinlock class */ &omap44xx_spinlock_hwmod, /* timer class */ &omap44xx_timer1_hwmod, &omap44xx_timer2_hwmod, &omap44xx_timer3_hwmod, &omap44xx_timer4_hwmod, &omap44xx_timer5_hwmod, &omap44xx_timer6_hwmod, &omap44xx_timer7_hwmod, &omap44xx_timer8_hwmod, &omap44xx_timer9_hwmod, &omap44xx_timer10_hwmod, &omap44xx_timer11_hwmod, /* uart class */ &omap44xx_uart1_hwmod, &omap44xx_uart2_hwmod, &omap44xx_uart3_hwmod, &omap44xx_uart4_hwmod, /* usb host class */ &omap44xx_usb_host_hs_hwmod, &omap44xx_usb_tll_hs_hwmod, /* usb_otg_hs class */ &omap44xx_usb_otg_hs_hwmod, /* wd_timer class */ &omap44xx_wd_timer2_hwmod, &omap44xx_wd_timer3_hwmod, NULL, }; int __init omap44xx_hwmod_init(void) { return omap_hwmod_register(omap44xx_hwmods); }
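/*
 * Consumption sketch added for illustration (not part of the original
 * file): how code elsewhere in mach-omap2 typically uses the hwmod data
 * registered by omap44xx_hwmod_init() above. example_enable_mcspi1() is
 * a hypothetical caller; omap_hwmod_lookup() and omap_hwmod_enable() are
 * the real omap_hwmod API of this kernel generation (<plat/omap_hwmod.h>).
 */
static int __init example_enable_mcspi1(void)
{
	struct omap_hwmod *oh;

	/* Look up the hwmod by the .name field it was registered with */
	oh = omap_hwmod_lookup("mcspi1");
	if (!oh)
		return -ENODEV;

	/*
	 * Enable the module: the framework uses the .prcm, .main_clk and
	 * sysconfig data defined above to enable clocks and leave idle.
	 */
	return omap_hwmod_enable(oh);
}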
gpl-2.0
rutvik95/speedx_kernel_i9082
lib/cpumask.c
3628
4667
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/bootmem.h>

int __first_cpu(const cpumask_t *srcp)
{
	return min_t(int, NR_CPUS, find_first_bit(srcp->bits, NR_CPUS));
}
EXPORT_SYMBOL(__first_cpu);

int __next_cpu(int n, const cpumask_t *srcp)
{
	return min_t(int, NR_CPUS, find_next_bit(srcp->bits, NR_CPUS, n+1));
}
EXPORT_SYMBOL(__next_cpu);

#if NR_CPUS > 64
int __next_cpu_nr(int n, const cpumask_t *srcp)
{
	return min_t(int, nr_cpu_ids,
		     find_next_bit(srcp->bits, nr_cpu_ids, n+1));
}
EXPORT_SYMBOL(__next_cpu_nr);
#endif

int __any_online_cpu(const cpumask_t *mask)
{
	int cpu;

	for_each_cpu_mask(cpu, *mask) {
		if (cpu_online(cpu))
			break;
	}
	return cpu;
}
EXPORT_SYMBOL(__any_online_cpu);

/**
 * cpumask_next_and - get the next cpu in *src1p & *src2p
 * @n: the cpu prior to the place to search (i.e. return will be > @n)
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set in both.
 */
int cpumask_next_and(int n, const struct cpumask *src1p,
		     const struct cpumask *src2p)
{
	while ((n = cpumask_next(n, src1p)) < nr_cpu_ids)
		if (cpumask_test_cpu(n, src2p))
			break;
	return n;
}
EXPORT_SYMBOL(cpumask_next_and);

/**
 * cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
 * @mask: the cpumask to search
 * @cpu: the cpu to ignore.
 *
 * Often used to find any cpu but smp_processor_id() in a mask.
 * Returns >= nr_cpu_ids if no cpus set.
 */
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
{
	unsigned int i;

	cpumask_check(cpu);
	for_each_cpu(i, mask)
		if (i != cpu)
			break;
	return i;
}

/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node to allocate the cpumask on
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>)
 * Returns TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, mask will be NULL if this fails.  Note that gcc is
 * usually smart enough to know that mask can never be NULL if
 * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
 * too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	*mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (!*mask) {
		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
		dump_stack();
	}
#endif
	/* FIXME: Bandaid to save us from old primitives which go to NR_CPUS. */
	if (*mask) {
		unsigned char *ptr = (unsigned char *)cpumask_bits(*mask);
		unsigned int tail;
		tail = BITS_TO_LONGS(NR_CPUS - nr_cpumask_bits) * sizeof(long);
		memset(ptr + cpumask_size() - tail, 0, tail);
	}

	return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);

bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
}
EXPORT_SYMBOL(zalloc_cpumask_var_node);

/**
 * alloc_cpumask_var - allocate a struct cpumask
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 *
 * See alloc_cpumask_var_node.
 */
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var_node(mask, flags, numa_node_id());
}
EXPORT_SYMBOL(alloc_cpumask_var);

bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(zalloc_cpumask_var);

/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	*mask = alloc_bootmem(cpumask_size());
}

/**
 * free_cpumask_var - frees memory allocated for a struct cpumask.
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
	kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);

/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
	free_bootmem((unsigned long)mask, cpumask_size());
}
#endif
gpl-2.0
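A minimal usage sketch of the cpumask_var_t API that lib/cpumask.c above implements; the helper name count_online_subset() is illustrative only and not part of the file. With CONFIG_CPUMASK_OFFSTACK=y the mask is heap-allocated and allocation can fail; with it off, the same calls reduce to a stack mask that always "succeeds".

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Illustrative caller: allocate a scratch mask, intersect the source
 * with the online map, count the result, then free.  free_cpumask_var()
 * is documented above as safe on a NULL mask. */
static int count_online_subset(const struct cpumask *src)
{
	cpumask_var_t tmp;
	int n;

	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;	/* only reachable when CONFIG_CPUMASK_OFFSTACK=y */

	cpumask_and(tmp, src, cpu_online_mask);
	n = cpumask_weight(tmp);

	free_cpumask_var(tmp);
	return n;
}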
Motorhead1991/android_kernel_samsung_s5pv210
drivers/net/de600.c
4140
13294
static const char version[] = "de600.c: $Revision: 1.41-2.5 $, Bjorn Ekwall (bj0rn@blox.se)\n"; /* * de600.c * * Linux driver for the D-Link DE-600 Ethernet pocket adapter. * * Portions (C) Copyright 1993, 1994 by Bjorn Ekwall * The Author may be reached as bj0rn@blox.se * * Based on adapter information gathered from DE600.ASM by D-Link Inc., * as included on disk C in the v.2.11 of PC/TCP from FTP Software. * For DE600.asm: * Portions (C) Copyright 1990 D-Link, Inc. * Copyright, 1988-1992, Russell Nelson, Crynwr Software * * Adapted to the sample network driver core for linux, * written by: Donald Becker <becker@super.org> * (Now at <becker@scyld.com>) * **************************************************************/ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * **************************************************************/ /* Add more time here if your adapter won't work OK: */ #define DE600_SLOW_DOWN udelay(delay_time) #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/string.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <asm/system.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <asm/io.h> #include "de600.h" static unsigned int check_lost = 1; module_param(check_lost, bool, 0); MODULE_PARM_DESC(check_lost, "If set then check for unplugged de600"); static unsigned int delay_time = 10; module_param(delay_time, int, 0); MODULE_PARM_DESC(delay_time, "DE-600 deley on I/O in microseconds"); /* * D-Link driver variables: */ static volatile int rx_page; #define TX_PAGES 2 static volatile int tx_fifo[TX_PAGES]; static volatile int tx_fifo_in; static volatile int tx_fifo_out; static volatile int free_tx_pages = TX_PAGES; static int was_down; static DEFINE_SPINLOCK(de600_lock); static inline u8 de600_read_status(struct net_device *dev) { u8 status; outb_p(STATUS, DATA_PORT); status = inb(STATUS_PORT); outb_p(NULL_COMMAND | HI_NIBBLE, DATA_PORT); return status; } static inline u8 de600_read_byte(unsigned char type, struct net_device *dev) { /* dev used by macros */ u8 lo; outb_p((type), DATA_PORT); lo = ((unsigned char)inb(STATUS_PORT)) >> 4; outb_p((type) | HI_NIBBLE, DATA_PORT); return ((unsigned char)inb(STATUS_PORT) & (unsigned char)0xf0) | lo; } /* * Open/initialize the board. This is called (in the current kernel) * after booting when 'ifconfig <dev->name> $IP_ADDR' is run (in rc.inet1). * * This routine should set everything up anew at each open, even * registers that "should" only need to be set once at boot, so that * there is a non-reboot way to recover if something goes wrong. 
*/ static int de600_open(struct net_device *dev) { unsigned long flags; int ret = request_irq(DE600_IRQ, de600_interrupt, 0, dev->name, dev); if (ret) { printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name, DE600_IRQ); return ret; } spin_lock_irqsave(&de600_lock, flags); ret = adapter_init(dev); spin_unlock_irqrestore(&de600_lock, flags); return ret; } /* * The inverse routine to de600_open(). */ static int de600_close(struct net_device *dev) { select_nic(); rx_page = 0; de600_put_command(RESET); de600_put_command(STOP_RESET); de600_put_command(0); select_prn(); free_irq(DE600_IRQ, dev); return 0; } static inline void trigger_interrupt(struct net_device *dev) { de600_put_command(FLIP_IRQ); select_prn(); DE600_SLOW_DOWN; select_nic(); de600_put_command(0); } /* * Copy a buffer to the adapter transmit page memory. * Start sending. */ static int de600_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned long flags; int transmit_from; int len; int tickssofar; u8 *buffer = skb->data; int i; if (free_tx_pages <= 0) { /* Do timeouts, to avoid hangs. */ tickssofar = jiffies - dev_trans_start(dev); if (tickssofar < HZ/20) return NETDEV_TX_BUSY; /* else */ printk(KERN_WARNING "%s: transmit timed out (%d), %s?\n", dev->name, tickssofar, "network cable problem"); /* Restart the adapter. */ spin_lock_irqsave(&de600_lock, flags); if (adapter_init(dev)) { spin_unlock_irqrestore(&de600_lock, flags); return NETDEV_TX_BUSY; } spin_unlock_irqrestore(&de600_lock, flags); } /* Start real output */ pr_debug("de600_start_xmit:len=%d, page %d/%d\n", skb->len, tx_fifo_in, free_tx_pages); if ((len = skb->len) < RUNT) len = RUNT; spin_lock_irqsave(&de600_lock, flags); select_nic(); tx_fifo[tx_fifo_in] = transmit_from = tx_page_adr(tx_fifo_in) - len; tx_fifo_in = (tx_fifo_in + 1) % TX_PAGES; /* Next free tx page */ if(check_lost) { /* This costs about 40 instructions per packet... */ de600_setup_address(NODE_ADDRESS, RW_ADDR); de600_read_byte(READ_DATA, dev); if (was_down || (de600_read_byte(READ_DATA, dev) != 0xde)) { if (adapter_init(dev)) { spin_unlock_irqrestore(&de600_lock, flags); return NETDEV_TX_BUSY; } } } de600_setup_address(transmit_from, RW_ADDR); for (i = 0; i < skb->len ; ++i, ++buffer) de600_put_byte(*buffer); for (; i < len; ++i) de600_put_byte(0); if (free_tx_pages-- == TX_PAGES) { /* No transmission going on */ dev->trans_start = jiffies; netif_start_queue(dev); /* allow more packets into adapter */ /* Send page and generate a faked interrupt */ de600_setup_address(transmit_from, TX_ADDR); de600_put_command(TX_ENABLE); } else { if (free_tx_pages) netif_start_queue(dev); else netif_stop_queue(dev); select_prn(); } spin_unlock_irqrestore(&de600_lock, flags); dev_kfree_skb(skb); return NETDEV_TX_OK; } /* * The typical workload of the driver: * Handle the network interface interrupts. */ static irqreturn_t de600_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; u8 irq_status; int retrig = 0; int boguscount = 0; spin_lock(&de600_lock); select_nic(); irq_status = de600_read_status(dev); do { pr_debug("de600_interrupt (%02X)\n", irq_status); if (irq_status & RX_GOOD) de600_rx_intr(dev); else if (!(irq_status & RX_BUSY)) de600_put_command(RX_ENABLE); /* Any transmission in progress? 
*/ if (free_tx_pages < TX_PAGES) retrig = de600_tx_intr(dev, irq_status); else retrig = 0; irq_status = de600_read_status(dev); } while ( (irq_status & RX_GOOD) || ((++boguscount < 100) && retrig) ); /* * Yeah, it _looks_ like busy waiting, smells like busy waiting * and I know it's not PC, but please, it will only occur once * in a while and then only for a loop or so (< 1ms for sure!) */ /* Enable adapter interrupts */ select_prn(); if (retrig) trigger_interrupt(dev); spin_unlock(&de600_lock); return IRQ_HANDLED; } static int de600_tx_intr(struct net_device *dev, int irq_status) { /* * Returns 1 if tx still not done */ /* Check if current transmission is done yet */ if (irq_status & TX_BUSY) return 1; /* tx not done, try again */ /* else */ /* If last transmission OK then bump fifo index */ if (!(irq_status & TX_FAILED16)) { tx_fifo_out = (tx_fifo_out + 1) % TX_PAGES; ++free_tx_pages; dev->stats.tx_packets++; netif_wake_queue(dev); } /* More to send, or resend last packet? */ if ((free_tx_pages < TX_PAGES) || (irq_status & TX_FAILED16)) { dev->trans_start = jiffies; de600_setup_address(tx_fifo[tx_fifo_out], TX_ADDR); de600_put_command(TX_ENABLE); return 1; } /* else */ return 0; } /* * We have a good packet, get it out of the adapter. */ static void de600_rx_intr(struct net_device *dev) { struct sk_buff *skb; int i; int read_from; int size; unsigned char *buffer; /* Get size of received packet */ size = de600_read_byte(RX_LEN, dev); /* low byte */ size += (de600_read_byte(RX_LEN, dev) << 8); /* high byte */ size -= 4; /* Ignore trailing 4 CRC-bytes */ /* Tell adapter where to store next incoming packet, enable receiver */ read_from = rx_page_adr(); next_rx_page(); de600_put_command(RX_ENABLE); if ((size < 32) || (size > 1535)) { printk(KERN_WARNING "%s: Bogus packet size %d.\n", dev->name, size); if (size > 10000) adapter_init(dev); return; } skb = dev_alloc_skb(size+2); if (skb == NULL) { printk("%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, size); return; } /* else */ skb_reserve(skb,2); /* Align */ /* 'skb->data' points to the start of sk_buff data area. */ buffer = skb_put(skb,size); /* copy the packet into the buffer */ de600_setup_address(read_from, RW_ADDR); for (i = size; i > 0; --i, ++buffer) *buffer = de600_read_byte(READ_DATA, dev); skb->protocol=eth_type_trans(skb,dev); netif_rx(skb); /* update stats */ dev->stats.rx_packets++; /* count all receives */ dev->stats.rx_bytes += size; /* count all received bytes */ /* * If any worth-while packets have been received, netif_rx() * will work on them when we get to the tasklets. */ } static const struct net_device_ops de600_netdev_ops = { .ndo_open = de600_open, .ndo_stop = de600_close, .ndo_start_xmit = de600_start_xmit, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static struct net_device * __init de600_probe(void) { int i; struct net_device *dev; int err; dev = alloc_etherdev(0); if (!dev) return ERR_PTR(-ENOMEM); if (!request_region(DE600_IO, 3, "de600")) { printk(KERN_WARNING "DE600: port 0x%x busy\n", DE600_IO); err = -EBUSY; goto out; } printk(KERN_INFO "%s: D-Link DE-600 pocket adapter", dev->name); /* Alpha testers must have the version number to report bugs. 
*/ pr_debug("%s", version); /* probe for adapter */ err = -ENODEV; rx_page = 0; select_nic(); (void)de600_read_status(dev); de600_put_command(RESET); de600_put_command(STOP_RESET); if (de600_read_status(dev) & 0xf0) { printk(": not at I/O %#3x.\n", DATA_PORT); goto out1; } /* * Maybe we found one, * have to check if it is a D-Link DE-600 adapter... */ /* Get the adapter ethernet address from the ROM */ de600_setup_address(NODE_ADDRESS, RW_ADDR); for (i = 0; i < ETH_ALEN; i++) { dev->dev_addr[i] = de600_read_byte(READ_DATA, dev); dev->broadcast[i] = 0xff; } /* Check magic code */ if ((dev->dev_addr[1] == 0xde) && (dev->dev_addr[2] == 0x15)) { /* OK, install real address */ dev->dev_addr[0] = 0x00; dev->dev_addr[1] = 0x80; dev->dev_addr[2] = 0xc8; dev->dev_addr[3] &= 0x0f; dev->dev_addr[3] |= 0x70; } else { printk(" not identified in the printer port\n"); goto out1; } printk(", Ethernet Address: %pM\n", dev->dev_addr); dev->netdev_ops = &de600_netdev_ops; dev->flags&=~IFF_MULTICAST; select_prn(); err = register_netdev(dev); if (err) goto out1; return dev; out1: release_region(DE600_IO, 3); out: free_netdev(dev); return ERR_PTR(err); } static int adapter_init(struct net_device *dev) { int i; select_nic(); rx_page = 0; /* used by RESET */ de600_put_command(RESET); de600_put_command(STOP_RESET); /* Check if it is still there... */ /* Get the some bytes of the adapter ethernet address from the ROM */ de600_setup_address(NODE_ADDRESS, RW_ADDR); de600_read_byte(READ_DATA, dev); if ((de600_read_byte(READ_DATA, dev) != 0xde) || (de600_read_byte(READ_DATA, dev) != 0x15)) { /* was: if (de600_read_status(dev) & 0xf0) { */ printk("Something has happened to the DE-600! Please check it and do a new ifconfig!\n"); /* Goodbye, cruel world... */ dev->flags &= ~IFF_UP; de600_close(dev); was_down = 1; netif_stop_queue(dev); /* Transmit busy... */ return 1; /* failed */ } if (was_down) { printk(KERN_INFO "%s: Thanks, I feel much better now!\n", dev->name); was_down = 0; } tx_fifo_in = 0; tx_fifo_out = 0; free_tx_pages = TX_PAGES; /* set the ether address. */ de600_setup_address(NODE_ADDRESS, RW_ADDR); for (i = 0; i < ETH_ALEN; i++) de600_put_byte(dev->dev_addr[i]); /* where to start saving incoming packets */ rx_page = RX_BP | RX_BASE_PAGE; de600_setup_address(MEM_4K, RW_ADDR); /* Enable receiver */ de600_put_command(RX_ENABLE); select_prn(); netif_start_queue(dev); return 0; /* OK */ } static struct net_device *de600_dev; static int __init de600_init(void) { de600_dev = de600_probe(); if (IS_ERR(de600_dev)) return PTR_ERR(de600_dev); return 0; } static void __exit de600_exit(void) { unregister_netdev(de600_dev); release_region(DE600_IO, 3); free_netdev(de600_dev); } module_init(de600_init); module_exit(de600_exit); MODULE_LICENSE("GPL");
gpl-2.0
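A standalone restatement of the two-nibble read that de600_read_byte() above performs: the adapter returns each byte four bits at a time on the printer port's status lines, low data nibble first, then the high nibble after HI_NIBBLE is set. The two raw status-register reads are taken as inputs here, so this is an illustration of the bit surgery, not driver code.

/* lo_status and hi_status stand in for the two inb(STATUS_PORT) reads;
 * the useful data bits arrive in the top nibble of the status byte. */
static unsigned char de600_reassemble_byte(unsigned char lo_status,
					   unsigned char hi_status)
{
	unsigned char lo = lo_status >> 4;	/* first read: low data nibble */

	return (hi_status & 0xf0) | lo;		/* second read: high data nibble */
}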
CyanideL/android_kernel_lge_g3
arch/arm/mach-s5pv210/setup-spi.c
4908
1292
/* linux/arch/arm/mach-s5pv210/setup-spi.c
 *
 * Copyright (C) 2011 Samsung Electronics Ltd.
 *		http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/gpio.h>
#include <linux/platform_device.h>

#include <plat/gpio-cfg.h>
#include <plat/s3c64xx-spi.h>

#ifdef CONFIG_S3C64XX_DEV_SPI0
struct s3c64xx_spi_info s3c64xx_spi0_pdata = {
	.fifo_lvl_mask	= 0x1ff,
	.rx_lvl_offset	= 15,
	.high_speed	= 1,
	.tx_st_done	= 25,
};

int s3c64xx_spi0_cfg_gpio(struct platform_device *dev)
{
	s3c_gpio_cfgpin(S5PV210_GPB(0), S3C_GPIO_SFN(2));
	s3c_gpio_setpull(S5PV210_GPB(0), S3C_GPIO_PULL_UP);
	s3c_gpio_cfgall_range(S5PV210_GPB(2), 2,
			      S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
	return 0;
}
#endif

#ifdef CONFIG_S3C64XX_DEV_SPI1
struct s3c64xx_spi_info s3c64xx_spi1_pdata = {
	.fifo_lvl_mask	= 0x7f,
	.rx_lvl_offset	= 15,
	.high_speed	= 1,
	.tx_st_done	= 25,
};

int s3c64xx_spi1_cfg_gpio(struct platform_device *dev)
{
	s3c_gpio_cfgpin(S5PV210_GPB(4), S3C_GPIO_SFN(2));
	s3c_gpio_setpull(S5PV210_GPB(4), S3C_GPIO_PULL_UP);
	s3c_gpio_cfgall_range(S5PV210_GPB(6), 2,
			      S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
	return 0;
}
#endif
gpl-2.0
CM-Tab-S/android_kernel_samsung_chagallwifi
arch/arm/plat-versatile/sched-clock.c
5932
1251
/*
 * linux/arch/arm/plat-versatile/sched-clock.c
 *
 * Copyright (C) 1999 - 2003 ARM Limited
 * Copyright (C) 2000 Deep Blue Solutions Ltd
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/kernel.h>
#include <linux/io.h>

#include <asm/sched_clock.h>
#include <plat/sched_clock.h>

static void __iomem *ctr;

static u32 notrace versatile_read_sched_clock(void)
{
	if (ctr)
		return readl(ctr);

	return 0;
}

void __init versatile_sched_clock_init(void __iomem *reg, unsigned long rate)
{
	ctr = reg;
	setup_sched_clock(versatile_read_sched_clock, 32, rate);
}
gpl-2.0
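setup_sched_clock() above hands the generic sched_clock layer a 32-bit counter read and its rate; that layer (not part of this file) scales raw ticks to nanoseconds with a precomputed mult/shift pair chosen so that mult / 2^shift approximates 10^9 / rate. A rough sketch of that scaling, offered as an illustration under those assumptions rather than as the core's actual code:

#include <linux/types.h>

/* e.g. rate = 24 MHz gives mult/2^shift of roughly 41.67 ns per tick;
 * the core additionally extends the 32-bit value across wraparounds. */
static inline u64 sketch_cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}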
whdgmawkd/NindiKernel
arch/sh/kernel/hw_breakpoint.c
7212
8820
/* * arch/sh/kernel/hw_breakpoint.c * * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC. * * Copyright (C) 2009 - 2010 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/perf_event.h> #include <linux/hw_breakpoint.h> #include <linux/percpu.h> #include <linux/kallsyms.h> #include <linux/notifier.h> #include <linux/kprobes.h> #include <linux/kdebug.h> #include <linux/io.h> #include <linux/clk.h> #include <asm/hw_breakpoint.h> #include <asm/mmu_context.h> #include <asm/ptrace.h> #include <asm/traps.h> /* * Stores the breakpoints currently in use on each breakpoint address * register for each cpus */ static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]); /* * A dummy placeholder for early accesses until the CPUs get a chance to * register their UBCs later in the boot process. */ static struct sh_ubc ubc_dummy = { .num_events = 0 }; static struct sh_ubc *sh_ubc __read_mostly = &ubc_dummy; /* * Install a perf counter breakpoint. * * We seek a free UBC channel and use it for this breakpoint. * * Atomic: we hold the counter->ctx->lock and we only handle variables * and registers local to this cpu. */ int arch_install_hw_breakpoint(struct perf_event *bp) { struct arch_hw_breakpoint *info = counter_arch_bp(bp); int i; for (i = 0; i < sh_ubc->num_events; i++) { struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]); if (!*slot) { *slot = bp; break; } } if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot")) return -EBUSY; clk_enable(sh_ubc->clk); sh_ubc->enable(info, i); return 0; } /* * Uninstall the breakpoint contained in the given counter. * * First we search the debug address register it uses and then we disable * it. * * Atomic: we hold the counter->ctx->lock and we only handle variables * and registers local to this cpu. */ void arch_uninstall_hw_breakpoint(struct perf_event *bp) { struct arch_hw_breakpoint *info = counter_arch_bp(bp); int i; for (i = 0; i < sh_ubc->num_events; i++) { struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]); if (*slot == bp) { *slot = NULL; break; } } if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot")) return; sh_ubc->disable(info, i); clk_disable(sh_ubc->clk); } static int get_hbp_len(u16 hbp_len) { unsigned int len_in_bytes = 0; switch (hbp_len) { case SH_BREAKPOINT_LEN_1: len_in_bytes = 1; break; case SH_BREAKPOINT_LEN_2: len_in_bytes = 2; break; case SH_BREAKPOINT_LEN_4: len_in_bytes = 4; break; case SH_BREAKPOINT_LEN_8: len_in_bytes = 8; break; } return len_in_bytes; } /* * Check for virtual address in kernel space. 
*/ int arch_check_bp_in_kernelspace(struct perf_event *bp) { unsigned int len; unsigned long va; struct arch_hw_breakpoint *info = counter_arch_bp(bp); va = info->address; len = get_hbp_len(info->len); return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); } int arch_bp_generic_fields(int sh_len, int sh_type, int *gen_len, int *gen_type) { /* Len */ switch (sh_len) { case SH_BREAKPOINT_LEN_1: *gen_len = HW_BREAKPOINT_LEN_1; break; case SH_BREAKPOINT_LEN_2: *gen_len = HW_BREAKPOINT_LEN_2; break; case SH_BREAKPOINT_LEN_4: *gen_len = HW_BREAKPOINT_LEN_4; break; case SH_BREAKPOINT_LEN_8: *gen_len = HW_BREAKPOINT_LEN_8; break; default: return -EINVAL; } /* Type */ switch (sh_type) { case SH_BREAKPOINT_READ: *gen_type = HW_BREAKPOINT_R; case SH_BREAKPOINT_WRITE: *gen_type = HW_BREAKPOINT_W; break; case SH_BREAKPOINT_RW: *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R; break; default: return -EINVAL; } return 0; } static int arch_build_bp_info(struct perf_event *bp) { struct arch_hw_breakpoint *info = counter_arch_bp(bp); info->address = bp->attr.bp_addr; /* Len */ switch (bp->attr.bp_len) { case HW_BREAKPOINT_LEN_1: info->len = SH_BREAKPOINT_LEN_1; break; case HW_BREAKPOINT_LEN_2: info->len = SH_BREAKPOINT_LEN_2; break; case HW_BREAKPOINT_LEN_4: info->len = SH_BREAKPOINT_LEN_4; break; case HW_BREAKPOINT_LEN_8: info->len = SH_BREAKPOINT_LEN_8; break; default: return -EINVAL; } /* Type */ switch (bp->attr.bp_type) { case HW_BREAKPOINT_R: info->type = SH_BREAKPOINT_READ; break; case HW_BREAKPOINT_W: info->type = SH_BREAKPOINT_WRITE; break; case HW_BREAKPOINT_W | HW_BREAKPOINT_R: info->type = SH_BREAKPOINT_RW; break; default: return -EINVAL; } return 0; } /* * Validate the arch-specific HW Breakpoint register settings */ int arch_validate_hwbkpt_settings(struct perf_event *bp) { struct arch_hw_breakpoint *info = counter_arch_bp(bp); unsigned int align; int ret; ret = arch_build_bp_info(bp); if (ret) return ret; ret = -EINVAL; switch (info->len) { case SH_BREAKPOINT_LEN_1: align = 0; break; case SH_BREAKPOINT_LEN_2: align = 1; break; case SH_BREAKPOINT_LEN_4: align = 3; break; case SH_BREAKPOINT_LEN_8: align = 7; break; default: return ret; } /* * For kernel-addresses, either the address or symbol name can be * specified. */ if (info->name) info->address = (unsigned long)kallsyms_lookup_name(info->name); /* * Check that the low-order bits of the address are appropriate * for the alignment implied by len. */ if (info->address & align) return -EINVAL; return 0; } /* * Release the user breakpoints used by ptrace */ void flush_ptrace_hw_breakpoint(struct task_struct *tsk) { int i; struct thread_struct *t = &tsk->thread; for (i = 0; i < sh_ubc->num_events; i++) { unregister_hw_breakpoint(t->ptrace_bps[i]); t->ptrace_bps[i] = NULL; } } static int __kprobes hw_breakpoint_handler(struct die_args *args) { int cpu, i, rc = NOTIFY_STOP; struct perf_event *bp; unsigned int cmf, resume_mask; /* * Do an early return if none of the channels triggered. */ cmf = sh_ubc->triggered_mask(); if (unlikely(!cmf)) return NOTIFY_DONE; /* * By default, resume all of the active channels. */ resume_mask = sh_ubc->active_mask(); /* * Disable breakpoints during exception handling. */ sh_ubc->disable_all(); cpu = get_cpu(); for (i = 0; i < sh_ubc->num_events; i++) { unsigned long event_mask = (1 << i); if (likely(!(cmf & event_mask))) continue; /* * The counter may be concurrently released but that can only * occur from a call_rcu() path. 
We can then safely fetch * the breakpoint, use its callback, touch its counter * while we are in an rcu_read_lock() path. */ rcu_read_lock(); bp = per_cpu(bp_per_reg[i], cpu); if (bp) rc = NOTIFY_DONE; /* * Reset the condition match flag to denote completion of * exception handling. */ sh_ubc->clear_triggered_mask(event_mask); /* * bp can be NULL due to concurrent perf counter * removing. */ if (!bp) { rcu_read_unlock(); break; } /* * Don't restore the channel if the breakpoint is from * ptrace, as it always operates in one-shot mode. */ if (bp->overflow_handler == ptrace_triggered) resume_mask &= ~(1 << i); perf_bp_event(bp, args->regs); /* Deliver the signal to userspace */ if (!arch_check_bp_in_kernelspace(bp)) { siginfo_t info; info.si_signo = args->signr; info.si_errno = notifier_to_errno(rc); info.si_code = TRAP_HWBKPT; force_sig_info(args->signr, &info, current); } rcu_read_unlock(); } if (cmf == 0) rc = NOTIFY_DONE; sh_ubc->enable_all(resume_mask); put_cpu(); return rc; } BUILD_TRAP_HANDLER(breakpoint) { unsigned long ex = lookup_exception_vector(); TRAP_HANDLER_DECL; notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP); } /* * Handle debug exception notifications. */ int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data) { struct die_args *args = data; if (val != DIE_BREAKPOINT) return NOTIFY_DONE; /* * If the breakpoint hasn't been triggered by the UBC, it's * probably from a debugger, so don't do anything more here. * * This also permits the UBC interface clock to remain off for * non-UBC breakpoints, as we don't need to check the triggered * or active channel masks. */ if (args->trapnr != sh_ubc->trap_nr) return NOTIFY_DONE; return hw_breakpoint_handler(data); } void hw_breakpoint_pmu_read(struct perf_event *bp) { /* TODO */ } int register_sh_ubc(struct sh_ubc *ubc) { /* Bail if it's already assigned */ if (sh_ubc != &ubc_dummy) return -EBUSY; sh_ubc = ubc; pr_info("HW Breakpoints: %s UBC support registered\n", ubc->name); WARN_ON(ubc->num_events > HBP_NUM); return 0; }
gpl-2.0
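The length/alignment rule that arch_validate_hwbkpt_settings() above enforces reduces to "the address must be a multiple of the breakpoint length": align is set to len - 1 and any address with those low bits set is rejected. A minimal restatement, assuming (as the switch does) that the length is a power of two; the helper name is illustrative:

/* A 4-byte breakpoint at 0x1002 fails (0x1002 & 3 == 2); 0x1004 passes. */
static int bp_address_aligned(unsigned long addr, unsigned int len_bytes)
{
	return (addr & (len_bytes - 1)) == 0;
}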
MeltedButter/kernel_msm
arch/m32r/lib/delay.c
13612
2985
/*
 * linux/arch/m32r/lib/delay.c
 *
 * Copyright (c) 2002 Hitoshi Yamamoto, Hirokazu Takata
 * Copyright (c) 2004 Hirokazu Takata
 */

#include <linux/param.h>
#include <linux/module.h>
#ifdef CONFIG_SMP
#include <linux/sched.h>
#include <asm/current.h>
#include <asm/smp.h>
#endif  /* CONFIG_SMP */
#include <asm/processor.h>

void __delay(unsigned long loops)
{
#ifdef CONFIG_ISA_DUAL_ISSUE
	__asm__ __volatile__ (
		"beqz	%0, 2f			\n\t"
		"addi	%0, #-1			\n\t"
		" .fillinsn			\n\t"
		"1:				\n\t"
		"cmpz	%0 || addi %0, #-1	\n\t"
		"bc	2f || cmpz %0		\n\t"
		"bc	2f || addi %0, #-1	\n\t"
		"cmpz	%0 || addi %0, #-1	\n\t"
		"bc	2f || cmpz %0		\n\t"
		"bnc	1b || addi %0, #-1	\n\t"
		" .fillinsn			\n\t"
		"2:				\n\t"
		: "+r" (loops)
		: "r" (0)
		: "cbit"
	);
#else
	__asm__ __volatile__ (
		"beqz	%0, 2f			\n\t"
		" .fillinsn			\n\t"
		"1:				\n\t"
		"addi	%0, #-1			\n\t"
		"blez	%0, 2f			\n\t"
		"addi	%0, #-1			\n\t"
		"blez	%0, 2f			\n\t"
		"addi	%0, #-1			\n\t"
		"blez	%0, 2f			\n\t"
		"addi	%0, #-1			\n\t"
		"bgtz	%0, 1b			\n\t"
		" .fillinsn			\n\t"
		"2:				\n\t"
		: "+r" (loops)
		: "r" (0)
	);
#endif
}

void __const_udelay(unsigned long xloops)
{
#if defined(CONFIG_ISA_M32R2) && defined(CONFIG_ISA_DSP_LEVEL2)
	/*
	 * loops [1] = (xloops >> 32) [sec] * loops_per_jiffy [1/jiffy]
	 *             * HZ [jiffy/sec]
	 *           = (xloops >> 32) [sec] * (loops_per_jiffy * HZ) [1/sec]
	 *           = (((xloops * loops_per_jiffy) >> 32) * HZ) [1]
	 *
	 * NOTE:
	 *   - '[]' depicts variable's dimension in the above equation.
	 *   - "rac" instruction rounds the accumulator in word size.
	 */
	__asm__ __volatile__ (
		"srli	%0, #1				\n\t"
		"mulwhi	%0, %1	; a0			\n\t"
		"mulwu1	%0, %1	; a1			\n\t"
		"sadd		; a0 += (a1 >> 16)	\n\t"
		"rac	a0, a0, #1			\n\t"
		"mvfacmi %0, a0				\n\t"
		: "+r" (xloops)
		: "r" (current_cpu_data.loops_per_jiffy)
		: "a0", "a1"
	);
#elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R)
	/*
	 * u64 ull;
	 * ull = (u64)xloops * (u64)current_cpu_data.loops_per_jiffy;
	 * xloops = (ull >> 32);
	 */
	__asm__ __volatile__ (
		"and3	r4, %0, #0xffff		\n\t"
		"and3	r5, %1, #0xffff		\n\t"
		"mul	r4, r5			\n\t"
		"srl3	r6, %0, #16		\n\t"
		"srli	r4, #16			\n\t"
		"mul	r5, r6			\n\t"
		"add	r4, r5			\n\t"
		"and3	r5, %0, #0xffff		\n\t"
		"srl3	r6, %1, #16		\n\t"
		"mul	r5, r6			\n\t"
		"add	r4, r5			\n\t"
		"srl3	r5, %0, #16		\n\t"
		"srli	r4, #16			\n\t"
		"mul	r5, r6			\n\t"
		"add	r4, r5			\n\t"
		"mv	%0, r4			\n\t"
		: "+r" (xloops)
		: "r" (current_cpu_data.loops_per_jiffy)
		: "r4", "r5", "r6"
	);
#else
#error unknown isa configuration
#endif
	__delay(xloops * HZ);
}

void __udelay(unsigned long usecs)
{
	__const_udelay(usecs * 0x000010c7);  /* 2**32 / 1000000 (rounded up) */
}

void __ndelay(unsigned long nsecs)
{
	__const_udelay(nsecs * 0x00005);  /* 2**32 / 1000000000 (rounded up) */
}

EXPORT_SYMBOL(__delay);
EXPORT_SYMBOL(__const_udelay);
EXPORT_SYMBOL(__udelay);
EXPORT_SYMBOL(__ndelay);
gpl-2.0
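The multipliers in __udelay() and __ndelay() above are ceil(2^32 / 10^6) = 0x10c7 and ceil(2^32 / 10^9) = 0x5, which turn the argument into a 32.32 fixed-point fraction of a second. After __const_udelay() multiplies by loops_per_jiffy and drops the low 32 bits, the loop count handed to __delay() works out as in this plain-C restatement (a sketch that assumes the intermediate products fit in 64 bits; names are illustrative):

#include <linux/types.h>

/* loops = ((usecs * 2^32/1e6) * lpj) >> 32, then * HZ
 *       ~= usecs * (lpj * HZ) / 1e6, i.e. the loops-per-second figure
 *          scaled by the requested fraction of a second. */
static unsigned long sketch_udelay_loops(unsigned long usecs,
					 unsigned long lpj, unsigned long hz)
{
	u64 xloops = (u64)usecs * 0x000010c7;

	return (unsigned long)((xloops * lpj) >> 32) * hz;
}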
sancome/linux-3.x
arch/m68k/platform/5307/gpio.c
14892
1361
/*
 * Coldfire generic GPIO support
 *
 * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/init.h>

#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfgpio.h>

static struct mcf_gpio_chip mcf_gpio_chips[] = {
	{
		.gpio_chip			= {
			.label			= "PP",
			.request		= mcf_gpio_request,
			.free			= mcf_gpio_free,
			.direction_input	= mcf_gpio_direction_input,
			.direction_output	= mcf_gpio_direction_output,
			.get			= mcf_gpio_get_value,
			.set			= mcf_gpio_set_value,
			.ngpio			= 16,
		},
		.pddr				= (void __iomem *) MCFSIM_PADDR,
		.podr				= (void __iomem *) MCFSIM_PADAT,
		.ppdr				= (void __iomem *) MCFSIM_PADAT,
	},
};

static int __init mcf_gpio_init(void)
{
	unsigned i = 0;
	while (i < ARRAY_SIZE(mcf_gpio_chips))
		(void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]);
	return 0;
}

core_initcall(mcf_gpio_init);
gpl-2.0
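Once mcf_gpio_init() above has registered the chip, consumers reach its request/free and get/set hooks through the generic gpiolib entry points. A minimal consumer sketch; the line number 3 and the label are arbitrary examples, not taken from this file:

#include <linux/gpio.h>

static int pp_line_demo(void)
{
	int err = gpio_request(3, "pp-demo");	/* routed to mcf_gpio_request */

	if (err)
		return err;

	gpio_direction_output(3, 1);	/* drive the line high... */
	gpio_set_value(3, 0);		/* ...then low */
	gpio_free(3);			/* routed to mcf_gpio_free */
	return 0;
}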
hackthis02/xbmc
xbmc/windows/GUIWindowSplash.cpp
45
1911
/*
 *      Copyright (C) 2015 Team XBMC
 *      http://xbmc.org
 *
 *  This Program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This Program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with XBMC; see the file COPYING.  If not, see
 *  <http://www.gnu.org/licenses/>.
 *
 */

#include "GUIWindowSplash.h"
#include "filesystem/File.h"
#include "filesystem/SpecialProtocol.h"
#include "guilib/GUIImage.h"
#include "guilib/GUIWindowManager.h"
#include "utils/log.h"

CGUIWindowSplash::CGUIWindowSplash(void) : CGUIWindow(WINDOW_SPLASH, "")
{
  m_loadType = LOAD_ON_GUI_INIT;
  m_image = nullptr;
}

CGUIWindowSplash::~CGUIWindowSplash(void)
{
}

void CGUIWindowSplash::OnInitWindow()
{
  std::string splashImage = "special://home/media/Splash.png";
  if (!XFILE::CFile::Exists(splashImage))
    splashImage = "special://xbmc/media/Splash.png";

  CLog::Log(LOGINFO, "load splash image: %s", CSpecialProtocol::TranslatePath(splashImage).c_str());

  m_image = std::unique_ptr<CGUIImage>(new CGUIImage(0, 0, 0, 0, g_graphicsContext.GetWidth(), g_graphicsContext.GetHeight(), CTextureInfo(splashImage)));
  m_image->SetAspectRatio(CAspectRatio::AR_SCALE);
}

void CGUIWindowSplash::Render()
{
  g_graphicsContext.SetRenderingResolution(g_graphicsContext.GetResInfo(), true);
  m_image->SetWidth(g_graphicsContext.GetWidth());
  m_image->SetHeight(g_graphicsContext.GetHeight());
  m_image->AllocResources();
  m_image->Render();
  m_image->FreeResources();
}
gpl-2.0
chhapil/Dorimanx-SG2-I9100-Kernel
drivers/media/video/s5c73m3.c
45
83705
/* * driver for LSI S5C73M3 (ISP for 8MP Camera) * * Copyright (c) 2011, Samsung Electronics. All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. */ #include <linux/i2c.h> #include <linux/init.h> #include <media/v4l2-device.h> #include <linux/delay.h> #include <linux/vmalloc.h> #include <linux/firmware.h> #include <linux/videodev2.h> #include <linux/unistd.h> #include <plat/gpio-cfg.h> #include <linux/gpio.h> #define S5C73M3_BUSFREQ_OPP #ifdef S5C73M3_BUSFREQ_OPP #include <mach/dev.h> #include <plat/cpu.h> #endif #ifdef CONFIG_VIDEO_SAMSUNG_V4L2 #include <linux/videodev2_exynos_media.h> #include <linux/videodev2_exynos_camera.h> #endif #include <linux/regulator/machine.h> #ifdef CONFIG_LEDS_AAT1290A #include <linux/leds-aat1290a.h> #endif #include <media/s5c73m3_platform.h> #include "s5c73m3.h" #define S5C73M3_DRIVER_NAME "S5C73M3" extern struct class *camera_class; /*sys/class/camera*/ struct device *s5c73m3_dev; /*sys/class/camera/rear*/ struct v4l2_subdev *sd_internal; #ifdef S5C73M3_BUSFREQ_OPP struct device *bus_dev; #endif /*#define S5C73M3_FROM_BOOTING*/ #define S5C73M3_CORE_VDD "/data/ISP_CV" #define S5C73M3_FW_PATH "/sdcard/SlimISP.bin" #define S5C73M3_FW_VER_LEN 6 #define S5C73M3_FW_VER_FILE_CUR 0x60 #define S5C73M3_I2C_RETRY 5 #define CHECK_ERR(x) if ((x) < 0) { \ cam_err("i2c failed, err %d\n", x); \ return x; \ } struct s5c73m3_fw_version camfw_info[S5C73M3_PATH_MAX]; static const struct s5c73m3_frmsizeenum preview_frmsizes[] = { { S5C73M3_PREVIEW_QVGA, 320, 240, 0x01 }, { S5C73M3_PREVIEW_CIF, 352, 288, 0x0E }, { S5C73M3_PREVIEW_VGA, 640, 480, 0x02 }, { S5C73M3_PREVIEW_880X720, 880, 720, 0x03 }, { S5C73M3_PREVIEW_960X720, 960, 720, 0x04 }, { S5C73M3_PREVIEW_1008X672, 1008, 672, 0x0F }, { S5C73M3_PREVIEW_1184X666, 1184, 666, 0x05 }, { S5C73M3_PREVIEW_720P, 1280, 720, 0x06 }, #ifdef CONFIG_MACH_T0 { S5C73M3_PREVIEW_1280X960, 1280, 960, 0x09 }, #else { S5C73M3_PREVIEW_800X600, 800, 600, 0x09 }, #endif { S5C73M3_VDIS_720P, 1536, 864, 0x07 }, { S5C73M3_PREVIEW_1080P, 1920, 1080, 0x0A}, { S5C73M3_VDIS_1080P, 2304, 1296, 0x0C}, { S5C73M3_PREVIEW_D1, 720, 480, 0x0B }, }; static const struct s5c73m3_frmsizeenum capture_frmsizes[] = { { S5C73M3_CAPTURE_VGA, 640, 480, 0x10 }, { S5C73M3_CAPTURE_960x540, 960, 540, 0x20 }, { S5C73M3_CAPTURE_960x720, 960, 720, 0x30 }, { S5C73M3_CAPTURE_1024X768, 1024, 768, 0xD0 }, { S5C73M3_CAPTURE_HD, 1280, 720, 0x40 }, { S5C73M3_CAPTURE_2MP, 1600, 1200, 0x70 }, { S5C73M3_CAPTURE_W2MP, 2048, 1152, 0x80 }, { S5C73M3_CAPTURE_3MP, 2048, 1536, 0x90 }, { S5C73M3_CAPTURE_5MP, 2560, 1920, 0xB0 }, { S5C73M3_CAPTURE_W6MP, 3264, 1836, 0xE0 }, { S5C73M3_CAPTURE_3264X2176, 3264, 2176, 0xC0 }, { S5C73M3_CAPTURE_8MP, 3264, 2448, 0xF0 }, }; static const struct s5c73m3_effectenum s5c73m3_effects[] = { {IMAGE_EFFECT_NONE, S5C73M3_IMAGE_EFFECT_NONE}, {IMAGE_EFFECT_NEGATIVE, S5C73M3_IMAGE_EFFECT_NEGATIVE}, {IMAGE_EFFECT_AQUA, S5C73M3_IMAGE_EFFECT_AQUA}, {IMAGE_EFFECT_SEPIA, S5C73M3_IMAGE_EFFECT_SEPIA}, {IMAGE_EFFECT_BNW, S5C73M3_IMAGE_EFFECT_MONO}, {IMAGE_EFFECT_SKETCH, S5C73M3_IMAGE_EFFECT_SKETCH}, {IMAGE_EFFECT_WASHED, S5C73M3_IMAGE_EFFECT_WASHED}, {IMAGE_EFFECT_VINTAGE_WARM, 
S5C73M3_IMAGE_EFFECT_VINTAGE_WARM}, {IMAGE_EFFECT_VINTAGE_COLD, S5C73M3_IMAGE_EFFECT_VINTAGE_COLD}, {IMAGE_EFFECT_SOLARIZE, S5C73M3_IMAGE_EFFECT_SOLARIZE}, {IMAGE_EFFECT_POSTERIZE, S5C73M3_IMAGE_EFFECT_POSTERIZE}, {IMAGE_EFFECT_POINT_BLUE, S5C73M3_IMAGE_EFFECT_POINT_BLUE}, {IMAGE_EFFECT_POINT_RED_YELLOW, S5C73M3_IMAGE_EFFECT_POINT_RED_YELLOW}, {IMAGE_EFFECT_POINT_COLOR_3, S5C73M3_IMAGE_EFFECT_POINT_COLOR_3}, {IMAGE_EFFECT_POINT_GREEN, S5C73M3_IMAGE_EFFECT_POINT_GREEN}, {IMAGE_EFFECT_CARTOONIZE, S5C73M3_IMAGE_EFFECT_CARTOONIZE}, }; static struct s5c73m3_control s5c73m3_ctrls[] = { { .id = V4L2_CID_CAMERA_ISO, .minimum = ISO_AUTO, .maximum = ISO_800, .step = 1, .value = ISO_AUTO, .default_value = ISO_AUTO, }, { .id = V4L2_CID_CAMERA_BRIGHTNESS, .minimum = EV_MINUS_4, .maximum = EV_MAX - 1, .step = 1, .value = EV_DEFAULT, .default_value = EV_DEFAULT, }, { .id = V4L2_CID_CAMERA_SATURATION, .minimum = SATURATION_MINUS_2, .maximum = SATURATION_MAX - 1, .step = 1, .value = SATURATION_DEFAULT, .default_value = SATURATION_DEFAULT, }, { .id = V4L2_CID_CAMERA_SHARPNESS, .minimum = SHARPNESS_MINUS_2, .maximum = SHARPNESS_MAX - 1, .step = 1, .value = SHARPNESS_DEFAULT, .default_value = SHARPNESS_DEFAULT, }, { .id = V4L2_CID_CAMERA_ZOOM, .minimum = ZOOM_LEVEL_0, .maximum = ZOOM_LEVEL_MAX - 1, .step = 1, .value = ZOOM_LEVEL_0, .default_value = ZOOM_LEVEL_0, }, { .id = V4L2_CID_CAM_JPEG_QUALITY, .minimum = 1, .maximum = 100, .step = 1, .value = 100, .default_value = 100, }, }; static u8 sysfs_sensor_fw[10] = {0,}; static u8 sysfs_phone_fw[10] = {0,}; static u8 sysfs_sensor_type[15] = {0,}; static u8 sysfs_isp_core[10] = {0,}; static u8 data_memory[500000] = {0,}; static u32 crc_table[256] = {0,}; static int copied_fw_binary; static u16 isp_chip_info1; static u16 isp_chip_info2; static u16 isp_chip_info3; static int s5c73m3_s_stream_sensor(struct v4l2_subdev *sd, int onoff); static int s5c73m3_set_touch_auto_focus(struct v4l2_subdev *sd); static int s5c73m3_SPI_booting(struct v4l2_subdev *sd); static int s5c73m3_get_af_cal_version(struct v4l2_subdev *sd); static int s5c73m3_set_timing_register_for_vdd(struct v4l2_subdev *sd); static inline struct s5c73m3_state *to_state(struct v4l2_subdev *sd) { return container_of(sd, struct s5c73m3_state, sd); } static int s5c73m3_i2c_write(struct v4l2_subdev *sd, unsigned short addr, unsigned short data) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct i2c_msg msg; unsigned char buf[4]; int i, err; if (!client->adapter) return -ENODEV; msg.addr = client->addr; msg.flags = 0; msg.len = sizeof(buf); msg.buf = buf; buf[0] = addr >> 8; buf[1] = addr & 0xff; buf[2] = data >> 8; buf[3] = data & 0xff; cam_i2c_dbg("addr %#x, data %#x\n", addr, data); for (i = S5C73M3_I2C_RETRY; i; i--) { err = i2c_transfer(client->adapter, &msg, 1); if (err == 1) break; msleep(20); } return err; } static int s5c73m3_i2c_write_block(struct v4l2_subdev *sd, const u32 regs[], int size) { int i, err = 0; for (i = 0; i < size; i++) { err = s5c73m3_i2c_write(sd, (regs[i]>>16), regs[i]); CHECK_ERR(err); } return err; } static int s5c73m3_i2c_read(struct v4l2_subdev *sd, unsigned short addr, unsigned short *data) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct i2c_msg msg; unsigned char buf[2]; int i, err; if (!client->adapter) return -ENODEV; msg.addr = client->addr; msg.flags = 0; msg.len = sizeof(buf); msg.buf = buf; buf[0] = addr >> 8; buf[1] = addr & 0xff; for (i = S5C73M3_I2C_RETRY; i; i--) { err = i2c_transfer(client->adapter, &msg, 1); if (err == 1) break; msleep(20); 
} if (err != 1) { cam_err("addr %#x\n", addr); return err; } msg.flags = I2C_M_RD; for (i = S5C73M3_I2C_RETRY; i; i--) { err = i2c_transfer(client->adapter, &msg, 1); if (err == 1) break; msleep(20); } if (err != 1) { cam_err("addr %#x\n", addr); return err; } *data = ((buf[0] << 8) | buf[1]); return err; } static int s5c73m3_write(struct v4l2_subdev *sd, unsigned short addr1, unsigned short addr2, unsigned short data) { int err; err = s5c73m3_i2c_write(sd, 0x0050, addr1); CHECK_ERR(err); err = s5c73m3_i2c_write(sd, 0x0054, addr2); CHECK_ERR(err); err = s5c73m3_i2c_write(sd, 0x0F14, data); CHECK_ERR(err); return err; } static int s5c73m3_read(struct v4l2_subdev *sd, unsigned short addr1, unsigned short addr2, unsigned short *data) { int err; err = s5c73m3_i2c_write(sd, 0xfcfc, 0x3310); CHECK_ERR(err); err = s5c73m3_i2c_write(sd, 0x0058, addr1); CHECK_ERR(err); err = s5c73m3_i2c_write(sd, 0x005C, addr2); CHECK_ERR(err); err = s5c73m3_i2c_read(sd, 0x0F14, data); CHECK_ERR(err); return err; } static int s5c73m3_i2c_check_status_with_CRC(struct v4l2_subdev *sd) { int err = 0; int index = 0; u16 status = 0; u16 i2c_status = 0; u16 i2c_seq_status = 0; do { err = s5c73m3_read(sd, 0x0009, S5C73M3_STATUS, &status); err = s5c73m3_read(sd, 0x0009, S5C73M3_I2C_ERR_STATUS, &i2c_status); if (i2c_status & ERROR_STATUS_CHECK_BIN_CRC) { cam_dbg("failed to check CRC value of ISP Ram\n"); err = -1; break; } if (status == 0xffff) break; index++; udelay(500); } while (index < 2000); /* 1 sec */ if (index >= 2000) { err = s5c73m3_read(sd, 0x0009, S5C73M3_I2C_ERR_STATUS, &i2c_status); err = s5c73m3_read(sd, 0x0009, S5C73M3_I2C_SEQ_STATUS, &i2c_seq_status); cam_dbg("TimeOut!! index:%d,status:%#x,i2c_stauts:%#x,i2c_seq_status:%#x\n", index, status, i2c_status, i2c_seq_status); err = -1; } return err; } static int s5c73m3_i2c_check_status(struct v4l2_subdev *sd) { int err = 0; int index = 0; u16 status = 0; u16 i2c_status = 0; u16 i2c_seq_status = 0; do { err = s5c73m3_read(sd, 0x0009, S5C73M3_STATUS, &status); if (status == 0xffff) break; index++; udelay(500); } while (index < 2000); /* 1 sec */ if (index >= 2000) { err = s5c73m3_read(sd, 0x0009, S5C73M3_I2C_ERR_STATUS, &i2c_status); err = s5c73m3_read(sd, 0x0009, S5C73M3_I2C_SEQ_STATUS, &i2c_seq_status); cam_dbg("TimeOut!! 
index:%d,status:%#x,i2c_status:%#x,i2c_seq_status:%#x\n",
			index, status, i2c_status, i2c_seq_status);
		err = -1;
	}

	return err;
}

void s5c73m3_make_CRC_table(u32 *table, u32 id)
{
	u32 i, j, k;

	for (i = 0; i < 256; ++i) {
		k = i;
		for (j = 0; j < 8; ++j) {
			if (k & 1)
				k = (k >> 1) ^ id;
			else
				k >>= 1;
		}
		table[i] = k;
	}
}

static int s5c73m3_reset_module(struct v4l2_subdev *sd, bool powerReset)
{
	struct s5c73m3_state *state = to_state(sd);
	int err = 0;

	cam_trace("E\n");

	if (powerReset) {
		err = state->pdata->power_on_off(0);
		CHECK_ERR(err);
		err = state->pdata->power_on_off(1);
		CHECK_ERR(err);
	} else {
		err = state->pdata->is_isp_reset();
		CHECK_ERR(err);
	}
	err = s5c73m3_set_timing_register_for_vdd(sd);
	CHECK_ERR(err);

	cam_trace("X\n");
	return err;
}

static int s5c73m3_writeb(struct v4l2_subdev *sd, unsigned short addr,
	unsigned short data)
{
	int err;

	err = s5c73m3_i2c_check_status(sd);
	CHECK_ERR(err);

	err = s5c73m3_i2c_write(sd, 0x0050, 0x0009);
	CHECK_ERR(err);
	err = s5c73m3_i2c_write(sd, 0x0054, 0x5000);
	CHECK_ERR(err);
	err = s5c73m3_i2c_write(sd, 0x0F14, addr);
	CHECK_ERR(err);
	err = s5c73m3_i2c_write(sd, 0x0F14, data);
	CHECK_ERR(err);
	err = s5c73m3_i2c_write(sd, 0x0054, 0x5080);
	CHECK_ERR(err);
	err = s5c73m3_i2c_write(sd, 0x0F14, 0x0001);
	CHECK_ERR(err);

	return err;
}

static int s5c73m3_set_mode(struct v4l2_subdev *sd)
{
	struct s5c73m3_state *state = to_state(sd);
	int err;

	cam_trace("E\n");
	if (state->format_mode != V4L2_PIX_FMT_MODE_CAPTURE) {
		if (state->hdr_mode || state->yuv_snapshot) {
			err = s5c73m3_writeb(sd, S5C73M3_IMG_OUTPUT,
				S5C73M3_HDR_OUTPUT);
			CHECK_ERR(err);
			cam_dbg("hdr output mode\n");
		} else {
			err = s5c73m3_writeb(sd, S5C73M3_IMG_OUTPUT,
				S5C73M3_YUV_OUTPUT);
			CHECK_ERR(err);
			cam_dbg("yuv output mode\n");
		}
	} else {
		if (state->hybrid_mode) {
			err = s5c73m3_writeb(sd, S5C73M3_IMG_OUTPUT,
				S5C73M3_HYBRID_OUTPUT);
			CHECK_ERR(err);
			cam_dbg("hybrid output mode\n");
		} else {
			err = s5c73m3_writeb(sd, S5C73M3_IMG_OUTPUT,
				S5C73M3_INTERLEAVED_OUTPUT);
			CHECK_ERR(err);
			cam_dbg("interleaved output mode\n");
		}
	}
	cam_trace("X\n");
	return 0;
}

/*
 * v4l2_subdev_core_ops
 */
static int s5c73m3_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(s5c73m3_ctrls); i++) {
		if (qc->id == s5c73m3_ctrls[i].id) {
			qc->maximum = s5c73m3_ctrls[i].maximum;
			qc->minimum = s5c73m3_ctrls[i].minimum;
			qc->step = s5c73m3_ctrls[i].step;
			qc->default_value = s5c73m3_ctrls[i].default_value;
			return 0;
		}
	}

	return -EINVAL;
}

static int s5c73m3_set_antibanding(struct v4l2_subdev *sd, int val)
{
	int err = 0;
	int antibanding_mode = 0;

	switch (val) {
	case ANTI_BANDING_OFF:
		antibanding_mode = S5C73M3_FLICKER_NONE;
		break;
	case ANTI_BANDING_50HZ:
		antibanding_mode = S5C73M3_FLICKER_AUTO_50HZ;
		break;
	case ANTI_BANDING_60HZ:
		antibanding_mode = S5C73M3_FLICKER_AUTO_60HZ;
		break;
	case ANTI_BANDING_AUTO:
	default:
		antibanding_mode = S5C73M3_FLICKER_AUTO;
		break;
	}

	err = s5c73m3_writeb(sd, S5C73M3_FLICKER_MODE, antibanding_mode);
	CHECK_ERR(err);

	return err;
}

static int s5c73m3_set_af_softlanding(struct v4l2_subdev *sd)
{
	int err = 0;

	cam_trace("E\n");

	err = s5c73m3_writeb(sd, S5C73M3_AF_SOFTLANDING,
		S5C73M3_AF_SOFTLANDING_ON);
	CHECK_ERR(err);

	cam_trace("X\n");
	return 0;
}

static int s5c73m3_dump_fw(struct v4l2_subdev *sd)
{
	return 0;
}

static int s5c73m3_get_sensor_fw_binary(struct v4l2_subdev *sd)
{
	struct s5c73m3_state *state = to_state(sd);
	u16 read_val;
	int i, rxSize;
	int err = 0;
	struct file *fp = NULL;
	mm_segment_t old_fs;
	long ret = 0;
	char fw_path[25] = {0,};
	u8 mem0 = 0, mem1 = 0;
	u32 CRC = 0;
	u32 DataCRC = 0;
	u32 IntOriginalCRC = 0;
	u32 crc_index = 0;
	int retryCnt = 2;

#ifdef CONFIG_MACH_T0
	if (state->sensor_fw[1] == 'D') {
		sprintf(fw_path, "/data/cfw/SlimISP_%cK.bin",
			state->sensor_fw[0]);
	} else {
		sprintf(fw_path, "/data/cfw/SlimISP_%c%c.bin",
			state->sensor_fw[0],
			state->sensor_fw[1]);
	}
#else
	if (state->sensor_fw[0] == 'O') {
		sprintf(fw_path, "/data/cfw/SlimISP_G%c.bin",
			state->sensor_fw[1]);
	} else if (state->sensor_fw[0] == 'S') {
		sprintf(fw_path, "/data/cfw/SlimISP_Z%c.bin",
			state->sensor_fw[1]);
	} else {
		sprintf(fw_path, "/data/cfw/SlimISP_%c%c.bin",
			state->sensor_fw[0],
			state->sensor_fw[1]);
	}
#endif

	/* Make CRC Table */
	s5c73m3_make_CRC_table((u32 *)&crc_table, 0xEDB88320);

	/* ARM go */
	err = s5c73m3_write(sd, 0x3000, 0x0004, 0xFFFF);
	CHECK_ERR(err);

	udelay(400);

	/* Check boot done */
	for (i = 0; i < 3; i++) {
		err = s5c73m3_read(sd, 0x3010, 0x0010, &read_val);
		CHECK_ERR(err);

		if (read_val == 0x0C)
			break;

		udelay(100);
	}

	if (read_val != 0x0C) {
		cam_err("boot fail, read_val %#x\n", read_val);
		return -1;
	}

	/* Change I/O Driver Current in order to read from F-ROM */
	err = s5c73m3_write(sd, 0x3010, 0x0120, 0x0820);
	CHECK_ERR(err);
	err = s5c73m3_write(sd, 0x3010, 0x0124, 0x0820);
	CHECK_ERR(err);

	/* P,M,S and Boot Mode */
	err = s5c73m3_write(sd, 0x3010, 0x0014, 0x2146);
	CHECK_ERR(err);
	err = s5c73m3_write(sd, 0x3010, 0x0010, 0x230C);
	CHECK_ERR(err);

	udelay(200);

	/* Check SPI ready */
	for (i = 0; i < 300; i++) {
		err = s5c73m3_read(sd, 0x3010, 0x0010, &read_val);
		CHECK_ERR(err);

		if (read_val == 0x230E)
			break;

		udelay(100);
	}

	if (read_val != 0x230E) {
		cam_err("SPI not ready, read_val %#x\n", read_val);
		return -1;
	}

	/* ARM reset */
	err = s5c73m3_write(sd, 0x3000, 0x0004, 0xFFFD);
	CHECK_ERR(err);

	/* remap */
	err = s5c73m3_write(sd, 0x3010, 0x00A4, 0x0183);
	CHECK_ERR(err);

	/* ARM go again */
	err = s5c73m3_write(sd, 0x3000, 0x0004, 0xFFFF);
	CHECK_ERR(err);

	mdelay(200);

retry:
	memset(data_memory, 0, sizeof(data_memory));
	mem0 = 0, mem1 = 0;
	CRC = 0;
	DataCRC = 0;
	IntOriginalCRC = 0;
	crc_index = 0;

	/* SPI Copy mode ready I2C CMD */
	err = s5c73m3_writeb(sd, 0x0924, 0x0000);
	CHECK_ERR(err);
	cam_dbg("sent SPI ready CMD\n");

	rxSize = 64*1024;
	mdelay(10);
	s5c73m3_i2c_check_status(sd);

	err = s5c73m3_spi_read((char *)&data_memory,
		state->sensor_size, rxSize);
	CHECK_ERR(err);

	/*
	 * Reflected CRC-32 (poly 0xEDB88320) over the image, excluding the
	 * last four bytes, which hold the expected CRC.
	 */
	CRC = ~CRC;
	for (crc_index = 0;
		crc_index < (state->sensor_size-4)/2; crc_index++) {
		/* low byte */
		mem0 = (unsigned char)(data_memory[crc_index*2] & 0x00ff);
		/* high byte */
		mem1 = (unsigned char)(data_memory[crc_index*2+1] & 0x00ff);
		CRC = crc_table[(CRC ^ (mem0)) & 0xFF] ^ (CRC >> 8);
		CRC = crc_table[(CRC ^ (mem1)) & 0xFF] ^ (CRC >> 8);
	}
	CRC = ~CRC;

	/* byte-swap the computed CRC to match the stored byte order */
	DataCRC = (CRC&0x000000ff)<<24;
	DataCRC += (CRC&0x0000ff00)<<8;
	DataCRC += (CRC&0x00ff0000)>>8;
	DataCRC += (CRC&0xff000000)>>24;
	cam_err("made CRC value by S/W = 0x%x\n", DataCRC);

	IntOriginalCRC = (data_memory[state->sensor_size-4]&0x00ff)<<24;
	IntOriginalCRC += (data_memory[state->sensor_size-3]&0x00ff)<<16;
	IntOriginalCRC += (data_memory[state->sensor_size-2]&0x00ff)<<8;
	IntOriginalCRC += (data_memory[state->sensor_size-1]&0x00ff);
	cam_err("Original CRC Int = 0x%x\n", IntOriginalCRC);

	old_fs = get_fs();
	set_fs(KERNEL_DS);

	if (IntOriginalCRC == DataCRC) {
		fp = filp_open(fw_path, O_WRONLY|O_CREAT|O_TRUNC, 0644);
		if (IS_ERR(fp) || fp == NULL) {
			cam_err("failed to open %s, err %ld\n",
				fw_path, PTR_ERR(fp));
			err = -EINVAL;
			goto out;
		}

		ret = vfs_write(fp, (char __user *)data_memory,
			state->sensor_size, &fp->f_pos);

		if (camfw_info[S5C73M3_SD_CARD].opened == 0) {
			memcpy(state->phone_fw,
				state->sensor_fw,
				S5C73M3_FW_VER_LEN);
			state->phone_fw[S5C73M3_FW_VER_LEN+1] = '\0';
			memcpy(sysfs_phone_fw,
				state->phone_fw,
				sizeof(state->phone_fw));
			cam_dbg("Changed to Phone_version = %s\n",
				state->phone_fw);
		}
	} else {
		if (retryCnt > 0) {
			set_fs(old_fs);
			retryCnt--;
			goto retry;
		}
	}

	if (fp != NULL)
		filp_close(fp, current->files);

out:
	set_fs(old_fs);
	return err;
}

static int s5c73m3_get_sensor_fw_version(struct v4l2_subdev *sd)
{
	struct s5c73m3_state *state = to_state(sd);
	u16 read_val;
	u16 sensor_fw;
	u16 sensor_type;
	u16 temp_buf;
	int i;
	int err = 0;

	/* ARM go */
	err = s5c73m3_write(sd, 0x3000, 0x0004, 0xFFFF);
	CHECK_ERR(err);

	udelay(400);

	/* Check boot done */
	for (i = 0; i < 3; i++) {
		err = s5c73m3_read(sd, 0x3010, 0x0010, &read_val);
		CHECK_ERR(err);

		if (read_val == 0x0C)
			break;

		udelay(100);
	}

	if (read_val != 0x0C) {
		cam_err("boot fail, read_val %#x\n", read_val);
		return -1;
	}

	/* Change I/O Driver Current in order to read from F-ROM */
	err = s5c73m3_write(sd, 0x3010, 0x0120, 0x0820);
	CHECK_ERR(err);
	err = s5c73m3_write(sd, 0x3010, 0x0124, 0x0820);
	CHECK_ERR(err);

	/* Offset Setting */
	err = s5c73m3_write(sd, 0x0001, 0x0418, 0x0008);
	CHECK_ERR(err);

	/* P,M,S and Boot Mode */
	err = s5c73m3_write(sd, 0x3010, 0x0014, 0x2146);
	CHECK_ERR(err);
	err = s5c73m3_write(sd, 0x3010, 0x0010, 0x230C);
	CHECK_ERR(err);

	udelay(200);

	/* Check SPI ready */
	for (i = 0; i < 300; i++) {
		err = s5c73m3_read(sd, 0x3010, 0x0010, &read_val);
		CHECK_ERR(err);

		if (read_val == 0x230E)
			break;

		udelay(100);
	}

	if (read_val != 0x230E) {
		cam_err("SPI not ready, read_val %#x\n", read_val);
		return -1;
	}

	/* ARM reset */
	err = s5c73m3_write(sd, 0x3000, 0x0004, 0xFFFD);
	CHECK_ERR(err);

	/* remap */
	err = s5c73m3_write(sd, 0x3010, 0x00A4, 0x0183);
	CHECK_ERR(err);

	for (i = 0; i < 5; i++) {
		err = s5c73m3_read(sd, 0x0000, 0x06+i*2, &sensor_type);
		CHECK_ERR(err);
		state->sensor_type[i*2] = sensor_type&0x00ff;
		state->sensor_type[i*2+1] = (sensor_type&0xff00)>>8;
#ifdef FEATURE_DEBUG_DUMP
		cam_err("0x%x\n", sensor_type);
#endif
	}
	state->sensor_type[i*2+2] = '\0';

	for (i = 0; i < 3; i++) {
		err = s5c73m3_read(sd, 0x0000, i*2, &sensor_fw);
		CHECK_ERR(err);
		state->sensor_fw[i*2] = sensor_fw&0x00ff;
		state->sensor_fw[i*2+1] = (sensor_fw&0xff00)>>8;
#ifdef FEATURE_DEBUG_DUMP
		cam_err("0x%x\n", sensor_fw);
#endif
	}
	state->sensor_fw[i*2+2] = '\0';

	state->sensor_size = 0;
	for (i = 0; i < 2; i++) {
		err = s5c73m3_read(sd, 0x0000, 0x0014+i*2, &temp_buf);
		state->sensor_size += temp_buf<<(i*16);
		CHECK_ERR(err);
	}

	memcpy(sysfs_sensor_fw, state->sensor_fw,
		sizeof(state->sensor_fw));
	memcpy(sysfs_sensor_type, state->sensor_type,
		sizeof(state->sensor_type));

	cam_dbg("Sensor_version = %s, Sensor_Type = %s\n",
		state->sensor_fw, state->sensor_type);

	if ((state->sensor_fw[0] < 'A') || state->sensor_fw[0] > 'Z') {
		cam_dbg("Sensor Version is invalid data\n");
#ifdef FEATURE_DEBUG_DUMP
		cam_err("0000h : ");
		for (i = 0; i < 0x20; i++) {
			err = s5c73m3_read(sd, 0x0000, i*2, &sensor_fw);
			cam_err("%x", sensor_fw);
			if (i == 0x10)
				cam_err("\n 0010h : ");
		}
		mdelay(50);
		memcpy(state->sensor_type, state->sensor_fw,
			0x100000); /* for kernel panic */
#endif
		err = -1;
	}

	return err;
}

static int s5c73m3_open_firmware_file(struct v4l2_subdev *sd,
	const char *filename, u8 *buf, u16 offset, u16 size)
{
	struct file *fp;
	int err = 0;
	mm_segment_t old_fs;
	long nread;

	old_fs = get_fs();
	set_fs(KERNEL_DS);

	fp = filp_open(filename, O_RDONLY, 0);
	if (IS_ERR(fp)) {
		err = -ENOENT;
		goto out;
	} else {
		cam_dbg("%s is opened\n", filename);
	}

	err = vfs_llseek(fp, offset, SEEK_SET);
	if (err < 0) {
		cam_warn("failed to fseek, %d\n", err);
		goto out;
	}

	nread = vfs_read(fp, (char __user *)buf, size, &fp->f_pos);
	if (nread != size) {
		cam_err("failed to read firmware file, %ld Bytes\n", nread);
		err = -EIO;
		goto out;
	}

out:
	if (!IS_ERR(fp))
		filp_close(fp, current->files);

	set_fs(old_fs);

	return err;
}

static int s5c73m3_compare_date(struct v4l2_subdev *sd,
	int index1, int index2)
{
	u8 date1[5] = {0,};
	u8 date2[5] = {0,};

	strncpy((char *)&date1, &camfw_info[index1].ver[2], 4);
	strncpy((char *)&date2, &camfw_info[index2].ver[2], 4);
	cam_dbg("date1 = %s, date2 = %s, compare result = %d\n",
		date1, date2,
		strcmp((char *)&date1, (char *)&date2));

	return strcmp((char *)&date1, (char *)&date2);
}

static int s5c73m3_get_phone_fw_version(struct v4l2_subdev *sd)
{
	struct device *dev = sd->v4l2_dev->dev;
	struct s5c73m3_state *state = to_state(sd);
	const struct firmware *fw = {0, };
	char fw_path[20] = {0,};
	char fw_path_in_data[25] = {0,};
	u8 *buf = NULL;
	int err = 0;
	int retVal = 0;
	int fw_requested = 1;

#ifdef CONFIG_MACH_T0
	if (state->sensor_fw[1] == 'D') {
		sprintf(fw_path, "SlimISP_%cK.bin",
			state->sensor_fw[0]);
	} else {
		sprintf(fw_path, "SlimISP_%c%c.bin",
			state->sensor_fw[0],
			state->sensor_fw[1]);
	}
#else
	if (state->sensor_fw[0] == 'O') {
		sprintf(fw_path, "SlimISP_G%c.bin",
			state->sensor_fw[1]);
	} else if (state->sensor_fw[0] == 'S') {
		sprintf(fw_path, "SlimISP_Z%c.bin",
			state->sensor_fw[1]);
	} else {
		sprintf(fw_path, "SlimISP_%c%c.bin",
			state->sensor_fw[0],
			state->sensor_fw[1]);
	}
#endif
	sprintf(fw_path_in_data, "/data/cfw/%s", fw_path);

	buf = vmalloc(S5C73M3_FW_VER_LEN+1);
	if (!buf) {
		cam_err("failed to allocate memory\n");
		err = -ENOMEM;
		goto out;
	}

	retVal = s5c73m3_open_firmware_file(sd,
		S5C73M3_FW_PATH,
		buf,
		S5C73M3_FW_VER_FILE_CUR,
		S5C73M3_FW_VER_LEN);
	if (retVal >= 0) {
		camfw_info[S5C73M3_SD_CARD].opened = 1;
		memcpy(camfw_info[S5C73M3_SD_CARD].ver,
			buf,
			S5C73M3_FW_VER_LEN);
		camfw_info[S5C73M3_SD_CARD]
			.ver[S5C73M3_FW_VER_LEN+1] = '\0';
		state->fw_index = S5C73M3_SD_CARD;
		fw_requested = 0;
	}

request_fw:
	if (fw_requested) {
		/* check fw in data folder */
		retVal = s5c73m3_open_firmware_file(sd,
			fw_path_in_data,
			buf,
			S5C73M3_FW_VER_FILE_CUR,
			S5C73M3_FW_VER_LEN);
		if (retVal >= 0) {
			camfw_info[S5C73M3_IN_DATA].opened = 1;
			memcpy(camfw_info[S5C73M3_IN_DATA].ver,
				buf,
				S5C73M3_FW_VER_LEN);
			camfw_info[S5C73M3_IN_DATA]
				.ver[S5C73M3_FW_VER_LEN+1] = '\0';
		}

		/* check fw in system folder */
		retVal = request_firmware(&fw, fw_path, dev);
		if (retVal == 0) {
			camfw_info[S5C73M3_IN_SYSTEM].opened = 1;
			memcpy(camfw_info[S5C73M3_IN_SYSTEM].ver,
				(u8 *)&fw->data[S5C73M3_FW_VER_FILE_CUR],
				S5C73M3_FW_VER_LEN);
		}

		/* compare */
		if (camfw_info[S5C73M3_IN_DATA].opened == 0 &&
			camfw_info[S5C73M3_IN_SYSTEM].opened == 1) {
			state->fw_index = S5C73M3_IN_SYSTEM;
		} else if (camfw_info[S5C73M3_IN_DATA].opened == 1 &&
			camfw_info[S5C73M3_IN_SYSTEM].opened == 0) {
			state->fw_index = S5C73M3_IN_DATA;
		} else if (camfw_info[S5C73M3_IN_DATA].opened == 1 &&
			camfw_info[S5C73M3_IN_SYSTEM].opened == 1) {
			retVal = s5c73m3_compare_date(sd,
				S5C73M3_IN_DATA,
				S5C73M3_IN_SYSTEM);
			if (retVal <= 0) {
				/*unlink(&fw_path_in_data);*/
				state->fw_index = S5C73M3_IN_SYSTEM;
			} else {
				state->fw_index = S5C73M3_IN_DATA;
			}
		} else {
			cam_dbg("can't open %s Ver. Firmware. so, download from F-ROM\n",
				state->sensor_fw);
			if (fw != NULL)
				release_firmware(fw);

			retVal = s5c73m3_reset_module(sd, true);
			CHECK_ERR(retVal);
			retVal = s5c73m3_get_sensor_fw_binary(sd);
			CHECK_ERR(retVal);
			copied_fw_binary = 1;
			goto request_fw;
		}
	}

	if (!copied_fw_binary) {
		memcpy(state->phone_fw,
			camfw_info[state->fw_index].ver,
			S5C73M3_FW_VER_LEN);
		state->phone_fw[S5C73M3_FW_VER_LEN+1] = '\0';
	}

	memcpy(sysfs_phone_fw,
		state->phone_fw,
		sizeof(state->phone_fw));
	cam_dbg("Phone_version = %s(index=%d)\n",
		state->phone_fw, state->fw_index);

out:
	if (buf != NULL)
		vfree(buf);

	if (fw_requested)
		release_firmware(fw);

	return err;
}

static int s5c73m3_update_camerafw_to_FROM(struct v4l2_subdev *sd)
{
	int err;
	int index = 0;
	u16 status = 0;

	do {
		/* status 0 : ISP not ready */
		if (status == 0) {
			err = s5c73m3_writeb(sd, 0x0906, 0x0000);
			CHECK_ERR(err);
		}

		err = s5c73m3_read(sd, 0x0009, 0x5906, &status);
		/* Success : 0x05, Fail : 0x07, Progressing : 0xFFFF */
		if (status == 0x0005 ||
			status == 0x0007)
			break;

		index++;
		msleep(20);
	} while (index < 500);	/* 10 sec */

	if (status == 0x0007)
		return -1;
	else
		return 0;
}

static int s5c73m3_SPI_booting_by_ISP(struct v4l2_subdev *sd)
{
	u16 read_val;
	int i;
	int err = 0;

	/* ARM go */
	err = s5c73m3_write(sd, 0x3000, 0x0004, 0xFFFF);
	CHECK_ERR(err);

	udelay(400);

	/* Check boot done */
	for (i = 0; i < 3; i++) {
		err = s5c73m3_read(sd, 0x3010, 0x0010, &read_val);
		CHECK_ERR(err);

		if (read_val == 0x0C)
			break;

		udelay(100);
	}

	if (read_val != 0x0C) {
		cam_err("boot fail, read_val %#x\n", read_val);
		return -1;
	}

	/* Change I/O Driver Current in order to read from F-ROM */
	err = s5c73m3_write(sd, 0x3010, 0x0120, 0x0820);
	CHECK_ERR(err);
	err = s5c73m3_write(sd, 0x3010, 0x0124, 0x0820);
	CHECK_ERR(err);

	/* P,M,S and Boot Mode */
	err = s5c73m3_write(sd, 0x3010, 0x0014, 0x2146);
	CHECK_ERR(err);
	err = s5c73m3_write(sd, 0x3010, 0x0010, 0x230C);
	CHECK_ERR(err);

	udelay(200);

	/* Check SPI ready */
	for (i = 0; i < 300; i++) {
		err = s5c73m3_read(sd, 0x3010, 0x0010, &read_val);
		CHECK_ERR(err);

		if (read_val == 0x230E)
			break;

		udelay(100);
	}

	if (read_val != 0x230E) {
		cam_err("SPI not ready, read_val %#x\n", read_val);
		return -1;
	}

	/* ARM reset */
	err = s5c73m3_write(sd, 0x3000, 0x0004, 0xFFFD);
	CHECK_ERR(err);

	/* remap */
	err = s5c73m3_write(sd, 0x3010, 0x00A4, 0x0183);
	CHECK_ERR(err);

	/* ARM go */
	err = s5c73m3_write(sd, 0x3000, 0x0004, 0xFFFF);
	CHECK_ERR(err);

	return err;
}

static int s5c73m3_check_fw_date(struct v4l2_subdev *sd)
{
	struct s5c73m3_state *state = to_state(sd);
	u8 sensor_date[5] = {0,};
	u8 phone_date[5] = {0,};

	strncpy((char *)&sensor_date, &state->sensor_fw[2], 4);
	strncpy((char *)&phone_date, (const char *)&state->phone_fw[2], 4);
	cam_dbg("Sensor_date = %s, Phone_date = %s, compare result = %d\n",
		sensor_date, phone_date,
		strcmp((char *)&sensor_date, (char *)&phone_date));

#ifdef CONFIG_MACH_T0
	if (state->sensor_fw[1] == 'D')
		return -1;
	else
		return strcmp((char *)&sensor_date, (char *)&phone_date);
#else
	return strcmp((char *)&sensor_date, (char *)&phone_date);
#endif
}

static int s5c73m3_check_fw(struct v4l2_subdev *sd, int download)
{
	int err, i;
	int retVal;

	copied_fw_binary = 0;

	if (!download) {
		for (i = 0; i < S5C73M3_PATH_MAX; i++)
			camfw_info[i].opened = 0;

		err = s5c73m3_get_sensor_fw_version(sd);
		CHECK_ERR(err);
		s5c73m3_get_af_cal_version(sd);
		err = s5c73m3_get_phone_fw_version(sd);
		CHECK_ERR(err);
	}

	retVal = s5c73m3_check_fw_date(sd);

	/*
	 * retVal = 0 : same version
	 * retVal < 0 : phone FW is newer than the sensor FW
	 * retVal > 0 : sensor FW is newer than the phone FW
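	 * (From the branch below: on a tie the phone image is preferred
	 * and loaded over SPI.)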
*/ if (retVal <= 0 || download) { cam_dbg("Loading From PhoneFW......\n"); /* In case that there is no FW in phone and FW needs to be downloaded from F-ROM, ISP power reset is required before loading FW to ISP for F-ROM to work properly.*/ if (copied_fw_binary) err = s5c73m3_reset_module(sd, true); else err = s5c73m3_reset_module(sd, false); CHECK_ERR(err); err = s5c73m3_SPI_booting(sd); CHECK_ERR(err); if (download) { err = s5c73m3_update_camerafw_to_FROM(sd); CHECK_ERR(err); } } else { cam_dbg("Loading From SensorFW......\n"); err = s5c73m3_reset_module(sd, true); CHECK_ERR(err); err = s5c73m3_get_sensor_fw_binary(sd); CHECK_ERR(err); } return 0; } static int s5c73m3_set_sensor_mode(struct v4l2_subdev *sd, int val) { struct s5c73m3_state *state = to_state(sd); int err; cam_dbg("E, value %d\n", val); retry: switch (val) { case SENSOR_CAMERA: err = s5c73m3_writeb(sd, S5C73M3_AE_MODE, S5C73M3_AUTO_MODE_AE_SET); CHECK_ERR(err); break; case SENSOR_MOVIE: err = s5c73m3_writeb(sd, S5C73M3_AE_MODE, S5C73M3_FIXED_30FPS); CHECK_ERR(err); break; default: cam_warn("invalid value, %d\n", val); val = SENSOR_CAMERA; goto retry; } state->sensor_mode = val; cam_trace("X\n"); return 0; } static int s5c73m3_set_flash(struct v4l2_subdev *sd, int val, int recording) { struct s5c73m3_state *state = to_state(sd); int err; u16 pre_flash = false; cam_dbg("E, value %d\n", val); s5c73m3_read(sd, 0x0009, S5C73M3_STILL_PRE_FLASH | 0x5000, &pre_flash); if (pre_flash) { err = s5c73m3_writeb(sd, S5C73M3_STILL_MAIN_FLASH , S5C73M3_STILL_MAIN_FLASH_CANCEL); CHECK_ERR(err); state->isflash = S5C73M3_ISNEED_FLASH_UNDEFINED; } retry: switch (val) { case FLASH_MODE_OFF: err = s5c73m3_writeb(sd, S5C73M3_FLASH_MODE, S5C73M3_FLASH_MODE_OFF); CHECK_ERR(err); err = s5c73m3_writeb(sd, S5C73M3_FLASH_TORCH, S5C73M3_FLASH_TORCH_OFF); CHECK_ERR(err); break; case FLASH_MODE_AUTO: err = s5c73m3_writeb(sd, S5C73M3_FLASH_TORCH, S5C73M3_FLASH_TORCH_OFF); CHECK_ERR(err); err = s5c73m3_writeb(sd, S5C73M3_FLASH_MODE, S5C73M3_FLASH_MODE_AUTO); CHECK_ERR(err); break; case FLASH_MODE_ON: err = s5c73m3_writeb(sd, S5C73M3_FLASH_TORCH, S5C73M3_FLASH_TORCH_OFF); CHECK_ERR(err); err = s5c73m3_writeb(sd, S5C73M3_FLASH_MODE, S5C73M3_FLASH_MODE_ON); CHECK_ERR(err); break; case FLASH_MODE_TORCH: err = s5c73m3_writeb(sd, S5C73M3_FLASH_MODE, S5C73M3_FLASH_MODE_OFF); CHECK_ERR(err); err = s5c73m3_writeb(sd, S5C73M3_FLASH_TORCH, S5C73M3_FLASH_TORCH_ON); CHECK_ERR(err); break; default: cam_warn("invalid value, %d\n", val); val = FLASH_MODE_OFF; goto retry; } state->flash_mode = val; cam_trace("X\n"); return 0; } static int s5c73m3_set_iso(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { int err; cam_dbg("E, value %d\n", ctrl->value); retry: switch (ctrl->value) { case ISO_AUTO: err = s5c73m3_writeb(sd, S5C73M3_ISO, S5C73M3_ISO_AUTO); CHECK_ERR(err); break; case ISO_100: err = s5c73m3_writeb(sd, S5C73M3_ISO, S5C73M3_ISO_100); CHECK_ERR(err); break; case ISO_200: err = s5c73m3_writeb(sd, S5C73M3_ISO, S5C73M3_ISO_200); CHECK_ERR(err); break; case ISO_400: err = s5c73m3_writeb(sd, S5C73M3_ISO, S5C73M3_ISO_400); CHECK_ERR(err); break; case ISO_800: err = s5c73m3_writeb(sd, S5C73M3_ISO, S5C73M3_ISO_800); CHECK_ERR(err); break; default: cam_warn("invalid value, %d\n", ctrl->value); ctrl->value = ISO_AUTO; goto retry; } cam_trace("X\n"); return 0; } static int s5c73m3_set_metering(struct v4l2_subdev *sd, int val) { int err; cam_dbg("E, value %d\n", val); retry: switch (val) { case METERING_CENTER: err = s5c73m3_writeb(sd, S5C73M3_METER, S5C73M3_METER_CENTER); 
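/* CHECK_ERR() is assumed to return early on a negative status; it follows
   every ISP access in this driver, so one failed write aborts the whole
   sequence. */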
CHECK_ERR(err); break; case METERING_SPOT: err = s5c73m3_writeb(sd, S5C73M3_METER, S5C73M3_METER_SPOT); CHECK_ERR(err); break; case METERING_MATRIX: err = s5c73m3_writeb(sd, S5C73M3_METER, S5C73M3_METER_AVERAGE); CHECK_ERR(err); break; default: cam_warn("invalid value, %d\n", val); val = METERING_CENTER; goto retry; } cam_trace("X\n"); return 0; } static int s5c73m3_set_exposure(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { int err; cam_dbg("E, value %d\n", ctrl->value); if (ctrl->value < -4 || ctrl->value > 4) { cam_warn("invalid value, %d\n", ctrl->value); ctrl->value = 0; } err = s5c73m3_writeb(sd, S5C73M3_EV, ctrl->value + 4); CHECK_ERR(err); cam_trace("X\n"); return 0; } static int s5c73m3_set_contrast(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { int err; int contrast = 0; int temp_contrast = 0; cam_dbg("E, value %d\n", ctrl->value); if (ctrl->value < 0 || ctrl->value > 4) { cam_warn("invalid value, %d\n", ctrl->value); ctrl->value = 2; } temp_contrast = ctrl->value - 2; if (temp_contrast < 0) contrast = (temp_contrast * (-1)) + 2; else contrast = temp_contrast; err = s5c73m3_writeb(sd, S5C73M3_CONTRAST, contrast); CHECK_ERR(err); cam_trace("X\n"); return 0; } static int s5c73m3_set_sharpness(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { int err; int sharpness = 0; int temp_sharpness = 0; cam_dbg("E, value %d\n", ctrl->value); if (ctrl->value < 0 || ctrl->value > 4) { cam_warn("invalid value, %d\n", ctrl->value); ctrl->value = 2; } temp_sharpness = ctrl->value - 2; if (temp_sharpness < 0) sharpness = (temp_sharpness * (-1)) + 2; else sharpness = temp_sharpness; err = s5c73m3_writeb(sd, S5C73M3_SHARPNESS, sharpness); CHECK_ERR(err); cam_trace("X\n"); return 0; } static int s5c73m3_set_whitebalance(struct v4l2_subdev *sd, int val) { struct s5c73m3_state *state = to_state(sd); int err; cam_dbg("E, value %d\n", val); retry: switch (val) { case WHITE_BALANCE_AUTO: err = s5c73m3_writeb(sd, S5C73M3_AWB_MODE, S5C73M3_AWB_MODE_AUTO); CHECK_ERR(err); break; case WHITE_BALANCE_SUNNY: err = s5c73m3_writeb(sd, S5C73M3_AWB_MODE, S5C73M3_AWB_MODE_DAYLIGHT); CHECK_ERR(err); break; case WHITE_BALANCE_CLOUDY: err = s5c73m3_writeb(sd, S5C73M3_AWB_MODE, S5C73M3_AWB_MODE_CLOUDY); CHECK_ERR(err); break; case WHITE_BALANCE_TUNGSTEN: err = s5c73m3_writeb(sd, S5C73M3_AWB_MODE, S5C73M3_AWB_MODE_INCANDESCENT); CHECK_ERR(err); break; case WHITE_BALANCE_FLUORESCENT: err = s5c73m3_writeb(sd, S5C73M3_AWB_MODE, S5C73M3_AWB_MODE_FLUORESCENT1); CHECK_ERR(err); break; default: cam_warn("invalid value, %d\n", val); val = WHITE_BALANCE_AUTO; goto retry; } state->wb_mode = val; cam_trace("X\n"); return 0; } static int s5c73m3_set_scene_mode(struct v4l2_subdev *sd, int val) { struct s5c73m3_state *state = to_state(sd); int err; cam_dbg("E, value %d\n", val); retry: switch (val) { case SCENE_MODE_NONE: err = s5c73m3_writeb(sd, S5C73M3_SCENE_MODE, S5C73M3_SCENE_MODE_NONE); CHECK_ERR(err); break; case SCENE_MODE_PORTRAIT: err = s5c73m3_writeb(sd, S5C73M3_SCENE_MODE, S5C73M3_SCENE_MODE_PORTRAIT); CHECK_ERR(err); break; case SCENE_MODE_LANDSCAPE: err = s5c73m3_writeb(sd, S5C73M3_SCENE_MODE, S5C73M3_SCENE_MODE_LANDSCAPE); CHECK_ERR(err); break; case SCENE_MODE_SPORTS: err = s5c73m3_writeb(sd, S5C73M3_SCENE_MODE, S5C73M3_SCENE_MODE_SPORTS); CHECK_ERR(err); break; case SCENE_MODE_PARTY_INDOOR: err = s5c73m3_writeb(sd, S5C73M3_SCENE_MODE, S5C73M3_SCENE_MODE_INDOOR); CHECK_ERR(err); break; case SCENE_MODE_BEACH_SNOW: err = s5c73m3_writeb(sd, S5C73M3_SCENE_MODE, S5C73M3_SCENE_MODE_BEACH); CHECK_ERR(err); break; 
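/* The remaining scene modes map one-to-one onto S5C73M3_SCENE_MODE_*
   command values; an unrecognized value is reset to SCENE_MODE_NONE by the
   default label below and retried. */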
case SCENE_MODE_SUNSET: err = s5c73m3_writeb(sd, S5C73M3_SCENE_MODE, S5C73M3_SCENE_MODE_SUNSET); CHECK_ERR(err); break; case SCENE_MODE_DUSK_DAWN: err = s5c73m3_writeb(sd, S5C73M3_SCENE_MODE, S5C73M3_SCENE_MODE_DAWN); CHECK_ERR(err); break; case SCENE_MODE_FALL_COLOR: err = s5c73m3_writeb(sd, S5C73M3_SCENE_MODE, S5C73M3_SCENE_MODE_FALL); CHECK_ERR(err); break; case SCENE_MODE_NIGHTSHOT: err = s5c73m3_writeb(sd, S5C73M3_SCENE_MODE, S5C73M3_SCENE_MODE_NIGHT); CHECK_ERR(err); break; case SCENE_MODE_BACK_LIGHT: err = s5c73m3_writeb(sd, S5C73M3_SCENE_MODE, S5C73M3_SCENE_MODE_AGAINSTLIGHT); CHECK_ERR(err); break; case SCENE_MODE_FIREWORKS: err = s5c73m3_writeb(sd, S5C73M3_SCENE_MODE, S5C73M3_SCENE_MODE_FIRE); CHECK_ERR(err); break; case SCENE_MODE_TEXT: err = s5c73m3_writeb(sd, S5C73M3_SCENE_MODE, S5C73M3_SCENE_MODE_TEXT); CHECK_ERR(err); break; case SCENE_MODE_CANDLE_LIGHT: err = s5c73m3_writeb(sd, S5C73M3_SCENE_MODE, S5C73M3_SCENE_MODE_CANDLE); CHECK_ERR(err); break; case SCENE_MODE_LOW_LIGHT: err = s5c73m3_writeb(sd, S5C73M3_SCENE_MODE, S5C73M3_SCENE_MODE_LOW_LIGHT); CHECK_ERR(err); break; default: cam_warn("invalid value, %d\n", val); val = SCENE_MODE_NONE; goto retry; } state->scene_mode = val; cam_trace("X\n"); return 0; } static int s5c73m3_capture_firework(struct v4l2_subdev *sd) { int err = 0; cam_dbg("E, capture_firework\n"); err = s5c73m3_writeb(sd, S5C73M3_FIREWORK_CAPTURE, 0x0001); CHECK_ERR(err); return err; } static int s5c73m3_capture_nightshot(struct v4l2_subdev *sd) { int err = 0; cam_dbg("E, capture_nightshot\n"); err = s5c73m3_writeb(sd, S5C73M3_NIGHTSHOT_CAPTURE, 0x0001); CHECK_ERR(err); return err; } static int s5c73m3_set_effect(struct v4l2_subdev *sd, int val) { int err = 0; int num_entries = 0; int i = 0; cam_dbg("E, value %d\n", val); if (val < IMAGE_EFFECT_BASE || val > IMAGE_EFFECT_MAX) val = IMAGE_EFFECT_NONE; num_entries = ARRAY_SIZE(s5c73m3_effects); for (i = 0; i < num_entries; i++) { if (val == s5c73m3_effects[i].index) { err = s5c73m3_writeb(sd, S5C73M3_IMAGE_EFFECT, s5c73m3_effects[i].reg_val); CHECK_ERR(err); break; } } cam_trace("X\n"); return 0; } static int s5c73m3_set_wdr(struct v4l2_subdev *sd, int val) { int err; cam_dbg("E, value %d\n", val); retry: switch (val) { case WDR_OFF: err = s5c73m3_writeb(sd, S5C73M3_WDR, S5C73M3_WDR_OFF); CHECK_ERR(err); break; case WDR_ON: err = s5c73m3_writeb(sd, S5C73M3_WDR, S5C73M3_WDR_ON); CHECK_ERR(err); break; default: cam_warn("invalid value, %d\n", val); val = WDR_OFF; goto retry; } cam_trace("X\n"); return 0; } static int s5c73m3_set_antishake(struct v4l2_subdev *sd, int val) { int err = 0; cam_dbg("E, value %d\n", val); if (val) { err = s5c73m3_writeb(sd, S5C73M3_AE_MODE, S5C73M3_ANTI_SHAKE); CHECK_ERR(err); } else { err = s5c73m3_writeb(sd, S5C73M3_AE_MODE, S5C73M3_AUTO_MODE_AE_SET); CHECK_ERR(err); } return err; } static int s5c73m3_get_af_cal_version(struct v4l2_subdev *sd) { struct s5c73m3_state *state = to_state(sd); u32 data = 0; u16 status = 0; int err = 0; /* Calibration Device */ err = s5c73m3_read(sd, 0x0009, 0x300C, &status); CHECK_ERR(err); data = status; status = 0; err = s5c73m3_read(sd, 0x0009, 0x300E, &status); CHECK_ERR(err); data += status<<16; state->cal_device = data; /* Calibration DLL Version */ status = 0; data = 0; err = s5c73m3_read(sd, 0x0009, 0x4FF8, &status); CHECK_ERR(err); data = status; status = 0; err = s5c73m3_read(sd, 0x0009, 0x4FFA, &status); CHECK_ERR(err); data += status<<16; state->cal_dll = data; cam_dbg("Cal_Device = 0x%x, Cal_DLL = 0x%x\n", state->cal_device, 
state->cal_dll); return 0; } static int s5c73m3_stop_af_lens(struct v4l2_subdev *sd, int val) { struct s5c73m3_state *state = to_state(sd); int err; cam_dbg("E, value %d\n", val); if (val == CAF_START) { if (state->focus.mode == FOCUS_MODE_CONTINOUS_VIDEO) { err = s5c73m3_writeb(sd, S5C73M3_AF_MODE, S5C73M3_AF_MODE_MOVIE_CAF_START); } else { err = s5c73m3_writeb(sd, S5C73M3_AF_MODE, S5C73M3_AF_MODE_PREVIEW_CAF_START); } } else { err = s5c73m3_writeb(sd, S5C73M3_AF_CON, S5C73M3_AF_CON_STOP); } CHECK_ERR(err); cam_trace("X\n"); return err; } static int s5c73m3_set_af(struct v4l2_subdev *sd, int val) { struct s5c73m3_state *state = to_state(sd); int err = 0; cam_info("%s, mode %#x\n", val ? "start" : "stop", state->focus.mode); state->focus.status = 0; if (val) { state->isflash = S5C73M3_ISNEED_FLASH_ON; if (state->focus.mode == FOCUS_MODE_TOUCH) err = s5c73m3_set_touch_auto_focus(sd); else err = s5c73m3_writeb(sd, S5C73M3_AF_CON, S5C73M3_AF_CON_START); } else { err = s5c73m3_writeb(sd, S5C73M3_STILL_MAIN_FLASH , S5C73M3_STILL_MAIN_FLASH_CANCEL); err = s5c73m3_writeb(sd, S5C73M3_AF_CON, S5C73M3_AF_CON_STOP); state->isflash = S5C73M3_ISNEED_FLASH_UNDEFINED; } CHECK_ERR(err); cam_trace("X\n"); return err; } static int s5c73m3_get_pre_flash(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { int err = 0; u16 pre_flash = false; s5c73m3_read(sd, 0x0009, S5C73M3_STILL_PRE_FLASH | 0x5000, &pre_flash); ctrl->value = pre_flash; return err; } static int s5c73m3_get_af_result(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { struct s5c73m3_state *state = to_state(sd); int err = 0; u16 af_status = S5C73M3_AF_STATUS_UNFOCUSED; /*u16 temp_status = 0;*/ err = s5c73m3_read(sd, 0x0009, S5C73M3_AF_STATUS, &af_status); /*err = s5c73m3_read(sd, 0x0009, 0x5840, &temp_status);*/ switch (af_status) { case S5C73M3_AF_STATUS_FOCUSING: case S5C73M3_CAF_STATUS_FOCUSING: case S5C73M3_CAF_STATUS_FIND_SEARCHING_DIR: case S5C73M3_AF_STATUS_INVALID: ctrl->value = CAMERA_AF_STATUS_IN_PROGRESS; break; case S5C73M3_AF_STATUS_FOCUSED: case S5C73M3_CAF_STATUS_FOCUSED: ctrl->value = CAMERA_AF_STATUS_SUCCESS; break; case S5C73M3_CAF_STATUS_UNFOCUSED: case S5C73M3_AF_STATUS_UNFOCUSED: default: ctrl->value = CAMERA_AF_STATUS_FAIL; break; } state->focus.status = af_status; /*cam_dbg("af_status = %d, frame_cnt = %d\n", state->focus.status, temp_status);*/ return err; } static int s5c73m3_set_af_mode(struct v4l2_subdev *sd, int val) { struct s5c73m3_state *state = to_state(sd); int err; cam_dbg("E, value %d\n", val); retry: switch (val) { case FOCUS_MODE_AUTO: case FOCUS_MODE_INFINITY: if (state->focus.mode != FOCUS_MODE_CONTINOUS_PICTURE) { err = s5c73m3_writeb(sd, S5C73M3_AF_MODE, S5C73M3_AF_MODE_NORMAL); CHECK_ERR(err); } else { err = s5c73m3_writeb(sd, S5C73M3_AF_CON, S5C73M3_AF_CON_STOP); CHECK_ERR(err); } state->focus.mode = val; state->caf_mode = S5C73M3_AF_MODE_NORMAL; break; case FOCUS_MODE_MACRO: if (state->focus.mode != FOCUS_MODE_CONTINOUS_PICTURE_MACRO) { err = s5c73m3_writeb(sd, S5C73M3_AF_MODE, S5C73M3_AF_MODE_MACRO); CHECK_ERR(err); } else { err = s5c73m3_writeb(sd, S5C73M3_AF_CON, S5C73M3_AF_CON_STOP); CHECK_ERR(err); } state->focus.mode = val; state->caf_mode = S5C73M3_AF_MODE_MACRO; break; case FOCUS_MODE_CONTINOUS_PICTURE: state->isflash = S5C73M3_ISNEED_FLASH_UNDEFINED; if (val != state->focus.mode && state->caf_mode != S5C73M3_AF_MODE_NORMAL) { state->focus.mode = val; err = s5c73m3_writeb(sd, S5C73M3_AF_MODE, S5C73M3_AF_MODE_NORMAL); CHECK_ERR(err); state->caf_mode = S5C73M3_AF_MODE_NORMAL; } err = 
s5c73m3_writeb(sd, S5C73M3_AF_MODE, S5C73M3_AF_MODE_PREVIEW_CAF_START); CHECK_ERR(err); break; case FOCUS_MODE_CONTINOUS_PICTURE_MACRO: state->isflash = S5C73M3_ISNEED_FLASH_UNDEFINED; if (val != state->focus.mode && state->caf_mode != S5C73M3_AF_MODE_MACRO) { state->focus.mode = val; err = s5c73m3_writeb(sd, S5C73M3_AF_MODE, S5C73M3_AF_MODE_MACRO); state->caf_mode = S5C73M3_AF_MODE_MACRO; CHECK_ERR(err); } err = s5c73m3_writeb(sd, S5C73M3_AF_MODE, S5C73M3_AF_MODE_PREVIEW_CAF_START); CHECK_ERR(err); break; case FOCUS_MODE_CONTINOUS_VIDEO: state->focus.mode = val; err = s5c73m3_writeb(sd, S5C73M3_AF_MODE, S5C73M3_AF_MODE_MOVIE_CAF_START); CHECK_ERR(err); break; case FOCUS_MODE_FACEDETECT: state->focus.mode = val; break; case FOCUS_MODE_TOUCH: state->focus.mode = val; break; default: cam_warn("invalid value, %d\n", val); val = FOCUS_MODE_AUTO; goto retry; } state->focus.mode = val; cam_trace("X\n"); return 0; } static int s5c73m3_set_touch_auto_focus(struct v4l2_subdev *sd) { struct s5c73m3_state *state = to_state(sd); int err; err = s5c73m3_i2c_write(sd, 0xfcfc, 0x3310); CHECK_ERR(err); err = s5c73m3_i2c_write(sd, 0x0050, 0x0009); CHECK_ERR(err); err = s5c73m3_i2c_write(sd, 0x0054, S5C73M3_AF_TOUCH_POSITION); CHECK_ERR(err); err = s5c73m3_i2c_write(sd, 0x0F14, state->focus.pos_x); CHECK_ERR(err); err = s5c73m3_i2c_write(sd, 0x0F14, state->focus.pos_y); CHECK_ERR(err); err = s5c73m3_i2c_write(sd, 0x0F14, state->real_preview_width); CHECK_ERR(err); err = s5c73m3_i2c_write(sd, 0x0F14, state->real_preview_height); CHECK_ERR(err); err = s5c73m3_i2c_write(sd, 0x0050, 0x0009); CHECK_ERR(err); err = s5c73m3_i2c_write(sd, 0x0054, 0x5000); CHECK_ERR(err); err = s5c73m3_i2c_write(sd, 0x0F14, 0x0E0A); CHECK_ERR(err); err = s5c73m3_i2c_write(sd, 0x0F14, 0x0000); CHECK_ERR(err); err = s5c73m3_i2c_write(sd, 0x0054, 0x5080); CHECK_ERR(err); err = s5c73m3_i2c_write(sd, 0x0F14, 0x0001); CHECK_ERR(err); return 0; } static int s5c73m3_set_zoom(struct v4l2_subdev *sd, int value) { int err; cam_dbg("E, value %d\n", value); retry: if (value < 0 || value > 30) { cam_warn("invalid value, %d\n", value); value = 0; goto retry; } err = s5c73m3_writeb(sd, S5C73M3_ZOOM_STEP, value); CHECK_ERR(err); cam_trace("X\n"); return 0; } static int s5c73m3_set_jpeg_quality(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { int val = ctrl->value, err; cam_dbg("E, value %d\n", val); if (val <= 65) /* Normal */ err = s5c73m3_writeb(sd, S5C73M3_IMAGE_QUALITY, S5C73M3_IMAGE_QUALITY_NORMAL); else if (val <= 75) /* Fine */ err = s5c73m3_writeb(sd, S5C73M3_IMAGE_QUALITY, S5C73M3_IMAGE_QUALITY_FINE); else /* Superfine */ err = s5c73m3_writeb(sd, S5C73M3_IMAGE_QUALITY, S5C73M3_IMAGE_QUALITY_SUPERFINE); CHECK_ERR(err); cam_trace("X\n"); return 0; } static int s5c73m3_aeawb_lock_unlock(struct v4l2_subdev *sd, int val) { struct s5c73m3_state *state = to_state(sd); int err = 0; int ae_lock = val & 0x1; int awb_lock = (val & 0x2) >> 1; int ae_lock_changed = ~(ae_lock & state->ae_lock) & (ae_lock | state->ae_lock); int awb_lock_changed = ~(awb_lock & state->awb_lock) & (awb_lock | state->awb_lock); if (ae_lock_changed) { cam_dbg("ae lock - %s\n", ae_lock ? "true" : "false"); err = s5c73m3_writeb(sd, S5C73M3_AE_CON, ae_lock ? S5C73M3_AE_STOP : S5C73M3_AE_START); CHECK_ERR(err); state->ae_lock = ae_lock; } if (awb_lock_changed && state->wb_mode == WHITE_BALANCE_AUTO) { cam_dbg("awb lock - %s\n", awb_lock ? "true" : "false"); err = s5c73m3_writeb(sd, S5C73M3_AWB_CON, awb_lock ? 
S5C73M3_AWB_STOP : S5C73M3_AWB_START); CHECK_ERR(err); state->awb_lock = awb_lock; } return 0; } static void s5c73m3_wait_for_preflash_fire(struct v4l2_subdev *sd) { u16 pre_flash = false; u16 timeout_cnt = 0; do { s5c73m3_read(sd, 0x0009, S5C73M3_STILL_PRE_FLASH | 0x5000, &pre_flash); if (pre_flash || timeout_cnt > 20) { if (!pre_flash) { cam_dbg("pre_Flash = %d, timeout_cnt = %d\n", pre_flash, timeout_cnt); } break; } else timeout_cnt++; mdelay(15); } while (1); } static int s5c73m3_start_capture(struct v4l2_subdev *sd, int val) { struct s5c73m3_state *state = to_state(sd); int err = 0; u16 isneed_flash = false; u16 pre_flash = false; s5c73m3_read(sd, 0x0009, S5C73M3_STILL_PRE_FLASH | 0x5000, &pre_flash); if (state->flash_mode == FLASH_MODE_ON) { if (!pre_flash) { err = s5c73m3_writeb(sd, S5C73M3_STILL_PRE_FLASH , S5C73M3_STILL_PRE_FLASH_FIRE); s5c73m3_wait_for_preflash_fire(sd); } err = s5c73m3_writeb(sd, S5C73M3_STILL_MAIN_FLASH , S5C73M3_STILL_MAIN_FLASH_FIRE); } else if (state->flash_mode == FLASH_MODE_AUTO) { if (pre_flash) { err = s5c73m3_writeb(sd, S5C73M3_STILL_MAIN_FLASH , S5C73M3_STILL_MAIN_FLASH_FIRE); } else if (state->isflash != S5C73M3_ISNEED_FLASH_ON) { err = s5c73m3_read(sd, 0x0009, S5C73M3_AE_ISNEEDFLASH | 0x5000, &isneed_flash); if (isneed_flash) { err = s5c73m3_writeb(sd, S5C73M3_STILL_PRE_FLASH , S5C73M3_STILL_PRE_FLASH_FIRE); s5c73m3_wait_for_preflash_fire(sd); err = s5c73m3_writeb(sd, S5C73M3_STILL_MAIN_FLASH, S5C73M3_STILL_MAIN_FLASH_FIRE); } } } state->isflash = S5C73M3_ISNEED_FLASH_UNDEFINED; return 0; } static int s5c73m3_set_auto_bracket_mode(struct v4l2_subdev *sd) { int err = 0; err = s5c73m3_writeb(sd, S5C73M3_AE_AUTO_BRAKET, S5C73M3_AE_AUTO_BRAKET_EV20); CHECK_ERR(err); return err; } static int s5c73m3_set_frame_rate(struct v4l2_subdev *sd, int fps) { int err = 0; struct s5c73m3_state *state = to_state(sd); if (!state->stream_enable) { state->fps = fps; return 0; } cam_dbg("E, value %d\n", fps); switch (fps) { case 120: err = s5c73m3_writeb(sd, S5C73M3_AE_MODE, S5C73M3_FIXED_120FPS); /* 120fps */ break; case 90: err = s5c73m3_writeb(sd, S5C73M3_AE_MODE, S5C73M3_FIXED_90FPS); /* 90fps */ break; case 60: err = s5c73m3_writeb(sd, S5C73M3_AE_MODE, S5C73M3_FIXED_60FPS); /* 60fps */ break; case 30: err = s5c73m3_writeb(sd, S5C73M3_AE_MODE, S5C73M3_FIXED_30FPS); /* 30fps */ break; case 20: err = s5c73m3_writeb(sd, S5C73M3_AE_MODE, S5C73M3_FIXED_20FPS); /* 20fps */ break; case 15: err = s5c73m3_writeb(sd, S5C73M3_AE_MODE, S5C73M3_FIXED_15FPS); /* 15fps */ break; case 10: err = s5c73m3_writeb(sd, S5C73M3_AE_MODE, S5C73M3_FIXED_10FPS); /* 10fps */ break; case 7: err = s5c73m3_writeb(sd, S5C73M3_AE_MODE, S5C73M3_FIXED_7FPS); /* 7fps */ break; default: err = s5c73m3_writeb(sd, S5C73M3_AE_MODE, S5C73M3_AUTO_MODE_AE_SET); /* auto */ break; } return err; } static int s5c73m3_set_face_zoom(struct v4l2_subdev *sd, int val) { struct s5c73m3_state *state = to_state(sd); int err; cam_dbg("s5c73m3_set_face_zoom\n"); err = s5c73m3_writeb(sd, S5C73M3_AF_CON, S5C73M3_AF_CON_STOP); CHECK_ERR(err); err = s5c73m3_i2c_write(sd, 0xfcfc, 0x3310); CHECK_ERR(err); err = s5c73m3_i2c_write(sd, 0x0050, 0x0009); CHECK_ERR(err); err = s5c73m3_i2c_write(sd, 0x0054, S5C73M3_AF_TOUCH_POSITION); CHECK_ERR(err); err = s5c73m3_i2c_write(sd, 0x0F14, state->focus.pos_x); CHECK_ERR(err); err = s5c73m3_i2c_write(sd, 0x0F14, state->focus.pos_y); CHECK_ERR(err); err = s5c73m3_i2c_write(sd, 0x0F14, state->preview->width); CHECK_ERR(err); err = s5c73m3_i2c_write(sd, 0x0F14, state->preview->height); 
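/* The 0x0050/0x0054 writes appear to select an ISP mailbox window, and the
   repeated writes to 0x0F14 then push consecutive payload words into it;
   the same multi-word command sequence is used in
   s5c73m3_set_touch_auto_focus() above. */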
CHECK_ERR(err); err = s5c73m3_i2c_write(sd, 0x0050, 0x0009); CHECK_ERR(err); err = s5c73m3_i2c_write(sd, 0x0054, 0x5000); CHECK_ERR(err); err = s5c73m3_i2c_write(sd, 0x0F14, S5C73M3_AF_FACE_ZOOM); CHECK_ERR(err); err = s5c73m3_i2c_write(sd, 0x0F14, val); /*0:reset, 1:Start*/ CHECK_ERR(err); err = s5c73m3_i2c_write(sd, 0x0054, 0x5080); CHECK_ERR(err); err = s5c73m3_i2c_write(sd, 0x0F14, 0x0001); CHECK_ERR(err); udelay(400); err = s5c73m3_writeb(sd, S5C73M3_AF_MODE, S5C73M3_AF_MODE_PREVIEW_CAF_START); CHECK_ERR(err); return 0; } static int s5c73m3_set_face_detection(struct v4l2_subdev *sd, int val) { int err; cam_dbg("E, value %d\n", val); retry: switch (val) { case FACE_DETECTION_ON: err = s5c73m3_writeb(sd, S5C73M3_FACE_DET, S5C73M3_FACE_DET_ON); CHECK_ERR(err); err = s5c73m3_writeb(sd, S5C73M3_AF_MODE, S5C73M3_AF_MODE_PREVIEW_CAF_START); CHECK_ERR(err); break; case FACE_DETECTION_OFF: err = s5c73m3_writeb(sd, S5C73M3_FACE_DET, S5C73M3_FACE_DET_OFF); CHECK_ERR(err); err = s5c73m3_writeb(sd, S5C73M3_AF_MODE, S5C73M3_AF_MODE_PREVIEW_CAF_START); CHECK_ERR(err); break; default: cam_warn("invalid value, %d\n", val); val = FACE_DETECTION_OFF; goto retry; } cam_trace("X\n"); return 0; } static int s5c73m3_set_hybrid_capture(struct v4l2_subdev *sd) { int err; cam_trace("E\n"); err = s5c73m3_writeb(sd, S5C73M3_HYBRID_CAPTURE, 1); CHECK_ERR(err); cam_trace("X\n"); return 0; } static int s5c73m3_get_lux(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { int err = 0; u16 lux_val = 0; err = s5c73m3_read(sd, 0x0009, 0x5C88, &lux_val); ctrl->value = lux_val; return err; } static int s5c73m3_set_low_light_mode(struct v4l2_subdev *sd, int val) { int err; cam_dbg("E, value %d\n", val); err = s5c73m3_writeb(sd, S5C73M3_AE_LOW_LIGHT_MODE, val); CHECK_ERR(err); cam_trace("X\n"); return 0; } static int s5c73m3_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { struct s5c73m3_state *state = to_state(sd); int err = 0; printk(KERN_INFO "id %d, value %d\n", ctrl->id - V4L2_CID_PRIVATE_BASE, ctrl->value); if (unlikely(state->isp.bad_fw && ctrl->id != V4L2_CID_CAM_UPDATE_FW)) { cam_err("\"Unknown\" state, please update F/W"); return -ENOSYS; } switch (ctrl->id) { case V4L2_CID_CAMERA_FRAME_RATE: err = s5c73m3_set_frame_rate(sd, ctrl->value); break; case V4L2_CID_CAMERA_FACE_DETECTION: err = s5c73m3_set_face_detection(sd, ctrl->value); break; case V4L2_CID_CAMERA_FACE_ZOOM: err = s5c73m3_set_face_zoom(sd, ctrl->value); break; case V4L2_CID_CAM_UPDATE_FW: if (ctrl->value == FW_MODE_DUMP) err = s5c73m3_dump_fw(sd); else if (ctrl->value == FW_MODE_UPDATE) err = s5c73m3_check_fw(sd, 1); else err = 0; break; case V4L2_CID_CAMERA_SENSOR_MODE: err = s5c73m3_set_sensor_mode(sd, ctrl->value); break; case V4L2_CID_CAMERA_FLASH_MODE: err = s5c73m3_set_flash(sd, ctrl->value, 0); break; case V4L2_CID_CAMERA_ANTI_BANDING: err = s5c73m3_set_antibanding(sd, ctrl->value); break; case V4L2_CID_CAMERA_ISO: err = s5c73m3_set_iso(sd, ctrl); break; case V4L2_CID_CAMERA_METERING: err = s5c73m3_set_metering(sd, ctrl->value); break; case V4L2_CID_CAMERA_BRIGHTNESS: err = s5c73m3_set_exposure(sd, ctrl); break; case V4L2_CID_CAMERA_CONTRAST: err = s5c73m3_set_contrast(sd, ctrl); break; case V4L2_CID_CAMERA_SHARPNESS: err = s5c73m3_set_sharpness(sd, ctrl); break; case V4L2_CID_CAMERA_WHITE_BALANCE: err = s5c73m3_set_whitebalance(sd, ctrl->value); break; case V4L2_CID_CAMERA_SCENE_MODE: err = s5c73m3_set_scene_mode(sd, ctrl->value); break; case V4L2_CID_CAMERA_EFFECT: err = s5c73m3_set_effect(sd, ctrl->value); break; case 
V4L2_CID_CAMERA_WDR: err = s5c73m3_set_wdr(sd, ctrl->value); break; case V4L2_CID_CAMERA_ANTI_SHAKE: if (state->sensor_mode == SENSOR_CAMERA) err = s5c73m3_set_antishake(sd, ctrl->value); break; case V4L2_CID_CAMERA_DEFAULT_FOCUS_POSITION: /*err = s5c73m3_set_af_mode(sd, state->focus.mode);*/ err = 0; break; case V4L2_CID_CAMERA_FOCUS_MODE: /*state->focus.mode = ctrl->value;*/ err = s5c73m3_set_af_mode(sd, ctrl->value); break; case V4L2_CID_CAMERA_SET_AUTO_FOCUS: state->real_preview_width = ((u32)ctrl->value >> 20) & 0xFFF; state->real_preview_height = ((u32)ctrl->value >> 8) & 0xFFF; err = s5c73m3_set_af(sd, (u32)ctrl->value & 0x000F); break; case V4L2_CID_CAMERA_OBJECT_POSITION_X: state->focus.pos_x = ctrl->value; break; case V4L2_CID_CAMERA_OBJECT_POSITION_Y: state->focus.pos_y = ctrl->value; break; case V4L2_CID_CAMERA_ZOOM: err = s5c73m3_set_zoom(sd, ctrl->value); break; case V4L2_CID_CAM_JPEG_QUALITY: err = s5c73m3_set_jpeg_quality(sd, ctrl); break; case V4L2_CID_CAMERA_CAPTURE: err = s5c73m3_start_capture(sd, ctrl->value); if (state->scene_mode == SCENE_MODE_FIREWORKS) err = s5c73m3_capture_firework(sd); else if (state->scene_mode == SCENE_MODE_NIGHTSHOT) err = s5c73m3_capture_nightshot(sd); break; case V4L2_CID_CAMERA_HDR: state->hdr_mode = ctrl->value; err = 0; break; case V4L2_CID_CAMERA_HYBRID: state->hybrid_mode = ctrl->value; err = 0; break; case V4L2_CID_CAMERA_FAST_MODE: state->fast_mode = ctrl->value; err = 0; break; case V4L2_CID_CAMERA_YUV_SNAPSHOT: state->yuv_snapshot = ctrl->value; err = 0; break; case V4L2_CID_CAMERA_HYBRID_CAPTURE: err = s5c73m3_set_hybrid_capture(sd); break; case V4L2_CID_CAMERA_VT_MODE: state->vt_mode = ctrl->value; break; case V4L2_CID_CAMERA_JPEG_RESOLUTION: state->jpeg_width = (u32)ctrl->value >> 16; state->jpeg_height = (u32)ctrl->value & 0x0FFFF; break; case V4L2_CID_CAMERA_AEAWB_LOCK_UNLOCK: err = s5c73m3_aeawb_lock_unlock(sd, ctrl->value); break; case V4L2_CID_CAMERA_CAF_START_STOP: err = s5c73m3_stop_af_lens(sd, ctrl->value); break; case V4L2_CID_CAMERA_LOW_LIGHT_MODE: err = s5c73m3_set_low_light_mode(sd, ctrl->value); break; default: cam_err("no such control id %d, value %d\n", ctrl->id - V4L2_CID_PRIVATE_BASE, ctrl->value); /*err = -ENOIOCTLCMD;*/ err = 0; break; } if (err < 0 && err != -ENOIOCTLCMD) cam_err("failed, id %d, value %d\n", ctrl->id - V4L2_CID_PRIVATE_BASE, ctrl->value); return err; } static int s5c73m3_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { int err = 0; switch (ctrl->id) { case V4L2_CID_CAMERA_CAPTURE: err = s5c73m3_get_pre_flash(sd, ctrl); break; case V4L2_CID_CAMERA_AUTO_FOCUS_RESULT: err = s5c73m3_get_af_result(sd, ctrl); break; case V4L2_CID_CAM_JPEG_MEMSIZE: ctrl->value = 0xA00000; /*interleaved data size*/ break; case V4L2_CID_CAMERA_GET_LUX: err = s5c73m3_get_lux(sd, ctrl); break; default: cam_err("no such control id %d\n", ctrl->id - V4L2_CID_PRIVATE_BASE); /*err = -ENOIOCTLCMD*/ err = 0; break; } if (err < 0 && err != -ENOIOCTLCMD) cam_err("failed, id %d\n", ctrl->id - V4L2_CID_PRIVATE_BASE); return err; } static int s5c73m3_g_ext_ctrl(struct v4l2_subdev *sd, struct v4l2_ext_control *ctrl) { struct s5c73m3_state *state = to_state(sd); int err = 0; switch (ctrl->id) { case V4L2_CID_CAM_SENSOR_FW_VER: strcpy(ctrl->string, state->phone_fw); break; default: cam_err("no such control id %d\n", ctrl->id - V4L2_CID_CAMERA_CLASS_BASE); /*err = -ENOIOCTLCMD*/ break; } return err; } static int s5c73m3_g_ext_ctrls(struct v4l2_subdev *sd, struct v4l2_ext_controls *ctrls) { struct v4l2_ext_control *ctrl = 
ctrls->controls; int i, err = 0; for (i = 0; i < ctrls->count; i++, ctrl++) { err = s5c73m3_g_ext_ctrl(sd, ctrl); if (err) { ctrls->error_idx = i; break; } } return err; } #ifndef CONFIG_VIDEO_S5C73M3_SPI int s5c73m3_spi_write(const u8 *addr, const int len, const int txSize) { return 0; } #endif static int s5c73m3_load_fw(struct v4l2_subdev *sd) { struct device *dev = sd->v4l2_dev->dev; struct s5c73m3_state *state = to_state(sd); const struct firmware *fw; char fw_path[20] = {0,}; char fw_path_in_data[25] = {0,}; u8 *buf = NULL; int err = 0; int txSize = 0; struct file *fp = NULL; mm_segment_t old_fs; long fsize = 0, nread; #ifdef CONFIG_MACH_T0 if (state->sensor_fw[1] == 'D') { sprintf(fw_path, "SlimISP_%cK.bin", state->sensor_fw[0]); } else { sprintf(fw_path, "SlimISP_%c%c.bin", state->sensor_fw[0], state->sensor_fw[1]); } #else if (state->sensor_fw[0] == 'O') { sprintf(fw_path, "SlimISP_G%c.bin", state->sensor_fw[1]); } else if (state->sensor_fw[0] == 'S') { sprintf(fw_path, "SlimISP_Z%c.bin", state->sensor_fw[1]); } else { sprintf(fw_path, "SlimISP_%c%c.bin", state->sensor_fw[0], state->sensor_fw[1]); } #endif sprintf(fw_path_in_data, "/data/cfw/%s", fw_path); old_fs = get_fs(); set_fs(KERNEL_DS); if (state->fw_index == S5C73M3_SD_CARD || state->fw_index == S5C73M3_IN_DATA) { if (state->fw_index == S5C73M3_SD_CARD) fp = filp_open(S5C73M3_FW_PATH, O_RDONLY, 0); else fp = filp_open(fw_path_in_data, O_RDONLY, 0); if (IS_ERR(fp)) goto out; else cam_dbg("%s is opened\n", state->fw_index == S5C73M3_SD_CARD ? S5C73M3_FW_PATH : fw_path_in_data); fsize = fp->f_path.dentry->d_inode->i_size; nread = vfs_read(fp, (char __user *)data_memory, fsize, &fp->f_pos); if (nread != fsize) { cam_err("failed to read firmware file_2\n"); err = -EIO; goto out; } set_fs(old_fs); } else { set_fs(old_fs); err = request_firmware(&fw, fw_path, dev); if (err != 0) { /*cam_err("request_firmware falied\n");*/ err = -EINVAL; goto out; } /*cam_dbg("start, size %d Bytes\n", fw->size);*/ buf = (u8 *)fw->data; fsize = fw->size; } txSize = 60*1024; /*60KB*/ if (state->fw_index != S5C73M3_IN_SYSTEM) { err = s5c73m3_spi_write((char *)&data_memory, fsize, txSize); if (err < 0) { cam_err("s5c73m3_spi_write falied\n"); goto out; } } else { err = s5c73m3_spi_write((char *)buf, fsize, txSize); } out: if (state->fw_index == S5C73M3_SD_CARD || state->fw_index == S5C73M3_IN_DATA) { if (!IS_ERR(fp) && fp != NULL) filp_close(fp, current->files); vfree(buf); set_fs(old_fs); } else { release_firmware(fw); } return err; } /* * v4l2_subdev_video_ops */ static const struct s5c73m3_frmsizeenum *s5c73m3_get_frmsize (const struct s5c73m3_frmsizeenum *frmsizes, int num_entries, int index) { int i; for (i = 0; i < num_entries; i++) { if (frmsizes[i].index == index) return &frmsizes[i]; } return NULL; } static int s5c73m3_set_frmsize(struct v4l2_subdev *sd) { struct s5c73m3_state *state = to_state(sd); int err ; cam_trace("E\n"); if (state->fast_mode == FAST_MODE_SUBSAMPLING_HALF) { cam_dbg("S5C73M3_FAST_MODE_SUBSAMPLING_HALF\n"); err = s5c73m3_writeb(sd, S5C73M3_CHG_MODE, S5C73M3_FAST_MODE_SUBSAMPLING_HALF | state->preview->reg_val | (state->sensor_mode<<8)); CHECK_ERR(err); } else if (state->fast_mode == FAST_MODE_SUBSAMPLING_QUARTER) { cam_dbg("S5C73M3_FAST_MODE_SUBSAMPLING_QUARTER\n"); err = s5c73m3_writeb(sd, S5C73M3_CHG_MODE, S5C73M3_FAST_MODE_SUBSAMPLING_QUARTER | state->preview->reg_val | (state->sensor_mode<<8)); CHECK_ERR(err); } else { cam_dbg("S5C73M3_DEFAULT_MODE\n"); err = s5c73m3_writeb(sd, S5C73M3_CHG_MODE, S5C73M3_DEFAULT_MODE | 
state->capture->reg_val | state->preview->reg_val |(state->sensor_mode<<8)); CHECK_ERR(err); } cam_trace("X\n"); return 0; } static int s5c73m3_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *ffmt) { struct s5c73m3_state *state = to_state(sd); const struct s5c73m3_frmsizeenum **frmsize; const struct s5c73m3_frmsizeenum **capfrmsize; u32 width = ffmt->width; u32 height = ffmt->height; u32 tmp_width; u32 old_index, old_index_cap; int i, num_entries; cam_trace("E\n"); if (unlikely(state->isp.bad_fw)) { cam_err("\"Unknown\" state, please update F/W"); return -ENOSYS; } if (ffmt->width < ffmt->height) { tmp_width = ffmt->height; height = ffmt->width; width = tmp_width; } if (ffmt->colorspace == V4L2_COLORSPACE_JPEG) state->format_mode = V4L2_PIX_FMT_MODE_CAPTURE; else state->format_mode = V4L2_PIX_FMT_MODE_PREVIEW; s5c73m3_set_mode(sd); /*set frame size for preview(yuv)*/ frmsize = &state->preview; old_index = *frmsize ? (*frmsize)->index : -1; *frmsize = NULL; num_entries = ARRAY_SIZE(preview_frmsizes); for (i = 0; i < num_entries; i++) { if (width == preview_frmsizes[i].width && height == preview_frmsizes[i].height) { *frmsize = &preview_frmsizes[i]; break; } } if (*frmsize == NULL) { cam_warn("invalid yuv frame size %dx%d\n", width, height); *frmsize = s5c73m3_get_frmsize(preview_frmsizes, num_entries, S5C73M3_PREVIEW_960X720); } /*set frame size for capture(jpeg)*/ /*it's meaningful for interleaved mode*/ capfrmsize = &state->capture; old_index_cap = *capfrmsize ? (*capfrmsize)->index : -1; *capfrmsize = NULL; width = state->jpeg_width; height = state->jpeg_height; num_entries = ARRAY_SIZE(capture_frmsizes); for (i = 0; i < num_entries; i++) { if (width == capture_frmsizes[i].width && height == capture_frmsizes[i].height) { *capfrmsize = &capture_frmsizes[i]; break; } } if (*capfrmsize == NULL) { cam_warn("invalid jpeg frame size %dx%d\n", width, height); *capfrmsize = s5c73m3_get_frmsize(capture_frmsizes, num_entries, S5C73M3_CAPTURE_VGA); } cam_dbg("yuv %dx%d\n", (*frmsize)->width, (*frmsize)->height); cam_dbg("jpeg %dx%d\n", (*capfrmsize)->width, (*capfrmsize)->height); if (state->stream_enable) { if (ffmt->colorspace == V4L2_COLORSPACE_JPEG) { if ((old_index != (*frmsize)->index) || (old_index_cap != (*capfrmsize)->index)) s5c73m3_set_frmsize(sd); } else { if (old_index != (*frmsize)->index) s5c73m3_set_frmsize(sd); } } else s5c73m3_set_frmsize(sd); cam_trace("X\n"); return 0; } static int s5c73m3_g_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *a) { struct s5c73m3_state *state = to_state(sd); a->parm.capture.timeperframe.numerator = 1; a->parm.capture.timeperframe.denominator = state->fps; return 0; } static int s5c73m3_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *a) { struct s5c73m3_state *state = to_state(sd); int err = 0; u32 fps = a->parm.capture.timeperframe.denominator / a->parm.capture.timeperframe.numerator; if (unlikely(state->isp.bad_fw)) { cam_err("\"Unknown\" state, please update F/W"); return -ENOSYS; } if (fps != state->fps) { if (fps <= 0 || fps > 30) { cam_err("invalid frame rate %d\n", fps); fps = 30; } state->fps = fps; } cam_err("Frame rate = %d(%d)\n", fps, state->fps); err = s5c73m3_set_frame_rate(sd, state->fps); CHECK_ERR(err); return 0; } static int s5c73m3_enum_framesizes(struct v4l2_subdev *sd, struct v4l2_frmsizeenum *fsize) { struct s5c73m3_state *state = to_state(sd); /* * The camera interface should read this value, this is the resolution * at which the sensor would provide framedata to the camera i/f * In case of image capture, * 
this returns the default camera resolution (VGA)
 */
	if (state->preview == NULL)
		return -EINVAL;

	fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;

	if (state->hdr_mode || state->yuv_snapshot) {
		fsize->discrete.width = state->capture->width;
		fsize->discrete.height = state->capture->height;
	} else {
		fsize->discrete.width = state->preview->width;
		fsize->discrete.height = state->preview->height;
	}

	return 0;
}

static int s5c73m3_s_stream_sensor(struct v4l2_subdev *sd, int onoff)
{
	int err = 0;
	int index = 0;
	u16 status = 0;
	u16 i2c_status = 0;
	u16 i2c_seq_status = 0;

	cam_info("onoff=%d\n", onoff);
	err = s5c73m3_writeb(sd, S5C73M3_SENSOR_STREAMING,
		onoff ? S5C73M3_SENSOR_STREAMING_ON :
		S5C73M3_SENSOR_STREAMING_OFF);
	CHECK_ERR(err);

	do {
		err = s5c73m3_read(sd, 0x0009, S5C73M3_STATUS, &status);
		if (status == 0xffff)
			break;
		index++;
		msleep(20);
	} while (index < 30);

	if (index >= 30) {
		err = s5c73m3_read(sd, 0x0009,
			S5C73M3_I2C_ERR_STATUS, &i2c_status);
		err = s5c73m3_read(sd, 0x0009,
			S5C73M3_I2C_SEQ_STATUS, &i2c_seq_status);
		cam_dbg("TimeOut!! index:%d,status:%#x,i2c_status:%#x,i2c_seq_status:%#x\n",
			index, status, i2c_status, i2c_seq_status);
		err = -1;
	}

	return err;
}

static int s5c73m3_s_stream_hdr(struct v4l2_subdev *sd, int enable)
{
	struct s5c73m3_state *state = to_state(sd);
	int err = 0;

	cam_trace("E\n");

	if (enable) {
		err = s5c73m3_i2c_write(sd, 0x0050, 0x0009);
		CHECK_ERR(err);
		err = s5c73m3_i2c_write(sd, 0x0054, 0x5000);
		CHECK_ERR(err);
		err = s5c73m3_i2c_write(sd, 0x0F14, 0x0902);
		CHECK_ERR(err);
		err = s5c73m3_i2c_write(sd, 0x0F14, 0x0008);
		CHECK_ERR(err);
		err = s5c73m3_i2c_write(sd, 0x0F14, 0x091A);
		CHECK_ERR(err);
		err = s5c73m3_i2c_write(sd, 0x0F14, 0x0002);
		CHECK_ERR(err);
		err = s5c73m3_i2c_write(sd, 0x0F14, 0x0B10);
		CHECK_ERR(err);
		err = s5c73m3_i2c_write(sd, 0x0F14, 0x8000 |
			state->capture->reg_val | state->preview->reg_val);
		CHECK_ERR(err);
		err = s5c73m3_i2c_write(sd, 0x0054, 0x5080);
		CHECK_ERR(err);
		err = s5c73m3_i2c_write(sd, 0x0F14, 0x0003);
		CHECK_ERR(err);

		err = s5c73m3_s_stream_sensor(sd, enable);
		err = s5c73m3_set_auto_bracket_mode(sd);
	} else {
		err = s5c73m3_s_stream_sensor(sd, enable);
	}

	cam_trace("X\n");
	return 0;
}

static int s5c73m3_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct s5c73m3_state *state = to_state(sd);
	int err;

	cam_trace("E\n");

	if (unlikely(state->isp.bad_fw)) {
		cam_err("\"Unknown\" state, please update F/W");
		return -ENOSYS;
	}

	switch (enable) {
	case STREAM_MODE_CAM_ON:
	case STREAM_MODE_CAM_OFF:
		switch (state->format_mode) {
		case V4L2_PIX_FMT_MODE_CAPTURE:
			cam_info("capture %s",
				enable == STREAM_MODE_CAM_ON ? "on" : "off");
			s5c73m3_s_stream_sensor(sd, enable);
			if (enable == STREAM_MODE_CAM_ON &&
				(state->focus.mode >=
					FOCUS_MODE_CONTINOUS &&
				state->focus.mode <=
					FOCUS_MODE_CONTINOUS_VIDEO)) {
				s5c73m3_set_af_mode(sd,
					state->focus.mode);
			}
			break;
		default:
			cam_info("preview %s",
				enable == STREAM_MODE_CAM_ON ?
"on" : "off"); if (state->hdr_mode) { err = s5c73m3_set_flash(sd, FLASH_MODE_OFF, 0); err = s5c73m3_s_stream_hdr(sd, enable); } else { err = s5c73m3_s_stream_sensor(sd, enable); if (enable == STREAM_MODE_CAM_ON && (state->focus.mode >= FOCUS_MODE_CONTINOUS && state->focus.mode <= FOCUS_MODE_CONTINOUS_VIDEO)) { s5c73m3_set_af_mode(sd, state->focus.mode); } } break; } break; case STREAM_MODE_MOVIE_ON: if (state->flash_mode != FLASH_MODE_OFF) err = s5c73m3_set_flash(sd, state->flash_mode, 1); if (state->preview->index == S5C73M3_PREVIEW_720P || state->preview->index == S5C73M3_PREVIEW_1080P) err = s5c73m3_set_af(sd, 1); break; case STREAM_MODE_MOVIE_OFF: if (state->preview->index == S5C73M3_PREVIEW_720P || state->preview->index == S5C73M3_PREVIEW_1080P) err = s5c73m3_set_af(sd, 0); s5c73m3_set_flash(sd, FLASH_MODE_OFF, 1); break; default: cam_err("invalid stream option, %d\n", enable); break; } #if 0 err = s5c73m3_writeb(sd, S5C73M3_AF_CAL, 0); CHECK_ERR(err); #endif state->stream_enable = enable; if (state->stream_enable && state->hdr_mode == 0) { if (state->fps) s5c73m3_set_frame_rate(sd, state->fps); } cam_trace("X\n"); return 0; } static int s5c73m3_init_param(struct v4l2_subdev *sd) { s5c73m3_set_flash(sd, FLASH_MODE_OFF, 0); return 0; } static int s5c73m3_FROM_booting(struct v4l2_subdev *sd) { u16 read_val; int i, err; cam_trace("E\n"); /*ARM go*/ err = s5c73m3_write(sd, 0x3000, 0x0004, 0xFFFF); CHECK_ERR(err); udelay(400); /*Check boot done*/ for (i = 0; i < 4; i++) { err = s5c73m3_read(sd, 0x3010, 0x0010, &read_val); CHECK_ERR(err); if (read_val == 0x0C) break; udelay(100); } if (read_val != 0x0C) { cam_err("boot fail, read_val %#x\n", read_val); return -1; } /*P,M,S and Boot Mode*/ err = s5c73m3_write(sd, 0x3100, 0x010C, 0x0044); CHECK_ERR(err); err = s5c73m3_write(sd, 0x3100, 0x0108, 0x000D); CHECK_ERR(err); err = s5c73m3_write(sd, 0x3100, 0x0304, 0x0001); CHECK_ERR(err); err = s5c73m3_write(sd, 0x0001, 0x0000, 0x5800); CHECK_ERR(err); err = s5c73m3_write(sd, 0x0001, 0x0002, 0x0002); CHECK_ERR(err); err = s5c73m3_write(sd, 0x3100, 0x0000, 0x0001); CHECK_ERR(err); err = s5c73m3_write(sd, 0x3010, 0x0014, 0x1B85); CHECK_ERR(err); err = s5c73m3_write(sd, 0x3010, 0x0010, 0x230C); CHECK_ERR(err); mdelay(300); /*Check binary read done*/ for (i = 0; i < 3; i++) { err = s5c73m3_read(sd, 0x3010, 0x0010, &read_val); CHECK_ERR(err); if (read_val == 0x230E) break; udelay(100); } if (read_val != 0x230E) { cam_err("binary read fail, read_val %#x\n", read_val); return -1; } /*ARM reset*/ err = s5c73m3_write(sd, 0x3000, 0x0004, 0xFFFD); CHECK_ERR(err); /*remap*/ err = s5c73m3_write(sd, 0x3010, 0x00A4, 0x0183); CHECK_ERR(err); /*ARM go again*/ err = s5c73m3_write(sd, 0x3000, 0x0004, 0xFFFF); CHECK_ERR(err); cam_trace("X\n"); return 0; } static int s5c73m3_SPI_booting(struct v4l2_subdev *sd) { u16 read_val; int i, err; /*ARM go*/ err = s5c73m3_write(sd, 0x3000, 0x0004, 0xFFFF); CHECK_ERR(err); udelay(400); /*Check boot done*/ for (i = 0; i < 3; i++) { err = s5c73m3_read(sd, 0x3010, 0x0010, &read_val); CHECK_ERR(err); if (read_val == 0x0C) break; udelay(100); } if (read_val != 0x0C) { cam_err("boot fail, read_val %#x\n", read_val); return -1; } /*P,M,S and Boot Mode*/ err = s5c73m3_write(sd, 0x3010, 0x0014, 0x2146); CHECK_ERR(err); err = s5c73m3_write(sd, 0x3010, 0x0010, 0x210C); CHECK_ERR(err); udelay(200); /*Check SPI ready*/ for (i = 0; i < 3; i++) { err = s5c73m3_read(sd, 0x3010, 0x0010, &read_val); CHECK_ERR(err); if (read_val == 0x210D) break; udelay(100); } if (read_val != 0x210D) { cam_err("SPI 
not ready, read_val %#x\n", read_val); return -1; } /*download fw by SPI*/ s5c73m3_load_fw(sd); /*ARM reset*/ err = s5c73m3_write(sd, 0x3000, 0x0004, 0xFFFD); CHECK_ERR(err); /*remap*/ err = s5c73m3_write(sd, 0x3010, 0x00A4, 0x0183); CHECK_ERR(err); /*ARM go again*/ err = s5c73m3_write(sd, 0x3000, 0x0004, 0xFFFF); CHECK_ERR(err); return 0; } static int s5c73m3_read_vdd_core(struct v4l2_subdev *sd) { struct s5c73m3_state *state = to_state(sd); u8 *buf = NULL; u16 read_val; u32 vdd_core_val = 0; int err; struct file *fp; mm_segment_t old_fs; cam_trace("E\n"); /*Initialize OTP Controller*/ err = s5c73m3_write(sd, 0x3800, 0xA004, 0x0000); CHECK_ERR(err); err = s5c73m3_write(sd, 0x3800, 0xA000, 0x0004); CHECK_ERR(err); err = s5c73m3_write(sd, 0x3800, 0xA0D8, 0x0000); CHECK_ERR(err); err = s5c73m3_write(sd, 0x3800, 0xA0DC, 0x0004); CHECK_ERR(err); err = s5c73m3_write(sd, 0x3800, 0xA0C4, 0x4000); CHECK_ERR(err); err = s5c73m3_write(sd, 0x3800, 0xA0D4, 0x0015); CHECK_ERR(err); err = s5c73m3_write(sd, 0x3800, 0xA000, 0x0001); CHECK_ERR(err); err = s5c73m3_write(sd, 0x3800, 0xA0B4, 0x9F90); CHECK_ERR(err); err = s5c73m3_write(sd, 0x3800, 0xA09C, 0x9A95); CHECK_ERR(err); /*Page Select*/ err = s5c73m3_write(sd, 0x3800, 0xA0C4, 0x4800); CHECK_ERR(err); err = s5c73m3_write(sd, 0x3800, 0xA0C4, 0x4400); CHECK_ERR(err); err = s5c73m3_write(sd, 0x3800, 0xA0C4, 0x4200); CHECK_ERR(err); err = s5c73m3_write(sd, 0x3800, 0xA004, 0x00C0); CHECK_ERR(err); err = s5c73m3_write(sd, 0x3800, 0xA000, 0x0001); CHECK_ERR(err); #if 0 /*read_val should be 0x7383*/ err = s5c73m3_read(sd, 0x0000, 0x131C, &read_val); CHECK_ERR(err); cam_dbg("read_val %#x\n", read_val); #endif /*Read Data*/ err = s5c73m3_read(sd, 0x3800, 0xA034, &read_val); CHECK_ERR(err); cam_dbg("read_val %#x\n", read_val); err = s5c73m3_read(sd, 0x3800, 0xA040, &isp_chip_info1); CHECK_ERR(err); err = s5c73m3_read(sd, 0x3800, 0xA044, &isp_chip_info2); CHECK_ERR(err); err = s5c73m3_read(sd, 0x3800, 0xA048, &isp_chip_info3); CHECK_ERR(err); /*Read Data End*/ err = s5c73m3_write(sd, 0x3800, 0xA000, 0x0000); CHECK_ERR(err); if (read_val & 0x200) { strcpy(sysfs_isp_core, "1.15V"); state->pdata->set_vdd_core(1150000); vdd_core_val = 1150000; } else if (read_val & 0x800) { strcpy(sysfs_isp_core, "1.10V"); #ifdef CONFIG_MACH_M3 state->pdata->set_vdd_core(1150000); vdd_core_val = 1150000; #else state->pdata->set_vdd_core(1100000); vdd_core_val = 1100000; #endif } else if (read_val & 0x2000) { strcpy(sysfs_isp_core, "1.05V"); state->pdata->set_vdd_core(1100000); vdd_core_val = 1100000; } else if (read_val & 0x8000) { strcpy(sysfs_isp_core, "1.00V"); state->pdata->set_vdd_core(1000000); vdd_core_val = 1000000; } else { strcpy(sysfs_isp_core, "1.15V"); state->pdata->set_vdd_core(1150000); vdd_core_val = 1150000; } old_fs = get_fs(); set_fs(KERNEL_DS); fp = filp_open(S5C73M3_CORE_VDD, O_WRONLY|O_CREAT|O_TRUNC, 0644); if (IS_ERR(fp)) goto out; buf = vmalloc(10); if (!buf) { cam_err("failed to allocate memory\n"); err = -ENOMEM; goto out; } sprintf(buf, "%d\n", vdd_core_val); err = vfs_write(fp, (char __user *)buf, 10, &fp->f_pos); /*cam_dbg("return value of vfs_write = %d\n", err);*/ out: if (buf != NULL) vfree(buf); if (fp != NULL) filp_close(fp, current->files); set_fs(old_fs); cam_trace("X\n"); return 0; } static int s5c73m3_set_timing_register_for_vdd(struct v4l2_subdev *sd) { int err = 0; err = s5c73m3_write(sd, 0x3010, 0x0018, 0x0618); CHECK_ERR(err); err = s5c73m3_write(sd, 0x3010, 0x001C, 0x10C1); CHECK_ERR(err); err = s5c73m3_write(sd, 0x3010, 0x0020, 0x249E); 
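/* 0x0618, 0x10C1 and 0x249E look like fixed I/O timing parameters chosen
   for the ISP core rail; every boot path (s5c73m3_reset_module() and
   s5c73m3_init()) applies them before firmware is loaded. */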
	CHECK_ERR(err);

	return err;
}

static int s5c73m3_init(struct v4l2_subdev *sd, u32 val)
{
	struct s5c73m3_state *state = to_state(sd);
	int err = 0;
	int retVal = 0;

	sd_internal = sd;

	/* Default state values */
	state->isp.bad_fw = 1;

	state->preview = NULL;
	state->capture = NULL;

	state->fw_index = S5C73M3_PATH_MAX;

	state->format_mode = V4L2_PIX_FMT_MODE_PREVIEW;
	state->sensor_mode = SENSOR_CAMERA;
	state->flash_mode = FLASH_MODE_OFF;
	state->wb_mode = WHITE_BALANCE_AUTO;
	state->focus.mode = FOCUS_MODE_CONTINOUS_PICTURE;
	state->focus.touch = 0;

	state->fps = 0;			/* auto */

	/* note: this memset also clears the focus mode set just above */
	memset(&state->focus, 0, sizeof(state->focus));

	if (!state->pdata->is_vdd_core_set())
		s5c73m3_read_vdd_core(sd);

	cam_dbg("vdd core value from OTP : %s\n", sysfs_isp_core);
	cam_dbg("chip info from OTP : %#x, %#x, %#x\n",
		isp_chip_info1, isp_chip_info2, isp_chip_info3);

#ifdef S5C73M3_FROM_BOOTING
	err = s5c73m3_FROM_booting(sd);
#else
	err = s5c73m3_set_timing_register_for_vdd(sd);
	CHECK_ERR(err);

	err = s5c73m3_check_fw(sd, 0);
	if (err < 0) {
		cam_dbg("isp.bad_fw is true\n");
		state->isp.bad_fw = 1;
	}
#endif
	CHECK_ERR(err);

	err = s5c73m3_i2c_check_status_with_CRC(sd);
	if (err < 0) {
		cam_err("ISP is not ready. retry loading fw!!\n");

		/* retry */
		retVal = s5c73m3_check_fw_date(sd);

		/*
		 * retVal = 0 : same version
		 * retVal < 0 : phone FW is newer than the sensor FW
		 * retVal > 0 : sensor FW is newer than the phone FW
		 */
		if (retVal <= 0) {
			cam_dbg("Loading From PhoneFW......\n");
			err = s5c73m3_reset_module(sd, false);
			CHECK_ERR(err);
			err = s5c73m3_SPI_booting(sd);
			CHECK_ERR(err);
		} else {
			cam_dbg("Loading From SensorFW......\n");
			err = s5c73m3_reset_module(sd, true);
			CHECK_ERR(err);
			err = s5c73m3_get_sensor_fw_binary(sd);
			CHECK_ERR(err);
		}
	}

	state->isp.bad_fw = 0;

	s5c73m3_init_param(sd);

	return 0;
}

static const struct v4l2_subdev_core_ops s5c73m3_core_ops = {
	.init = s5c73m3_init,		/* initializing API */
	.load_fw = s5c73m3_load_fw,
	.queryctrl = s5c73m3_queryctrl,
	.g_ctrl = s5c73m3_g_ctrl,
	.s_ctrl = s5c73m3_s_ctrl,
	.g_ext_ctrls = s5c73m3_g_ext_ctrls,
};

static const struct v4l2_subdev_video_ops s5c73m3_video_ops = {
	.s_mbus_fmt = s5c73m3_s_fmt,
	.g_parm = s5c73m3_g_parm,
	.s_parm = s5c73m3_s_parm,
	.enum_framesizes = s5c73m3_enum_framesizes,
	.s_stream = s5c73m3_s_stream,
};

static const struct v4l2_subdev_ops s5c73m3_ops = {
	.core = &s5c73m3_core_ops,
	.video = &s5c73m3_video_ops,
};

static ssize_t s5c73m3_camera_rear_camtype_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	char type[25];

	strcpy(type, sysfs_sensor_type);
	return sprintf(buf, "%s\n", type);
}

static ssize_t s5c73m3_camera_rear_camfw_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s %s\n", sysfs_sensor_fw, sysfs_phone_fw);
}

static ssize_t s5c73m3_camera_rear_flash(struct device *dev,
	struct device_attribute *attr, const char *buf,
	size_t count)
{
#ifdef CONFIG_LEDS_AAT1290A
	return aat1290a_power(dev, attr, buf, count);
#else
	return count;
#endif
}

static ssize_t s5c73m3_camera_isp_core_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	char core[10];

	strcpy(core, sysfs_isp_core);
	return sprintf(buf, "%s\n", core);
}

static DEVICE_ATTR(rear_camtype, S_IRUGO,
	s5c73m3_camera_rear_camtype_show, NULL);
static DEVICE_ATTR(rear_camfw, S_IRUGO,
	s5c73m3_camera_rear_camfw_show, NULL);
static DEVICE_ATTR(rear_flash, S_IWUSR|S_IWGRP|S_IROTH,
	NULL, s5c73m3_camera_rear_flash);
static DEVICE_ATTR(isp_core, S_IRUGO,
	s5c73m3_camera_isp_core_show, NULL);

/*
 * s5c73m3_probe
 * Fetching platform data is being
done with s_config subdev call. * In probe routine, we just register subdev device */ static int __devinit s5c73m3_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct s5c73m3_state *state; struct v4l2_subdev *sd; state = kzalloc(sizeof(struct s5c73m3_state), GFP_KERNEL); if (state == NULL) return -ENOMEM; sd = &state->sd; strcpy(sd->name, S5C73M3_DRIVER_NAME); state->pdata = client->dev.platform_data; /* Registering subdev */ v4l2_i2c_subdev_init(sd, client, &s5c73m3_ops); #ifdef CAM_DEBUG state->dbg_level = CAM_DEBUG; #endif #ifdef S5C73M3_BUSFREQ_OPP /* lock bus frequency */ if (samsung_rev() >= EXYNOS4412_REV_2_0) dev_lock(bus_dev, s5c73m3_dev, 440220); else dev_lock(bus_dev, s5c73m3_dev, 400200); #endif if (s5c73m3_dev) dev_set_drvdata(s5c73m3_dev, state); printk(KERN_DEBUG "%s\n", __func__); return 0; } static int __devexit s5c73m3_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct s5c73m3_state *state = to_state(sd); if (unlikely(state->isp.bad_fw)) { cam_err("camera is not ready!!\n"); } else { if (s5c73m3_set_af_softlanding(sd) < 0) cam_err("failed to set soft landing\n"); } v4l2_device_unregister_subdev(sd); #ifdef S5C73M3_BUSFREQ_OPP /* Unlock bus frequency */ dev_unlock(bus_dev, s5c73m3_dev); #endif kfree(state); return 0; } static const struct i2c_device_id s5c73m3_id[] = { { S5C73M3_DRIVER_NAME, 0 }, { } }; MODULE_DEVICE_TABLE(i2c, s5c73m3_id); static struct i2c_driver s5c73m3_i2c_driver = { .driver = { .name = S5C73M3_DRIVER_NAME, }, .probe = s5c73m3_probe, .remove = __devexit_p(s5c73m3_remove), .id_table = s5c73m3_id, }; static int __init s5c73m3_mod_init(void) { #ifdef S5C73M3_BUSFREQ_OPP /* To lock bus frequency in OPP mode */ bus_dev = dev_get("exynos-busfreq"); #endif if (!s5c73m3_dev) { s5c73m3_dev = device_create(camera_class, NULL, 0, NULL, "rear"); if (IS_ERR(s5c73m3_dev)) { cam_warn("failed to create device!\n"); return 0; } if (device_create_file(s5c73m3_dev, &dev_attr_rear_camtype) < 0) { cam_warn("failed to create device file, %s\n", dev_attr_rear_camtype.attr.name); } if (device_create_file(s5c73m3_dev, &dev_attr_rear_camfw) < 0) { cam_warn("failed to create device file, %s\n", dev_attr_rear_camfw.attr.name); } if (device_create_file(s5c73m3_dev, &dev_attr_rear_flash) < 0) { cam_warn("failed to create device file, %s\n", dev_attr_rear_flash.attr.name); } if (device_create_file(s5c73m3_dev, &dev_attr_isp_core) < 0) { cam_warn("failed to create device file, %s\n", dev_attr_isp_core.attr.name); } } return i2c_add_driver(&s5c73m3_i2c_driver); } static void __exit s5c73m3_mod_exit(void) { i2c_del_driver(&s5c73m3_i2c_driver); } module_init(s5c73m3_mod_init); module_exit(s5c73m3_mod_exit); MODULE_DESCRIPTION("driver for LSI S5C73M3"); MODULE_LICENSE("GPL");
gpl-2.0
AshishNamdev/linux
tools/testing/selftests/media_tests/media_device_open.c
301
1909
/* * media_device_open.c - Media Controller Device Open Test * * Copyright (c) 2016 Shuah Khan <shuahkh@osg.samsung.com> * Copyright (c) 2016 Samsung Electronics Co., Ltd. * * This file is released under the GPLv2. */ /* * This file adds a test for the Media Controller API. * This test should be run as root and should not be * included in the Kselftest run. This test should be * run when hardware and a driver that make use of the Media * Controller API are present in the system. * * This test opens the user-specified Media Device and calls * the MEDIA_IOC_DEVICE_INFO ioctl, closes the file, and exits. * * Usage: * sudo ./media_device_open -d /dev/mediaX * * Run this test in a loop and run bind/unbind on the driver. */ #include <stdio.h> #include <unistd.h> #include <stdlib.h> #include <errno.h> #include <string.h> #include <fcntl.h> #include <sys/ioctl.h> #include <sys/stat.h> #include <linux/media.h> int main(int argc, char **argv) { int opt; char media_device[256]; int count = 0; struct media_device_info mdi; int ret; int fd; if (argc < 2) { printf("Usage: %s [-d </dev/mediaX>]\n", argv[0]); exit(-1); } /* Process arguments */ while ((opt = getopt(argc, argv, "d:")) != -1) { switch (opt) { case 'd': strncpy(media_device, optarg, sizeof(media_device) - 1); media_device[sizeof(media_device)-1] = '\0'; break; default: printf("Usage: %s [-d </dev/mediaX>]\n", argv[0]); exit(-1); } } if (getuid() != 0) { printf("Please run the test as root - Exiting.\n"); exit(-1); } /* Open Media device and keep it open */ fd = open(media_device, O_RDWR); if (fd == -1) { printf("Media Device open errno %s\n", strerror(errno)); exit(-1); } ret = ioctl(fd, MEDIA_IOC_DEVICE_INFO, &mdi); if (ret < 0) printf("Media Device Info errno %s\n", strerror(errno)); else printf("Media device model %s driver %s\n", mdi.model, mdi.driver); }
gpl-2.0
edowar/FIH-msm7x30-ics
net/ipv4/netfilter/ipt_NETMAP.c
813
2741
/* NETMAP - static NAT mapping of IP network addresses (1:1). * The mapping can be applied to source (POSTROUTING), * destination (PREROUTING), or both (with separate rules). */ /* (C) 2000-2001 Svenning Soerensen <svenning@post5.tele.dk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/ip.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv4.h> #include <linux/netfilter/x_tables.h> #include <net/netfilter/nf_nat_rule.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Svenning Soerensen <svenning@post5.tele.dk>"); MODULE_DESCRIPTION("Xtables: 1:1 NAT mapping of IPv4 subnets"); static bool netmap_tg_check(const struct xt_tgchk_param *par) { const struct nf_nat_multi_range_compat *mr = par->targinfo; if (!(mr->range[0].flags & IP_NAT_RANGE_MAP_IPS)) { pr_debug("NETMAP:check: bad MAP_IPS.\n"); return false; } if (mr->rangesize != 1) { pr_debug("NETMAP:check: bad rangesize %u.\n", mr->rangesize); return false; } return true; } static unsigned int netmap_tg(struct sk_buff *skb, const struct xt_target_param *par) { struct nf_conn *ct; enum ip_conntrack_info ctinfo; __be32 new_ip, netmask; const struct nf_nat_multi_range_compat *mr = par->targinfo; struct nf_nat_range newrange; NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING || par->hooknum == NF_INET_POST_ROUTING || par->hooknum == NF_INET_LOCAL_OUT); ct = nf_ct_get(skb, &ctinfo); netmask = ~(mr->range[0].min_ip ^ mr->range[0].max_ip); if (par->hooknum == NF_INET_PRE_ROUTING || par->hooknum == NF_INET_LOCAL_OUT) new_ip = ip_hdr(skb)->daddr & ~netmask; else new_ip = ip_hdr(skb)->saddr & ~netmask; new_ip |= mr->range[0].min_ip & netmask; newrange = ((struct nf_nat_range) { mr->range[0].flags | IP_NAT_RANGE_MAP_IPS, new_ip, new_ip, mr->range[0].min, mr->range[0].max }); /* Hand modified range to generic setup. */ return nf_nat_setup_info(ct, &newrange, HOOK2MANIP(par->hooknum)); } static struct xt_target netmap_tg_reg __read_mostly = { .name = "NETMAP", .family = NFPROTO_IPV4, .target = netmap_tg, .targetsize = sizeof(struct nf_nat_multi_range_compat), .table = "nat", .hooks = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_POST_ROUTING) | (1 << NF_INET_LOCAL_OUT), .checkentry = netmap_tg_check, .me = THIS_MODULE }; static int __init netmap_tg_init(void) { return xt_register_target(&netmap_tg_reg); } static void __exit netmap_tg_exit(void) { xt_unregister_target(&netmap_tg_reg); } module_init(netmap_tg_init); module_exit(netmap_tg_exit);
gpl-2.0
stuffandthings/linux
drivers/staging/rtl8723au/hal/hal_com.c
813
23519
/****************************************************************************** * * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * ******************************************************************************/ #include <osdep_service.h> #include <drv_types.h> #include <hal_intf.h> #include <hal_com.h> #include <rtl8723a_hal.h> #include <usb_ops_linux.h> #define _HAL_INIT_C_ #define EEPROM_CHANNEL_PLAN_BY_HW_MASK 0x80 /* return the final channel plan decision */ /* hw_channel_plan: channel plan from HW (efuse/eeprom) */ /* sw_channel_plan: channel plan from SW (registry/module param) */ /* def_channel_plan: channel plan used when the former two is invalid */ u8 hal_com_get_channel_plan23a(struct rtw_adapter *padapter, u8 hw_channel_plan, u8 sw_channel_plan, u8 def_channel_plan, bool AutoLoadFail) { u8 swConfig; u8 chnlPlan; swConfig = true; if (!AutoLoadFail) { if (!rtw_is_channel_plan_valid(sw_channel_plan)) swConfig = false; if (hw_channel_plan & EEPROM_CHANNEL_PLAN_BY_HW_MASK) swConfig = false; } if (swConfig == true) chnlPlan = sw_channel_plan; else chnlPlan = hw_channel_plan & (~EEPROM_CHANNEL_PLAN_BY_HW_MASK); if (!rtw_is_channel_plan_valid(chnlPlan)) chnlPlan = def_channel_plan; return chnlPlan; } u8 MRateToHwRate23a(u8 rate) { u8 ret = DESC_RATE1M; switch (rate) { /* CCK and OFDM non-HT rates */ case IEEE80211_CCK_RATE_1MB: ret = DESC_RATE1M; break; case IEEE80211_CCK_RATE_2MB: ret = DESC_RATE2M; break; case IEEE80211_CCK_RATE_5MB: ret = DESC_RATE5_5M; break; case IEEE80211_CCK_RATE_11MB: ret = DESC_RATE11M; break; case IEEE80211_OFDM_RATE_6MB: ret = DESC_RATE6M; break; case IEEE80211_OFDM_RATE_9MB: ret = DESC_RATE9M; break; case IEEE80211_OFDM_RATE_12MB: ret = DESC_RATE12M; break; case IEEE80211_OFDM_RATE_18MB: ret = DESC_RATE18M; break; case IEEE80211_OFDM_RATE_24MB: ret = DESC_RATE24M; break; case IEEE80211_OFDM_RATE_36MB: ret = DESC_RATE36M; break; case IEEE80211_OFDM_RATE_48MB: ret = DESC_RATE48M; break; case IEEE80211_OFDM_RATE_54MB: ret = DESC_RATE54M; break; /* HT rates since here */ /* case MGN_MCS0: ret = DESC_RATEMCS0; break; */ /* case MGN_MCS1: ret = DESC_RATEMCS1; break; */ /* case MGN_MCS2: ret = DESC_RATEMCS2; break; */ /* case MGN_MCS3: ret = DESC_RATEMCS3; break; */ /* case MGN_MCS4: ret = DESC_RATEMCS4; break; */ /* case MGN_MCS5: ret = DESC_RATEMCS5; break; */ /* case MGN_MCS6: ret = DESC_RATEMCS6; break; */ /* case MGN_MCS7: ret = DESC_RATEMCS7; break; */ default: break; } return ret; } void HalSetBrateCfg23a(struct rtw_adapter *padapter, u8 *mBratesOS) { struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter); u8 i, is_brate, brate; u16 brate_cfg = 0; u8 rate_index; for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) { is_brate = mBratesOS[i] & IEEE80211_BASIC_RATE_MASK; brate = mBratesOS[i] & 0x7f; if (is_brate) { switch (brate) { case IEEE80211_CCK_RATE_1MB: brate_cfg |= RATE_1M; break; case IEEE80211_CCK_RATE_2MB: brate_cfg |= RATE_2M; break; case IEEE80211_CCK_RATE_5MB: brate_cfg |= RATE_5_5M; break; case IEEE80211_CCK_RATE_11MB: brate_cfg |= RATE_11M; break; case IEEE80211_OFDM_RATE_6MB: brate_cfg |= RATE_6M; 
break; case IEEE80211_OFDM_RATE_9MB: brate_cfg |= RATE_9M; break; case IEEE80211_OFDM_RATE_12MB: brate_cfg |= RATE_12M; break; case IEEE80211_OFDM_RATE_18MB: brate_cfg |= RATE_18M; break; case IEEE80211_OFDM_RATE_24MB: brate_cfg |= RATE_24M; break; case IEEE80211_OFDM_RATE_36MB: brate_cfg |= RATE_36M; break; case IEEE80211_OFDM_RATE_48MB: brate_cfg |= RATE_48M; break; case IEEE80211_OFDM_RATE_54MB: brate_cfg |= RATE_54M; break; } } } /* 2007.01.16, by Emily */ /* Select RRSR (in Legacy-OFDM and CCK) */ /* For 8190, we select only 24M, 12M, 6M, 11M, 5.5M, 2M, and 1M from the Basic rate. */ /* We do not use other rates. */ /* 2011.03.30 add by Luke Lee */ /* CCK 2M ACK should be disabled for some BCM and Atheros AP IOT */ /* because CCK 2M has poor TXEVM */ /* CCK 5.5M & 11M ACK should be enabled for better performance */ brate_cfg = (brate_cfg | 0xd) & 0x15d; pHalData->BasicRateSet = brate_cfg; brate_cfg |= 0x01; /* default enable 1M ACK rate */ DBG_8723A("HW_VAR_BASIC_RATE: BrateCfg(%#x)\n", brate_cfg); /* Set RRSR rate table. */ rtl8723au_write8(padapter, REG_RRSR, brate_cfg & 0xff); rtl8723au_write8(padapter, REG_RRSR + 1, (brate_cfg >> 8) & 0xff); rtl8723au_write8(padapter, REG_RRSR + 2, rtl8723au_read8(padapter, REG_RRSR + 2) & 0xf0); rate_index = 0; /* Set RTS initial rate */ while (brate_cfg > 0x1) { brate_cfg >>= 1; rate_index++; } /* Ziv - Check */ rtl8723au_write8(padapter, REG_INIRTS_RATE_SEL, rate_index); } static void _OneOutPipeMapping(struct rtw_adapter *pAdapter) { struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(pAdapter); pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0]; /* VO */ pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0]; /* VI */ pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[0]; /* BE */ pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[0]; /* BK */ pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0]; /* BCN */ pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0]; /* MGT */ pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0]; /* HIGH */ pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0]; /* TXCMD */ } static void _TwoOutPipeMapping(struct rtw_adapter *pAdapter, bool bWIFICfg) { struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(pAdapter); if (bWIFICfg) { /* WMM */ /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */ /* 0, 1, 0, 1, 0, 0, 0, 0, 0 }; */ /* 0:H, 1:L */ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[1]; /* VO */ pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0]; /* VI */ pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[1]; /* BE */ pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[0]; /* BK */ pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0]; /* BCN */ pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0]; /* MGT */ pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0]; /* HIGH */ pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0]; /* TXCMD*/ } else { /* typical setting */ /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */ /* 1, 1, 0, 0, 0, 0, 0, 0, 0 }; */ /* 0:H, 1:L */ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0]; /* VO */ pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0]; /* VI */ pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[1]; /* BE */ pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[1]; /* BK */ pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0]; /* BCN */ pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0]; /* MGT */ pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0]; /* HIGH */ pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0]; /* TXCMD*/ } } static void _ThreeOutPipeMapping(struct rtw_adapter 
*pAdapter, bool bWIFICfg) { struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(pAdapter); if (bWIFICfg) { /* for WMM */ /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */ /* 1, 2, 1, 0, 0, 0, 0, 0, 0 }; */ /* 0:H, 1:N, 2:L */ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0]; /* VO */ pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[1]; /* VI */ pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[2]; /* BE */ pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[1]; /* BK */ pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0]; /* BCN */ pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0]; /* MGT */ pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0]; /* HIGH */ pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0]; /* TXCMD*/ } else { /* typical setting */ /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */ /* 2, 2, 1, 0, 0, 0, 0, 0, 0 }; */ /* 0:H, 1:N, 2:L */ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0]; /* VO */ pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[1]; /* VI */ pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[2]; /* BE */ pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[2]; /* BK */ pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0]; /* BCN */ pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0]; /* MGT */ pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0]; /* HIGH */ pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0]; /* TXCMD*/ } } bool Hal_MappingOutPipe23a(struct rtw_adapter *pAdapter, u8 NumOutPipe) { struct registry_priv *pregistrypriv = &pAdapter->registrypriv; bool bWIFICfg = (pregistrypriv->wifi_spec) ? true : false; bool result = true; switch (NumOutPipe) { case 2: _TwoOutPipeMapping(pAdapter, bWIFICfg); break; case 3: _ThreeOutPipeMapping(pAdapter, bWIFICfg); break; case 1: _OneOutPipeMapping(pAdapter); break; default: result = false; break; } return result; } /* * C2H event format: * Field TRIGGER CONTENT CMD_SEQ CMD_LEN CMD_ID * BITS [127:120] [119:16] [15:8] [7:4] [3:0] */ void c2h_evt_clear23a(struct rtw_adapter *adapter) { rtl8723au_write8(adapter, REG_C2HEVT_CLEAR, C2H_EVT_HOST_CLOSE); } int c2h_evt_read23a(struct rtw_adapter *adapter, u8 *buf) { int ret = _FAIL; struct c2h_evt_hdr *c2h_evt; int i; u8 trigger; if (buf == NULL) goto exit; trigger = rtl8723au_read8(adapter, REG_C2HEVT_CLEAR); if (trigger == C2H_EVT_HOST_CLOSE) goto exit; /* Not ready */ else if (trigger != C2H_EVT_FW_CLOSE) goto clear_evt; /* Not a valid value */ c2h_evt = (struct c2h_evt_hdr *)buf; memset(c2h_evt, 0, 16); *buf = rtl8723au_read8(adapter, REG_C2HEVT_MSG_NORMAL); *(buf + 1) = rtl8723au_read8(adapter, REG_C2HEVT_MSG_NORMAL + 1); RT_PRINT_DATA(_module_hal_init_c_, _drv_info_, "c2h_evt_read23a(): ", &c2h_evt, sizeof(c2h_evt)); if (0) { DBG_8723A("%s id:%u, len:%u, seq:%u, trigger:0x%02x\n", __func__, c2h_evt->id, c2h_evt->plen, c2h_evt->seq, trigger); } /* Read the content */ for (i = 0; i < c2h_evt->plen; i++) c2h_evt->payload[i] = rtl8723au_read8(adapter, REG_C2HEVT_MSG_NORMAL + sizeof(*c2h_evt) + i); RT_PRINT_DATA(_module_hal_init_c_, _drv_info_, "c2h_evt_read23a(): Command Content:\n", c2h_evt->payload, c2h_evt->plen); ret = _SUCCESS; clear_evt: /* * Clear event to notify FW we have read the command. * If this field isn't clear, the FW won't update the * next command message. 
*/ c2h_evt_clear23a(adapter); exit: return ret; } void rtl8723a_set_ampdu_min_space(struct rtw_adapter *padapter, u8 MinSpacingToSet) { u8 SecMinSpace; if (MinSpacingToSet <= 7) { switch (padapter->securitypriv.dot11PrivacyAlgrthm) { case 0: case WLAN_CIPHER_SUITE_CCMP: SecMinSpace = 0; break; case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: case WLAN_CIPHER_SUITE_TKIP: SecMinSpace = 6; break; default: SecMinSpace = 7; break; } if (MinSpacingToSet < SecMinSpace) MinSpacingToSet = SecMinSpace; MinSpacingToSet |= rtl8723au_read8(padapter, REG_AMPDU_MIN_SPACE) & 0xf8; rtl8723au_write8(padapter, REG_AMPDU_MIN_SPACE, MinSpacingToSet); } } void rtl8723a_set_ampdu_factor(struct rtw_adapter *padapter, u8 FactorToSet) { u8 RegToSet_Normal[4] = { 0x41, 0xa8, 0x72, 0xb9 }; u8 MaxAggNum; u8 *pRegToSet; u8 index = 0; pRegToSet = RegToSet_Normal; /* 0xb972a841; */ if (rtl8723a_BT_enabled(padapter) && rtl8723a_BT_using_antenna_1(padapter)) MaxAggNum = 0x8; else MaxAggNum = 0xF; if (FactorToSet <= 3) { FactorToSet = 1 << (FactorToSet + 2); if (FactorToSet > MaxAggNum) FactorToSet = MaxAggNum; for (index = 0; index < 4; index++) { if ((pRegToSet[index] & 0xf0) > (FactorToSet << 4)) pRegToSet[index] = (pRegToSet[index] & 0x0f) | (FactorToSet << 4); if ((pRegToSet[index] & 0x0f) > FactorToSet) pRegToSet[index] = (pRegToSet[index] & 0xf0) | FactorToSet; rtl8723au_write8(padapter, REG_AGGLEN_LMT + index, pRegToSet[index]); } } } void rtl8723a_set_acm_ctrl(struct rtw_adapter *padapter, u8 ctrl) { u8 hwctrl = 0; if (ctrl != 0) { hwctrl |= AcmHw_HwEn; if (ctrl & BIT(1)) /* BE */ hwctrl |= AcmHw_BeqEn; if (ctrl & BIT(2)) /* VI */ hwctrl |= AcmHw_ViqEn; if (ctrl & BIT(3)) /* VO */ hwctrl |= AcmHw_VoqEn; } DBG_8723A("[HW_VAR_ACM_CTRL] Write 0x%02X\n", hwctrl); rtl8723au_write8(padapter, REG_ACMHWCTRL, hwctrl); } void rtl8723a_set_media_status(struct rtw_adapter *padapter, u8 status) { u8 val8; val8 = rtl8723au_read8(padapter, MSR) & 0x0c; val8 |= status; rtl8723au_write8(padapter, MSR, val8); } void rtl8723a_set_media_status1(struct rtw_adapter *padapter, u8 status) { u8 val8; val8 = rtl8723au_read8(padapter, MSR) & 0x03; val8 |= status << 2; rtl8723au_write8(padapter, MSR, val8); } void rtl8723a_set_bcn_func(struct rtw_adapter *padapter, u8 val) { if (val) SetBcnCtrlReg23a(padapter, EN_BCN_FUNCTION | EN_TXBCN_RPT, 0); else SetBcnCtrlReg23a(padapter, 0, EN_BCN_FUNCTION | EN_TXBCN_RPT); } void rtl8723a_check_bssid(struct rtw_adapter *padapter, u8 val) { u32 val32; val32 = rtl8723au_read32(padapter, REG_RCR); if (val) val32 |= RCR_CBSSID_DATA | RCR_CBSSID_BCN; else val32 &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN); rtl8723au_write32(padapter, REG_RCR, val32); } void rtl8723a_mlme_sitesurvey(struct rtw_adapter *padapter, u8 flag) { if (flag) { /* under sitesurvey */ u32 v32; /* config RCR to receive different BSSID & not to receive data frame */ v32 = rtl8723au_read32(padapter, REG_RCR); v32 &= ~(RCR_CBSSID_BCN); rtl8723au_write32(padapter, REG_RCR, v32); /* reject all data frame */ rtl8723au_write16(padapter, REG_RXFLTMAP2, 0); /* disable update TSF */ SetBcnCtrlReg23a(padapter, DIS_TSF_UDT, 0); } else { /* sitesurvey done */ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; struct mlme_ext_info *pmlmeinfo; u32 v32; pmlmeinfo = &pmlmeext->mlmext_info; if ((is_client_associated_to_ap23a(padapter) == true) || ((pmlmeinfo->state & 0x03) == MSR_ADHOC) || ((pmlmeinfo->state & 0x03) == MSR_AP)) { /* enable to rx data frame */ rtl8723au_write16(padapter, REG_RXFLTMAP2, 0xFFFF); /* enable update TSF */ 
SetBcnCtrlReg23a(padapter, 0, DIS_TSF_UDT); } v32 = rtl8723au_read32(padapter, REG_RCR); v32 |= RCR_CBSSID_BCN; rtl8723au_write32(padapter, REG_RCR, v32); } rtl8723a_BT_wifiscan_notify(padapter, flag ? true : false); } void rtl8723a_on_rcr_am(struct rtw_adapter *padapter) { rtl8723au_write32(padapter, REG_RCR, rtl8723au_read32(padapter, REG_RCR) | RCR_AM); DBG_8723A("%s, %d, RCR = %x\n", __func__, __LINE__, rtl8723au_read32(padapter, REG_RCR)); } void rtl8723a_off_rcr_am(struct rtw_adapter *padapter) { rtl8723au_write32(padapter, REG_RCR, rtl8723au_read32(padapter, REG_RCR) & (~RCR_AM)); DBG_8723A("%s, %d, RCR = %x\n", __func__, __LINE__, rtl8723au_read32(padapter, REG_RCR)); } void rtl8723a_set_slot_time(struct rtw_adapter *padapter, u8 slottime) { u8 u1bAIFS, aSifsTime; struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info; rtl8723au_write8(padapter, REG_SLOT, slottime); if (pmlmeinfo->WMM_enable == 0) { if (pmlmeext->cur_wireless_mode == WIRELESS_11B) aSifsTime = 10; else aSifsTime = 16; u1bAIFS = aSifsTime + (2 * pmlmeinfo->slotTime); /* <Roger_EXP> Temporarily removed, 2008.06.20. */ rtl8723au_write8(padapter, REG_EDCA_VO_PARAM, u1bAIFS); rtl8723au_write8(padapter, REG_EDCA_VI_PARAM, u1bAIFS); rtl8723au_write8(padapter, REG_EDCA_BE_PARAM, u1bAIFS); rtl8723au_write8(padapter, REG_EDCA_BK_PARAM, u1bAIFS); } } void rtl8723a_ack_preamble(struct rtw_adapter *padapter, u8 bShortPreamble) { struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter); u8 regTmp; /* Joseph marked out for Netgear 3500 TKIP channel 7 issue.(Temporarily) */ regTmp = (pHalData->nCur40MhzPrimeSC) << 5; /* regTmp = 0; */ if (bShortPreamble) regTmp |= 0x80; rtl8723au_write8(padapter, REG_RRSR + 2, regTmp); } void rtl8723a_set_sec_cfg(struct rtw_adapter *padapter, u8 sec) { rtl8723au_write8(padapter, REG_SECCFG, sec); } void rtl8723a_cam_empty_entry(struct rtw_adapter *padapter, u8 ucIndex) { u8 i; u32 ulCommand = 0; u32 ulContent = 0; u32 ulEncAlgo = CAM_AES; for (i = 0; i < CAM_CONTENT_COUNT; i++) { /* filled id in CAM config 2 byte */ if (i == 0) { ulContent |= (ucIndex & 0x03) | ((u16) (ulEncAlgo) << 2); /* ulContent |= CAM_VALID; */ } else { ulContent = 0; } /* polling bit, and No Write enable, and address */ ulCommand = CAM_CONTENT_COUNT * ucIndex + i; ulCommand = ulCommand | CAM_POLLINIG | CAM_WRITE; /* write content 0 is equal to mark invalid */ /* delay_ms(40); */ rtl8723au_write32(padapter, WCAMI, ulContent); /* delay_ms(40); */ rtl8723au_write32(padapter, REG_CAMCMD, ulCommand); } } void rtl8723a_cam_invalidate_all(struct rtw_adapter *padapter) { rtl8723au_write32(padapter, REG_CAMCMD, CAM_POLLINIG | BIT(30)); } void rtl8723a_cam_write(struct rtw_adapter *padapter, u8 entry, u16 ctrl, const u8 *mac, const u8 *key) { u32 cmd; unsigned int i, val, addr; int j; addr = entry << 3; for (j = 5; j >= 0; j--) { switch (j) { case 0: val = ctrl | (mac[0] << 16) | (mac[1] << 24); break; case 1: val = mac[2] | (mac[3] << 8) | (mac[4] << 16) | (mac[5] << 24); break; default: i = (j - 2) << 2; val = key[i] | (key[i+1] << 8) | (key[i+2] << 16) | (key[i+3] << 24); break; } rtl8723au_write32(padapter, WCAMI, val); cmd = CAM_POLLINIG | CAM_WRITE | (addr + j); rtl8723au_write32(padapter, REG_CAMCMD, cmd); /* DBG_8723A("%s => cam write: %x, %x\n", __func__, cmd, val);*/ } } void rtl8723a_fifo_cleanup(struct rtw_adapter *padapter) { #define RW_RELEASE_EN BIT(18) #define RXDMA_IDLE BIT(17) struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv; u8 trycnt = 100; /* pause tx
*/ rtl8723au_write8(padapter, REG_TXPAUSE, 0xff); /* keep sn */ padapter->xmitpriv.nqos_ssn = rtl8723au_read8(padapter, REG_NQOS_SEQ); if (pwrpriv->bkeepfwalive != true) { u32 v32; /* RX DMA stop */ v32 = rtl8723au_read32(padapter, REG_RXPKT_NUM); v32 |= RW_RELEASE_EN; rtl8723au_write32(padapter, REG_RXPKT_NUM, v32); do { v32 = rtl8723au_read32(padapter, REG_RXPKT_NUM) & RXDMA_IDLE; if (!v32) break; } while (trycnt--); if (trycnt == 0) DBG_8723A("Stop RX DMA failed......\n"); /* RQPN Load 0 */ rtl8723au_write16(padapter, REG_RQPN_NPQ, 0); rtl8723au_write32(padapter, REG_RQPN, 0x80000000); mdelay(10); } } void rtl8723a_bcn_valid(struct rtw_adapter *padapter) { /* BCN_VALID, BIT16 of REG_TDECTRL = BIT0 of REG_TDECTRL+2, write 1 to clear, Clear by sw */ rtl8723au_write8(padapter, REG_TDECTRL + 2, rtl8723au_read8(padapter, REG_TDECTRL + 2) | BIT(0)); } bool rtl8723a_get_bcn_valid(struct rtw_adapter *padapter) { bool retval; retval = (rtl8723au_read8(padapter, REG_TDECTRL + 2) & BIT(0)) ? true : false; return retval; } void rtl8723a_set_beacon_interval(struct rtw_adapter *padapter, u16 interval) { rtl8723au_write16(padapter, REG_BCN_INTERVAL, interval); } void rtl8723a_set_resp_sifs(struct rtw_adapter *padapter, u8 r2t1, u8 r2t2, u8 t2t1, u8 t2t2) { /* SIFS_Timer = 0x0a0a0808; */ /* RESP_SIFS for CCK */ /* SIFS_T2T_CCK (0x08) */ rtl8723au_write8(padapter, REG_R2T_SIFS, r2t1); /* SIFS_R2T_CCK(0x08) */ rtl8723au_write8(padapter, REG_R2T_SIFS + 1, r2t2); /* RESP_SIFS for OFDM */ /* SIFS_T2T_OFDM (0x0a) */ rtl8723au_write8(padapter, REG_T2T_SIFS, t2t1); /* SIFS_R2T_OFDM(0x0a) */ rtl8723au_write8(padapter, REG_T2T_SIFS + 1, t2t2); } void rtl8723a_set_ac_param_vo(struct rtw_adapter *padapter, u32 vo) { rtl8723au_write32(padapter, REG_EDCA_VO_PARAM, vo); } void rtl8723a_set_ac_param_vi(struct rtw_adapter *padapter, u32 vi) { rtl8723au_write32(padapter, REG_EDCA_VI_PARAM, vi); } void rtl8723a_set_ac_param_be(struct rtw_adapter *padapter, u32 be) { struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter); pHalData->AcParam_BE = be; rtl8723au_write32(padapter, REG_EDCA_BE_PARAM, be); } void rtl8723a_set_ac_param_bk(struct rtw_adapter *padapter, u32 bk) { rtl8723au_write32(padapter, REG_EDCA_BK_PARAM, bk); } void rtl8723a_set_rxdma_agg_pg_th(struct rtw_adapter *padapter, u8 val) { rtl8723au_write8(padapter, REG_RXDMA_AGG_PG_TH, val); } void rtl8723a_set_initial_gain(struct rtw_adapter *padapter, u32 rx_gain) { struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter); struct dig_t *pDigTable = &pHalData->odmpriv.DM_DigTable; if (rx_gain == 0xff) /* restore rx gain */ ODM_Write_DIG23a(&pHalData->odmpriv, pDigTable->BackupIGValue); else { pDigTable->BackupIGValue = pDigTable->CurIGValue; ODM_Write_DIG23a(&pHalData->odmpriv, rx_gain); } } void rtl8723a_odm_support_ability_restore(struct rtw_adapter *padapter) { struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter); pHalData->odmpriv.SupportAbility = pHalData->odmpriv.BK_SupportAbility; } void rtl8723a_odm_support_ability_backup(struct rtw_adapter *padapter) { struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter); pHalData->odmpriv.BK_SupportAbility = pHalData->odmpriv.SupportAbility; } void rtl8723a_odm_support_ability_set(struct rtw_adapter *padapter, u32 val) { struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter); if (val == DYNAMIC_ALL_FUNC_ENABLE) pHalData->odmpriv.SupportAbility = pHalData->dmpriv.InitODMFlag; else pHalData->odmpriv.SupportAbility |= val; } void rtl8723a_odm_support_ability_clr(struct rtw_adapter *padapter, u32 val) { struct 
hal_data_8723a *pHalData = GET_HAL_DATA(padapter); pHalData->odmpriv.SupportAbility &= val; } void rtl8723a_set_rpwm(struct rtw_adapter *padapter, u8 val) { rtl8723au_write8(padapter, REG_USB_HRPWM, val); } u8 rtl8723a_get_rf_type(struct rtw_adapter *padapter) { struct hal_data_8723a *pHalData = GET_HAL_DATA(padapter); return pHalData->rf_type; } bool rtl8723a_get_fwlps_rf_on(struct rtw_adapter *padapter) { bool retval; u32 valRCR; /* When we halt the NIC, we should check if FW has left LPS. */ if ((padapter->bSurpriseRemoved == true) || (padapter->pwrctrlpriv.rf_pwrstate == rf_off)) { /* If it is in HW/SW Radio OFF or IPS state, we do not check for Fw LPS leave, because the Fw is unloaded. */ retval = true; } else { valRCR = rtl8723au_read32(padapter, REG_RCR); if (valRCR & 0x00070000) retval = false; else retval = true; } return retval; } bool rtl8723a_chk_hi_queue_empty(struct rtw_adapter *padapter) { u32 hgq; hgq = rtl8723au_read32(padapter, REG_HGQ_INFORMATION); return ((hgq & 0x0000ff00) == 0) ? true : false; }
gpl-2.0
kmihelich/linux-espressobin
drivers/staging/rtl8723au/core/rtw_mlme.c
813
64934
/****************************************************************************** * * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * ******************************************************************************/ #define _RTW_MLME_C_ #include <osdep_service.h> #include <drv_types.h> #include <recv_osdep.h> #include <xmit_osdep.h> #include <hal_intf.h> #include <mlme_osdep.h> #include <sta_info.h> #include <linux/ieee80211.h> #include <wifi.h> #include <wlan_bssdef.h> #include <rtw_sreset.h> static struct wlan_network * rtw_select_candidate_from_queue(struct mlme_priv *pmlmepriv); static int rtw_do_join(struct rtw_adapter *padapter); static void rtw_init_mlme_timer(struct rtw_adapter *padapter) { struct mlme_priv *pmlmepriv = &padapter->mlmepriv; setup_timer(&pmlmepriv->assoc_timer, rtw23a_join_to_handler, (unsigned long)padapter); setup_timer(&pmlmepriv->scan_to_timer, rtw_scan_timeout_handler23a, (unsigned long)padapter); setup_timer(&pmlmepriv->dynamic_chk_timer, rtw_dynamic_check_timer_handler, (unsigned long)padapter); setup_timer(&pmlmepriv->set_scan_deny_timer, rtw_set_scan_deny_timer_hdl, (unsigned long)padapter); } int rtw_init_mlme_priv23a(struct rtw_adapter *padapter) { struct mlme_priv *pmlmepriv = &padapter->mlmepriv; pmlmepriv->nic_hdl = padapter; pmlmepriv->fw_state = 0; pmlmepriv->cur_network.network.ifmode = NL80211_IFTYPE_UNSPECIFIED; /* 1: active, 0: passive. Maybe someday we should rename this variable to "active_mode" (Jeff) */ pmlmepriv->scan_mode = SCAN_ACTIVE; spin_lock_init(&pmlmepriv->lock); _rtw_init_queue23a(&pmlmepriv->scanned_queue); memset(&pmlmepriv->assoc_ssid, 0, sizeof(struct cfg80211_ssid)); rtw_clear_scan_deny(padapter); rtw_init_mlme_timer(padapter); return _SUCCESS; } #ifdef CONFIG_8723AU_AP_MODE static void rtw_free_mlme_ie_data(u8 **ppie, u32 *plen) { if (*ppie) { kfree(*ppie); *plen = 0; *ppie = NULL; } } #endif void rtw23a_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv) { #ifdef CONFIG_8723AU_AP_MODE kfree(pmlmepriv->assoc_req); kfree(pmlmepriv->assoc_rsp); rtw_free_mlme_ie_data(&pmlmepriv->wps_probe_req_ie, &pmlmepriv->wps_probe_req_ie_len); #endif } void rtw_free_mlme_priv23a(struct mlme_priv *pmlmepriv) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, "rtw_free_mlme_priv23a\n"); rtw23a_free_mlme_priv_ie_data(pmlmepriv); } struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv, gfp_t gfp) { struct wlan_network *pnetwork; pnetwork = kzalloc(sizeof(struct wlan_network), gfp); if (pnetwork) { INIT_LIST_HEAD(&pnetwork->list); pnetwork->network_type = 0; pnetwork->fixed = false; pnetwork->last_scanned = jiffies; pnetwork->join_res = 0; } return pnetwork; } static void _rtw_free_network23a(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwork) { if (!pnetwork) return; if (pnetwork->fixed == true) return; list_del_init(&pnetwork->list); kfree(pnetwork); } /* return the wlan_network with the matching addr Shall be called under atomic context... to avoid possible race condition...
*/ struct wlan_network * rtw_find_network23a(struct rtw_queue *scanned_queue, u8 *addr) { struct list_head *phead, *plist; struct wlan_network *pnetwork = NULL; if (is_zero_ether_addr(addr)) { pnetwork = NULL; goto exit; } /* spin_lock_bh(&scanned_queue->lock); */ phead = get_list_head(scanned_queue); plist = phead->next; while (plist != phead) { pnetwork = container_of(plist, struct wlan_network, list); if (ether_addr_equal(addr, pnetwork->network.MacAddress)) break; plist = plist->next; } if (plist == phead) pnetwork = NULL; /* spin_unlock_bh(&scanned_queue->lock); */ exit: return pnetwork; } void rtw_free_network_queue23a(struct rtw_adapter *padapter) { struct list_head *phead, *plist, *ptmp; struct wlan_network *pnetwork; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct rtw_queue *scanned_queue = &pmlmepriv->scanned_queue; spin_lock_bh(&scanned_queue->lock); phead = get_list_head(scanned_queue); list_for_each_safe(plist, ptmp, phead) { pnetwork = container_of(plist, struct wlan_network, list); _rtw_free_network23a(pmlmepriv, pnetwork); } spin_unlock_bh(&scanned_queue->lock); } int rtw_if_up23a(struct rtw_adapter *padapter) { int res; if (padapter->bDriverStopped || padapter->bSurpriseRemoved || !check_fwstate(&padapter->mlmepriv, _FW_LINKED)) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, "rtw_if_up23a:bDriverStopped(%d) OR bSurpriseRemoved(%d)\n", padapter->bDriverStopped, padapter->bSurpriseRemoved); res = false; } else res = true; return res; } void rtw_generate_random_ibss23a(u8 *pibss) { unsigned long curtime = jiffies; pibss[0] = 0x02; /* in ad-hoc mode bit1 must set to 1 */ pibss[1] = 0x11; pibss[2] = 0x87; pibss[3] = curtime & 0xff;/* p[0]; */ pibss[4] = (curtime >> 8) & 0xff;/* p[1]; */ pibss[5] = (curtime >> 16) & 0xff;/* p[2]; */ } void rtw_set_roaming(struct rtw_adapter *adapter, u8 to_roaming) { if (to_roaming == 0) adapter->mlmepriv.to_join = false; adapter->mlmepriv.to_roaming = to_roaming; } static void _rtw_roaming(struct rtw_adapter *padapter, struct wlan_network *tgt_network) { struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct wlan_network *pnetwork; int do_join_r; if (tgt_network) pnetwork = tgt_network; else pnetwork = &pmlmepriv->cur_network; if (padapter->mlmepriv.to_roaming > 0) { DBG_8723A("roaming from %s(%pM), length:%d\n", pnetwork->network.Ssid.ssid, pnetwork->network.MacAddress, pnetwork->network.Ssid.ssid_len); memcpy(&pmlmepriv->assoc_ssid, &pnetwork->network.Ssid, sizeof(struct cfg80211_ssid)); pmlmepriv->assoc_by_bssid = false; while (1) { do_join_r = rtw_do_join(padapter); if (do_join_r == _SUCCESS) break; else { DBG_8723A("roaming do_join return %d\n", do_join_r); pmlmepriv->to_roaming--; if (padapter->mlmepriv.to_roaming > 0) continue; else { DBG_8723A("%s(%d) -to roaming fail, " "indicate_disconnect\n", __func__, __LINE__); rtw_indicate_disconnect23a(padapter); break; } } } } } void rtw23a_roaming(struct rtw_adapter *padapter, struct wlan_network *tgt_network) { struct mlme_priv *pmlmepriv = &padapter->mlmepriv; spin_lock_bh(&pmlmepriv->lock); _rtw_roaming(padapter, tgt_network); spin_unlock_bh(&pmlmepriv->lock); } static void rtw_free_network_nolock(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwork) { _rtw_free_network23a(pmlmepriv, pnetwork); } bool rtw_is_same_ibss23a(struct rtw_adapter *adapter, struct wlan_network *pnetwork) { int ret; struct security_priv *psecuritypriv = &adapter->securitypriv; if (psecuritypriv->dot11PrivacyAlgrthm != 0 && pnetwork->network.Privacy == 0) ret = false; else if 
(psecuritypriv->dot11PrivacyAlgrthm == 0 && pnetwork->network.Privacy == 1) ret = false; else ret = true; return ret; } inline int is_same_ess(struct wlan_bssid_ex *a, struct wlan_bssid_ex *b); inline int is_same_ess(struct wlan_bssid_ex *a, struct wlan_bssid_ex *b) { return (a->Ssid.ssid_len == b->Ssid.ssid_len) && !memcmp(a->Ssid.ssid, b->Ssid.ssid, a->Ssid.ssid_len); } int is_same_network23a(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst) { u16 s_cap, d_cap; s_cap = src->capability; d_cap = dst->capability; return ((src->Ssid.ssid_len == dst->Ssid.ssid_len) && /* (src->DSConfig == dst->DSConfig) && */ ether_addr_equal(src->MacAddress, dst->MacAddress) && !memcmp(src->Ssid.ssid, dst->Ssid.ssid, src->Ssid.ssid_len) && (s_cap & WLAN_CAPABILITY_IBSS) == (d_cap & WLAN_CAPABILITY_IBSS) && (s_cap & WLAN_CAPABILITY_ESS) == (d_cap & WLAN_CAPABILITY_ESS)); } struct wlan_network * rtw_get_oldest_wlan_network23a(struct rtw_queue *scanned_queue) { struct list_head *plist, *phead; struct wlan_network *pwlan; struct wlan_network *oldest = NULL; phead = get_list_head(scanned_queue); list_for_each(plist, phead) { pwlan = container_of(plist, struct wlan_network, list); if (pwlan->fixed != true) { if (!oldest || time_after(oldest->last_scanned, pwlan->last_scanned)) oldest = pwlan; } } return oldest; } void update_network23a(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src, struct rtw_adapter *padapter, bool update_ie) { u8 ss_ori = dst->SignalStrength; u8 sq_ori = dst->SignalQuality; long rssi_ori = dst->Rssi; u8 ss_smp = src->SignalStrength; u8 sq_smp = src->SignalQuality; long rssi_smp = src->Rssi; u8 ss_final; u8 sq_final; long rssi_final; DBG_8723A("%s %s(%pM, ch%u) ss_ori:%3u, sq_ori:%3u, rssi_ori:%3ld, " "ss_smp:%3u, sq_smp:%3u, rssi_smp:%3ld\n", __func__, src->Ssid.ssid, src->MacAddress, src->DSConfig, ss_ori, sq_ori, rssi_ori, ss_smp, sq_smp, rssi_smp ); /* The rule below is 1/5 for sample value, 4/5 for history value */ if (check_fwstate(&padapter->mlmepriv, _FW_LINKED) && is_same_network23a(&padapter->mlmepriv.cur_network.network, src)) { /* Take the recvpriv's value for the connected AP*/ ss_final = padapter->recvpriv.signal_strength; sq_final = padapter->recvpriv.signal_qual; /* the rssi value here is undecorated, and will be used for antenna diversity */ if (sq_smp != 101) /* from the right channel */ rssi_final = (src->Rssi+dst->Rssi*4)/5; else rssi_final = rssi_ori; } else { if (sq_smp != 101) { /* from the right channel */ ss_final = ((u32)src->SignalStrength + (u32)dst->SignalStrength * 4) / 5; sq_final = ((u32)src->SignalQuality + (u32)dst->SignalQuality * 4) / 5; rssi_final = src->Rssi+dst->Rssi * 4 / 5; } else { /* bss info not receiving from the right channel, use the original RX signal infos */ ss_final = dst->SignalStrength; sq_final = dst->SignalQuality; rssi_final = dst->Rssi; } } if (update_ie) memcpy(dst, src, get_wlan_bssid_ex_sz(src)); dst->SignalStrength = ss_final; dst->SignalQuality = sq_final; dst->Rssi = rssi_final; DBG_8723A("%s %s(%pM), SignalStrength:%u, SignalQuality:%u, " "RawRSSI:%ld\n", __func__, dst->Ssid.ssid, dst->MacAddress, dst->SignalStrength, dst->SignalQuality, dst->Rssi); } static void update_current_network(struct rtw_adapter *adapter, struct wlan_bssid_ex *pnetwork) { struct mlme_priv *pmlmepriv = &adapter->mlmepriv; if (check_fwstate(pmlmepriv, _FW_LINKED) && is_same_network23a(&pmlmepriv->cur_network.network, pnetwork)) { update_network23a(&pmlmepriv->cur_network.network, pnetwork, adapter, true); rtw_update_protection23a(adapter, 
pmlmepriv->cur_network.network.IEs, pmlmepriv->cur_network.network.IELength); } } /* Caller must hold pmlmepriv->lock first. */ static void rtw_update_scanned_network(struct rtw_adapter *adapter, struct wlan_bssid_ex *target) { struct list_head *plist, *phead; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; struct wlan_network *pnetwork = NULL; struct wlan_network *oldest = NULL; struct rtw_queue *queue = &pmlmepriv->scanned_queue; u32 bssid_ex_sz; int found = 0; spin_lock_bh(&queue->lock); phead = get_list_head(queue); list_for_each(plist, phead) { pnetwork = container_of(plist, struct wlan_network, list); if (is_same_network23a(&pnetwork->network, target)) { found = 1; break; } if (!oldest || time_after(oldest->last_scanned, pnetwork->last_scanned)) oldest = pnetwork; } /* If we didn't find a match, then get a new network slot to initialize * with this beacon's information */ if (!found) { pnetwork = rtw_alloc_network(pmlmepriv, GFP_ATOMIC); if (!pnetwork) { if (!oldest) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, "something wrong here\n"); goto exit; } pnetwork = oldest; } else list_add_tail(&pnetwork->list, &queue->queue); bssid_ex_sz = get_wlan_bssid_ex_sz(target); target->Length = bssid_ex_sz; memcpy(&pnetwork->network, target, bssid_ex_sz); /* variable initialize */ pnetwork->fixed = false; pnetwork->last_scanned = jiffies; pnetwork->network_type = 0; pnetwork->join_res = 0; /* bss info not receiving from the right channel */ if (pnetwork->network.SignalQuality == 101) pnetwork->network.SignalQuality = 0; } else { /* * we have an entry and we are going to update it. But * this entry may be already expired. In this case we * do the same as we found a new net and call the * new_net handler */ bool update_ie = true; pnetwork->last_scanned = jiffies; /* target.reserved == 1, means that scanned network is * a bcn frame. */ if (pnetwork->network.IELength > target->IELength && target->reserved == 1) update_ie = false; update_network23a(&pnetwork->network, target, adapter, update_ie); } exit: spin_unlock_bh(&queue->lock); } static void rtw_add_network(struct rtw_adapter *adapter, struct wlan_bssid_ex *pnetwork) { update_current_network(adapter, pnetwork); rtw_update_scanned_network(adapter, pnetwork); } /* select the desired network based on the capability of the (i)bss. */ /* check items: (1) security */ /* (2) network_type */ /* (3) WMM */ /* (4) HT */ /* (5) others */ static int rtw_is_desired_network(struct rtw_adapter *adapter, struct wlan_network *pnetwork) { struct security_priv *psecuritypriv = &adapter->securitypriv; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; u32 desired_encmode; u32 privacy; int bselected = true; desired_encmode = psecuritypriv->ndisencryptstatus; privacy = pnetwork->network.Privacy; if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS)) { if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WPA, pnetwork->network.IEs, pnetwork->network.IELength)) return true; else return false; } if (adapter->registrypriv.wifi_spec == 1) { /* for correct flow of 8021X to do.... 
*/ if (desired_encmode == Ndis802_11EncryptionDisabled && privacy != 0) bselected = false; } if (desired_encmode != Ndis802_11EncryptionDisabled && privacy == 0) { DBG_8723A("desired_encmode: %d, privacy: %d\n", desired_encmode, privacy); bselected = false; } if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) { if (pnetwork->network.ifmode != pmlmepriv->cur_network.network.ifmode) bselected = false; } return bselected; } void rtw_survey_event_cb23a(struct rtw_adapter *adapter, const u8 *pbuf) { u32 len; struct wlan_bssid_ex *pnetwork; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; struct survey_event *survey = (struct survey_event *)pbuf; pnetwork = survey->bss; RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, "rtw_survey_event_cb23a, ssid=%s\n", pnetwork->Ssid.ssid); len = get_wlan_bssid_ex_sz(pnetwork); if (len > (sizeof(struct wlan_bssid_ex))) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, "****rtw_survey_event_cb23a: return a wrong bss ***\n"); return; } spin_lock_bh(&pmlmepriv->lock); /* update IBSS_network 's timestamp */ if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) { if (ether_addr_equal(pmlmepriv->cur_network.network.MacAddress, pnetwork->MacAddress)) { struct wlan_network *ibss_wlan; pmlmepriv->cur_network.network.beacon_interval = pnetwork->beacon_interval; pmlmepriv->cur_network.network.capability = pnetwork->capability; pmlmepriv->cur_network.network.tsf = pnetwork->tsf; spin_lock_bh(&pmlmepriv->scanned_queue.lock); ibss_wlan = rtw_find_network23a( &pmlmepriv->scanned_queue, pnetwork->MacAddress); if (ibss_wlan) { pmlmepriv->cur_network.network.beacon_interval = ibss_wlan->network.beacon_interval; pmlmepriv->cur_network.network.capability = ibss_wlan->network.capability; pmlmepriv->cur_network.network.tsf = ibss_wlan->network.tsf; spin_unlock_bh(&pmlmepriv->scanned_queue.lock); goto exit; } spin_unlock_bh(&pmlmepriv->scanned_queue.lock); } } /* lock pmlmepriv->lock when you accessing network_q */ if (!check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) { if (pnetwork->Ssid.ssid[0] == 0) pnetwork->Ssid.ssid_len = 0; rtw_add_network(adapter, pnetwork); } exit: spin_unlock_bh(&pmlmepriv->lock); kfree(survey->bss); survey->bss = NULL; } void rtw_surveydone_event_callback23a(struct rtw_adapter *adapter, const u8 *pbuf) { struct mlme_priv *pmlmepriv = &adapter->mlmepriv; struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv; int ret; spin_lock_bh(&pmlmepriv->lock); if (pmlmepriv->wps_probe_req_ie) { pmlmepriv->wps_probe_req_ie_len = 0; kfree(pmlmepriv->wps_probe_req_ie); pmlmepriv->wps_probe_req_ie = NULL; } RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, "rtw_surveydone_event_callback23a: fw_state:%x\n", get_fwstate(pmlmepriv)); if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) { del_timer_sync(&pmlmepriv->scan_to_timer); _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY); } else { RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, "nic status =%x, survey done event comes too late!\n", get_fwstate(pmlmepriv)); } rtw_set_signal_stat_timer(&adapter->recvpriv); if (pmlmepriv->to_join == true) { set_fwstate(pmlmepriv, _FW_UNDER_LINKING); if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) { ret = rtw_select_and_join_from_scanned_queue23a( pmlmepriv); if (ret != _SUCCESS) rtw_do_join_adhoc(adapter); } else { pmlmepriv->to_join = false; ret = rtw_select_and_join_from_scanned_queue23a( pmlmepriv); if (ret != _SUCCESS) { DBG_8723A("try_to_join, but select scanning " "queue fail, to_roaming:%d\n", adapter->mlmepriv.to_roaming); if (adapter->mlmepriv.to_roaming) { if (--pmlmepriv->to_roaming == 0 || 
rtw_sitesurvey_cmd23a( adapter, &pmlmepriv->assoc_ssid, 1, NULL, 0) != _SUCCESS) { rtw_set_roaming(adapter, 0); rtw_free_assoc_resources23a( adapter, 1); rtw_indicate_disconnect23a( adapter); } else pmlmepriv->to_join = true; } _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING); } } } spin_unlock_bh(&pmlmepriv->lock); rtw_os_xmit_schedule23a(adapter); if (pmlmeext->sitesurvey_res.bss_cnt == 0) rtw_sreset_reset(adapter); rtw_cfg80211_surveydone_event_callback(adapter); } static void free_scanqueue(struct mlme_priv *pmlmepriv) { struct wlan_network *pnetwork; struct rtw_queue *scan_queue = &pmlmepriv->scanned_queue; struct list_head *plist, *phead, *ptemp; RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, "+free_scanqueue\n"); spin_lock_bh(&scan_queue->lock); phead = get_list_head(scan_queue); list_for_each_safe(plist, ptemp, phead) { pnetwork = container_of(plist, struct wlan_network, list); pnetwork->fixed = false; _rtw_free_network23a(pmlmepriv, pnetwork); } spin_unlock_bh(&scan_queue->lock); } /* *rtw_free_assoc_resources23a: the caller has to lock pmlmepriv->lock */ void rtw_free_assoc_resources23a(struct rtw_adapter *adapter, int lock_scanned_queue) { struct wlan_network *pwlan; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; struct sta_priv *pstapriv = &adapter->stapriv; struct wlan_network *tgt_network = &pmlmepriv->cur_network; struct sta_info *psta; RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, "+rtw_free_assoc_resources23a\n"); RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, "tgt_network->network.MacAddress=%pM ssid=%s\n", tgt_network->network.MacAddress, tgt_network->network.Ssid.ssid); if (check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_AP_STATE)) { psta = rtw_get_stainfo23a(&adapter->stapriv, tgt_network->network.MacAddress); spin_lock_bh(&pstapriv->sta_hash_lock); rtw_free_stainfo23a(adapter, psta); spin_unlock_bh(&pstapriv->sta_hash_lock); } if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE|WIFI_AP_STATE)) { rtw_free_all_stainfo23a(adapter); psta = rtw_get_bcmc_stainfo23a(adapter); spin_lock_bh(&pstapriv->sta_hash_lock); rtw_free_stainfo23a(adapter, psta); spin_unlock_bh(&pstapriv->sta_hash_lock); rtw_init_bcmc_stainfo23a(adapter); } if (lock_scanned_queue) spin_lock_bh(&pmlmepriv->scanned_queue.lock); pwlan = rtw_find_network23a(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress); if (pwlan) pwlan->fixed = false; else RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, "rtw_free_assoc_resources23a : pwlan== NULL\n"); if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) && adapter->stapriv.asoc_sta_count == 1) rtw_free_network_nolock(pmlmepriv, pwlan); if (lock_scanned_queue) spin_unlock_bh(&pmlmepriv->scanned_queue.lock); pmlmepriv->key_mask = 0; } /* *rtw_indicate_connect23a: the caller has to lock pmlmepriv->lock */ void rtw_indicate_connect23a(struct rtw_adapter *padapter) { struct mlme_priv *pmlmepriv = &padapter->mlmepriv; RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, "+rtw_indicate_connect23a\n"); pmlmepriv->to_join = false; if (!check_fwstate(&padapter->mlmepriv, _FW_LINKED)) { set_fwstate(pmlmepriv, _FW_LINKED); rtw_cfg80211_indicate_connect(padapter); netif_carrier_on(padapter->pnetdev); if (padapter->pid[2] != 0) kill_pid(find_vpid(padapter->pid[2]), SIGALRM, 1); } rtw_set_roaming(padapter, 0); rtw_set_scan_deny(padapter, 3000); RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, "-rtw_indicate_connect23a: fw_state=0x%08x\n", get_fwstate(pmlmepriv)); } /* *rtw_indicate_disconnect23a: the caller has to lock pmlmepriv->lock */ void 
rtw_indicate_disconnect23a(struct rtw_adapter *padapter) { struct mlme_priv *pmlmepriv = &padapter->mlmepriv; RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, "+rtw_indicate_disconnect23a\n"); _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING|WIFI_UNDER_WPS); /* DBG_8723A("clear wps when %s\n", __func__); */ if (padapter->mlmepriv.to_roaming > 0) _clr_fwstate_(pmlmepriv, _FW_LINKED); if (check_fwstate(&padapter->mlmepriv, _FW_LINKED) || padapter->mlmepriv.to_roaming <= 0) { rtw_os_indicate_disconnect23a(padapter); /* set ips_deny_time to avoid enter IPS before LPS leave */ padapter->pwrctrlpriv.ips_deny_time = jiffies + msecs_to_jiffies(3000); _clr_fwstate_(pmlmepriv, _FW_LINKED); rtw_clear_scan_deny(padapter); } rtw_lps_ctrl_wk_cmd23a(padapter, LPS_CTRL_DISCONNECT, 1); } void rtw_scan_abort23a(struct rtw_adapter *adapter) { unsigned long start; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv; start = jiffies; pmlmeext->scan_abort = true; while (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) && jiffies_to_msecs(jiffies - start) <= 200) { if (adapter->bDriverStopped || adapter->bSurpriseRemoved) break; DBG_8723A("%s(%s): fw_state = _FW_UNDER_SURVEY!\n", __func__, adapter->pnetdev->name); msleep(20); } if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) { if (!adapter->bDriverStopped && !adapter->bSurpriseRemoved) DBG_8723A("%s(%s): waiting for scan_abort time out!\n", __func__, adapter->pnetdev->name); rtw_cfg80211_indicate_scan_done(wdev_to_priv(adapter->rtw_wdev), true); } pmlmeext->scan_abort = false; } static struct sta_info * rtw_joinbss_update_stainfo(struct rtw_adapter *padapter, struct wlan_network *pnetwork) { int i; struct sta_info *bmc_sta, *psta; struct recv_reorder_ctrl *preorder_ctrl; struct sta_priv *pstapriv = &padapter->stapriv; psta = rtw_get_stainfo23a(pstapriv, pnetwork->network.MacAddress); if (!psta) psta = rtw_alloc_stainfo23a(pstapriv, pnetwork->network.MacAddress, GFP_ATOMIC); if (psta) { /* update ptarget_sta */ DBG_8723A("%s\n", __func__); psta->aid = pnetwork->join_res; psta->mac_id = 0; /* sta mode */ rtl8723a_SetHalODMVar(padapter, HAL_ODM_STA_INFO, psta, true); /* security related */ if (padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) { padapter->securitypriv.binstallGrpkey = 0; padapter->securitypriv.busetkipkey = 0; psta->ieee8021x_blocked = true; psta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm; memset(&psta->dot118021x_UncstKey, 0, sizeof (union Keytype)); memset(&psta->dot11tkiprxmickey, 0, sizeof (union Keytype)); memset(&psta->dot11tkiptxmickey, 0, sizeof (union Keytype)); memset(&psta->dot11txpn, 0, sizeof (union pn48)); memset(&psta->dot11rxpn, 0, sizeof (union pn48)); } /* Commented by Albert 2012/07/21 */ /* When doing the WPS, the wps_ie_len won't equal to 0 */ /* And the Wi-Fi driver shouldn't allow the data packet to be transmitted. */ if (padapter->securitypriv.wps_ie_len != 0) { psta->ieee8021x_blocked = true; padapter->securitypriv.wps_ie_len = 0; } /* for A-MPDU Rx reordering buffer control for bmc_sta & * sta_info */ /* if A-MPDU Rx is enabled, resetting rx_ordering_ctrl wstart_b(indicate_seq) to default value = 0xffff */ /* todo: check if AP can send A-MPDU packets */ for (i = 0; i < 16 ; i++) { /* preorder_ctrl = &precvpriv->recvreorder_ctrl[i]; */ preorder_ctrl = &psta->recvreorder_ctrl[i]; preorder_ctrl->enable = false; preorder_ctrl->indicate_seq = 0xffff; preorder_ctrl->wend_b = 0xffff; /* max_ampdu_sz; ex. 
32(kbytes) -> wsize_b = 32 */ preorder_ctrl->wsize_b = 64; } bmc_sta = rtw_get_bcmc_stainfo23a(padapter); if (bmc_sta) { for (i = 0; i < 16 ; i++) { preorder_ctrl = &bmc_sta->recvreorder_ctrl[i]; preorder_ctrl->enable = false; preorder_ctrl->indicate_seq = 0xffff; preorder_ctrl->wend_b = 0xffff; /* max_ampdu_sz; ex. 32(kbytes) -> wsize_b = 32 */ preorder_ctrl->wsize_b = 64; } } /* misc. */ update_sta_info23a(padapter, psta); } return psta; } /* pnetwork : returns from rtw23a_joinbss_event_cb */ /* ptarget_wlan: found from scanned_queue */ static void rtw_joinbss_update_network23a(struct rtw_adapter *padapter, struct wlan_network *ptarget_wlan, struct wlan_network *pnetwork) { struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct wlan_network *cur_network = &pmlmepriv->cur_network; DBG_8723A("%s\n", __func__); RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, "fw_state:%x, BSSID:%pM\n", get_fwstate(pmlmepriv), pnetwork->network.MacAddress); /* why not use ptarget_wlan?? */ memcpy(&cur_network->network, &pnetwork->network, pnetwork->network.Length); /* some IEs in pnetwork is wrong, so we should use ptarget_wlan IEs */ cur_network->network.IELength = ptarget_wlan->network.IELength; memcpy(&cur_network->network.IEs[0], &ptarget_wlan->network.IEs[0], MAX_IE_SZ); cur_network->network.capability = ptarget_wlan->network.capability; cur_network->network.beacon_interval = ptarget_wlan->network.beacon_interval; cur_network->network.tsf = ptarget_wlan->network.tsf; rtw_set_signal_stat_timer(&padapter->recvpriv); padapter->recvpriv.signal_strength = ptarget_wlan->network.SignalStrength; padapter->recvpriv.signal_qual = ptarget_wlan->network.SignalQuality; /* * the ptarget_wlan->network.Rssi is raw data, we use * ptarget_wlan->network.SignalStrength instead (has scaled) */ DBG_8723A("%s signal_strength:%3u, signal_qual:%3u\n", __func__, padapter->recvpriv.signal_strength, padapter->recvpriv.signal_qual); rtw_set_signal_stat_timer(&padapter->recvpriv); /* update fw_state will clr _FW_UNDER_LINKING here indirectly */ switch (pnetwork->network.ifmode) { case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_STATION: if (pmlmepriv->fw_state & WIFI_UNDER_WPS) pmlmepriv->fw_state = WIFI_STATION_STATE|WIFI_UNDER_WPS; else pmlmepriv->fw_state = WIFI_STATION_STATE; break; case NL80211_IFTYPE_ADHOC: pmlmepriv->fw_state = WIFI_ADHOC_STATE; break; default: pmlmepriv->fw_state = WIFI_NULL_STATE; RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, "Invalid network_mode\n"); break; } rtw_update_protection23a(padapter, cur_network->network.IEs, cur_network->network.IELength); rtw_update_ht_cap23a(padapter, cur_network->network.IEs, cur_network->network.IELength); } /* * Notes: * the function could be > passive_level (the same context as Rx tasklet) * pnetwork : returns from rtw23a_joinbss_event_cb * ptarget_wlan: found from scanned_queue * if join_res > 0, for (fw_state==WIFI_STATION_STATE), * we check if "ptarget_sta" & "ptarget_wlan" exist. * if join_res > 0, for (fw_state==WIFI_ADHOC_STATE), * we only check if "ptarget_wlan" exist. * if join_res > 0, update "cur_network->network" from "pnetwork->network" * if (ptarget_wlan !=NULL). 
*/ void rtw_joinbss_event_prehandle23a(struct rtw_adapter *adapter, u8 *pbuf) { struct sta_info *ptarget_sta, *pcur_sta; struct sta_priv *pstapriv = &adapter->stapriv; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; struct wlan_network *pnetwork = (struct wlan_network *)pbuf; struct wlan_network *cur_network = &pmlmepriv->cur_network; struct wlan_network *pcur_wlan, *ptarget_wlan = NULL; bool the_same_macaddr; RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, "joinbss event call back received with res=%d\n", pnetwork->join_res); if (pmlmepriv->assoc_ssid.ssid_len == 0) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, "@@@@@ joinbss event call back for Any SSid\n"); } else { RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, "@@@@@ rtw23a_joinbss_event_cb for SSid:%s\n", pmlmepriv->assoc_ssid.ssid); } if (ether_addr_equal(pnetwork->network.MacAddress, cur_network->network.MacAddress)) the_same_macaddr = true; else the_same_macaddr = false; pnetwork->network.Length = get_wlan_bssid_ex_sz(&pnetwork->network); if (pnetwork->network.Length > sizeof(struct wlan_bssid_ex)) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, "***joinbss_evt_callback return a wrong bss ***\n"); return; } spin_lock_bh(&pmlmepriv->lock); RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, "rtw23a_joinbss_event_cb !! _enter_critical\n"); if (pnetwork->join_res > 0) { spin_lock_bh(&pmlmepriv->scanned_queue.lock); if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) { /* s1. find ptarget_wlan */ if (check_fwstate(pmlmepriv, _FW_LINKED)) { if (the_same_macaddr) { ptarget_wlan = rtw_find_network23a(&pmlmepriv->scanned_queue, cur_network->network.MacAddress); } else { pcur_wlan = rtw_find_network23a(&pmlmepriv->scanned_queue, cur_network->network.MacAddress); if (pcur_wlan) pcur_wlan->fixed = false; pcur_sta = rtw_get_stainfo23a(pstapriv, cur_network->network.MacAddress); if (pcur_sta) { spin_lock_bh(&pstapriv->sta_hash_lock); rtw_free_stainfo23a(adapter, pcur_sta); spin_unlock_bh(&pstapriv->sta_hash_lock); } ptarget_wlan = rtw_find_network23a(&pmlmepriv->scanned_queue, pnetwork->network.MacAddress); if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) { if (ptarget_wlan) ptarget_wlan->fixed = true; } } } else { ptarget_wlan = rtw_find_network23a( &pmlmepriv->scanned_queue, pnetwork->network.MacAddress); if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) { if (ptarget_wlan) ptarget_wlan->fixed = true; } } /* s2. update cur_network */ if (ptarget_wlan) rtw_joinbss_update_network23a(adapter, ptarget_wlan, pnetwork); else { RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, "Can't find ptarget_wlan when joinbss_event callback\n"); spin_unlock_bh(&pmlmepriv->scanned_queue.lock); goto ignore_joinbss_callback; } /* s3. find ptarget_sta & update ptarget_sta after update cur_network only for station mode */ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) { ptarget_sta = rtw_joinbss_update_stainfo( adapter, pnetwork); if (!ptarget_sta) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, "Can't update stainfo when joinbss_event callback\n"); spin_unlock_bh(&pmlmepriv->scanned_queue.lock); goto ignore_joinbss_callback; } } /* s4. indicate connect */ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) rtw_indicate_connect23a(adapter); else { /* adhoc mode will rtw_indicate_connect23a when rtw_stassoc_event_callback23a */ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, "adhoc mode, fw_state:%x\n", get_fwstate(pmlmepriv)); } /* s5. 
Cancel assoc_timer */ del_timer_sync(&pmlmepriv->assoc_timer); RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, "Cancel assoc_timer\n"); } else { RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, "rtw23a_joinbss_event_cb err: fw_state:%x\n", get_fwstate(pmlmepriv)); spin_unlock_bh(&pmlmepriv->scanned_queue.lock); goto ignore_joinbss_callback; } spin_unlock_bh(&pmlmepriv->scanned_queue.lock); } else if (pnetwork->join_res == -4) { rtw_reset_securitypriv23a(adapter); mod_timer(&pmlmepriv->assoc_timer, jiffies + msecs_to_jiffies(1)); /* rtw_free_assoc_resources23a(adapter, 1); */ if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, "fail! clear _FW_UNDER_LINKING ^^^fw_state=%x\n", get_fwstate(pmlmepriv)); _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING); } } else { /* if join_res < 0 (join fails), then try again */ mod_timer(&pmlmepriv->assoc_timer, jiffies + msecs_to_jiffies(1)); _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING); } ignore_joinbss_callback: spin_unlock_bh(&pmlmepriv->lock); } void rtw23a_joinbss_event_cb(struct rtw_adapter *adapter, const u8 *pbuf) { struct wlan_network *pnetwork = (struct wlan_network *)pbuf; mlmeext_joinbss_event_callback23a(adapter, pnetwork->join_res); rtw_os_xmit_schedule23a(adapter); } void rtw_stassoc_event_callback23a(struct rtw_adapter *adapter, const u8 *pbuf) { struct sta_info *psta; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; struct stassoc_event *pstassoc = (struct stassoc_event *)pbuf; struct wlan_network *cur_network = &pmlmepriv->cur_network; struct wlan_network *ptarget_wlan; if (rtw_access_ctrl23a(adapter, pstassoc->macaddr) == false) return; #ifdef CONFIG_8723AU_AP_MODE if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) { psta = rtw_get_stainfo23a(&adapter->stapriv, pstassoc->macaddr); if (psta) { /* bss_cap_update_on_sta_join23a(adapter, psta); */ /* sta_info_update23a(adapter, psta); */ ap_sta_info_defer_update23a(adapter, psta); } return; } #endif /* for AD-HOC mode */ psta = rtw_get_stainfo23a(&adapter->stapriv, pstassoc->macaddr); if (psta != NULL) { /* the sta is already in sta_info_queue => do nothing */ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, "Error: rtw_stassoc_event_callback23a: sta has been in sta_hash_queue\n"); /* (the driver received this event before the fw set the key to CAM_ENTRY) */ return; } psta = rtw_alloc_stainfo23a(&adapter->stapriv, pstassoc->macaddr, GFP_KERNEL); if (!psta) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, "Can't alloc sta_info when rtw_stassoc_event_callback23a\n"); return; } /* todo: init sta_info variables */ psta->qos_option = 0; psta->mac_id = (uint)pstassoc->cam_id; /* psta->aid = (uint)pstassoc->cam_id; */ DBG_8723A("%s\n", __func__); /* for ad-hoc mode */ rtl8723a_SetHalODMVar(adapter, HAL_ODM_STA_INFO, psta, true); if (adapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) psta->dot118021XPrivacy = adapter->securitypriv.dot11PrivacyAlgrthm; psta->ieee8021x_blocked = false; spin_lock_bh(&pmlmepriv->lock); if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) || check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) { if (adapter->stapriv.asoc_sta_count == 2) { spin_lock_bh(&pmlmepriv->scanned_queue.lock); ptarget_wlan = rtw_find_network23a(&pmlmepriv->scanned_queue, cur_network->network.MacAddress); if (ptarget_wlan) ptarget_wlan->fixed = true; spin_unlock_bh(&pmlmepriv->scanned_queue.lock); /* a sta + bc/mc_stainfo (not Ibss_stainfo) */ rtw_indicate_connect23a(adapter); } } spin_unlock_bh(&pmlmepriv->lock); mlmeext_sta_add_event_callback23a(adapter,
psta); } void rtw_stadel_event_callback23a(struct rtw_adapter *adapter, const u8 *pbuf) { int mac_id; struct sta_info *psta; struct wlan_network *pwlan; struct wlan_bssid_ex *pdev_network; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; struct stadel_event *pstadel = (struct stadel_event *)pbuf; struct sta_priv *pstapriv = &adapter->stapriv; struct wlan_network *tgt_network = &pmlmepriv->cur_network; psta = rtw_get_stainfo23a(&adapter->stapriv, pstadel->macaddr); if (psta) mac_id = psta->mac_id; else mac_id = pstadel->mac_id; DBG_8723A("%s(mac_id=%d)=%pM\n", __func__, mac_id, pstadel->macaddr); if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) return; mlmeext_sta_del_event_callback23a(adapter); spin_lock_bh(&pmlmepriv->lock); if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) { if (adapter->mlmepriv.to_roaming > 0) { /* this stadel_event is caused by roaming, decrease to_roaming */ pmlmepriv->to_roaming--; } else if (adapter->mlmepriv.to_roaming == 0) rtw_set_roaming(adapter, adapter->registrypriv.max_roaming_times); if (*((u16 *)pstadel->rsvd) != WLAN_REASON_EXPIRATION_CHK) rtw_set_roaming(adapter, 0); /* don't roam */ rtw_free_uc_swdec_pending_queue23a(adapter); rtw_free_assoc_resources23a(adapter, 1); rtw_indicate_disconnect23a(adapter); spin_lock_bh(&pmlmepriv->scanned_queue.lock); /* remove the network entry in scanned_queue */ pwlan = rtw_find_network23a(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress); if (pwlan) { pwlan->fixed = false; rtw_free_network_nolock(pmlmepriv, pwlan); } spin_unlock_bh(&pmlmepriv->scanned_queue.lock); _rtw_roaming(adapter, tgt_network); } if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) || check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) { spin_lock_bh(&pstapriv->sta_hash_lock); rtw_free_stainfo23a(adapter, psta); spin_unlock_bh(&pstapriv->sta_hash_lock); /* a sta + bc/mc_stainfo (not Ibss_stainfo) */ if (adapter->stapriv.asoc_sta_count == 1) { spin_lock_bh(&pmlmepriv->scanned_queue.lock); /* free old ibss network */ /* pwlan = rtw_find_network23a( &pmlmepriv->scanned_queue, pstadel->macaddr); */ pwlan = rtw_find_network23a(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress); if (pwlan) { pwlan->fixed = false; rtw_free_network_nolock(pmlmepriv, pwlan); } spin_unlock_bh(&pmlmepriv->scanned_queue.lock); /* re-create ibss */ pdev_network = &adapter->registrypriv.dev_network; memcpy(pdev_network, &tgt_network->network, get_wlan_bssid_ex_sz(&tgt_network->network)); rtw_do_join_adhoc(adapter); } } spin_unlock_bh(&pmlmepriv->lock); } /* * rtw23a_join_to_handler - Timeout/failure handler for CMD JoinBss * @data: pointer to the rtw_adapter structure, cast to unsigned long */ void rtw23a_join_to_handler(unsigned long data) { struct rtw_adapter *adapter = (struct rtw_adapter *)data; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; int do_join_r; DBG_8723A("%s, fw_state=%x\n", __func__, get_fwstate(pmlmepriv)); if (adapter->bDriverStopped || adapter->bSurpriseRemoved) return; spin_lock_bh(&pmlmepriv->lock); if (adapter->mlmepriv.to_roaming > 0) { /* join timeout caused by roaming */ while (1) { pmlmepriv->to_roaming--; if (adapter->mlmepriv.to_roaming != 0) { /* try another */ DBG_8723A("%s try another roaming\n", __func__); do_join_r = rtw_do_join(adapter); if (do_join_r != _SUCCESS) { DBG_8723A("%s roaming do_join returned %d\n", __func__, do_join_r); continue; } break; } else { DBG_8723A("%s We've tried roaming but failed\n", __func__); rtw_indicate_disconnect23a(adapter); break; } } } else { rtw_indicate_disconnect23a(adapter); free_scanqueue(pmlmepriv); /* indicate
disconnect for the case that join_timeout and check_fwstate != FW_LINKED */ rtw_cfg80211_indicate_disconnect(adapter); } spin_unlock_bh(&pmlmepriv->lock); } /* * rtw_scan_timeout_handler23a - Timeout/Failure handler for CMD SiteSurvey * @data: pointer to _adapter structure */ void rtw_scan_timeout_handler23a(unsigned long data) { struct rtw_adapter *adapter = (struct rtw_adapter *)data; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; DBG_8723A("%s(%s): fw_state =%x\n", __func__, adapter->pnetdev->name, get_fwstate(pmlmepriv)); spin_lock_bh(&pmlmepriv->lock); _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY); spin_unlock_bh(&pmlmepriv->lock); rtw_cfg80211_indicate_scan_done(wdev_to_priv(adapter->rtw_wdev), true); } void rtw_dynamic_check_timer_handler(unsigned long data) { struct rtw_adapter *adapter = (struct rtw_adapter *)data; if (adapter->hw_init_completed == false) goto out; if (adapter->bDriverStopped == true || adapter->bSurpriseRemoved == true) goto out; if (adapter->net_closed == true) goto out; rtw_dynamic_chk_wk_cmd23a(adapter); out: mod_timer(&adapter->mlmepriv.dynamic_chk_timer, jiffies + msecs_to_jiffies(2000)); } inline bool rtw_is_scan_deny(struct rtw_adapter *adapter) { struct mlme_priv *mlmepriv = &adapter->mlmepriv; return (atomic_read(&mlmepriv->set_scan_deny) != 0) ? true : false; } void rtw_clear_scan_deny(struct rtw_adapter *adapter) { struct mlme_priv *mlmepriv = &adapter->mlmepriv; atomic_set(&mlmepriv->set_scan_deny, 0); } void rtw_set_scan_deny_timer_hdl(unsigned long data) { struct rtw_adapter *adapter = (struct rtw_adapter *)data; rtw_clear_scan_deny(adapter); } void rtw_set_scan_deny(struct rtw_adapter *adapter, u32 ms) { struct mlme_priv *mlmepriv = &adapter->mlmepriv; atomic_set(&mlmepriv->set_scan_deny, 1); mod_timer(&mlmepriv->set_scan_deny_timer, jiffies + msecs_to_jiffies(ms)); } #if defined(IEEE80211_SCAN_RESULT_EXPIRE) #define RTW_SCAN_RESULT_EXPIRE \ ((IEEE80211_SCAN_RESULT_EXPIRE / (HZ*1000)) - 1000) /* 3000 -1000 */ #else #define RTW_SCAN_RESULT_EXPIRE 2000 #endif /* * Select a new join candidate from the original @param candidate and * @param competitor * @return true: candidate is updated * @return false: candidate is not updated */ static int rtw_check_join_candidate(struct mlme_priv *pmlmepriv, struct wlan_network **candidate, struct wlan_network *competitor) { int updated = false; struct rtw_adapter *adapter; adapter = container_of(pmlmepriv, struct rtw_adapter, mlmepriv); /* check bssid, if needed */ if (pmlmepriv->assoc_by_bssid == true) { if (!ether_addr_equal(competitor->network.MacAddress, pmlmepriv->assoc_bssid)) goto exit; } /* check ssid, if needed */ if (pmlmepriv->assoc_ssid.ssid_len) { if (competitor->network.Ssid.ssid_len != pmlmepriv->assoc_ssid.ssid_len || memcmp(competitor->network.Ssid.ssid, pmlmepriv->assoc_ssid.ssid, pmlmepriv->assoc_ssid.ssid_len)) goto exit; } if (rtw_is_desired_network(adapter, competitor) == false) goto exit; if (adapter->mlmepriv.to_roaming > 0) { unsigned int passed; passed = jiffies_to_msecs(jiffies - competitor->last_scanned); if (passed >= RTW_SCAN_RESULT_EXPIRE || is_same_ess(&competitor->network, &pmlmepriv->cur_network.network) == false) goto exit; } if (!*candidate || (*candidate)->network.Rssi<competitor->network.Rssi) { *candidate = competitor; updated = true; } if (updated) { DBG_8723A("[by_bssid:%u][assoc_ssid:%s][to_roaming:%u] new candidate: %s(%pM) rssi:%d\n", pmlmepriv->assoc_by_bssid, pmlmepriv->assoc_ssid.ssid, adapter->mlmepriv.to_roaming, (*candidate)->network.Ssid.ssid, 
(*candidate)->network.MacAddress, (int)(*candidate)->network.Rssi); } exit: return updated; } /* Calling context: The caller of the sub-routine will be in critical section... The caller must hold the following spinlock pmlmepriv->lock */ static int rtw_do_join(struct rtw_adapter *padapter) { struct mlme_priv *pmlmepriv = &padapter->mlmepriv; int ret; pmlmepriv->cur_network.join_res = -2; set_fwstate(pmlmepriv, _FW_UNDER_LINKING); pmlmepriv->to_join = true; ret = rtw_select_and_join_from_scanned_queue23a(pmlmepriv); if (ret == _SUCCESS) { pmlmepriv->to_join = false; } else { if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) { /* switch to ADHOC_MASTER */ ret = rtw_do_join_adhoc(padapter); if (ret != _SUCCESS) goto exit; } else { /* can't associate ; reset under-linking */ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING); ret = _FAIL; pmlmepriv->to_join = false; } } exit: return ret; } static struct wlan_network * rtw_select_candidate_from_queue(struct mlme_priv *pmlmepriv) { struct wlan_network *pnetwork, *candidate = NULL; struct rtw_queue *queue = &pmlmepriv->scanned_queue; struct list_head *phead, *plist, *ptmp; spin_lock_bh(&pmlmepriv->scanned_queue.lock); phead = get_list_head(queue); list_for_each_safe(plist, ptmp, phead) { pnetwork = container_of(plist, struct wlan_network, list); if (!pnetwork) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, "%s: return _FAIL:(pnetwork == NULL)\n", __func__); goto exit; } rtw_check_join_candidate(pmlmepriv, &candidate, pnetwork); } exit: spin_unlock_bh(&pmlmepriv->scanned_queue.lock); return candidate; } int rtw_do_join_adhoc(struct rtw_adapter *adapter) { struct mlme_priv *pmlmepriv = &adapter->mlmepriv; struct wlan_bssid_ex *pdev_network; u8 *ibss; int ret; pdev_network = &adapter->registrypriv.dev_network; ibss = adapter->registrypriv.dev_network.MacAddress; _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY); RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, "switching to adhoc master\n"); memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct cfg80211_ssid)); rtw_update_registrypriv_dev_network23a(adapter); rtw_generate_random_ibss23a(ibss); pmlmepriv->fw_state = WIFI_ADHOC_MASTER_STATE; ret = rtw_createbss_cmd23a(adapter); if (ret != _SUCCESS) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, "Error =>rtw_createbss_cmd23a status FAIL\n"); } else { pmlmepriv->to_join = false; } return ret; } int rtw_do_join_network(struct rtw_adapter *adapter, struct wlan_network *candidate) { int ret; /* check for situation of _FW_LINKED */ if (check_fwstate(&adapter->mlmepriv, _FW_LINKED)) { DBG_8723A("%s: _FW_LINKED while ask_for_joinbss!\n", __func__); rtw_disassoc_cmd23a(adapter, 0, true); rtw_indicate_disconnect23a(adapter); rtw_free_assoc_resources23a(adapter, 0); } set_fwstate(&adapter->mlmepriv, _FW_UNDER_LINKING); ret = rtw_joinbss_cmd23a(adapter, candidate); if (ret == _SUCCESS) mod_timer(&adapter->mlmepriv.assoc_timer, jiffies + msecs_to_jiffies(MAX_JOIN_TIMEOUT)); return ret; } int rtw_select_and_join_from_scanned_queue23a(struct mlme_priv *pmlmepriv) { struct rtw_adapter *adapter; struct wlan_network *candidate = NULL; int ret; adapter = pmlmepriv->nic_hdl; candidate = rtw_select_candidate_from_queue(pmlmepriv); if (!candidate) { DBG_8723A("%s: return _FAIL(candidate == NULL)\n", __func__); ret = _FAIL; goto exit; } else { DBG_8723A("%s: candidate: %s(%pM, ch:%u)\n", __func__, candidate->network.Ssid.ssid, candidate->network.MacAddress, candidate->network.DSConfig); } ret = rtw_do_join_network(adapter, candidate); exit: return ret; } int rtw_set_auth23a(struct 
rtw_adapter *adapter, struct security_priv *psecuritypriv) { struct cmd_obj *pcmd; struct setauth_parm *psetauthparm; struct cmd_priv *pcmdpriv = &adapter->cmdpriv; int res = _SUCCESS; pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); if (!pcmd) { res = _FAIL; /* try again */ goto exit; } psetauthparm = kzalloc(sizeof(struct setauth_parm), GFP_KERNEL); if (!psetauthparm) { kfree(pcmd); res = _FAIL; goto exit; } psetauthparm->mode = (unsigned char)psecuritypriv->dot11AuthAlgrthm; pcmd->cmdcode = _SetAuth_CMD_; pcmd->parmbuf = (unsigned char *)psetauthparm; pcmd->cmdsz = (sizeof(struct setauth_parm)); pcmd->rsp = NULL; pcmd->rspsz = 0; RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, "after enqueue set_auth_cmd, auth_mode=%x\n", psecuritypriv->dot11AuthAlgrthm); res = rtw_enqueue_cmd23a(pcmdpriv, pcmd); exit: return res; } int rtw_set_key23a(struct rtw_adapter *adapter, struct security_priv *psecuritypriv, int keyid, u8 set_tx) { u8 keylen; struct cmd_obj *pcmd; struct setkey_parm *psetkeyparm; struct cmd_priv *pcmdpriv = &adapter->cmdpriv; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; int res = _SUCCESS; if (keyid >= 4) { res = _FAIL; goto exit; } pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); if (!pcmd) { res = _FAIL; /* try again */ goto exit; } psetkeyparm = kzalloc(sizeof(struct setkey_parm), GFP_KERNEL); if (!psetkeyparm) { kfree(pcmd); res = _FAIL; goto exit; } if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) { psetkeyparm->algorithm = (unsigned char) psecuritypriv->dot118021XGrpPrivacy; RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, "rtw_set_key23a: psetkeyparm->algorithm = (unsigned char)psecuritypriv->dot118021XGrpPrivacy =%d\n", psetkeyparm->algorithm); } else { psetkeyparm->algorithm = (u8)psecuritypriv->dot11PrivacyAlgrthm; RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, "rtw_set_key23a: psetkeyparm->algorithm = (u8)psecuritypriv->dot11PrivacyAlgrthm =%d\n", psetkeyparm->algorithm); } psetkeyparm->keyid = keyid;/* 0~3 */ psetkeyparm->set_tx = set_tx; if (is_wep_enc(psetkeyparm->algorithm)) pmlmepriv->key_mask |= BIT(psetkeyparm->keyid); DBG_8723A("==> rtw_set_key23a algorithm(%x), keyid(%x), key_mask(%x)\n", psetkeyparm->algorithm, psetkeyparm->keyid, pmlmepriv->key_mask); RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, "rtw_set_key23a: psetkeyparm->algorithm =%d psetkeyparm->keyid = (u8)keyid =%d\n", psetkeyparm->algorithm, keyid); switch (psetkeyparm->algorithm) { case WLAN_CIPHER_SUITE_WEP40: keylen = 5; memcpy(&psetkeyparm->key[0], &psecuritypriv->wep_key[keyid].key, keylen); break; case WLAN_CIPHER_SUITE_WEP104: keylen = 13; memcpy(&psetkeyparm->key[0], &psecuritypriv->wep_key[keyid].key, keylen); break; case WLAN_CIPHER_SUITE_TKIP: keylen = 16; memcpy(&psetkeyparm->key, &psecuritypriv->dot118021XGrpKey[keyid], keylen); psetkeyparm->grpkey = 1; break; case WLAN_CIPHER_SUITE_CCMP: keylen = 16; memcpy(&psetkeyparm->key, &psecuritypriv->dot118021XGrpKey[keyid], keylen); psetkeyparm->grpkey = 1; break; default: RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, "rtw_set_key23a:psecuritypriv->dot11PrivacyAlgrthm = %x (must be 1 or 2 or 4 or 5)\n", psecuritypriv->dot11PrivacyAlgrthm); res = _FAIL; kfree(pcmd); kfree(psetkeyparm); goto exit; } pcmd->cmdcode = _SetKey_CMD_; pcmd->parmbuf = (u8 *)psetkeyparm; pcmd->cmdsz = (sizeof(struct setkey_parm)); pcmd->rsp = NULL; pcmd->rspsz = 0; /* sema_init(&pcmd->cmd_sem, 0); */ res = rtw_enqueue_cmd23a(pcmdpriv, pcmd); exit: return res; } /* adjust IEs for rtw_joinbss_cmd23a in WMM */ int rtw_restruct_wmm_ie23a(struct rtw_adapter 
*adapter, u8 *in_ie, u8 *out_ie, uint in_len, uint initial_out_len) { int ielength; const u8 *p; ielength = initial_out_len; p = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WMM, in_ie, in_len); if (p && p[1]) { memcpy(out_ie + initial_out_len, p, 9); out_ie[initial_out_len + 1] = 7; out_ie[initial_out_len + 6] = 0; out_ie[initial_out_len + 8] = 0; ielength += 9; } return ielength; } /* */ /* Ported from 8185: IsInPreAuthKeyList(). (Renamed from SecIsInPreAuthKeyList(), 2006-10-13.) */ /* Added by Annie, 2006-05-07. */ /* */ /* Search by BSSID, */ /* Return Value: */ /* -1 :if there is no pre-auth key in the table */ /* >= 0 :if there is pre-auth key, and return the entry id */ /* */ /* */ static int SecIsInPMKIDList(struct rtw_adapter *Adapter, u8 *bssid) { struct security_priv *psecuritypriv = &Adapter->securitypriv; int i = 0; do { if (psecuritypriv->PMKIDList[i].bUsed && ether_addr_equal(psecuritypriv->PMKIDList[i].Bssid, bssid)) { break; } else { i++; /* continue; */ } } while (i < NUM_PMKID_CACHE); if (i == NUM_PMKID_CACHE) i = -1;/* Could not find. */ else { /* There is one Pre-Authentication Key for the specific BSSID. */ } return i; } /* */ /* Check the RSN IE length */ /* If the RSN IE length <= 20, the RSN IE didn't include the PMKID information */ /* 0-11th element in the array are the fixed IE */ /* 12th element in the array is the IE */ /* 13th element in the array is the IE length */ /* */ static int rtw_append_pmkid(struct rtw_adapter *Adapter, int iEntry, u8 *ie, uint ie_len) { struct security_priv *psecuritypriv = &Adapter->securitypriv; if (ie[1] <= 20) { /* The RSN IE didn't include the PMK ID, append the PMK information */ ie[ie_len] = 1; ie_len++; ie[ie_len] = 0; /* PMKID count = 0x0100 */ ie_len++; memcpy(&ie[ie_len], &psecuritypriv->PMKIDList[iEntry].PMKID, 16); ie_len += 16; ie[1] += 18;/* PMKID length = 2+16 */ } return ie_len; } int rtw_restruct_sec_ie23a(struct rtw_adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_len) { u8 authmode; uint ielength; int iEntry; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; struct security_priv *psecuritypriv = &adapter->securitypriv; uint ndisauthmode = psecuritypriv->ndisauthtype; uint ndissecuritytype = psecuritypriv->ndisencryptstatus; RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, "+rtw_restruct_sec_ie23a: ndisauthmode=%d ndissecuritytype=%d\n", ndisauthmode, ndissecuritytype); ielength = 0; if (ndisauthmode == Ndis802_11AuthModeWPA || ndisauthmode == Ndis802_11AuthModeWPAPSK) authmode = WLAN_EID_VENDOR_SPECIFIC; if (ndisauthmode == Ndis802_11AuthModeWPA2 || ndisauthmode == Ndis802_11AuthModeWPA2PSK) authmode = WLAN_EID_RSN; if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS)) { memcpy(out_ie + ielength, psecuritypriv->wps_ie, psecuritypriv->wps_ie_len); ielength += psecuritypriv->wps_ie_len; } else if (authmode == WLAN_EID_VENDOR_SPECIFIC || authmode == WLAN_EID_RSN) { /* copy RSN or SSN */ memcpy(&out_ie[ielength], &psecuritypriv->supplicant_ie[0], psecuritypriv->supplicant_ie[1] + 2); ielength += psecuritypriv->supplicant_ie[1] + 2; } iEntry = SecIsInPMKIDList(adapter, pmlmepriv->assoc_bssid); if (iEntry < 0) return ielength; else { if (authmode == WLAN_EID_RSN) ielength = rtw_append_pmkid(adapter, iEntry, out_ie, ielength); } return ielength; } void rtw_init_registrypriv_dev_network23a(struct rtw_adapter *adapter) { struct registry_priv *pregistrypriv = &adapter->registrypriv; struct eeprom_priv *peepriv = &adapter->eeprompriv; struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network; u8 
*myhwaddr = myid(peepriv); ether_addr_copy(pdev_network->MacAddress, myhwaddr); memcpy(&pdev_network->Ssid, &pregistrypriv->ssid, sizeof(struct cfg80211_ssid)); pdev_network->beacon_interval = 100; } void rtw_update_registrypriv_dev_network23a(struct rtw_adapter *adapter) { int sz = 0; struct registry_priv *pregistrypriv = &adapter->registrypriv; struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network; struct security_priv *psecuritypriv = &adapter->securitypriv; struct wlan_network *cur_network = &adapter->mlmepriv.cur_network; /* struct xmit_priv *pxmitpriv = &adapter->xmitpriv; */ pdev_network->Privacy = (psecuritypriv->dot11PrivacyAlgrthm > 0 ? 1 : 0); pdev_network->Rssi = 0; pdev_network->DSConfig = pregistrypriv->channel; RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, "pregistrypriv->channel =%d, pdev_network->DSConfig = 0x%x\n", pregistrypriv->channel, pdev_network->DSConfig); if (cur_network->network.ifmode == NL80211_IFTYPE_ADHOC) pdev_network->ATIMWindow = 0; pdev_network->ifmode = cur_network->network.ifmode; /* 1. Supported rates */ /* 2. IE */ sz = rtw_generate_ie23a(pregistrypriv); pdev_network->IELength = sz; pdev_network->Length = get_wlan_bssid_ex_sz(pdev_network); /* notes: translate IELength & Length after assign the Length to cmdsz in createbss_cmd(); */ /* pdev_network->IELength = cpu_to_le32(sz); */ } /* the function is at passive_level */ void rtw_joinbss_reset23a(struct rtw_adapter *padapter) { u8 threshold; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct ht_priv *phtpriv = &pmlmepriv->htpriv; /* todo: if you want to do something io/reg/hw setting before join_bss, please add code here */ pmlmepriv->num_FortyMHzIntolerant = 0; pmlmepriv->num_sta_no_ht = 0; phtpriv->ampdu_enable = false;/* reset to disabled */ /* TH = 1 => means that invalidate usb rx aggregation */ /* TH = 0 => means that validate usb rx aggregation, use init value. 
*/ if (phtpriv->ht_option) { if (padapter->registrypriv.wifi_spec == 1) threshold = 1; else threshold = 0; } else threshold = 1; rtl8723a_set_rxdma_agg_pg_th(padapter, threshold); } /* the function is >= passive_level */ bool rtw_restructure_ht_ie23a(struct rtw_adapter *padapter, u8 *in_ie, u8 *out_ie, uint in_len, uint *pout_len) { u32 out_len; int max_rx_ampdu_factor; unsigned char *pframe; const u8 *p; struct ieee80211_ht_cap ht_capie; u8 WMM_IE[7] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01, 0x00}; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct ht_priv *phtpriv = &pmlmepriv->htpriv; phtpriv->ht_option = false; p = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, in_ie, in_len); if (p && p[1] > 0) { u32 rx_packet_offset, max_recvbuf_sz; if (pmlmepriv->qos_option == 0) { out_len = *pout_len; pframe = rtw_set_ie23a(out_ie + out_len, WLAN_EID_VENDOR_SPECIFIC, sizeof(WMM_IE), WMM_IE, pout_len); pmlmepriv->qos_option = 1; } out_len = *pout_len; memset(&ht_capie, 0, sizeof(struct ieee80211_ht_cap)); ht_capie.cap_info = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_TX_STBC | IEEE80211_HT_CAP_DSSSCCK40); GetHalDefVar8192CUsb(padapter, HAL_DEF_RX_PACKET_OFFSET, &rx_packet_offset); GetHalDefVar8192CUsb(padapter, HAL_DEF_MAX_RECVBUF_SZ, &max_recvbuf_sz); GetHalDefVar8192CUsb(padapter, HW_VAR_MAX_RX_AMPDU_FACTOR, &max_rx_ampdu_factor); ht_capie.ampdu_params_info = max_rx_ampdu_factor & 0x03; if (padapter->securitypriv.dot11PrivacyAlgrthm == WLAN_CIPHER_SUITE_CCMP) ht_capie.ampdu_params_info |= (IEEE80211_HT_AMPDU_PARM_DENSITY& (0x07 << 2)); else ht_capie.ampdu_params_info |= (IEEE80211_HT_AMPDU_PARM_DENSITY & 0x00); pframe = rtw_set_ie23a(out_ie + out_len, WLAN_EID_HT_CAPABILITY, sizeof(struct ieee80211_ht_cap), (unsigned char *)&ht_capie, pout_len); phtpriv->ht_option = true; p = cfg80211_find_ie(WLAN_EID_HT_OPERATION, in_ie, in_len); if (p && (p[1] == sizeof(struct ieee80211_ht_operation))) { out_len = *pout_len; pframe = rtw_set_ie23a(out_ie + out_len, WLAN_EID_HT_OPERATION, p[1], p + 2 , pout_len); } } return phtpriv->ht_option; } /* the function is > passive_level (in critical_section) */ void rtw_update_ht_cap23a(struct rtw_adapter *padapter, u8 *pie, uint ie_len) { u8 max_ampdu_sz; const u8 *p; struct ieee80211_ht_cap *pht_capie; struct ieee80211_ht_operation *pht_addtinfo; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct ht_priv *phtpriv = &pmlmepriv->htpriv; struct registry_priv *pregistrypriv = &padapter->registrypriv; struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info; if (!phtpriv->ht_option) return; if ((!pmlmeinfo->HT_info_enable) || (!pmlmeinfo->HT_caps_enable)) return; DBG_8723A("+rtw_update_ht_cap23a()\n"); /* maybe needs check if ap supports rx ampdu. 
*/ if (!phtpriv->ampdu_enable && pregistrypriv->ampdu_enable == 1) { if (pregistrypriv->wifi_spec == 1) phtpriv->ampdu_enable = false; else phtpriv->ampdu_enable = true; } else if (pregistrypriv->ampdu_enable == 2) phtpriv->ampdu_enable = true; /* check Max Rx A-MPDU Size */ p = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, pie, ie_len); if (p && p[1] > 0) { pht_capie = (struct ieee80211_ht_cap *)(p + 2); max_ampdu_sz = pht_capie->ampdu_params_info & IEEE80211_HT_AMPDU_PARM_FACTOR; /* max_ampdu_sz (kbytes); */ max_ampdu_sz = 1 << (max_ampdu_sz + 3); phtpriv->rx_ampdu_maxlen = max_ampdu_sz; } p = cfg80211_find_ie(WLAN_EID_HT_OPERATION, pie, ie_len); if (p && p[1] > 0) { pht_addtinfo = (struct ieee80211_ht_operation *)(p + 2); /* todo: */ } /* update cur_bwmode & cur_ch_offset */ if (pregistrypriv->cbw40_enable && pmlmeinfo->ht_cap.cap_info & cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40) && pmlmeinfo->HT_info.ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY) { int i; u8 rf_type; rf_type = rtl8723a_get_rf_type(padapter); /* update the MCS rates */ for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) { if (rf_type == RF_1T1R || rf_type == RF_1T2R) pmlmeinfo->ht_cap.mcs.rx_mask[i] &= MCS_rate_1R23A[i]; else pmlmeinfo->ht_cap.mcs.rx_mask[i] &= MCS_rate_2R23A[i]; } /* switch to the 40M Hz mode according to the AP */ pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_40; switch (pmlmeinfo->HT_info.ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_LOWER; break; case IEEE80211_HT_PARAM_CHA_SEC_BELOW: pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_UPPER; break; default: pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE; break; } } /* */ /* Config SM Power Save setting */ /* */ pmlmeinfo->SM_PS = (le16_to_cpu(pmlmeinfo->ht_cap.cap_info) & IEEE80211_HT_CAP_SM_PS) >> IEEE80211_HT_CAP_SM_PS_SHIFT; if (pmlmeinfo->SM_PS == WLAN_HT_CAP_SM_PS_STATIC) DBG_8723A("%s(): WLAN_HT_CAP_SM_PS_STATIC\n", __func__); /* */ /* Config current HT Protection mode. 
*/ /* */ pmlmeinfo->HT_protection = le16_to_cpu(pmlmeinfo->HT_info.operation_mode) & IEEE80211_HT_OP_MODE_PROTECTION; } void rtw_issue_addbareq_cmd23a(struct rtw_adapter *padapter, struct xmit_frame *pxmitframe) { u8 issued; int priority; struct sta_info *psta; struct ht_priv *phtpriv; struct pkt_attrib *pattrib = &pxmitframe->attrib; s32 bmcst = is_multicast_ether_addr(pattrib->ra); if (bmcst || padapter->mlmepriv.LinkDetectInfo.NumTxOkInPeriod < 100) return; priority = pattrib->priority; if (pattrib->psta) psta = pattrib->psta; else { DBG_8723A("%s, call rtw_get_stainfo23a()\n", __func__); psta = rtw_get_stainfo23a(&padapter->stapriv, pattrib->ra); } if (!psta) { DBG_8723A("%s, psta == NULL\n", __func__); return; } if (!(psta->state & _FW_LINKED)) { DBG_8723A("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, psta->state); return; } phtpriv = &psta->htpriv; if (phtpriv->ht_option && phtpriv->ampdu_enable) { issued = (phtpriv->agg_enable_bitmap >> priority) & 0x1; issued |= (phtpriv->candidate_tid_bitmap >> priority) & 0x1; if (issued == 0) { DBG_8723A("rtw_issue_addbareq_cmd23a, p=%d\n", priority); psta->htpriv.candidate_tid_bitmap |= BIT(priority); rtw_addbareq_cmd23a(padapter, (u8) priority, pattrib->ra); } } } int rtw_linked_check(struct rtw_adapter *padapter) { if (check_fwstate(&padapter->mlmepriv, WIFI_AP_STATE) || check_fwstate(&padapter->mlmepriv, WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE)) { if (padapter->stapriv.asoc_sta_count > 2) return true; } else { /* Station mode */ if (check_fwstate(&padapter->mlmepriv, _FW_LINKED)) return true; } return false; }
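/*
 * A minimal standalone sketch of the fw_state bitmask pattern the mlme code
 * above relies on (check_fwstate()/set_fwstate()/_clr_fwstate_()). The names
 * mirror the driver's, but the flag values below are illustrative
 * assumptions, not the driver's actual definitions.
 */
#include <stdbool.h>
#include <stdio.h>

#define _FW_LINKED        0x0001	/* illustrative values */
#define _FW_UNDER_LINKING 0x0002
#define _FW_UNDER_SURVEY  0x0004

struct mlme_state {
	unsigned int fw_state;
};

static bool check_fwstate(struct mlme_state *m, unsigned int mask)
{
	return (m->fw_state & mask) != 0;
}

static void set_fwstate(struct mlme_state *m, unsigned int mask)
{
	m->fw_state |= mask;		/* enter state(s) */
}

static void clr_fwstate(struct mlme_state *m, unsigned int mask)
{
	m->fw_state &= ~mask;		/* leave state(s) */
}

int main(void)
{
	struct mlme_state m = { 0 };

	set_fwstate(&m, _FW_UNDER_LINKING);	/* join started */
	clr_fwstate(&m, _FW_UNDER_LINKING);	/* join completed */
	set_fwstate(&m, _FW_LINKED);		/* now associated */
	printf("linked: %d\n", check_fwstate(&m, _FW_LINKED));
	return 0;
}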
gpl-2.0
The-Sickness/S6-MM
drivers/staging/ti-soc-thermal/ti-thermal-common.c
2093
9230
/* * OMAP thermal driver interface * * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ * Contact: * Eduardo Valentin <eduardo.valentin@ti.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include <linux/device.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/gfp.h> #include <linux/kernel.h> #include <linux/workqueue.h> #include <linux/thermal.h> #include <linux/cpufreq.h> #include <linux/cpumask.h> #include <linux/cpu_cooling.h> #include "ti-thermal.h" #include "ti-bandgap.h" /* common data structures */ struct ti_thermal_data { struct thermal_zone_device *ti_thermal; struct thermal_cooling_device *cool_dev; struct ti_bandgap *bgp; enum thermal_device_mode mode; struct work_struct thermal_wq; int sensor_id; }; static void ti_thermal_work(struct work_struct *work) { struct ti_thermal_data *data = container_of(work, struct ti_thermal_data, thermal_wq); thermal_zone_device_update(data->ti_thermal); dev_dbg(&data->ti_thermal->device, "updated thermal zone %s\n", data->ti_thermal->type); } /** * ti_thermal_hotspot_temperature - returns sensor extrapolated temperature * @t: omap sensor temperature * @s: omap sensor slope value * @c: omap sensor const value */ static inline int ti_thermal_hotspot_temperature(int t, int s, int c) { int delta = t * s / 1000 + c; if (delta < 0) delta = 0; return t + delta; } /* thermal zone ops */ /* Get temperature callback function for thermal zone*/ static inline int ti_thermal_get_temp(struct thermal_zone_device *thermal, unsigned long *temp) { struct ti_thermal_data *data = thermal->devdata; struct ti_bandgap *bgp; const struct ti_temp_sensor *s; int ret, tmp, pcb_temp, slope, constant; if (!data) return 0; bgp = data->bgp; s = &bgp->conf->sensors[data->sensor_id]; ret = ti_bandgap_read_temperature(bgp, data->sensor_id, &tmp); if (ret) return ret; pcb_temp = 0; /* TODO: Introduce pcb temperature lookup */ /* In case pcb zone is available, use the extrapolation rule with it */ if (pcb_temp) { tmp -= pcb_temp; slope = s->slope_pcb; constant = s->constant_pcb; } else { slope = s->slope; constant = s->constant; } *temp = ti_thermal_hotspot_temperature(tmp, slope, constant); return ret; } /* Bind callback functions for thermal zone */ static int ti_thermal_bind(struct thermal_zone_device *thermal, struct thermal_cooling_device *cdev) { struct ti_thermal_data *data = thermal->devdata; int id; if (IS_ERR_OR_NULL(data)) return -ENODEV; /* check if this is the cooling device we registered */ if (data->cool_dev != cdev) return 0; id = data->sensor_id; /* Simple thing, two trips, one passive another critical */ return thermal_zone_bind_cooling_device(thermal, 0, cdev, /* bind with min and max states defined by cpu_cooling */ THERMAL_NO_LIMIT, THERMAL_NO_LIMIT); } /* Unbind callback functions for thermal zone */ static int ti_thermal_unbind(struct thermal_zone_device *thermal, struct thermal_cooling_device *cdev) { struct ti_thermal_data *data = 
thermal->devdata; if (IS_ERR_OR_NULL(data)) return -ENODEV; /* check if this is the cooling device we registered */ if (data->cool_dev != cdev) return 0; /* Simple thing, two trips, one passive another critical */ return thermal_zone_unbind_cooling_device(thermal, 0, cdev); } /* Get mode callback functions for thermal zone */ static int ti_thermal_get_mode(struct thermal_zone_device *thermal, enum thermal_device_mode *mode) { struct ti_thermal_data *data = thermal->devdata; if (data) *mode = data->mode; return 0; } /* Set mode callback functions for thermal zone */ static int ti_thermal_set_mode(struct thermal_zone_device *thermal, enum thermal_device_mode mode) { struct ti_thermal_data *data = thermal->devdata; if (!data->ti_thermal) { dev_notice(&thermal->device, "thermal zone not registered\n"); return 0; } mutex_lock(&data->ti_thermal->lock); if (mode == THERMAL_DEVICE_ENABLED) data->ti_thermal->polling_delay = FAST_TEMP_MONITORING_RATE; else data->ti_thermal->polling_delay = 0; mutex_unlock(&data->ti_thermal->lock); data->mode = mode; thermal_zone_device_update(data->ti_thermal); dev_dbg(&thermal->device, "thermal polling set for duration=%d msec\n", data->ti_thermal->polling_delay); return 0; } /* Get trip type callback functions for thermal zone */ static int ti_thermal_get_trip_type(struct thermal_zone_device *thermal, int trip, enum thermal_trip_type *type) { if (!ti_thermal_is_valid_trip(trip)) return -EINVAL; if (trip + 1 == OMAP_TRIP_NUMBER) *type = THERMAL_TRIP_CRITICAL; else *type = THERMAL_TRIP_PASSIVE; return 0; } /* Get trip temperature callback functions for thermal zone */ static int ti_thermal_get_trip_temp(struct thermal_zone_device *thermal, int trip, unsigned long *temp) { if (!ti_thermal_is_valid_trip(trip)) return -EINVAL; *temp = ti_thermal_get_trip_value(trip); return 0; } /* Get the temperature trend callback functions for thermal zone */ static int ti_thermal_get_trend(struct thermal_zone_device *thermal, int trip, enum thermal_trend *trend) { struct ti_thermal_data *data = thermal->devdata; struct ti_bandgap *bgp; int id, tr, ret = 0; bgp = data->bgp; id = data->sensor_id; ret = ti_bandgap_get_trend(bgp, id, &tr); if (ret) return ret; if (tr > 0) *trend = THERMAL_TREND_RAISING; else if (tr < 0) *trend = THERMAL_TREND_DROPPING; else *trend = THERMAL_TREND_STABLE; return 0; } /* Get critical temperature callback functions for thermal zone */ static int ti_thermal_get_crit_temp(struct thermal_zone_device *thermal, unsigned long *temp) { /* shutdown zone */ return ti_thermal_get_trip_temp(thermal, OMAP_TRIP_NUMBER - 1, temp); } static struct thermal_zone_device_ops ti_thermal_ops = { .get_temp = ti_thermal_get_temp, .get_trend = ti_thermal_get_trend, .bind = ti_thermal_bind, .unbind = ti_thermal_unbind, .get_mode = ti_thermal_get_mode, .set_mode = ti_thermal_set_mode, .get_trip_type = ti_thermal_get_trip_type, .get_trip_temp = ti_thermal_get_trip_temp, .get_crit_temp = ti_thermal_get_crit_temp, }; static struct ti_thermal_data *ti_thermal_build_data(struct ti_bandgap *bgp, int id) { struct ti_thermal_data *data; data = devm_kzalloc(bgp->dev, sizeof(*data), GFP_KERNEL); if (!data) { dev_err(bgp->dev, "kzalloc fail\n"); return NULL; } data->sensor_id = id; data->bgp = bgp; data->mode = THERMAL_DEVICE_ENABLED; INIT_WORK(&data->thermal_wq, ti_thermal_work); return data; } int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id, char *domain) { struct ti_thermal_data *data; data = ti_bandgap_get_sensor_data(bgp, id); if (IS_ERR_OR_NULL(data)) data = 
ti_thermal_build_data(bgp, id); if (!data) return -EINVAL; /* Create thermal zone */ data->ti_thermal = thermal_zone_device_register(domain, OMAP_TRIP_NUMBER, 0, data, &ti_thermal_ops, NULL, FAST_TEMP_MONITORING_RATE, FAST_TEMP_MONITORING_RATE); if (IS_ERR_OR_NULL(data->ti_thermal)) { dev_err(bgp->dev, "thermal zone device is NULL\n"); return PTR_ERR(data->ti_thermal); } data->ti_thermal->polling_delay = FAST_TEMP_MONITORING_RATE; ti_bandgap_set_sensor_data(bgp, id, data); return 0; } int ti_thermal_remove_sensor(struct ti_bandgap *bgp, int id) { struct ti_thermal_data *data; data = ti_bandgap_get_sensor_data(bgp, id); thermal_zone_device_unregister(data->ti_thermal); return 0; } int ti_thermal_report_sensor_temperature(struct ti_bandgap *bgp, int id) { struct ti_thermal_data *data; data = ti_bandgap_get_sensor_data(bgp, id); schedule_work(&data->thermal_wq); return 0; } int ti_thermal_register_cpu_cooling(struct ti_bandgap *bgp, int id) { struct ti_thermal_data *data; data = ti_bandgap_get_sensor_data(bgp, id); if (IS_ERR_OR_NULL(data)) data = ti_thermal_build_data(bgp, id); if (!data) return -EINVAL; if (!cpufreq_get_current_driver()) { dev_dbg(bgp->dev, "no cpufreq driver yet\n"); return -EPROBE_DEFER; } /* Register cooling device */ data->cool_dev = cpufreq_cooling_register(cpu_present_mask); if (IS_ERR_OR_NULL(data->cool_dev)) { dev_err(bgp->dev, "Failed to register cpufreq cooling device\n"); return PTR_ERR(data->cool_dev); } ti_bandgap_set_sensor_data(bgp, id, data); return 0; } int ti_thermal_unregister_cpu_cooling(struct ti_bandgap *bgp, int id) { struct ti_thermal_data *data; data = ti_bandgap_get_sensor_data(bgp, id); cpufreq_cooling_unregister(data->cool_dev); return 0; }
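/*
 * A standalone sketch of the extrapolation used by
 * ti_thermal_hotspot_temperature() above: hotspot = t + max(0, t*s/1000 + c),
 * with t in millidegrees C and s a per-mille slope. The slope and constant
 * fed to main() are made-up sample inputs, not values from any TI sensor
 * table.
 */
#include <stdio.h>

static int hotspot_temperature(int t, int s, int c)
{
	int delta = t * s / 1000 + c;	/* scaled slope plus offset */

	if (delta < 0)			/* never extrapolate downwards */
		delta = 0;
	return t + delta;
}

int main(void)
{
	/* 55.000 C sensor reading, slope 376/1000, constant -2.816 C */
	printf("%d\n", hotspot_temperature(55000, 376, -2816));
	/* prints 72864, i.e. ~72.9 C estimated hotspot */
	return 0;
}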
gpl-2.0
crewrktablets/rk3x_kernel_3.10
tools/testing/selftests/net/psock_tpacket.c
2093
18797
/* * Copyright 2013 Red Hat, Inc. * Author: Daniel Borkmann <dborkman@redhat.com> * * A basic test of packet socket's TPACKET_V1/TPACKET_V2/TPACKET_V3 behavior. * * Control: * Test the setup of the TPACKET socket with different patterns that are * known to fail (TODO) resp. succeed (OK). * * Datapath: * Open a pair of packet sockets and send resp. receive an a priori known * packet pattern accross the sockets and check if it was received resp. * sent correctly. Fanout in combination with RX_RING is currently not * tested here. * * The test currently runs for * - TPACKET_V1: RX_RING, TX_RING * - TPACKET_V2: RX_RING, TX_RING * - TPACKET_V3: RX_RING * * License (GPLv2): * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ #include <stdio.h> #include <stdlib.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/socket.h> #include <sys/mman.h> #include <linux/if_packet.h> #include <linux/filter.h> #include <ctype.h> #include <fcntl.h> #include <unistd.h> #include <bits/wordsize.h> #include <net/ethernet.h> #include <netinet/ip.h> #include <arpa/inet.h> #include <stdint.h> #include <string.h> #include <assert.h> #include <net/if.h> #include <inttypes.h> #include <poll.h> #include "psock_lib.h" #ifndef bug_on # define bug_on(cond) assert(!(cond)) #endif #ifndef __aligned_tpacket # define __aligned_tpacket __attribute__((aligned(TPACKET_ALIGNMENT))) #endif #ifndef __align_tpacket # define __align_tpacket(x) __attribute__((aligned(TPACKET_ALIGN(x)))) #endif #define BLOCK_STATUS(x) ((x)->h1.block_status) #define BLOCK_NUM_PKTS(x) ((x)->h1.num_pkts) #define BLOCK_O2FP(x) ((x)->h1.offset_to_first_pkt) #define BLOCK_LEN(x) ((x)->h1.blk_len) #define BLOCK_SNUM(x) ((x)->h1.seq_num) #define BLOCK_O2PRIV(x) ((x)->offset_to_priv) #define BLOCK_PRIV(x) ((void *) ((uint8_t *) (x) + BLOCK_O2PRIV(x))) #define BLOCK_HDR_LEN (ALIGN_8(sizeof(struct block_desc))) #define ALIGN_8(x) (((x) + 8 - 1) & ~(8 - 1)) #define BLOCK_PLUS_PRIV(sz_pri) (BLOCK_HDR_LEN + ALIGN_8((sz_pri))) #define NUM_PACKETS 100 struct ring { struct iovec *rd; uint8_t *mm_space; size_t mm_len, rd_len; struct sockaddr_ll ll; void (*walk)(int sock, struct ring *ring); int type, rd_num, flen, version; union { struct tpacket_req req; struct tpacket_req3 req3; }; }; struct block_desc { uint32_t version; uint32_t offset_to_priv; struct tpacket_hdr_v1 h1; }; union frame_map { struct { struct tpacket_hdr tp_h __aligned_tpacket; struct sockaddr_ll s_ll __align_tpacket(sizeof(struct tpacket_hdr)); } *v1; struct { struct tpacket2_hdr tp_h __aligned_tpacket; struct sockaddr_ll s_ll __align_tpacket(sizeof(struct tpacket2_hdr)); } *v2; void *raw; }; static unsigned int total_packets, total_bytes; static int pfsocket(int ver) { int ret, sock = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL)); if (sock == -1) { perror("socket"); exit(1); } ret = setsockopt(sock, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver)); if (ret == -1) { perror("setsockopt"); exit(1); } return 
sock; } static void status_bar_update(void) { if (total_packets % 10 == 0) { fprintf(stderr, "."); fflush(stderr); } } static void test_payload(void *pay, size_t len) { struct ethhdr *eth = pay; if (len < sizeof(struct ethhdr)) { fprintf(stderr, "test_payload: packet too " "small: %zu bytes!\n", len); exit(1); } if (eth->h_proto != htons(ETH_P_IP)) { fprintf(stderr, "test_payload: wrong ethernet " "type: 0x%x!\n", ntohs(eth->h_proto)); exit(1); } } static void create_payload(void *pay, size_t *len) { int i; struct ethhdr *eth = pay; struct iphdr *ip = pay + sizeof(*eth); /* Lets create some broken crap, that still passes * our BPF filter. */ *len = DATA_LEN + 42; memset(pay, 0xff, ETH_ALEN * 2); eth->h_proto = htons(ETH_P_IP); for (i = 0; i < sizeof(*ip); ++i) ((uint8_t *) pay)[i + sizeof(*eth)] = (uint8_t) rand(); ip->ihl = 5; ip->version = 4; ip->protocol = 0x11; ip->frag_off = 0; ip->ttl = 64; ip->tot_len = htons((uint16_t) *len - sizeof(*eth)); ip->saddr = htonl(INADDR_LOOPBACK); ip->daddr = htonl(INADDR_LOOPBACK); memset(pay + sizeof(*eth) + sizeof(*ip), DATA_CHAR, DATA_LEN); } static inline int __v1_rx_kernel_ready(struct tpacket_hdr *hdr) { return ((hdr->tp_status & TP_STATUS_USER) == TP_STATUS_USER); } static inline void __v1_rx_user_ready(struct tpacket_hdr *hdr) { hdr->tp_status = TP_STATUS_KERNEL; __sync_synchronize(); } static inline int __v2_rx_kernel_ready(struct tpacket2_hdr *hdr) { return ((hdr->tp_status & TP_STATUS_USER) == TP_STATUS_USER); } static inline void __v2_rx_user_ready(struct tpacket2_hdr *hdr) { hdr->tp_status = TP_STATUS_KERNEL; __sync_synchronize(); } static inline int __v1_v2_rx_kernel_ready(void *base, int version) { switch (version) { case TPACKET_V1: return __v1_rx_kernel_ready(base); case TPACKET_V2: return __v2_rx_kernel_ready(base); default: bug_on(1); return 0; } } static inline void __v1_v2_rx_user_ready(void *base, int version) { switch (version) { case TPACKET_V1: __v1_rx_user_ready(base); break; case TPACKET_V2: __v2_rx_user_ready(base); break; } } static void walk_v1_v2_rx(int sock, struct ring *ring) { struct pollfd pfd; int udp_sock[2]; union frame_map ppd; unsigned int frame_num = 0; bug_on(ring->type != PACKET_RX_RING); pair_udp_open(udp_sock, PORT_BASE); pair_udp_setfilter(sock); memset(&pfd, 0, sizeof(pfd)); pfd.fd = sock; pfd.events = POLLIN | POLLERR; pfd.revents = 0; pair_udp_send(udp_sock, NUM_PACKETS); while (total_packets < NUM_PACKETS * 2) { while (__v1_v2_rx_kernel_ready(ring->rd[frame_num].iov_base, ring->version)) { ppd.raw = ring->rd[frame_num].iov_base; switch (ring->version) { case TPACKET_V1: test_payload((uint8_t *) ppd.raw + ppd.v1->tp_h.tp_mac, ppd.v1->tp_h.tp_snaplen); total_bytes += ppd.v1->tp_h.tp_snaplen; break; case TPACKET_V2: test_payload((uint8_t *) ppd.raw + ppd.v2->tp_h.tp_mac, ppd.v2->tp_h.tp_snaplen); total_bytes += ppd.v2->tp_h.tp_snaplen; break; } status_bar_update(); total_packets++; __v1_v2_rx_user_ready(ppd.raw, ring->version); frame_num = (frame_num + 1) % ring->rd_num; } poll(&pfd, 1, 1); } pair_udp_close(udp_sock); if (total_packets != 2 * NUM_PACKETS) { fprintf(stderr, "walk_v%d_rx: received %u out of %u pkts\n", ring->version, total_packets, NUM_PACKETS); exit(1); } fprintf(stderr, " %u pkts (%u bytes)", NUM_PACKETS, total_bytes >> 1); } static inline int __v1_tx_kernel_ready(struct tpacket_hdr *hdr) { return !(hdr->tp_status & (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING)); } static inline void __v1_tx_user_ready(struct tpacket_hdr *hdr) { hdr->tp_status = TP_STATUS_SEND_REQUEST; __sync_synchronize(); 
} static inline int __v2_tx_kernel_ready(struct tpacket2_hdr *hdr) { return !(hdr->tp_status & (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING)); } static inline void __v2_tx_user_ready(struct tpacket2_hdr *hdr) { hdr->tp_status = TP_STATUS_SEND_REQUEST; __sync_synchronize(); } static inline int __v1_v2_tx_kernel_ready(void *base, int version) { switch (version) { case TPACKET_V1: return __v1_tx_kernel_ready(base); case TPACKET_V2: return __v2_tx_kernel_ready(base); default: bug_on(1); return 0; } } static inline void __v1_v2_tx_user_ready(void *base, int version) { switch (version) { case TPACKET_V1: __v1_tx_user_ready(base); break; case TPACKET_V2: __v2_tx_user_ready(base); break; } } static void __v1_v2_set_packet_loss_discard(int sock) { int ret, discard = 1; ret = setsockopt(sock, SOL_PACKET, PACKET_LOSS, (void *) &discard, sizeof(discard)); if (ret == -1) { perror("setsockopt"); exit(1); } } static void walk_v1_v2_tx(int sock, struct ring *ring) { struct pollfd pfd; int rcv_sock, ret; size_t packet_len; union frame_map ppd; char packet[1024]; unsigned int frame_num = 0, got = 0; struct sockaddr_ll ll = { .sll_family = PF_PACKET, .sll_halen = ETH_ALEN, }; bug_on(ring->type != PACKET_TX_RING); bug_on(ring->rd_num < NUM_PACKETS); rcv_sock = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL)); if (rcv_sock == -1) { perror("socket"); exit(1); } pair_udp_setfilter(rcv_sock); ll.sll_ifindex = if_nametoindex("lo"); ret = bind(rcv_sock, (struct sockaddr *) &ll, sizeof(ll)); if (ret == -1) { perror("bind"); exit(1); } memset(&pfd, 0, sizeof(pfd)); pfd.fd = sock; pfd.events = POLLOUT | POLLERR; pfd.revents = 0; total_packets = NUM_PACKETS; create_payload(packet, &packet_len); while (total_packets > 0) { while (__v1_v2_tx_kernel_ready(ring->rd[frame_num].iov_base, ring->version) && total_packets > 0) { ppd.raw = ring->rd[frame_num].iov_base; switch (ring->version) { case TPACKET_V1: ppd.v1->tp_h.tp_snaplen = packet_len; ppd.v1->tp_h.tp_len = packet_len; memcpy((uint8_t *) ppd.raw + TPACKET_HDRLEN - sizeof(struct sockaddr_ll), packet, packet_len); total_bytes += ppd.v1->tp_h.tp_snaplen; break; case TPACKET_V2: ppd.v2->tp_h.tp_snaplen = packet_len; ppd.v2->tp_h.tp_len = packet_len; memcpy((uint8_t *) ppd.raw + TPACKET2_HDRLEN - sizeof(struct sockaddr_ll), packet, packet_len); total_bytes += ppd.v2->tp_h.tp_snaplen; break; } status_bar_update(); total_packets--; __v1_v2_tx_user_ready(ppd.raw, ring->version); frame_num = (frame_num + 1) % ring->rd_num; } poll(&pfd, 1, 1); } bug_on(total_packets != 0); ret = sendto(sock, NULL, 0, 0, NULL, 0); if (ret == -1) { perror("sendto"); exit(1); } while ((ret = recvfrom(rcv_sock, packet, sizeof(packet), 0, NULL, NULL)) > 0 && total_packets < NUM_PACKETS) { got += ret; test_payload(packet, ret); status_bar_update(); total_packets++; } close(rcv_sock); if (total_packets != NUM_PACKETS) { fprintf(stderr, "walk_v%d_rx: received %u out of %u pkts\n", ring->version, total_packets, NUM_PACKETS); exit(1); } fprintf(stderr, " %u pkts (%u bytes)", NUM_PACKETS, got); } static void walk_v1_v2(int sock, struct ring *ring) { if (ring->type == PACKET_RX_RING) walk_v1_v2_rx(sock, ring); else walk_v1_v2_tx(sock, ring); } static uint64_t __v3_prev_block_seq_num = 0; void __v3_test_block_seq_num(struct block_desc *pbd) { if (__v3_prev_block_seq_num + 1 != BLOCK_SNUM(pbd)) { fprintf(stderr, "\nprev_block_seq_num:%"PRIu64", expected " "seq:%"PRIu64" != actual seq:%"PRIu64"\n", __v3_prev_block_seq_num, __v3_prev_block_seq_num + 1, (uint64_t) BLOCK_SNUM(pbd)); exit(1); } 
__v3_prev_block_seq_num = BLOCK_SNUM(pbd); } static void __v3_test_block_len(struct block_desc *pbd, uint32_t bytes, int block_num) { if (BLOCK_NUM_PKTS(pbd)) { if (bytes != BLOCK_LEN(pbd)) { fprintf(stderr, "\nblock:%u with %upackets, expected " "len:%u != actual len:%u\n", block_num, BLOCK_NUM_PKTS(pbd), bytes, BLOCK_LEN(pbd)); exit(1); } } else { if (BLOCK_LEN(pbd) != BLOCK_PLUS_PRIV(13)) { fprintf(stderr, "\nblock:%u, expected len:%lu != " "actual len:%u\n", block_num, BLOCK_HDR_LEN, BLOCK_LEN(pbd)); exit(1); } } } static void __v3_test_block_header(struct block_desc *pbd, const int block_num) { uint32_t block_status = BLOCK_STATUS(pbd); if ((block_status & TP_STATUS_USER) == 0) { fprintf(stderr, "\nblock %u: not in TP_STATUS_USER\n", block_num); exit(1); } __v3_test_block_seq_num(pbd); } static void __v3_walk_block(struct block_desc *pbd, const int block_num) { int num_pkts = BLOCK_NUM_PKTS(pbd), i; unsigned long bytes = 0; unsigned long bytes_with_padding = BLOCK_PLUS_PRIV(13); struct tpacket3_hdr *ppd; __v3_test_block_header(pbd, block_num); ppd = (struct tpacket3_hdr *) ((uint8_t *) pbd + BLOCK_O2FP(pbd)); for (i = 0; i < num_pkts; ++i) { bytes += ppd->tp_snaplen; if (ppd->tp_next_offset) bytes_with_padding += ppd->tp_next_offset; else bytes_with_padding += ALIGN_8(ppd->tp_snaplen + ppd->tp_mac); test_payload((uint8_t *) ppd + ppd->tp_mac, ppd->tp_snaplen); status_bar_update(); total_packets++; ppd = (struct tpacket3_hdr *) ((uint8_t *) ppd + ppd->tp_next_offset); __sync_synchronize(); } __v3_test_block_len(pbd, bytes_with_padding, block_num); total_bytes += bytes; } void __v3_flush_block(struct block_desc *pbd) { BLOCK_STATUS(pbd) = TP_STATUS_KERNEL; __sync_synchronize(); } static void walk_v3_rx(int sock, struct ring *ring) { unsigned int block_num = 0; struct pollfd pfd; struct block_desc *pbd; int udp_sock[2]; bug_on(ring->type != PACKET_RX_RING); pair_udp_open(udp_sock, PORT_BASE); pair_udp_setfilter(sock); memset(&pfd, 0, sizeof(pfd)); pfd.fd = sock; pfd.events = POLLIN | POLLERR; pfd.revents = 0; pair_udp_send(udp_sock, NUM_PACKETS); while (total_packets < NUM_PACKETS * 2) { pbd = (struct block_desc *) ring->rd[block_num].iov_base; while ((BLOCK_STATUS(pbd) & TP_STATUS_USER) == 0) poll(&pfd, 1, 1); __v3_walk_block(pbd, block_num); __v3_flush_block(pbd); block_num = (block_num + 1) % ring->rd_num; } pair_udp_close(udp_sock); if (total_packets != 2 * NUM_PACKETS) { fprintf(stderr, "walk_v3_rx: received %u out of %u pkts\n", total_packets, NUM_PACKETS); exit(1); } fprintf(stderr, " %u pkts (%u bytes)", NUM_PACKETS, total_bytes >> 1); } static void walk_v3(int sock, struct ring *ring) { if (ring->type == PACKET_RX_RING) walk_v3_rx(sock, ring); else bug_on(1); } static void __v1_v2_fill(struct ring *ring, unsigned int blocks) { ring->req.tp_block_size = getpagesize() << 2; ring->req.tp_frame_size = TPACKET_ALIGNMENT << 7; ring->req.tp_block_nr = blocks; ring->req.tp_frame_nr = ring->req.tp_block_size / ring->req.tp_frame_size * ring->req.tp_block_nr; ring->mm_len = ring->req.tp_block_size * ring->req.tp_block_nr; ring->walk = walk_v1_v2; ring->rd_num = ring->req.tp_frame_nr; ring->flen = ring->req.tp_frame_size; } static void __v3_fill(struct ring *ring, unsigned int blocks) { ring->req3.tp_retire_blk_tov = 64; ring->req3.tp_sizeof_priv = 13; ring->req3.tp_feature_req_word |= TP_FT_REQ_FILL_RXHASH; ring->req3.tp_block_size = getpagesize() << 2; ring->req3.tp_frame_size = TPACKET_ALIGNMENT << 7; ring->req3.tp_block_nr = blocks; ring->req3.tp_frame_nr = ring->req3.tp_block_size / 
ring->req3.tp_frame_size * ring->req3.tp_block_nr; ring->mm_len = ring->req3.tp_block_size * ring->req3.tp_block_nr; ring->walk = walk_v3; ring->rd_num = ring->req3.tp_block_nr; ring->flen = ring->req3.tp_block_size; } static void setup_ring(int sock, struct ring *ring, int version, int type) { int ret = 0; unsigned int blocks = 256; ring->type = type; ring->version = version; switch (version) { case TPACKET_V1: case TPACKET_V2: if (type == PACKET_TX_RING) __v1_v2_set_packet_loss_discard(sock); __v1_v2_fill(ring, blocks); ret = setsockopt(sock, SOL_PACKET, type, &ring->req, sizeof(ring->req)); break; case TPACKET_V3: __v3_fill(ring, blocks); ret = setsockopt(sock, SOL_PACKET, type, &ring->req3, sizeof(ring->req3)); break; } if (ret == -1) { perror("setsockopt"); exit(1); } ring->rd_len = ring->rd_num * sizeof(*ring->rd); ring->rd = malloc(ring->rd_len); if (ring->rd == NULL) { perror("malloc"); exit(1); } total_packets = 0; total_bytes = 0; } static void mmap_ring(int sock, struct ring *ring) { int i; ring->mm_space = mmap(0, ring->mm_len, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED | MAP_POPULATE, sock, 0); if (ring->mm_space == MAP_FAILED) { perror("mmap"); exit(1); } memset(ring->rd, 0, ring->rd_len); for (i = 0; i < ring->rd_num; ++i) { ring->rd[i].iov_base = ring->mm_space + (i * ring->flen); ring->rd[i].iov_len = ring->flen; } } static void bind_ring(int sock, struct ring *ring) { int ret; ring->ll.sll_family = PF_PACKET; ring->ll.sll_protocol = htons(ETH_P_ALL); ring->ll.sll_ifindex = if_nametoindex("lo"); ring->ll.sll_hatype = 0; ring->ll.sll_pkttype = 0; ring->ll.sll_halen = 0; ret = bind(sock, (struct sockaddr *) &ring->ll, sizeof(ring->ll)); if (ret == -1) { perror("bind"); exit(1); } } static void walk_ring(int sock, struct ring *ring) { ring->walk(sock, ring); } static void unmap_ring(int sock, struct ring *ring) { munmap(ring->mm_space, ring->mm_len); free(ring->rd); } static int test_kernel_bit_width(void) { char in[512], *ptr; int num = 0, fd; ssize_t ret; fd = open("/proc/kallsyms", O_RDONLY); if (fd == -1) { perror("open"); exit(1); } ret = read(fd, in, sizeof(in)); if (ret <= 0) { perror("read"); exit(1); } close(fd); ptr = in; while(!isspace(*ptr)) { num++; ptr++; } return num * 4; } static int test_user_bit_width(void) { return __WORDSIZE; } static const char *tpacket_str[] = { [TPACKET_V1] = "TPACKET_V1", [TPACKET_V2] = "TPACKET_V2", [TPACKET_V3] = "TPACKET_V3", }; static const char *type_str[] = { [PACKET_RX_RING] = "PACKET_RX_RING", [PACKET_TX_RING] = "PACKET_TX_RING", }; static int test_tpacket(int version, int type) { int sock; struct ring ring; fprintf(stderr, "test: %s with %s ", tpacket_str[version], type_str[type]); fflush(stderr); if (version == TPACKET_V1 && test_kernel_bit_width() != test_user_bit_width()) { fprintf(stderr, "test: skip %s %s since user and kernel " "space have different bit width\n", tpacket_str[version], type_str[type]); return 0; } sock = pfsocket(version); memset(&ring, 0, sizeof(ring)); setup_ring(sock, &ring, version, type); mmap_ring(sock, &ring); bind_ring(sock, &ring); walk_ring(sock, &ring); unmap_ring(sock, &ring); close(sock); fprintf(stderr, "\n"); return 0; } int main(void) { int ret = 0; ret |= test_tpacket(TPACKET_V1, PACKET_RX_RING); ret |= test_tpacket(TPACKET_V1, PACKET_TX_RING); ret |= test_tpacket(TPACKET_V2, PACKET_RX_RING); ret |= test_tpacket(TPACKET_V2, PACKET_TX_RING); ret |= test_tpacket(TPACKET_V3, PACKET_RX_RING); if (ret) return 1; printf("OK. All tests passed\n"); return 0; }
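/*
 * A minimal standalone sketch of the tp_status ownership handshake the test
 * above exercises: the kernel hands a frame to user space by setting
 * TP_STATUS_USER, and user space returns it by writing TP_STATUS_KERNEL.
 * This assumes a TPACKET_V2 RX ring already mapped as in
 * setup_ring()/mmap_ring() above; error handling is trimmed.
 */
#include <linux/if_packet.h>
#include <poll.h>

static void consume_one_frame(int sock, struct tpacket2_hdr *hdr)
{
	struct pollfd pfd = { .fd = sock, .events = POLLIN };

	/* wait until the kernel flips this frame to user ownership */
	while (!(hdr->tp_status & TP_STATUS_USER))
		poll(&pfd, 1, -1);

	/* ... payload starts at (uint8_t *)hdr + hdr->tp_mac ... */

	/* hand the frame back to the kernel */
	hdr->tp_status = TP_STATUS_KERNEL;
	__sync_synchronize();
}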
gpl-2.0
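The setup path in the TPACKET test above hinges on a few ring-geometry invariants: frames must tile blocks exactly, and the mmap length is the whole block array. Below is a minimal standalone sketch (not part of the test) that checks those invariants before a hypothetical setsockopt; the concrete sizes are illustrative assumptions, not the test's defaults.

#include <assert.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned int block_size = getpagesize() << 2;	/* e.g. 16 KiB */
	unsigned int frame_size = 2048;			/* must divide block_size */
	unsigned int block_nr   = 256;
	unsigned int frame_nr   = block_size / frame_size * block_nr;
	size_t mm_len           = (size_t)block_size * block_nr;

	/* The kernel enforces checks along these lines when the ring
	 * is requested; TPACKET_ALIGNMENT is 16. */
	assert(block_size % (unsigned int)getpagesize() == 0);
	assert(block_size % frame_size == 0);
	assert(frame_size % 16 == 0);

	printf("%u frames over %u blocks, mmap length %zu bytes\n",
	       frame_nr, block_nr, mm_len);
	return 0;
}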
TesterTerbon/kernel_trebon
arch/mips/lantiq/setup.c
2605
1465
/* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * Copyright (C) 2010 John Crispin <blogic@openwrt.org> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/io.h> #include <linux/ioport.h> #include <asm/bootinfo.h> #include <lantiq_soc.h> #include "machtypes.h" #include "devices.h" #include "prom.h" void __init plat_mem_setup(void) { /* assume 16M as default in case uboot fails to pass proper ramsize */ unsigned long memsize = 16; char **envp = (char **) KSEG1ADDR(fw_arg2); ioport_resource.start = IOPORT_RESOURCE_START; ioport_resource.end = IOPORT_RESOURCE_END; iomem_resource.start = IOMEM_RESOURCE_START; iomem_resource.end = IOMEM_RESOURCE_END; set_io_port_base((unsigned long) KSEG1); while (*envp) { char *e = (char *)KSEG1ADDR(*envp); if (!strncmp(e, "memsize=", 8)) { e += 8; if (strict_strtoul(e, 0, &memsize)) pr_warn("bad memsize specified\n"); } envp++; } memsize *= 1024 * 1024; add_memory_region(0x00000000, memsize, BOOT_MEM_RAM); } static int __init lantiq_setup(void) { ltq_soc_setup(); mips_machine_setup(); return 0; } arch_initcall(lantiq_setup); static void __init lantiq_generic_init(void) { /* Nothing to do */ } MIPS_MACHINE(LTQ_MACH_GENERIC, "Generic", "Generic Lantiq based board", lantiq_generic_init);
gpl-2.0
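plat_mem_setup() above walks the u-boot environment strings looking for "memsize=". Here is a userspace sketch of the same parse, using strtoul in place of the kernel's strict_strtoul; the sample environment strings are made up for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *envp[] = { "console=ttyS0", "memsize=32", NULL };
	unsigned long memsize = 16;	/* default when u-boot passes nothing */
	int i;

	for (i = 0; envp[i]; i++) {
		if (!strncmp(envp[i], "memsize=", 8)) {
			char *end;
			unsigned long v = strtoul(envp[i] + 8, &end, 0);

			/* accept only a fully consumed numeric value */
			if (end != envp[i] + 8 && *end == '\0')
				memsize = v;
			else
				fprintf(stderr, "bad memsize specified\n");
		}
	}
	printf("memory size: %lu MiB (%lu bytes)\n",
	       memsize, memsize * 1024 * 1024);
	return 0;
}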
linuxsky/linux-80211n-csitool
arch/arm/mach-pxa/leds-idp.c
4909
2143
/* * linux/arch/arm/mach-pxa/leds-idp.c * * Copyright (C) 2000 John Dorsey <john+@cs.cmu.edu> * * Copyright (c) 2001 Jeff Sutherland <jeffs@accelent.com> * * Original (leds-footbridge.c) by Russell King * * Macros for actual LED manipulation should be in machine specific * files in this 'mach' directory. */ #include <linux/init.h> #include <mach/hardware.h> #include <asm/leds.h> #include <mach/pxa25x.h> #include <mach/idp.h> #include "leds.h" #define LED_STATE_ENABLED 1 #define LED_STATE_CLAIMED 2 static unsigned int led_state; static unsigned int hw_led_state; void idp_leds_event(led_event_t evt) { unsigned long flags; local_irq_save(flags); switch (evt) { case led_start: hw_led_state = IDP_HB_LED | IDP_BUSY_LED; led_state = LED_STATE_ENABLED; break; case led_stop: led_state &= ~LED_STATE_ENABLED; break; case led_claim: led_state |= LED_STATE_CLAIMED; hw_led_state = IDP_HB_LED | IDP_BUSY_LED; break; case led_release: led_state &= ~LED_STATE_CLAIMED; hw_led_state = IDP_HB_LED | IDP_BUSY_LED; break; #ifdef CONFIG_LEDS_TIMER case led_timer: if (!(led_state & LED_STATE_CLAIMED)) hw_led_state ^= IDP_HB_LED; break; #endif #ifdef CONFIG_LEDS_CPU case led_idle_start: if (!(led_state & LED_STATE_CLAIMED)) hw_led_state &= ~IDP_BUSY_LED; break; case led_idle_end: if (!(led_state & LED_STATE_CLAIMED)) hw_led_state |= IDP_BUSY_LED; break; #endif case led_halted: break; case led_green_on: if (led_state & LED_STATE_CLAIMED) hw_led_state |= IDP_HB_LED; break; case led_green_off: if (led_state & LED_STATE_CLAIMED) hw_led_state &= ~IDP_HB_LED; break; case led_amber_on: break; case led_amber_off: break; case led_red_on: if (led_state & LED_STATE_CLAIMED) hw_led_state |= IDP_BUSY_LED; break; case led_red_off: if (led_state & LED_STATE_CLAIMED) hw_led_state &= ~IDP_BUSY_LED; break; default: break; } if (led_state & LED_STATE_ENABLED) IDP_CPLD_LED_CONTROL = ( (IDP_CPLD_LED_CONTROL | IDP_LEDS_MASK) & ~hw_led_state); else IDP_CPLD_LED_CONTROL |= IDP_LEDS_MASK; local_irq_restore(flags); }
gpl-2.0
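The LED handler above keeps two bitmasks: led_state tracks whether the LEDs are enabled and whether a user has claimed them, and hw_led_state mirrors the hardware bits. A standalone sketch of the key rule (the heartbeat LED only toggles on timer events while unclaimed) follows; the names are illustrative.

#include <stdio.h>

#define LED_STATE_ENABLED 1
#define LED_STATE_CLAIMED 2
#define HB_LED            0x01

static unsigned int led_state;
static unsigned int hw_led_state;

static void led_timer_event(void)
{
	if (!(led_state & LED_STATE_CLAIMED))
		hw_led_state ^= HB_LED;	/* heartbeat blink */
}

int main(void)
{
	led_state = LED_STATE_ENABLED;
	led_timer_event();
	printf("after timer: hb %s\n", hw_led_state & HB_LED ? "on" : "off");

	led_state |= LED_STATE_CLAIMED;	/* a user claims the LEDs */
	led_timer_event();		/* no longer toggles */
	printf("claimed:     hb %s\n", hw_led_state & HB_LED ? "on" : "off");
	return 0;
}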
jamieg71/kernel
tools/perf/util/scripting-engines/trace-event-perl.c
4909
15503
/* * trace-event-perl. Feed perf script events to an embedded Perl interpreter. * * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #include <errno.h> #include "../../perf.h" #include "../util.h" #include "../thread.h" #include "../event.h" #include "../trace-event.h" #include "../evsel.h" #include <EXTERN.h> #include <perl.h> void boot_Perf__Trace__Context(pTHX_ CV *cv); void boot_DynaLoader(pTHX_ CV *cv); typedef PerlInterpreter * INTERP; void xs_init(pTHX); void xs_init(pTHX) { const char *file = __FILE__; dXSUB_SYS; newXS("Perf::Trace::Context::bootstrap", boot_Perf__Trace__Context, file); newXS("DynaLoader::boot_DynaLoader", boot_DynaLoader, file); } INTERP my_perl; #define FTRACE_MAX_EVENT \ ((1 << (sizeof(unsigned short) * 8)) - 1) struct event *events[FTRACE_MAX_EVENT]; extern struct scripting_context *scripting_context; static char *cur_field_name; static int zero_flag_atom; static void define_symbolic_value(const char *ev_name, const char *field_name, const char *field_value, const char *field_str) { unsigned long long value; dSP; value = eval_flag(field_value); ENTER; SAVETMPS; PUSHMARK(SP); XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); XPUSHs(sv_2mortal(newSVpv(field_name, 0))); XPUSHs(sv_2mortal(newSVuv(value))); XPUSHs(sv_2mortal(newSVpv(field_str, 0))); PUTBACK; if (get_cv("main::define_symbolic_value", 0)) call_pv("main::define_symbolic_value", G_SCALAR); SPAGAIN; PUTBACK; FREETMPS; LEAVE; } static void define_symbolic_values(struct print_flag_sym *field, const char *ev_name, const char *field_name) { define_symbolic_value(ev_name, field_name, field->value, field->str); if (field->next) define_symbolic_values(field->next, ev_name, field_name); } static void define_symbolic_field(const char *ev_name, const char *field_name) { dSP; ENTER; SAVETMPS; PUSHMARK(SP); XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); XPUSHs(sv_2mortal(newSVpv(field_name, 0))); PUTBACK; if (get_cv("main::define_symbolic_field", 0)) call_pv("main::define_symbolic_field", G_SCALAR); SPAGAIN; PUTBACK; FREETMPS; LEAVE; } static void define_flag_value(const char *ev_name, const char *field_name, const char *field_value, const char *field_str) { unsigned long long value; dSP; value = eval_flag(field_value); ENTER; SAVETMPS; PUSHMARK(SP); XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); XPUSHs(sv_2mortal(newSVpv(field_name, 0))); XPUSHs(sv_2mortal(newSVuv(value))); XPUSHs(sv_2mortal(newSVpv(field_str, 0))); PUTBACK; if (get_cv("main::define_flag_value", 0)) call_pv("main::define_flag_value", G_SCALAR); SPAGAIN; PUTBACK; FREETMPS; LEAVE; } static void define_flag_values(struct print_flag_sym *field, const char *ev_name, const char *field_name) { define_flag_value(ev_name, field_name, field->value, field->str); if (field->next) define_flag_values(field->next, 
ev_name, field_name); } static void define_flag_field(const char *ev_name, const char *field_name, const char *delim) { dSP; ENTER; SAVETMPS; PUSHMARK(SP); XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); XPUSHs(sv_2mortal(newSVpv(field_name, 0))); XPUSHs(sv_2mortal(newSVpv(delim, 0))); PUTBACK; if (get_cv("main::define_flag_field", 0)) call_pv("main::define_flag_field", G_SCALAR); SPAGAIN; PUTBACK; FREETMPS; LEAVE; } static void define_event_symbols(struct event *event, const char *ev_name, struct print_arg *args) { switch (args->type) { case PRINT_NULL: break; case PRINT_ATOM: define_flag_value(ev_name, cur_field_name, "0", args->atom.atom); zero_flag_atom = 0; break; case PRINT_FIELD: if (cur_field_name) free(cur_field_name); cur_field_name = strdup(args->field.name); break; case PRINT_FLAGS: define_event_symbols(event, ev_name, args->flags.field); define_flag_field(ev_name, cur_field_name, args->flags.delim); define_flag_values(args->flags.flags, ev_name, cur_field_name); break; case PRINT_SYMBOL: define_event_symbols(event, ev_name, args->symbol.field); define_symbolic_field(ev_name, cur_field_name); define_symbolic_values(args->symbol.symbols, ev_name, cur_field_name); break; case PRINT_STRING: break; case PRINT_TYPE: define_event_symbols(event, ev_name, args->typecast.item); break; case PRINT_OP: if (strcmp(args->op.op, ":") == 0) zero_flag_atom = 1; define_event_symbols(event, ev_name, args->op.left); define_event_symbols(event, ev_name, args->op.right); break; default: /* we should warn... */ return; } if (args->next) define_event_symbols(event, ev_name, args->next); } static inline struct event *find_cache_event(int type) { static char ev_name[256]; struct event *event; if (events[type]) return events[type]; events[type] = event = trace_find_event(type); if (!event) return NULL; sprintf(ev_name, "%s::%s", event->system, event->name); define_event_symbols(event, ev_name, event->print_fmt.args); return event; } static void perl_process_tracepoint(union perf_event *pevent __unused, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine __unused, struct thread *thread) { struct format_field *field; static char handler[256]; unsigned long long val; unsigned long s, ns; struct event *event; int type; int pid; int cpu = sample->cpu; void *data = sample->raw_data; unsigned long long nsecs = sample->time; char *comm = thread->comm; dSP; if (evsel->attr.type != PERF_TYPE_TRACEPOINT) return; type = trace_parse_common_type(data); event = find_cache_event(type); if (!event) die("ug! 
no event found for type %d", type); pid = trace_parse_common_pid(data); sprintf(handler, "%s::%s", event->system, event->name); s = nsecs / NSECS_PER_SEC; ns = nsecs - s * NSECS_PER_SEC; scripting_context->event_data = data; ENTER; SAVETMPS; PUSHMARK(SP); XPUSHs(sv_2mortal(newSVpv(handler, 0))); XPUSHs(sv_2mortal(newSViv(PTR2IV(scripting_context)))); XPUSHs(sv_2mortal(newSVuv(cpu))); XPUSHs(sv_2mortal(newSVuv(s))); XPUSHs(sv_2mortal(newSVuv(ns))); XPUSHs(sv_2mortal(newSViv(pid))); XPUSHs(sv_2mortal(newSVpv(comm, 0))); /* common fields other than pid can be accessed via xsub fns */ for (field = event->format.fields; field; field = field->next) { if (field->flags & FIELD_IS_STRING) { int offset; if (field->flags & FIELD_IS_DYNAMIC) { offset = *(int *)(data + field->offset); offset &= 0xffff; } else offset = field->offset; XPUSHs(sv_2mortal(newSVpv((char *)data + offset, 0))); } else { /* FIELD_IS_NUMERIC */ val = read_size(data + field->offset, field->size); if (field->flags & FIELD_IS_SIGNED) { XPUSHs(sv_2mortal(newSViv(val))); } else { XPUSHs(sv_2mortal(newSVuv(val))); } } } PUTBACK; if (get_cv(handler, 0)) call_pv(handler, G_SCALAR); else if (get_cv("main::trace_unhandled", 0)) { XPUSHs(sv_2mortal(newSVpv(handler, 0))); XPUSHs(sv_2mortal(newSViv(PTR2IV(scripting_context)))); XPUSHs(sv_2mortal(newSVuv(cpu))); XPUSHs(sv_2mortal(newSVuv(nsecs))); XPUSHs(sv_2mortal(newSViv(pid))); XPUSHs(sv_2mortal(newSVpv(comm, 0))); call_pv("main::trace_unhandled", G_SCALAR); } SPAGAIN; PUTBACK; FREETMPS; LEAVE; } static void perl_process_event_generic(union perf_event *pevent __unused, struct perf_sample *sample, struct perf_evsel *evsel __unused, struct machine *machine __unused, struct thread *thread __unused) { dSP; if (!get_cv("process_event", 0)) return; ENTER; SAVETMPS; PUSHMARK(SP); XPUSHs(sv_2mortal(newSVpvn((const char *)pevent, pevent->header.size))); XPUSHs(sv_2mortal(newSVpvn((const char *)&evsel->attr, sizeof(evsel->attr)))); XPUSHs(sv_2mortal(newSVpvn((const char *)sample, sizeof(*sample)))); XPUSHs(sv_2mortal(newSVpvn((const char *)sample->raw_data, sample->raw_size))); PUTBACK; call_pv("process_event", G_SCALAR); SPAGAIN; PUTBACK; FREETMPS; LEAVE; } static void perl_process_event(union perf_event *pevent, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine, struct thread *thread) { perl_process_tracepoint(pevent, sample, evsel, machine, thread); perl_process_event_generic(pevent, sample, evsel, machine, thread); } static void run_start_sub(void) { dSP; /* access to Perl stack */ PUSHMARK(SP); if (get_cv("main::trace_begin", 0)) call_pv("main::trace_begin", G_DISCARD | G_NOARGS); } /* * Start trace script */ static int perl_start_script(const char *script, int argc, const char **argv) { const char **command_line; int i, err = 0; command_line = malloc((argc + 2) * sizeof(const char *)); command_line[0] = ""; command_line[1] = script; for (i = 2; i < argc + 2; i++) command_line[i] = argv[i - 2]; my_perl = perl_alloc(); perl_construct(my_perl); if (perl_parse(my_perl, xs_init, argc + 2, (char **)command_line, (char **)NULL)) { err = -1; goto error; } if (perl_run(my_perl)) { err = -1; goto error; } if (SvTRUE(ERRSV)) { err = -1; goto error; } run_start_sub(); free(command_line); return 0; error: perl_free(my_perl); free(command_line); return err; } /* * Stop trace script */ static int perl_stop_script(void) { dSP; /* access to Perl stack */ PUSHMARK(SP); if (get_cv("main::trace_end", 0)) call_pv("main::trace_end", G_DISCARD | G_NOARGS); perl_destruct(my_perl); 
perl_free(my_perl); return 0; } static int perl_generate_script(const char *outfile) { struct event *event = NULL; struct format_field *f; char fname[PATH_MAX]; int not_first, count; FILE *ofp; sprintf(fname, "%s.pl", outfile); ofp = fopen(fname, "w"); if (ofp == NULL) { fprintf(stderr, "couldn't open %s\n", fname); return -1; } fprintf(ofp, "# perf script event handlers, " "generated by perf script -g perl\n"); fprintf(ofp, "# Licensed under the terms of the GNU GPL" " License version 2\n\n"); fprintf(ofp, "# The common_* event handler fields are the most useful " "fields common to\n"); fprintf(ofp, "# all events. They don't necessarily correspond to " "the 'common_*' fields\n"); fprintf(ofp, "# in the format files. Those fields not available as " "handler params can\n"); fprintf(ofp, "# be retrieved using Perl functions of the form " "common_*($context).\n"); fprintf(ofp, "# See Context.pm for the list of available " "functions.\n\n"); fprintf(ofp, "use lib \"$ENV{'PERF_EXEC_PATH'}/scripts/perl/" "Perf-Trace-Util/lib\";\n"); fprintf(ofp, "use lib \"./Perf-Trace-Util/lib\";\n"); fprintf(ofp, "use Perf::Trace::Core;\n"); fprintf(ofp, "use Perf::Trace::Context;\n"); fprintf(ofp, "use Perf::Trace::Util;\n\n"); fprintf(ofp, "sub trace_begin\n{\n\t# optional\n}\n\n"); fprintf(ofp, "sub trace_end\n{\n\t# optional\n}\n\n"); while ((event = trace_find_next_event(event))) { fprintf(ofp, "sub %s::%s\n{\n", event->system, event->name); fprintf(ofp, "\tmy ("); fprintf(ofp, "$event_name, "); fprintf(ofp, "$context, "); fprintf(ofp, "$common_cpu, "); fprintf(ofp, "$common_secs, "); fprintf(ofp, "$common_nsecs,\n"); fprintf(ofp, "\t $common_pid, "); fprintf(ofp, "$common_comm,\n\t "); not_first = 0; count = 0; for (f = event->format.fields; f; f = f->next) { if (not_first++) fprintf(ofp, ", "); if (++count % 5 == 0) fprintf(ofp, "\n\t "); fprintf(ofp, "$%s", f->name); } fprintf(ofp, ") = @_;\n\n"); fprintf(ofp, "\tprint_header($event_name, $common_cpu, " "$common_secs, $common_nsecs,\n\t " "$common_pid, $common_comm);\n\n"); fprintf(ofp, "\tprintf(\""); not_first = 0; count = 0; for (f = event->format.fields; f; f = f->next) { if (not_first++) fprintf(ofp, ", "); if (count && count % 4 == 0) { fprintf(ofp, "\".\n\t \""); } count++; fprintf(ofp, "%s=", f->name); if (f->flags & FIELD_IS_STRING || f->flags & FIELD_IS_FLAG || f->flags & FIELD_IS_SYMBOLIC) fprintf(ofp, "%%s"); else if (f->flags & FIELD_IS_SIGNED) fprintf(ofp, "%%d"); else fprintf(ofp, "%%u"); } fprintf(ofp, "\\n\",\n\t "); not_first = 0; count = 0; for (f = event->format.fields; f; f = f->next) { if (not_first++) fprintf(ofp, ", "); if (++count % 5 == 0) fprintf(ofp, "\n\t "); if (f->flags & FIELD_IS_FLAG) { if ((count - 1) % 5 != 0) { fprintf(ofp, "\n\t "); count = 4; } fprintf(ofp, "flag_str(\""); fprintf(ofp, "%s::%s\", ", event->system, event->name); fprintf(ofp, "\"%s\", $%s)", f->name, f->name); } else if (f->flags & FIELD_IS_SYMBOLIC) { if ((count - 1) % 5 != 0) { fprintf(ofp, "\n\t "); count = 4; } fprintf(ofp, "symbol_str(\""); fprintf(ofp, "%s::%s\", ", event->system, event->name); fprintf(ofp, "\"%s\", $%s)", f->name, f->name); } else fprintf(ofp, "$%s", f->name); } fprintf(ofp, ");\n"); fprintf(ofp, "}\n\n"); } fprintf(ofp, "sub trace_unhandled\n{\n\tmy ($event_name, $context, " "$common_cpu, $common_secs, $common_nsecs,\n\t " "$common_pid, $common_comm) = @_;\n\n"); fprintf(ofp, "\tprint_header($event_name, $common_cpu, " "$common_secs, $common_nsecs,\n\t $common_pid, " "$common_comm);\n}\n\n"); fprintf(ofp, "sub 
print_header\n{\n" "\tmy ($event_name, $cpu, $secs, $nsecs, $pid, $comm) = @_;\n\n" "\tprintf(\"%%-20s %%5u %%05u.%%09u %%8u %%-20s \",\n\t " "$event_name, $cpu, $secs, $nsecs, $pid, $comm);\n}\n"); fprintf(ofp, "\n# Packed byte string args of process_event():\n" "#\n" "# $event:\tunion perf_event\tutil/event.h\n" "# $attr:\tstruct perf_event_attr\tlinux/perf_event.h\n" "# $sample:\tstruct perf_sample\tutil/event.h\n" "# $raw_data:\tperf_sample->raw_data\tutil/event.h\n" "\n" "sub process_event\n" "{\n" "\tmy ($event, $attr, $sample, $raw_data) = @_;\n" "\n" "\tmy @event\t= unpack(\"LSS\", $event);\n" "\tmy @attr\t= unpack(\"LLQQQQQLLQQ\", $attr);\n" "\tmy @sample\t= unpack(\"QLLQQQQQLL\", $sample);\n" "\tmy @raw_data\t= unpack(\"C*\", $raw_data);\n" "\n" "\tuse Data::Dumper;\n" "\tprint Dumper \\@event, \\@attr, \\@sample, \\@raw_data;\n" "}\n"); fclose(ofp); fprintf(stderr, "generated Perl script: %s\n", fname); return 0; } struct scripting_ops perl_scripting_ops = { .name = "Perl", .start_script = perl_start_script, .stop_script = perl_stop_script, .process_event = perl_process_event, .generate_script = perl_generate_script, };
gpl-2.0
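perl_start_script() and perl_stop_script() above wrap the standard interpreter lifecycle (perl_alloc, perl_construct, perl_parse, perl_run, perl_destruct, perl_free). The bare-bones embedding they build on looks like the sketch below, per the perlembed documentation; compile and link flags typically come from perl -MExtUtils::Embed -e ccopts -e ldopts.

#include <EXTERN.h>
#include <perl.h>

static PerlInterpreter *my_perl;

int main(int argc, char **argv, char **env)
{
	char *embedding[] = { "", "-e", "print \"hello from perl\\n\";", NULL };

	PERL_SYS_INIT3(&argc, &argv, &env);
	my_perl = perl_alloc();
	perl_construct(my_perl);
	/* NULL xs_init: no XS bootstrapping, unlike the driver above */
	perl_parse(my_perl, NULL, 3, embedding, NULL);
	perl_run(my_perl);
	perl_destruct(my_perl);
	perl_free(my_perl);
	PERL_SYS_TERM();
	return 0;
}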
flwh/android_kernel_S856
fs/jffs2/build.c
8749
11164
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2001-2007 Red Hat, Inc. * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org> * * Created by David Woodhouse <dwmw2@infradead.org> * * For licensing information, see the file 'LICENCE' in this directory. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/mtd/mtd.h> #include "nodelist.h" static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *, struct jffs2_inode_cache *, struct jffs2_full_dirent **); static inline struct jffs2_inode_cache * first_inode_chain(int *i, struct jffs2_sb_info *c) { for (; *i < c->inocache_hashsize; (*i)++) { if (c->inocache_list[*i]) return c->inocache_list[*i]; } return NULL; } static inline struct jffs2_inode_cache * next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c) { /* More in this chain? */ if (ic->next) return ic->next; (*i)++; return first_inode_chain(i, c); } #define for_each_inode(i, c, ic) \ for (i = 0, ic = first_inode_chain(&i, (c)); \ ic; \ ic = next_inode(&i, ic, (c))) static void jffs2_build_inode_pass1(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) { struct jffs2_full_dirent *fd; dbg_fsbuild("building directory inode #%u\n", ic->ino); /* For each child, increase nlink */ for(fd = ic->scan_dents; fd; fd = fd->next) { struct jffs2_inode_cache *child_ic; if (!fd->ino) continue; /* we can get high latency here with huge directories */ child_ic = jffs2_get_ino_cache(c, fd->ino); if (!child_ic) { dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n", fd->name, fd->ino, ic->ino); jffs2_mark_node_obsolete(c, fd->raw); continue; } if (fd->type == DT_DIR) { if (child_ic->pino_nlink) { JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n", fd->name, fd->ino, ic->ino); /* TODO: What do we do about it? */ } else { child_ic->pino_nlink = ic->ino; } } else child_ic->pino_nlink++; dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino); /* Can't free scan_dents so far. We might need them in pass 2 */ } } /* Scan plan: - Scan physical nodes. Build map of inodes/dirents. Allocate inocaches as we go - Scan directory tree from top down, setting nlink in inocaches - Scan inocaches for inodes with nlink==0 */ static int jffs2_build_filesystem(struct jffs2_sb_info *c) { int ret; int i; struct jffs2_inode_cache *ic; struct jffs2_full_dirent *fd; struct jffs2_full_dirent *dead_fds = NULL; dbg_fsbuild("build FS data structures\n"); /* First, scan the medium and build all the inode caches with lists of physical nodes */ c->flags |= JFFS2_SB_FLAG_SCANNING; ret = jffs2_scan_medium(c); c->flags &= ~JFFS2_SB_FLAG_SCANNING; if (ret) goto exit; dbg_fsbuild("scanned flash completely\n"); jffs2_dbg_dump_block_lists_nolock(c); dbg_fsbuild("pass 1 starting\n"); c->flags |= JFFS2_SB_FLAG_BUILDING; /* Now scan the directory tree, increasing nlink according to every dirent found. */ for_each_inode(i, c, ic) { if (ic->scan_dents) { jffs2_build_inode_pass1(c, ic); cond_resched(); } } dbg_fsbuild("pass 1 complete\n"); /* Next, scan for inodes with nlink == 0 and remove them. If they were directories, then decrement the nlink of their children too, and repeat the scan. As that's going to be a fairly uncommon occurrence, it's not so evil to do it this way. Recursion bad. 
*/ dbg_fsbuild("pass 2 starting\n"); for_each_inode(i, c, ic) { if (ic->pino_nlink) continue; jffs2_build_remove_unlinked_inode(c, ic, &dead_fds); cond_resched(); } dbg_fsbuild("pass 2a starting\n"); while (dead_fds) { fd = dead_fds; dead_fds = fd->next; ic = jffs2_get_ino_cache(c, fd->ino); if (ic) jffs2_build_remove_unlinked_inode(c, ic, &dead_fds); jffs2_free_full_dirent(fd); } dbg_fsbuild("pass 2a complete\n"); dbg_fsbuild("freeing temporary data structures\n"); /* Finally, we can scan again and free the dirent structs */ for_each_inode(i, c, ic) { while(ic->scan_dents) { fd = ic->scan_dents; ic->scan_dents = fd->next; jffs2_free_full_dirent(fd); } ic->scan_dents = NULL; cond_resched(); } jffs2_build_xattr_subsystem(c); c->flags &= ~JFFS2_SB_FLAG_BUILDING; dbg_fsbuild("FS build complete\n"); /* Rotate the lists by some number to ensure wear levelling */ jffs2_rotate_lists(c); ret = 0; exit: if (ret) { for_each_inode(i, c, ic) { while(ic->scan_dents) { fd = ic->scan_dents; ic->scan_dents = fd->next; jffs2_free_full_dirent(fd); } } jffs2_clear_xattr_subsystem(c); } return ret; } static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, struct jffs2_full_dirent **dead_fds) { struct jffs2_raw_node_ref *raw; struct jffs2_full_dirent *fd; dbg_fsbuild("removing ino #%u with nlink == zero.\n", ic->ino); raw = ic->nodes; while (raw != (void *)ic) { struct jffs2_raw_node_ref *next = raw->next_in_ino; dbg_fsbuild("obsoleting node at 0x%08x\n", ref_offset(raw)); jffs2_mark_node_obsolete(c, raw); raw = next; } if (ic->scan_dents) { int whinged = 0; dbg_fsbuild("inode #%u was a directory which may have children...\n", ic->ino); while(ic->scan_dents) { struct jffs2_inode_cache *child_ic; fd = ic->scan_dents; ic->scan_dents = fd->next; if (!fd->ino) { /* It's a deletion dirent. Ignore it */ dbg_fsbuild("child \"%s\" is a deletion dirent, skipping...\n", fd->name); jffs2_free_full_dirent(fd); continue; } if (!whinged) whinged = 1; dbg_fsbuild("removing child \"%s\", ino #%u\n", fd->name, fd->ino); child_ic = jffs2_get_ino_cache(c, fd->ino); if (!child_ic) { dbg_fsbuild("cannot remove child \"%s\", ino #%u, because it doesn't exist\n", fd->name, fd->ino); jffs2_free_full_dirent(fd); continue; } /* Reduce nlink of the child. If it's now zero, stick it on the dead_fds list to be cleaned up later. Else just free the fd */ if (fd->type == DT_DIR) child_ic->pino_nlink = 0; else child_ic->pino_nlink--; if (!child_ic->pino_nlink) { dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n", fd->ino, fd->name); fd->next = *dead_fds; *dead_fds = fd; } else { dbg_fsbuild("inode #%u (\"%s\") has now got nlink %d. Ignoring.\n", fd->ino, fd->name, child_ic->pino_nlink); jffs2_free_full_dirent(fd); } } } /* We don't delete the inocache from the hash list and free it yet. The erase code will do that, when all the nodes are completely gone. */ } static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c) { uint32_t size; /* Deletion should almost _always_ be allowed. We're fairly buggered once we stop allowing people to delete stuff because there's not enough free space... */ c->resv_blocks_deletion = 2; /* Be conservative about how much space we need before we allow writes. On top of that which is required for deletia, require an extra 2% of the medium to be available, for overhead caused by nodes being split across blocks, etc. 
*/ size = c->flash_size / 50; /* 2% of flash size */ size += c->nr_blocks * 100; /* And 100 bytes per eraseblock */ size += c->sector_size - 1; /* ... and round up */ c->resv_blocks_write = c->resv_blocks_deletion + (size / c->sector_size); /* When do we let the GC thread run in the background */ c->resv_blocks_gctrigger = c->resv_blocks_write + 1; /* When do we allow garbage collection to merge nodes to make long-term progress at the expense of short-term space exhaustion? */ c->resv_blocks_gcmerge = c->resv_blocks_deletion + 1; /* When do we allow garbage collection to eat from bad blocks rather than actually making progress? */ c->resv_blocks_gcbad = 0;//c->resv_blocks_deletion + 2; /* What number of 'very dirty' eraseblocks do we allow before we trigger the GC thread even if we don't _need_ the space. When we can't mark nodes obsolete on the medium, the old dirty nodes cause performance problems because we have to inspect and discard them. */ c->vdirty_blocks_gctrigger = c->resv_blocks_gctrigger; if (jffs2_can_mark_obsolete(c)) c->vdirty_blocks_gctrigger *= 10; /* If there's less than this amount of dirty space, don't bother trying to GC to make more space. It'll be a fruitless task */ c->nospc_dirty_size = c->sector_size + (c->flash_size / 100); dbg_fsbuild("trigger levels (size %d KiB, block size %d KiB, %d blocks)\n", c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks); dbg_fsbuild("Blocks required to allow deletion: %d (%d KiB)\n", c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024); dbg_fsbuild("Blocks required to allow writes: %d (%d KiB)\n", c->resv_blocks_write, c->resv_blocks_write*c->sector_size/1024); dbg_fsbuild("Blocks required to quiesce GC thread: %d (%d KiB)\n", c->resv_blocks_gctrigger, c->resv_blocks_gctrigger*c->sector_size/1024); dbg_fsbuild("Blocks required to allow GC merges: %d (%d KiB)\n", c->resv_blocks_gcmerge, c->resv_blocks_gcmerge*c->sector_size/1024); dbg_fsbuild("Blocks required to GC bad blocks: %d (%d KiB)\n", c->resv_blocks_gcbad, c->resv_blocks_gcbad*c->sector_size/1024); dbg_fsbuild("Amount of dirty space required to GC: %d bytes\n", c->nospc_dirty_size); dbg_fsbuild("Very dirty blocks before GC triggered: %d\n", c->vdirty_blocks_gctrigger); } int jffs2_do_mount_fs(struct jffs2_sb_info *c) { int ret; int i; int size; c->free_size = c->flash_size; c->nr_blocks = c->flash_size / c->sector_size; size = sizeof(struct jffs2_eraseblock) * c->nr_blocks; #ifndef __ECOS if (jffs2_blocks_use_vmalloc(c)) c->blocks = vzalloc(size); else #endif c->blocks = kzalloc(size, GFP_KERNEL); if (!c->blocks) return -ENOMEM; for (i=0; i<c->nr_blocks; i++) { INIT_LIST_HEAD(&c->blocks[i].list); c->blocks[i].offset = i * c->sector_size; c->blocks[i].free_size = c->sector_size; } INIT_LIST_HEAD(&c->clean_list); INIT_LIST_HEAD(&c->very_dirty_list); INIT_LIST_HEAD(&c->dirty_list); INIT_LIST_HEAD(&c->erasable_list); INIT_LIST_HEAD(&c->erasing_list); INIT_LIST_HEAD(&c->erase_checking_list); INIT_LIST_HEAD(&c->erase_pending_list); INIT_LIST_HEAD(&c->erasable_pending_wbuf_list); INIT_LIST_HEAD(&c->erase_complete_list); INIT_LIST_HEAD(&c->free_list); INIT_LIST_HEAD(&c->bad_list); INIT_LIST_HEAD(&c->bad_used_list); c->highest_ino = 1; c->summary = NULL; ret = jffs2_sum_init(c); if (ret) goto out_free; if (jffs2_build_filesystem(c)) { dbg_fsbuild("build_fs failed\n"); jffs2_free_ino_caches(c); jffs2_free_raw_node_refs(c); ret = -EIO; goto out_free; } jffs2_calc_trigger_levels(c); return 0; out_free: #ifndef __ECOS if (jffs2_blocks_use_vmalloc(c)) 
vfree(c->blocks); else #endif kfree(c->blocks); return ret; }
gpl-2.0
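Pass 2/2a above deliberately avoids recursion ("Recursion bad"): when removing an unlinked directory orphans its children, the children are pushed onto the dead_fds list and drained iteratively. Here is a standalone sketch of that worklist pattern; the node type and the sample inode numbers are made up for illustration.

#include <stdio.h>
#include <stdlib.h>

struct work {
	int ino;
	struct work *next;
};

static void push(struct work **list, int ino)
{
	struct work *w = malloc(sizeof(*w));

	w->ino = ino;
	w->next = *list;
	*list = w;
}

int main(void)
{
	struct work *dead = NULL;

	push(&dead, 42);
	while (dead) {			/* pass-2a style drain loop */
		struct work *w = dead;

		dead = w->next;
		printf("removing orphaned ino #%d\n", w->ino);
		if (w->ino == 42)	/* removing it orphans a child */
			push(&dead, 43);
		free(w);
	}
	return 0;
}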
AICP/kernel_motorola_msm8960dt-common
drivers/mtd/maps/l440gx.c
9773
4077
/* * BIOS Flash chip on Intel 440GX board. * * Bugs this currently does not work under linuxBIOS. */ #include <linux/module.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/init.h> #include <asm/io.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #define PIIXE_IOBASE_RESOURCE 11 #define WINDOW_ADDR 0xfff00000 #define WINDOW_SIZE 0x00100000 #define BUSWIDTH 1 static u32 iobase; #define IOBASE iobase #define TRIBUF_PORT (IOBASE+0x37) #define VPP_PORT (IOBASE+0x28) static struct mtd_info *mymtd; /* Is this really the vpp port? */ static DEFINE_SPINLOCK(l440gx_vpp_lock); static int l440gx_vpp_refcnt; static void l440gx_set_vpp(struct map_info *map, int vpp) { unsigned long flags; spin_lock_irqsave(&l440gx_vpp_lock, flags); if (vpp) { if (++l440gx_vpp_refcnt == 1) /* first nested 'on' */ outl(inl(VPP_PORT) | 1, VPP_PORT); } else { if (--l440gx_vpp_refcnt == 0) /* last nested 'off' */ outl(inl(VPP_PORT) & ~1, VPP_PORT); } spin_unlock_irqrestore(&l440gx_vpp_lock, flags); } static struct map_info l440gx_map = { .name = "L440GX BIOS", .size = WINDOW_SIZE, .bankwidth = BUSWIDTH, .phys = WINDOW_ADDR, #if 0 /* FIXME verify that this is the * appripriate code for vpp enable/disable */ .set_vpp = l440gx_set_vpp #endif }; static int __init init_l440gx(void) { struct pci_dev *dev, *pm_dev; struct resource *pm_iobase; __u16 word; dev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0, NULL); pm_dev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, NULL); pci_dev_put(dev); if (!dev || !pm_dev) { printk(KERN_NOTICE "L440GX flash mapping: failed to find PIIX4 ISA bridge, cannot continue\n"); pci_dev_put(pm_dev); return -ENODEV; } l440gx_map.virt = ioremap_nocache(WINDOW_ADDR, WINDOW_SIZE); if (!l440gx_map.virt) { printk(KERN_WARNING "Failed to ioremap L440GX flash region\n"); pci_dev_put(pm_dev); return -ENOMEM; } simple_map_init(&l440gx_map); printk(KERN_NOTICE "window_addr = 0x%08lx\n", (unsigned long)l440gx_map.virt); /* Setup the pm iobase resource * This code should move into some kind of generic bridge * driver but for the moment I'm content with getting the * allocation correct. */ pm_iobase = &pm_dev->resource[PIIXE_IOBASE_RESOURCE]; if (!(pm_iobase->flags & IORESOURCE_IO)) { pm_iobase->name = "pm iobase"; pm_iobase->start = 0; pm_iobase->end = 63; pm_iobase->flags = IORESOURCE_IO; /* Put the current value in the resource */ pci_read_config_dword(pm_dev, 0x40, &iobase); iobase &= ~1; pm_iobase->start += iobase & ~1; pm_iobase->end += iobase & ~1; pci_dev_put(pm_dev); /* Allocate the resource region */ if (pci_assign_resource(pm_dev, PIIXE_IOBASE_RESOURCE) != 0) { pci_dev_put(dev); pci_dev_put(pm_dev); printk(KERN_WARNING "Could not allocate pm iobase resource\n"); iounmap(l440gx_map.virt); return -ENXIO; } } /* Set the iobase */ iobase = pm_iobase->start; pci_write_config_dword(pm_dev, 0x40, iobase | 1); /* Set XBCS# */ pci_read_config_word(dev, 0x4e, &word); word |= 0x4; pci_write_config_word(dev, 0x4e, word); /* Supply write voltage to the chip */ l440gx_set_vpp(&l440gx_map, 1); /* Enable the gate on the WE line */ outb(inb(TRIBUF_PORT) & ~1, TRIBUF_PORT); printk(KERN_NOTICE "Enabled WE line to L440GX BIOS flash chip.\n"); mymtd = do_map_probe("jedec_probe", &l440gx_map); if (!mymtd) { printk(KERN_NOTICE "JEDEC probe on BIOS chip failed. 
Using ROM\n"); mymtd = do_map_probe("map_rom", &l440gx_map); } if (mymtd) { mymtd->owner = THIS_MODULE; mtd_device_register(mymtd, NULL, 0); return 0; } iounmap(l440gx_map.virt); return -ENXIO; } static void __exit cleanup_l440gx(void) { mtd_device_unregister(mymtd); map_destroy(mymtd); iounmap(l440gx_map.virt); } module_init(init_l440gx); module_exit(cleanup_l440gx); MODULE_LICENSE("GPL"); MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); MODULE_DESCRIPTION("MTD map driver for BIOS chips on Intel L440GX motherboards");
gpl-2.0
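l440gx_set_vpp() above refcounts nested on/off requests under a spinlock so the hardware bit flips only on the first "on" and the last "off". A standalone sketch of that discipline follows; the hw_write() stub stands in for the driver's outl() to the VPP port.

#include <stdio.h>

static int vpp_refcnt;

static void hw_write(int on)
{
	printf("VPP %s\n", on ? "enabled" : "disabled");
}

static void set_vpp(int vpp)
{
	/* In the driver this runs with a spinlock held and IRQs off. */
	if (vpp) {
		if (++vpp_refcnt == 1)	/* first nested 'on' */
			hw_write(1);
	} else {
		if (--vpp_refcnt == 0)	/* last nested 'off' */
			hw_write(0);
	}
}

int main(void)
{
	set_vpp(1);	/* writes the bit */
	set_vpp(1);	/* nested: no write */
	set_vpp(0);	/* nested: no write */
	set_vpp(0);	/* writes the bit */
	return 0;
}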
Filmetrics/smartinstr-linux-kernel
drivers/clocksource/mmio.c
10797
1904
/* * Generic MMIO clocksource support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/clocksource.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> struct clocksource_mmio { void __iomem *reg; struct clocksource clksrc; }; static inline struct clocksource_mmio *to_mmio_clksrc(struct clocksource *c) { return container_of(c, struct clocksource_mmio, clksrc); } cycle_t clocksource_mmio_readl_up(struct clocksource *c) { return readl_relaxed(to_mmio_clksrc(c)->reg); } cycle_t clocksource_mmio_readl_down(struct clocksource *c) { return ~readl_relaxed(to_mmio_clksrc(c)->reg); } cycle_t clocksource_mmio_readw_up(struct clocksource *c) { return readw_relaxed(to_mmio_clksrc(c)->reg); } cycle_t clocksource_mmio_readw_down(struct clocksource *c) { return ~(unsigned)readw_relaxed(to_mmio_clksrc(c)->reg); } /** * clocksource_mmio_init - Initialize a simple mmio based clocksource * @base: Virtual address of the clock readout register * @name: Name of the clocksource * @hz: Frequency of the clocksource in Hz * @rating: Rating of the clocksource * @bits: Number of valid bits * @read: One of clocksource_mmio_read*() above */ int __init clocksource_mmio_init(void __iomem *base, const char *name, unsigned long hz, int rating, unsigned bits, cycle_t (*read)(struct clocksource *)) { struct clocksource_mmio *cs; if (bits > 32 || bits < 16) return -EINVAL; cs = kzalloc(sizeof(struct clocksource_mmio), GFP_KERNEL); if (!cs) return -ENOMEM; cs->reg = base; cs->clksrc.name = name; cs->clksrc.rating = rating; cs->clksrc.read = read; cs->clksrc.mask = CLOCKSOURCE_MASK(bits); cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS; return clocksource_register_hz(&cs->clksrc, hz); }
gpl-2.0
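to_mmio_clksrc() above is the standard container_of() idiom: given a pointer to an embedded member, subtract the member's offset to recover the enclosing struct. A self-contained userspace sketch, with illustrative struct names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct clksrc {
	int rating;
};

struct mmio_clksrc {
	void *reg;
	struct clksrc clksrc;	/* embedded member */
};

int main(void)
{
	struct mmio_clksrc mc = { .reg = (void *)0x1000, .clksrc = { 300 } };
	struct clksrc *c = &mc.clksrc;	/* what the core hands back */
	struct mmio_clksrc *back = container_of(c, struct mmio_clksrc, clksrc);

	printf("recovered reg %p, rating %d\n", back->reg, back->clksrc.rating);
	return back == &mc ? 0 : 1;
}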
gbrouse/linux-2.6-imx
arch/s390/mm/fault.c
46
16646
/* * arch/s390/mm/fault.c * * S390 version * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation * Author(s): Hartmut Penner (hp@de.ibm.com) * Ulrich Weigand (uweigand@de.ibm.com) * * Derived from "arch/i386/mm/fault.c" * Copyright (C) 1995 Linus Torvalds */ #include <linux/kernel_stat.h> #include <linux/perf_event.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/compat.h> #include <linux/smp.h> #include <linux/kdebug.h> #include <linux/init.h> #include <linux/console.h> #include <linux/module.h> #include <linux/hardirq.h> #include <linux/kprobes.h> #include <linux/uaccess.h> #include <linux/hugetlb.h> #include <asm/asm-offsets.h> #include <asm/system.h> #include <asm/pgtable.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/compat.h> #include "../kernel/entry.h" #ifndef CONFIG_64BIT #define __FAIL_ADDR_MASK 0x7ffff000 #define __SUBCODE_MASK 0x0200 #define __PF_RES_FIELD 0ULL #else /* CONFIG_64BIT */ #define __FAIL_ADDR_MASK -4096L #define __SUBCODE_MASK 0x0600 #define __PF_RES_FIELD 0x8000000000000000ULL #endif /* CONFIG_64BIT */ #define VM_FAULT_BADCONTEXT 0x010000 #define VM_FAULT_BADMAP 0x020000 #define VM_FAULT_BADACCESS 0x040000 static unsigned long store_indication; void fault_init(void) { if (test_facility(2) && test_facility(75)) store_indication = 0xc00; } static inline int notify_page_fault(struct pt_regs *regs) { int ret = 0; /* kprobe_running() needs smp_processor_id() */ if (kprobes_built_in() && !user_mode(regs)) { preempt_disable(); if (kprobe_running() && kprobe_fault_handler(regs, 14)) ret = 1; preempt_enable(); } return ret; } /* * Unlock any spinlocks which will prevent us from getting the * message out. */ void bust_spinlocks(int yes) { if (yes) { oops_in_progress = 1; } else { int loglevel_save = console_loglevel; console_unblank(); oops_in_progress = 0; /* * OK, the message is on the console. Now we call printk() * without oops_in_progress set so that printk will give klogd * a poke. Hold onto your hats... */ console_loglevel = 15; printk(" "); console_loglevel = loglevel_save; } } /* * Returns the address space associated with the fault. * Returns 0 for kernel space and 1 for user space. */ static inline int user_space_fault(unsigned long trans_exc_code) { /* * The lowest two bits of the translation exception * identification indicate which paging table was used. */ trans_exc_code &= 3; if (trans_exc_code == 2) /* Access via secondary space, set_fs setting decides */ return current->thread.mm_segment.ar4; if (user_mode == HOME_SPACE_MODE) /* User space if the access has been done via home space. */ return trans_exc_code == 3; /* * If the user space is not the home space the kernel runs in home * space. Access via secondary space has already been covered, * access via primary space or access register is from user space * and access via home space is from the kernel. 
*/ return trans_exc_code != 3; } static inline void report_user_fault(struct pt_regs *regs, long signr) { if ((task_pid_nr(current) > 1) && !show_unhandled_signals) return; if (!unhandled_signal(current, signr)) return; if (!printk_ratelimit()) return; printk(KERN_ALERT "User process fault: interruption code 0x%X ", regs->int_code); print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN); printk(KERN_CONT "\n"); printk(KERN_ALERT "failing address: %lX\n", regs->int_parm_long & __FAIL_ADDR_MASK); show_regs(regs); } /* * Send SIGSEGV to task. This is an external routine * to keep the stack usage of do_page_fault small. */ static noinline void do_sigsegv(struct pt_regs *regs, int si_code) { struct siginfo si; report_user_fault(regs, SIGSEGV); si.si_signo = SIGSEGV; si.si_code = si_code; si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK); force_sig_info(SIGSEGV, &si, current); } static noinline void do_no_context(struct pt_regs *regs) { const struct exception_table_entry *fixup; unsigned long address; /* Are we prepared to handle this kernel fault? */ fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); if (fixup) { regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; return; } /* * Oops. The kernel tried to access some bad page. We'll have to * terminate things with extreme prejudice. */ address = regs->int_parm_long & __FAIL_ADDR_MASK; if (!user_space_fault(regs->int_parm_long)) printk(KERN_ALERT "Unable to handle kernel pointer dereference" " at virtual kernel address %p\n", (void *)address); else printk(KERN_ALERT "Unable to handle kernel paging request" " at virtual user address %p\n", (void *)address); die(regs, "Oops"); do_exit(SIGKILL); } static noinline void do_low_address(struct pt_regs *regs) { /* Low-address protection hit in kernel mode means NULL pointer write access in kernel mode. */ if (regs->psw.mask & PSW_MASK_PSTATE) { /* Low-address protection hit in user mode 'cannot happen'. */ die (regs, "Low-address protection"); do_exit(SIGKILL); } do_no_context(regs); } static noinline void do_sigbus(struct pt_regs *regs) { struct task_struct *tsk = current; struct siginfo si; /* * Send a sigbus, regardless of whether we were in kernel * or user mode. */ si.si_signo = SIGBUS; si.si_errno = 0; si.si_code = BUS_ADRERR; si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK); force_sig_info(SIGBUS, &si, tsk); } static noinline void do_fault_error(struct pt_regs *regs, int fault) { int si_code; switch (fault) { case VM_FAULT_BADACCESS: case VM_FAULT_BADMAP: /* Bad memory access. Check if it is kernel or user space. */ if (regs->psw.mask & PSW_MASK_PSTATE) { /* User mode accesses just cause a SIGSEGV */ si_code = (fault == VM_FAULT_BADMAP) ? SEGV_MAPERR : SEGV_ACCERR; do_sigsegv(regs, si_code); return; } case VM_FAULT_BADCONTEXT: do_no_context(regs); break; default: /* fault & VM_FAULT_ERROR */ if (fault & VM_FAULT_OOM) { if (!(regs->psw.mask & PSW_MASK_PSTATE)) do_no_context(regs); else pagefault_out_of_memory(); } else if (fault & VM_FAULT_SIGBUS) { /* Kernel mode? Handle exceptions or die */ if (!(regs->psw.mask & PSW_MASK_PSTATE)) do_no_context(regs); else do_sigbus(regs); } else BUG(); break; } } /* * This routine handles page faults. It determines the address, * and the problem, and then passes it off to one of the appropriate * routines. 
* * interruption code (int_code): * 04 Protection -> Write-Protection (suprression) * 10 Segment translation -> Not present (nullification) * 11 Page translation -> Not present (nullification) * 3b Region third trans. -> Not present (nullification) */ static inline int do_exception(struct pt_regs *regs, int access) { struct task_struct *tsk; struct mm_struct *mm; struct vm_area_struct *vma; unsigned long trans_exc_code; unsigned long address; unsigned int flags; int fault; if (notify_page_fault(regs)) return 0; tsk = current; mm = tsk->mm; trans_exc_code = regs->int_parm_long; /* * Verify that the fault happened in user space, that * we are not in an interrupt and that there is a * user context. */ fault = VM_FAULT_BADCONTEXT; if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) goto out; address = trans_exc_code & __FAIL_ADDR_MASK; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); flags = FAULT_FLAG_ALLOW_RETRY; if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400) flags |= FAULT_FLAG_WRITE; down_read(&mm->mmap_sem); #ifdef CONFIG_PGSTE if (test_tsk_thread_flag(current, TIF_SIE) && S390_lowcore.gmap) { address = __gmap_fault(address, (struct gmap *) S390_lowcore.gmap); if (address == -EFAULT) { fault = VM_FAULT_BADMAP; goto out_up; } if (address == -ENOMEM) { fault = VM_FAULT_OOM; goto out_up; } } #endif retry: fault = VM_FAULT_BADMAP; vma = find_vma(mm, address); if (!vma) goto out_up; if (unlikely(vma->vm_start > address)) { if (!(vma->vm_flags & VM_GROWSDOWN)) goto out_up; if (expand_stack(vma, address)) goto out_up; } /* * Ok, we have a good vm_area for this memory access, so * we can handle it.. */ fault = VM_FAULT_BADACCESS; if (unlikely(!(vma->vm_flags & access))) goto out_up; if (is_vm_hugetlb_page(vma)) address &= HPAGE_MASK; /* * If for any reason at all we couldn't handle the fault, * make sure we exit gracefully rather than endlessly redo * the fault. */ fault = handle_mm_fault(mm, vma, address, flags); if (unlikely(fault & VM_FAULT_ERROR)) goto out_up; /* * Major/minor page fault accounting is only done on the * initial attempt. If we go through a retry, it is extremely * likely that the page will be found in page cache at that point. */ if (flags & FAULT_FLAG_ALLOW_RETRY) { if (fault & VM_FAULT_MAJOR) { tsk->maj_flt++; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); } else { tsk->min_flt++; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); } if (fault & VM_FAULT_RETRY) { /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk * of starvation. */ flags &= ~FAULT_FLAG_ALLOW_RETRY; down_read(&mm->mmap_sem); goto retry; } } /* * The instruction that caused the program check will * be repeated. Don't signal single step via SIGTRAP. */ clear_tsk_thread_flag(tsk, TIF_PER_TRAP); fault = 0; out_up: up_read(&mm->mmap_sem); out: return fault; } void __kprobes do_protection_exception(struct pt_regs *regs) { unsigned long trans_exc_code; int fault; trans_exc_code = regs->int_parm_long; /* Protection exception is suppressing, decrement psw address. */ regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16); /* * Check for low-address protection. This needs to be treated * as a special case because the translation exception code * field is not guaranteed to contain valid data in this case. 
*/ if (unlikely(!(trans_exc_code & 4))) { do_low_address(regs); return; } fault = do_exception(regs, VM_WRITE); if (unlikely(fault)) do_fault_error(regs, fault); } void __kprobes do_dat_exception(struct pt_regs *regs) { int access, fault; access = VM_READ | VM_EXEC | VM_WRITE; fault = do_exception(regs, access); if (unlikely(fault)) do_fault_error(regs, fault); } #ifdef CONFIG_64BIT void __kprobes do_asce_exception(struct pt_regs *regs) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long trans_exc_code; trans_exc_code = regs->int_parm_long; if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) goto no_context; down_read(&mm->mmap_sem); vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK); up_read(&mm->mmap_sem); if (vma) { update_mm(mm, current); return; } /* User mode accesses just cause a SIGSEGV */ if (regs->psw.mask & PSW_MASK_PSTATE) { do_sigsegv(regs, SEGV_MAPERR); return; } no_context: do_no_context(regs); } #endif int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write) { struct pt_regs regs; int access, fault; regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK; if (!irqs_disabled()) regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT; regs.psw.addr = (unsigned long) __builtin_return_address(0); regs.psw.addr |= PSW_ADDR_AMODE; regs.int_code = pgm_int_code; regs.int_parm_long = (uaddr & PAGE_MASK) | 2; access = write ? VM_WRITE : VM_READ; fault = do_exception(&regs, access); if (unlikely(fault)) { if (fault & VM_FAULT_OOM) return -EFAULT; else if (fault & VM_FAULT_SIGBUS) do_sigbus(&regs); } return fault ? -EFAULT : 0; } #ifdef CONFIG_PFAULT /* * 'pfault' pseudo page faults routines. */ static int pfault_disable; static int __init nopfault(char *str) { pfault_disable = 1; return 1; } __setup("nopfault", nopfault); struct pfault_refbk { u16 refdiagc; u16 reffcode; u16 refdwlen; u16 refversn; u64 refgaddr; u64 refselmk; u64 refcmpmk; u64 reserved; } __attribute__ ((packed, aligned(8))); int pfault_init(void) { struct pfault_refbk refbk = { .refdiagc = 0x258, .reffcode = 0, .refdwlen = 5, .refversn = 2, .refgaddr = __LC_CURRENT_PID, .refselmk = 1ULL << 48, .refcmpmk = 1ULL << 48, .reserved = __PF_RES_FIELD }; int rc; if (pfault_disable) return -1; asm volatile( " diag %1,%0,0x258\n" "0: j 2f\n" "1: la %0,8\n" "2:\n" EX_TABLE(0b,1b) : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc"); return rc; } void pfault_fini(void) { struct pfault_refbk refbk = { .refdiagc = 0x258, .reffcode = 1, .refdwlen = 5, .refversn = 2, }; if (pfault_disable) return; asm volatile( " diag %0,0,0x258\n" "0:\n" EX_TABLE(0b,0b) : : "a" (&refbk), "m" (refbk) : "cc"); } static DEFINE_SPINLOCK(pfault_lock); static LIST_HEAD(pfault_list); static void pfault_interrupt(unsigned int ext_int_code, unsigned int param32, unsigned long param64) { struct task_struct *tsk; __u16 subcode; pid_t pid; /* * Get the external interruption subcode & pfault * initial/completion signal bit. VM stores this * in the 'cpu address' field associated with the * external interrupt. */ subcode = ext_int_code >> 16; if ((subcode & 0xff00) != __SUBCODE_MASK) return; kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++; if (subcode & 0x0080) { /* Get the token (= pid of the affected task). */ pid = sizeof(void *) == 4 ? 
param32 : param64; rcu_read_lock(); tsk = find_task_by_pid_ns(pid, &init_pid_ns); if (tsk) get_task_struct(tsk); rcu_read_unlock(); if (!tsk) return; } else { tsk = current; } spin_lock(&pfault_lock); if (subcode & 0x0080) { /* signal bit is set -> a page has been swapped in by VM */ if (tsk->thread.pfault_wait == 1) { /* Initial interrupt was faster than the completion * interrupt. pfault_wait is valid. Set pfault_wait * back to zero and wake up the process. This can * safely be done because the task is still sleeping * and can't produce new pfaults. */ tsk->thread.pfault_wait = 0; list_del(&tsk->thread.list); wake_up_process(tsk); } else { /* Completion interrupt was faster than initial * interrupt. Set pfault_wait to -1 so the initial * interrupt doesn't put the task to sleep. * If the task is not running, ignore the completion * interrupt since it must be a leftover of a PFAULT * CANCEL operation which didn't remove all pending * completion interrupts. */ if (tsk->state == TASK_RUNNING) tsk->thread.pfault_wait = -1; } put_task_struct(tsk); } else { /* signal bit not set -> a real page is missing. */ if (tsk->thread.pfault_wait == -1) { /* Completion interrupt was faster than the initial * interrupt (pfault_wait == -1). Set pfault_wait * back to zero and exit. */ tsk->thread.pfault_wait = 0; } else { /* Initial interrupt arrived before completion * interrupt. Let the task sleep. */ tsk->thread.pfault_wait = 1; list_add(&tsk->thread.list, &pfault_list); set_task_state(tsk, TASK_UNINTERRUPTIBLE); set_tsk_need_resched(tsk); } } spin_unlock(&pfault_lock); } static int __cpuinit pfault_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { struct thread_struct *thread, *next; struct task_struct *tsk; switch (action) { case CPU_DEAD: case CPU_DEAD_FROZEN: spin_lock_irq(&pfault_lock); list_for_each_entry_safe(thread, next, &pfault_list, list) { thread->pfault_wait = 0; list_del(&thread->list); tsk = container_of(thread, struct task_struct, thread); wake_up_process(tsk); } spin_unlock_irq(&pfault_lock); break; default: break; } return NOTIFY_OK; } static int __init pfault_irq_init(void) { int rc; rc = register_external_interrupt(0x2603, pfault_interrupt); if (rc) goto out_extint; rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP; if (rc) goto out_pfault; service_subclass_irq_register(); hotcpu_notifier(pfault_cpu_notify, 0); return 0; out_pfault: unregister_external_interrupt(0x2603, pfault_interrupt); out_extint: pfault_disable = 1; return rc; } early_initcall(pfault_irq_init); #endif /* CONFIG_PFAULT */
gpl-2.0
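do_exception() above allows exactly one retry of handle_mm_fault(): on VM_FAULT_RETRY it clears FAULT_FLAG_ALLOW_RETRY before looping, so a second retry request cannot starve the caller. The standalone sketch below mimics that one-shot retry discipline; the fault simulator and flag values are made up for illustration.

#include <stdio.h>

#define FLAG_ALLOW_RETRY 0x1
#define FAULT_RETRY      0x2

static int fake_fault(unsigned int flags, int *calls)
{
	(*calls)++;
	/* First attempt asks for a retry; the retry then succeeds. */
	return (*calls == 1 && (flags & FLAG_ALLOW_RETRY)) ? FAULT_RETRY : 0;
}

int main(void)
{
	unsigned int flags = FLAG_ALLOW_RETRY;
	int calls = 0, fault;

retry:
	fault = fake_fault(flags, &calls);
	if (fault & FAULT_RETRY) {
		flags &= ~FLAG_ALLOW_RETRY;	/* avoid any risk of starvation */
		goto retry;
	}
	printf("resolved after %d attempt(s)\n", calls);
	return 0;
}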
SaberMod/gdb-saber
gdb/testsuite/gdb.threads/no-unwaited-for-left.c
46
1609
/* Copyright 2007-2015 Free Software Foundation, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <pthread.h> #include <assert.h> #include <unistd.h> static volatile pthread_t main_thread; pthread_barrier_t barrier; static void * thread_a (void *arg) { int i; return 0; /* break-here */ } static void * thread_b (void *arg) { int i; pthread_barrier_wait (&barrier); i = pthread_join (main_thread, NULL); assert (i == 0); return arg; } int main (void) { pthread_t thread; int i; /* First test resuming only `thread_a', which exits. */ i = pthread_create (&thread, NULL, thread_a, NULL); assert (i == 0); pthread_join (thread, NULL); /* Then test resuming only the leader, which also exits. */ main_thread = pthread_self (); pthread_barrier_init (&barrier, NULL, 2); i = pthread_create (&thread, NULL, thread_b, NULL); assert (i == 0); pthread_barrier_wait (&barrier); pthread_exit (NULL); /* break-here-2 */ /* NOTREACHED */ return 0; }
gpl-2.0
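The test above synchronizes thread_b and the main thread with a two-party pthread barrier before the leader exits. A minimal standalone sketch of that rendezvous, compiled with -pthread:

#include <pthread.h>
#include <stdio.h>

static pthread_barrier_t barrier;

static void *worker(void *arg)
{
	pthread_barrier_wait(&barrier);	/* rendezvous with main */
	printf("worker past barrier\n");
	return arg;
}

int main(void)
{
	pthread_t t;

	pthread_barrier_init(&barrier, NULL, 2);
	pthread_create(&t, NULL, worker, NULL);
	pthread_barrier_wait(&barrier);	/* both sides must arrive */
	printf("main past barrier\n");
	pthread_join(t, NULL);
	pthread_barrier_destroy(&barrier);
	return 0;
}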
vlw/Kernel_MSM8916_Samsung
drivers/platform/msm/msm_bus/msm-buspm-dev.c
302
8510
/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* #define DEBUG */ #include <linux/module.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/device.h> #include <linux/uaccess.h> #include <linux/miscdevice.h> #include <linux/dma-mapping.h> #include <soc/qcom/rpm-smd.h> #include <uapi/linux/msm-buspm-dev.h> #define MSM_BUSPM_DRV_NAME "msm-buspm-dev" #ifdef CONFIG_COMPAT static long msm_buspm_dev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); #else #define msm_buspm_dev_compat_ioctl NULL #endif static long msm_buspm_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); static int msm_buspm_dev_mmap(struct file *filp, struct vm_area_struct *vma); static int msm_buspm_dev_release(struct inode *inode, struct file *filp); static int msm_buspm_dev_open(struct inode *inode, struct file *filp); static const struct file_operations msm_buspm_dev_fops = { .owner = THIS_MODULE, .mmap = msm_buspm_dev_mmap, .open = msm_buspm_dev_open, .unlocked_ioctl = msm_buspm_dev_ioctl, .compat_ioctl = msm_buspm_dev_compat_ioctl, .llseek = noop_llseek, .release = msm_buspm_dev_release, }; struct miscdevice msm_buspm_misc = { .minor = MISC_DYNAMIC_MINOR, .name = MSM_BUSPM_DRV_NAME, .fops = &msm_buspm_dev_fops, }; enum msm_buspm_spdm_res { SPDM_RES_ID = 0, SPDM_RES_TYPE = 0x63707362, SPDM_KEY = 0x00006e65, SPDM_SIZE = 4, }; /* * Allocate kernel buffer. * Currently limited to one buffer per file descriptor. If alloc() is * called twice for the same descriptor, the original buffer is freed. * There is also no locking protection so the same descriptor can not be shared. */ static inline void *msm_buspm_dev_get_vaddr(struct file *filp) { struct msm_buspm_map_dev *dev = filp->private_data; return (dev) ? dev->vaddr : NULL; } static inline unsigned int msm_buspm_dev_get_buflen(struct file *filp) { struct msm_buspm_map_dev *dev = filp->private_data; return dev ? dev->buflen : 0; } static inline unsigned long msm_buspm_dev_get_paddr(struct file *filp) { struct msm_buspm_map_dev *dev = filp->private_data; return (dev) ? 
dev->paddr : 0L; } static void msm_buspm_dev_free(struct file *filp) { struct msm_buspm_map_dev *dev = filp->private_data; if (dev && dev->vaddr) { pr_debug("freeing memory at 0x%p\n", dev->vaddr); dma_free_coherent(msm_buspm_misc.this_device, dev->buflen, dev->vaddr, dev->paddr); dev->paddr = 0L; dev->vaddr = NULL; } } static int msm_buspm_dev_open(struct inode *inode, struct file *filp) { struct msm_buspm_map_dev *dev; if (capable(CAP_SYS_ADMIN)) { dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (dev) filp->private_data = dev; else return -ENOMEM; } else { return -EPERM; } return 0; } static int msm_buspm_dev_alloc(struct file *filp, struct buspm_alloc_params data) { dma_addr_t paddr; void *vaddr; struct msm_buspm_map_dev *dev = filp->private_data; /* If buffer already allocated, then free it */ if (dev->vaddr) msm_buspm_dev_free(filp); /* Allocate uncached memory */ vaddr = dma_alloc_coherent(msm_buspm_misc.this_device, data.size, &paddr, GFP_KERNEL); if (vaddr == NULL) { pr_err("allocation of 0x%zu bytes failed", data.size); return -ENOMEM; } dev->vaddr = vaddr; dev->paddr = paddr; dev->buflen = data.size; filp->f_pos = 0; pr_debug("virt addr = 0x%p\n", dev->vaddr); pr_debug("phys addr = 0x%lx\n", dev->paddr); return 0; } static int msm_bus_rpm_req(u32 rsc_type, u32 key, u32 hwid, int ctx, u32 val) { struct msm_rpm_request *rpm_req; int ret, msg_id; rpm_req = msm_rpm_create_request(ctx, rsc_type, SPDM_RES_ID, 1); if (rpm_req == NULL) { pr_err("RPM: Couldn't create RPM Request\n"); return -ENXIO; } ret = msm_rpm_add_kvp_data(rpm_req, key, (const uint8_t *)&val, (int)(sizeof(uint32_t))); if (ret) { pr_err("RPM: Add KVP failed for RPM Req:%u\n", rsc_type); goto err; } pr_debug("Added Key: %d, Val: %u, size: %zu\n", key, (uint32_t)val, sizeof(uint32_t)); msg_id = msm_rpm_send_request(rpm_req); if (!msg_id) { pr_err("RPM: No message ID for req\n"); ret = -ENXIO; goto err; } ret = msm_rpm_wait_for_ack(msg_id); if (ret) { pr_err("RPM: Ack failed\n"); goto err; } err: msm_rpm_free_request(rpm_req); return ret; } static int msm_buspm_ioc_cmds(uint32_t arg) { switch (arg) { case MSM_BUSPM_SPDM_CLK_DIS: case MSM_BUSPM_SPDM_CLK_EN: return msm_bus_rpm_req(SPDM_RES_TYPE, SPDM_KEY, 0, MSM_RPM_CTX_ACTIVE_SET, arg); default: pr_warn("Unsupported ioctl command: %d\n", arg); return -EINVAL; } } static long msm_buspm_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct buspm_xfer_req xfer; struct buspm_alloc_params alloc_data; unsigned long paddr; int retval = 0; void *buf = msm_buspm_dev_get_vaddr(filp); unsigned int buflen = msm_buspm_dev_get_buflen(filp); unsigned char *dbgbuf = buf; if (_IOC_TYPE(cmd) != MSM_BUSPM_IOC_MAGIC) { pr_err("Wrong IOC_MAGIC.Exiting\n"); return -ENOTTY; } switch (cmd) { case MSM_BUSPM_IOC_FREE: pr_debug("cmd = 0x%x (FREE)\n", cmd); msm_buspm_dev_free(filp); break; case MSM_BUSPM_IOC_ALLOC: pr_debug("cmd = 0x%x (ALLOC)\n", cmd); retval = __get_user(alloc_data.size, (uint32_t __user *)arg); if (retval == 0) retval = msm_buspm_dev_alloc(filp, alloc_data); break; case MSM_BUSPM_IOC_RD_PHYS_ADDR: pr_debug("Read Physical Address\n"); paddr = msm_buspm_dev_get_paddr(filp); if (paddr == 0L) { retval = -EINVAL; } else { pr_debug("phys addr = 0x%lx\n", paddr); retval = __put_user(paddr, (unsigned long __user *)arg); } break; case MSM_BUSPM_IOC_RDBUF: pr_debug("Read Buffer: 0x%x%x%x%x\n", dbgbuf[0], dbgbuf[1], dbgbuf[2], dbgbuf[3]); if (!buf) { retval = -EINVAL; break; } if (copy_from_user(&xfer, (void __user *)arg, sizeof(xfer))) { retval = -EFAULT; break; } if 
((xfer.size <= buflen) && (copy_to_user((void __user *)xfer.data, buf, xfer.size))) { retval = -EFAULT; break; } break; case MSM_BUSPM_IOC_WRBUF: pr_debug("Write Buffer\n"); if (!buf) { retval = -EINVAL; break; } if (copy_from_user(&xfer, (void __user *)arg, sizeof(xfer))) { retval = -EFAULT; break; } /* the write must fit inside the allocated kernel buffer */ if ((xfer.size <= buflen) && (copy_from_user(buf, (void __user *)xfer.data, xfer.size))) { retval = -EFAULT; break; } break; case MSM_BUSPM_IOC_CMD: pr_debug("IOCTL command: cmd: %d arg: %lu\n", cmd, arg); retval = msm_buspm_ioc_cmds(arg); break; default: pr_debug("Unknown command 0x%x\n", cmd); retval = -EINVAL; break; } return retval; } static int msm_buspm_dev_release(struct inode *inode, struct file *filp) { struct msm_buspm_map_dev *dev = filp->private_data; msm_buspm_dev_free(filp); kfree(dev); filp->private_data = NULL; return 0; } static int msm_buspm_dev_mmap(struct file *filp, struct vm_area_struct *vma) { pr_debug("vma = 0x%p\n", vma); /* Mappings are uncached */ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, vma->vm_end - vma->vm_start, vma->vm_page_prot)) return -EFAULT; return 0; } #ifdef CONFIG_COMPAT static long msm_buspm_dev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { return msm_buspm_dev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); } #endif static int __init msm_buspm_dev_init(void) { int ret = 0; ret = misc_register(&msm_buspm_misc); if (ret < 0) { pr_err("%s: Cannot register misc device\n", __func__); return ret; } /* only touch this_device once registration has succeeded */ if (msm_buspm_misc.this_device->coherent_dma_mask == 0) msm_buspm_misc.this_device->coherent_dma_mask = DMA_BIT_MASK(32); return ret; } static void __exit msm_buspm_dev_exit(void) { misc_deregister(&msm_buspm_misc); } module_init(msm_buspm_dev_init); module_exit(msm_buspm_dev_exit); MODULE_LICENSE("GPL v2"); MODULE_VERSION("1.0"); MODULE_ALIAS("platform:"MSM_BUSPM_DRV_NAME);
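/*
 * Illustrative usage sketch (not part of the original driver): one way a
 * user-space profiling client could exercise the ioctl/mmap interface
 * above. The ioctl names come from the uapi header included by this file;
 * the device node name follows from MSM_BUSPM_DRV_NAME. Wrapped in #if 0
 * because user-space code has no place in a kernel build.
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/msm-buspm-dev.h>

int main(void)
{
	uint32_t size = 4096;
	unsigned long paddr = 0;
	void *p;
	int fd = open("/dev/msm-buspm-dev", O_RDWR);

	if (fd < 0)
		return 1;
	/* ALLOC takes a pointer to the requested size in bytes */
	if (ioctl(fd, MSM_BUSPM_IOC_ALLOC, &size) < 0)
		goto out;
	/* RD_PHYS_ADDR returns the buffer's physical address */
	if (ioctl(fd, MSM_BUSPM_IOC_RD_PHYS_ADDR, &paddr) < 0)
		goto out;
	/*
	 * msm_buspm_dev_mmap() passes vm_pgoff straight to
	 * remap_pfn_range(), so the physical address doubles as
	 * the mmap offset.
	 */
	p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
		 (off_t)paddr);
	if (p != MAP_FAILED) {
		printf("mapped %u bytes at %p (phys 0x%lx)\n", size, p, paddr);
		munmap(p, size);
	}
	ioctl(fd, MSM_BUSPM_IOC_FREE, 0); /* arg is ignored for FREE */
out:
	close(fd);
	return 0;
}
#endif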
gpl-2.0
mzueger/linux-phycore-mpc5200b
drivers/media/video/zoran/zoran_driver.c
558
80972
/* * Zoran zr36057/zr36067 PCI controller driver, for the * Pinnacle/Miro DC10/DC10+/DC30/DC30+, Iomega Buz, Linux * Media Labs LML33/LML33R10. * * Copyright (C) 2000 Serguei Miridonov <mirsev@cicese.mx> * * Changes for BUZ by Wolfgang Scherr <scherr@net4you.net> * * Changes for DC10/DC30 by Laurent Pinchart <laurent.pinchart@skynet.be> * * Changes for LML33R10 by Maxim Yevtyushkin <max@linuxmedialabs.com> * * Changes for videodev2/v4l2 by Ronald Bultje <rbultje@ronald.bitfreak.net> * * Based on * * Miro DC10 driver * Copyright (C) 1999 Wolfgang Scherr <scherr@net4you.net> * * Iomega Buz driver version 1.0 * Copyright (C) 1999 Rainer Johanni <Rainer@Johanni.de> * * buz.0.0.3 * Copyright (C) 1998 Dave Perks <dperks@ibm.net> * * bttv - Bt848 frame grabber driver * Copyright (C) 1996,97,98 Ralph Metzler (rjkm@thp.uni-koeln.de) * & Marcus Metzler (mocm@thp.uni-koeln.de) * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/vmalloc.h> #include <linux/wait.h> #include <linux/interrupt.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> #include <linux/spinlock.h> #include <linux/videodev2.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include "videocodec.h" #include <asm/byteorder.h> #include <asm/io.h> #include <asm/uaccess.h> #include <linux/proc_fs.h> #include <linux/mutex.h> #include "zoran.h" #include "zoran_device.h" #include "zoran_card.h" const struct zoran_format zoran_formats[] = { { .name = "15-bit RGB LE", .fourcc = V4L2_PIX_FMT_RGB555, .colorspace = V4L2_COLORSPACE_SRGB, .depth = 15, .flags = ZORAN_FORMAT_CAPTURE | ZORAN_FORMAT_OVERLAY, .vfespfr = ZR36057_VFESPFR_RGB555|ZR36057_VFESPFR_ErrDif| ZR36057_VFESPFR_LittleEndian, }, { .name = "15-bit RGB BE", .fourcc = V4L2_PIX_FMT_RGB555X, .colorspace = V4L2_COLORSPACE_SRGB, .depth = 15, .flags = ZORAN_FORMAT_CAPTURE | ZORAN_FORMAT_OVERLAY, .vfespfr = ZR36057_VFESPFR_RGB555|ZR36057_VFESPFR_ErrDif, }, { .name = "16-bit RGB LE", .fourcc = V4L2_PIX_FMT_RGB565, .colorspace = V4L2_COLORSPACE_SRGB, .depth = 16, .flags = ZORAN_FORMAT_CAPTURE | ZORAN_FORMAT_OVERLAY, .vfespfr = ZR36057_VFESPFR_RGB565|ZR36057_VFESPFR_ErrDif| ZR36057_VFESPFR_LittleEndian, }, { .name = "16-bit RGB BE", .fourcc = V4L2_PIX_FMT_RGB565X, .colorspace = V4L2_COLORSPACE_SRGB, .depth = 16, .flags = ZORAN_FORMAT_CAPTURE | ZORAN_FORMAT_OVERLAY, .vfespfr = ZR36057_VFESPFR_RGB565|ZR36057_VFESPFR_ErrDif, }, { .name = "24-bit RGB", .fourcc = V4L2_PIX_FMT_BGR24, .colorspace = V4L2_COLORSPACE_SRGB, .depth = 24, .flags = ZORAN_FORMAT_CAPTURE | ZORAN_FORMAT_OVERLAY, .vfespfr = ZR36057_VFESPFR_RGB888|ZR36057_VFESPFR_Pack24, }, { .name = "32-bit RGB LE", .fourcc = V4L2_PIX_FMT_BGR32, .colorspace = V4L2_COLORSPACE_SRGB, .depth = 32, .flags = ZORAN_FORMAT_CAPTURE | ZORAN_FORMAT_OVERLAY, .vfespfr = 
ZR36057_VFESPFR_RGB888|ZR36057_VFESPFR_LittleEndian, }, { .name = "32-bit RGB BE", .fourcc = V4L2_PIX_FMT_RGB32, .colorspace = V4L2_COLORSPACE_SRGB, .depth = 32, .flags = ZORAN_FORMAT_CAPTURE | ZORAN_FORMAT_OVERLAY, .vfespfr = ZR36057_VFESPFR_RGB888, }, { .name = "4:2:2, packed, YUYV", .fourcc = V4L2_PIX_FMT_YUYV, .colorspace = V4L2_COLORSPACE_SMPTE170M, .depth = 16, .flags = ZORAN_FORMAT_CAPTURE | ZORAN_FORMAT_OVERLAY, .vfespfr = ZR36057_VFESPFR_YUV422, }, { .name = "4:2:2, packed, UYVY", .fourcc = V4L2_PIX_FMT_UYVY, .colorspace = V4L2_COLORSPACE_SMPTE170M, .depth = 16, .flags = ZORAN_FORMAT_CAPTURE | ZORAN_FORMAT_OVERLAY, .vfespfr = ZR36057_VFESPFR_YUV422|ZR36057_VFESPFR_LittleEndian, }, { .name = "Hardware-encoded Motion-JPEG", .fourcc = V4L2_PIX_FMT_MJPEG, .colorspace = V4L2_COLORSPACE_SMPTE170M, .depth = 0, .flags = ZORAN_FORMAT_CAPTURE | ZORAN_FORMAT_PLAYBACK | ZORAN_FORMAT_COMPRESSED, } }; #define NUM_FORMATS ARRAY_SIZE(zoran_formats) /* small helper function for calculating buffer sizes for v4l2; * we calculate the nearest higher power of two, which * will be the recommended buffer size */ static __u32 zoran_v4l2_calc_bufsize (struct zoran_jpg_settings *settings) { __u8 div = settings->VerDcm * settings->HorDcm * settings->TmpDcm; __u32 num = (1024 * 512) / (div); __u32 result = 2; num--; while (num) { num >>= 1; result <<= 1; } /* clamp the rounded size to the jpg_bufsize module limit and an 8 kB floor */ if (result > jpg_bufsize) return jpg_bufsize; if (result < 8192) return 8192; return result; } /* forward references */ static void v4l_fbuffer_free(struct zoran_fh *fh); static void jpg_fbuffer_free(struct zoran_fh *fh); /* Set mapping mode */ static void map_mode_raw(struct zoran_fh *fh) { fh->map_mode = ZORAN_MAP_MODE_RAW; fh->buffers.buffer_size = v4l_bufsize; fh->buffers.num_buffers = v4l_nbufs; } static void map_mode_jpg(struct zoran_fh *fh, int play) { fh->map_mode = play ? ZORAN_MAP_MODE_JPG_PLAY : ZORAN_MAP_MODE_JPG_REC; fh->buffers.buffer_size = jpg_bufsize; fh->buffers.num_buffers = jpg_nbufs; } static inline const char *mode_name(enum zoran_map_mode mode) { return mode == ZORAN_MAP_MODE_RAW ? "V4L" : "JPG"; } /* * Allocate the V4L grab buffers * * These have to be physically contiguous. 
*/ static int v4l_fbuffer_alloc(struct zoran_fh *fh) { struct zoran *zr = fh->zr; int i, off; unsigned char *mem; for (i = 0; i < fh->buffers.num_buffers; i++) { if (fh->buffers.buffer[i].v4l.fbuffer) dprintk(2, KERN_WARNING "%s: %s - buffer %d already allocated!?\n", ZR_DEVNAME(zr), __func__, i); //udelay(20); mem = kmalloc(fh->buffers.buffer_size, GFP_KERNEL | __GFP_NOWARN); if (!mem) { dprintk(1, KERN_ERR "%s: %s - kmalloc for V4L buf %d failed\n", ZR_DEVNAME(zr), __func__, i); v4l_fbuffer_free(fh); return -ENOBUFS; } fh->buffers.buffer[i].v4l.fbuffer = mem; fh->buffers.buffer[i].v4l.fbuffer_phys = virt_to_phys(mem); fh->buffers.buffer[i].v4l.fbuffer_bus = virt_to_bus(mem); for (off = 0; off < fh->buffers.buffer_size; off += PAGE_SIZE) SetPageReserved(virt_to_page(mem + off)); dprintk(4, KERN_INFO "%s: %s - V4L frame %d mem 0x%lx (bus: 0x%llx)\n", ZR_DEVNAME(zr), __func__, i, (unsigned long) mem, (unsigned long long)virt_to_bus(mem)); } fh->buffers.allocated = 1; return 0; } /* free the V4L grab buffers */ static void v4l_fbuffer_free(struct zoran_fh *fh) { struct zoran *zr = fh->zr; int i, off; unsigned char *mem; dprintk(4, KERN_INFO "%s: %s\n", ZR_DEVNAME(zr), __func__); for (i = 0; i < fh->buffers.num_buffers; i++) { if (!fh->buffers.buffer[i].v4l.fbuffer) continue; mem = fh->buffers.buffer[i].v4l.fbuffer; for (off = 0; off < fh->buffers.buffer_size; off += PAGE_SIZE) ClearPageReserved(virt_to_page(mem + off)); kfree(fh->buffers.buffer[i].v4l.fbuffer); fh->buffers.buffer[i].v4l.fbuffer = NULL; } fh->buffers.allocated = 0; } /* * Allocate the MJPEG grab buffers. * * If a Natoma chipset is present and this is a revision 1 zr36057, * each MJPEG buffer needs to be physically contiguous. * (RJ: This statement is from Dave Perks' original driver, * I could never check it because I have a zr36067) * * RJ: The contents grab buffers needs never be accessed in the driver. * Therefore there is no need to allocate them with vmalloc in order * to get a contiguous virtual memory space. * I don't understand why many other drivers first allocate them with * vmalloc (which uses internally also get_zeroed_page, but delivers you * virtual addresses) and then again have to make a lot of efforts * to get the physical address. * * Ben Capper: * On big-endian architectures (such as ppc) some extra steps * are needed. When reading and writing to the stat_com array * and fragment buffers, the device expects to see little- * endian values. The use of cpu_to_le32() and le32_to_cpu() * in this function (and one or two others in zoran_device.c) * ensure that these values are always stored in little-endian * form, regardless of architecture. The zr36057 does Very Bad * Things on big endian architectures if the stat_com array * and fragment buffers are not little-endian. 
*/ static int jpg_fbuffer_alloc(struct zoran_fh *fh) { struct zoran *zr = fh->zr; int i, j, off; u8 *mem; for (i = 0; i < fh->buffers.num_buffers; i++) { if (fh->buffers.buffer[i].jpg.frag_tab) dprintk(2, KERN_WARNING "%s: %s - buffer %d already allocated!?\n", ZR_DEVNAME(zr), __func__, i); /* Allocate fragment table for this buffer */ mem = (void *)get_zeroed_page(GFP_KERNEL); if (!mem) { dprintk(1, KERN_ERR "%s: %s - get_zeroed_page (frag_tab) failed for buffer %d\n", ZR_DEVNAME(zr), __func__, i); jpg_fbuffer_free(fh); return -ENOBUFS; } fh->buffers.buffer[i].jpg.frag_tab = (__le32 *)mem; fh->buffers.buffer[i].jpg.frag_tab_bus = virt_to_bus(mem); if (fh->buffers.need_contiguous) { mem = kmalloc(fh->buffers.buffer_size, GFP_KERNEL); if (mem == NULL) { dprintk(1, KERN_ERR "%s: %s - kmalloc failed for buffer %d\n", ZR_DEVNAME(zr), __func__, i); jpg_fbuffer_free(fh); return -ENOBUFS; } fh->buffers.buffer[i].jpg.frag_tab[0] = cpu_to_le32(virt_to_bus(mem)); fh->buffers.buffer[i].jpg.frag_tab[1] = cpu_to_le32((fh->buffers.buffer_size >> 1) | 1); for (off = 0; off < fh->buffers.buffer_size; off += PAGE_SIZE) SetPageReserved(virt_to_page(mem + off)); } else { /* jpg_bufsize is already page aligned */ for (j = 0; j < fh->buffers.buffer_size / PAGE_SIZE; j++) { mem = (void *)get_zeroed_page(GFP_KERNEL); if (mem == NULL) { dprintk(1, KERN_ERR "%s: %s - get_zeroed_page failed for buffer %d\n", ZR_DEVNAME(zr), __func__, i); jpg_fbuffer_free(fh); return -ENOBUFS; } fh->buffers.buffer[i].jpg.frag_tab[2 * j] = cpu_to_le32(virt_to_bus(mem)); fh->buffers.buffer[i].jpg.frag_tab[2 * j + 1] = cpu_to_le32((PAGE_SIZE >> 2) << 1); SetPageReserved(virt_to_page(mem)); } fh->buffers.buffer[i].jpg.frag_tab[2 * j - 1] |= cpu_to_le32(1); } } dprintk(4, KERN_DEBUG "%s: %s - %d KB allocated\n", ZR_DEVNAME(zr), __func__, (fh->buffers.num_buffers * fh->buffers.buffer_size) >> 10); fh->buffers.allocated = 1; return 0; } /* free the MJPEG grab buffers */ static void jpg_fbuffer_free(struct zoran_fh *fh) { struct zoran *zr = fh->zr; int i, j, off; unsigned char *mem; __le32 frag_tab; struct zoran_buffer *buffer; dprintk(4, KERN_DEBUG "%s: %s\n", ZR_DEVNAME(zr), __func__); for (i = 0, buffer = &fh->buffers.buffer[0]; i < fh->buffers.num_buffers; i++, buffer++) { if (!buffer->jpg.frag_tab) continue; if (fh->buffers.need_contiguous) { frag_tab = buffer->jpg.frag_tab[0]; if (frag_tab) { mem = bus_to_virt(le32_to_cpu(frag_tab)); for (off = 0; off < fh->buffers.buffer_size; off += PAGE_SIZE) ClearPageReserved(virt_to_page(mem + off)); kfree(mem); buffer->jpg.frag_tab[0] = 0; buffer->jpg.frag_tab[1] = 0; } } else { for (j = 0; j < fh->buffers.buffer_size / PAGE_SIZE; j++) { frag_tab = buffer->jpg.frag_tab[2 * j]; if (!frag_tab) break; ClearPageReserved(virt_to_page(bus_to_virt(le32_to_cpu(frag_tab)))); free_page((unsigned long)bus_to_virt(le32_to_cpu(frag_tab))); buffer->jpg.frag_tab[2 * j] = 0; buffer->jpg.frag_tab[2 * j + 1] = 0; } } free_page((unsigned long)buffer->jpg.frag_tab); buffer->jpg.frag_tab = NULL; } fh->buffers.allocated = 0; } /* * V4L Buffer grabbing */ static int zoran_v4l_set_format (struct zoran_fh *fh, int width, int height, const struct zoran_format *format) { struct zoran *zr = fh->zr; int bpp; /* Check size and format of the grab wanted */ if (height < BUZ_MIN_HEIGHT || width < BUZ_MIN_WIDTH || height > BUZ_MAX_HEIGHT || width > BUZ_MAX_WIDTH) { dprintk(1, KERN_ERR "%s: %s - wrong frame size (%dx%d)\n", ZR_DEVNAME(zr), __func__, width, height); return -EINVAL; } bpp = (format->depth + 7) / 8; /* Check against 
available buffer size */ if (height * width * bpp > fh->buffers.buffer_size) { dprintk(1, KERN_ERR "%s: %s - video buffer size (%d kB) is too small\n", ZR_DEVNAME(zr), __func__, fh->buffers.buffer_size >> 10); return -EINVAL; } /* The video front end needs 4-byte aligned line sizes */ if ((bpp == 2 && (width & 1)) || (bpp == 3 && (width & 3))) { dprintk(1, KERN_ERR "%s: %s - wrong frame alignment\n", ZR_DEVNAME(zr), __func__); return -EINVAL; } fh->v4l_settings.width = width; fh->v4l_settings.height = height; fh->v4l_settings.format = format; fh->v4l_settings.bytesperline = bpp * fh->v4l_settings.width; return 0; } static int zoran_v4l_queue_frame(struct zoran_fh *fh, int num) { struct zoran *zr = fh->zr; unsigned long flags; int res = 0; if (!fh->buffers.allocated) { dprintk(1, KERN_ERR "%s: %s - buffers not yet allocated\n", ZR_DEVNAME(zr), __func__); res = -ENOMEM; } /* No grabbing outside the buffer range! */ if (num >= fh->buffers.num_buffers || num < 0) { dprintk(1, KERN_ERR "%s: %s - buffer %d is out of range\n", ZR_DEVNAME(zr), __func__, num); res = -EINVAL; } spin_lock_irqsave(&zr->spinlock, flags); if (fh->buffers.active == ZORAN_FREE) { if (zr->v4l_buffers.active == ZORAN_FREE) { zr->v4l_buffers = fh->buffers; fh->buffers.active = ZORAN_ACTIVE; } else { dprintk(1, KERN_ERR "%s: %s - another session is already capturing\n", ZR_DEVNAME(zr), __func__); res = -EBUSY; } } /* make sure a grab isn't going on currently with this buffer */ if (!res) { switch (zr->v4l_buffers.buffer[num].state) { default: case BUZ_STATE_PEND: if (zr->v4l_buffers.active == ZORAN_FREE) { fh->buffers.active = ZORAN_FREE; zr->v4l_buffers.allocated = 0; } res = -EBUSY; /* what are you doing? */ break; case BUZ_STATE_DONE: dprintk(2, KERN_WARNING "%s: %s - queueing buffer %d in state DONE!?\n", ZR_DEVNAME(zr), __func__, num); /* fall through */ case BUZ_STATE_USER: /* since there is at least one unused buffer there's room for at least * one more pend[] entry */ zr->v4l_pend[zr->v4l_pend_head++ & V4L_MASK_FRAME] = num; zr->v4l_buffers.buffer[num].state = BUZ_STATE_PEND; zr->v4l_buffers.buffer[num].bs.length = fh->v4l_settings.bytesperline * zr->v4l_settings.height; fh->buffers.buffer[num] = zr->v4l_buffers.buffer[num]; break; } } spin_unlock_irqrestore(&zr->spinlock, flags); if (!res && zr->v4l_buffers.active == ZORAN_FREE) zr->v4l_buffers.active = fh->buffers.active; return res; } /* * Sync on a V4L buffer */ static int v4l_sync(struct zoran_fh *fh, int frame) { struct zoran *zr = fh->zr; unsigned long flags; if (fh->buffers.active == ZORAN_FREE) { dprintk(1, KERN_ERR "%s: %s - no grab active for this session\n", ZR_DEVNAME(zr), __func__); return -EINVAL; } /* check passed-in frame number */ if (frame >= fh->buffers.num_buffers || frame < 0) { dprintk(1, KERN_ERR "%s: %s - frame %d is invalid\n", ZR_DEVNAME(zr), __func__, frame); return -EINVAL; } /* Check if the buffer was queued at all */ if (zr->v4l_buffers.buffer[frame].state == BUZ_STATE_USER) { dprintk(1, KERN_ERR "%s: %s - attempt to sync on a buffer which was not queued?\n", ZR_DEVNAME(zr), __func__); return -EPROTO; } /* wait on this buffer to get ready */ if (!wait_event_interruptible_timeout(zr->v4l_capq, (zr->v4l_buffers.buffer[frame].state != BUZ_STATE_PEND), 10*HZ)) return -ETIME; if (signal_pending(current)) return -ERESTARTSYS; /* buffer should now be in BUZ_STATE_DONE */ if (zr->v4l_buffers.buffer[frame].state != BUZ_STATE_DONE) dprintk(2, KERN_ERR "%s: %s - internal state error\n", ZR_DEVNAME(zr), __func__); zr->v4l_buffers.buffer[frame].state = BUZ_STATE_USER; 
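/* ownership of the completed frame passes back to user space here;
 * mirror the updated buffer state into this file handle's copy */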
fh->buffers.buffer[frame] = zr->v4l_buffers.buffer[frame]; spin_lock_irqsave(&zr->spinlock, flags); /* Check if streaming capture has finished */ if (zr->v4l_pend_tail == zr->v4l_pend_head) { zr36057_set_memgrab(zr, 0); if (zr->v4l_buffers.active == ZORAN_ACTIVE) { fh->buffers.active = zr->v4l_buffers.active = ZORAN_FREE; zr->v4l_buffers.allocated = 0; } } spin_unlock_irqrestore(&zr->spinlock, flags); return 0; } /* * Queue a MJPEG buffer for capture/playback */ static int zoran_jpg_queue_frame(struct zoran_fh *fh, int num, enum zoran_codec_mode mode) { struct zoran *zr = fh->zr; unsigned long flags; int res = 0; /* Check if buffers are allocated */ if (!fh->buffers.allocated) { dprintk(1, KERN_ERR "%s: %s - buffers not yet allocated\n", ZR_DEVNAME(zr), __func__); return -ENOMEM; } /* No grabbing outside the buffer range! */ if (num >= fh->buffers.num_buffers || num < 0) { dprintk(1, KERN_ERR "%s: %s - buffer %d out of range\n", ZR_DEVNAME(zr), __func__, num); return -EINVAL; } /* what is the codec mode right now? */ if (zr->codec_mode == BUZ_MODE_IDLE) { zr->jpg_settings = fh->jpg_settings; } else if (zr->codec_mode != mode) { /* wrong codec mode active - invalid */ dprintk(1, KERN_ERR "%s: %s - codec in wrong mode\n", ZR_DEVNAME(zr), __func__); return -EINVAL; } if (fh->buffers.active == ZORAN_FREE) { if (zr->jpg_buffers.active == ZORAN_FREE) { zr->jpg_buffers = fh->buffers; fh->buffers.active = ZORAN_ACTIVE; } else { dprintk(1, KERN_ERR "%s: %s - another session is already capturing\n", ZR_DEVNAME(zr), __func__); res = -EBUSY; } } if (!res && zr->codec_mode == BUZ_MODE_IDLE) { /* Ok load up the jpeg codec */ zr36057_enable_jpg(zr, mode); } spin_lock_irqsave(&zr->spinlock, flags); if (!res) { switch (zr->jpg_buffers.buffer[num].state) { case BUZ_STATE_DONE: dprintk(2, KERN_WARNING "%s: %s - queueing frame in BUZ_STATE_DONE state!?\n", ZR_DEVNAME(zr), __func__); /* fall through */ case BUZ_STATE_USER: /* since there is at least one unused buffer there's room for at least * one more pend[] entry */ zr->jpg_pend[zr->jpg_que_head++ & BUZ_MASK_FRAME] = num; zr->jpg_buffers.buffer[num].state = BUZ_STATE_PEND; fh->buffers.buffer[num] = zr->jpg_buffers.buffer[num]; zoran_feed_stat_com(zr); break; default: case BUZ_STATE_DMA: case BUZ_STATE_PEND: if (zr->jpg_buffers.active == ZORAN_FREE) { fh->buffers.active = ZORAN_FREE; zr->jpg_buffers.allocated = 0; } res = -EBUSY; /* what are you doing? */ break; } } spin_unlock_irqrestore(&zr->spinlock, flags); if (!res && zr->jpg_buffers.active == ZORAN_FREE) zr->jpg_buffers.active = fh->buffers.active; return res; } static int jpg_qbuf(struct zoran_fh *fh, int frame, enum zoran_codec_mode mode) { struct zoran *zr = fh->zr; int res = 0; /* Does the user want to stop streaming? 
*/ if (frame < 0) { if (zr->codec_mode == mode) { if (fh->buffers.active == ZORAN_FREE) { dprintk(1, KERN_ERR "%s: %s(-1) - session not active\n", ZR_DEVNAME(zr), __func__); return -EINVAL; } fh->buffers.active = zr->jpg_buffers.active = ZORAN_FREE; zr->jpg_buffers.allocated = 0; zr36057_enable_jpg(zr, BUZ_MODE_IDLE); return 0; } else { dprintk(1, KERN_ERR "%s: %s - stop streaming but not in streaming mode\n", ZR_DEVNAME(zr), __func__); return -EINVAL; } } if ((res = zoran_jpg_queue_frame(fh, frame, mode))) return res; /* Start the jpeg codec when the first frame is queued */ if (!res && zr->jpg_que_head == 1) jpeg_start(zr); return res; } /* * Sync on a MJPEG buffer */ static int jpg_sync(struct zoran_fh *fh, struct zoran_sync *bs) { struct zoran *zr = fh->zr; unsigned long flags; int frame; if (fh->buffers.active == ZORAN_FREE) { dprintk(1, KERN_ERR "%s: %s - capture is not currently active\n", ZR_DEVNAME(zr), __func__); return -EINVAL; } if (zr->codec_mode != BUZ_MODE_MOTION_DECOMPRESS && zr->codec_mode != BUZ_MODE_MOTION_COMPRESS) { dprintk(1, KERN_ERR "%s: %s - codec not in streaming mode\n", ZR_DEVNAME(zr), __func__); return -EINVAL; } if (!wait_event_interruptible_timeout(zr->jpg_capq, (zr->jpg_que_tail != zr->jpg_dma_tail || zr->jpg_dma_tail == zr->jpg_dma_head), 10*HZ)) { int isr; btand(~ZR36057_JMC_Go_en, ZR36057_JMC); udelay(1); zr->codec->control(zr->codec, CODEC_G_STATUS, sizeof(isr), &isr); dprintk(1, KERN_ERR "%s: %s - timeout: codec isr=0x%02x\n", ZR_DEVNAME(zr), __func__, isr); return -ETIME; } if (signal_pending(current)) return -ERESTARTSYS; spin_lock_irqsave(&zr->spinlock, flags); if (zr->jpg_dma_tail != zr->jpg_dma_head) frame = zr->jpg_pend[zr->jpg_que_tail++ & BUZ_MASK_FRAME]; else frame = zr->jpg_pend[zr->jpg_que_tail & BUZ_MASK_FRAME]; /* buffer should now be in BUZ_STATE_DONE */ if (zr->jpg_buffers.buffer[frame].state != BUZ_STATE_DONE) dprintk(2, KERN_ERR "%s: %s - internal state error\n", ZR_DEVNAME(zr), __func__); *bs = zr->jpg_buffers.buffer[frame].bs; bs->frame = frame; zr->jpg_buffers.buffer[frame].state = BUZ_STATE_USER; fh->buffers.buffer[frame] = zr->jpg_buffers.buffer[frame]; spin_unlock_irqrestore(&zr->spinlock, flags); return 0; } static void zoran_open_init_session(struct zoran_fh *fh) { int i; struct zoran *zr = fh->zr; /* Per default, map the V4L Buffers */ map_mode_raw(fh); /* take over the card's current settings */ fh->overlay_settings = zr->overlay_settings; fh->overlay_settings.is_set = 0; fh->overlay_settings.format = zr->overlay_settings.format; fh->overlay_active = ZORAN_FREE; /* v4l settings */ fh->v4l_settings = zr->v4l_settings; /* jpg settings */ fh->jpg_settings = zr->jpg_settings; /* buffers */ memset(&fh->buffers, 0, sizeof(fh->buffers)); for (i = 0; i < MAX_FRAME; i++) { fh->buffers.buffer[i].state = BUZ_STATE_USER; /* nothing going on */ fh->buffers.buffer[i].bs.frame = i; } fh->buffers.allocated = 0; fh->buffers.active = ZORAN_FREE; } static void zoran_close_end_session(struct zoran_fh *fh) { struct zoran *zr = fh->zr; /* overlay */ if (fh->overlay_active != ZORAN_FREE) { fh->overlay_active = zr->overlay_active = ZORAN_FREE; zr->v4l_overlay_active = 0; if (!zr->v4l_memgrab_active) zr36057_overlay(zr, 0); zr->overlay_mask = NULL; } if (fh->map_mode == ZORAN_MAP_MODE_RAW) { /* v4l capture */ if (fh->buffers.active != ZORAN_FREE) { unsigned long flags; spin_lock_irqsave(&zr->spinlock, flags); zr36057_set_memgrab(zr, 0); zr->v4l_buffers.allocated = 0; zr->v4l_buffers.active = fh->buffers.active = ZORAN_FREE; 
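/* raw capture is fully quiesced at this point, so the IRQ lock can be
 * dropped before the buffers themselves are freed below */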
spin_unlock_irqrestore(&zr->spinlock, flags); } /* v4l buffers */ if (fh->buffers.allocated) v4l_fbuffer_free(fh); } else { /* jpg capture */ if (fh->buffers.active != ZORAN_FREE) { zr36057_enable_jpg(zr, BUZ_MODE_IDLE); zr->jpg_buffers.allocated = 0; zr->jpg_buffers.active = fh->buffers.active = ZORAN_FREE; } /* jpg buffers */ if (fh->buffers.allocated) jpg_fbuffer_free(fh); } } /* * Open a zoran card. Right now the flags stuff is just playing */ static int zoran_open(struct file *file) { struct zoran *zr = video_drvdata(file); struct zoran_fh *fh; int res, first_open = 0; dprintk(2, KERN_INFO "%s: %s(%s, pid=[%d]), users(-)=%d\n", ZR_DEVNAME(zr), __func__, current->comm, task_pid_nr(current), zr->user + 1); mutex_lock(&zr->other_lock); if (zr->user >= 2048) { dprintk(1, KERN_ERR "%s: too many users (%d) on device\n", ZR_DEVNAME(zr), zr->user); res = -EBUSY; goto fail_unlock; } /* now, create the open()-specific file_ops struct */ fh = kzalloc(sizeof(struct zoran_fh), GFP_KERNEL); if (!fh) { dprintk(1, KERN_ERR "%s: %s - allocation of zoran_fh failed\n", ZR_DEVNAME(zr), __func__); res = -ENOMEM; goto fail_unlock; } /* used to be BUZ_MAX_WIDTH/HEIGHT, but that gives overflows * on norm-change! */ fh->overlay_mask = kmalloc(((768 + 31) / 32) * 576 * 4, GFP_KERNEL); if (!fh->overlay_mask) { dprintk(1, KERN_ERR "%s: %s - allocation of overlay_mask failed\n", ZR_DEVNAME(zr), __func__); res = -ENOMEM; goto fail_fh; } if (zr->user++ == 0) first_open = 1; /*mutex_unlock(&zr->resource_lock);*/ /* default setup - TODO: look at flags */ if (first_open) { /* First device open */ zr36057_restart(zr); zoran_open_init_params(zr); zoran_init_hardware(zr); btor(ZR36057_ICR_IntPinEn, ZR36057_ICR); } /* set file_ops stuff */ file->private_data = fh; fh->zr = zr; zoran_open_init_session(fh); mutex_unlock(&zr->other_lock); return 0; fail_fh: kfree(fh); fail_unlock: mutex_unlock(&zr->other_lock); dprintk(2, KERN_INFO "%s: open failed (%d), users(-)=%d\n", ZR_DEVNAME(zr), res, zr->user); return res; } static int zoran_close(struct file *file) { struct zoran_fh *fh = file->private_data; struct zoran *zr = fh->zr; dprintk(2, KERN_INFO "%s: %s(%s, pid=[%d]), users(+)=%d\n", ZR_DEVNAME(zr), __func__, current->comm, task_pid_nr(current), zr->user - 1); /* kernel locks (fs/device.c), so don't do that ourselves * (prevents deadlocks) */ mutex_lock(&zr->other_lock); zoran_close_end_session(fh); if (zr->user-- == 1) { /* Last process */ /* Clean up JPEG process */ wake_up_interruptible(&zr->jpg_capq); zr36057_enable_jpg(zr, BUZ_MODE_IDLE); zr->jpg_buffers.allocated = 0; zr->jpg_buffers.active = ZORAN_FREE; /* disable interrupts */ btand(~ZR36057_ICR_IntPinEn, ZR36057_ICR); if (zr36067_debug > 1) print_interrupts(zr); /* Overlay off */ zr->v4l_overlay_active = 0; zr36057_overlay(zr, 0); zr->overlay_mask = NULL; /* capture off */ wake_up_interruptible(&zr->v4l_capq); zr36057_set_memgrab(zr, 0); zr->v4l_buffers.allocated = 0; zr->v4l_buffers.active = ZORAN_FREE; zoran_set_pci_master(zr, 0); if (!pass_through) { /* Switch to color bar */ decoder_call(zr, video, s_stream, 0); encoder_call(zr, video, s_routing, 2, 0, 0); } } mutex_unlock(&zr->other_lock); file->private_data = NULL; kfree(fh->overlay_mask); kfree(fh); dprintk(4, KERN_INFO "%s: %s done\n", ZR_DEVNAME(zr), __func__); return 0; } static ssize_t zoran_read (struct file *file, char __user *data, size_t count, loff_t *ppos) { /* we simply don't support read() (yet)... 
*/ return -EINVAL; } static ssize_t zoran_write (struct file *file, const char __user *data, size_t count, loff_t *ppos) { /* ...and the same goes for write() */ return -EINVAL; } static int setup_fbuffer(struct zoran_fh *fh, void *base, const struct zoran_format *fmt, int width, int height, int bytesperline) { struct zoran *zr = fh->zr; /* (Ronald) v4l/v4l2 guidelines */ if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO)) return -EPERM; /* Don't allow frame buffer overlay if PCI or AGP is buggy, or on ALi Magik (that needs very low latency while the card needs a higher value always) */ if (pci_pci_problems & (PCIPCI_FAIL | PCIAGP_FAIL | PCIPCI_ALIMAGIK)) return -ENXIO; /* we need a bytesperline value, even if not given */ if (!bytesperline) bytesperline = width * ((fmt->depth + 7) & ~7) / 8; #if 0 if (zr->overlay_active) { /* dzjee... stupid users... don't even bother to turn off * overlay before changing the memory location... * normally, we would return errors here. However, one of * the tools that does this is... xawtv! and since xawtv * is used by +/- 99% of the users, we'd rather be user- * friendly and silently do as if nothing went wrong */ dprintk(3, KERN_ERR "%s: %s - forced overlay turnoff because framebuffer changed\n", ZR_DEVNAME(zr), __func__); zr36057_overlay(zr, 0); } #endif if (!(fmt->flags & ZORAN_FORMAT_OVERLAY)) { dprintk(1, KERN_ERR "%s: %s - no valid overlay format given\n", ZR_DEVNAME(zr), __func__); return -EINVAL; } if (height <= 0 || width <= 0 || bytesperline <= 0) { dprintk(1, KERN_ERR "%s: %s - invalid height/width/bpl value (%d|%d|%d)\n", ZR_DEVNAME(zr), __func__, width, height, bytesperline); return -EINVAL; } if (bytesperline & 3) { dprintk(1, KERN_ERR "%s: %s - bytesperline (%d) must be 4-byte aligned\n", ZR_DEVNAME(zr), __func__, bytesperline); return -EINVAL; } zr->vbuf_base = (void *) ((unsigned long) base & ~3); zr->vbuf_height = height; zr->vbuf_width = width; zr->vbuf_depth = fmt->depth; zr->overlay_settings.format = fmt; zr->vbuf_bytesperline = bytesperline; /* The user should set new window parameters */ zr->overlay_settings.is_set = 0; return 0; } static int setup_window(struct zoran_fh *fh, int x, int y, int width, int height, struct v4l2_clip __user *clips, int clipcount, void __user *bitmap) { struct zoran *zr = fh->zr; struct v4l2_clip *vcp = NULL; int on, end; if (!zr->vbuf_base) { dprintk(1, KERN_ERR "%s: %s - frame buffer has to be set first\n", ZR_DEVNAME(zr), __func__); return -EINVAL; } if (!fh->overlay_settings.format) { dprintk(1, KERN_ERR "%s: %s - no overlay format set\n", ZR_DEVNAME(zr), __func__); return -EINVAL; } /* * The video front end needs 4-byte aligned line sizes; we correct that * silently here if necessary */ if (zr->vbuf_depth == 15 || zr->vbuf_depth == 16) { end = (x + width) & ~1; /* round down */ x = (x + 1) & ~1; /* round up */ width = end - x; } if (zr->vbuf_depth == 24) { end = (x + width) & ~3; /* round down */ x = (x + 3) & ~3; /* round up */ width = end - x; } if (width > BUZ_MAX_WIDTH) width = BUZ_MAX_WIDTH; if (height > BUZ_MAX_HEIGHT) height = BUZ_MAX_HEIGHT; /* Check for invalid parameters */ if (width < BUZ_MIN_WIDTH || height < BUZ_MIN_HEIGHT || width > BUZ_MAX_WIDTH || height > BUZ_MAX_HEIGHT) { dprintk(1, KERN_ERR "%s: %s - width = %d or height = %d invalid\n", ZR_DEVNAME(zr), __func__, width, height); return -EINVAL; } fh->overlay_settings.x = x; fh->overlay_settings.y = y; fh->overlay_settings.width = width; fh->overlay_settings.height = height; fh->overlay_settings.clipcount = clipcount; /* * If an 
overlay is running, we have to switch it off * and switch it on again in order to get the new settings in effect. * * We also want to avoid that the overlay mask is written * when an overlay is running. */ on = zr->v4l_overlay_active && !zr->v4l_memgrab_active && zr->overlay_active != ZORAN_FREE && fh->overlay_active != ZORAN_FREE; if (on) zr36057_overlay(zr, 0); /* * Write the overlay mask if clips are wanted. * We prefer a bitmap. */ if (bitmap) { /* fake value - it just means we want clips */ fh->overlay_settings.clipcount = 1; if (copy_from_user(fh->overlay_mask, bitmap, (width * height + 7) / 8)) { return -EFAULT; } } else if (clipcount > 0) { /* write our own bitmap from the clips */ vcp = vmalloc(sizeof(struct v4l2_clip) * (clipcount + 4)); if (vcp == NULL) { dprintk(1, KERN_ERR "%s: %s - Alloc of clip mask failed\n", ZR_DEVNAME(zr), __func__); return -ENOMEM; } if (copy_from_user (vcp, clips, sizeof(struct v4l2_clip) * clipcount)) { vfree(vcp); return -EFAULT; } write_overlay_mask(fh, vcp, clipcount); vfree(vcp); } fh->overlay_settings.is_set = 1; if (fh->overlay_active != ZORAN_FREE && zr->overlay_active != ZORAN_FREE) zr->overlay_settings = fh->overlay_settings; if (on) zr36057_overlay(zr, 1); /* Make sure the changes come into effect */ return wait_grab_pending(zr); } static int setup_overlay(struct zoran_fh *fh, int on) { struct zoran *zr = fh->zr; /* If there is nothing to do, return immediately */ if ((on && fh->overlay_active != ZORAN_FREE) || (!on && fh->overlay_active == ZORAN_FREE)) return 0; /* check whether we're touching someone else's overlay */ if (on && zr->overlay_active != ZORAN_FREE && fh->overlay_active == ZORAN_FREE) { dprintk(1, KERN_ERR "%s: %s - overlay is already active for another session\n", ZR_DEVNAME(zr), __func__); return -EBUSY; } if (!on && zr->overlay_active != ZORAN_FREE && fh->overlay_active == ZORAN_FREE) { dprintk(1, KERN_ERR "%s: %s - you cannot cancel someone else's session\n", ZR_DEVNAME(zr), __func__); return -EPERM; } if (on == 0) { zr->overlay_active = fh->overlay_active = ZORAN_FREE; zr->v4l_overlay_active = 0; /* When a grab is running, the video simply * won't be switched on any more */ if (!zr->v4l_memgrab_active) zr36057_overlay(zr, 0); zr->overlay_mask = NULL; } else { if (!zr->vbuf_base || !fh->overlay_settings.is_set) { dprintk(1, KERN_ERR "%s: %s - buffer or window not set\n", ZR_DEVNAME(zr), __func__); return -EINVAL; } if (!fh->overlay_settings.format) { dprintk(1, KERN_ERR "%s: %s - no overlay format set\n", ZR_DEVNAME(zr), __func__); return -EINVAL; } zr->overlay_active = fh->overlay_active = ZORAN_LOCKED; zr->v4l_overlay_active = 1; zr->overlay_mask = fh->overlay_mask; zr->overlay_settings = fh->overlay_settings; if (!zr->v4l_memgrab_active) zr36057_overlay(zr, 1); /* When a grab is running, the video will be * switched on when grab is finished */ } /* Make sure the changes come into effect */ return wait_grab_pending(zr); } /* get the status of a buffer in the clients buffer queue */ static int zoran_v4l2_buffer_status(struct zoran_fh *fh, struct v4l2_buffer *buf, int num) { struct zoran *zr = fh->zr; unsigned long flags; buf->flags = V4L2_BUF_FLAG_MAPPED; switch (fh->map_mode) { case ZORAN_MAP_MODE_RAW: /* check range */ if (num < 0 || num >= fh->buffers.num_buffers || !fh->buffers.allocated) { dprintk(1, KERN_ERR "%s: %s - wrong number or buffers not allocated\n", ZR_DEVNAME(zr), __func__); return -EINVAL; } spin_lock_irqsave(&zr->spinlock, flags); dprintk(3, KERN_DEBUG "%s: %s() - raw active=%c, buffer %d: state=%c, 
map=%c\n", ZR_DEVNAME(zr), __func__, "FAL"[fh->buffers.active], num, "UPMD"[zr->v4l_buffers.buffer[num].state], fh->buffers.buffer[num].map ? 'Y' : 'N'); spin_unlock_irqrestore(&zr->spinlock, flags); buf->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; buf->length = fh->buffers.buffer_size; /* get buffer */ buf->bytesused = fh->buffers.buffer[num].bs.length; if (fh->buffers.buffer[num].state == BUZ_STATE_DONE || fh->buffers.buffer[num].state == BUZ_STATE_USER) { buf->sequence = fh->buffers.buffer[num].bs.seq; buf->flags |= V4L2_BUF_FLAG_DONE; buf->timestamp = fh->buffers.buffer[num].bs.timestamp; } else { buf->flags |= V4L2_BUF_FLAG_QUEUED; } if (fh->v4l_settings.height <= BUZ_MAX_HEIGHT / 2) buf->field = V4L2_FIELD_TOP; else buf->field = V4L2_FIELD_INTERLACED; break; case ZORAN_MAP_MODE_JPG_REC: case ZORAN_MAP_MODE_JPG_PLAY: /* check range */ if (num < 0 || num >= fh->buffers.num_buffers || !fh->buffers.allocated) { dprintk(1, KERN_ERR "%s: %s - wrong number or buffers not allocated\n", ZR_DEVNAME(zr), __func__); return -EINVAL; } buf->type = (fh->map_mode == ZORAN_MAP_MODE_JPG_REC) ? V4L2_BUF_TYPE_VIDEO_CAPTURE : V4L2_BUF_TYPE_VIDEO_OUTPUT; buf->length = fh->buffers.buffer_size; /* these variables are only written after frame has been captured */ if (fh->buffers.buffer[num].state == BUZ_STATE_DONE || fh->buffers.buffer[num].state == BUZ_STATE_USER) { buf->sequence = fh->buffers.buffer[num].bs.seq; buf->timestamp = fh->buffers.buffer[num].bs.timestamp; buf->bytesused = fh->buffers.buffer[num].bs.length; buf->flags |= V4L2_BUF_FLAG_DONE; } else { buf->flags |= V4L2_BUF_FLAG_QUEUED; } /* which fields are these? */ if (fh->jpg_settings.TmpDcm != 1) buf->field = fh->jpg_settings.odd_even ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM; else buf->field = fh->jpg_settings.odd_even ? 
V4L2_FIELD_SEQ_TB : V4L2_FIELD_SEQ_BT; break; default: dprintk(5, KERN_ERR "%s: %s - invalid buffer type|map_mode (%d|%d)\n", ZR_DEVNAME(zr), __func__, buf->type, fh->map_mode); return -EINVAL; } buf->memory = V4L2_MEMORY_MMAP; buf->index = num; buf->m.offset = buf->length * num; return 0; } static int zoran_set_norm (struct zoran *zr, v4l2_std_id norm) { int on; if (zr->v4l_buffers.active != ZORAN_FREE || zr->jpg_buffers.active != ZORAN_FREE) { dprintk(1, KERN_WARNING "%s: %s called while in playback/capture mode\n", ZR_DEVNAME(zr), __func__); return -EBUSY; } if (!(norm & zr->card.norms)) { dprintk(1, KERN_ERR "%s: %s - unsupported norm %llx\n", ZR_DEVNAME(zr), __func__, norm); return -EINVAL; } if (norm == V4L2_STD_ALL) { unsigned int status = 0; v4l2_std_id std = 0; decoder_call(zr, video, querystd, &std); decoder_call(zr, core, s_std, std); /* let changes come into effect */ ssleep(2); decoder_call(zr, video, g_input_status, &status); if (status & V4L2_IN_ST_NO_SIGNAL) { dprintk(1, KERN_ERR "%s: %s - no norm detected\n", ZR_DEVNAME(zr), __func__); /* reset norm */ decoder_call(zr, core, s_std, zr->norm); return -EIO; } norm = std; } if (norm & V4L2_STD_SECAM) zr->timing = zr->card.tvn[2]; else if (norm & V4L2_STD_NTSC) zr->timing = zr->card.tvn[1]; else zr->timing = zr->card.tvn[0]; /* We switch overlay off and on since a change in the * norm needs different VFE settings */ on = zr->overlay_active && !zr->v4l_memgrab_active; if (on) zr36057_overlay(zr, 0); decoder_call(zr, core, s_std, norm); encoder_call(zr, video, s_std_output, norm); if (on) zr36057_overlay(zr, 1); /* Make sure the changes come into effect */ zr->norm = norm; return 0; } static int zoran_set_input (struct zoran *zr, int input) { if (input == zr->input) { return 0; } if (zr->v4l_buffers.active != ZORAN_FREE || zr->jpg_buffers.active != ZORAN_FREE) { dprintk(1, KERN_WARNING "%s: %s called while in playback/capture mode\n", ZR_DEVNAME(zr), __func__); return -EBUSY; } if (input < 0 || input >= zr->card.inputs) { dprintk(1, KERN_ERR "%s: %s - unsupported input %d\n", ZR_DEVNAME(zr), __func__, input); return -EINVAL; } zr->input = input; decoder_call(zr, video, s_routing, zr->card.input[input].muxsel, 0, 0); return 0; } /* * ioctl routine */ static int zoran_querycap(struct file *file, void *__fh, struct v4l2_capability *cap) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; memset(cap, 0, sizeof(*cap)); strncpy(cap->card, ZR_DEVNAME(zr), sizeof(cap->card)-1); strncpy(cap->driver, "zoran", sizeof(cap->driver)-1); snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s", pci_name(zr->pci_dev)); cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OVERLAY; return 0; } static int zoran_enum_fmt(struct zoran *zr, struct v4l2_fmtdesc *fmt, int flag) { unsigned int num, i; for (num = i = 0; i < NUM_FORMATS; i++) { if (zoran_formats[i].flags & flag && num++ == fmt->index) { strncpy(fmt->description, zoran_formats[i].name, sizeof(fmt->description) - 1); /* fmt struct pre-zeroed, so adding '\0' not needed */ fmt->pixelformat = zoran_formats[i].fourcc; if (zoran_formats[i].flags & ZORAN_FORMAT_COMPRESSED) fmt->flags |= V4L2_FMT_FLAG_COMPRESSED; return 0; } } return -EINVAL; } static int zoran_enum_fmt_vid_cap(struct file *file, void *__fh, struct v4l2_fmtdesc *f) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; return zoran_enum_fmt(zr, f, ZORAN_FORMAT_CAPTURE); } static int zoran_enum_fmt_vid_out(struct file *file, void *__fh, struct v4l2_fmtdesc *f) { struct 
zoran_fh *fh = __fh; struct zoran *zr = fh->zr; return zoran_enum_fmt(zr, f, ZORAN_FORMAT_PLAYBACK); } static int zoran_enum_fmt_vid_overlay(struct file *file, void *__fh, struct v4l2_fmtdesc *f) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; return zoran_enum_fmt(zr, f, ZORAN_FORMAT_OVERLAY); } static int zoran_g_fmt_vid_out(struct file *file, void *__fh, struct v4l2_format *fmt) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; mutex_lock(&zr->resource_lock); fmt->fmt.pix.width = fh->jpg_settings.img_width / fh->jpg_settings.HorDcm; fmt->fmt.pix.height = fh->jpg_settings.img_height * 2 / (fh->jpg_settings.VerDcm * fh->jpg_settings.TmpDcm); fmt->fmt.pix.sizeimage = zoran_v4l2_calc_bufsize(&fh->jpg_settings); fmt->fmt.pix.pixelformat = V4L2_PIX_FMT_MJPEG; if (fh->jpg_settings.TmpDcm == 1) fmt->fmt.pix.field = (fh->jpg_settings.odd_even ? V4L2_FIELD_SEQ_TB : V4L2_FIELD_SEQ_BT); else fmt->fmt.pix.field = (fh->jpg_settings.odd_even ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM); fmt->fmt.pix.bytesperline = 0; fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; mutex_unlock(&zr->resource_lock); return 0; } static int zoran_g_fmt_vid_cap(struct file *file, void *__fh, struct v4l2_format *fmt) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; if (fh->map_mode != ZORAN_MAP_MODE_RAW) return zoran_g_fmt_vid_out(file, fh, fmt); mutex_lock(&zr->resource_lock); fmt->fmt.pix.width = fh->v4l_settings.width; fmt->fmt.pix.height = fh->v4l_settings.height; fmt->fmt.pix.sizeimage = fh->v4l_settings.bytesperline * fh->v4l_settings.height; fmt->fmt.pix.pixelformat = fh->v4l_settings.format->fourcc; fmt->fmt.pix.colorspace = fh->v4l_settings.format->colorspace; fmt->fmt.pix.bytesperline = fh->v4l_settings.bytesperline; if (BUZ_MAX_HEIGHT < (fh->v4l_settings.height * 2)) fmt->fmt.pix.field = V4L2_FIELD_INTERLACED; else fmt->fmt.pix.field = V4L2_FIELD_TOP; mutex_unlock(&zr->resource_lock); return 0; } static int zoran_g_fmt_vid_overlay(struct file *file, void *__fh, struct v4l2_format *fmt) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; mutex_lock(&zr->resource_lock); fmt->fmt.win.w.left = fh->overlay_settings.x; fmt->fmt.win.w.top = fh->overlay_settings.y; fmt->fmt.win.w.width = fh->overlay_settings.width; fmt->fmt.win.w.height = fh->overlay_settings.height; if (fh->overlay_settings.width * 2 > BUZ_MAX_HEIGHT) fmt->fmt.win.field = V4L2_FIELD_INTERLACED; else fmt->fmt.win.field = V4L2_FIELD_TOP; mutex_unlock(&zr->resource_lock); return 0; } static int zoran_try_fmt_vid_overlay(struct file *file, void *__fh, struct v4l2_format *fmt) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; mutex_lock(&zr->resource_lock); if (fmt->fmt.win.w.width > BUZ_MAX_WIDTH) fmt->fmt.win.w.width = BUZ_MAX_WIDTH; if (fmt->fmt.win.w.width < BUZ_MIN_WIDTH) fmt->fmt.win.w.width = BUZ_MIN_WIDTH; if (fmt->fmt.win.w.height > BUZ_MAX_HEIGHT) fmt->fmt.win.w.height = BUZ_MAX_HEIGHT; if (fmt->fmt.win.w.height < BUZ_MIN_HEIGHT) fmt->fmt.win.w.height = BUZ_MIN_HEIGHT; mutex_unlock(&zr->resource_lock); return 0; } static int zoran_try_fmt_vid_out(struct file *file, void *__fh, struct v4l2_format *fmt) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; struct zoran_jpg_settings settings; int res = 0; if (fmt->fmt.pix.pixelformat != V4L2_PIX_FMT_MJPEG) return -EINVAL; mutex_lock(&zr->resource_lock); settings = fh->jpg_settings; /* we actually need to set 'real' parameters now */ if ((fmt->fmt.pix.height * 2) > BUZ_MAX_HEIGHT) settings.TmpDcm = 1; else settings.TmpDcm = 2; settings.decimation = 0; if 
(fmt->fmt.pix.height <= fh->jpg_settings.img_height / 2) settings.VerDcm = 2; else settings.VerDcm = 1; if (fmt->fmt.pix.width <= fh->jpg_settings.img_width / 4) settings.HorDcm = 4; else if (fmt->fmt.pix.width <= fh->jpg_settings.img_width / 2) settings.HorDcm = 2; else settings.HorDcm = 1; if (settings.TmpDcm == 1) settings.field_per_buff = 2; else settings.field_per_buff = 1; if (settings.HorDcm > 1) { settings.img_x = (BUZ_MAX_WIDTH == 720) ? 8 : 0; settings.img_width = (BUZ_MAX_WIDTH == 720) ? 704 : BUZ_MAX_WIDTH; } else { settings.img_x = 0; settings.img_width = BUZ_MAX_WIDTH; } /* check */ res = zoran_check_jpg_settings(zr, &settings, 1); if (res) goto tryfmt_unlock_and_return; /* tell the user what we actually did */ fmt->fmt.pix.width = settings.img_width / settings.HorDcm; fmt->fmt.pix.height = settings.img_height * 2 / (settings.TmpDcm * settings.VerDcm); if (settings.TmpDcm == 1) fmt->fmt.pix.field = (fh->jpg_settings.odd_even ? V4L2_FIELD_SEQ_TB : V4L2_FIELD_SEQ_BT); else fmt->fmt.pix.field = (fh->jpg_settings.odd_even ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM); fmt->fmt.pix.sizeimage = zoran_v4l2_calc_bufsize(&settings); fmt->fmt.pix.bytesperline = 0; fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; tryfmt_unlock_and_return: mutex_unlock(&zr->resource_lock); return res; } static int zoran_try_fmt_vid_cap(struct file *file, void *__fh, struct v4l2_format *fmt) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; int bpp; int i; if (fmt->fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG) return zoran_try_fmt_vid_out(file, fh, fmt); mutex_lock(&zr->resource_lock); for (i = 0; i < NUM_FORMATS; i++) if (zoran_formats[i].fourcc == fmt->fmt.pix.pixelformat) break; if (i == NUM_FORMATS) { mutex_unlock(&zr->resource_lock); return -EINVAL; } bpp = DIV_ROUND_UP(zoran_formats[i].depth, 8); v4l_bound_align_image( &fmt->fmt.pix.width, BUZ_MIN_WIDTH, BUZ_MAX_WIDTH, bpp == 2 ? 
1 : 2, &fmt->fmt.pix.height, BUZ_MIN_HEIGHT, BUZ_MAX_HEIGHT, 0, 0); mutex_unlock(&zr->resource_lock); return 0; } static int zoran_s_fmt_vid_overlay(struct file *file, void *__fh, struct v4l2_format *fmt) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; int res; dprintk(3, "x=%d, y=%d, w=%d, h=%d, cnt=%d, map=0x%p\n", fmt->fmt.win.w.left, fmt->fmt.win.w.top, fmt->fmt.win.w.width, fmt->fmt.win.w.height, fmt->fmt.win.clipcount, fmt->fmt.win.bitmap); mutex_lock(&zr->resource_lock); res = setup_window(fh, fmt->fmt.win.w.left, fmt->fmt.win.w.top, fmt->fmt.win.w.width, fmt->fmt.win.w.height, (struct v4l2_clip __user *)fmt->fmt.win.clips, fmt->fmt.win.clipcount, fmt->fmt.win.bitmap); mutex_unlock(&zr->resource_lock); return res; } static int zoran_s_fmt_vid_out(struct file *file, void *__fh, struct v4l2_format *fmt) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; __le32 printformat = __cpu_to_le32(fmt->fmt.pix.pixelformat); struct zoran_jpg_settings settings; int res = 0; dprintk(3, "size=%dx%d, fmt=0x%x (%4.4s)\n", fmt->fmt.pix.width, fmt->fmt.pix.height, fmt->fmt.pix.pixelformat, (char *) &printformat); if (fmt->fmt.pix.pixelformat != V4L2_PIX_FMT_MJPEG) return -EINVAL; mutex_lock(&zr->resource_lock); if (fh->buffers.allocated) { dprintk(1, KERN_ERR "%s: VIDIOC_S_FMT - cannot change capture mode\n", ZR_DEVNAME(zr)); res = -EBUSY; goto sfmtjpg_unlock_and_return; } settings = fh->jpg_settings; /* we actually need to set 'real' parameters now */ if (fmt->fmt.pix.height * 2 > BUZ_MAX_HEIGHT) settings.TmpDcm = 1; else settings.TmpDcm = 2; settings.decimation = 0; if (fmt->fmt.pix.height <= fh->jpg_settings.img_height / 2) settings.VerDcm = 2; else settings.VerDcm = 1; if (fmt->fmt.pix.width <= fh->jpg_settings.img_width / 4) settings.HorDcm = 4; else if (fmt->fmt.pix.width <= fh->jpg_settings.img_width / 2) settings.HorDcm = 2; else settings.HorDcm = 1; if (settings.TmpDcm == 1) settings.field_per_buff = 2; else settings.field_per_buff = 1; if (settings.HorDcm > 1) { settings.img_x = (BUZ_MAX_WIDTH == 720) ? 8 : 0; settings.img_width = (BUZ_MAX_WIDTH == 720) ? 704 : BUZ_MAX_WIDTH; } else { settings.img_x = 0; settings.img_width = BUZ_MAX_WIDTH; } /* check */ res = zoran_check_jpg_settings(zr, &settings, 0); if (res) goto sfmtjpg_unlock_and_return; /* it's ok, so set them */ fh->jpg_settings = settings; map_mode_jpg(fh, fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT); fh->buffers.buffer_size = zoran_v4l2_calc_bufsize(&fh->jpg_settings); /* tell the user what we actually did */ fmt->fmt.pix.width = settings.img_width / settings.HorDcm; fmt->fmt.pix.height = settings.img_height * 2 / (settings.TmpDcm * settings.VerDcm); if (settings.TmpDcm == 1) fmt->fmt.pix.field = (fh->jpg_settings.odd_even ? V4L2_FIELD_SEQ_TB : V4L2_FIELD_SEQ_BT); else fmt->fmt.pix.field = (fh->jpg_settings.odd_even ? 
V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM); fmt->fmt.pix.bytesperline = 0; fmt->fmt.pix.sizeimage = fh->buffers.buffer_size; fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; sfmtjpg_unlock_and_return: mutex_unlock(&zr->resource_lock); return res; } static int zoran_s_fmt_vid_cap(struct file *file, void *__fh, struct v4l2_format *fmt) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; int i; int res = 0; if (fmt->fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG) return zoran_s_fmt_vid_out(file, fh, fmt); for (i = 0; i < NUM_FORMATS; i++) if (fmt->fmt.pix.pixelformat == zoran_formats[i].fourcc) break; if (i == NUM_FORMATS) { dprintk(1, KERN_ERR "%s: VIDIOC_S_FMT - unknown/unsupported format 0x%x\n", ZR_DEVNAME(zr), fmt->fmt.pix.pixelformat); return -EINVAL; } mutex_lock(&zr->resource_lock); if ((fh->map_mode != ZORAN_MAP_MODE_RAW && fh->buffers.allocated) || fh->buffers.active != ZORAN_FREE) { dprintk(1, KERN_ERR "%s: VIDIOC_S_FMT - cannot change capture mode\n", ZR_DEVNAME(zr)); res = -EBUSY; goto sfmtv4l_unlock_and_return; } if (fmt->fmt.pix.height > BUZ_MAX_HEIGHT) fmt->fmt.pix.height = BUZ_MAX_HEIGHT; if (fmt->fmt.pix.width > BUZ_MAX_WIDTH) fmt->fmt.pix.width = BUZ_MAX_WIDTH; map_mode_raw(fh); res = zoran_v4l_set_format(fh, fmt->fmt.pix.width, fmt->fmt.pix.height, &zoran_formats[i]); if (res) goto sfmtv4l_unlock_and_return; /* tell the user the results/missing stuff */ fmt->fmt.pix.bytesperline = fh->v4l_settings.bytesperline; fmt->fmt.pix.sizeimage = fh->v4l_settings.height * fh->v4l_settings.bytesperline; fmt->fmt.pix.colorspace = fh->v4l_settings.format->colorspace; if (BUZ_MAX_HEIGHT < (fh->v4l_settings.height * 2)) fmt->fmt.pix.field = V4L2_FIELD_INTERLACED; else fmt->fmt.pix.field = V4L2_FIELD_TOP; sfmtv4l_unlock_and_return: mutex_unlock(&zr->resource_lock); return res; } static int zoran_g_fbuf(struct file *file, void *__fh, struct v4l2_framebuffer *fb) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; memset(fb, 0, sizeof(*fb)); mutex_lock(&zr->resource_lock); fb->base = zr->vbuf_base; fb->fmt.width = zr->vbuf_width; fb->fmt.height = zr->vbuf_height; if (zr->overlay_settings.format) fb->fmt.pixelformat = fh->overlay_settings.format->fourcc; fb->fmt.bytesperline = zr->vbuf_bytesperline; mutex_unlock(&zr->resource_lock); fb->fmt.colorspace = V4L2_COLORSPACE_SRGB; fb->fmt.field = V4L2_FIELD_INTERLACED; fb->flags = V4L2_FBUF_FLAG_OVERLAY; fb->capability = V4L2_FBUF_CAP_LIST_CLIPPING; return 0; } static int zoran_s_fbuf(struct file *file, void *__fh, struct v4l2_framebuffer *fb) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; int i, res = 0; __le32 printformat = __cpu_to_le32(fb->fmt.pixelformat); for (i = 0; i < NUM_FORMATS; i++) if (zoran_formats[i].fourcc == fb->fmt.pixelformat) break; if (i == NUM_FORMATS) { dprintk(1, KERN_ERR "%s: VIDIOC_S_FBUF - format=0x%x (%4.4s) not allowed\n", ZR_DEVNAME(zr), fb->fmt.pixelformat, (char *)&printformat); return -EINVAL; } mutex_lock(&zr->resource_lock); res = setup_fbuffer(fh, fb->base, &zoran_formats[i], fb->fmt.width, fb->fmt.height, fb->fmt.bytesperline); mutex_unlock(&zr->resource_lock); return res; } static int zoran_overlay(struct file *file, void *__fh, unsigned int on) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; int res; mutex_lock(&zr->resource_lock); res = setup_overlay(fh, on); mutex_unlock(&zr->resource_lock); return res; } static int zoran_streamoff(struct file *file, void *__fh, enum v4l2_buf_type type); static int zoran_reqbufs(struct file *file, void *__fh, struct v4l2_requestbuffers *req) { struct 
zoran_fh *fh = __fh; struct zoran *zr = fh->zr; int res = 0; if (req->memory != V4L2_MEMORY_MMAP) { dprintk(2, KERN_ERR "%s: only MEMORY_MMAP capture is supported, not %d\n", ZR_DEVNAME(zr), req->memory); return -EINVAL; } if (req->count == 0) return zoran_streamoff(file, fh, req->type); mutex_lock(&zr->resource_lock); if (fh->buffers.allocated) { dprintk(2, KERN_ERR "%s: VIDIOC_REQBUFS - buffers already allocated\n", ZR_DEVNAME(zr)); res = -EBUSY; goto v4l2reqbuf_unlock_and_return; } if (fh->map_mode == ZORAN_MAP_MODE_RAW && req->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { /* control user input */ if (req->count < 2) req->count = 2; if (req->count > v4l_nbufs) req->count = v4l_nbufs; /* The next mmap will map the V4L buffers */ map_mode_raw(fh); fh->buffers.num_buffers = req->count; if (v4l_fbuffer_alloc(fh)) { res = -ENOMEM; goto v4l2reqbuf_unlock_and_return; } } else if (fh->map_mode == ZORAN_MAP_MODE_JPG_REC || fh->map_mode == ZORAN_MAP_MODE_JPG_PLAY) { /* we need to calculate size ourselves now */ if (req->count < 4) req->count = 4; if (req->count > jpg_nbufs) req->count = jpg_nbufs; /* The next mmap will map the MJPEG buffers */ map_mode_jpg(fh, req->type == V4L2_BUF_TYPE_VIDEO_OUTPUT); fh->buffers.num_buffers = req->count; fh->buffers.buffer_size = zoran_v4l2_calc_bufsize(&fh->jpg_settings); if (jpg_fbuffer_alloc(fh)) { res = -ENOMEM; goto v4l2reqbuf_unlock_and_return; } } else { dprintk(1, KERN_ERR "%s: VIDIOC_REQBUFS - unknown type %d\n", ZR_DEVNAME(zr), req->type); res = -EINVAL; goto v4l2reqbuf_unlock_and_return; } v4l2reqbuf_unlock_and_return: mutex_unlock(&zr->resource_lock); return res; } static int zoran_querybuf(struct file *file, void *__fh, struct v4l2_buffer *buf) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; int res; mutex_lock(&zr->resource_lock); res = zoran_v4l2_buffer_status(fh, buf, buf->index); mutex_unlock(&zr->resource_lock); return res; } static int zoran_qbuf(struct file *file, void *__fh, struct v4l2_buffer *buf) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; int res = 0, codec_mode, buf_type; mutex_lock(&zr->resource_lock); switch (fh->map_mode) { case ZORAN_MAP_MODE_RAW: if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { dprintk(1, KERN_ERR "%s: VIDIOC_QBUF - invalid buf->type=%d for map_mode=%d\n", ZR_DEVNAME(zr), buf->type, fh->map_mode); res = -EINVAL; goto qbuf_unlock_and_return; } res = zoran_v4l_queue_frame(fh, buf->index); if (res) goto qbuf_unlock_and_return; if (!zr->v4l_memgrab_active && fh->buffers.active == ZORAN_LOCKED) zr36057_set_memgrab(zr, 1); break; case ZORAN_MAP_MODE_JPG_REC: case ZORAN_MAP_MODE_JPG_PLAY: if (fh->map_mode == ZORAN_MAP_MODE_JPG_PLAY) { buf_type = V4L2_BUF_TYPE_VIDEO_OUTPUT; codec_mode = BUZ_MODE_MOTION_DECOMPRESS; } else { buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE; codec_mode = BUZ_MODE_MOTION_COMPRESS; } if (buf->type != buf_type) { dprintk(1, KERN_ERR "%s: VIDIOC_QBUF - invalid buf->type=%d for map_mode=%d\n", ZR_DEVNAME(zr), buf->type, fh->map_mode); res = -EINVAL; goto qbuf_unlock_and_return; } res = zoran_jpg_queue_frame(fh, buf->index, codec_mode); if (res != 0) goto qbuf_unlock_and_return; if (zr->codec_mode == BUZ_MODE_IDLE && fh->buffers.active == ZORAN_LOCKED) zr36057_enable_jpg(zr, codec_mode); break; default: dprintk(1, KERN_ERR "%s: VIDIOC_QBUF - unsupported type %d\n", ZR_DEVNAME(zr), buf->type); res = -EINVAL; break; } qbuf_unlock_and_return: mutex_unlock(&zr->resource_lock); return res; } static int zoran_dqbuf(struct file *file, void *__fh, struct v4l2_buffer *buf) { struct zoran_fh *fh = __fh; 
struct zoran *zr = fh->zr; int res = 0, buf_type, num = -1; /* compiler borks here (?) */ mutex_lock(&zr->resource_lock); switch (fh->map_mode) { case ZORAN_MAP_MODE_RAW: if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { dprintk(1, KERN_ERR "%s: VIDIOC_QBUF - invalid buf->type=%d for map_mode=%d\n", ZR_DEVNAME(zr), buf->type, fh->map_mode); res = -EINVAL; goto dqbuf_unlock_and_return; } num = zr->v4l_pend[zr->v4l_sync_tail & V4L_MASK_FRAME]; if (file->f_flags & O_NONBLOCK && zr->v4l_buffers.buffer[num].state != BUZ_STATE_DONE) { res = -EAGAIN; goto dqbuf_unlock_and_return; } res = v4l_sync(fh, num); if (res) goto dqbuf_unlock_and_return; zr->v4l_sync_tail++; res = zoran_v4l2_buffer_status(fh, buf, num); break; case ZORAN_MAP_MODE_JPG_REC: case ZORAN_MAP_MODE_JPG_PLAY: { struct zoran_sync bs; if (fh->map_mode == ZORAN_MAP_MODE_JPG_PLAY) buf_type = V4L2_BUF_TYPE_VIDEO_OUTPUT; else buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE; if (buf->type != buf_type) { dprintk(1, KERN_ERR "%s: VIDIOC_QBUF - invalid buf->type=%d for map_mode=%d\n", ZR_DEVNAME(zr), buf->type, fh->map_mode); res = -EINVAL; goto dqbuf_unlock_and_return; } num = zr->jpg_pend[zr->jpg_que_tail & BUZ_MASK_FRAME]; if (file->f_flags & O_NONBLOCK && zr->jpg_buffers.buffer[num].state != BUZ_STATE_DONE) { res = -EAGAIN; goto dqbuf_unlock_and_return; } bs.frame = 0; /* suppress compiler warning */ res = jpg_sync(fh, &bs); if (res) goto dqbuf_unlock_and_return; res = zoran_v4l2_buffer_status(fh, buf, bs.frame); break; } default: dprintk(1, KERN_ERR "%s: VIDIOC_DQBUF - unsupported type %d\n", ZR_DEVNAME(zr), buf->type); res = -EINVAL; break; } dqbuf_unlock_and_return: mutex_unlock(&zr->resource_lock); return res; } static int zoran_streamon(struct file *file, void *__fh, enum v4l2_buf_type type) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; int res = 0; mutex_lock(&zr->resource_lock); switch (fh->map_mode) { case ZORAN_MAP_MODE_RAW: /* raw capture */ if (zr->v4l_buffers.active != ZORAN_ACTIVE || fh->buffers.active != ZORAN_ACTIVE) { res = -EBUSY; goto strmon_unlock_and_return; } zr->v4l_buffers.active = fh->buffers.active = ZORAN_LOCKED; zr->v4l_settings = fh->v4l_settings; zr->v4l_sync_tail = zr->v4l_pend_tail; if (!zr->v4l_memgrab_active && zr->v4l_pend_head != zr->v4l_pend_tail) { zr36057_set_memgrab(zr, 1); } break; case ZORAN_MAP_MODE_JPG_REC: case ZORAN_MAP_MODE_JPG_PLAY: /* what is the codec mode right now? */ if (zr->jpg_buffers.active != ZORAN_ACTIVE || fh->buffers.active != ZORAN_ACTIVE) { res = -EBUSY; goto strmon_unlock_and_return; } zr->jpg_buffers.active = fh->buffers.active = ZORAN_LOCKED; if (zr->jpg_que_head != zr->jpg_que_tail) { /* Start the jpeg codec when the first frame is queued */ jpeg_start(zr); } break; default: dprintk(1, KERN_ERR "%s: VIDIOC_STREAMON - invalid map mode %d\n", ZR_DEVNAME(zr), fh->map_mode); res = -EINVAL; break; } strmon_unlock_and_return: mutex_unlock(&zr->resource_lock); return res; } static int zoran_streamoff(struct file *file, void *__fh, enum v4l2_buf_type type) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; int i, res = 0; unsigned long flags; mutex_lock(&zr->resource_lock); switch (fh->map_mode) { case ZORAN_MAP_MODE_RAW: /* raw capture */ if (fh->buffers.active == ZORAN_FREE && zr->v4l_buffers.active != ZORAN_FREE) { res = -EPERM; /* stay off other's settings! 
*/ goto strmoff_unlock_and_return; } if (zr->v4l_buffers.active == ZORAN_FREE) goto strmoff_unlock_and_return; spin_lock_irqsave(&zr->spinlock, flags); /* unload capture */ if (zr->v4l_memgrab_active) { zr36057_set_memgrab(zr, 0); } for (i = 0; i < fh->buffers.num_buffers; i++) zr->v4l_buffers.buffer[i].state = BUZ_STATE_USER; fh->buffers = zr->v4l_buffers; zr->v4l_buffers.active = fh->buffers.active = ZORAN_FREE; zr->v4l_grab_seq = 0; zr->v4l_pend_head = zr->v4l_pend_tail = 0; zr->v4l_sync_tail = 0; spin_unlock_irqrestore(&zr->spinlock, flags); break; case ZORAN_MAP_MODE_JPG_REC: case ZORAN_MAP_MODE_JPG_PLAY: if (fh->buffers.active == ZORAN_FREE && zr->jpg_buffers.active != ZORAN_FREE) { res = -EPERM; /* stay off other's settings! */ goto strmoff_unlock_and_return; } if (zr->jpg_buffers.active == ZORAN_FREE) goto strmoff_unlock_and_return; res = jpg_qbuf(fh, -1, (fh->map_mode == ZORAN_MAP_MODE_JPG_REC) ? BUZ_MODE_MOTION_COMPRESS : BUZ_MODE_MOTION_DECOMPRESS); if (res) goto strmoff_unlock_and_return; break; default: dprintk(1, KERN_ERR "%s: VIDIOC_STREAMOFF - invalid map mode %d\n", ZR_DEVNAME(zr), fh->map_mode); res = -EINVAL; break; } strmoff_unlock_and_return: mutex_unlock(&zr->resource_lock); return res; } static int zoran_queryctrl(struct file *file, void *__fh, struct v4l2_queryctrl *ctrl) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; /* we only support hue/saturation/contrast/brightness */ if (ctrl->id < V4L2_CID_BRIGHTNESS || ctrl->id > V4L2_CID_HUE) return -EINVAL; decoder_call(zr, core, queryctrl, ctrl); return 0; } static int zoran_g_ctrl(struct file *file, void *__fh, struct v4l2_control *ctrl) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; /* we only support hue/saturation/contrast/brightness */ if (ctrl->id < V4L2_CID_BRIGHTNESS || ctrl->id > V4L2_CID_HUE) return -EINVAL; mutex_lock(&zr->resource_lock); decoder_call(zr, core, g_ctrl, ctrl); mutex_unlock(&zr->resource_lock); return 0; } static int zoran_s_ctrl(struct file *file, void *__fh, struct v4l2_control *ctrl) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; /* we only support hue/saturation/contrast/brightness */ if (ctrl->id < V4L2_CID_BRIGHTNESS || ctrl->id > V4L2_CID_HUE) return -EINVAL; mutex_lock(&zr->resource_lock); decoder_call(zr, core, s_ctrl, ctrl); mutex_unlock(&zr->resource_lock); return 0; } static int zoran_g_std(struct file *file, void *__fh, v4l2_std_id *std) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; mutex_lock(&zr->resource_lock); *std = zr->norm; mutex_unlock(&zr->resource_lock); return 0; } static int zoran_s_std(struct file *file, void *__fh, v4l2_std_id *std) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; int res = 0; mutex_lock(&zr->resource_lock); res = zoran_set_norm(zr, *std); if (res) goto sstd_unlock_and_return; res = wait_grab_pending(zr); sstd_unlock_and_return: mutex_unlock(&zr->resource_lock); return res; } static int zoran_enum_input(struct file *file, void *__fh, struct v4l2_input *inp) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; if (inp->index >= zr->card.inputs) return -EINVAL; strncpy(inp->name, zr->card.input[inp->index].name, sizeof(inp->name) - 1); inp->type = V4L2_INPUT_TYPE_CAMERA; inp->std = V4L2_STD_ALL; /* Get status of video decoder */ mutex_lock(&zr->resource_lock); decoder_call(zr, video, g_input_status, &inp->status); mutex_unlock(&zr->resource_lock); return 0; } static int zoran_g_input(struct file *file, void *__fh, unsigned int *input) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; 
mutex_lock(&zr->resource_lock); *input = zr->input; mutex_unlock(&zr->resource_lock); return 0; } static int zoran_s_input(struct file *file, void *__fh, unsigned int input) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; int res; mutex_lock(&zr->resource_lock); res = zoran_set_input(zr, input); if (res) goto sinput_unlock_and_return; /* Make sure the changes come into effect */ res = wait_grab_pending(zr); sinput_unlock_and_return: mutex_unlock(&zr->resource_lock); return res; } static int zoran_enum_output(struct file *file, void *__fh, struct v4l2_output *outp) { if (outp->index != 0) return -EINVAL; outp->index = 0; outp->type = V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY; strncpy(outp->name, "Autodetect", sizeof(outp->name)-1); return 0; } static int zoran_g_output(struct file *file, void *__fh, unsigned int *output) { *output = 0; return 0; } static int zoran_s_output(struct file *file, void *__fh, unsigned int output) { if (output != 0) return -EINVAL; return 0; } /* cropping (sub-frame capture) */ static int zoran_cropcap(struct file *file, void *__fh, struct v4l2_cropcap *cropcap) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; int type = cropcap->type, res = 0; memset(cropcap, 0, sizeof(*cropcap)); cropcap->type = type; mutex_lock(&zr->resource_lock); if (cropcap->type != V4L2_BUF_TYPE_VIDEO_OUTPUT && (cropcap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || fh->map_mode == ZORAN_MAP_MODE_RAW)) { dprintk(1, KERN_ERR "%s: VIDIOC_CROPCAP - subcapture only supported for compressed capture\n", ZR_DEVNAME(zr)); res = -EINVAL; goto cropcap_unlock_and_return; } cropcap->bounds.top = cropcap->bounds.left = 0; cropcap->bounds.width = BUZ_MAX_WIDTH; cropcap->bounds.height = BUZ_MAX_HEIGHT; cropcap->defrect.top = cropcap->defrect.left = 0; cropcap->defrect.width = BUZ_MIN_WIDTH; cropcap->defrect.height = BUZ_MIN_HEIGHT; cropcap_unlock_and_return: mutex_unlock(&zr->resource_lock); return res; } static int zoran_g_crop(struct file *file, void *__fh, struct v4l2_crop *crop) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; int type = crop->type, res = 0; memset(crop, 0, sizeof(*crop)); crop->type = type; mutex_lock(&zr->resource_lock); if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT && (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || fh->map_mode == ZORAN_MAP_MODE_RAW)) { dprintk(1, KERN_ERR "%s: VIDIOC_G_CROP - subcapture only supported for compressed capture\n", ZR_DEVNAME(zr)); res = -EINVAL; goto gcrop_unlock_and_return; } crop->c.top = fh->jpg_settings.img_y; crop->c.left = fh->jpg_settings.img_x; crop->c.width = fh->jpg_settings.img_width; crop->c.height = fh->jpg_settings.img_height; gcrop_unlock_and_return: mutex_unlock(&zr->resource_lock); return res; } static int zoran_s_crop(struct file *file, void *__fh, struct v4l2_crop *crop) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; int res = 0; struct zoran_jpg_settings settings; settings = fh->jpg_settings; mutex_lock(&zr->resource_lock); if (fh->buffers.allocated) { dprintk(1, KERN_ERR "%s: VIDIOC_S_CROP - cannot change settings while active\n", ZR_DEVNAME(zr)); res = -EBUSY; goto scrop_unlock_and_return; } if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT && (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || fh->map_mode == ZORAN_MAP_MODE_RAW)) { dprintk(1, KERN_ERR "%s: VIDIOC_G_CROP - subcapture only supported for compressed capture\n", ZR_DEVNAME(zr)); res = -EINVAL; goto scrop_unlock_and_return; } /* move into a form that we understand */ settings.img_x = crop->c.left; settings.img_y = crop->c.top; settings.img_width = 
crop->c.width; settings.img_height = crop->c.height; /* check validity */ res = zoran_check_jpg_settings(zr, &settings, 0); if (res) goto scrop_unlock_and_return; /* accept */ fh->jpg_settings = settings; scrop_unlock_and_return: mutex_unlock(&zr->resource_lock); return res; } static int zoran_g_jpegcomp(struct file *file, void *__fh, struct v4l2_jpegcompression *params) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; memset(params, 0, sizeof(*params)); mutex_lock(&zr->resource_lock); params->quality = fh->jpg_settings.jpg_comp.quality; params->APPn = fh->jpg_settings.jpg_comp.APPn; memcpy(params->APP_data, fh->jpg_settings.jpg_comp.APP_data, fh->jpg_settings.jpg_comp.APP_len); params->APP_len = fh->jpg_settings.jpg_comp.APP_len; memcpy(params->COM_data, fh->jpg_settings.jpg_comp.COM_data, fh->jpg_settings.jpg_comp.COM_len); params->COM_len = fh->jpg_settings.jpg_comp.COM_len; params->jpeg_markers = fh->jpg_settings.jpg_comp.jpeg_markers; mutex_unlock(&zr->resource_lock); return 0; } static int zoran_s_jpegcomp(struct file *file, void *__fh, struct v4l2_jpegcompression *params) { struct zoran_fh *fh = __fh; struct zoran *zr = fh->zr; int res = 0; struct zoran_jpg_settings settings; settings = fh->jpg_settings; settings.jpg_comp = *params; mutex_lock(&zr->resource_lock); if (fh->buffers.active != ZORAN_FREE) { dprintk(1, KERN_WARNING "%s: VIDIOC_S_JPEGCOMP called while in playback/capture mode\n", ZR_DEVNAME(zr)); res = -EBUSY; goto sjpegc_unlock_and_return; } res = zoran_check_jpg_settings(zr, &settings, 0); if (res) goto sjpegc_unlock_and_return; if (!fh->buffers.allocated) fh->buffers.buffer_size = zoran_v4l2_calc_bufsize(&fh->jpg_settings); fh->jpg_settings.jpg_comp = *params = settings.jpg_comp; sjpegc_unlock_and_return: mutex_unlock(&zr->resource_lock); return res; } static unsigned int zoran_poll (struct file *file, poll_table *wait) { struct zoran_fh *fh = file->private_data; struct zoran *zr = fh->zr; int res = 0, frame; unsigned long flags; /* we should check whether buffers are ready to be synced on * (w/o waits - O_NONBLOCK) here * if ready for read (sync), return POLLIN|POLLRDNORM, * if ready for write (sync), return POLLOUT|POLLWRNORM, * if error, return POLLERR, * if no buffers queued or so, return POLLNVAL */ mutex_lock(&zr->resource_lock); switch (fh->map_mode) { case ZORAN_MAP_MODE_RAW: poll_wait(file, &zr->v4l_capq, wait); frame = zr->v4l_pend[zr->v4l_sync_tail & V4L_MASK_FRAME]; spin_lock_irqsave(&zr->spinlock, flags); dprintk(3, KERN_DEBUG "%s: %s() raw - active=%c, sync_tail=%lu/%c, pend_tail=%lu, pend_head=%lu\n", ZR_DEVNAME(zr), __func__, "FAL"[fh->buffers.active], zr->v4l_sync_tail, "UPMD"[zr->v4l_buffers.buffer[frame].state], zr->v4l_pend_tail, zr->v4l_pend_head); /* Process is the one capturing? */ if (fh->buffers.active != ZORAN_FREE && /* Buffer ready to DQBUF? 
*/ zr->v4l_buffers.buffer[frame].state == BUZ_STATE_DONE) res = POLLIN | POLLRDNORM; spin_unlock_irqrestore(&zr->spinlock, flags); break; case ZORAN_MAP_MODE_JPG_REC: case ZORAN_MAP_MODE_JPG_PLAY: poll_wait(file, &zr->jpg_capq, wait); frame = zr->jpg_pend[zr->jpg_que_tail & BUZ_MASK_FRAME]; spin_lock_irqsave(&zr->spinlock, flags); dprintk(3, KERN_DEBUG "%s: %s() jpg - active=%c, que_tail=%lu/%c, que_head=%lu, dma=%lu/%lu\n", ZR_DEVNAME(zr), __func__, "FAL"[fh->buffers.active], zr->jpg_que_tail, "UPMD"[zr->jpg_buffers.buffer[frame].state], zr->jpg_que_head, zr->jpg_dma_tail, zr->jpg_dma_head); if (fh->buffers.active != ZORAN_FREE && zr->jpg_buffers.buffer[frame].state == BUZ_STATE_DONE) { if (fh->map_mode == ZORAN_MAP_MODE_JPG_REC) res = POLLIN | POLLRDNORM; else res = POLLOUT | POLLWRNORM; } spin_unlock_irqrestore(&zr->spinlock, flags); break; default: dprintk(1, KERN_ERR "%s: %s - internal error, unknown map_mode=%d\n", ZR_DEVNAME(zr), __func__, fh->map_mode); res = POLLNVAL; } mutex_unlock(&zr->resource_lock); return res; } /* * This maps the buffers to user space. * * Depending on the state of fh->map_mode * the V4L or the MJPEG buffers are mapped * per buffer or all together * * Note that we need to connect to an * unmap signal event to unmap and de-allocate * the buffers accordingly (zoran_vm_close()) */ static void zoran_vm_open (struct vm_area_struct *vma) { struct zoran_mapping *map = vma->vm_private_data; map->count++; } static void zoran_vm_close (struct vm_area_struct *vma) { struct zoran_mapping *map = vma->vm_private_data; struct zoran_fh *fh = map->file->private_data; struct zoran *zr = fh->zr; int i; if (--map->count > 0) return; dprintk(3, KERN_INFO "%s: %s - munmap(%s)\n", ZR_DEVNAME(zr), __func__, mode_name(fh->map_mode)); for (i = 0; i < fh->buffers.num_buffers; i++) { if (fh->buffers.buffer[i].map == map) fh->buffers.buffer[i].map = NULL; } kfree(map); /* Any buffers still mapped?
*/ for (i = 0; i < fh->buffers.num_buffers; i++) if (fh->buffers.buffer[i].map) return; dprintk(3, KERN_INFO "%s: %s - free %s buffers\n", ZR_DEVNAME(zr), __func__, mode_name(fh->map_mode)); mutex_lock(&zr->resource_lock); if (fh->map_mode == ZORAN_MAP_MODE_RAW) { if (fh->buffers.active != ZORAN_FREE) { unsigned long flags; spin_lock_irqsave(&zr->spinlock, flags); zr36057_set_memgrab(zr, 0); zr->v4l_buffers.allocated = 0; zr->v4l_buffers.active = fh->buffers.active = ZORAN_FREE; spin_unlock_irqrestore(&zr->spinlock, flags); } v4l_fbuffer_free(fh); } else { if (fh->buffers.active != ZORAN_FREE) { jpg_qbuf(fh, -1, zr->codec_mode); zr->jpg_buffers.allocated = 0; zr->jpg_buffers.active = fh->buffers.active = ZORAN_FREE; } jpg_fbuffer_free(fh); } mutex_unlock(&zr->resource_lock); } static const struct vm_operations_struct zoran_vm_ops = { .open = zoran_vm_open, .close = zoran_vm_close, }; static int zoran_mmap (struct file *file, struct vm_area_struct *vma) { struct zoran_fh *fh = file->private_data; struct zoran *zr = fh->zr; unsigned long size = (vma->vm_end - vma->vm_start); unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; int i, j; unsigned long page, start = vma->vm_start, todo, pos, fraglen; int first, last; struct zoran_mapping *map; int res = 0; dprintk(3, KERN_INFO "%s: %s(%s) of 0x%08lx-0x%08lx (size=%lu)\n", ZR_DEVNAME(zr), __func__, mode_name(fh->map_mode), vma->vm_start, vma->vm_end, size); if (!(vma->vm_flags & VM_SHARED) || !(vma->vm_flags & VM_READ) || !(vma->vm_flags & VM_WRITE)) { dprintk(1, KERN_ERR "%s: %s - no MAP_SHARED/PROT_{READ,WRITE} given\n", ZR_DEVNAME(zr), __func__); return -EINVAL; } mutex_lock(&zr->resource_lock); if (!fh->buffers.allocated) { dprintk(1, KERN_ERR "%s: %s(%s) - buffers not yet allocated\n", ZR_DEVNAME(zr), __func__, mode_name(fh->map_mode)); res = -ENOMEM; goto mmap_unlock_and_return; } first = offset / fh->buffers.buffer_size; last = first - 1 + size / fh->buffers.buffer_size; if (offset % fh->buffers.buffer_size != 0 || size % fh->buffers.buffer_size != 0 || first < 0 || last < 0 || first >= fh->buffers.num_buffers || last >= fh->buffers.buffer_size) { dprintk(1, KERN_ERR "%s: %s(%s) - offset=%lu or size=%lu invalid for bufsize=%d and numbufs=%d\n", ZR_DEVNAME(zr), __func__, mode_name(fh->map_mode), offset, size, fh->buffers.buffer_size, fh->buffers.num_buffers); res = -EINVAL; goto mmap_unlock_and_return; } /* Check if any buffers are already mapped */ for (i = first; i <= last; i++) { if (fh->buffers.buffer[i].map) { dprintk(1, KERN_ERR "%s: %s(%s) - buffer %d already mapped\n", ZR_DEVNAME(zr), __func__, mode_name(fh->map_mode), i); res = -EBUSY; goto mmap_unlock_and_return; } } /* map these buffers */ map = kmalloc(sizeof(struct zoran_mapping), GFP_KERNEL); if (!map) { res = -ENOMEM; goto mmap_unlock_and_return; } map->file = file; map->count = 1; vma->vm_ops = &zoran_vm_ops; vma->vm_flags |= VM_DONTEXPAND; vma->vm_private_data = map; if (fh->map_mode == ZORAN_MAP_MODE_RAW) { for (i = first; i <= last; i++) { todo = size; if (todo > fh->buffers.buffer_size) todo = fh->buffers.buffer_size; page = fh->buffers.buffer[i].v4l.fbuffer_phys; if (remap_pfn_range(vma, start, page >> PAGE_SHIFT, todo, PAGE_SHARED)) { dprintk(1, KERN_ERR "%s: %s(V4L) - remap_pfn_range failed\n", ZR_DEVNAME(zr), __func__); res = -EAGAIN; goto mmap_unlock_and_return; } size -= todo; start += todo; fh->buffers.buffer[i].map = map; if (size == 0) break; } } else { for (i = first; i <= last; i++) { for (j = 0; j < fh->buffers.buffer_size / PAGE_SIZE; j++) { fraglen = 
(le32_to_cpu(fh->buffers.buffer[i].jpg. frag_tab[2 * j + 1]) & ~1) << 1; todo = size; if (todo > fraglen) todo = fraglen; pos = le32_to_cpu(fh->buffers. buffer[i].jpg.frag_tab[2 * j]); /* should just be pos on i386 */ page = virt_to_phys(bus_to_virt(pos)) >> PAGE_SHIFT; if (remap_pfn_range(vma, start, page, todo, PAGE_SHARED)) { dprintk(1, KERN_ERR "%s: %s(V4L) - remap_pfn_range failed\n", ZR_DEVNAME(zr), __func__); res = -EAGAIN; goto mmap_unlock_and_return; } size -= todo; start += todo; if (size == 0) break; if (le32_to_cpu(fh->buffers.buffer[i].jpg. frag_tab[2 * j + 1]) & 1) break; /* was last fragment */ } fh->buffers.buffer[i].map = map; if (size == 0) break; } } mmap_unlock_and_return: mutex_unlock(&zr->resource_lock); return res; } static const struct v4l2_ioctl_ops zoran_ioctl_ops = { .vidioc_querycap = zoran_querycap, .vidioc_cropcap = zoran_cropcap, .vidioc_s_crop = zoran_s_crop, .vidioc_g_crop = zoran_g_crop, .vidioc_enum_input = zoran_enum_input, .vidioc_g_input = zoran_g_input, .vidioc_s_input = zoran_s_input, .vidioc_enum_output = zoran_enum_output, .vidioc_g_output = zoran_g_output, .vidioc_s_output = zoran_s_output, .vidioc_g_fbuf = zoran_g_fbuf, .vidioc_s_fbuf = zoran_s_fbuf, .vidioc_g_std = zoran_g_std, .vidioc_s_std = zoran_s_std, .vidioc_g_jpegcomp = zoran_g_jpegcomp, .vidioc_s_jpegcomp = zoran_s_jpegcomp, .vidioc_overlay = zoran_overlay, .vidioc_reqbufs = zoran_reqbufs, .vidioc_querybuf = zoran_querybuf, .vidioc_qbuf = zoran_qbuf, .vidioc_dqbuf = zoran_dqbuf, .vidioc_streamon = zoran_streamon, .vidioc_streamoff = zoran_streamoff, .vidioc_enum_fmt_vid_cap = zoran_enum_fmt_vid_cap, .vidioc_enum_fmt_vid_out = zoran_enum_fmt_vid_out, .vidioc_enum_fmt_vid_overlay = zoran_enum_fmt_vid_overlay, .vidioc_g_fmt_vid_cap = zoran_g_fmt_vid_cap, .vidioc_g_fmt_vid_out = zoran_g_fmt_vid_out, .vidioc_g_fmt_vid_overlay = zoran_g_fmt_vid_overlay, .vidioc_s_fmt_vid_cap = zoran_s_fmt_vid_cap, .vidioc_s_fmt_vid_out = zoran_s_fmt_vid_out, .vidioc_s_fmt_vid_overlay = zoran_s_fmt_vid_overlay, .vidioc_try_fmt_vid_cap = zoran_try_fmt_vid_cap, .vidioc_try_fmt_vid_out = zoran_try_fmt_vid_out, .vidioc_try_fmt_vid_overlay = zoran_try_fmt_vid_overlay, .vidioc_queryctrl = zoran_queryctrl, .vidioc_s_ctrl = zoran_s_ctrl, .vidioc_g_ctrl = zoran_g_ctrl, }; /* please use zr->resource_lock consistently and kill this wrapper */ static long zoran_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct zoran_fh *fh = file->private_data; struct zoran *zr = fh->zr; int ret; mutex_lock(&zr->other_lock); ret = video_ioctl2(file, cmd, arg); mutex_unlock(&zr->other_lock); return ret; } static const struct v4l2_file_operations zoran_fops = { .owner = THIS_MODULE, .open = zoran_open, .release = zoran_close, .unlocked_ioctl = zoran_ioctl, .read = zoran_read, .write = zoran_write, .mmap = zoran_mmap, .poll = zoran_poll, }; struct video_device zoran_template __devinitdata = { .name = ZORAN_NAME, .fops = &zoran_fops, .ioctl_ops = &zoran_ioctl_ops, .release = &zoran_vdev_release, .tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM, };
gpl-2.0
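The zoran ioctl handlers in the row above implement the standard V4L2 mmap streaming contract. The following user-space sketch traces one pass through that path; it is illustrative only, and the device node, buffer count, and minimal error handling are assumptions rather than anything the driver mandates.

/* Hedged sketch of the REQBUFS -> mmap -> QBUF -> STREAMON -> DQBUF cycle
 * served by zoran_reqbufs()/zoran_qbuf()/zoran_streamon()/zoran_dqbuf()
 * above. "/dev/video0" and the buffer count of four are assumptions. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/videodev2.h>

int grab_one_frame(void)
{
	struct v4l2_requestbuffers req;
	struct v4l2_buffer buf;
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	void *bufmem[4];
	unsigned int i;
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.count = 4;			/* zoran_reqbufs() clamps this to [2, v4l_nbufs] */
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;	/* the only memory type zoran_reqbufs() accepts */
	if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
		goto fail;

	for (i = 0; i < req.count && i < 4; i++) {
		memset(&buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = i;
		if (ioctl(fd, VIDIOC_QUERYBUF, &buf) < 0)	/* zoran_querybuf() */
			goto fail;
		/* zoran_mmap() insists that offset and length are multiples of
		 * the internal buffer size, so map each buffer at its own offset */
		bufmem[i] = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
				 MAP_SHARED, fd, buf.m.offset);
		if (bufmem[i] == MAP_FAILED)
			goto fail;
		if (ioctl(fd, VIDIOC_QBUF, &buf) < 0)		/* zoran_qbuf() */
			goto fail;
	}

	if (ioctl(fd, VIDIOC_STREAMON, &type) < 0)		/* zoran_streamon() */
		goto fail;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_MMAP;
	/* zoran_dqbuf() blocks in v4l_sync(); with O_NONBLOCK it returns
	 * -EAGAIN until the frame state reaches BUZ_STATE_DONE */
	if (ioctl(fd, VIDIOC_DQBUF, &buf) < 0)
		goto fail;
	/* frame data now sits at bufmem[buf.index], buf.bytesused bytes long */

	ioctl(fd, VIDIOC_STREAMOFF, &type);			/* zoran_streamoff() */
	close(fd);
	return 0;
fail:
	close(fd);
	return -1;
}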
cile381/H815_kernel
net/wireless/core.c
1070
29332
/* * This is the linux wireless configuration interface. * * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/if.h> #include <linux/module.h> #include <linux/err.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/nl80211.h> #include <linux/debugfs.h> #include <linux/notifier.h> #include <linux/device.h> #include <linux/etherdevice.h> #include <linux/rtnetlink.h> #include <linux/sched.h> #include <net/genetlink.h> #include <net/cfg80211.h> #include "nl80211.h" #include "core.h" #include "sysfs.h" #include "debugfs.h" #include "wext-compat.h" #include "ethtool.h" #include "rdev-ops.h" /* name for sysfs, %d is appended */ #define PHY_NAME "phy" MODULE_AUTHOR("Johannes Berg"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("wireless configuration support"); /* RCU-protected (and cfg80211_mutex for writers) */ LIST_HEAD(cfg80211_rdev_list); int cfg80211_rdev_list_generation; DEFINE_MUTEX(cfg80211_mutex); /* for debugfs */ static struct dentry *ieee80211_debugfs_dir; /* for the cleanup, scan and event works */ struct workqueue_struct *cfg80211_wq; static bool cfg80211_disable_40mhz_24ghz; module_param(cfg80211_disable_40mhz_24ghz, bool, 0644); MODULE_PARM_DESC(cfg80211_disable_40mhz_24ghz, "Disable 40MHz support in the 2.4GHz band"); /* requires cfg80211_mutex to be held! */ struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx) { struct cfg80211_registered_device *result = NULL, *rdev; assert_cfg80211_lock(); list_for_each_entry(rdev, &cfg80211_rdev_list, list) { if (rdev->wiphy_idx == wiphy_idx) { result = rdev; break; } } return result; } int get_wiphy_idx(struct wiphy *wiphy) { struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); return rdev->wiphy_idx; } /* requires cfg80211_rdev_mutex to be held! */ struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx) { struct cfg80211_registered_device *rdev; assert_cfg80211_lock(); rdev = cfg80211_rdev_by_wiphy_idx(wiphy_idx); if (!rdev) return NULL; return &rdev->wiphy; } struct cfg80211_registered_device * cfg80211_get_dev_from_ifindex(struct net *net, int ifindex) { struct cfg80211_registered_device *rdev = ERR_PTR(-ENODEV); struct net_device *dev; mutex_lock(&cfg80211_mutex); dev = dev_get_by_index(net, ifindex); if (!dev) goto out; if (dev->ieee80211_ptr) { rdev = wiphy_to_dev(dev->ieee80211_ptr->wiphy); mutex_lock(&rdev->mtx); } else rdev = ERR_PTR(-ENODEV); dev_put(dev); out: mutex_unlock(&cfg80211_mutex); return rdev; } /* requires cfg80211_mutex to be held */ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, char *newname) { struct cfg80211_registered_device *rdev2; int wiphy_idx, taken = -1, result, digits; assert_cfg80211_lock(); /* prohibit calling the thing phy%d when %d is not its number */ sscanf(newname, PHY_NAME "%d%n", &wiphy_idx, &taken); if (taken == strlen(newname) && wiphy_idx != rdev->wiphy_idx) { /* count number of places needed to print wiphy_idx */ digits = 1; while (wiphy_idx /= 10) digits++; /* * deny the name if it is phy<idx> where <idx> is printed * without leading zeroes. taken == strlen(newname) here */ if (taken == strlen(PHY_NAME) + digits) return -EINVAL; } /* Ignore nop renames */ if (strcmp(newname, dev_name(&rdev->wiphy.dev)) == 0) return 0; /* Ensure another device does not already have this name. 
*/ list_for_each_entry(rdev2, &cfg80211_rdev_list, list) if (strcmp(newname, dev_name(&rdev2->wiphy.dev)) == 0) return -EINVAL; result = device_rename(&rdev->wiphy.dev, newname); if (result) return result; if (rdev->wiphy.debugfsdir && !debugfs_rename(rdev->wiphy.debugfsdir->d_parent, rdev->wiphy.debugfsdir, rdev->wiphy.debugfsdir->d_parent, newname)) pr_err("failed to rename debugfs dir to %s!\n", newname); nl80211_notify_dev_rename(rdev); return 0; } int cfg80211_switch_netns(struct cfg80211_registered_device *rdev, struct net *net) { struct wireless_dev *wdev; int err = 0; if (!(rdev->wiphy.flags & WIPHY_FLAG_NETNS_OK)) return -EOPNOTSUPP; list_for_each_entry(wdev, &rdev->wdev_list, list) { if (!wdev->netdev) continue; wdev->netdev->features &= ~NETIF_F_NETNS_LOCAL; err = dev_change_net_namespace(wdev->netdev, net, "wlan%d"); if (err) break; wdev->netdev->features |= NETIF_F_NETNS_LOCAL; } if (err) { /* failed -- clean up to old netns */ net = wiphy_net(&rdev->wiphy); list_for_each_entry_continue_reverse(wdev, &rdev->wdev_list, list) { if (!wdev->netdev) continue; wdev->netdev->features &= ~NETIF_F_NETNS_LOCAL; err = dev_change_net_namespace(wdev->netdev, net, "wlan%d"); WARN_ON(err); wdev->netdev->features |= NETIF_F_NETNS_LOCAL; } return err; } wiphy_net_set(&rdev->wiphy, net); err = device_rename(&rdev->wiphy.dev, dev_name(&rdev->wiphy.dev)); WARN_ON(err); return 0; } static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data) { struct cfg80211_registered_device *rdev = data; rdev_rfkill_poll(rdev); } void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { lockdep_assert_held(&rdev->devlist_mtx); lockdep_assert_held(&rdev->sched_scan_mtx); if (WARN_ON(wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)) return; if (!wdev->p2p_started) return; rdev_stop_p2p_device(rdev, wdev); wdev->p2p_started = false; rdev->opencount--; if (rdev->scan_req && rdev->scan_req->wdev == wdev) { bool busy = work_busy(&rdev->scan_done_wk); /* * If the work isn't pending or running (in which case it would * be waiting for the lock we hold) the driver didn't properly * cancel the scan when the interface was removed. In this case * warn and leak the scan request object to not crash later. 
*/ WARN_ON(!busy); rdev->scan_req->aborted = true; ___cfg80211_scan_done(rdev, !busy); } } static int cfg80211_rfkill_set_block(void *data, bool blocked) { struct cfg80211_registered_device *rdev = data; struct wireless_dev *wdev; if (!blocked) return 0; rtnl_lock(); /* read-only iteration need not hold the devlist_mtx */ list_for_each_entry(wdev, &rdev->wdev_list, list) { if (wdev->netdev) { dev_close(wdev->netdev); continue; } /* otherwise, check iftype */ switch (wdev->iftype) { case NL80211_IFTYPE_P2P_DEVICE: /* but this requires it */ mutex_lock(&rdev->devlist_mtx); mutex_lock(&rdev->sched_scan_mtx); cfg80211_stop_p2p_device(rdev, wdev); mutex_unlock(&rdev->sched_scan_mtx); mutex_unlock(&rdev->devlist_mtx); break; default: break; } } rtnl_unlock(); return 0; } static void cfg80211_rfkill_sync_work(struct work_struct *work) { struct cfg80211_registered_device *rdev; rdev = container_of(work, struct cfg80211_registered_device, rfkill_sync); cfg80211_rfkill_set_block(rdev, rfkill_blocked(rdev->rfkill)); } static void cfg80211_event_work(struct work_struct *work) { struct cfg80211_registered_device *rdev; rdev = container_of(work, struct cfg80211_registered_device, event_work); rtnl_lock(); cfg80211_lock_rdev(rdev); cfg80211_process_rdev_events(rdev); cfg80211_unlock_rdev(rdev); rtnl_unlock(); } /* exported functions */ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv) { static int wiphy_counter; struct cfg80211_registered_device *rdev; int alloc_size; WARN_ON(ops->add_key && (!ops->del_key || !ops->set_default_key)); WARN_ON(ops->auth && (!ops->assoc || !ops->deauth || !ops->disassoc)); WARN_ON(ops->connect && !ops->disconnect); WARN_ON(ops->join_ibss && !ops->leave_ibss); WARN_ON(ops->add_virtual_intf && !ops->del_virtual_intf); WARN_ON(ops->add_station && !ops->del_station); WARN_ON(ops->add_mpath && !ops->del_mpath); WARN_ON(ops->join_mesh && !ops->leave_mesh); alloc_size = sizeof(*rdev) + sizeof_priv; rdev = kzalloc(alloc_size, GFP_KERNEL); if (!rdev) return NULL; rdev->ops = ops; mutex_lock(&cfg80211_mutex); rdev->wiphy_idx = wiphy_counter++; if (unlikely(rdev->wiphy_idx < 0)) { wiphy_counter--; mutex_unlock(&cfg80211_mutex); /* ugh, wrapped! 
*/ kfree(rdev); return NULL; } mutex_unlock(&cfg80211_mutex); /* give it a proper name */ dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx); mutex_init(&rdev->mtx); mutex_init(&rdev->devlist_mtx); mutex_init(&rdev->sched_scan_mtx); INIT_LIST_HEAD(&rdev->wdev_list); INIT_LIST_HEAD(&rdev->beacon_registrations); spin_lock_init(&rdev->beacon_registrations_lock); spin_lock_init(&rdev->bss_lock); INIT_LIST_HEAD(&rdev->bss_list); INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done); INIT_WORK(&rdev->sched_scan_results_wk, __cfg80211_sched_scan_results); INIT_DELAYED_WORK(&rdev->dfs_update_channels_wk, cfg80211_dfs_channels_update_work); #ifdef CONFIG_CFG80211_WEXT rdev->wiphy.wext = &cfg80211_wext_handler; #endif device_initialize(&rdev->wiphy.dev); rdev->wiphy.dev.class = &ieee80211_class; rdev->wiphy.dev.platform_data = rdev; #ifdef CONFIG_CFG80211_DEFAULT_PS rdev->wiphy.flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; #endif wiphy_net_set(&rdev->wiphy, &init_net); rdev->rfkill_ops.set_block = cfg80211_rfkill_set_block; rdev->rfkill = rfkill_alloc(dev_name(&rdev->wiphy.dev), &rdev->wiphy.dev, RFKILL_TYPE_WLAN, &rdev->rfkill_ops, rdev); if (!rdev->rfkill) { kfree(rdev); return NULL; } INIT_WORK(&rdev->rfkill_sync, cfg80211_rfkill_sync_work); INIT_WORK(&rdev->conn_work, cfg80211_conn_work); INIT_WORK(&rdev->event_work, cfg80211_event_work); init_waitqueue_head(&rdev->dev_wait); /* * Initialize wiphy parameters to IEEE 802.11 MIB default values. * Fragmentation and RTS threshold are disabled by default with the * special -1 value. */ rdev->wiphy.retry_short = 7; rdev->wiphy.retry_long = 4; rdev->wiphy.frag_threshold = (u32) -1; rdev->wiphy.rts_threshold = (u32) -1; rdev->wiphy.coverage_class = 0; rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH; return &rdev->wiphy; } EXPORT_SYMBOL(wiphy_new); static int wiphy_verify_combinations(struct wiphy *wiphy) { const struct ieee80211_iface_combination *c; int i, j; for (i = 0; i < wiphy->n_iface_combinations; i++) { u32 cnt = 0; u16 all_iftypes = 0; c = &wiphy->iface_combinations[i]; /* * Combinations with just one interface aren't real, * however we make an exception for DFS. */ if (WARN_ON((c->max_interfaces < 2) && !c->radar_detect_widths)) return -EINVAL; /* Need at least one channel */ if (WARN_ON(!c->num_different_channels)) return -EINVAL; /* * Put a sane limit on maximum number of different * channels to simplify channel accounting code. */ if (WARN_ON(c->num_different_channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS)) return -EINVAL; /* DFS only works on one channel. */ if (WARN_ON(c->radar_detect_widths && (c->num_different_channels > 1))) return -EINVAL; if (WARN_ON(!c->n_limits)) return -EINVAL; for (j = 0; j < c->n_limits; j++) { u16 types = c->limits[j].types; /* * interface types shouldn't overlap, this is * used in cfg80211_can_change_interface() */ if (WARN_ON(types & all_iftypes)) return -EINVAL; all_iftypes |= types; if (WARN_ON(!c->limits[j].max)) return -EINVAL; /* Shouldn't list software iftypes in combinations! */ if (WARN_ON(wiphy->software_iftypes & types)) return -EINVAL; /* Only a single P2P_DEVICE can be allowed */ if (WARN_ON(types & BIT(NL80211_IFTYPE_P2P_DEVICE) && c->limits[j].max > 1)) return -EINVAL; cnt += c->limits[j].max; /* * Don't advertise an unsupported type * in a combination. */ if (WARN_ON((wiphy->interface_modes & types) != types)) return -EINVAL; } /* You can't even choose that many! 
*/ if (WARN_ON(cnt < c->max_interfaces)) return -EINVAL; } return 0; } int wiphy_register(struct wiphy *wiphy) { struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); int res; enum ieee80211_band band; struct ieee80211_supported_band *sband; bool have_band = false; int i; u16 ifmodes = wiphy->interface_modes; #ifdef CONFIG_PM if (WARN_ON((wiphy->wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) && !(wiphy->wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY))) return -EINVAL; #endif if (WARN_ON(wiphy->ap_sme_capa && !(wiphy->flags & WIPHY_FLAG_HAVE_AP_SME))) return -EINVAL; if (WARN_ON(wiphy->addresses && !wiphy->n_addresses)) return -EINVAL; if (WARN_ON(wiphy->addresses && !is_zero_ether_addr(wiphy->perm_addr) && memcmp(wiphy->perm_addr, wiphy->addresses[0].addr, ETH_ALEN))) return -EINVAL; if (WARN_ON(wiphy->max_acl_mac_addrs && (!(wiphy->flags & WIPHY_FLAG_HAVE_AP_SME) || !rdev->ops->set_mac_acl))) return -EINVAL; if (wiphy->addresses) memcpy(wiphy->perm_addr, wiphy->addresses[0].addr, ETH_ALEN); /* sanity check ifmodes */ WARN_ON(!ifmodes); ifmodes &= ((1 << NUM_NL80211_IFTYPES) - 1) & ~1; if (WARN_ON(ifmodes != wiphy->interface_modes)) wiphy->interface_modes = ifmodes; res = wiphy_verify_combinations(wiphy); if (res) return res; /* sanity check supported bands/channels */ for (band = 0; band < IEEE80211_NUM_BANDS; band++) { sband = wiphy->bands[band]; if (!sband) continue; sband->band = band; if (WARN_ON(!sband->n_channels)) return -EINVAL; /* * on 60gHz band, there are no legacy rates, so * n_bitrates is 0 */ if (WARN_ON(band != IEEE80211_BAND_60GHZ && !sband->n_bitrates)) return -EINVAL; /* * Since cfg80211_disable_40mhz_24ghz is global, we can * modify the sband's ht data even if the driver uses a * global structure for that. */ if (cfg80211_disable_40mhz_24ghz && band == IEEE80211_BAND_2GHZ && sband->ht_cap.ht_supported) { sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SGI_40; } /* * Since we use a u32 for rate bitmaps in * ieee80211_get_response_rate, we cannot * have more than 32 legacy rates. 
*/ if (WARN_ON(sband->n_bitrates > 32)) return -EINVAL; for (i = 0; i < sband->n_channels; i++) { sband->channels[i].orig_flags = sband->channels[i].flags; sband->channels[i].orig_mag = INT_MAX; sband->channels[i].orig_mpwr = sband->channels[i].max_power; sband->channels[i].band = band; } have_band = true; } if (!have_band) { WARN_ON(1); return -EINVAL; } #ifdef CONFIG_PM if (rdev->wiphy.wowlan.n_patterns) { if (WARN_ON(!rdev->wiphy.wowlan.pattern_min_len || rdev->wiphy.wowlan.pattern_min_len > rdev->wiphy.wowlan.pattern_max_len)) return -EINVAL; } #endif /* check and set up bitrates */ ieee80211_set_bitrate_flags(wiphy); mutex_lock(&cfg80211_mutex); res = device_add(&rdev->wiphy.dev); if (res) { mutex_unlock(&cfg80211_mutex); return res; } /* set up regulatory info */ wiphy_regulatory_register(wiphy); list_add_rcu(&rdev->list, &cfg80211_rdev_list); cfg80211_rdev_list_generation++; /* add to debugfs */ rdev->wiphy.debugfsdir = debugfs_create_dir(wiphy_name(&rdev->wiphy), ieee80211_debugfs_dir); if (IS_ERR(rdev->wiphy.debugfsdir)) rdev->wiphy.debugfsdir = NULL; if (wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY) { struct regulatory_request request; request.wiphy_idx = get_wiphy_idx(wiphy); request.initiator = NL80211_REGDOM_SET_BY_DRIVER; request.alpha2[0] = '9'; request.alpha2[1] = '9'; nl80211_send_reg_change_event(&request); } cfg80211_debugfs_rdev_add(rdev); mutex_unlock(&cfg80211_mutex); /* * due to a locking dependency this has to be outside of the * cfg80211_mutex lock */ res = rfkill_register(rdev->rfkill); if (res) { device_del(&rdev->wiphy.dev); mutex_lock(&cfg80211_mutex); debugfs_remove_recursive(rdev->wiphy.debugfsdir); list_del_rcu(&rdev->list); wiphy_regulatory_deregister(wiphy); mutex_unlock(&cfg80211_mutex); return res; } rtnl_lock(); rdev->wiphy.registered = true; rtnl_unlock(); return 0; } EXPORT_SYMBOL(wiphy_register); void wiphy_rfkill_start_polling(struct wiphy *wiphy) { struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); if (!rdev->ops->rfkill_poll) return; rdev->rfkill_ops.poll = cfg80211_rfkill_poll; rfkill_resume_polling(rdev->rfkill); } EXPORT_SYMBOL(wiphy_rfkill_start_polling); void wiphy_rfkill_stop_polling(struct wiphy *wiphy) { struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); rfkill_pause_polling(rdev->rfkill); } EXPORT_SYMBOL(wiphy_rfkill_stop_polling); void wiphy_unregister(struct wiphy *wiphy) { struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); rtnl_lock(); rdev->wiphy.registered = false; rtnl_unlock(); rfkill_unregister(rdev->rfkill); /* protect the device list */ mutex_lock(&cfg80211_mutex); wait_event(rdev->dev_wait, ({ int __count; mutex_lock(&rdev->devlist_mtx); __count = rdev->opencount; mutex_unlock(&rdev->devlist_mtx); __count == 0; })); mutex_lock(&rdev->devlist_mtx); BUG_ON(!list_empty(&rdev->wdev_list)); mutex_unlock(&rdev->devlist_mtx); /* * First remove the hardware from everywhere, this makes * it impossible to find from userspace. */ debugfs_remove_recursive(rdev->wiphy.debugfsdir); list_del_rcu(&rdev->list); synchronize_rcu(); /* * Try to grab rdev->mtx. If a command is still in progress, * hopefully the driver will refuse it since it's tearing * down the device already. We wait for this command to complete * before unlinking the item from the list. * Note: as codified by the BUG_ON above we cannot get here if * a virtual interface is still present. Hence, we can only get * to lock contention here if userspace issues a command that * identified the hardware by wiphy index. 
*/ cfg80211_lock_rdev(rdev); /* nothing */ cfg80211_unlock_rdev(rdev); /* * If this device got a regulatory hint tell core its * free to listen now to a new shiny device regulatory hint */ wiphy_regulatory_deregister(wiphy); cfg80211_rdev_list_generation++; device_del(&rdev->wiphy.dev); mutex_unlock(&cfg80211_mutex); flush_work(&rdev->scan_done_wk); cancel_work_sync(&rdev->conn_work); flush_work(&rdev->event_work); cancel_delayed_work_sync(&rdev->dfs_update_channels_wk); if (rdev->wowlan && rdev->ops->set_wakeup) rdev_set_wakeup(rdev, false); cfg80211_rdev_free_wowlan(rdev); } EXPORT_SYMBOL(wiphy_unregister); void cfg80211_dev_free(struct cfg80211_registered_device *rdev) { struct cfg80211_internal_bss *scan, *tmp; struct cfg80211_beacon_registration *reg, *treg; rfkill_destroy(rdev->rfkill); mutex_destroy(&rdev->mtx); mutex_destroy(&rdev->devlist_mtx); mutex_destroy(&rdev->sched_scan_mtx); list_for_each_entry_safe(reg, treg, &rdev->beacon_registrations, list) { list_del(&reg->list); kfree(reg); } list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list) cfg80211_put_bss(&rdev->wiphy, &scan->pub); kfree(rdev); } void wiphy_free(struct wiphy *wiphy) { put_device(&wiphy->dev); } EXPORT_SYMBOL(wiphy_free); void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked) { struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); if (rfkill_set_hw_state(rdev->rfkill, blocked)) schedule_work(&rdev->rfkill_sync); } EXPORT_SYMBOL(wiphy_rfkill_set_hw_state); static void wdev_cleanup_work(struct work_struct *work) { struct wireless_dev *wdev; struct cfg80211_registered_device *rdev; wdev = container_of(work, struct wireless_dev, cleanup_work); rdev = wiphy_to_dev(wdev->wiphy); mutex_lock(&rdev->sched_scan_mtx); if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) { rdev->scan_req->aborted = true; ___cfg80211_scan_done(rdev, true); } if (WARN_ON(rdev->sched_scan_req && rdev->sched_scan_req->dev == wdev->netdev)) { __cfg80211_stop_sched_scan(rdev, false); } mutex_unlock(&rdev->sched_scan_mtx); mutex_lock(&rdev->devlist_mtx); rdev->opencount--; mutex_unlock(&rdev->devlist_mtx); wake_up(&rdev->dev_wait); dev_put(wdev->netdev); } void cfg80211_unregister_wdev(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); ASSERT_RTNL(); if (WARN_ON(wdev->netdev)) return; mutex_lock(&rdev->devlist_mtx); mutex_lock(&rdev->sched_scan_mtx); list_del_rcu(&wdev->list); rdev->devlist_generation++; switch (wdev->iftype) { case NL80211_IFTYPE_P2P_DEVICE: cfg80211_stop_p2p_device(rdev, wdev); break; default: WARN_ON_ONCE(1); break; } mutex_unlock(&rdev->sched_scan_mtx); mutex_unlock(&rdev->devlist_mtx); } EXPORT_SYMBOL(cfg80211_unregister_wdev); static struct device_type wiphy_type = { .name = "wlan", }; void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev, enum nl80211_iftype iftype, int num) { ASSERT_RTNL(); rdev->num_running_ifaces += num; if (iftype == NL80211_IFTYPE_MONITOR) rdev->num_running_monitor_ifaces += num; } static int cfg80211_netdev_notifier_call(struct notifier_block *nb, unsigned long state, void *ndev) { struct net_device *dev = ndev; struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev; int ret; if (!wdev) return NOTIFY_DONE; rdev = wiphy_to_dev(wdev->wiphy); WARN_ON(wdev->iftype == NL80211_IFTYPE_UNSPECIFIED); switch (state) { case NETDEV_POST_INIT: SET_NETDEV_DEVTYPE(dev, &wiphy_type); break; case NETDEV_REGISTER: /* * NB: cannot take rdev->mtx here because this may be * called within code 
protected by it when interfaces * are added with nl80211. */ mutex_init(&wdev->mtx); INIT_WORK(&wdev->cleanup_work, wdev_cleanup_work); INIT_LIST_HEAD(&wdev->event_list); spin_lock_init(&wdev->event_lock); INIT_LIST_HEAD(&wdev->mgmt_registrations); spin_lock_init(&wdev->mgmt_registrations_lock); mutex_lock(&rdev->devlist_mtx); wdev->identifier = ++rdev->wdev_id; list_add_rcu(&wdev->list, &rdev->wdev_list); rdev->devlist_generation++; /* can only change netns with wiphy */ dev->features |= NETIF_F_NETNS_LOCAL; if (sysfs_create_link(&dev->dev.kobj, &rdev->wiphy.dev.kobj, "phy80211")) { pr_err("failed to add phy80211 symlink to netdev!\n"); } wdev->netdev = dev; wdev->sme_state = CFG80211_SME_IDLE; mutex_unlock(&rdev->devlist_mtx); #ifdef CONFIG_CFG80211_WEXT wdev->wext.default_key = -1; wdev->wext.default_mgmt_key = -1; wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; #endif if (wdev->wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT) wdev->ps = true; else wdev->ps = false; /* allow mac80211 to determine the timeout */ wdev->ps_timeout = -1; netdev_set_default_ethtool_ops(dev, &cfg80211_ethtool_ops); if ((wdev->iftype == NL80211_IFTYPE_STATION || wdev->iftype == NL80211_IFTYPE_P2P_CLIENT || wdev->iftype == NL80211_IFTYPE_ADHOC) && !wdev->use_4addr) dev->priv_flags |= IFF_DONT_BRIDGE; break; case NETDEV_GOING_DOWN: switch (wdev->iftype) { case NL80211_IFTYPE_ADHOC: cfg80211_leave_ibss(rdev, dev, true); break; case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_STATION: mutex_lock(&rdev->sched_scan_mtx); __cfg80211_stop_sched_scan(rdev, false); mutex_unlock(&rdev->sched_scan_mtx); wdev_lock(wdev); #ifdef CONFIG_CFG80211_WEXT kfree(wdev->wext.ie); wdev->wext.ie = NULL; wdev->wext.ie_len = 0; wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; #endif __cfg80211_disconnect(rdev, dev, WLAN_REASON_DEAUTH_LEAVING, true); cfg80211_mlme_down(rdev, dev); wdev_unlock(wdev); break; case NL80211_IFTYPE_MESH_POINT: cfg80211_leave_mesh(rdev, dev); break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: cfg80211_stop_ap(rdev, dev); break; default: break; } wdev->beacon_interval = 0; break; case NETDEV_DOWN: cfg80211_update_iface_num(rdev, wdev->iftype, -1); dev_hold(dev); queue_work(cfg80211_wq, &wdev->cleanup_work); break; case NETDEV_UP: /* * If we have a really quick DOWN/UP succession we may * have this work still pending ... cancel it and see * if it was pending, in which case we need to account * for some of the work it would have done. */ if (cancel_work_sync(&wdev->cleanup_work)) { mutex_lock(&rdev->devlist_mtx); rdev->opencount--; mutex_unlock(&rdev->devlist_mtx); dev_put(dev); } cfg80211_update_iface_num(rdev, wdev->iftype, 1); cfg80211_lock_rdev(rdev); mutex_lock(&rdev->devlist_mtx); mutex_lock(&rdev->sched_scan_mtx); wdev_lock(wdev); switch (wdev->iftype) { #ifdef CONFIG_CFG80211_WEXT case NL80211_IFTYPE_ADHOC: cfg80211_ibss_wext_join(rdev, wdev); break; case NL80211_IFTYPE_STATION: cfg80211_mgd_wext_connect(rdev, wdev); break; #endif #ifdef CONFIG_MAC80211_MESH case NL80211_IFTYPE_MESH_POINT: { /* backward compat code... 
*/ struct mesh_setup setup; memcpy(&setup, &default_mesh_setup, sizeof(setup)); /* back compat only needed for mesh_id */ setup.mesh_id = wdev->ssid; setup.mesh_id_len = wdev->mesh_id_up_len; if (wdev->mesh_id_up_len) __cfg80211_join_mesh(rdev, dev, &setup, &default_mesh_config); break; } #endif default: break; } wdev_unlock(wdev); mutex_unlock(&rdev->sched_scan_mtx); rdev->opencount++; mutex_unlock(&rdev->devlist_mtx); cfg80211_unlock_rdev(rdev); /* * Configure power management to the driver here so that its * correctly set also after interface type changes etc. */ if ((wdev->iftype == NL80211_IFTYPE_STATION || wdev->iftype == NL80211_IFTYPE_P2P_CLIENT) && rdev->ops->set_power_mgmt) if (rdev_set_power_mgmt(rdev, dev, wdev->ps, wdev->ps_timeout)) { /* assume this means it's off */ wdev->ps = false; } break; case NETDEV_UNREGISTER: /* * NB: cannot take rdev->mtx here because this may be * called within code protected by it when interfaces * are removed with nl80211. */ mutex_lock(&rdev->devlist_mtx); /* * It is possible to get NETDEV_UNREGISTER * multiple times. To detect that, check * that the interface is still on the list * of registered interfaces, and only then * remove and clean it up. */ if (!list_empty(&wdev->list)) { sysfs_remove_link(&dev->dev.kobj, "phy80211"); list_del_rcu(&wdev->list); rdev->devlist_generation++; cfg80211_mlme_purge_registrations(wdev); #ifdef CONFIG_CFG80211_WEXT kfree(wdev->wext.keys); #endif } mutex_unlock(&rdev->devlist_mtx); /* * synchronise (so that we won't find this netdev * from other code any more) and then clear the list * head so that the above code can safely check for * !list_empty() to avoid double-cleanup. */ synchronize_rcu(); INIT_LIST_HEAD(&wdev->list); /* * Ensure that all events have been processed and * freed. 
*/ cfg80211_process_wdev_events(wdev); break; case NETDEV_PRE_UP: if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype))) return notifier_from_errno(-EOPNOTSUPP); if (rfkill_blocked(rdev->rfkill)) return notifier_from_errno(-ERFKILL); mutex_lock(&rdev->devlist_mtx); ret = cfg80211_can_add_interface(rdev, wdev->iftype); mutex_unlock(&rdev->devlist_mtx); if (ret) return notifier_from_errno(ret); break; } return NOTIFY_DONE; } static struct notifier_block cfg80211_netdev_notifier = { .notifier_call = cfg80211_netdev_notifier_call, }; static void __net_exit cfg80211_pernet_exit(struct net *net) { struct cfg80211_registered_device *rdev; rtnl_lock(); mutex_lock(&cfg80211_mutex); list_for_each_entry(rdev, &cfg80211_rdev_list, list) { if (net_eq(wiphy_net(&rdev->wiphy), net)) WARN_ON(cfg80211_switch_netns(rdev, &init_net)); } mutex_unlock(&cfg80211_mutex); rtnl_unlock(); } static struct pernet_operations cfg80211_pernet_ops = { .exit = cfg80211_pernet_exit, }; static int __init cfg80211_init(void) { int err; err = register_pernet_device(&cfg80211_pernet_ops); if (err) goto out_fail_pernet; err = wiphy_sysfs_init(); if (err) goto out_fail_sysfs; err = register_netdevice_notifier(&cfg80211_netdev_notifier); if (err) goto out_fail_notifier; err = nl80211_init(); if (err) goto out_fail_nl80211; ieee80211_debugfs_dir = debugfs_create_dir("ieee80211", NULL); err = regulatory_init(); if (err) goto out_fail_reg; cfg80211_wq = create_singlethread_workqueue("cfg80211"); if (!cfg80211_wq) { err = -ENOMEM; goto out_fail_wq; } return 0; out_fail_wq: regulatory_exit(); out_fail_reg: debugfs_remove(ieee80211_debugfs_dir); out_fail_nl80211: unregister_netdevice_notifier(&cfg80211_netdev_notifier); out_fail_notifier: wiphy_sysfs_exit(); out_fail_sysfs: unregister_pernet_device(&cfg80211_pernet_ops); out_fail_pernet: return err; } subsys_initcall(cfg80211_init); static void __exit cfg80211_exit(void) { debugfs_remove(ieee80211_debugfs_dir); nl80211_exit(); unregister_netdevice_notifier(&cfg80211_netdev_notifier); wiphy_sysfs_exit(); regulatory_exit(); unregister_pernet_device(&cfg80211_pernet_ops); destroy_workqueue(cfg80211_wq); } module_exit(cfg80211_exit);
gpl-2.0
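cfg80211_dev_rename() in the row above uses sscanf's %n conversion counter to refuse renaming a wiphy to another device's canonical phy<idx> name. A standalone mirror of that guard follows; the function name is mine, not the kernel's.

#include <stdio.h>
#include <string.h>

/* Mirrors the check in cfg80211_dev_rename(): deny "phy<n>" when <n> is some
 * other device's index. %n records how many characters the scan consumed, so
 * taken == strlen(newname) means the whole string parsed as phy<number>.
 * Comparing taken against "phy" plus the printed width of <n> lets forms with
 * leading zeroes ("phy007") through, since they cannot collide with the
 * canonical phy<idx> spelling of any device. */
static int phy_rename_denied(const char *newname, int own_idx)
{
	int idx = 0, taken = -1, digits = 1, tmp;

	sscanf(newname, "phy%d%n", &idx, &taken);
	if (taken == (int)strlen(newname) && idx != own_idx) {
		for (tmp = idx; tmp /= 10; )
			digits++;	/* places needed to print idx */
		if (taken == (int)strlen("phy") + digits)
			return 1;	/* canonical name of another phy */
	}
	return 0;
}

The single-pass idea, parse and validate with one sscanf, saves the kernel from first matching a prefix and then re-parsing the digits separately.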
cmenard/android_kernel_samsung_espresso10
net/xfrm/xfrm_replay.c
1326
13442
/* * xfrm_replay.c - xfrm replay detection, derived from xfrm_state.c. * * Copyright (C) 2010 secunet Security Networks AG * Copyright (C) 2010 Steffen Klassert <steffen.klassert@secunet.com> * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ #include <net/xfrm.h> u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq) { u32 seq, seq_hi, bottom; struct xfrm_replay_state_esn *replay_esn = x->replay_esn; if (!(x->props.flags & XFRM_STATE_ESN)) return 0; seq = ntohl(net_seq); seq_hi = replay_esn->seq_hi; bottom = replay_esn->seq - replay_esn->replay_window + 1; if (likely(replay_esn->seq >= replay_esn->replay_window - 1)) { /* A. same subspace */ if (unlikely(seq < bottom)) seq_hi++; } else { /* B. window spans two subspaces */ if (unlikely(seq >= bottom)) seq_hi--; } return seq_hi; } static void xfrm_replay_notify(struct xfrm_state *x, int event) { struct km_event c; /* we send notify messages in case * 1. we updated on of the sequence numbers, and the seqno difference * is at least x->replay_maxdiff, in this case we also update the * timeout of our timer function * 2. if x->replay_maxage has elapsed since last update, * and there were changes * * The state structure must be locked! 
*/ switch (event) { case XFRM_REPLAY_UPDATE: if (x->replay_maxdiff && (x->replay.seq - x->preplay.seq < x->replay_maxdiff) && (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) { if (x->xflags & XFRM_TIME_DEFER) event = XFRM_REPLAY_TIMEOUT; else return; } break; case XFRM_REPLAY_TIMEOUT: if (memcmp(&x->replay, &x->preplay, sizeof(struct xfrm_replay_state)) == 0) { x->xflags |= XFRM_TIME_DEFER; return; } break; } memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state)); c.event = XFRM_MSG_NEWAE; c.data.aevent = event; km_state_notify(x, &c); if (x->replay_maxage && !mod_timer(&x->rtimer, jiffies + x->replay_maxage)) x->xflags &= ~XFRM_TIME_DEFER; } static int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb) { int err = 0; struct net *net = xs_net(x); if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { XFRM_SKB_CB(skb)->seq.output.low = ++x->replay.oseq; if (unlikely(x->replay.oseq == 0)) { x->replay.oseq--; xfrm_audit_state_replay_overflow(x, skb); err = -EOVERFLOW; return err; } if (xfrm_aevent_is_on(net)) x->repl->notify(x, XFRM_REPLAY_UPDATE); } return err; } static int xfrm_replay_check(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq) { u32 diff; u32 seq = ntohl(net_seq); if (!x->props.replay_window) return 0; if (unlikely(seq == 0)) goto err; if (likely(seq > x->replay.seq)) return 0; diff = x->replay.seq - seq; if (diff >= min_t(unsigned int, x->props.replay_window, sizeof(x->replay.bitmap) * 8)) { x->stats.replay_window++; goto err; } if (x->replay.bitmap & (1U << diff)) { x->stats.replay++; goto err; } return 0; err: xfrm_audit_state_replay(x, skb, net_seq); return -EINVAL; } static void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq) { u32 diff; u32 seq = ntohl(net_seq); if (!x->props.replay_window) return; if (seq > x->replay.seq) { diff = seq - x->replay.seq; if (diff < x->props.replay_window) x->replay.bitmap = ((x->replay.bitmap) << diff) | 1; else x->replay.bitmap = 1; x->replay.seq = seq; } else { diff = x->replay.seq - seq; x->replay.bitmap |= (1U << diff); } if (xfrm_aevent_is_on(xs_net(x))) xfrm_replay_notify(x, XFRM_REPLAY_UPDATE); } static int xfrm_replay_overflow_bmp(struct xfrm_state *x, struct sk_buff *skb) { int err = 0; struct xfrm_replay_state_esn *replay_esn = x->replay_esn; struct net *net = xs_net(x); if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { XFRM_SKB_CB(skb)->seq.output.low = ++replay_esn->oseq; if (unlikely(replay_esn->oseq == 0)) { replay_esn->oseq--; xfrm_audit_state_replay_overflow(x, skb); err = -EOVERFLOW; return err; } if (xfrm_aevent_is_on(net)) x->repl->notify(x, XFRM_REPLAY_UPDATE); } return err; } static int xfrm_replay_check_bmp(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq) { unsigned int bitnr, nr; struct xfrm_replay_state_esn *replay_esn = x->replay_esn; u32 pos; u32 seq = ntohl(net_seq); u32 diff = replay_esn->seq - seq; if (!replay_esn->replay_window) return 0; pos = (replay_esn->seq - 1) % replay_esn->replay_window; if (unlikely(seq == 0)) goto err; if (likely(seq > replay_esn->seq)) return 0; if (diff >= replay_esn->replay_window) { x->stats.replay_window++; goto err; } if (pos >= diff) { bitnr = (pos - diff) % replay_esn->replay_window; nr = bitnr >> 5; bitnr = bitnr & 0x1F; if (replay_esn->bmp[nr] & (1U << bitnr)) goto err_replay; } else { bitnr = replay_esn->replay_window - (diff - pos); nr = bitnr >> 5; bitnr = bitnr & 0x1F; if (replay_esn->bmp[nr] & (1U << bitnr)) goto err_replay; } return 0; err_replay: x->stats.replay++; err: xfrm_audit_state_replay(x, skb, net_seq); return 
-EINVAL; } static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq) { unsigned int bitnr, nr, i; u32 diff; struct xfrm_replay_state_esn *replay_esn = x->replay_esn; u32 seq = ntohl(net_seq); u32 pos = (replay_esn->seq - 1) % replay_esn->replay_window; if (!replay_esn->replay_window) return; if (seq > replay_esn->seq) { diff = seq - replay_esn->seq; if (diff < replay_esn->replay_window) { for (i = 1; i < diff; i++) { bitnr = (pos + i) % replay_esn->replay_window; nr = bitnr >> 5; bitnr = bitnr & 0x1F; replay_esn->bmp[nr] &= ~(1U << bitnr); } bitnr = (pos + diff) % replay_esn->replay_window; nr = bitnr >> 5; bitnr = bitnr & 0x1F; replay_esn->bmp[nr] |= (1U << bitnr); } else { nr = (replay_esn->replay_window - 1) >> 5; for (i = 0; i <= nr; i++) replay_esn->bmp[i] = 0; bitnr = (pos + diff) % replay_esn->replay_window; nr = bitnr >> 5; bitnr = bitnr & 0x1F; replay_esn->bmp[nr] |= (1U << bitnr); } replay_esn->seq = seq; } else { diff = replay_esn->seq - seq; if (pos >= diff) { bitnr = (pos - diff) % replay_esn->replay_window; nr = bitnr >> 5; bitnr = bitnr & 0x1F; replay_esn->bmp[nr] |= (1U << bitnr); } else { bitnr = replay_esn->replay_window - (diff - pos); nr = bitnr >> 5; bitnr = bitnr & 0x1F; replay_esn->bmp[nr] |= (1U << bitnr); } } if (xfrm_aevent_is_on(xs_net(x))) xfrm_replay_notify(x, XFRM_REPLAY_UPDATE); } static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event) { struct km_event c; struct xfrm_replay_state_esn *replay_esn = x->replay_esn; struct xfrm_replay_state_esn *preplay_esn = x->preplay_esn; /* we send notify messages in case * 1. we updated on of the sequence numbers, and the seqno difference * is at least x->replay_maxdiff, in this case we also update the * timeout of our timer function * 2. if x->replay_maxage has elapsed since last update, * and there were changes * * The state structure must be locked! 
*/ switch (event) { case XFRM_REPLAY_UPDATE: if (x->replay_maxdiff && (replay_esn->seq - preplay_esn->seq < x->replay_maxdiff) && (replay_esn->oseq - preplay_esn->oseq < x->replay_maxdiff)) { if (x->xflags & XFRM_TIME_DEFER) event = XFRM_REPLAY_TIMEOUT; else return; } break; case XFRM_REPLAY_TIMEOUT: if (memcmp(x->replay_esn, x->preplay_esn, xfrm_replay_state_esn_len(replay_esn)) == 0) { x->xflags |= XFRM_TIME_DEFER; return; } break; } memcpy(x->preplay_esn, x->replay_esn, xfrm_replay_state_esn_len(replay_esn)); c.event = XFRM_MSG_NEWAE; c.data.aevent = event; km_state_notify(x, &c); if (x->replay_maxage && !mod_timer(&x->rtimer, jiffies + x->replay_maxage)) x->xflags &= ~XFRM_TIME_DEFER; } static int xfrm_replay_overflow_esn(struct xfrm_state *x, struct sk_buff *skb) { int err = 0; struct xfrm_replay_state_esn *replay_esn = x->replay_esn; struct net *net = xs_net(x); if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { XFRM_SKB_CB(skb)->seq.output.low = ++replay_esn->oseq; XFRM_SKB_CB(skb)->seq.output.hi = replay_esn->oseq_hi; if (unlikely(replay_esn->oseq == 0)) { XFRM_SKB_CB(skb)->seq.output.hi = ++replay_esn->oseq_hi; if (replay_esn->oseq_hi == 0) { replay_esn->oseq--; replay_esn->oseq_hi--; xfrm_audit_state_replay_overflow(x, skb); err = -EOVERFLOW; return err; } } if (xfrm_aevent_is_on(net)) x->repl->notify(x, XFRM_REPLAY_UPDATE); } return err; } static int xfrm_replay_check_esn(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq) { unsigned int bitnr, nr; u32 diff; struct xfrm_replay_state_esn *replay_esn = x->replay_esn; u32 pos; u32 seq = ntohl(net_seq); u32 wsize = replay_esn->replay_window; u32 top = replay_esn->seq; u32 bottom = top - wsize + 1; if (!wsize) return 0; pos = (replay_esn->seq - 1) % replay_esn->replay_window; if (unlikely(seq == 0 && replay_esn->seq_hi == 0 && (replay_esn->seq < replay_esn->replay_window - 1))) goto err; diff = top - seq; if (likely(top >= wsize - 1)) { /* A. same subspace */ if (likely(seq > top) || seq < bottom) return 0; } else { /* B. 
window spans two subspaces */ if (likely(seq > top && seq < bottom)) return 0; if (seq >= bottom) diff = ~seq + top + 1; } if (diff >= replay_esn->replay_window) { x->stats.replay_window++; goto err; } if (pos >= diff) { bitnr = (pos - diff) % replay_esn->replay_window; nr = bitnr >> 5; bitnr = bitnr & 0x1F; if (replay_esn->bmp[nr] & (1U << bitnr)) goto err_replay; } else { bitnr = replay_esn->replay_window - (diff - pos); nr = bitnr >> 5; bitnr = bitnr & 0x1F; if (replay_esn->bmp[nr] & (1U << bitnr)) goto err_replay; } return 0; err_replay: x->stats.replay++; err: xfrm_audit_state_replay(x, skb, net_seq); return -EINVAL; } static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq) { unsigned int bitnr, nr, i; int wrap; u32 diff, pos, seq, seq_hi; struct xfrm_replay_state_esn *replay_esn = x->replay_esn; if (!replay_esn->replay_window) return; seq = ntohl(net_seq); pos = (replay_esn->seq - 1) % replay_esn->replay_window; seq_hi = xfrm_replay_seqhi(x, net_seq); wrap = seq_hi - replay_esn->seq_hi; if ((!wrap && seq > replay_esn->seq) || wrap > 0) { if (likely(!wrap)) diff = seq - replay_esn->seq; else diff = ~replay_esn->seq + seq + 1; if (diff < replay_esn->replay_window) { for (i = 1; i < diff; i++) { bitnr = (pos + i) % replay_esn->replay_window; nr = bitnr >> 5; bitnr = bitnr & 0x1F; replay_esn->bmp[nr] &= ~(1U << bitnr); } bitnr = (pos + diff) % replay_esn->replay_window; nr = bitnr >> 5; bitnr = bitnr & 0x1F; replay_esn->bmp[nr] |= (1U << bitnr); } else { nr = (replay_esn->replay_window - 1) >> 5; for (i = 0; i <= nr; i++) replay_esn->bmp[i] = 0; bitnr = (pos + diff) % replay_esn->replay_window; nr = bitnr >> 5; bitnr = bitnr & 0x1F; replay_esn->bmp[nr] |= (1U << bitnr); } replay_esn->seq = seq; if (unlikely(wrap > 0)) replay_esn->seq_hi++; } else { diff = replay_esn->seq - seq; if (pos >= diff) { bitnr = (pos - diff) % replay_esn->replay_window; nr = bitnr >> 5; bitnr = bitnr & 0x1F; replay_esn->bmp[nr] |= (1U << bitnr); } else { bitnr = replay_esn->replay_window - (diff - pos); nr = bitnr >> 5; bitnr = bitnr & 0x1F; replay_esn->bmp[nr] |= (1U << bitnr); } } if (xfrm_aevent_is_on(xs_net(x))) xfrm_replay_notify(x, XFRM_REPLAY_UPDATE); } static struct xfrm_replay xfrm_replay_legacy = { .advance = xfrm_replay_advance, .check = xfrm_replay_check, .notify = xfrm_replay_notify, .overflow = xfrm_replay_overflow, }; static struct xfrm_replay xfrm_replay_bmp = { .advance = xfrm_replay_advance_bmp, .check = xfrm_replay_check_bmp, .notify = xfrm_replay_notify_bmp, .overflow = xfrm_replay_overflow_bmp, }; static struct xfrm_replay xfrm_replay_esn = { .advance = xfrm_replay_advance_esn, .check = xfrm_replay_check_esn, .notify = xfrm_replay_notify_bmp, .overflow = xfrm_replay_overflow_esn, }; int xfrm_init_replay(struct xfrm_state *x) { struct xfrm_replay_state_esn *replay_esn = x->replay_esn; if (replay_esn) { if (replay_esn->replay_window > replay_esn->bmp_len * sizeof(__u32) * 8) return -EINVAL; if ((x->props.flags & XFRM_STATE_ESN) && replay_esn->replay_window == 0) return -EINVAL; if ((x->props.flags & XFRM_STATE_ESN) && x->replay_esn) x->repl = &xfrm_replay_esn; else x->repl = &xfrm_replay_bmp; } else x->repl = &xfrm_replay_legacy; return 0; } EXPORT_SYMBOL(xfrm_init_replay);
gpl-2.0
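The check/advance pair above is a sliding anti-replay window kept as a wrapping bitmap of 32-bit words. Below is a minimal, illustrative userspace sketch of the same bookkeeping for plain 32-bit sequence numbers (no ESN high-order half, and it assumes sequence numbers never wrap); the struct and the bit arithmetic echo the kernel code, but the harness is not part of it.

/*
 * Illustrative sketch only, not kernel code: one bit per received
 * packet, bit positions taken modulo the window so the bitmap wraps
 * in place instead of being shifted.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WIN 128				/* window size in packets */

struct replay {
	uint32_t seq;			/* highest sequence seen  */
	uint32_t bmp[WIN / 32];		/* one bit per packet     */
};

static int replay_check(const struct replay *r, uint32_t seq)
{
	uint32_t diff, pos, bitnr;

	if (seq == 0 || seq + WIN <= r->seq)	/* zero or too old      */
		return -1;
	if (seq > r->seq)			/* new highest: accept  */
		return 0;
	diff = r->seq - seq;
	pos = (r->seq - 1) % WIN;
	bitnr = (pos >= diff) ? (pos - diff) % WIN : WIN - (diff - pos);
	return (r->bmp[bitnr >> 5] & (1U << (bitnr & 0x1F))) ? -1 : 0;
}

static void replay_advance(struct replay *r, uint32_t seq)
{
	uint32_t pos = (r->seq - 1) % WIN;
	uint32_t diff, bitnr, i;

	if (seq > r->seq) {
		diff = seq - r->seq;
		if (diff >= WIN) {		/* big jump: fresh window */
			memset(r->bmp, 0, sizeof(r->bmp));
		} else {			/* clear the skipped slots */
			for (i = 1; i < diff; i++) {
				bitnr = (pos + i) % WIN;
				r->bmp[bitnr >> 5] &= ~(1U << (bitnr & 0x1F));
			}
		}
		bitnr = (pos + diff) % WIN;
		r->seq = seq;
	} else {				/* late but inside window */
		diff = r->seq - seq;
		bitnr = (pos >= diff) ? (pos - diff) % WIN : WIN - (diff - pos);
	}
	r->bmp[bitnr >> 5] |= 1U << (bitnr & 0x1F);
}

int main(void)
{
	struct replay r = { 0 };
	uint32_t seqs[] = { 1, 3, 2, 3, 200, 150 };
	unsigned int i;

	for (i = 0; i < sizeof(seqs) / sizeof(seqs[0]); i++) {
		int ok = replay_check(&r, seqs[i]) == 0;

		printf("seq %3u -> %s\n", seqs[i], ok ? "accept" : "drop");
		if (ok)
			replay_advance(&r, seqs[i]);
	}
	return 0;	/* the duplicate 3 is dropped; 150 is still in window */
}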
cmenard/T889_Kernel
net/ipv6/addrlabel.c
1838
14083
/* * IPv6 Address Label subsystem * for the IPv6 "Default" Source Address Selection * * Copyright (C)2007 USAGI/WIDE Project */ /* * Author: * YOSHIFUJI Hideaki @ USAGI/WIDE Project <yoshfuji@linux-ipv6.org> */ #include <linux/kernel.h> #include <linux/list.h> #include <linux/rcupdate.h> #include <linux/in6.h> #include <linux/slab.h> #include <net/addrconf.h> #include <linux/if_addrlabel.h> #include <linux/netlink.h> #include <linux/rtnetlink.h> #if 0 #define ADDRLABEL(x...) printk(x) #else #define ADDRLABEL(x...) do { ; } while(0) #endif /* * Policy Table */ struct ip6addrlbl_entry { #ifdef CONFIG_NET_NS struct net *lbl_net; #endif struct in6_addr prefix; int prefixlen; int ifindex; int addrtype; u32 label; struct hlist_node list; atomic_t refcnt; struct rcu_head rcu; }; static struct ip6addrlbl_table { struct hlist_head head; spinlock_t lock; u32 seq; } ip6addrlbl_table; static inline struct net *ip6addrlbl_net(const struct ip6addrlbl_entry *lbl) { return read_pnet(&lbl->lbl_net); } /* * Default policy table (RFC3484 + extensions) * * prefix addr_type label * ------------------------------------------------------------------------- * ::1/128 LOOPBACK 0 * ::/0 N/A 1 * 2002::/16 N/A 2 * ::/96 COMPATv4 3 * ::ffff:0:0/96 V4MAPPED 4 * fc00::/7 N/A 5 ULA (RFC 4193) * 2001::/32 N/A 6 Teredo (RFC 4380) * 2001:10::/28 N/A 7 ORCHID (RFC 4843) * * Note: 0xffffffff is used if we do not have any policies. */ #define IPV6_ADDR_LABEL_DEFAULT 0xffffffffUL static const __net_initdata struct ip6addrlbl_init_table { const struct in6_addr *prefix; int prefixlen; u32 label; } ip6addrlbl_init_table[] = { { /* ::/0 */ .prefix = &in6addr_any, .label = 1, },{ /* fc00::/7 */ .prefix = &(struct in6_addr){{{ 0xfc }}}, .prefixlen = 7, .label = 5, },{ /* 2002::/16 */ .prefix = &(struct in6_addr){{{ 0x20, 0x02 }}}, .prefixlen = 16, .label = 2, },{ /* 2001::/32 */ .prefix = &(struct in6_addr){{{ 0x20, 0x01 }}}, .prefixlen = 32, .label = 6, },{ /* 2001:10::/28 */ .prefix = &(struct in6_addr){{{ 0x20, 0x01, 0x00, 0x10 }}}, .prefixlen = 28, .label = 7, },{ /* ::ffff:0:0 */ .prefix = &(struct in6_addr){{{ [10] = 0xff, [11] = 0xff }}}, .prefixlen = 96, .label = 4, },{ /* ::/96 */ .prefix = &in6addr_any, .prefixlen = 96, .label = 3, },{ /* ::1/128 */ .prefix = &in6addr_loopback, .prefixlen = 128, .label = 0, } }; /* Object management */ static inline void ip6addrlbl_free(struct ip6addrlbl_entry *p) { #ifdef CONFIG_NET_NS release_net(p->lbl_net); #endif kfree(p); } static void ip6addrlbl_free_rcu(struct rcu_head *h) { ip6addrlbl_free(container_of(h, struct ip6addrlbl_entry, rcu)); } static inline int ip6addrlbl_hold(struct ip6addrlbl_entry *p) { return atomic_inc_not_zero(&p->refcnt); } static inline void ip6addrlbl_put(struct ip6addrlbl_entry *p) { if (atomic_dec_and_test(&p->refcnt)) call_rcu(&p->rcu, ip6addrlbl_free_rcu); } /* Find label */ static int __ip6addrlbl_match(struct net *net, struct ip6addrlbl_entry *p, const struct in6_addr *addr, int addrtype, int ifindex) { if (!net_eq(ip6addrlbl_net(p), net)) return 0; if (p->ifindex && p->ifindex != ifindex) return 0; if (p->addrtype && p->addrtype != addrtype) return 0; if (!ipv6_prefix_equal(addr, &p->prefix, p->prefixlen)) return 0; return 1; } static struct ip6addrlbl_entry *__ipv6_addr_label(struct net *net, const struct in6_addr *addr, int type, int ifindex) { struct hlist_node *pos; struct ip6addrlbl_entry *p; hlist_for_each_entry_rcu(p, pos, &ip6addrlbl_table.head, list) { if (__ip6addrlbl_match(net, p, addr, type, ifindex)) return p; } return NULL; } u32 
ipv6_addr_label(struct net *net, const struct in6_addr *addr, int type, int ifindex) { u32 label; struct ip6addrlbl_entry *p; type &= IPV6_ADDR_MAPPED | IPV6_ADDR_COMPATv4 | IPV6_ADDR_LOOPBACK; rcu_read_lock(); p = __ipv6_addr_label(net, addr, type, ifindex); label = p ? p->label : IPV6_ADDR_LABEL_DEFAULT; rcu_read_unlock(); ADDRLABEL(KERN_DEBUG "%s(addr=%pI6, type=%d, ifindex=%d) => %08x\n", __func__, addr, type, ifindex, label); return label; } /* allocate one entry */ static struct ip6addrlbl_entry *ip6addrlbl_alloc(struct net *net, const struct in6_addr *prefix, int prefixlen, int ifindex, u32 label) { struct ip6addrlbl_entry *newp; int addrtype; ADDRLABEL(KERN_DEBUG "%s(prefix=%pI6, prefixlen=%d, ifindex=%d, label=%u)\n", __func__, prefix, prefixlen, ifindex, (unsigned int)label); addrtype = ipv6_addr_type(prefix) & (IPV6_ADDR_MAPPED | IPV6_ADDR_COMPATv4 | IPV6_ADDR_LOOPBACK); switch (addrtype) { case IPV6_ADDR_MAPPED: if (prefixlen > 96) return ERR_PTR(-EINVAL); if (prefixlen < 96) addrtype = 0; break; case IPV6_ADDR_COMPATv4: if (prefixlen != 96) addrtype = 0; break; case IPV6_ADDR_LOOPBACK: if (prefixlen != 128) addrtype = 0; break; } newp = kmalloc(sizeof(*newp), GFP_KERNEL); if (!newp) return ERR_PTR(-ENOMEM); ipv6_addr_prefix(&newp->prefix, prefix, prefixlen); newp->prefixlen = prefixlen; newp->ifindex = ifindex; newp->addrtype = addrtype; newp->label = label; INIT_HLIST_NODE(&newp->list); #ifdef CONFIG_NET_NS newp->lbl_net = hold_net(net); #endif atomic_set(&newp->refcnt, 1); return newp; } /* add a label */ static int __ip6addrlbl_add(struct ip6addrlbl_entry *newp, int replace) { int ret = 0; ADDRLABEL(KERN_DEBUG "%s(newp=%p, replace=%d)\n", __func__, newp, replace); if (hlist_empty(&ip6addrlbl_table.head)) { hlist_add_head_rcu(&newp->list, &ip6addrlbl_table.head); } else { struct hlist_node *pos, *n; struct ip6addrlbl_entry *p = NULL; hlist_for_each_entry_safe(p, pos, n, &ip6addrlbl_table.head, list) { if (p->prefixlen == newp->prefixlen && net_eq(ip6addrlbl_net(p), ip6addrlbl_net(newp)) && p->ifindex == newp->ifindex && ipv6_addr_equal(&p->prefix, &newp->prefix)) { if (!replace) { ret = -EEXIST; goto out; } hlist_replace_rcu(&p->list, &newp->list); ip6addrlbl_put(p); goto out; } else if ((p->prefixlen == newp->prefixlen && !p->ifindex) || (p->prefixlen < newp->prefixlen)) { hlist_add_before_rcu(&newp->list, &p->list); goto out; } } hlist_add_after_rcu(&p->list, &newp->list); } out: if (!ret) ip6addrlbl_table.seq++; return ret; } /* add a label */ static int ip6addrlbl_add(struct net *net, const struct in6_addr *prefix, int prefixlen, int ifindex, u32 label, int replace) { struct ip6addrlbl_entry *newp; int ret = 0; ADDRLABEL(KERN_DEBUG "%s(prefix=%pI6, prefixlen=%d, ifindex=%d, label=%u, replace=%d)\n", __func__, prefix, prefixlen, ifindex, (unsigned int)label, replace); newp = ip6addrlbl_alloc(net, prefix, prefixlen, ifindex, label); if (IS_ERR(newp)) return PTR_ERR(newp); spin_lock(&ip6addrlbl_table.lock); ret = __ip6addrlbl_add(newp, replace); spin_unlock(&ip6addrlbl_table.lock); if (ret) ip6addrlbl_free(newp); return ret; } /* remove a label */ static int __ip6addrlbl_del(struct net *net, const struct in6_addr *prefix, int prefixlen, int ifindex) { struct ip6addrlbl_entry *p = NULL; struct hlist_node *pos, *n; int ret = -ESRCH; ADDRLABEL(KERN_DEBUG "%s(prefix=%pI6, prefixlen=%d, ifindex=%d)\n", __func__, prefix, prefixlen, ifindex); hlist_for_each_entry_safe(p, pos, n, &ip6addrlbl_table.head, list) { if (p->prefixlen == prefixlen && net_eq(ip6addrlbl_net(p), net) && 
p->ifindex == ifindex && ipv6_addr_equal(&p->prefix, prefix)) { hlist_del_rcu(&p->list); ip6addrlbl_put(p); ret = 0; break; } } return ret; } static int ip6addrlbl_del(struct net *net, const struct in6_addr *prefix, int prefixlen, int ifindex) { struct in6_addr prefix_buf; int ret; ADDRLABEL(KERN_DEBUG "%s(prefix=%pI6, prefixlen=%d, ifindex=%d)\n", __func__, prefix, prefixlen, ifindex); ipv6_addr_prefix(&prefix_buf, prefix, prefixlen); spin_lock(&ip6addrlbl_table.lock); ret = __ip6addrlbl_del(net, &prefix_buf, prefixlen, ifindex); spin_unlock(&ip6addrlbl_table.lock); return ret; } /* add default label */ static int __net_init ip6addrlbl_net_init(struct net *net) { int err = 0; int i; ADDRLABEL(KERN_DEBUG "%s()\n", __func__); for (i = 0; i < ARRAY_SIZE(ip6addrlbl_init_table); i++) { int ret = ip6addrlbl_add(net, ip6addrlbl_init_table[i].prefix, ip6addrlbl_init_table[i].prefixlen, 0, ip6addrlbl_init_table[i].label, 0); /* XXX: should we free all rules when we catch an error? */ if (ret && (!err || err != -ENOMEM)) err = ret; } return err; } static void __net_exit ip6addrlbl_net_exit(struct net *net) { struct ip6addrlbl_entry *p = NULL; struct hlist_node *pos, *n; /* Remove all labels belonging to the exiting net */ spin_lock(&ip6addrlbl_table.lock); hlist_for_each_entry_safe(p, pos, n, &ip6addrlbl_table.head, list) { if (net_eq(ip6addrlbl_net(p), net)) { hlist_del_rcu(&p->list); ip6addrlbl_put(p); } } spin_unlock(&ip6addrlbl_table.lock); } static struct pernet_operations ipv6_addr_label_ops = { .init = ip6addrlbl_net_init, .exit = ip6addrlbl_net_exit, }; int __init ipv6_addr_label_init(void) { spin_lock_init(&ip6addrlbl_table.lock); return register_pernet_subsys(&ipv6_addr_label_ops); } void ipv6_addr_label_cleanup(void) { unregister_pernet_subsys(&ipv6_addr_label_ops); } static const struct nla_policy ifal_policy[IFAL_MAX+1] = { [IFAL_ADDRESS] = { .len = sizeof(struct in6_addr), }, [IFAL_LABEL] = { .len = sizeof(u32), }, }; static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { struct net *net = sock_net(skb->sk); struct ifaddrlblmsg *ifal; struct nlattr *tb[IFAL_MAX+1]; struct in6_addr *pfx; u32 label; int err = 0; err = nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy); if (err < 0) return err; ifal = nlmsg_data(nlh); if (ifal->ifal_family != AF_INET6 || ifal->ifal_prefixlen > 128) return -EINVAL; if (!tb[IFAL_ADDRESS]) return -EINVAL; pfx = nla_data(tb[IFAL_ADDRESS]); if (!pfx) return -EINVAL; if (!tb[IFAL_LABEL]) return -EINVAL; label = nla_get_u32(tb[IFAL_LABEL]); if (label == IPV6_ADDR_LABEL_DEFAULT) return -EINVAL; switch(nlh->nlmsg_type) { case RTM_NEWADDRLABEL: if (ifal->ifal_index && !__dev_get_by_index(net, ifal->ifal_index)) return -EINVAL; err = ip6addrlbl_add(net, pfx, ifal->ifal_prefixlen, ifal->ifal_index, label, nlh->nlmsg_flags & NLM_F_REPLACE); break; case RTM_DELADDRLABEL: err = ip6addrlbl_del(net, pfx, ifal->ifal_prefixlen, ifal->ifal_index); break; default: err = -EOPNOTSUPP; } return err; } static inline void ip6addrlbl_putmsg(struct nlmsghdr *nlh, int prefixlen, int ifindex, u32 lseq) { struct ifaddrlblmsg *ifal = nlmsg_data(nlh); ifal->ifal_family = AF_INET6; ifal->ifal_prefixlen = prefixlen; ifal->ifal_flags = 0; ifal->ifal_index = ifindex; ifal->ifal_seq = lseq; }; static int ip6addrlbl_fill(struct sk_buff *skb, struct ip6addrlbl_entry *p, u32 lseq, u32 pid, u32 seq, int event, unsigned int flags) { struct nlmsghdr *nlh = nlmsg_put(skb, pid, seq, event, sizeof(struct ifaddrlblmsg), flags); if (!nlh) return -EMSGSIZE; 
ip6addrlbl_putmsg(nlh, p->prefixlen, p->ifindex, lseq); if (nla_put(skb, IFAL_ADDRESS, 16, &p->prefix) < 0 || nla_put_u32(skb, IFAL_LABEL, p->label) < 0) { nlmsg_cancel(skb, nlh); return -EMSGSIZE; } return nlmsg_end(skb, nlh); } static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); struct ip6addrlbl_entry *p; struct hlist_node *pos; int idx = 0, s_idx = cb->args[0]; int err; rcu_read_lock(); hlist_for_each_entry_rcu(p, pos, &ip6addrlbl_table.head, list) { if (idx >= s_idx && net_eq(ip6addrlbl_net(p), net)) { if ((err = ip6addrlbl_fill(skb, p, ip6addrlbl_table.seq, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, RTM_NEWADDRLABEL, NLM_F_MULTI)) <= 0) break; } idx++; } rcu_read_unlock(); cb->args[0] = idx; return skb->len; } static inline int ip6addrlbl_msgsize(void) { return NLMSG_ALIGN(sizeof(struct ifaddrlblmsg)) + nla_total_size(16) /* IFAL_ADDRESS */ + nla_total_size(4); /* IFAL_LABEL */ } static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg) { struct net *net = sock_net(in_skb->sk); struct ifaddrlblmsg *ifal; struct nlattr *tb[IFAL_MAX+1]; struct in6_addr *addr; u32 lseq; int err = 0; struct ip6addrlbl_entry *p; struct sk_buff *skb; err = nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy); if (err < 0) return err; ifal = nlmsg_data(nlh); if (ifal->ifal_family != AF_INET6 || ifal->ifal_prefixlen != 128) return -EINVAL; if (ifal->ifal_index && !__dev_get_by_index(net, ifal->ifal_index)) return -EINVAL; if (!tb[IFAL_ADDRESS]) return -EINVAL; addr = nla_data(tb[IFAL_ADDRESS]); if (!addr) return -EINVAL; rcu_read_lock(); p = __ipv6_addr_label(net, addr, ipv6_addr_type(addr), ifal->ifal_index); /* ip6addrlbl_hold() returns nonzero on success, so only drop p when the hold failed */ if (p && !ip6addrlbl_hold(p)) p = NULL; lseq = ip6addrlbl_table.seq; rcu_read_unlock(); if (!p) { err = -ESRCH; goto out; } if (!(skb = nlmsg_new(ip6addrlbl_msgsize(), GFP_KERNEL))) { ip6addrlbl_put(p); return -ENOBUFS; } err = ip6addrlbl_fill(skb, p, lseq, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, RTM_NEWADDRLABEL, 0); ip6addrlbl_put(p); if (err < 0) { WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto out; } err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid); out: return err; } void __init ipv6_addr_label_rtnl_register(void) { __rtnl_register(PF_INET6, RTM_NEWADDRLABEL, ip6addrlbl_newdel, NULL); __rtnl_register(PF_INET6, RTM_DELADDRLABEL, ip6addrlbl_newdel, NULL); __rtnl_register(PF_INET6, RTM_GETADDRLABEL, ip6addrlbl_get, ip6addrlbl_dump); }
gpl-2.0
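The ordering of the policy table above matters: __ip6addrlbl_add() keeps entries sorted with longer prefixes first, so __ipv6_addr_label() is effectively a longest-prefix match where the first hit wins. A self-contained sketch of that match over a subset of the default table follows; it ignores the addrtype and ifindex filters the kernel also applies, and is illustrative only.

/*
 * Illustrative only: longest-prefix label lookup in the spirit of
 * __ipv6_addr_label().  The table is kept longest prefix first, so
 * the first matching entry supplies the label.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct lbl { const char *prefix; int plen; uint32_t label; };

/* a subset of the default policy table, longest prefix first */
static const struct lbl table[] = {
	{ "::1",        128, 0 },	/* loopback  */
	{ "::ffff:0:0",  96, 4 },	/* v4-mapped */
	{ "2002::",      16, 2 },	/* 6to4      */
	{ "::",           0, 1 },	/* default   */
};

static int prefix_match(const struct in6_addr *a,
			const struct in6_addr *p, int plen)
{
	int bytes = plen / 8, bits = plen % 8;

	if (memcmp(a->s6_addr, p->s6_addr, bytes))
		return 0;
	if (bits) {
		uint8_t mask = 0xff << (8 - bits);

		if ((a->s6_addr[bytes] ^ p->s6_addr[bytes]) & mask)
			return 0;
	}
	return 1;
}

static uint32_t addr_label(const char *addr)
{
	struct in6_addr a, p;
	unsigned int i;

	inet_pton(AF_INET6, addr, &a);
	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		inet_pton(AF_INET6, table[i].prefix, &p);
		if (prefix_match(&a, &p, table[i].plen))
			return table[i].label;
	}
	return 0xffffffff;	/* IPV6_ADDR_LABEL_DEFAULT */
}

int main(void)
{
	printf("%u\n", addr_label("2002:c000:201::1"));	/* 2: 6to4      */
	printf("%u\n", addr_label("::ffff:192.0.2.1"));		/* 4: v4-mapped */
	printf("%u\n", addr_label("2001:db8::1"));		/* 1: default   */
	return 0;
}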
W4TCH0UT/zz_plutonium
drivers/iio/adc/ti_am335x_adc.c
2094
6484
/* * TI ADC MFD driver * * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/err.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/iio/iio.h> #include <linux/mfd/ti_am335x_tscadc.h> #include <linux/platform_data/ti_am335x_adc.h> struct tiadc_device { struct ti_tscadc_dev *mfd_tscadc; int channels; }; static unsigned int tiadc_readl(struct tiadc_device *adc, unsigned int reg) { return readl(adc->mfd_tscadc->tscadc_base + reg); } static void tiadc_writel(struct tiadc_device *adc, unsigned int reg, unsigned int val) { writel(val, adc->mfd_tscadc->tscadc_base + reg); } static void tiadc_step_config(struct tiadc_device *adc_dev) { unsigned int stepconfig; int i, channels = 0, steps; /* * There are 16 configurable steps and 8 analog input * lines available which are shared between Touchscreen and ADC. * * Steps backwards i.e. from 16 towards 0 are used by ADC * depending on number of input lines needed. * Channel would represent which analog input * needs to be given to ADC to digitalize data. */ steps = TOTAL_STEPS - adc_dev->channels; channels = TOTAL_CHANNELS - adc_dev->channels; stepconfig = STEPCONFIG_AVG_16 | STEPCONFIG_FIFO1; for (i = (steps + 1); i <= TOTAL_STEPS; i++) { tiadc_writel(adc_dev, REG_STEPCONFIG(i), stepconfig | STEPCONFIG_INP(channels)); tiadc_writel(adc_dev, REG_STEPDELAY(i), STEPCONFIG_OPENDLY); channels++; } tiadc_writel(adc_dev, REG_SE, STPENB_STEPENB); } static int tiadc_channel_init(struct iio_dev *indio_dev, int channels) { struct iio_chan_spec *chan_array; int i; indio_dev->num_channels = channels; chan_array = kcalloc(indio_dev->num_channels, sizeof(struct iio_chan_spec), GFP_KERNEL); if (chan_array == NULL) return -ENOMEM; for (i = 0; i < (indio_dev->num_channels); i++) { struct iio_chan_spec *chan = chan_array + i; chan->type = IIO_VOLTAGE; chan->indexed = 1; chan->channel = i; chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW); } indio_dev->channels = chan_array; return indio_dev->num_channels; } static void tiadc_channels_remove(struct iio_dev *indio_dev) { kfree(indio_dev->channels); } static int tiadc_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) { struct tiadc_device *adc_dev = iio_priv(indio_dev); int i; unsigned int fifo1count, readx1; /* * When the sub-system is first enabled, * the sequencer will always start with the * lowest step (1) and continue until step (16). * For ex: If we have enabled 4 ADC channels and * currently use only 1 out of them, the * sequencer still configures all the 4 steps, * leading to 3 unwanted data. * Hence we need to flush out this data. 
*/ fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT); for (i = 0; i < fifo1count; i++) { readx1 = tiadc_readl(adc_dev, REG_FIFO1); if (i == chan->channel) *val = readx1 & 0xfff; } tiadc_writel(adc_dev, REG_SE, STPENB_STEPENB); return IIO_VAL_INT; } static const struct iio_info tiadc_info = { .read_raw = &tiadc_read_raw, }; static int tiadc_probe(struct platform_device *pdev) { struct iio_dev *indio_dev; struct tiadc_device *adc_dev; struct ti_tscadc_dev *tscadc_dev = pdev->dev.platform_data; struct mfd_tscadc_board *pdata; int err; pdata = tscadc_dev->dev->platform_data; if (!pdata || !pdata->adc_init) { dev_err(&pdev->dev, "Could not find platform data\n"); return -EINVAL; } indio_dev = iio_device_alloc(sizeof(struct tiadc_device)); if (indio_dev == NULL) { dev_err(&pdev->dev, "failed to allocate iio device\n"); err = -ENOMEM; goto err_ret; } adc_dev = iio_priv(indio_dev); adc_dev->mfd_tscadc = tscadc_dev; adc_dev->channels = pdata->adc_init->adc_channels; indio_dev->dev.parent = &pdev->dev; indio_dev->name = dev_name(&pdev->dev); indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->info = &tiadc_info; tiadc_step_config(adc_dev); err = tiadc_channel_init(indio_dev, adc_dev->channels); if (err < 0) goto err_free_device; err = iio_device_register(indio_dev); if (err) goto err_free_channels; platform_set_drvdata(pdev, indio_dev); return 0; err_free_channels: tiadc_channels_remove(indio_dev); err_free_device: iio_device_free(indio_dev); err_ret: return err; } static int tiadc_remove(struct platform_device *pdev) { struct iio_dev *indio_dev = platform_get_drvdata(pdev); iio_device_unregister(indio_dev); tiadc_channels_remove(indio_dev); iio_device_free(indio_dev); return 0; } #ifdef CONFIG_PM static int tiadc_suspend(struct device *dev) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct tiadc_device *adc_dev = iio_priv(indio_dev); struct ti_tscadc_dev *tscadc_dev = dev->platform_data; unsigned int idle; if (!device_may_wakeup(tscadc_dev->dev)) { idle = tiadc_readl(adc_dev, REG_CTRL); idle &= ~(CNTRLREG_TSCSSENB); tiadc_writel(adc_dev, REG_CTRL, (idle | CNTRLREG_POWERDOWN)); } return 0; } static int tiadc_resume(struct device *dev) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct tiadc_device *adc_dev = iio_priv(indio_dev); unsigned int restore; /* Make sure ADC is powered up */ restore = tiadc_readl(adc_dev, REG_CTRL); restore &= ~(CNTRLREG_POWERDOWN); tiadc_writel(adc_dev, REG_CTRL, restore); tiadc_step_config(adc_dev); return 0; } static const struct dev_pm_ops tiadc_pm_ops = { .suspend = tiadc_suspend, .resume = tiadc_resume, }; #define TIADC_PM_OPS (&tiadc_pm_ops) #else #define TIADC_PM_OPS NULL #endif static struct platform_driver tiadc_driver = { .driver = { .name = "tiadc", .owner = THIS_MODULE, .pm = TIADC_PM_OPS, }, .probe = tiadc_probe, .remove = tiadc_remove, }; module_platform_driver(tiadc_driver); MODULE_DESCRIPTION("TI ADC controller driver"); MODULE_AUTHOR("Rachna Patil <rachna@ti.com>"); MODULE_LICENSE("GPL");
gpl-2.0
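The step arithmetic in tiadc_step_config() above assigns the ADC the highest-numbered sequencer steps and analog inputs, leaving the low-numbered ones to the touchscreen. The standalone sketch below just prints the resulting step-to-input mapping; TOTAL_STEPS and TOTAL_CHANNELS are taken as 16 and 8 per the driver's own comment, and the harness is not driver code.

/*
 * Illustrative only: reproduce the STEPCONFIG -> analog-input mapping
 * that tiadc_step_config() programs for a given adc_channels count.
 */
#include <stdio.h>

#define TOTAL_STEPS	16
#define TOTAL_CHANNELS	 8

static void show_mapping(int adc_channels)
{
	int steps = TOTAL_STEPS - adc_channels;
	int channel = TOTAL_CHANNELS - adc_channels;
	int i;

	printf("adc_channels=%d\n", adc_channels);
	for (i = steps + 1; i <= TOTAL_STEPS; i++)
		printf("  STEPCONFIG(%2d) -> AIN%d\n", i, channel++);
}

int main(void)
{
	show_mapping(4);	/* steps 13..16 -> AIN4..AIN7 */
	return 0;
}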
Pafcholini/linux-3.10.y
drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
3374
6673
/* * Mainly by David Woodhouse, somewhat modified by Jordan Crouse * * Copyright © 2006-2007 Red Hat, Inc. * Copyright © 2006-2007 Advanced Micro Devices, Inc. * Copyright © 2009 VIA Technology, Inc. * Copyright (c) 2010 Andres Salomon <dilinger@queued.net> * * This program is free software. You can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/cs5535.h> #include <linux/gpio.h> #include <linux/delay.h> #include <asm/olpc.h> #include "olpc_dcon.h" static int dcon_init_xo_1(struct dcon_priv *dcon) { unsigned char lob; if (gpio_request(OLPC_GPIO_DCON_STAT0, "OLPC-DCON")) { pr_err("failed to request STAT0 GPIO\n"); return -EIO; } if (gpio_request(OLPC_GPIO_DCON_STAT1, "OLPC-DCON")) { pr_err("failed to request STAT1 GPIO\n"); goto err_gp_stat1; } if (gpio_request(OLPC_GPIO_DCON_IRQ, "OLPC-DCON")) { pr_err("failed to request IRQ GPIO\n"); goto err_gp_irq; } if (gpio_request(OLPC_GPIO_DCON_LOAD, "OLPC-DCON")) { pr_err("failed to request LOAD GPIO\n"); goto err_gp_load; } if (gpio_request(OLPC_GPIO_DCON_BLANK, "OLPC-DCON")) { pr_err("failed to request BLANK GPIO\n"); goto err_gp_blank; } /* Turn off the event enable for GPIO7 just to be safe */ cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE); /* * Determine the current state by reading the GPIO bit; earlier * stages of the boot process have established the state. * * Note that we read GPIO_OUPUT_VAL rather than GPIO_READ_BACK here; * this is because OFW will disable input for the pin and set a value.. * READ_BACK will only contain a valid value if input is enabled and * then a value is set. So, future readings of the pin can use * READ_BACK, but the first one cannot. Awesome, huh? */ dcon->curr_src = cs5535_gpio_isset(OLPC_GPIO_DCON_LOAD, GPIO_OUTPUT_VAL) ? 
DCON_SOURCE_CPU : DCON_SOURCE_DCON; dcon->pending_src = dcon->curr_src; /* Set the directions for the GPIO pins */ gpio_direction_input(OLPC_GPIO_DCON_STAT0); gpio_direction_input(OLPC_GPIO_DCON_STAT1); gpio_direction_input(OLPC_GPIO_DCON_IRQ); gpio_direction_input(OLPC_GPIO_DCON_BLANK); gpio_direction_output(OLPC_GPIO_DCON_LOAD, dcon->curr_src == DCON_SOURCE_CPU); /* Set up the interrupt mappings */ /* Set the IRQ to pair 2 */ cs5535_gpio_setup_event(OLPC_GPIO_DCON_IRQ, 2, 0); /* Enable group 2 to trigger the DCON interrupt */ cs5535_gpio_set_irq(2, DCON_IRQ); /* Select edge level for interrupt (in PIC) */ lob = inb(0x4d0); lob &= ~(1 << DCON_IRQ); outb(lob, 0x4d0); /* Register the interrupt handler */ if (request_irq(DCON_IRQ, &dcon_interrupt, 0, "DCON", dcon)) { pr_err("failed to request DCON's irq\n"); goto err_req_irq; } /* Clear INV_EN for GPIO7 (DCONIRQ) */ cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_INVERT); /* Enable filter for GPIO12 (DCONBLANK) */ cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_FILTER); /* Disable filter for GPIO7 */ cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_FILTER); /* Disable event counter for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */ cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_EVENT_COUNT); cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_EVENT_COUNT); /* Add GPIO12 to the Filter Event Pair #7 */ cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_FE7_SEL); /* Turn off negative Edge Enable for GPIO12 */ cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_EN); /* Enable negative Edge Enable for GPIO7 */ cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_EN); /* Zero the filter amount for Filter Event Pair #7 */ cs5535_gpio_set(0, GPIO_FLTR7_AMOUNT); /* Clear the negative edge status for GPIO7 and GPIO12 */ cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS); cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_STS); /* FIXME: Clear the positive status as well, just to be sure */ cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_POSITIVE_EDGE_STS); cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_POSITIVE_EDGE_STS); /* Enable events for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */ cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE); cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_EVENTS_ENABLE); return 0; err_req_irq: gpio_free(OLPC_GPIO_DCON_BLANK); err_gp_blank: gpio_free(OLPC_GPIO_DCON_LOAD); err_gp_load: gpio_free(OLPC_GPIO_DCON_IRQ); err_gp_irq: gpio_free(OLPC_GPIO_DCON_STAT1); err_gp_stat1: gpio_free(OLPC_GPIO_DCON_STAT0); return -EIO; } static void dcon_wiggle_xo_1(void) { int x; /* * According to HiMax, when powering the DCON up we should hold * SMB_DATA high for 8 SMB_CLK cycles. This will force the DCON * state machine to reset to a (sane) initial state. Mitch Bradley * did some testing and discovered that holding for 16 SMB_CLK cycles * worked a lot more reliably, so that's what we do here. * * According to the cs5536 spec, to set GPIO14 to SMB_CLK we must * simultaneously set AUX1 IN/OUT to GPIO14; ditto for SMB_DATA and * GPIO15. 
*/ cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL); cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_VAL); cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_ENABLE); cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_ENABLE); cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1); cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1); cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX2); cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX2); cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1); cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1); for (x = 0; x < 16; x++) { udelay(5); cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL); udelay(5); cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL); } udelay(5); cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1); cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1); cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1); cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1); } static void dcon_set_dconload_1(int val) { gpio_set_value(OLPC_GPIO_DCON_LOAD, val); } static int dcon_read_status_xo_1(u8 *status) { *status = gpio_get_value(OLPC_GPIO_DCON_STAT0); *status |= gpio_get_value(OLPC_GPIO_DCON_STAT1) << 1; /* Clear the negative edge status for GPIO7 */ cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS); return 0; } struct dcon_platform_data dcon_pdata_xo_1 = { .init = dcon_init_xo_1, .bus_stabilize_wiggle = dcon_wiggle_xo_1, .set_dconload = dcon_set_dconload_1, .read_status = dcon_read_status_xo_1, };
gpl-2.0
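dcon_wiggle_xo_1() above is essentially a bit-banged bus reset: SMB_DATA is held high while SMB_CLK is pulsed 16 times, after which both pins are handed back to the SMBus controller via the AUX1 muxing. Stripped of the cs5535 register details, the pattern reduces to the sketch below; set_pin() and udelay() here are stand-in stubs for illustration, not a real GPIO API.

/*
 * Illustrative only: the generic shape of the "wiggle".  The real
 * driver uses cs5535_gpio_set()/clear() and the hardware delay.
 */
#include <stdio.h>

enum pin { SMB_CLK, SMB_DATA };

static void set_pin(enum pin p, int v)	/* stand-in for the GPIO API */
{
	printf("%s=%d\n", p == SMB_CLK ? "CLK" : "DATA", v);
}

static void udelay(int us) { (void)us; }	/* stand-in, no real delay */

int main(void)
{
	int i;

	set_pin(SMB_DATA, 1);		/* data held high throughout      */
	set_pin(SMB_CLK, 1);
	for (i = 0; i < 16; i++) {	/* 8 cycles required; 16 is safer */
		udelay(5);
		set_pin(SMB_CLK, 0);
		udelay(5);
		set_pin(SMB_CLK, 1);
	}
	return 0;	/* real code now returns both pins to the SMBus mux */
}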
deepsrd/android_kernel_nx507j
drivers/video/msm/lcdc_samsung_wsvga.c
3630
6201
/* Copyright (c) 2009-2011, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/delay.h> #include <linux/pwm.h> #ifdef CONFIG_PMIC8058_PWM #include <linux/mfd/pmic8058.h> #include <linux/pmic8058-pwm.h> #endif #include <mach/gpio.h> #include "msm_fb.h" #ifdef CONFIG_PMIC8058_PWM static struct pwm_device *bl_pwm0; static struct pwm_device *bl_pwm1; /* for samsung panel 300hz was the minimum freq where flickering wasnt * observed as the screen was dimmed */ #define PWM_FREQ_HZ 300 #define PWM_PERIOD_USEC (USEC_PER_SEC / PWM_FREQ_HZ) #define PWM_LEVEL 100 #define PWM_DUTY_LEVEL (PWM_PERIOD_USEC / PWM_LEVEL) #endif struct lcdc_samsung_data { struct msm_panel_common_pdata *pdata; #ifdef CONFIG_FB_MSM_LCDC_DSUB int vga_enabled; #endif struct platform_device *fbpdev; }; static struct lcdc_samsung_data *dd; static void lcdc_samsung_panel_set_backlight(struct msm_fb_data_type *mfd) { #ifdef CONFIG_PMIC8058_PWM int bl_level; int ret; bl_level = mfd->bl_level; if (bl_pwm0) { ret = pwm_config(bl_pwm0, PWM_DUTY_LEVEL * bl_level, PWM_PERIOD_USEC); if (ret) printk(KERN_ERR "pwm_config on pwm 0 failed %d\n", ret); } if (bl_pwm1) { ret = pwm_config(bl_pwm1, PWM_PERIOD_USEC - (PWM_DUTY_LEVEL * bl_level), PWM_PERIOD_USEC); if (ret) printk(KERN_ERR "pwm_config on pwm 1 failed %d\n", ret); } if (bl_pwm0) { ret = pwm_enable(bl_pwm0); if (ret) printk(KERN_ERR "pwm_enable on pwm 0 failed %d\n", ret); } if (bl_pwm1) { ret = pwm_enable(bl_pwm1); if (ret) printk(KERN_ERR "pwm_enable on pwm 1 failed %d\n", ret); } #endif } #ifdef CONFIG_FB_MSM_LCDC_DSUB static ssize_t show_vga_enable(struct device *device, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", dd->vga_enabled); } static ssize_t store_vga_enable(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned long enable; int rc; rc = strict_strtoul(buf, 10, &enable); if (rc) return -EINVAL; if (dd->pdata && dd->pdata->vga_switch) rc = dd->pdata->vga_switch(enable); else rc = -ENODEV; if (!rc) { dd->vga_enabled = enable; rc = count; } return rc; } static DEVICE_ATTR(vga_enable, S_IRUGO|S_IWUSR, show_vga_enable, store_vga_enable); static struct attribute *attrs[] = { &dev_attr_vga_enable.attr, NULL, }; static struct attribute_group attr_group = { .attrs = attrs, }; #endif static int __devinit samsung_probe(struct platform_device *pdev) { int rc = 0; #ifdef CONFIG_FB_MSM_LCDC_DSUB struct msm_fb_data_type *mfd; #endif if (pdev->id == 0) { dd = kzalloc(sizeof *dd, GFP_KERNEL); if (!dd) return -ENOMEM; #ifdef CONFIG_FB_MSM_LCDC_DSUB dd->vga_enabled = 0; #endif dd->pdata = pdev->dev.platform_data; return 0; } else if (!dd) return -ENODEV; #ifdef CONFIG_PMIC8058_PWM bl_pwm0 = pwm_request(dd->pdata->gpio_num[0], "backlight1"); if (bl_pwm0 == NULL || IS_ERR(bl_pwm0)) { pr_err("%s pwm_request() failed\n", __func__); bl_pwm0 = NULL; } bl_pwm1 = pwm_request(dd->pdata->gpio_num[1], "backlight2"); if (bl_pwm1 == NULL || IS_ERR(bl_pwm1)) { pr_err("%s pwm_request() failed\n", __func__); bl_pwm1 = NULL; } pr_debug("samsung_probe: bl_pwm0=%p LPG_chan0=%d " 
"bl_pwm1=%p LPG_chan1=%d\n", bl_pwm0, (int)dd->pdata->gpio_num[0], bl_pwm1, (int)dd->pdata->gpio_num[1] ); #endif dd->fbpdev = msm_fb_add_device(pdev); if (!dd->fbpdev) { dev_err(&pdev->dev, "failed to add msm_fb device\n"); rc = -ENODEV; goto probe_exit; } #ifdef CONFIG_FB_MSM_LCDC_DSUB mfd = platform_get_drvdata(dd->fbpdev); if (mfd && mfd->fbi && mfd->fbi->dev) { rc = sysfs_create_group(&mfd->fbi->dev->kobj, &attr_group); if (rc) dev_err(&pdev->dev, "failed to create sysfs group\n"); } else { dev_err(&pdev->dev, "no dev to create sysfs group\n"); rc = -ENODEV; } #endif probe_exit: return rc; } #ifdef CONFIG_FB_MSM_LCDC_DSUB static int __devexit samsung_remove(struct platform_device *pdev) { sysfs_remove_group(&dd->fbpdev->dev.kobj, &attr_group); return 0; } #endif static struct platform_driver this_driver = { .probe = samsung_probe, #ifdef CONFIG_FB_MSM_LCDC_DSUB .remove = samsung_remove, #endif .driver = { .name = "lcdc_samsung_wsvga", }, }; static struct msm_fb_panel_data samsung_panel_data = { .set_backlight = lcdc_samsung_panel_set_backlight, }; static struct platform_device this_device = { .name = "lcdc_samsung_wsvga", .id = 1, .dev = { .platform_data = &samsung_panel_data, } }; static int __init lcdc_samsung_panel_init(void) { int ret; struct msm_panel_info *pinfo; if (msm_fb_detect_client("lcdc_samsung_wsvga")) return 0; ret = platform_driver_register(&this_driver); if (ret) return ret; pinfo = &samsung_panel_data.panel_info; pinfo->xres = 1024; pinfo->yres = 600; #ifdef CONFIG_FB_MSM_LCDC_DSUB /* DSUB (VGA) is on the same bus, this allows us to allocate for the * max resolution of the DSUB display */ pinfo->mode2_xres = 1440; pinfo->mode2_yres = 900; pinfo->mode2_bpp = 16; #else MSM_FB_SINGLE_MODE_PANEL(pinfo); #endif pinfo->type = LCDC_PANEL; pinfo->pdest = DISPLAY_1; pinfo->wait_cycle = 0; pinfo->bpp = 18; pinfo->fb_num = 2; pinfo->clk_rate = 43192000; pinfo->bl_max = PWM_LEVEL; pinfo->bl_min = 1; pinfo->lcdc.h_back_porch = 80; pinfo->lcdc.h_front_porch = 48; pinfo->lcdc.h_pulse_width = 32; pinfo->lcdc.v_back_porch = 4; pinfo->lcdc.v_front_porch = 3; pinfo->lcdc.v_pulse_width = 1; pinfo->lcdc.border_clr = 0; pinfo->lcdc.underflow_clr = 0xff; pinfo->lcdc.hsync_skew = 0; ret = platform_device_register(&this_device); if (ret) platform_driver_unregister(&this_driver); return ret; } module_init(lcdc_samsung_panel_init);
gpl-2.0
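The backlight code above slices one 300 Hz PWM period into PWM_LEVEL equal steps and drives the second PWM with the complementary duty cycle, so the two outputs always sum to a full period. A quick standalone check of that arithmetic, reusing the driver's macros in an illustrative harness:

/* Illustrative only: verify the duty-cycle math used above. */
#include <stdio.h>

#define USEC_PER_SEC	1000000
#define PWM_FREQ_HZ	300	/* lowest flicker-free frequency per the comment */
#define PWM_PERIOD_USEC	(USEC_PER_SEC / PWM_FREQ_HZ)
#define PWM_LEVEL	100
#define PWM_DUTY_LEVEL	(PWM_PERIOD_USEC / PWM_LEVEL)

int main(void)
{
	int bl_level = 40;	/* example level, 1..PWM_LEVEL */

	printf("period    = %d us\n", PWM_PERIOD_USEC);			/* 3333 */
	printf("pwm0 duty = %d us\n", PWM_DUTY_LEVEL * bl_level);	/* 1320 */
	printf("pwm1 duty = %d us\n",
	       PWM_PERIOD_USEC - PWM_DUTY_LEVEL * bl_level);		/* 2013 */
	return 0;
}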
peteralfonso/platform_kernel_tegra
drivers/message/i2o/i2o_proc.c
4142
53361
/* * procfs handler for Linux I2O subsystem * * (c) Copyright 1999 Deepak Saxena * * Originally written by Deepak Saxena(deepak@plexity.net) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This is an initial test release. The code is based on the design of the * ide procfs system (drivers/block/ide-proc.c). Some code taken from * i2o-core module by Alan Cox. * * DISCLAIMER: This code is still under development/test and may cause * your system to behave unpredictably. Use at your own discretion. * * * Fixes/additions: * Juha Sievänen (Juha.Sievanen@cs.Helsinki.FI), * Auvo Häkkinen (Auvo.Hakkinen@cs.Helsinki.FI) * University of Helsinki, Department of Computer Science * LAN entries * Markus Lidel <Markus.Lidel@shadowconnect.com> * Changes for new I2O API */ #define OSM_NAME "proc-osm" #define OSM_VERSION "1.316" #define OSM_DESCRIPTION "I2O ProcFS OSM" #define I2O_MAX_MODULES 4 // FIXME! #define FMT_U64_HEX "0x%08x%08x" #define U64_VAL(pu64) *((u32*)(pu64)+1), *((u32*)(pu64)) #include <linux/types.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/i2o.h> #include <linux/slab.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/init.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/spinlock.h> #include <linux/workqueue.h> #include <asm/io.h> #include <asm/uaccess.h> #include <asm/byteorder.h> /* Structure used to define /proc entries */ typedef struct _i2o_proc_entry_t { char *name; /* entry name */ mode_t mode; /* mode */ const struct file_operations *fops; /* open function */ } i2o_proc_entry; /* global I2O /proc/i2o entry */ static struct proc_dir_entry *i2o_proc_dir_root; /* proc OSM driver struct */ static struct i2o_driver i2o_proc_driver = { .name = OSM_NAME, }; static int print_serial_number(struct seq_file *seq, u8 * serialno, int max_len) { int i; /* 19990419 -sralston * The I2O v1.5 (and v2.0 so far) "official specification" * got serial numbers WRONG! * Apparently, and despite what Section 3.4.4 says and * Figure 3-35 shows (pg 3-39 in the pdf doc), * the convention / consensus seems to be: * + First byte is SNFormat * + Second byte is SNLen (but only if SNFormat==7 (?)) * + (v2.0) SCSI+BS may use IEEE Registered (64 or 128 bit) format */ switch (serialno[0]) { case I2O_SNFORMAT_BINARY: /* Binary */ seq_printf(seq, "0x"); for (i = 0; i < serialno[1]; i++) { seq_printf(seq, "%02X", serialno[2 + i]); } break; case I2O_SNFORMAT_ASCII: /* ASCII */ if (serialno[1] < ' ') { /* printable or SNLen? */ /* sanity */ max_len = (max_len < serialno[1]) ? max_len : serialno[1]; serialno[1 + max_len] = '\0'; /* just print it */ seq_printf(seq, "%s", &serialno[2]); } else { /* print chars for specified length */ for (i = 0; i < serialno[1]; i++) { seq_printf(seq, "%c", serialno[2 + i]); } } break; case I2O_SNFORMAT_UNICODE: /* UNICODE */ seq_printf(seq, "UNICODE Format. Can't Display\n"); break; case I2O_SNFORMAT_LAN48_MAC: /* LAN-48 MAC Address */ seq_printf(seq, "LAN-48 MAC address @ %pM", &serialno[2]); break; case I2O_SNFORMAT_WAN: /* WAN MAC Address */ /* FIXME: Figure out what a WAN access address looks like?? */ seq_printf(seq, "WAN Access Address"); break; /* plus new in v2.0 */ case I2O_SNFORMAT_LAN64_MAC: /* LAN-64 MAC Address */ /* FIXME: Figure out what a LAN-64 address really looks like?? 
*/ seq_printf(seq, "LAN-64 MAC address @ [?:%02X:%02X:?] %pM", serialno[8], serialno[9], &serialno[2]); break; case I2O_SNFORMAT_DDM: /* I2O DDM */ seq_printf(seq, "DDM: Tid=%03Xh, Rsvd=%04Xh, OrgId=%04Xh", *(u16 *) & serialno[2], *(u16 *) & serialno[4], *(u16 *) & serialno[6]); break; case I2O_SNFORMAT_IEEE_REG64: /* IEEE Registered (64-bit) */ case I2O_SNFORMAT_IEEE_REG128: /* IEEE Registered (128-bit) */ /* FIXME: Figure if this is even close?? */ seq_printf(seq, "IEEE NodeName(hi,lo)=(%08Xh:%08Xh), PortName(hi,lo)=(%08Xh:%08Xh)\n", *(u32 *) & serialno[2], *(u32 *) & serialno[6], *(u32 *) & serialno[10], *(u32 *) & serialno[14]); break; case I2O_SNFORMAT_UNKNOWN: /* Unknown 0 */ case I2O_SNFORMAT_UNKNOWN2: /* Unknown 0xff */ default: seq_printf(seq, "Unknown data format (0x%02x)", serialno[0]); break; } return 0; } /** * i2o_get_class_name - do i2o class name lookup * @class: class number * * Return a descriptive string for an i2o class. */ static const char *i2o_get_class_name(int class) { int idx = 16; static char *i2o_class_name[] = { "Executive", "Device Driver Module", "Block Device", "Tape Device", "LAN Interface", "WAN Interface", "Fibre Channel Port", "Fibre Channel Device", "SCSI Device", "ATE Port", "ATE Device", "Floppy Controller", "Floppy Device", "Secondary Bus Port", "Peer Transport Agent", "Peer Transport", "Unknown" }; switch (class & 0xfff) { case I2O_CLASS_EXECUTIVE: idx = 0; break; case I2O_CLASS_DDM: idx = 1; break; case I2O_CLASS_RANDOM_BLOCK_STORAGE: idx = 2; break; case I2O_CLASS_SEQUENTIAL_STORAGE: idx = 3; break; case I2O_CLASS_LAN: idx = 4; break; case I2O_CLASS_WAN: idx = 5; break; case I2O_CLASS_FIBRE_CHANNEL_PORT: idx = 6; break; case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL: idx = 7; break; case I2O_CLASS_SCSI_PERIPHERAL: idx = 8; break; case I2O_CLASS_ATE_PORT: idx = 9; break; case I2O_CLASS_ATE_PERIPHERAL: idx = 10; break; case I2O_CLASS_FLOPPY_CONTROLLER: idx = 11; break; case I2O_CLASS_FLOPPY_DEVICE: idx = 12; break; case I2O_CLASS_BUS_ADAPTER: idx = 13; break; case I2O_CLASS_PEER_TRANSPORT_AGENT: idx = 14; break; case I2O_CLASS_PEER_TRANSPORT: idx = 15; break; } return i2o_class_name[idx]; } #define SCSI_TABLE_SIZE 13 static char *scsi_devices[] = { "Direct-Access Read/Write", "Sequential-Access Storage", "Printer", "Processor", "WORM Device", "CD-ROM Device", "Scanner Device", "Optical Memory Device", "Medium Changer Device", "Communications Device", "Graphics Art Pre-Press Device", "Graphics Art Pre-Press Device", "Array Controller Device" }; static char *chtostr(u8 * chars, int n) { char tmp[256]; tmp[0] = 0; return strncat(tmp, (char *)chars, n); } static int i2o_report_query_status(struct seq_file *seq, int block_status, char *group) { switch (block_status) { case -ETIMEDOUT: return seq_printf(seq, "Timeout reading group %s.\n", group); case -ENOMEM: return seq_printf(seq, "No free memory to read the table.\n"); case -I2O_PARAMS_STATUS_INVALID_GROUP_ID: return seq_printf(seq, "Group %s not supported.\n", group); default: return seq_printf(seq, "Error reading group %s. 
BlockStatus 0x%02X\n", group, -block_status); } } static char *bus_strings[] = { "Local Bus", "ISA", "EISA", "MCA", "PCI", "PCMCIA", "NUBUS", "CARDBUS" }; static int i2o_seq_show_hrt(struct seq_file *seq, void *v) { struct i2o_controller *c = (struct i2o_controller *)seq->private; i2o_hrt *hrt = (i2o_hrt *) c->hrt.virt; u32 bus; int i; if (hrt->hrt_version) { seq_printf(seq, "HRT table for controller is too new a version.\n"); return 0; } seq_printf(seq, "HRT has %d entries of %d bytes each.\n", hrt->num_entries, hrt->entry_len << 2); for (i = 0; i < hrt->num_entries; i++) { seq_printf(seq, "Entry %d:\n", i); seq_printf(seq, " Adapter ID: %0#10x\n", hrt->hrt_entry[i].adapter_id); seq_printf(seq, " Controlling tid: %0#6x\n", hrt->hrt_entry[i].parent_tid); if (hrt->hrt_entry[i].bus_type != 0x80) { bus = hrt->hrt_entry[i].bus_type; seq_printf(seq, " %s Information\n", bus_strings[bus]); switch (bus) { case I2O_BUS_LOCAL: seq_printf(seq, " IOBase: %0#6x,", hrt->hrt_entry[i].bus.local_bus. LbBaseIOPort); seq_printf(seq, " MemoryBase: %0#10x\n", hrt->hrt_entry[i].bus.local_bus. LbBaseMemoryAddress); break; case I2O_BUS_ISA: seq_printf(seq, " IOBase: %0#6x,", hrt->hrt_entry[i].bus.isa_bus. IsaBaseIOPort); seq_printf(seq, " MemoryBase: %0#10x,", hrt->hrt_entry[i].bus.isa_bus. IsaBaseMemoryAddress); seq_printf(seq, " CSN: %0#4x,", hrt->hrt_entry[i].bus.isa_bus.CSN); break; case I2O_BUS_EISA: seq_printf(seq, " IOBase: %0#6x,", hrt->hrt_entry[i].bus.eisa_bus. EisaBaseIOPort); seq_printf(seq, " MemoryBase: %0#10x,", hrt->hrt_entry[i].bus.eisa_bus. EisaBaseMemoryAddress); seq_printf(seq, " Slot: %0#4x,", hrt->hrt_entry[i].bus.eisa_bus. EisaSlotNumber); break; case I2O_BUS_MCA: seq_printf(seq, " IOBase: %0#6x,", hrt->hrt_entry[i].bus.mca_bus. McaBaseIOPort); seq_printf(seq, " MemoryBase: %0#10x,", hrt->hrt_entry[i].bus.mca_bus. McaBaseMemoryAddress); seq_printf(seq, " Slot: %0#4x,", hrt->hrt_entry[i].bus.mca_bus. McaSlotNumber); break; case I2O_BUS_PCI: seq_printf(seq, " Bus: %0#4x", hrt->hrt_entry[i].bus.pci_bus. PciBusNumber); seq_printf(seq, " Dev: %0#4x", hrt->hrt_entry[i].bus.pci_bus. PciDeviceNumber); seq_printf(seq, " Func: %0#4x", hrt->hrt_entry[i].bus.pci_bus. PciFunctionNumber); seq_printf(seq, " Vendor: %0#6x", hrt->hrt_entry[i].bus.pci_bus. PciVendorID); seq_printf(seq, " Device: %0#6x\n", hrt->hrt_entry[i].bus.pci_bus. PciDeviceID); break; default: seq_printf(seq, " Unsupported Bus Type\n"); } } else seq_printf(seq, " Unknown Bus Type\n"); } return 0; } static int i2o_seq_show_lct(struct seq_file *seq, void *v) { struct i2o_controller *c = (struct i2o_controller *)seq->private; i2o_lct *lct = (i2o_lct *) c->lct; int entries; int i; #define BUS_TABLE_SIZE 3 static char *bus_ports[] = { "Generic Bus", "SCSI Bus", "Fibre Channel Bus" }; entries = (lct->table_size - 3) / 9; seq_printf(seq, "LCT contains %d %s\n", entries, entries == 1 ? 
"entry" : "entries"); if (lct->boot_tid) seq_printf(seq, "Boot Device @ ID %d\n", lct->boot_tid); seq_printf(seq, "Current Change Indicator: %#10x\n", lct->change_ind); for (i = 0; i < entries; i++) { seq_printf(seq, "Entry %d\n", i); seq_printf(seq, " Class, SubClass : %s", i2o_get_class_name(lct->lct_entry[i].class_id)); /* * Classes which we'll print subclass info for */ switch (lct->lct_entry[i].class_id & 0xFFF) { case I2O_CLASS_RANDOM_BLOCK_STORAGE: switch (lct->lct_entry[i].sub_class) { case 0x00: seq_printf(seq, ", Direct-Access Read/Write"); break; case 0x04: seq_printf(seq, ", WORM Drive"); break; case 0x05: seq_printf(seq, ", CD-ROM Drive"); break; case 0x07: seq_printf(seq, ", Optical Memory Device"); break; default: seq_printf(seq, ", Unknown (0x%02x)", lct->lct_entry[i].sub_class); break; } break; case I2O_CLASS_LAN: switch (lct->lct_entry[i].sub_class & 0xFF) { case 0x30: seq_printf(seq, ", Ethernet"); break; case 0x40: seq_printf(seq, ", 100base VG"); break; case 0x50: seq_printf(seq, ", IEEE 802.5/Token-Ring"); break; case 0x60: seq_printf(seq, ", ANSI X3T9.5 FDDI"); break; case 0x70: seq_printf(seq, ", Fibre Channel"); break; default: seq_printf(seq, ", Unknown Sub-Class (0x%02x)", lct->lct_entry[i].sub_class & 0xFF); break; } break; case I2O_CLASS_SCSI_PERIPHERAL: if (lct->lct_entry[i].sub_class < SCSI_TABLE_SIZE) seq_printf(seq, ", %s", scsi_devices[lct->lct_entry[i]. sub_class]); else seq_printf(seq, ", Unknown Device Type"); break; case I2O_CLASS_BUS_ADAPTER: if (lct->lct_entry[i].sub_class < BUS_TABLE_SIZE) seq_printf(seq, ", %s", bus_ports[lct->lct_entry[i]. sub_class]); else seq_printf(seq, ", Unknown Bus Type"); break; } seq_printf(seq, "\n"); seq_printf(seq, " Local TID : 0x%03x\n", lct->lct_entry[i].tid); seq_printf(seq, " User TID : 0x%03x\n", lct->lct_entry[i].user_tid); seq_printf(seq, " Parent TID : 0x%03x\n", lct->lct_entry[i].parent_tid); seq_printf(seq, " Identity Tag : 0x%x%x%x%x%x%x%x%x\n", lct->lct_entry[i].identity_tag[0], lct->lct_entry[i].identity_tag[1], lct->lct_entry[i].identity_tag[2], lct->lct_entry[i].identity_tag[3], lct->lct_entry[i].identity_tag[4], lct->lct_entry[i].identity_tag[5], lct->lct_entry[i].identity_tag[6], lct->lct_entry[i].identity_tag[7]); seq_printf(seq, " Change Indicator : %0#10x\n", lct->lct_entry[i].change_ind); seq_printf(seq, " Event Capab Mask : %0#10x\n", lct->lct_entry[i].device_flags); } return 0; } static int i2o_seq_show_status(struct seq_file *seq, void *v) { struct i2o_controller *c = (struct i2o_controller *)seq->private; char prodstr[25]; int version; i2o_status_block *sb = c->status_block.virt; i2o_status_get(c); // reread the status block seq_printf(seq, "Organization ID : %0#6x\n", sb->org_id); version = sb->i2o_version; /* FIXME for Spec 2.0 if (version == 0x02) { seq_printf(seq, "Lowest I2O version supported: "); switch(workspace[2]) { case 0x00: seq_printf(seq, "1.0\n"); break; case 0x01: seq_printf(seq, "1.5\n"); break; case 0x02: seq_printf(seq, "2.0\n"); break; } seq_printf(seq, "Highest I2O version supported: "); switch(workspace[3]) { case 0x00: seq_printf(seq, "1.0\n"); break; case 0x01: seq_printf(seq, "1.5\n"); break; case 0x02: seq_printf(seq, "2.0\n"); break; } } */ seq_printf(seq, "IOP ID : %0#5x\n", sb->iop_id); seq_printf(seq, "Host Unit ID : %0#6x\n", sb->host_unit_id); seq_printf(seq, "Segment Number : %0#5x\n", sb->segment_number); seq_printf(seq, "I2O version : "); switch (version) { case 0x00: seq_printf(seq, "1.0\n"); break; case 0x01: seq_printf(seq, "1.5\n"); break; case 0x02: 
seq_printf(seq, "2.0\n"); break; default: seq_printf(seq, "Unknown version\n"); } seq_printf(seq, "IOP State : "); switch (sb->iop_state) { case 0x01: seq_printf(seq, "INIT\n"); break; case 0x02: seq_printf(seq, "RESET\n"); break; case 0x04: seq_printf(seq, "HOLD\n"); break; case 0x05: seq_printf(seq, "READY\n"); break; case 0x08: seq_printf(seq, "OPERATIONAL\n"); break; case 0x10: seq_printf(seq, "FAILED\n"); break; case 0x11: seq_printf(seq, "FAULTED\n"); break; default: seq_printf(seq, "Unknown\n"); break; } seq_printf(seq, "Messenger Type : "); switch (sb->msg_type) { case 0x00: seq_printf(seq, "Memory mapped\n"); break; case 0x01: seq_printf(seq, "Memory mapped only\n"); break; case 0x02: seq_printf(seq, "Remote only\n"); break; case 0x03: seq_printf(seq, "Memory mapped and remote\n"); break; default: seq_printf(seq, "Unknown\n"); } seq_printf(seq, "Inbound Frame Size : %d bytes\n", sb->inbound_frame_size << 2); seq_printf(seq, "Max Inbound Frames : %d\n", sb->max_inbound_frames); seq_printf(seq, "Current Inbound Frames : %d\n", sb->cur_inbound_frames); seq_printf(seq, "Max Outbound Frames : %d\n", sb->max_outbound_frames); /* Spec doesn't say if NULL terminated or not... */ memcpy(prodstr, sb->product_id, 24); prodstr[24] = '\0'; seq_printf(seq, "Product ID : %s\n", prodstr); seq_printf(seq, "Expected LCT Size : %d bytes\n", sb->expected_lct_size); seq_printf(seq, "IOP Capabilities\n"); seq_printf(seq, " Context Field Size Support : "); switch (sb->iop_capabilities & 0x0000003) { case 0: seq_printf(seq, "Supports only 32-bit context fields\n"); break; case 1: seq_printf(seq, "Supports only 64-bit context fields\n"); break; case 2: seq_printf(seq, "Supports 32-bit and 64-bit context fields, " "but not concurrently\n"); break; case 3: seq_printf(seq, "Supports 32-bit and 64-bit context fields " "concurrently\n"); break; default: seq_printf(seq, "0x%08x\n", sb->iop_capabilities); } seq_printf(seq, " Current Context Field Size : "); switch (sb->iop_capabilities & 0x0000000C) { case 0: seq_printf(seq, "not configured\n"); break; case 4: seq_printf(seq, "Supports only 32-bit context fields\n"); break; case 8: seq_printf(seq, "Supports only 64-bit context fields\n"); break; case 12: seq_printf(seq, "Supports both 32-bit or 64-bit context fields " "concurrently\n"); break; default: seq_printf(seq, "\n"); } seq_printf(seq, " Inbound Peer Support : %s\n", (sb-> iop_capabilities & 0x00000010) ? "Supported" : "Not supported"); seq_printf(seq, " Outbound Peer Support : %s\n", (sb-> iop_capabilities & 0x00000020) ? "Supported" : "Not supported"); seq_printf(seq, " Peer to Peer Support : %s\n", (sb-> iop_capabilities & 0x00000040) ? 
"Supported" : "Not supported"); seq_printf(seq, "Desired private memory size : %d kB\n", sb->desired_mem_size >> 10); seq_printf(seq, "Allocated private memory size : %d kB\n", sb->current_mem_size >> 10); seq_printf(seq, "Private memory base address : %0#10x\n", sb->current_mem_base); seq_printf(seq, "Desired private I/O size : %d kB\n", sb->desired_io_size >> 10); seq_printf(seq, "Allocated private I/O size : %d kB\n", sb->current_io_size >> 10); seq_printf(seq, "Private I/O base address : %0#10x\n", sb->current_io_base); return 0; } static int i2o_seq_show_hw(struct seq_file *seq, void *v) { struct i2o_controller *c = (struct i2o_controller *)seq->private; static u32 work32[5]; static u8 *work8 = (u8 *) work32; static u16 *work16 = (u16 *) work32; int token; u32 hwcap; static char *cpu_table[] = { "Intel 80960 series", "AMD2900 series", "Motorola 68000 series", "ARM series", "MIPS series", "Sparc series", "PowerPC series", "Intel x86 series" }; token = i2o_parm_field_get(c->exec, 0x0000, -1, &work32, sizeof(work32)); if (token < 0) { i2o_report_query_status(seq, token, "0x0000 IOP Hardware"); return 0; } seq_printf(seq, "I2O Vendor ID : %0#6x\n", work16[0]); seq_printf(seq, "Product ID : %0#6x\n", work16[1]); seq_printf(seq, "CPU : "); if (work8[16] > 8) seq_printf(seq, "Unknown\n"); else seq_printf(seq, "%s\n", cpu_table[work8[16]]); /* Anyone using ProcessorVersion? */ seq_printf(seq, "RAM : %dkB\n", work32[1] >> 10); seq_printf(seq, "Non-Volatile Mem : %dkB\n", work32[2] >> 10); hwcap = work32[3]; seq_printf(seq, "Capabilities : 0x%08x\n", hwcap); seq_printf(seq, " [%s] Self booting\n", (hwcap & 0x00000001) ? "+" : "-"); seq_printf(seq, " [%s] Upgradable IRTOS\n", (hwcap & 0x00000002) ? "+" : "-"); seq_printf(seq, " [%s] Supports downloading DDMs\n", (hwcap & 0x00000004) ? "+" : "-"); seq_printf(seq, " [%s] Supports installing DDMs\n", (hwcap & 0x00000008) ? "+" : "-"); seq_printf(seq, " [%s] Battery-backed RAM\n", (hwcap & 0x00000010) ? 
"+" : "-"); return 0; } /* Executive group 0003h - Executing DDM List (table) */ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v) { struct i2o_controller *c = (struct i2o_controller *)seq->private; int token; int i; typedef struct _i2o_exec_execute_ddm_table { u16 ddm_tid; u8 module_type; u8 reserved; u16 i2o_vendor_id; u16 module_id; u8 module_name_version[28]; u32 data_size; u32 code_size; } i2o_exec_execute_ddm_table; struct { u16 result_count; u16 pad; u16 block_size; u8 block_status; u8 error_info_size; u16 row_count; u16 more_flag; i2o_exec_execute_ddm_table ddm_table[I2O_MAX_MODULES]; } *result; i2o_exec_execute_ddm_table ddm_table; result = kmalloc(sizeof(*result), GFP_KERNEL); if (!result) return -ENOMEM; token = i2o_parm_table_get(c->exec, I2O_PARAMS_TABLE_GET, 0x0003, -1, NULL, 0, result, sizeof(*result)); if (token < 0) { i2o_report_query_status(seq, token, "0x0003 Executing DDM List"); goto out; } seq_printf(seq, "Tid Module_type Vendor Mod_id Module_name Vrs Data_size Code_size\n"); ddm_table = result->ddm_table[0]; for (i = 0; i < result->row_count; ddm_table = result->ddm_table[++i]) { seq_printf(seq, "0x%03x ", ddm_table.ddm_tid & 0xFFF); switch (ddm_table.module_type) { case 0x01: seq_printf(seq, "Downloaded DDM "); break; case 0x22: seq_printf(seq, "Embedded DDM "); break; default: seq_printf(seq, " "); } seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id); seq_printf(seq, "%-#8x", ddm_table.module_id); seq_printf(seq, "%-29s", chtostr(ddm_table.module_name_version, 28)); seq_printf(seq, "%9d ", ddm_table.data_size); seq_printf(seq, "%8d", ddm_table.code_size); seq_printf(seq, "\n"); } out: kfree(result); return 0; } /* Executive group 0004h - Driver Store (scalar) */ static int i2o_seq_show_driver_store(struct seq_file *seq, void *v) { struct i2o_controller *c = (struct i2o_controller *)seq->private; u32 work32[8]; int token; token = i2o_parm_field_get(c->exec, 0x0004, -1, &work32, sizeof(work32)); if (token < 0) { i2o_report_query_status(seq, token, "0x0004 Driver Store"); return 0; } seq_printf(seq, "Module limit : %d\n" "Module count : %d\n" "Current space : %d kB\n" "Free space : %d kB\n", work32[0], work32[1], work32[2] >> 10, work32[3] >> 10); return 0; } /* Executive group 0005h - Driver Store Table (table) */ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v) { typedef struct _i2o_driver_store { u16 stored_ddm_index; u8 module_type; u8 reserved; u16 i2o_vendor_id; u16 module_id; u8 module_name_version[28]; u8 date[8]; u32 module_size; u32 mpb_size; u32 module_flags; } i2o_driver_store_table; struct i2o_controller *c = (struct i2o_controller *)seq->private; int token; int i; typedef struct { u16 result_count; u16 pad; u16 block_size; u8 block_status; u8 error_info_size; u16 row_count; u16 more_flag; i2o_driver_store_table dst[I2O_MAX_MODULES]; } i2o_driver_result_table; i2o_driver_result_table *result; i2o_driver_store_table *dst; result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL); if (result == NULL) return -ENOMEM; token = i2o_parm_table_get(c->exec, I2O_PARAMS_TABLE_GET, 0x0005, -1, NULL, 0, result, sizeof(*result)); if (token < 0) { i2o_report_query_status(seq, token, "0x0005 DRIVER STORE TABLE"); kfree(result); return 0; } seq_printf(seq, "# Module_type Vendor Mod_id Module_name Vrs" "Date Mod_size Par_size Flags\n"); for (i = 0, dst = &result->dst[0]; i < result->row_count; dst = &result->dst[++i]) { seq_printf(seq, "%-3d", dst->stored_ddm_index); switch (dst->module_type) { case 0x01: seq_printf(seq, "Downloaded 
DDM "); break; case 0x22: seq_printf(seq, "Embedded DDM "); break; default: seq_printf(seq, " "); } seq_printf(seq, "%-#7x", dst->i2o_vendor_id); seq_printf(seq, "%-#8x", dst->module_id); seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28)); seq_printf(seq, "%-9s", chtostr(dst->date, 8)); seq_printf(seq, "%8d ", dst->module_size); seq_printf(seq, "%8d ", dst->mpb_size); seq_printf(seq, "0x%04x", dst->module_flags); seq_printf(seq, "\n"); } kfree(result); return 0; } /* Generic group F000h - Params Descriptor (table) */ static int i2o_seq_show_groups(struct seq_file *seq, void *v) { struct i2o_device *d = (struct i2o_device *)seq->private; int token; int i; u8 properties; typedef struct _i2o_group_info { u16 group_number; u16 field_count; u16 row_count; u8 properties; u8 reserved; } i2o_group_info; struct { u16 result_count; u16 pad; u16 block_size; u8 block_status; u8 error_info_size; u16 row_count; u16 more_flag; i2o_group_info group[256]; } *result; result = kmalloc(sizeof(*result), GFP_KERNEL); if (!result) return -ENOMEM; token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF000, -1, NULL, 0, result, sizeof(*result)); if (token < 0) { i2o_report_query_status(seq, token, "0xF000 Params Descriptor"); goto out; } seq_printf(seq, "# Group FieldCount RowCount Type Add Del Clear\n"); for (i = 0; i < result->row_count; i++) { seq_printf(seq, "%-3d", i); seq_printf(seq, "0x%04X ", result->group[i].group_number); seq_printf(seq, "%10d ", result->group[i].field_count); seq_printf(seq, "%8d ", result->group[i].row_count); properties = result->group[i].properties; if (properties & 0x1) seq_printf(seq, "Table "); else seq_printf(seq, "Scalar "); if (properties & 0x2) seq_printf(seq, " + "); else seq_printf(seq, " - "); if (properties & 0x4) seq_printf(seq, " + "); else seq_printf(seq, " - "); if (properties & 0x8) seq_printf(seq, " + "); else seq_printf(seq, " - "); seq_printf(seq, "\n"); } if (result->more_flag) seq_printf(seq, "There is more...\n"); out: kfree(result); return 0; } /* Generic group F001h - Physical Device Table (table) */ static int i2o_seq_show_phys_device(struct seq_file *seq, void *v) { struct i2o_device *d = (struct i2o_device *)seq->private; int token; int i; struct { u16 result_count; u16 pad; u16 block_size; u8 block_status; u8 error_info_size; u16 row_count; u16 more_flag; u32 adapter_id[64]; } result; token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF001, -1, NULL, 0, &result, sizeof(result)); if (token < 0) { i2o_report_query_status(seq, token, "0xF001 Physical Device Table"); return 0; } if (result.row_count) seq_printf(seq, "# AdapterId\n"); for (i = 0; i < result.row_count; i++) { seq_printf(seq, "%-2d", i); seq_printf(seq, "%#7x\n", result.adapter_id[i]); } if (result.more_flag) seq_printf(seq, "There is more...\n"); return 0; } /* Generic group F002h - Claimed Table (table) */ static int i2o_seq_show_claimed(struct seq_file *seq, void *v) { struct i2o_device *d = (struct i2o_device *)seq->private; int token; int i; struct { u16 result_count; u16 pad; u16 block_size; u8 block_status; u8 error_info_size; u16 row_count; u16 more_flag; u16 claimed_tid[64]; } result; token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF002, -1, NULL, 0, &result, sizeof(result)); if (token < 0) { i2o_report_query_status(seq, token, "0xF002 Claimed Table"); return 0; } if (result.row_count) seq_printf(seq, "# ClaimedTid\n"); for (i = 0; i < result.row_count; i++) { seq_printf(seq, "%-2d", i); seq_printf(seq, "%#7x\n", result.claimed_tid[i]); } if (result.more_flag) 
seq_printf(seq, "There is more...\n"); return 0; } /* Generic group F003h - User Table (table) */ static int i2o_seq_show_users(struct seq_file *seq, void *v) { struct i2o_device *d = (struct i2o_device *)seq->private; int token; int i; typedef struct _i2o_user_table { u16 instance; u16 user_tid; u8 claim_type; u8 reserved1; u16 reserved2; } i2o_user_table; struct { u16 result_count; u16 pad; u16 block_size; u8 block_status; u8 error_info_size; u16 row_count; u16 more_flag; i2o_user_table user[64]; } *result; result = kmalloc(sizeof(*result), GFP_KERNEL); if (!result) return -ENOMEM; token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF003, -1, NULL, 0, result, sizeof(*result)); if (token < 0) { i2o_report_query_status(seq, token, "0xF003 User Table"); goto out; } seq_printf(seq, "# Instance UserTid ClaimType\n"); for (i = 0; i < result->row_count; i++) { seq_printf(seq, "%-3d", i); seq_printf(seq, "%#8x ", result->user[i].instance); seq_printf(seq, "%#7x ", result->user[i].user_tid); seq_printf(seq, "%#9x\n", result->user[i].claim_type); } if (result->more_flag) seq_printf(seq, "There is more...\n"); out: kfree(result); return 0; } /* Generic group F005h - Private message extensions (table) (optional) */ static int i2o_seq_show_priv_msgs(struct seq_file *seq, void *v) { struct i2o_device *d = (struct i2o_device *)seq->private; int token; int i; typedef struct _i2o_private { u16 ext_instance; u16 organization_id; u16 x_function_code; } i2o_private; struct { u16 result_count; u16 pad; u16 block_size; u8 block_status; u8 error_info_size; u16 row_count; u16 more_flag; i2o_private extension[64]; } result; token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF000, -1, NULL, 0, &result, sizeof(result)); if (token < 0) { i2o_report_query_status(seq, token, "0xF005 Private Message Extensions (optional)"); return 0; } seq_printf(seq, "Instance# OrgId FunctionCode\n"); for (i = 0; i < result.row_count; i++) { seq_printf(seq, "%0#9x ", result.extension[i].ext_instance); seq_printf(seq, "%0#6x ", result.extension[i].organization_id); seq_printf(seq, "%0#6x", result.extension[i].x_function_code); seq_printf(seq, "\n"); } if (result.more_flag) seq_printf(seq, "There is more...\n"); return 0; } /* Generic group F006h - Authorized User Table (table) */ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v) { struct i2o_device *d = (struct i2o_device *)seq->private; int token; int i; struct { u16 result_count; u16 pad; u16 block_size; u8 block_status; u8 error_info_size; u16 row_count; u16 more_flag; u32 alternate_tid[64]; } result; token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF006, -1, NULL, 0, &result, sizeof(result)); if (token < 0) { i2o_report_query_status(seq, token, "0xF006 Autohorized User Table"); return 0; } if (result.row_count) seq_printf(seq, "# AlternateTid\n"); for (i = 0; i < result.row_count; i++) { seq_printf(seq, "%-2d", i); seq_printf(seq, "%#7x ", result.alternate_tid[i]); } if (result.more_flag) seq_printf(seq, "There is more...\n"); return 0; } /* Generic group F100h - Device Identity (scalar) */ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v) { struct i2o_device *d = (struct i2o_device *)seq->private; static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number // == (allow) 512d bytes (max) static u16 *work16 = (u16 *) work32; int token; token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32)); if (token < 0) { i2o_report_query_status(seq, token, "0xF100 Device Identity"); return 0; } seq_printf(seq, 
"Device Class : %s\n", i2o_get_class_name(work16[0])); seq_printf(seq, "Owner TID : %0#5x\n", work16[2]); seq_printf(seq, "Parent TID : %0#5x\n", work16[3]); seq_printf(seq, "Vendor info : %s\n", chtostr((u8 *) (work32 + 2), 16)); seq_printf(seq, "Product info : %s\n", chtostr((u8 *) (work32 + 6), 16)); seq_printf(seq, "Description : %s\n", chtostr((u8 *) (work32 + 10), 16)); seq_printf(seq, "Product rev. : %s\n", chtostr((u8 *) (work32 + 14), 8)); seq_printf(seq, "Serial number : "); print_serial_number(seq, (u8 *) (work32 + 16), /* allow for SNLen plus * possible trailing '\0' */ sizeof(work32) - (16 * sizeof(u32)) - 2); seq_printf(seq, "\n"); return 0; } static int i2o_seq_show_dev_name(struct seq_file *seq, void *v) { struct i2o_device *d = (struct i2o_device *)seq->private; seq_printf(seq, "%s\n", dev_name(&d->device)); return 0; } /* Generic group F101h - DDM Identity (scalar) */ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v) { struct i2o_device *d = (struct i2o_device *)seq->private; int token; struct { u16 ddm_tid; u8 module_name[24]; u8 module_rev[8]; u8 sn_format; u8 serial_number[12]; u8 pad[256]; // allow up to 256 byte (max) serial number } result; token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result)); if (token < 0) { i2o_report_query_status(seq, token, "0xF101 DDM Identity"); return 0; } seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid); seq_printf(seq, "Module name : %s\n", chtostr(result.module_name, 24)); seq_printf(seq, "Module revision : %s\n", chtostr(result.module_rev, 8)); seq_printf(seq, "Serial number : "); print_serial_number(seq, result.serial_number, sizeof(result) - 36); /* allow for SNLen plus possible trailing '\0' */ seq_printf(seq, "\n"); return 0; } /* Generic group F102h - User Information (scalar) */ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v) { struct i2o_device *d = (struct i2o_device *)seq->private; int token; struct { u8 device_name[64]; u8 service_name[64]; u8 physical_location[64]; u8 instance_number[4]; } result; token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result)); if (token < 0) { i2o_report_query_status(seq, token, "0xF102 User Information"); return 0; } seq_printf(seq, "Device name : %s\n", chtostr(result.device_name, 64)); seq_printf(seq, "Service name : %s\n", chtostr(result.service_name, 64)); seq_printf(seq, "Physical name : %s\n", chtostr(result.physical_location, 64)); seq_printf(seq, "Instance number : %s\n", chtostr(result.instance_number, 4)); return 0; } /* Generic group F103h - SGL Operating Limits (scalar) */ static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v) { struct i2o_device *d = (struct i2o_device *)seq->private; static u32 work32[12]; static u16 *work16 = (u16 *) work32; static u8 *work8 = (u8 *) work32; int token; token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32)); if (token < 0) { i2o_report_query_status(seq, token, "0xF103 SGL Operating Limits"); return 0; } seq_printf(seq, "SGL chain size : %d\n", work32[0]); seq_printf(seq, "Max SGL chain size : %d\n", work32[1]); seq_printf(seq, "SGL chain size target : %d\n", work32[2]); seq_printf(seq, "SGL frag count : %d\n", work16[6]); seq_printf(seq, "Max SGL frag count : %d\n", work16[7]); seq_printf(seq, "SGL frag count target : %d\n", work16[8]); /* FIXME if (d->i2oversion == 0x02) { */ seq_printf(seq, "SGL data alignment : %d\n", work16[8]); seq_printf(seq, "SGL addr limit : %d\n", work8[20]); seq_printf(seq, "SGL addr sizes supported : "); if (work8[21] & 0x01) 
seq_printf(seq, "32 bit "); if (work8[21] & 0x02) seq_printf(seq, "64 bit "); if (work8[21] & 0x04) seq_printf(seq, "96 bit "); if (work8[21] & 0x08) seq_printf(seq, "128 bit "); seq_printf(seq, "\n"); /* } */ return 0; } /* Generic group F200h - Sensors (scalar) */ static int i2o_seq_show_sensors(struct seq_file *seq, void *v) { struct i2o_device *d = (struct i2o_device *)seq->private; int token; struct { u16 sensor_instance; u8 component; u16 component_instance; u8 sensor_class; u8 sensor_type; u8 scaling_exponent; u32 actual_reading; u32 minimum_reading; u32 low2lowcat_treshold; u32 lowcat2low_treshold; u32 lowwarn2low_treshold; u32 low2lowwarn_treshold; u32 norm2lowwarn_treshold; u32 lowwarn2norm_treshold; u32 nominal_reading; u32 hiwarn2norm_treshold; u32 norm2hiwarn_treshold; u32 high2hiwarn_treshold; u32 hiwarn2high_treshold; u32 hicat2high_treshold; u32 hi2hicat_treshold; u32 maximum_reading; u8 sensor_state; u16 event_enable; } result; token = i2o_parm_field_get(d, 0xF200, -1, &result, sizeof(result)); if (token < 0) { i2o_report_query_status(seq, token, "0xF200 Sensors (optional)"); return 0; } seq_printf(seq, "Sensor instance : %d\n", result.sensor_instance); seq_printf(seq, "Component : %d = ", result.component); switch (result.component) { case 0: seq_printf(seq, "Other"); break; case 1: seq_printf(seq, "Planar logic Board"); break; case 2: seq_printf(seq, "CPU"); break; case 3: seq_printf(seq, "Chassis"); break; case 4: seq_printf(seq, "Power Supply"); break; case 5: seq_printf(seq, "Storage"); break; case 6: seq_printf(seq, "External"); break; } seq_printf(seq, "\n"); seq_printf(seq, "Component instance : %d\n", result.component_instance); seq_printf(seq, "Sensor class : %s\n", result.sensor_class ? "Analog" : "Digital"); seq_printf(seq, "Sensor type : %d = ", result.sensor_type); switch (result.sensor_type) { case 0: seq_printf(seq, "Other\n"); break; case 1: seq_printf(seq, "Thermal\n"); break; case 2: seq_printf(seq, "DC voltage (DC volts)\n"); break; case 3: seq_printf(seq, "AC voltage (AC volts)\n"); break; case 4: seq_printf(seq, "DC current (DC amps)\n"); break; case 5: seq_printf(seq, "AC current (AC volts)\n"); break; case 6: seq_printf(seq, "Door open\n"); break; case 7: seq_printf(seq, "Fan operational\n"); break; } seq_printf(seq, "Scaling exponent : %d\n", result.scaling_exponent); seq_printf(seq, "Actual reading : %d\n", result.actual_reading); seq_printf(seq, "Minimum reading : %d\n", result.minimum_reading); seq_printf(seq, "Low2LowCat treshold : %d\n", result.low2lowcat_treshold); seq_printf(seq, "LowCat2Low treshold : %d\n", result.lowcat2low_treshold); seq_printf(seq, "LowWarn2Low treshold : %d\n", result.lowwarn2low_treshold); seq_printf(seq, "Low2LowWarn treshold : %d\n", result.low2lowwarn_treshold); seq_printf(seq, "Norm2LowWarn treshold : %d\n", result.norm2lowwarn_treshold); seq_printf(seq, "LowWarn2Norm treshold : %d\n", result.lowwarn2norm_treshold); seq_printf(seq, "Nominal reading : %d\n", result.nominal_reading); seq_printf(seq, "HiWarn2Norm treshold : %d\n", result.hiwarn2norm_treshold); seq_printf(seq, "Norm2HiWarn treshold : %d\n", result.norm2hiwarn_treshold); seq_printf(seq, "High2HiWarn treshold : %d\n", result.high2hiwarn_treshold); seq_printf(seq, "HiWarn2High treshold : %d\n", result.hiwarn2high_treshold); seq_printf(seq, "HiCat2High treshold : %d\n", result.hicat2high_treshold); seq_printf(seq, "High2HiCat treshold : %d\n", result.hi2hicat_treshold); seq_printf(seq, "Maximum reading : %d\n", result.maximum_reading); seq_printf(seq, 
"Sensor state : %d = ", result.sensor_state); switch (result.sensor_state) { case 0: seq_printf(seq, "Normal\n"); break; case 1: seq_printf(seq, "Abnormal\n"); break; case 2: seq_printf(seq, "Unknown\n"); break; case 3: seq_printf(seq, "Low Catastrophic (LoCat)\n"); break; case 4: seq_printf(seq, "Low (Low)\n"); break; case 5: seq_printf(seq, "Low Warning (LoWarn)\n"); break; case 6: seq_printf(seq, "High Warning (HiWarn)\n"); break; case 7: seq_printf(seq, "High (High)\n"); break; case 8: seq_printf(seq, "High Catastrophic (HiCat)\n"); break; } seq_printf(seq, "Event_enable : 0x%02X\n", result.event_enable); seq_printf(seq, " [%s] Operational state change. \n", (result.event_enable & 0x01) ? "+" : "-"); seq_printf(seq, " [%s] Low catastrophic. \n", (result.event_enable & 0x02) ? "+" : "-"); seq_printf(seq, " [%s] Low reading. \n", (result.event_enable & 0x04) ? "+" : "-"); seq_printf(seq, " [%s] Low warning. \n", (result.event_enable & 0x08) ? "+" : "-"); seq_printf(seq, " [%s] Change back to normal from out of range state. \n", (result.event_enable & 0x10) ? "+" : "-"); seq_printf(seq, " [%s] High warning. \n", (result.event_enable & 0x20) ? "+" : "-"); seq_printf(seq, " [%s] High reading. \n", (result.event_enable & 0x40) ? "+" : "-"); seq_printf(seq, " [%s] High catastrophic. \n", (result.event_enable & 0x80) ? "+" : "-"); return 0; } static int i2o_seq_open_hrt(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_hrt, PDE(inode)->data); }; static int i2o_seq_open_lct(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_lct, PDE(inode)->data); }; static int i2o_seq_open_status(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_status, PDE(inode)->data); }; static int i2o_seq_open_hw(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_hw, PDE(inode)->data); }; static int i2o_seq_open_ddm_table(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_ddm_table, PDE(inode)->data); }; static int i2o_seq_open_driver_store(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_driver_store, PDE(inode)->data); }; static int i2o_seq_open_drivers_stored(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_drivers_stored, PDE(inode)->data); }; static int i2o_seq_open_groups(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_groups, PDE(inode)->data); }; static int i2o_seq_open_phys_device(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_phys_device, PDE(inode)->data); }; static int i2o_seq_open_claimed(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_claimed, PDE(inode)->data); }; static int i2o_seq_open_users(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_users, PDE(inode)->data); }; static int i2o_seq_open_priv_msgs(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_priv_msgs, PDE(inode)->data); }; static int i2o_seq_open_authorized_users(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_authorized_users, PDE(inode)->data); }; static int i2o_seq_open_dev_identity(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_dev_identity, PDE(inode)->data); }; static int i2o_seq_open_ddm_identity(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_ddm_identity, PDE(inode)->data); }; 
static int i2o_seq_open_uinfo(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_uinfo, PDE(inode)->data); }; static int i2o_seq_open_sgl_limits(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_sgl_limits, PDE(inode)->data); }; static int i2o_seq_open_sensors(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_sensors, PDE(inode)->data); }; static int i2o_seq_open_dev_name(struct inode *inode, struct file *file) { return single_open(file, i2o_seq_show_dev_name, PDE(inode)->data); }; static const struct file_operations i2o_seq_fops_lct = { .open = i2o_seq_open_lct, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_hrt = { .open = i2o_seq_open_hrt, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_status = { .open = i2o_seq_open_status, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_hw = { .open = i2o_seq_open_hw, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_ddm_table = { .open = i2o_seq_open_ddm_table, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_driver_store = { .open = i2o_seq_open_driver_store, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_drivers_stored = { .open = i2o_seq_open_drivers_stored, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_groups = { .open = i2o_seq_open_groups, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_phys_device = { .open = i2o_seq_open_phys_device, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_claimed = { .open = i2o_seq_open_claimed, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_users = { .open = i2o_seq_open_users, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_priv_msgs = { .open = i2o_seq_open_priv_msgs, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_authorized_users = { .open = i2o_seq_open_authorized_users, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_dev_name = { .open = i2o_seq_open_dev_name, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_dev_identity = { .open = i2o_seq_open_dev_identity, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_ddm_identity = { .open = i2o_seq_open_ddm_identity, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_uinfo = { .open = i2o_seq_open_uinfo, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations i2o_seq_fops_sgl_limits = { .open = i2o_seq_open_sgl_limits, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations 
i2o_seq_fops_sensors = {
	.open = i2o_seq_open_sensors,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/*
 * IOP specific entries...write field just in case someone
 * ever wants one.
 */
static i2o_proc_entry i2o_proc_generic_iop_entries[] = {
	{"hrt", S_IFREG | S_IRUGO, &i2o_seq_fops_hrt},
	{"lct", S_IFREG | S_IRUGO, &i2o_seq_fops_lct},
	{"status", S_IFREG | S_IRUGO, &i2o_seq_fops_status},
	{"hw", S_IFREG | S_IRUGO, &i2o_seq_fops_hw},
	{"ddm_table", S_IFREG | S_IRUGO, &i2o_seq_fops_ddm_table},
	{"driver_store", S_IFREG | S_IRUGO, &i2o_seq_fops_driver_store},
	{"drivers_stored", S_IFREG | S_IRUGO, &i2o_seq_fops_drivers_stored},
	{NULL, 0, NULL}
};

/*
 * Device specific entries
 */
static i2o_proc_entry generic_dev_entries[] = {
	{"groups", S_IFREG | S_IRUGO, &i2o_seq_fops_groups},
	{"phys_dev", S_IFREG | S_IRUGO, &i2o_seq_fops_phys_device},
	{"claimed", S_IFREG | S_IRUGO, &i2o_seq_fops_claimed},
	{"users", S_IFREG | S_IRUGO, &i2o_seq_fops_users},
	{"priv_msgs", S_IFREG | S_IRUGO, &i2o_seq_fops_priv_msgs},
	{"authorized_users", S_IFREG | S_IRUGO, &i2o_seq_fops_authorized_users},
	{"dev_identity", S_IFREG | S_IRUGO, &i2o_seq_fops_dev_identity},
	{"ddm_identity", S_IFREG | S_IRUGO, &i2o_seq_fops_ddm_identity},
	{"user_info", S_IFREG | S_IRUGO, &i2o_seq_fops_uinfo},
	{"sgl_limits", S_IFREG | S_IRUGO, &i2o_seq_fops_sgl_limits},
	{"sensors", S_IFREG | S_IRUGO, &i2o_seq_fops_sensors},
	{NULL, 0, NULL}
};

/*
 * Storage unit specific entries (SCSI Periph, BS) with device names
 */
static i2o_proc_entry rbs_dev_entries[] = {
	{"dev_name", S_IFREG | S_IRUGO, &i2o_seq_fops_dev_name},
	{NULL, 0, NULL}
};

/**
 * i2o_proc_create_entries - Creates proc dir entries
 * @dir: proc dir entry under which the entries should be placed
 * @i2o_pe: pointer to the entries which should be added
 * @data: pointer to I2O controller or device
 *
 * Create proc dir entries for an I2O controller or I2O device.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_proc_create_entries(struct proc_dir_entry *dir,
				   i2o_proc_entry * i2o_pe, void *data)
{
	struct proc_dir_entry *tmp;

	while (i2o_pe->name) {
		tmp = proc_create_data(i2o_pe->name, i2o_pe->mode, dir,
				       i2o_pe->fops, data);
		if (!tmp)
			return -1;

		i2o_pe++;
	}

	return 0;
}

/**
 * i2o_proc_subdir_remove - Remove child entries from a proc entry
 * @dir: proc dir entry from which the children should be removed
 *
 * Iterate over each i2o proc entry under dir and remove it. If the child
 * also has entries, remove them too.
 */
static void i2o_proc_subdir_remove(struct proc_dir_entry *dir)
{
	struct proc_dir_entry *pe, *tmp;
	pe = dir->subdir;
	while (pe) {
		tmp = pe->next;
		i2o_proc_subdir_remove(pe);
		remove_proc_entry(pe->name, dir);
		pe = tmp;
	}
};

/**
 * i2o_proc_device_add - Add an I2O device to the proc dir
 * @dir: proc dir entry to which the device should be added
 * @dev: I2O device which should be added
 *
 * Add an I2O device to the proc dir entry dir and create the entries for
 * the device depending on the class of the I2O device.
 */
static void i2o_proc_device_add(struct proc_dir_entry *dir,
				struct i2o_device *dev)
{
	char buff[10];
	struct proc_dir_entry *devdir;
	i2o_proc_entry *i2o_pe = NULL;

	sprintf(buff, "%03x", dev->lct_data.tid);

	osm_debug("adding device /proc/i2o/%s/%s\n", dev->iop->name, buff);

	devdir = proc_mkdir(buff, dir);
	if (!devdir) {
		osm_warn("Could not allocate procdir!\n");
		return;
	}

	devdir->data = dev;

	i2o_proc_create_entries(devdir, generic_dev_entries, dev);

	/* Inform core that we want updates about this device's status */
	switch (dev->lct_data.class_id) {
	case I2O_CLASS_SCSI_PERIPHERAL:
	case I2O_CLASS_RANDOM_BLOCK_STORAGE:
		i2o_pe = rbs_dev_entries;
		break;
	default:
		break;
	}
	if (i2o_pe)
		i2o_proc_create_entries(devdir, i2o_pe, dev);
}

/**
 * i2o_proc_iop_add - Add an I2O controller to the i2o proc tree
 * @dir: parent proc dir entry
 * @c: I2O controller which should be added
 *
 * Add the entries to the parent proc dir entry. Also each device is added
 * to the controllers proc dir entry.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_proc_iop_add(struct proc_dir_entry *dir,
			    struct i2o_controller *c)
{
	struct proc_dir_entry *iopdir;
	struct i2o_device *dev;

	osm_debug("adding IOP /proc/i2o/%s\n", c->name);

	iopdir = proc_mkdir(c->name, dir);
	if (!iopdir)
		return -1;

	iopdir->data = c;

	i2o_proc_create_entries(iopdir, i2o_proc_generic_iop_entries, c);

	list_for_each_entry(dev, &c->devices, list)
	    i2o_proc_device_add(iopdir, dev);

	return 0;
}

/**
 * i2o_proc_iop_remove - Removes an I2O controller from the i2o proc tree
 * @dir: parent proc dir entry
 * @c: I2O controller which should be removed
 *
 * Iterate over each i2o proc entry and search for controller c. If it is
 * found, remove it from the tree.
 */
static void i2o_proc_iop_remove(struct proc_dir_entry *dir,
				struct i2o_controller *c)
{
	struct proc_dir_entry *pe, *tmp;

	pe = dir->subdir;
	while (pe) {
		tmp = pe->next;
		if (pe->data == c) {
			osm_debug("removing IOP /proc/i2o/%s\n", c->name);
			i2o_proc_subdir_remove(pe);
			remove_proc_entry(pe->name, dir);
		}
		pe = tmp;
	}
}

/**
 * i2o_proc_fs_create - Create the i2o proc fs.
 *
 * Iterate over each I2O controller and create the entries for it.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int __init i2o_proc_fs_create(void)
{
	struct i2o_controller *c;

	i2o_proc_dir_root = proc_mkdir("i2o", NULL);
	if (!i2o_proc_dir_root)
		return -1;

	list_for_each_entry(c, &i2o_controllers, list)
	    i2o_proc_iop_add(i2o_proc_dir_root, c);

	return 0;
};

/**
 * i2o_proc_fs_destroy - Clean up all i2o proc entries
 *
 * Iterate over each I2O controller and remove the entries for it.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int __exit i2o_proc_fs_destroy(void)
{
	struct i2o_controller *c;

	list_for_each_entry(c, &i2o_controllers, list)
	    i2o_proc_iop_remove(i2o_proc_dir_root, c);

	remove_proc_entry("i2o", NULL);

	return 0;
};

/**
 * i2o_proc_init - Init function for procfs
 *
 * Registers Proc OSM and creates procfs entries.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int __init i2o_proc_init(void)
{
	int rc;

	printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");

	rc = i2o_driver_register(&i2o_proc_driver);
	if (rc)
		return rc;

	rc = i2o_proc_fs_create();
	if (rc) {
		i2o_driver_unregister(&i2o_proc_driver);
		return rc;
	}

	return 0;
};

/**
 * i2o_proc_exit - Exit function for procfs
 *
 * Unregisters Proc OSM and removes procfs entries.
*/ static void __exit i2o_proc_exit(void) { i2o_driver_unregister(&i2o_proc_driver); i2o_proc_fs_destroy(); }; MODULE_AUTHOR("Deepak Saxena"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(OSM_DESCRIPTION); MODULE_VERSION(OSM_VERSION); module_init(i2o_proc_init); module_exit(i2o_proc_exit);
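/*
 * Editor's illustrative sketch (not part of the original driver): every
 * read-only file above follows the same three-step pattern -- a seq_file
 * show function, a single_open() wrapper, and a file_operations struct
 * wired into one of the i2o_proc_entry tables. The entry name "tid" and
 * the show body below are hypothetical.
 */
#if 0	/* example only, never compiled */
static int i2o_seq_show_tid(struct seq_file *seq, void *v)
{
	struct i2o_device *d = (struct i2o_device *)seq->private;

	/* print this device's LCT TID, mirroring i2o_seq_show_dev_name() */
	seq_printf(seq, "0x%03x\n", d->lct_data.tid);
	return 0;
}

static int i2o_seq_open_tid(struct inode *inode, struct file *file)
{
	return single_open(file, i2o_seq_show_tid, PDE(inode)->data);
}

static const struct file_operations i2o_seq_fops_tid = {
	.open = i2o_seq_open_tid,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* then add {"tid", S_IFREG | S_IRUGO, &i2o_seq_fops_tid} to
   generic_dev_entries[] before the {NULL, 0, NULL} terminator */
#endif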
gpl-2.0
Mazout360/lge-kernel-gb
arch/arm/mach-omap1/leds-innovator.c
4910
1565
/*
 * linux/arch/arm/mach-omap1/leds-innovator.c
 */
#include <linux/init.h>

#include <mach/hardware.h>
#include <asm/leds.h>
#include <asm/system.h>

#include "leds.h"

#define LED_STATE_ENABLED	1
#define LED_STATE_CLAIMED	2

static unsigned int led_state;
static unsigned int hw_led_state;

/*
 * Note: the LED mask bits for this board were never filled in, so every
 * hw_led_state update below uses a 0 mask and is effectively a no-op.
 * The claim/release state machine is kept as a template.
 */
void innovator_leds_event(led_event_t evt)
{
	unsigned long flags;

	local_irq_save(flags);

	switch (evt) {
	case led_start:
		hw_led_state = 0;
		led_state = LED_STATE_ENABLED;
		break;

	case led_stop:
		led_state &= ~LED_STATE_ENABLED;
		hw_led_state = 0;
		break;

	case led_claim:
		led_state |= LED_STATE_CLAIMED;
		hw_led_state = 0;
		break;

	case led_release:
		led_state &= ~LED_STATE_CLAIMED;
		hw_led_state = 0;
		break;

#ifdef CONFIG_LEDS_TIMER
	case led_timer:
		if (!(led_state & LED_STATE_CLAIMED))
			hw_led_state ^= 0;	/* placeholder: no timer LED mask */
		break;
#endif

#ifdef CONFIG_LEDS_CPU
	case led_idle_start:
		if (!(led_state & LED_STATE_CLAIMED))
			hw_led_state |= 0;	/* placeholder: no idle LED mask */
		break;

	case led_idle_end:
		if (!(led_state & LED_STATE_CLAIMED))
			hw_led_state &= ~0;	/* placeholder: no idle LED mask */
		break;
#endif

	case led_halted:
		break;

	case led_green_on:
		if (led_state & LED_STATE_CLAIMED)
			hw_led_state &= ~0;	/* placeholder: no green LED mask */
		break;

	case led_green_off:
		if (led_state & LED_STATE_CLAIMED)
			hw_led_state |= 0;
		break;

	case led_amber_on:
		break;

	case led_amber_off:
		break;

	case led_red_on:
		if (led_state & LED_STATE_CLAIMED)
			hw_led_state &= ~0;	/* placeholder: no red LED mask */
		break;

	case led_red_off:
		if (led_state & LED_STATE_CLAIMED)
			hw_led_state |= 0;
		break;

	default:
		break;
	}

	local_irq_restore(flags);
}
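/*
 * Editor's illustrative sketch (not in the original file): what one of the
 * handlers looks like once real mask bits exist. The mask value and the
 * write-out helper innovator_leds_update() are hypothetical; an actual
 * board port would write hw_led_state to its LED register there.
 */
#if 0	/* example only, never compiled */
/* hypothetical bit assignment -- the real board value is unknown */
#define INNOVATOR_LED_TIMER	(1 << 0)

static void innovator_leds_update(unsigned int state)
{
	/* a real port would write 'state' to the board's LED register here */
}

static void innovator_leds_timer_tick(void)
{
	if (!(led_state & LED_STATE_CLAIMED)) {
		hw_led_state ^= INNOVATOR_LED_TIMER;	/* blink heartbeat */
		innovator_leds_update(hw_led_state);
	}
}
#endif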
gpl-2.0
PyYoshi/android_kernel_kyocera_isw12k
drivers/hid/hid-belkin.c
8238
2624
/* * HID driver for some belkin "special" devices * * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina * Copyright (c) 2007 Paul Walmsley * Copyright (c) 2008 Jiri Slaby */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ #include <linux/device.h> #include <linux/hid.h> #include <linux/module.h> #include "hid-ids.h" #define BELKIN_HIDDEV 0x01 #define BELKIN_WKBD 0x02 #define belkin_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ EV_KEY, (c)) static int belkin_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { unsigned long quirks = (unsigned long)hid_get_drvdata(hdev); if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER || !(quirks & BELKIN_WKBD)) return 0; switch (usage->hid & HID_USAGE) { case 0x03a: belkin_map_key_clear(KEY_SOUND); break; case 0x03b: belkin_map_key_clear(KEY_CAMERA); break; case 0x03c: belkin_map_key_clear(KEY_DOCUMENTS); break; default: return 0; } return 1; } static int belkin_probe(struct hid_device *hdev, const struct hid_device_id *id) { unsigned long quirks = id->driver_data; int ret; hid_set_drvdata(hdev, (void *)quirks); ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); goto err_free; } ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT | ((quirks & BELKIN_HIDDEV) ? HID_CONNECT_HIDDEV_FORCE : 0)); if (ret) { hid_err(hdev, "hw start failed\n"); goto err_free; } return 0; err_free: return ret; } static const struct hid_device_id belkin_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM), .driver_data = BELKIN_HIDDEV }, { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD), .driver_data = BELKIN_WKBD }, { } }; MODULE_DEVICE_TABLE(hid, belkin_devices); static struct hid_driver belkin_driver = { .name = "belkin", .id_table = belkin_devices, .input_mapping = belkin_input_mapping, .probe = belkin_probe, }; static int __init belkin_init(void) { return hid_register_driver(&belkin_driver); } static void __exit belkin_exit(void) { hid_unregister_driver(&belkin_driver); } module_init(belkin_init); module_exit(belkin_exit); MODULE_LICENSE("GPL");
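/*
 * Editor's illustrative sketch (not part of the original driver): hooking
 * up one more keyboard is just a new row in belkin_devices[] plus, if it
 * has extra consumer-page keys, more cases in belkin_input_mapping().
 * USB_DEVICE_ID_EXAMPLE_WKBD and the 0x03d usage below are hypothetical.
 */
#if 0	/* example only, never compiled */
	/* in belkin_devices[], before the terminating { } entry: */
	{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_EXAMPLE_WKBD),
		.driver_data = BELKIN_WKBD },

	/* in belkin_input_mapping(), alongside the existing cases: */
	case 0x03d:
		belkin_map_key_clear(KEY_WWW);
		break;
#endif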
gpl-2.0
CyanogenMod/lge-kernel-omap4
drivers/mtd/maps/sc520cdp.c
10798
9142
/* sc520cdp.c -- MTD map driver for AMD SC520 Customer Development Platform
 *
 * Copyright (C) 2001 Sysgo Real-Time Solutions GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 *
 *
 * The SC520CDP is an evaluation board for the Elan SC520 processor available
 * from AMD. It has two banks of 32-bit Flash ROM, each 8 Megabytes in size,
 * and up to 512 KiB of 8-bit DIL Flash ROM.
 * For details see http://www.amd.com/products/epd/desiging/evalboards/18.elansc520/520_cdp_brief/index.html
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/concat.h>

/*
** The Embedded Systems BIOS decodes the first FLASH starting at
** 0x8400000. This is a *terrible* place for it because accessing
** the flash at this location drives the A22 address line high
** (bit 22, i.e. 0x400000, is set in 0x8400000). But this is the highest
** order address line on the raw flash devices themselves!!
** This causes the top HALF of the flash to be accessed first. Beyond
** the physical limits of the flash, the flash chip aliases over (to
** 0x8800000), which causes the bottom half to be accessed. This splits the
** flash into two and inverts it! If you then try to access this from another
** program that does NOT do this insanity, then you *will* access the
** first half of the flash, but not find what you expect there. That
** stuff is in the *second* half! Similarly, the address used by the
** BIOS for the second FLASH bank is also quite a bad choice.
** If REPROGRAM_PAR is defined below (the default), then this driver will
** choose more useful addresses for the FLASH banks by reprogramming the
** responsible PARxx registers in the SC520's MMCR region. This will
** cause the settings to be incompatible with the BIOS's settings, which
** shouldn't be a problem since you are running Linux (i.e. the BIOS is
** not much use anyway). However, if you need to be compatible with
** the BIOS for some reason, just undefine REPROGRAM_PAR.
*/
#define REPROGRAM_PAR

#ifdef REPROGRAM_PAR

/* These are the addresses we want.. */
#define WINDOW_ADDR_0 0x08800000
#define WINDOW_ADDR_1 0x09000000
#define WINDOW_ADDR_2 0x09800000

/* ..
and these are the addresses the BIOS gives us */ #define WINDOW_ADDR_0_BIOS 0x08400000 #define WINDOW_ADDR_1_BIOS 0x08c00000 #define WINDOW_ADDR_2_BIOS 0x09400000 #else #define WINDOW_ADDR_0 0x08400000 #define WINDOW_ADDR_1 0x08C00000 #define WINDOW_ADDR_2 0x09400000 #endif #define WINDOW_SIZE_0 0x00800000 #define WINDOW_SIZE_1 0x00800000 #define WINDOW_SIZE_2 0x00080000 static struct map_info sc520cdp_map[] = { { .name = "SC520CDP Flash Bank #0", .size = WINDOW_SIZE_0, .bankwidth = 4, .phys = WINDOW_ADDR_0 }, { .name = "SC520CDP Flash Bank #1", .size = WINDOW_SIZE_1, .bankwidth = 4, .phys = WINDOW_ADDR_1 }, { .name = "SC520CDP DIL Flash", .size = WINDOW_SIZE_2, .bankwidth = 1, .phys = WINDOW_ADDR_2 }, }; #define NUM_FLASH_BANKS ARRAY_SIZE(sc520cdp_map) static struct mtd_info *mymtd[NUM_FLASH_BANKS]; static struct mtd_info *merged_mtd; #ifdef REPROGRAM_PAR /* ** The SC520 MMCR (memory mapped control register) region resides ** at 0xFFFEF000. The 16 Programmable Address Region (PAR) registers ** are at offset 0x88 in the MMCR: */ #define SC520_MMCR_BASE 0xFFFEF000 #define SC520_MMCR_EXTENT 0x1000 #define SC520_PAR(x) ((0x88/sizeof(unsigned long)) + (x)) #define NUM_SC520_PAR 16 /* total number of PAR registers */ /* ** The highest three bits in a PAR register determine what target ** device is controlled by this PAR. Here, only ROMCS? and BOOTCS ** devices are of interest. */ #define SC520_PAR_BOOTCS (0x4<<29) #define SC520_PAR_ROMCS0 (0x5<<29) #define SC520_PAR_ROMCS1 (0x6<<29) #define SC520_PAR_TRGDEV (0x7<<29) /* ** Bits 28 thru 26 determine some attributes for the ** region controlled by the PAR. (We only use non-cacheable) */ #define SC520_PAR_WRPROT (1<<26) /* write protected */ #define SC520_PAR_NOCACHE (1<<27) /* non-cacheable */ #define SC520_PAR_NOEXEC (1<<28) /* code execution denied */ /* ** Bit 25 determines the granularity: 4K or 64K */ #define SC520_PAR_PG_SIZ4 (0<<25) #define SC520_PAR_PG_SIZ64 (1<<25) /* ** Build a value to be written into a PAR register. ** We only need ROM entries, 64K page size: */ #define SC520_PAR_ENTRY(trgdev, address, size) \ ((trgdev) | SC520_PAR_NOCACHE | SC520_PAR_PG_SIZ64 | \ (address) >> 16 | (((size) >> 16) - 1) << 14) struct sc520_par_table { unsigned long trgdev; unsigned long new_par; unsigned long default_address; }; static const struct sc520_par_table par_table[NUM_FLASH_BANKS] = { { /* Flash Bank #0: selected by ROMCS0 */ SC520_PAR_ROMCS0, SC520_PAR_ENTRY(SC520_PAR_ROMCS0, WINDOW_ADDR_0, WINDOW_SIZE_0), WINDOW_ADDR_0_BIOS }, { /* Flash Bank #1: selected by ROMCS1 */ SC520_PAR_ROMCS1, SC520_PAR_ENTRY(SC520_PAR_ROMCS1, WINDOW_ADDR_1, WINDOW_SIZE_1), WINDOW_ADDR_1_BIOS }, { /* DIL (BIOS) Flash: selected by BOOTCS */ SC520_PAR_BOOTCS, SC520_PAR_ENTRY(SC520_PAR_BOOTCS, WINDOW_ADDR_2, WINDOW_SIZE_2), WINDOW_ADDR_2_BIOS } }; static void sc520cdp_setup_par(void) { volatile unsigned long __iomem *mmcr; unsigned long mmcr_val; int i, j; /* map in SC520's MMCR area */ mmcr = ioremap_nocache(SC520_MMCR_BASE, SC520_MMCR_EXTENT); if(!mmcr) { /* ioremap_nocache failed: skip the PAR reprogramming */ /* force physical address fields to BIOS defaults: */ for(i = 0; i < NUM_FLASH_BANKS; i++) sc520cdp_map[i].phys = par_table[i].default_address; return; } /* ** Find the PARxx registers that are responsible for activating ** ROMCS0, ROMCS1 and BOOTCS. Reprogram each of these with a ** new value from the table. 
*/ for(i = 0; i < NUM_FLASH_BANKS; i++) { /* for each par_table entry */ for(j = 0; j < NUM_SC520_PAR; j++) { /* for each PAR register */ mmcr_val = mmcr[SC520_PAR(j)]; /* if target device field matches, reprogram the PAR */ if((mmcr_val & SC520_PAR_TRGDEV) == par_table[i].trgdev) { mmcr[SC520_PAR(j)] = par_table[i].new_par; break; } } if(j == NUM_SC520_PAR) { /* no matching PAR found: try default BIOS address */ printk(KERN_NOTICE "Could not find PAR responsible for %s\n", sc520cdp_map[i].name); printk(KERN_NOTICE "Trying default address 0x%lx\n", par_table[i].default_address); sc520cdp_map[i].phys = par_table[i].default_address; } } iounmap(mmcr); } #endif static int __init init_sc520cdp(void) { int i, devices_found = 0; #ifdef REPROGRAM_PAR /* reprogram PAR registers so flash appears at the desired addresses */ sc520cdp_setup_par(); #endif for (i = 0; i < NUM_FLASH_BANKS; i++) { printk(KERN_NOTICE "SC520 CDP flash device: 0x%Lx at 0x%Lx\n", (unsigned long long)sc520cdp_map[i].size, (unsigned long long)sc520cdp_map[i].phys); sc520cdp_map[i].virt = ioremap_nocache(sc520cdp_map[i].phys, sc520cdp_map[i].size); if (!sc520cdp_map[i].virt) { printk("Failed to ioremap_nocache\n"); return -EIO; } simple_map_init(&sc520cdp_map[i]); mymtd[i] = do_map_probe("cfi_probe", &sc520cdp_map[i]); if(!mymtd[i]) mymtd[i] = do_map_probe("jedec_probe", &sc520cdp_map[i]); if(!mymtd[i]) mymtd[i] = do_map_probe("map_rom", &sc520cdp_map[i]); if (mymtd[i]) { mymtd[i]->owner = THIS_MODULE; ++devices_found; } else { iounmap(sc520cdp_map[i].virt); } } if(devices_found >= 2) { /* Combine the two flash banks into a single MTD device & register it: */ merged_mtd = mtd_concat_create(mymtd, 2, "SC520CDP Flash Banks #0 and #1"); if(merged_mtd) mtd_device_register(merged_mtd, NULL, 0); } if(devices_found == 3) /* register the third (DIL-Flash) device */ mtd_device_register(mymtd[2], NULL, 0); return(devices_found ? 0 : -ENXIO); } static void __exit cleanup_sc520cdp(void) { int i; if (merged_mtd) { mtd_device_unregister(merged_mtd); mtd_concat_destroy(merged_mtd); } if (mymtd[2]) mtd_device_unregister(mymtd[2]); for (i = 0; i < NUM_FLASH_BANKS; i++) { if (mymtd[i]) map_destroy(mymtd[i]); if (sc520cdp_map[i].virt) { iounmap(sc520cdp_map[i].virt); sc520cdp_map[i].virt = NULL; } } } module_init(init_sc520cdp); module_exit(cleanup_sc520cdp); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Sysgo Real-Time Solutions GmbH"); MODULE_DESCRIPTION("MTD map driver for AMD SC520 Customer Development Platform");
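/*
 * Editor's note -- a worked example of the SC520_PAR_ENTRY() arithmetic
 * for Flash Bank #0, using the values from par_table[] above:
 *
 *   SC520_PAR_ENTRY(SC520_PAR_ROMCS0, 0x08800000, 0x00800000)
 *     = (0x5 << 29)                     target device ROMCS0 = 0xA0000000
 *     | SC520_PAR_NOCACHE  (1 << 27)                         = 0x08000000
 *     | SC520_PAR_PG_SIZ64 (1 << 25)                         = 0x02000000
 *     | (0x08800000 >> 16)              start page (64K units) = 0x00000880
 *     | ((0x00800000 >> 16) - 1) << 14  page count minus one   = 0x001FC000
 *     = 0xAA1FC880
 *
 * i.e. bits 13:0 hold the 64K start page, bits 24:14 the page count minus
 * one, and the top bits select the chip select and region attributes.
 */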
gpl-2.0