repo_name: string
path: string
copies: string
size: string
content: string
license: string
NX511J-dev/kernel_zte_nx511j
drivers/mtd/maps/sc520cdp.c
10798
9142
/* sc520cdp.c -- MTD map driver for AMD SC520 Customer Development Platform * * Copyright (C) 2001 Sysgo Real-Time Solutions GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * * The SC520CDP is an evaluation board for the Elan SC520 processor available * from AMD. It has two banks of 32-bit Flash ROM, each 8 Megabytes in size, * and up to 512 KiB of 8-bit DIL Flash ROM. * For details see http://www.amd.com/products/epd/desiging/evalboards/18.elansc520/520_cdp_brief/index.html */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <asm/io.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/concat.h> /* ** The Embedded Systems BIOS decodes the first FLASH starting at ** 0x8400000. This is a *terrible* place for it because accessing ** the flash at this location causes the A22 address line to be high ** (bit 22 of 0x8400000 is set). But this is the highest ** order address line on the raw flash devices themselves!! ** This causes the top HALF of the flash to be accessed first. Beyond ** the physical limits of the flash, the flash chip aliases over (to ** 0x880000), which causes the bottom half to be accessed. This splits the ** flash into two and inverts it! If you then try to access this from another ** program that does NOT do this insanity, then you *will* access the ** first half of the flash, but not find what you expect there. That ** stuff is in the *second* half! Similarly, the address used by the ** BIOS for the second FLASH bank is also quite a bad choice. ** If REPROGRAM_PAR is defined below (the default), then this driver will ** choose more useful addresses for the FLASH banks by reprogramming the ** responsible PARxx registers in the SC520's MMCR region. This will ** cause the settings to be incompatible with the BIOS's settings, which ** shouldn't be a problem since you are running Linux (i.e., the BIOS is ** not of much use anyway). However, if you need to be compatible with ** the BIOS for some reason, just undefine REPROGRAM_PAR. */ #define REPROGRAM_PAR #ifdef REPROGRAM_PAR /* These are the addresses we want.. */ #define WINDOW_ADDR_0 0x08800000 #define WINDOW_ADDR_1 0x09000000 #define WINDOW_ADDR_2 0x09800000 /* ..
and these are the addresses the BIOS gives us */ #define WINDOW_ADDR_0_BIOS 0x08400000 #define WINDOW_ADDR_1_BIOS 0x08c00000 #define WINDOW_ADDR_2_BIOS 0x09400000 #else #define WINDOW_ADDR_0 0x08400000 #define WINDOW_ADDR_1 0x08C00000 #define WINDOW_ADDR_2 0x09400000 #endif #define WINDOW_SIZE_0 0x00800000 #define WINDOW_SIZE_1 0x00800000 #define WINDOW_SIZE_2 0x00080000 static struct map_info sc520cdp_map[] = { { .name = "SC520CDP Flash Bank #0", .size = WINDOW_SIZE_0, .bankwidth = 4, .phys = WINDOW_ADDR_0 }, { .name = "SC520CDP Flash Bank #1", .size = WINDOW_SIZE_1, .bankwidth = 4, .phys = WINDOW_ADDR_1 }, { .name = "SC520CDP DIL Flash", .size = WINDOW_SIZE_2, .bankwidth = 1, .phys = WINDOW_ADDR_2 }, }; #define NUM_FLASH_BANKS ARRAY_SIZE(sc520cdp_map) static struct mtd_info *mymtd[NUM_FLASH_BANKS]; static struct mtd_info *merged_mtd; #ifdef REPROGRAM_PAR /* ** The SC520 MMCR (memory mapped control register) region resides ** at 0xFFFEF000. The 16 Programmable Address Region (PAR) registers ** are at offset 0x88 in the MMCR: */ #define SC520_MMCR_BASE 0xFFFEF000 #define SC520_MMCR_EXTENT 0x1000 #define SC520_PAR(x) ((0x88/sizeof(unsigned long)) + (x)) #define NUM_SC520_PAR 16 /* total number of PAR registers */ /* ** The highest three bits in a PAR register determine what target ** device is controlled by this PAR. Here, only ROMCS? and BOOTCS ** devices are of interest. */ #define SC520_PAR_BOOTCS (0x4<<29) #define SC520_PAR_ROMCS0 (0x5<<29) #define SC520_PAR_ROMCS1 (0x6<<29) #define SC520_PAR_TRGDEV (0x7<<29) /* ** Bits 28 thru 26 determine some attributes for the ** region controlled by the PAR. (We only use non-cacheable) */ #define SC520_PAR_WRPROT (1<<26) /* write protected */ #define SC520_PAR_NOCACHE (1<<27) /* non-cacheable */ #define SC520_PAR_NOEXEC (1<<28) /* code execution denied */ /* ** Bit 25 determines the granularity: 4K or 64K */ #define SC520_PAR_PG_SIZ4 (0<<25) #define SC520_PAR_PG_SIZ64 (1<<25) /* ** Build a value to be written into a PAR register. ** We only need ROM entries, 64K page size: */ #define SC520_PAR_ENTRY(trgdev, address, size) \ ((trgdev) | SC520_PAR_NOCACHE | SC520_PAR_PG_SIZ64 | \ (address) >> 16 | (((size) >> 16) - 1) << 14) struct sc520_par_table { unsigned long trgdev; unsigned long new_par; unsigned long default_address; }; static const struct sc520_par_table par_table[NUM_FLASH_BANKS] = { { /* Flash Bank #0: selected by ROMCS0 */ SC520_PAR_ROMCS0, SC520_PAR_ENTRY(SC520_PAR_ROMCS0, WINDOW_ADDR_0, WINDOW_SIZE_0), WINDOW_ADDR_0_BIOS }, { /* Flash Bank #1: selected by ROMCS1 */ SC520_PAR_ROMCS1, SC520_PAR_ENTRY(SC520_PAR_ROMCS1, WINDOW_ADDR_1, WINDOW_SIZE_1), WINDOW_ADDR_1_BIOS }, { /* DIL (BIOS) Flash: selected by BOOTCS */ SC520_PAR_BOOTCS, SC520_PAR_ENTRY(SC520_PAR_BOOTCS, WINDOW_ADDR_2, WINDOW_SIZE_2), WINDOW_ADDR_2_BIOS } }; static void sc520cdp_setup_par(void) { volatile unsigned long __iomem *mmcr; unsigned long mmcr_val; int i, j; /* map in SC520's MMCR area */ mmcr = ioremap_nocache(SC520_MMCR_BASE, SC520_MMCR_EXTENT); if(!mmcr) { /* ioremap_nocache failed: skip the PAR reprogramming */ /* force physical address fields to BIOS defaults: */ for(i = 0; i < NUM_FLASH_BANKS; i++) sc520cdp_map[i].phys = par_table[i].default_address; return; } /* ** Find the PARxx registers that are responsible for activating ** ROMCS0, ROMCS1 and BOOTCS. Reprogram each of these with a ** new value from the table. 
*/ for(i = 0; i < NUM_FLASH_BANKS; i++) { /* for each par_table entry */ for(j = 0; j < NUM_SC520_PAR; j++) { /* for each PAR register */ mmcr_val = mmcr[SC520_PAR(j)]; /* if target device field matches, reprogram the PAR */ if((mmcr_val & SC520_PAR_TRGDEV) == par_table[i].trgdev) { mmcr[SC520_PAR(j)] = par_table[i].new_par; break; } } if(j == NUM_SC520_PAR) { /* no matching PAR found: try default BIOS address */ printk(KERN_NOTICE "Could not find PAR responsible for %s\n", sc520cdp_map[i].name); printk(KERN_NOTICE "Trying default address 0x%lx\n", par_table[i].default_address); sc520cdp_map[i].phys = par_table[i].default_address; } } iounmap(mmcr); } #endif static int __init init_sc520cdp(void) { int i, devices_found = 0; #ifdef REPROGRAM_PAR /* reprogram PAR registers so flash appears at the desired addresses */ sc520cdp_setup_par(); #endif for (i = 0; i < NUM_FLASH_BANKS; i++) { printk(KERN_NOTICE "SC520 CDP flash device: 0x%Lx at 0x%Lx\n", (unsigned long long)sc520cdp_map[i].size, (unsigned long long)sc520cdp_map[i].phys); sc520cdp_map[i].virt = ioremap_nocache(sc520cdp_map[i].phys, sc520cdp_map[i].size); if (!sc520cdp_map[i].virt) { printk("Failed to ioremap_nocache\n"); return -EIO; } simple_map_init(&sc520cdp_map[i]); mymtd[i] = do_map_probe("cfi_probe", &sc520cdp_map[i]); if(!mymtd[i]) mymtd[i] = do_map_probe("jedec_probe", &sc520cdp_map[i]); if(!mymtd[i]) mymtd[i] = do_map_probe("map_rom", &sc520cdp_map[i]); if (mymtd[i]) { mymtd[i]->owner = THIS_MODULE; ++devices_found; } else { iounmap(sc520cdp_map[i].virt); } } if(devices_found >= 2) { /* Combine the two flash banks into a single MTD device & register it: */ merged_mtd = mtd_concat_create(mymtd, 2, "SC520CDP Flash Banks #0 and #1"); if(merged_mtd) mtd_device_register(merged_mtd, NULL, 0); } if(devices_found == 3) /* register the third (DIL-Flash) device */ mtd_device_register(mymtd[2], NULL, 0); return(devices_found ? 0 : -ENXIO); } static void __exit cleanup_sc520cdp(void) { int i; if (merged_mtd) { mtd_device_unregister(merged_mtd); mtd_concat_destroy(merged_mtd); } if (mymtd[2]) mtd_device_unregister(mymtd[2]); for (i = 0; i < NUM_FLASH_BANKS; i++) { if (mymtd[i]) map_destroy(mymtd[i]); if (sc520cdp_map[i].virt) { iounmap(sc520cdp_map[i].virt); sc520cdp_map[i].virt = NULL; } } } module_init(init_sc520cdp); module_exit(cleanup_sc520cdp); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Sysgo Real-Time Solutions GmbH"); MODULE_DESCRIPTION("MTD map driver for AMD SC520 Customer Development Platform");
gpl-2.0
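The SC520_PAR_ENTRY macro in the file above packs a target-device selector, the non-cacheable attribute, the 64 KiB page-size bit, the window start address and the window size into a single 32-bit PAR value. As a quick illustration (a standalone sketch that mirrors the driver's macro rather than reusing the kernel headers), this computes the value the driver programs for Flash Bank #0:

#include <stdio.h>

/* Field layout mirrored from sc520cdp.c above (64 KiB granularity). */
#define PAR_ROMCS0   (0x5u << 29)            /* target device: ROMCS0   */
#define PAR_NOCACHE  (1u << 27)              /* region is non-cacheable */
#define PAR_PG_SIZ64 (1u << 25)              /* 64 KiB page size        */

#define PAR_ENTRY(trgdev, address, size) \
        ((trgdev) | PAR_NOCACHE | PAR_PG_SIZ64 | \
         (address) >> 16 | (((size) >> 16) - 1) << 14)

int main(void)
{
        /* Flash Bank #0: an 8 MiB window at 0x08800000, selected by ROMCS0. */
        unsigned int par = PAR_ENTRY(PAR_ROMCS0, 0x08800000u, 0x00800000u);

        printf("PAR value for bank #0: 0x%08x\n", par);
        return 0;
}

Run, this prints 0xaa1fc880: the ROMCS0 selector in the top three bits, the start page 0x0880 in the low 14 bits, and a size field of 0x7f in bits 20..14 (0x80 pages of 64 KiB = 8 MiB).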
ErickReyes/kernel-mst3000
arch/blackfin/mach-common/arch_checks.c
12078
2233
/* * Do some checking to make sure things are OK * * Copyright 2007-2010 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <asm/fixed_code.h> #include <mach/anomaly.h> #include <asm/clocks.h> #ifdef CONFIG_BFIN_KERNEL_CLOCK # if (CONFIG_VCO_HZ > CONFIG_MAX_VCO_HZ) # error "VCO selected is more than maximum value. Please change the VCO multiplier" # endif # if (CONFIG_SCLK_HZ > CONFIG_MAX_SCLK_HZ) # error "Sclk value selected is more than maximum. Please select a proper value for SCLK multiplier" # endif # if (CONFIG_SCLK_HZ < CONFIG_MIN_SCLK_HZ) # error "Sclk value selected is less than minimum. Please select a proper value for SCLK multiplier" # endif # if (ANOMALY_05000273) && (CONFIG_SCLK_HZ * 2 > CONFIG_CCLK_HZ) # error "ANOMALY 05000273, please make sure CCLK is at least 2x SCLK" # endif # if (CONFIG_SCLK_HZ > CONFIG_CCLK_HZ) && (CONFIG_SCLK_HZ != CONFIG_CLKIN_HZ) && (CONFIG_CCLK_HZ != CONFIG_CLKIN_HZ) # error "Please select sclk less than cclk" # endif #endif /* CONFIG_BFIN_KERNEL_CLOCK */ #if CONFIG_BOOT_LOAD < FIXED_CODE_END # error "The kernel load address must be after the fixed code section" #endif #if (CONFIG_BOOT_LOAD & 0x3) # error "The kernel load address must be 4 byte aligned" #endif /* The entire kernel must be able to make a 24bit pcrel call to start of L1 */ #if ((0xffffffff - L1_CODE_START + 1) + CONFIG_BOOT_LOAD) > 0x1000000 # error "The kernel load address is too high; keep it below 10meg for safety" #endif #if ANOMALY_05000263 && defined(CONFIG_MPU) # error the MPU will not function safely while Anomaly 05000263 applies #endif #if ANOMALY_05000448 # error You are using a part with anomaly 05000448, this issue causes random memory read/write failures - that means random crashes. #endif /* if anomaly 05000220 applies, neither External Memory nor L2 may be configured as write-back */ #if ANOMALY_05000220 && \ (defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK)) # error "Anomaly 05000220 does not allow you to use Write Back cache with L2 or External Memory" #endif #if ANOMALY_05000491 && !defined(CONFIG_ICACHE_FLUSH_L1) # error You need IFLUSH in L1 inst while Anomaly 05000491 applies #endif
gpl-2.0
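arch_checks.c contains no runtime code at all: every constraint is a preprocessor #if/#error pair, so an invalid clock or load-address configuration aborts the build instead of producing a kernel that misbehaves at boot. A minimal self-contained sketch of the same technique, with made-up CONFIG_* values standing in for what Kconfig would normally generate:

/* Hypothetical configuration values, as Kconfig would provide them. */
#define CONFIG_CCLK_HZ 500000000
#define CONFIG_SCLK_HZ 100000000
#define MAX_SCLK_HZ    133333333

/* Reject impossible combinations at build time, before any code runs. */
#if CONFIG_SCLK_HZ > MAX_SCLK_HZ
# error "SCLK exceeds the silicon limit; select a smaller SCLK divider"
#endif
#if (2 * CONFIG_SCLK_HZ) > CONFIG_CCLK_HZ
# error "CCLK must be at least 2x SCLK"
#endif

int main(void) { return 0; }    /* compiles only if every check passes */

Raising CONFIG_SCLK_HZ above MAX_SCLK_HZ, or dropping CONFIG_CCLK_HZ below twice SCLK, makes the translation unit fail with the corresponding message, which is exactly how the Blackfin checks guard against anomalous clock setups.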
theapant/ZTE_N800-kernel
fs/ocfs2/cluster/ver.c
12590
1222
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * ver.c * * version string * * Copyright (C) 2002, 2005 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. */ #include <linux/module.h> #include <linux/kernel.h> #include "ver.h" #define CLUSTER_BUILD_VERSION "1.5.0" #define VERSION_STR "OCFS2 Node Manager " CLUSTER_BUILD_VERSION void cluster_print_version(void) { printk(KERN_INFO "%s\n", VERSION_STR); } MODULE_DESCRIPTION(VERSION_STR); MODULE_VERSION(CLUSTER_BUILD_VERSION);
gpl-2.0
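ver.c builds its banner entirely at compile time by relying on C's adjacent-string-literal concatenation: "OCFS2 Node Manager " CLUSTER_BUILD_VERSION collapses into one string constant, so no runtime formatting is needed. A small userspace sketch of the same idiom (illustrative, outside the kernel):

#include <stdio.h>

#define CLUSTER_BUILD_VERSION "1.5.0"
/* Adjacent string literals are pasted by the compiler, so VERSION_STR
 * is the single constant "OCFS2 Node Manager 1.5.0". */
#define VERSION_STR "OCFS2 Node Manager " CLUSTER_BUILD_VERSION

int main(void)
{
        puts(VERSION_STR);      /* prints: OCFS2 Node Manager 1.5.0 */
        return 0;
}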
XT701/Kernel
drivers/media/video/sn9c102/sn9c102_ov7660.c
12846
17770
/*************************************************************************** * Plug-in for OV7660 image sensor connected to the SN9C1xx PC Camera * * Controllers * * * * Copyright (C) 2007 by Luca Risolia <luca.risolia@studio.unibo.it> * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the Free Software * * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * ***************************************************************************/ #include "sn9c102_sensor.h" #include "sn9c102_devtable.h" static int ov7660_init(struct sn9c102_device* cam) { int err = 0; err = sn9c102_write_const_regs(cam, {0x40, 0x02}, {0x00, 0x03}, {0x1a, 0x04}, {0x03, 0x10}, {0x08, 0x14}, {0x20, 0x17}, {0x8b, 0x18}, {0x00, 0x19}, {0x1d, 0x1a}, {0x10, 0x1b}, {0x02, 0x1c}, {0x03, 0x1d}, {0x0f, 0x1e}, {0x0c, 0x1f}, {0x00, 0x20}, {0x29, 0x21}, {0x40, 0x22}, {0x54, 0x23}, {0x66, 0x24}, {0x76, 0x25}, {0x85, 0x26}, {0x94, 0x27}, {0xa1, 0x28}, {0xae, 0x29}, {0xbb, 0x2a}, {0xc7, 0x2b}, {0xd3, 0x2c}, {0xde, 0x2d}, {0xea, 0x2e}, {0xf4, 0x2f}, {0xff, 0x30}, {0x00, 0x3f}, {0xc7, 0x40}, {0x01, 0x41}, {0x44, 0x42}, {0x00, 0x43}, {0x44, 0x44}, {0x00, 0x45}, {0x44, 0x46}, {0x00, 0x47}, {0xc7, 0x48}, {0x01, 0x49}, {0xc7, 0x4a}, {0x01, 0x4b}, {0xc7, 0x4c}, {0x01, 0x4d}, {0x44, 0x4e}, {0x00, 0x4f}, {0x44, 0x50}, {0x00, 0x51}, {0x44, 0x52}, {0x00, 0x53}, {0xc7, 0x54}, {0x01, 0x55}, {0xc7, 0x56}, {0x01, 0x57}, {0xc7, 0x58}, {0x01, 0x59}, {0x44, 0x5a}, {0x00, 0x5b}, {0x44, 0x5c}, {0x00, 0x5d}, {0x44, 0x5e}, {0x00, 0x5f}, {0xc7, 0x60}, {0x01, 0x61}, {0xc7, 0x62}, {0x01, 0x63}, {0xc7, 0x64}, {0x01, 0x65}, {0x44, 0x66}, {0x00, 0x67}, {0x44, 0x68}, {0x00, 0x69}, {0x44, 0x6a}, {0x00, 0x6b}, {0xc7, 0x6c}, {0x01, 0x6d}, {0xc7, 0x6e}, {0x01, 0x6f}, {0xc7, 0x70}, {0x01, 0x71}, {0x44, 0x72}, {0x00, 0x73}, {0x44, 0x74}, {0x00, 0x75}, {0x44, 0x76}, {0x00, 0x77}, {0xc7, 0x78}, {0x01, 0x79}, {0xc7, 0x7a}, {0x01, 0x7b}, {0xc7, 0x7c}, {0x01, 0x7d}, {0x44, 0x7e}, {0x00, 0x7f}, {0x14, 0x84}, {0x00, 0x85}, {0x27, 0x86}, {0x00, 0x87}, {0x07, 0x88}, {0x00, 0x89}, {0xec, 0x8a}, {0x0f, 0x8b}, {0xd8, 0x8c}, {0x0f, 0x8d}, {0x3d, 0x8e}, {0x00, 0x8f}, {0x3d, 0x90}, {0x00, 0x91}, {0xcd, 0x92}, {0x0f, 0x93}, {0xf7, 0x94}, {0x0f, 0x95}, {0x0c, 0x96}, {0x00, 0x97}, {0x00, 0x98}, {0x66, 0x99}, {0x05, 0x9a}, {0x00, 0x9b}, {0x04, 0x9c}, {0x00, 0x9d}, {0x08, 0x9e}, {0x00, 0x9f}, {0x2d, 0xc0}, {0x2d, 0xc1}, {0x3a, 0xc2}, {0x05, 0xc3}, {0x04, 0xc4}, {0x3f, 0xc5}, {0x00, 0xc6}, {0x00, 0xc7}, {0x50, 0xc8}, {0x3C, 0xc9}, {0x28, 0xca}, {0xd8, 0xcb}, {0x14, 0xcc}, {0xec, 0xcd}, {0x32, 0xce}, {0xdd, 0xcf}, {0x32, 0xd0}, {0xdd, 0xd1}, {0x6a, 0xd2}, {0x50, 0xd3}, {0x00, 0xd4}, {0x00, 0xd5}, {0x00, 0xd6}); err += sn9c102_i2c_write(cam, 0x12, 0x80); err += sn9c102_i2c_write(cam, 0x11, 0x09); err += sn9c102_i2c_write(cam, 0x00, 0x0A); err += sn9c102_i2c_write(cam, 0x01, 0x80); err += sn9c102_i2c_write(cam, 0x02, 0x80); err += sn9c102_i2c_write(cam, 0x03, 0x00); err += sn9c102_i2c_write(cam, 0x04, 0x00); err += 
sn9c102_i2c_write(cam, 0x05, 0x08); err += sn9c102_i2c_write(cam, 0x06, 0x0B); err += sn9c102_i2c_write(cam, 0x07, 0x00); err += sn9c102_i2c_write(cam, 0x08, 0x1C); err += sn9c102_i2c_write(cam, 0x09, 0x01); err += sn9c102_i2c_write(cam, 0x0A, 0x76); err += sn9c102_i2c_write(cam, 0x0B, 0x60); err += sn9c102_i2c_write(cam, 0x0C, 0x00); err += sn9c102_i2c_write(cam, 0x0D, 0x08); err += sn9c102_i2c_write(cam, 0x0E, 0x04); err += sn9c102_i2c_write(cam, 0x0F, 0x6F); err += sn9c102_i2c_write(cam, 0x10, 0x20); err += sn9c102_i2c_write(cam, 0x11, 0x03); err += sn9c102_i2c_write(cam, 0x12, 0x05); err += sn9c102_i2c_write(cam, 0x13, 0xC7); err += sn9c102_i2c_write(cam, 0x14, 0x2C); err += sn9c102_i2c_write(cam, 0x15, 0x00); err += sn9c102_i2c_write(cam, 0x16, 0x02); err += sn9c102_i2c_write(cam, 0x17, 0x10); err += sn9c102_i2c_write(cam, 0x18, 0x60); err += sn9c102_i2c_write(cam, 0x19, 0x02); err += sn9c102_i2c_write(cam, 0x1A, 0x7B); err += sn9c102_i2c_write(cam, 0x1B, 0x02); err += sn9c102_i2c_write(cam, 0x1C, 0x7F); err += sn9c102_i2c_write(cam, 0x1D, 0xA2); err += sn9c102_i2c_write(cam, 0x1E, 0x01); err += sn9c102_i2c_write(cam, 0x1F, 0x0E); err += sn9c102_i2c_write(cam, 0x20, 0x05); err += sn9c102_i2c_write(cam, 0x21, 0x05); err += sn9c102_i2c_write(cam, 0x22, 0x05); err += sn9c102_i2c_write(cam, 0x23, 0x05); err += sn9c102_i2c_write(cam, 0x24, 0x68); err += sn9c102_i2c_write(cam, 0x25, 0x58); err += sn9c102_i2c_write(cam, 0x26, 0xD4); err += sn9c102_i2c_write(cam, 0x27, 0x80); err += sn9c102_i2c_write(cam, 0x28, 0x80); err += sn9c102_i2c_write(cam, 0x29, 0x30); err += sn9c102_i2c_write(cam, 0x2A, 0x00); err += sn9c102_i2c_write(cam, 0x2B, 0x00); err += sn9c102_i2c_write(cam, 0x2C, 0x80); err += sn9c102_i2c_write(cam, 0x2D, 0x00); err += sn9c102_i2c_write(cam, 0x2E, 0x00); err += sn9c102_i2c_write(cam, 0x2F, 0x0E); err += sn9c102_i2c_write(cam, 0x30, 0x08); err += sn9c102_i2c_write(cam, 0x31, 0x30); err += sn9c102_i2c_write(cam, 0x32, 0xB4); err += sn9c102_i2c_write(cam, 0x33, 0x00); err += sn9c102_i2c_write(cam, 0x34, 0x07); err += sn9c102_i2c_write(cam, 0x35, 0x84); err += sn9c102_i2c_write(cam, 0x36, 0x00); err += sn9c102_i2c_write(cam, 0x37, 0x0C); err += sn9c102_i2c_write(cam, 0x38, 0x02); err += sn9c102_i2c_write(cam, 0x39, 0x43); err += sn9c102_i2c_write(cam, 0x3A, 0x00); err += sn9c102_i2c_write(cam, 0x3B, 0x0A); err += sn9c102_i2c_write(cam, 0x3C, 0x6C); err += sn9c102_i2c_write(cam, 0x3D, 0x99); err += sn9c102_i2c_write(cam, 0x3E, 0x0E); err += sn9c102_i2c_write(cam, 0x3F, 0x41); err += sn9c102_i2c_write(cam, 0x40, 0xC1); err += sn9c102_i2c_write(cam, 0x41, 0x22); err += sn9c102_i2c_write(cam, 0x42, 0x08); err += sn9c102_i2c_write(cam, 0x43, 0xF0); err += sn9c102_i2c_write(cam, 0x44, 0x10); err += sn9c102_i2c_write(cam, 0x45, 0x78); err += sn9c102_i2c_write(cam, 0x46, 0xA8); err += sn9c102_i2c_write(cam, 0x47, 0x60); err += sn9c102_i2c_write(cam, 0x48, 0x80); err += sn9c102_i2c_write(cam, 0x49, 0x00); err += sn9c102_i2c_write(cam, 0x4A, 0x00); err += sn9c102_i2c_write(cam, 0x4B, 0x00); err += sn9c102_i2c_write(cam, 0x4C, 0x00); err += sn9c102_i2c_write(cam, 0x4D, 0x00); err += sn9c102_i2c_write(cam, 0x4E, 0x00); err += sn9c102_i2c_write(cam, 0x4F, 0x46); err += sn9c102_i2c_write(cam, 0x50, 0x36); err += sn9c102_i2c_write(cam, 0x51, 0x0F); err += sn9c102_i2c_write(cam, 0x52, 0x17); err += sn9c102_i2c_write(cam, 0x53, 0x7F); err += sn9c102_i2c_write(cam, 0x54, 0x96); err += sn9c102_i2c_write(cam, 0x55, 0x40); err += sn9c102_i2c_write(cam, 0x56, 0x40); err += sn9c102_i2c_write(cam, 0x57, 
0x40); err += sn9c102_i2c_write(cam, 0x58, 0x0F); err += sn9c102_i2c_write(cam, 0x59, 0xBA); err += sn9c102_i2c_write(cam, 0x5A, 0x9A); err += sn9c102_i2c_write(cam, 0x5B, 0x22); err += sn9c102_i2c_write(cam, 0x5C, 0xB9); err += sn9c102_i2c_write(cam, 0x5D, 0x9B); err += sn9c102_i2c_write(cam, 0x5E, 0x10); err += sn9c102_i2c_write(cam, 0x5F, 0xF0); err += sn9c102_i2c_write(cam, 0x60, 0x05); err += sn9c102_i2c_write(cam, 0x61, 0x60); err += sn9c102_i2c_write(cam, 0x62, 0x00); err += sn9c102_i2c_write(cam, 0x63, 0x00); err += sn9c102_i2c_write(cam, 0x64, 0x50); err += sn9c102_i2c_write(cam, 0x65, 0x30); err += sn9c102_i2c_write(cam, 0x66, 0x00); err += sn9c102_i2c_write(cam, 0x67, 0x80); err += sn9c102_i2c_write(cam, 0x68, 0x7A); err += sn9c102_i2c_write(cam, 0x69, 0x90); err += sn9c102_i2c_write(cam, 0x6A, 0x80); err += sn9c102_i2c_write(cam, 0x6B, 0x0A); err += sn9c102_i2c_write(cam, 0x6C, 0x30); err += sn9c102_i2c_write(cam, 0x6D, 0x48); err += sn9c102_i2c_write(cam, 0x6E, 0x80); err += sn9c102_i2c_write(cam, 0x6F, 0x74); err += sn9c102_i2c_write(cam, 0x70, 0x64); err += sn9c102_i2c_write(cam, 0x71, 0x60); err += sn9c102_i2c_write(cam, 0x72, 0x5C); err += sn9c102_i2c_write(cam, 0x73, 0x58); err += sn9c102_i2c_write(cam, 0x74, 0x54); err += sn9c102_i2c_write(cam, 0x75, 0x4C); err += sn9c102_i2c_write(cam, 0x76, 0x40); err += sn9c102_i2c_write(cam, 0x77, 0x38); err += sn9c102_i2c_write(cam, 0x78, 0x34); err += sn9c102_i2c_write(cam, 0x79, 0x30); err += sn9c102_i2c_write(cam, 0x7A, 0x2F); err += sn9c102_i2c_write(cam, 0x7B, 0x2B); err += sn9c102_i2c_write(cam, 0x7C, 0x03); err += sn9c102_i2c_write(cam, 0x7D, 0x07); err += sn9c102_i2c_write(cam, 0x7E, 0x17); err += sn9c102_i2c_write(cam, 0x7F, 0x34); err += sn9c102_i2c_write(cam, 0x80, 0x41); err += sn9c102_i2c_write(cam, 0x81, 0x4D); err += sn9c102_i2c_write(cam, 0x82, 0x58); err += sn9c102_i2c_write(cam, 0x83, 0x63); err += sn9c102_i2c_write(cam, 0x84, 0x6E); err += sn9c102_i2c_write(cam, 0x85, 0x77); err += sn9c102_i2c_write(cam, 0x86, 0x87); err += sn9c102_i2c_write(cam, 0x87, 0x95); err += sn9c102_i2c_write(cam, 0x88, 0xAF); err += sn9c102_i2c_write(cam, 0x89, 0xC7); err += sn9c102_i2c_write(cam, 0x8A, 0xDF); err += sn9c102_i2c_write(cam, 0x8B, 0x99); err += sn9c102_i2c_write(cam, 0x8C, 0x99); err += sn9c102_i2c_write(cam, 0x8D, 0xCF); err += sn9c102_i2c_write(cam, 0x8E, 0x20); err += sn9c102_i2c_write(cam, 0x8F, 0x26); err += sn9c102_i2c_write(cam, 0x90, 0x10); err += sn9c102_i2c_write(cam, 0x91, 0x0C); err += sn9c102_i2c_write(cam, 0x92, 0x25); err += sn9c102_i2c_write(cam, 0x93, 0x00); err += sn9c102_i2c_write(cam, 0x94, 0x50); err += sn9c102_i2c_write(cam, 0x95, 0x50); err += sn9c102_i2c_write(cam, 0x96, 0x00); err += sn9c102_i2c_write(cam, 0x97, 0x01); err += sn9c102_i2c_write(cam, 0x98, 0x10); err += sn9c102_i2c_write(cam, 0x99, 0x40); err += sn9c102_i2c_write(cam, 0x9A, 0x40); err += sn9c102_i2c_write(cam, 0x9B, 0x20); err += sn9c102_i2c_write(cam, 0x9C, 0x00); err += sn9c102_i2c_write(cam, 0x9D, 0x99); err += sn9c102_i2c_write(cam, 0x9E, 0x7F); err += sn9c102_i2c_write(cam, 0x9F, 0x00); err += sn9c102_i2c_write(cam, 0xA0, 0x00); err += sn9c102_i2c_write(cam, 0xA1, 0x00); return err; } static int ov7660_get_ctrl(struct sn9c102_device* cam, struct v4l2_control* ctrl) { int err = 0; switch (ctrl->id) { case V4L2_CID_EXPOSURE: if ((ctrl->value = sn9c102_i2c_read(cam, 0x10)) < 0) return -EIO; break; case V4L2_CID_DO_WHITE_BALANCE: if ((ctrl->value = sn9c102_read_reg(cam, 0x02)) < 0) return -EIO; ctrl->value = (ctrl->value & 0x04) ? 
1 : 0; break; case V4L2_CID_RED_BALANCE: if ((ctrl->value = sn9c102_read_reg(cam, 0x05)) < 0) return -EIO; ctrl->value &= 0x7f; break; case V4L2_CID_BLUE_BALANCE: if ((ctrl->value = sn9c102_read_reg(cam, 0x06)) < 0) return -EIO; ctrl->value &= 0x7f; break; case SN9C102_V4L2_CID_GREEN_BALANCE: if ((ctrl->value = sn9c102_read_reg(cam, 0x07)) < 0) return -EIO; ctrl->value &= 0x7f; break; case SN9C102_V4L2_CID_BAND_FILTER: if ((ctrl->value = sn9c102_i2c_read(cam, 0x3b)) < 0) return -EIO; ctrl->value &= 0x08; break; case V4L2_CID_GAIN: if ((ctrl->value = sn9c102_i2c_read(cam, 0x00)) < 0) return -EIO; ctrl->value &= 0x1f; break; case V4L2_CID_AUTOGAIN: if ((ctrl->value = sn9c102_i2c_read(cam, 0x13)) < 0) return -EIO; ctrl->value &= 0x01; break; default: return -EINVAL; } return err ? -EIO : 0; } static int ov7660_set_ctrl(struct sn9c102_device* cam, const struct v4l2_control* ctrl) { int err = 0; switch (ctrl->id) { case V4L2_CID_EXPOSURE: err += sn9c102_i2c_write(cam, 0x10, ctrl->value); break; case V4L2_CID_DO_WHITE_BALANCE: err += sn9c102_write_reg(cam, 0x43 | (ctrl->value << 2), 0x02); break; case V4L2_CID_RED_BALANCE: err += sn9c102_write_reg(cam, ctrl->value, 0x05); break; case V4L2_CID_BLUE_BALANCE: err += sn9c102_write_reg(cam, ctrl->value, 0x06); break; case SN9C102_V4L2_CID_GREEN_BALANCE: err += sn9c102_write_reg(cam, ctrl->value, 0x07); break; case SN9C102_V4L2_CID_BAND_FILTER: err += sn9c102_i2c_write(cam, ctrl->value << 3, 0x3b); break; case V4L2_CID_GAIN: err += sn9c102_i2c_write(cam, 0x00, 0x60 + ctrl->value); break; case V4L2_CID_AUTOGAIN: err += sn9c102_i2c_write(cam, 0x13, 0xc0 | (ctrl->value * 0x07)); break; default: return -EINVAL; } return err ? -EIO : 0; } static int ov7660_set_crop(struct sn9c102_device* cam, const struct v4l2_rect* rect) { struct sn9c102_sensor* s = sn9c102_get_sensor(cam); int err = 0; u8 h_start = (u8)(rect->left - s->cropcap.bounds.left) + 1, v_start = (u8)(rect->top - s->cropcap.bounds.top) + 1; err += sn9c102_write_reg(cam, h_start, 0x12); err += sn9c102_write_reg(cam, v_start, 0x13); return err; } static int ov7660_set_pix_format(struct sn9c102_device* cam, const struct v4l2_pix_format* pix) { int r0, err = 0; r0 = sn9c102_pread_reg(cam, 0x01); if (pix->pixelformat == V4L2_PIX_FMT_JPEG) { err += sn9c102_write_reg(cam, r0 | 0x40, 0x01); err += sn9c102_write_reg(cam, 0xa2, 0x17); err += sn9c102_i2c_write(cam, 0x11, 0x00); } else { err += sn9c102_write_reg(cam, r0 | 0x40, 0x01); err += sn9c102_write_reg(cam, 0xa2, 0x17); err += sn9c102_i2c_write(cam, 0x11, 0x0d); } return err; } static const struct sn9c102_sensor ov7660 = { .name = "OV7660", .maintainer = "Luca Risolia <luca.risolia@studio.unibo.it>", .supported_bridge = BRIDGE_SN9C105 | BRIDGE_SN9C120, .sysfs_ops = SN9C102_I2C_READ | SN9C102_I2C_WRITE, .frequency = SN9C102_I2C_100KHZ, .interface = SN9C102_I2C_2WIRES, .i2c_slave_id = 0x21, .init = &ov7660_init, .qctrl = { { .id = V4L2_CID_GAIN, .type = V4L2_CTRL_TYPE_INTEGER, .name = "global gain", .minimum = 0x00, .maximum = 0x1f, .step = 0x01, .default_value = 0x09, .flags = 0, }, { .id = V4L2_CID_EXPOSURE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "exposure", .minimum = 0x00, .maximum = 0xff, .step = 0x01, .default_value = 0x27, .flags = 0, }, { .id = V4L2_CID_DO_WHITE_BALANCE, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "night mode", .minimum = 0x00, .maximum = 0x01, .step = 0x01, .default_value = 0x00, .flags = 0, }, { .id = V4L2_CID_RED_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "red balance", .minimum = 0x00, .maximum = 0x7f, .step = 0x01, 
.default_value = 0x14, .flags = 0, }, { .id = V4L2_CID_BLUE_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "blue balance", .minimum = 0x00, .maximum = 0x7f, .step = 0x01, .default_value = 0x14, .flags = 0, }, { .id = V4L2_CID_AUTOGAIN, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "auto adjust", .minimum = 0x00, .maximum = 0x01, .step = 0x01, .default_value = 0x01, .flags = 0, }, { .id = SN9C102_V4L2_CID_GREEN_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "green balance", .minimum = 0x00, .maximum = 0x7f, .step = 0x01, .default_value = 0x14, .flags = 0, }, { .id = SN9C102_V4L2_CID_BAND_FILTER, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "band filter", .minimum = 0x00, .maximum = 0x01, .step = 0x01, .default_value = 0x00, .flags = 0, }, }, .get_ctrl = &ov7660_get_ctrl, .set_ctrl = &ov7660_set_ctrl, .cropcap = { .bounds = { .left = 0, .top = 0, .width = 640, .height = 480, }, .defrect = { .left = 0, .top = 0, .width = 640, .height = 480, }, }, .set_crop = &ov7660_set_crop, .pix_format = { .width = 640, .height = 480, .pixelformat = V4L2_PIX_FMT_JPEG, .priv = 8, }, .set_pix_format = &ov7660_set_pix_format }; int sn9c102_probe_ov7660(struct sn9c102_device* cam) { int pid, ver, err; err = sn9c102_write_const_regs(cam, {0x01, 0xf1}, {0x00, 0xf1}, {0x01, 0x01}, {0x00, 0x01}, {0x28, 0x17}); pid = sn9c102_i2c_try_read(cam, &ov7660, 0x0a); ver = sn9c102_i2c_try_read(cam, &ov7660, 0x0b); if (err || pid < 0 || ver < 0) return -EIO; if (pid != 0x76 || ver != 0x60) return -ENODEV; sn9c102_attach_sensor(cam, &ov7660); return 0; }
gpl-2.0
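The ov7660_get_ctrl/ov7660_set_ctrl pair above follows one pattern throughout: each V4L2 control is backed by a bridge or sensor register, reads mask the relevant field out of the register value, writes shift the control value into place, and every transfer's return code is accumulated into err and collapsed to -EIO at the end. A standalone sketch of that pattern for the band-filter control (hypothetical reg_read/reg_write helpers stand in for sn9c102_i2c_read/sn9c102_i2c_write, and the read value is normalized to 0/1 rather than left masked as the driver leaves it):

#include <stdio.h>

/* Hypothetical register file; a real driver would do I2C transfers here. */
static unsigned char fake_regs[0x100];
static int reg_read(unsigned char addr) { return fake_regs[addr]; }
static int reg_write(unsigned char addr, unsigned char val)
{
        fake_regs[addr] = val;
        return 0;
}

/* Band filter: a boolean control mapped to bit 3 of register 0x3b. */
static int band_filter_get(int *value)
{
        int r = reg_read(0x3b);
        if (r < 0)
                return -1;                     /* -EIO in the driver */
        *value = (r & 0x08) ? 1 : 0;           /* extract bit 3 */
        return 0;
}

static int band_filter_set(int value)
{
        int err = 0;
        err += reg_write(0x3b, (unsigned char)(value << 3)); /* place bit 3 */
        return err ? -1 : 0;                   /* collapse accumulated errors */
}

int main(void)
{
        int v = 0;
        band_filter_set(1);
        band_filter_get(&v);
        printf("band filter: %d\n", v);        /* prints 1 */
        return 0;
}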
krichter722/gcc
gcc/testsuite/gcc.target/aarch64/simd/vmulx_f64_1.c
47
1329
/* Test the vmulx_f64 AArch64 SIMD intrinsic. */ /* { dg-do run } */ /* { dg-options "-save-temps -O3" } */ #include "arm_neon.h" extern void abort (void); void __attribute__ ((noinline)) test_case (float64_t v1[1], float64_t v2[1], float64_t e1[1]) { float64x1_t vec1_1 = vld1_f64 (v1); float64x1_t vec1_2 = vld1_f64 (v2); float64x1_t actual1 = vmulx_f64 (vec1_1, vec1_2); float64_t actual[1]; vst1_f64 (actual, actual1); if (actual[0] != e1[0]) abort (); } int main (void) { float64_t v1 = 3.14159265359; float64_t v2 = -2.71828; float64_t v1_1[] = {v1}; float64_t v1_2[] = {v2}; float64_t e1[] = {v1 * v2}; test_case (v1_1, v1_2, e1); float64_t v2_1[] = {0}; float64_t v2_2[] = {__builtin_huge_val ()}; float64_t e2[] = {2.0}; test_case (v2_1, v2_2, e2); float64_t v3_1[] = {0}; float64_t v3_2[] = {-__builtin_huge_val ()}; float64_t e3[] = {-2.0}; test_case (v3_1, v3_2, e3); float64_t v4_1[] = {-0.0}; float64_t v4_2[] = {__builtin_huge_val ()}; float64_t e4[] = {-2.0}; test_case (v4_1, v4_2, e4); float64_t v5_1[] = {-0.0}; float64_t v5_2[] = {-__builtin_huge_val ()}; float64_t e5[] = {2.0}; test_case (v5_1, v5_2, e5); return 0; } /* { dg-final { scan-assembler-times "fmulx\[ \t\]+\[dD\]\[0-9\]+, ?\[dD\]\[0-9\]+, ?\[dD\]\[0-9\]+\n" 1 } } */
gpl-2.0
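The expected values e2 through e5 in the test encode the one place where fmulx differs from an ordinary multiply: where IEEE fmul would produce NaN for 0 x infinity, fmulx returns 2.0 with the usual sign rule (negative iff exactly one operand is negative). A host-side reference model of that behaviour (a sketch for illustration, not part of the testsuite):

#include <math.h>
#include <stdio.h>

/* Reference semantics of AArch64 FMULX: identical to an ordinary
 * multiply except that (+/-0) * (+/-inf) yields +/-2.0, not NaN. */
static double fmulx_ref(double a, double b)
{
        if ((a == 0.0 && isinf(b)) || (isinf(a) && b == 0.0)) {
                int neg = (signbit(a) != 0) ^ (signbit(b) != 0);
                return neg ? -2.0 : 2.0;
        }
        return a * b;
}

int main(void)
{
        /* Mirrors the five cases exercised by the test above. */
        printf("%f\n", fmulx_ref(3.14159265359, -2.71828)); /* ordinary product */
        printf("%f\n", fmulx_ref( 0.0,  HUGE_VAL));         /*  2.0 */
        printf("%f\n", fmulx_ref( 0.0, -HUGE_VAL));         /* -2.0 */
        printf("%f\n", fmulx_ref(-0.0,  HUGE_VAL));         /* -2.0 */
        printf("%f\n", fmulx_ref(-0.0, -HUGE_VAL));         /*  2.0 */
        return 0;
}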
smarr/graal
src/os/windows/vm/threadCritical_windows.cpp
47
3564
/* * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. * */ #include "precompiled.hpp" #include "runtime/thread.inline.hpp" #include "runtime/threadCritical.hpp" // OS-includes here # include <windows.h> # include <winbase.h> // // See threadCritical.hpp for details of this class. // static bool initialized = false; static volatile jint lock_count = -1; static HANDLE lock_event; static DWORD lock_owner = -1; // // Note that Microsoft's critical region code contains a race // condition, and is not suitable for use. A thread holding the // critical section cannot safely suspend a thread attempting // to enter the critical region. The failure mode is that both // threads are permanently suspended. // // I experimented with the use of ordinary Windows mutex objects // and found them ~30 times slower than the critical region code. // void ThreadCritical::initialize() { } void ThreadCritical::release() { assert(lock_owner == -1, "Mutex being deleted while owned."); assert(lock_count == -1, "Mutex being deleted while recursively locked"); assert(lock_event != NULL, "Sanity check"); CloseHandle(lock_event); } ThreadCritical::ThreadCritical() { DWORD current_thread = GetCurrentThreadId(); if (lock_owner != current_thread) { // Grab the lock before doing anything. while (Atomic::cmpxchg(0, &lock_count, -1) != -1) { if (initialized) { DWORD ret = WaitForSingleObject(lock_event, INFINITE); assert(ret == WAIT_OBJECT_0, "unexpected return value from WaitForSingleObject"); } } // Make sure the event object is allocated. if (!initialized) { // Locking will not work correctly unless this is autoreset. lock_event = CreateEvent(NULL, false, false, NULL); initialized = true; } assert(lock_owner == -1, "Lock acquired illegally."); lock_owner = current_thread; } else { // Atomicity isn't required. Bump the recursion count. lock_count++; } assert(lock_owner == GetCurrentThreadId(), "Lock acquired illegally."); } ThreadCritical::~ThreadCritical() { assert(lock_owner == GetCurrentThreadId(), "unlock attempt by wrong thread"); assert(lock_count >= 0, "Attempt to unlock when already unlocked"); if (lock_count == 0) { // We're going to unlock lock_owner = -1; lock_count = -1; // No lost wakeups, lock_event stays signaled until reset. DWORD ret = SetEvent(lock_event); assert(ret != 0, "unexpected return value from SetEvent"); } else { // Just unwinding a recursive lock; lock_count--; } }
gpl-2.0
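ThreadCritical is a hand-rolled recursive lock: the first acquirer CASes lock_count from -1 to 0, contenders block on an auto-reset event, and re-entry by the owning thread merely bumps lock_count with no atomics. A portable sketch of the same owner-plus-depth idea using POSIX threads (hypothetical names; the HotSpot code above additionally handles lazy creation of the event on first contention):

#include <pthread.h>
#include <stdio.h>

/* A recursive lock built from a non-recursive one, an owner id and a
 * recursion depth, mirroring the structure of ThreadCritical above. */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;
static pthread_t owner;
static int owned = 0;   /* read without the lock below: only the current
                         * thread can observe itself as owner, the same
                         * benign idiom the HotSpot code relies on */
static int depth = 0;   /* touched only by the owning thread */

static void rec_lock(void)
{
        if (owned && pthread_equal(owner, pthread_self())) {
                depth++;                        /* re-entry: just bump */
                return;
        }
        pthread_mutex_lock(&inner);             /* block until released */
        owner = pthread_self();
        owned = 1;
        depth = 1;
}

static void rec_unlock(void)
{
        if (--depth == 0) {                     /* outermost unlock */
                owned = 0;
                pthread_mutex_unlock(&inner);   /* wake one waiter, like SetEvent */
        }
}

int main(void)
{
        rec_lock();
        rec_lock();                             /* recursive acquisition */
        printf("depth after re-entry: %d\n", depth);    /* prints 2 */
        rec_unlock();
        rec_unlock();
        return 0;
}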
HinTak/linux
drivers/net/ethernet/chelsio/cxgb/cxgb2.c
47
36640
/***************************************************************************** * * * File: cxgb2.c * * $Revision: 1.25 $ * * $Date: 2005/06/22 00:43:25 $ * * Description: * * Chelsio 10Gb Ethernet Driver. * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License, version 2, as * * published by the Free Software Foundation. * * * * You should have received a copy of the GNU General Public License along * * with this program; if not, see <http://www.gnu.org/licenses/>. * * * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * * * * http://www.chelsio.com * * * * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * * All rights reserved. * * * * Maintainers: maintainers@chelsio.com * * * * Authors: Dimitrios Michailidis <dm@chelsio.com> * * Tina Yang <tainay@chelsio.com> * * Felix Marti <felix@chelsio.com> * * Scott Bardone <sbardone@chelsio.com> * * Kurt Ottaway <kottaway@chelsio.com> * * Frank DiMambro <frank@chelsio.com> * * * * History: * * * ****************************************************************************/ #include "common.h" #include <linux/module.h> #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/if_vlan.h> #include <linux/mii.h> #include <linux/sockios.h> #include <linux/dma-mapping.h> #include <linux/uaccess.h> #include "cpl5_cmd.h" #include "regs.h" #include "gmac.h" #include "cphy.h" #include "sge.h" #include "tp.h" #include "espi.h" #include "elmer0.h" #include <linux/workqueue.h> static inline void schedule_mac_stats_update(struct adapter *ap, int secs) { schedule_delayed_work(&ap->stats_update_task, secs * HZ); } static inline void cancel_mac_stats_update(struct adapter *ap) { cancel_delayed_work(&ap->stats_update_task); } #define MAX_CMDQ_ENTRIES 16384 #define MAX_CMDQ1_ENTRIES 1024 #define MAX_RX_BUFFERS 16384 #define MAX_RX_JUMBO_BUFFERS 16384 #define MAX_TX_BUFFERS_HIGH 16384U #define MAX_TX_BUFFERS_LOW 1536U #define MAX_TX_BUFFERS 1460U #define MIN_FL_ENTRIES 32 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) /* * The EEPROM is actually bigger but only the first few bytes are used so we * only report those. */ #define EEPROM_SIZE 32 MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_AUTHOR("Chelsio Communications"); MODULE_LICENSE("GPL"); static int dflt_msg_enable = DFLT_MSG_ENABLE; module_param(dflt_msg_enable, int, 0); MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap"); #define HCLOCK 0x0 #define LCLOCK 0x1 /* T1 cards powersave mode */ static int t1_clock(struct adapter *adapter, int mode); static int t1powersave = 1; /* HW default is powersave mode. */ module_param(t1powersave, int, 0); MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode"); static int disable_msi = 0; module_param(disable_msi, int, 0); MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); /* * Setup MAC to receive the types of packets we want. 
*/ static void t1_set_rxmode(struct net_device *dev) { struct adapter *adapter = dev->ml_priv; struct cmac *mac = adapter->port[dev->if_port].mac; struct t1_rx_mode rm; rm.dev = dev; mac->ops->set_rx_mode(mac, &rm); } static void link_report(struct port_info *p) { if (!netif_carrier_ok(p->dev)) netdev_info(p->dev, "link down\n"); else { const char *s = "10Mbps"; switch (p->link_config.speed) { case SPEED_10000: s = "10Gbps"; break; case SPEED_1000: s = "1000Mbps"; break; case SPEED_100: s = "100Mbps"; break; } netdev_info(p->dev, "link up, %s, %s-duplex\n", s, p->link_config.duplex == DUPLEX_FULL ? "full" : "half"); } } void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat, int speed, int duplex, int pause) { struct port_info *p = &adapter->port[port_id]; if (link_stat != netif_carrier_ok(p->dev)) { if (link_stat) netif_carrier_on(p->dev); else netif_carrier_off(p->dev); link_report(p); /* multi-ports: inform toe */ if ((speed > 0) && (adapter->params.nports > 1)) { unsigned int sched_speed = 10; switch (speed) { case SPEED_1000: sched_speed = 1000; break; case SPEED_100: sched_speed = 100; break; case SPEED_10: sched_speed = 10; break; } t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed); } } } static void link_start(struct port_info *p) { struct cmac *mac = p->mac; mac->ops->reset(mac); if (mac->ops->macaddress_set) mac->ops->macaddress_set(mac, p->dev->dev_addr); t1_set_rxmode(p->dev); t1_link_start(p->phy, mac, &p->link_config); mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); } static void enable_hw_csum(struct adapter *adapter) { if (adapter->port[0].dev->hw_features & NETIF_F_TSO) t1_tp_set_ip_checksum_offload(adapter->tp, 1); /* for TSO only */ t1_tp_set_tcp_checksum_offload(adapter->tp, 1); } /* * Things to do upon first use of a card. * This must run with the rtnl lock held. */ static int cxgb_up(struct adapter *adapter) { int err = 0; if (!(adapter->flags & FULL_INIT_DONE)) { err = t1_init_hw_modules(adapter); if (err) goto out_err; enable_hw_csum(adapter); adapter->flags |= FULL_INIT_DONE; } t1_interrupts_clear(adapter); adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev); err = request_threaded_irq(adapter->pdev->irq, t1_interrupt, t1_interrupt_thread, adapter->params.has_msi ? 0 : IRQF_SHARED, adapter->name, adapter); if (err) { if (adapter->params.has_msi) pci_disable_msi(adapter->pdev); goto out_err; } t1_sge_start(adapter->sge); t1_interrupts_enable(adapter); out_err: return err; } /* * Release resources when all the ports have been stopped. 
*/ static void cxgb_down(struct adapter *adapter) { t1_sge_stop(adapter->sge); t1_interrupts_disable(adapter); free_irq(adapter->pdev->irq, adapter); if (adapter->params.has_msi) pci_disable_msi(adapter->pdev); } static int cxgb_open(struct net_device *dev) { int err; struct adapter *adapter = dev->ml_priv; int other_ports = adapter->open_device_map & PORT_MASK; napi_enable(&adapter->napi); if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) { napi_disable(&adapter->napi); return err; } __set_bit(dev->if_port, &adapter->open_device_map); link_start(&adapter->port[dev->if_port]); netif_start_queue(dev); if (!other_ports && adapter->params.stats_update_period) schedule_mac_stats_update(adapter, adapter->params.stats_update_period); t1_vlan_mode(adapter, dev->features); return 0; } static int cxgb_close(struct net_device *dev) { struct adapter *adapter = dev->ml_priv; struct port_info *p = &adapter->port[dev->if_port]; struct cmac *mac = p->mac; netif_stop_queue(dev); napi_disable(&adapter->napi); mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX); netif_carrier_off(dev); clear_bit(dev->if_port, &adapter->open_device_map); if (adapter->params.stats_update_period && !(adapter->open_device_map & PORT_MASK)) { /* Stop statistics accumulation. */ smp_mb__after_atomic(); spin_lock(&adapter->work_lock); /* sync with update task */ spin_unlock(&adapter->work_lock); cancel_mac_stats_update(adapter); } if (!adapter->open_device_map) cxgb_down(adapter); return 0; } static struct net_device_stats *t1_get_stats(struct net_device *dev) { struct adapter *adapter = dev->ml_priv; struct port_info *p = &adapter->port[dev->if_port]; struct net_device_stats *ns = &dev->stats; const struct cmac_statistics *pstats; /* Do a full update of the MAC stats */ pstats = p->mac->ops->statistics_update(p->mac, MAC_STATS_UPDATE_FULL); ns->tx_packets = pstats->TxUnicastFramesOK + pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK; ns->rx_packets = pstats->RxUnicastFramesOK + pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK; ns->tx_bytes = pstats->TxOctetsOK; ns->rx_bytes = pstats->RxOctetsOK; ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors + pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions; ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors + pstats->RxFCSErrors + pstats->RxAlignErrors + pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors + pstats->RxSymbolErrors + pstats->RxRuntErrors; ns->multicast = pstats->RxMulticastFramesOK; ns->collisions = pstats->TxTotalCollisions; /* detailed rx_errors */ ns->rx_length_errors = pstats->RxFrameTooLongErrors + pstats->RxJabberErrors; ns->rx_over_errors = 0; ns->rx_crc_errors = pstats->RxFCSErrors; ns->rx_frame_errors = pstats->RxAlignErrors; ns->rx_fifo_errors = 0; ns->rx_missed_errors = 0; /* detailed tx_errors */ ns->tx_aborted_errors = pstats->TxFramesAbortedDueToXSCollisions; ns->tx_carrier_errors = 0; ns->tx_fifo_errors = pstats->TxUnderrun; ns->tx_heartbeat_errors = 0; ns->tx_window_errors = pstats->TxLateCollisions; return ns; } static u32 get_msglevel(struct net_device *dev) { struct adapter *adapter = dev->ml_priv; return adapter->msg_enable; } static void set_msglevel(struct net_device *dev, u32 val) { struct adapter *adapter = dev->ml_priv; adapter->msg_enable = val; } static const char stats_strings[][ETH_GSTRING_LEN] = { "TxOctetsOK", "TxOctetsBad", "TxUnicastFramesOK", "TxMulticastFramesOK", "TxBroadcastFramesOK", "TxPauseFrames", "TxFramesWithDeferredXmissions", "TxLateCollisions", 
"TxTotalCollisions", "TxFramesAbortedDueToXSCollisions", "TxUnderrun", "TxLengthErrors", "TxInternalMACXmitError", "TxFramesWithExcessiveDeferral", "TxFCSErrors", "TxJumboFramesOk", "TxJumboOctetsOk", "RxOctetsOK", "RxOctetsBad", "RxUnicastFramesOK", "RxMulticastFramesOK", "RxBroadcastFramesOK", "RxPauseFrames", "RxFCSErrors", "RxAlignErrors", "RxSymbolErrors", "RxDataErrors", "RxSequenceErrors", "RxRuntErrors", "RxJabberErrors", "RxInternalMACRcvError", "RxInRangeLengthErrors", "RxOutOfRangeLengthField", "RxFrameTooLongErrors", "RxJumboFramesOk", "RxJumboOctetsOk", /* Port stats */ "RxCsumGood", "TxCsumOffload", "TxTso", "RxVlan", "TxVlan", "TxNeedHeadroom", /* Interrupt stats */ "rx drops", "pure_rsps", "unhandled irqs", "respQ_empty", "respQ_overflow", "freelistQ_empty", "pkt_too_big", "pkt_mismatch", "cmdQ_full0", "cmdQ_full1", "espi_DIP2ParityErr", "espi_DIP4Err", "espi_RxDrops", "espi_TxDrops", "espi_RxOvfl", "espi_ParityErr" }; #define T2_REGMAP_SIZE (3 * 1024) static int get_regs_len(struct net_device *dev) { return T2_REGMAP_SIZE; } static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct adapter *adapter = dev->ml_priv; strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); strlcpy(info->bus_info, pci_name(adapter->pdev), sizeof(info->bus_info)); } static int get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return ARRAY_SIZE(stats_strings); default: return -EOPNOTSUPP; } } static void get_strings(struct net_device *dev, u32 stringset, u8 *data) { if (stringset == ETH_SS_STATS) memcpy(data, stats_strings, sizeof(stats_strings)); } static void get_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct adapter *adapter = dev->ml_priv; struct cmac *mac = adapter->port[dev->if_port].mac; const struct cmac_statistics *s; const struct sge_intr_counts *t; struct sge_port_stats ss; s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL); t = t1_sge_get_intr_counts(adapter->sge); t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss); *data++ = s->TxOctetsOK; *data++ = s->TxOctetsBad; *data++ = s->TxUnicastFramesOK; *data++ = s->TxMulticastFramesOK; *data++ = s->TxBroadcastFramesOK; *data++ = s->TxPauseFrames; *data++ = s->TxFramesWithDeferredXmissions; *data++ = s->TxLateCollisions; *data++ = s->TxTotalCollisions; *data++ = s->TxFramesAbortedDueToXSCollisions; *data++ = s->TxUnderrun; *data++ = s->TxLengthErrors; *data++ = s->TxInternalMACXmitError; *data++ = s->TxFramesWithExcessiveDeferral; *data++ = s->TxFCSErrors; *data++ = s->TxJumboFramesOK; *data++ = s->TxJumboOctetsOK; *data++ = s->RxOctetsOK; *data++ = s->RxOctetsBad; *data++ = s->RxUnicastFramesOK; *data++ = s->RxMulticastFramesOK; *data++ = s->RxBroadcastFramesOK; *data++ = s->RxPauseFrames; *data++ = s->RxFCSErrors; *data++ = s->RxAlignErrors; *data++ = s->RxSymbolErrors; *data++ = s->RxDataErrors; *data++ = s->RxSequenceErrors; *data++ = s->RxRuntErrors; *data++ = s->RxJabberErrors; *data++ = s->RxInternalMACRcvError; *data++ = s->RxInRangeLengthErrors; *data++ = s->RxOutOfRangeLengthField; *data++ = s->RxFrameTooLongErrors; *data++ = s->RxJumboFramesOK; *data++ = s->RxJumboOctetsOK; *data++ = ss.rx_cso_good; *data++ = ss.tx_cso; *data++ = ss.tx_tso; *data++ = ss.vlan_xtract; *data++ = ss.vlan_insert; *data++ = ss.tx_need_hdrroom; *data++ = t->rx_drops; *data++ = t->pure_rsps; *data++ = t->unhandled_irqs; *data++ = t->respQ_empty; *data++ = t->respQ_overflow; *data++ = t->freelistQ_empty; *data++ = t->pkt_too_big; *data++ = 
t->pkt_mismatch; *data++ = t->cmdQ_full[0]; *data++ = t->cmdQ_full[1]; if (adapter->espi) { const struct espi_intr_counts *e; e = t1_espi_get_intr_counts(adapter->espi); *data++ = e->DIP2_parity_err; *data++ = e->DIP4_err; *data++ = e->rx_drops; *data++ = e->tx_drops; *data++ = e->rx_ovflw; *data++ = e->parity_err; } } static inline void reg_block_dump(struct adapter *ap, void *buf, unsigned int start, unsigned int end) { u32 *p = buf + start; for ( ; start <= end; start += sizeof(u32)) *p++ = readl(ap->regs + start); } static void get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) { struct adapter *ap = dev->ml_priv; /* * Version scheme: bits 0..9: chip version, bits 10..15: chip revision */ regs->version = 2; memset(buf, 0, T2_REGMAP_SIZE); reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER); reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE); reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR); reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT); reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE); reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE); reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT); reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL); reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE); reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD); } static int get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct adapter *adapter = dev->ml_priv; struct port_info *p = &adapter->port[dev->if_port]; u32 supported, advertising; supported = p->link_config.supported; advertising = p->link_config.advertising; if (netif_carrier_ok(dev)) { cmd->base.speed = p->link_config.speed; cmd->base.duplex = p->link_config.duplex; } else { cmd->base.speed = SPEED_UNKNOWN; cmd->base.duplex = DUPLEX_UNKNOWN; } cmd->base.port = (supported & SUPPORTED_TP) ? 
PORT_TP : PORT_FIBRE; cmd->base.phy_address = p->phy->mdio.prtad; cmd->base.autoneg = p->link_config.autoneg; ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, supported); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, advertising); return 0; } static int speed_duplex_to_caps(int speed, int duplex) { int cap = 0; switch (speed) { case SPEED_10: if (duplex == DUPLEX_FULL) cap = SUPPORTED_10baseT_Full; else cap = SUPPORTED_10baseT_Half; break; case SPEED_100: if (duplex == DUPLEX_FULL) cap = SUPPORTED_100baseT_Full; else cap = SUPPORTED_100baseT_Half; break; case SPEED_1000: if (duplex == DUPLEX_FULL) cap = SUPPORTED_1000baseT_Full; else cap = SUPPORTED_1000baseT_Half; break; case SPEED_10000: if (duplex == DUPLEX_FULL) cap = SUPPORTED_10000baseT_Full; } return cap; } #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \ ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \ ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \ ADVERTISED_10000baseT_Full) static int set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { struct adapter *adapter = dev->ml_priv; struct port_info *p = &adapter->port[dev->if_port]; struct link_config *lc = &p->link_config; u32 advertising; ethtool_convert_link_mode_to_legacy_u32(&advertising, cmd->link_modes.advertising); if (!(lc->supported & SUPPORTED_Autoneg)) return -EOPNOTSUPP; /* can't change speed/duplex */ if (cmd->base.autoneg == AUTONEG_DISABLE) { u32 speed = cmd->base.speed; int cap = speed_duplex_to_caps(speed, cmd->base.duplex); if (!(lc->supported & cap) || (speed == SPEED_1000)) return -EINVAL; lc->requested_speed = speed; lc->requested_duplex = cmd->base.duplex; lc->advertising = 0; } else { advertising &= ADVERTISED_MASK; if (advertising & (advertising - 1)) advertising = lc->supported; advertising &= lc->supported; if (!advertising) return -EINVAL; lc->requested_speed = SPEED_INVALID; lc->requested_duplex = DUPLEX_INVALID; lc->advertising = advertising | ADVERTISED_Autoneg; } lc->autoneg = cmd->base.autoneg; if (netif_running(dev)) t1_link_start(p->phy, p->mac, lc); return 0; } static void get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) { struct adapter *adapter = dev->ml_priv; struct port_info *p = &adapter->port[dev->if_port]; epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0; epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0; epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0; } static int set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) { struct adapter *adapter = dev->ml_priv; struct port_info *p = &adapter->port[dev->if_port]; struct link_config *lc = &p->link_config; if (epause->autoneg == AUTONEG_DISABLE) lc->requested_fc = 0; else if (lc->supported & SUPPORTED_Autoneg) lc->requested_fc = PAUSE_AUTONEG; else return -EINVAL; if (epause->rx_pause) lc->requested_fc |= PAUSE_RX; if (epause->tx_pause) lc->requested_fc |= PAUSE_TX; if (lc->autoneg == AUTONEG_ENABLE) { if (netif_running(dev)) t1_link_start(p->phy, p->mac, lc); } else { lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); if (netif_running(dev)) p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1, lc->fc); } return 0; } static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) { struct adapter *adapter = dev->ml_priv; int jumbo_fl = t1_is_T1B(adapter) ? 
1 : 0; e->rx_max_pending = MAX_RX_BUFFERS; e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS; e->tx_max_pending = MAX_CMDQ_ENTRIES; e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl]; e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl]; e->tx_pending = adapter->params.sge.cmdQ_size[0]; } static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) { struct adapter *adapter = dev->ml_priv; int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0; if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending || e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS || e->tx_pending > MAX_CMDQ_ENTRIES || e->rx_pending < MIN_FL_ENTRIES || e->rx_jumbo_pending < MIN_FL_ENTRIES || e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1)) return -EINVAL; if (adapter->flags & FULL_INIT_DONE) return -EBUSY; adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending; adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending; adapter->params.sge.cmdQ_size[0] = e->tx_pending; adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ? MAX_CMDQ1_ENTRIES : e->tx_pending; return 0; } static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) { struct adapter *adapter = dev->ml_priv; adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs; adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce; adapter->params.sge.sample_interval_usecs = c->rate_sample_interval; t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge); return 0; } static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) { struct adapter *adapter = dev->ml_priv; c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs; c->rate_sample_interval = adapter->params.sge.sample_interval_usecs; c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable; return 0; } static int get_eeprom_len(struct net_device *dev) { struct adapter *adapter = dev->ml_priv; return t1_is_asic(adapter) ? 
EEPROM_SIZE : 0; } #define EEPROM_MAGIC(ap) \ (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16)) static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e, u8 *data) { int i; u8 buf[EEPROM_SIZE] __attribute__((aligned(4))); struct adapter *adapter = dev->ml_priv; e->magic = EEPROM_MAGIC(adapter); for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32)) t1_seeprom_read(adapter, i, (__le32 *)&buf[i]); memcpy(data, buf + e->offset, e->len); return 0; } static const struct ethtool_ops t1_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE_RX | ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL, .get_drvinfo = get_drvinfo, .get_msglevel = get_msglevel, .set_msglevel = set_msglevel, .get_ringparam = get_sge_param, .set_ringparam = set_sge_param, .get_coalesce = get_coalesce, .set_coalesce = set_coalesce, .get_eeprom_len = get_eeprom_len, .get_eeprom = get_eeprom, .get_pauseparam = get_pauseparam, .set_pauseparam = set_pauseparam, .get_link = ethtool_op_get_link, .get_strings = get_strings, .get_sset_count = get_sset_count, .get_ethtool_stats = get_stats, .get_regs_len = get_regs_len, .get_regs = get_regs, .get_link_ksettings = get_link_ksettings, .set_link_ksettings = set_link_ksettings, }; static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd) { struct adapter *adapter = dev->ml_priv; struct mdio_if_info *mdio = &adapter->port[dev->if_port].phy->mdio; return mdio_mii_ioctl(mdio, if_mii(req), cmd); } static int t1_change_mtu(struct net_device *dev, int new_mtu) { int ret; struct adapter *adapter = dev->ml_priv; struct cmac *mac = adapter->port[dev->if_port].mac; if (!mac->ops->set_mtu) return -EOPNOTSUPP; if ((ret = mac->ops->set_mtu(mac, new_mtu))) return ret; dev->mtu = new_mtu; return 0; } static int t1_set_mac_addr(struct net_device *dev, void *p) { struct adapter *adapter = dev->ml_priv; struct cmac *mac = adapter->port[dev->if_port].mac; struct sockaddr *addr = p; if (!mac->ops->macaddress_set) return -EOPNOTSUPP; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); mac->ops->macaddress_set(mac, dev->dev_addr); return 0; } static netdev_features_t t1_fix_features(struct net_device *dev, netdev_features_t features) { /* * Since there is no support for separate rx/tx vlan accel * enable/disable make sure tx flag is always in same state as rx. */ if (features & NETIF_F_HW_VLAN_CTAG_RX) features |= NETIF_F_HW_VLAN_CTAG_TX; else features &= ~NETIF_F_HW_VLAN_CTAG_TX; return features; } static int t1_set_features(struct net_device *dev, netdev_features_t features) { netdev_features_t changed = dev->features ^ features; struct adapter *adapter = dev->ml_priv; if (changed & NETIF_F_HW_VLAN_CTAG_RX) t1_vlan_mode(adapter, features); return 0; } #ifdef CONFIG_NET_POLL_CONTROLLER static void t1_netpoll(struct net_device *dev) { unsigned long flags; struct adapter *adapter = dev->ml_priv; local_irq_save(flags); t1_interrupt(adapter->pdev->irq, adapter); local_irq_restore(flags); } #endif /* * Periodic accumulation of MAC statistics. This is used only if the MAC * does not have any other way to prevent stats counter overflow. */ static void mac_stats_task(struct work_struct *work) { int i; struct adapter *adapter = container_of(work, struct adapter, stats_update_task.work); for_each_port(adapter, i) { struct port_info *p = &adapter->port[i]; if (netif_running(p->dev)) p->mac->ops->statistics_update(p->mac, MAC_STATS_UPDATE_FAST); } /* Schedule the next statistics update if any port is active. 
*/ spin_lock(&adapter->work_lock); if (adapter->open_device_map & PORT_MASK) schedule_mac_stats_update(adapter, adapter->params.stats_update_period); spin_unlock(&adapter->work_lock); } static const struct net_device_ops cxgb_netdev_ops = { .ndo_open = cxgb_open, .ndo_stop = cxgb_close, .ndo_start_xmit = t1_start_xmit, .ndo_get_stats = t1_get_stats, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = t1_set_rxmode, .ndo_do_ioctl = t1_ioctl, .ndo_change_mtu = t1_change_mtu, .ndo_set_mac_address = t1_set_mac_addr, .ndo_fix_features = t1_fix_features, .ndo_set_features = t1_set_features, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = t1_netpoll, #endif }; static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int i, err, pci_using_dac = 0; unsigned long mmio_start, mmio_len; const struct board_info *bi; struct adapter *adapter = NULL; struct port_info *pi; err = pci_enable_device(pdev); if (err) return err; if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { pr_err("%s: cannot find PCI device memory base address\n", pci_name(pdev)); err = -ENODEV; goto out_disable_pdev; } if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { pci_using_dac = 1; if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { pr_err("%s: unable to obtain 64-bit DMA for coherent allocations\n", pci_name(pdev)); err = -ENODEV; goto out_disable_pdev; } } else if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) { pr_err("%s: no usable DMA configuration\n", pci_name(pdev)); goto out_disable_pdev; } err = pci_request_regions(pdev, DRV_NAME); if (err) { pr_err("%s: cannot obtain PCI resources\n", pci_name(pdev)); goto out_disable_pdev; } pci_set_master(pdev); mmio_start = pci_resource_start(pdev, 0); mmio_len = pci_resource_len(pdev, 0); bi = t1_get_board_info(ent->driver_data); for (i = 0; i < bi->port_number; ++i) { struct net_device *netdev; netdev = alloc_etherdev(adapter ? 
0 : sizeof(*adapter)); if (!netdev) { err = -ENOMEM; goto out_free_dev; } SET_NETDEV_DEV(netdev, &pdev->dev); if (!adapter) { adapter = netdev_priv(netdev); adapter->pdev = pdev; adapter->port[0].dev = netdev; /* so we don't leak it */ adapter->regs = ioremap(mmio_start, mmio_len); if (!adapter->regs) { pr_err("%s: cannot map device registers\n", pci_name(pdev)); err = -ENOMEM; goto out_free_dev; } if (t1_get_board_rev(adapter, bi, &adapter->params)) { err = -ENODEV; /* Can't handle this chip rev */ goto out_free_dev; } adapter->name = pci_name(pdev); adapter->msg_enable = dflt_msg_enable; adapter->mmio_len = mmio_len; spin_lock_init(&adapter->tpi_lock); spin_lock_init(&adapter->work_lock); spin_lock_init(&adapter->async_lock); spin_lock_init(&adapter->mac_lock); INIT_DELAYED_WORK(&adapter->stats_update_task, mac_stats_task); pci_set_drvdata(pdev, netdev); } pi = &adapter->port[i]; pi->dev = netdev; netif_carrier_off(netdev); netdev->irq = pdev->irq; netdev->if_port = i; netdev->mem_start = mmio_start; netdev->mem_end = mmio_start + mmio_len - 1; netdev->ml_priv = adapter; netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM; netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_LLTX; if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; if (vlan_tso_capable(adapter)) { netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; /* T204: disable TSO */ if (!(is_T2(adapter)) || bi->port_number != 4) { netdev->hw_features |= NETIF_F_TSO; netdev->features |= NETIF_F_TSO; } } netdev->netdev_ops = &cxgb_netdev_ops; netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ? sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt); netif_napi_add(netdev, &adapter->napi, t1_poll, 64); netdev->ethtool_ops = &t1_ethtool_ops; switch (bi->board) { case CHBT_BOARD_CHT110: case CHBT_BOARD_N110: case CHBT_BOARD_N210: case CHBT_BOARD_CHT210: netdev->max_mtu = PM3393_MAX_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); break; case CHBT_BOARD_CHN204: netdev->max_mtu = VSC7326_MAX_MTU; break; default: netdev->max_mtu = ETH_DATA_LEN; break; } } if (t1_init_sw_modules(adapter, bi) < 0) { err = -ENODEV; goto out_free_dev; } /* * The card is now ready to go. If any errors occur during device * registration we do not fail the whole card but rather proceed only * with the ports we manage to register successfully. However we must * register at least one net device. */ for (i = 0; i < bi->port_number; ++i) { err = register_netdev(adapter->port[i].dev); if (err) pr_warn("%s: cannot register net device %s, skipping\n", pci_name(pdev), adapter->port[i].dev->name); else { /* * Change the name we use for messages to the name of * the first successfully registered interface. */ if (!adapter->registered_device_map) adapter->name = adapter->port[i].dev->name; __set_bit(i, &adapter->registered_device_map); } } if (!adapter->registered_device_map) { pr_err("%s: could not register any net devices\n", pci_name(pdev)); goto out_release_adapter_res; } pr_info("%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name, bi->desc, adapter->params.chip_revision, adapter->params.pci.is_pcix ? "PCIX" : "PCI", adapter->params.pci.speed, adapter->params.pci.width); /* * Set the T1B ASIC and memory clocks. */ if (t1powersave) adapter->t1powersave = LCLOCK; /* HW default is powersave mode. 
*/ else adapter->t1powersave = HCLOCK; if (t1_is_T1B(adapter)) t1_clock(adapter, t1powersave); return 0; out_release_adapter_res: t1_free_sw_modules(adapter); out_free_dev: if (adapter) { if (adapter->regs) iounmap(adapter->regs); for (i = bi->port_number - 1; i >= 0; --i) if (adapter->port[i].dev) free_netdev(adapter->port[i].dev); } pci_release_regions(pdev); out_disable_pdev: pci_disable_device(pdev); return err; } static void bit_bang(struct adapter *adapter, int bitdata, int nbits) { int data; int i; u32 val; enum { S_CLOCK = 1 << 3, S_DATA = 1 << 4 }; for (i = (nbits - 1); i > -1; i--) { udelay(50); data = ((bitdata >> i) & 0x1); __t1_tpi_read(adapter, A_ELMER0_GPO, &val); if (data) val |= S_DATA; else val &= ~S_DATA; udelay(50); /* Set SCLOCK low */ val &= ~S_CLOCK; __t1_tpi_write(adapter, A_ELMER0_GPO, val); udelay(50); /* Write SCLOCK high */ val |= S_CLOCK; __t1_tpi_write(adapter, A_ELMER0_GPO, val); } } static int t1_clock(struct adapter *adapter, int mode) { u32 val; int M_CORE_VAL; int M_MEM_VAL; enum { M_CORE_BITS = 9, T_CORE_VAL = 0, T_CORE_BITS = 2, N_CORE_VAL = 0, N_CORE_BITS = 2, M_MEM_BITS = 9, T_MEM_VAL = 0, T_MEM_BITS = 2, N_MEM_VAL = 0, N_MEM_BITS = 2, NP_LOAD = 1 << 17, S_LOAD_MEM = 1 << 5, S_LOAD_CORE = 1 << 6, S_CLOCK = 1 << 3 }; if (!t1_is_T1B(adapter)) return -ENODEV; /* Can't re-clock this chip. */ if (mode & 2) return 0; /* show current mode. */ if ((adapter->t1powersave & 1) == (mode & 1)) return -EALREADY; /* ASIC already running in mode. */ if ((mode & 1) == HCLOCK) { M_CORE_VAL = 0x14; M_MEM_VAL = 0x18; adapter->t1powersave = HCLOCK; /* overclock */ } else { M_CORE_VAL = 0xe; M_MEM_VAL = 0x10; adapter->t1powersave = LCLOCK; /* underclock */ } /* Don't interrupt this serial stream! */ spin_lock(&adapter->tpi_lock); /* Initialize for ASIC core */ __t1_tpi_read(adapter, A_ELMER0_GPO, &val); val |= NP_LOAD; udelay(50); __t1_tpi_write(adapter, A_ELMER0_GPO, val); udelay(50); __t1_tpi_read(adapter, A_ELMER0_GPO, &val); val &= ~S_LOAD_CORE; val &= ~S_CLOCK; __t1_tpi_write(adapter, A_ELMER0_GPO, val); udelay(50); /* Serial program the ASIC clock synthesizer */ bit_bang(adapter, T_CORE_VAL, T_CORE_BITS); bit_bang(adapter, N_CORE_VAL, N_CORE_BITS); bit_bang(adapter, M_CORE_VAL, M_CORE_BITS); udelay(50); /* Finish ASIC core */ __t1_tpi_read(adapter, A_ELMER0_GPO, &val); val |= S_LOAD_CORE; udelay(50); __t1_tpi_write(adapter, A_ELMER0_GPO, val); udelay(50); __t1_tpi_read(adapter, A_ELMER0_GPO, &val); val &= ~S_LOAD_CORE; udelay(50); __t1_tpi_write(adapter, A_ELMER0_GPO, val); udelay(50); /* Initialize for memory */ __t1_tpi_read(adapter, A_ELMER0_GPO, &val); val |= NP_LOAD; udelay(50); __t1_tpi_write(adapter, A_ELMER0_GPO, val); udelay(50); __t1_tpi_read(adapter, A_ELMER0_GPO, &val); val &= ~S_LOAD_MEM; val &= ~S_CLOCK; udelay(50); __t1_tpi_write(adapter, A_ELMER0_GPO, val); udelay(50); /* Serial program the memory clock synthesizer */ bit_bang(adapter, T_MEM_VAL, T_MEM_BITS); bit_bang(adapter, N_MEM_VAL, N_MEM_BITS); bit_bang(adapter, M_MEM_VAL, M_MEM_BITS); udelay(50); /* Finish memory */ __t1_tpi_read(adapter, A_ELMER0_GPO, &val); val |= S_LOAD_MEM; udelay(50); __t1_tpi_write(adapter, A_ELMER0_GPO, val); udelay(50); __t1_tpi_read(adapter, A_ELMER0_GPO, &val); val &= ~S_LOAD_MEM; udelay(50); __t1_tpi_write(adapter, A_ELMER0_GPO, val); spin_unlock(&adapter->tpi_lock); return 0; } static inline void t1_sw_reset(struct pci_dev *pdev) { pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3); pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0); } static void remove_one(struct 
pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct adapter *adapter = dev->ml_priv; int i; for_each_port(adapter, i) { if (test_bit(i, &adapter->registered_device_map)) unregister_netdev(adapter->port[i].dev); } t1_free_sw_modules(adapter); iounmap(adapter->regs); while (--i >= 0) { if (adapter->port[i].dev) free_netdev(adapter->port[i].dev); } pci_release_regions(pdev); pci_disable_device(pdev); t1_sw_reset(pdev); } static struct pci_driver cxgb_pci_driver = { .name = DRV_NAME, .id_table = t1_pci_tbl, .probe = init_one, .remove = remove_one, }; module_pci_driver(cxgb_pci_driver);
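/*
 * Illustrative sketch, not part of the driver: how the get_sge_param()
 * handler above is reached from user space. Ring sizes travel over the
 * SIOCETHTOOL ioctl as a struct ethtool_ringparam with cmd set to
 * ETHTOOL_GRINGPARAM. The interface name "eth0" is an assumption for the
 * example. Guarded out of the build on purpose.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_ringparam ring = { .cmd = ETHTOOL_GRINGPARAM };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&ring;

	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("SIOCETHTOOL");
		return 1;
	}
	/* rx_pending/tx_pending come straight from the handler above */
	printf("rx %u/%u tx %u/%u\n", ring.rx_pending, ring.rx_max_pending,
	       ring.tx_pending, ring.tx_max_pending);
	return 0;
}
#endif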
gpl-2.0
jibaron/ddebug
drivers/net/ethernet/8390/ne3210.c
47
10313
/* ne3210.c Linux driver for Novell NE3210 EISA Network Adapter Copyright (C) 1998, Paul Gortmaker. This software may be used and distributed according to the terms of the GNU General Public License, incorporated herein by reference. Information and Code Sources: 1) Based upon my other EISA 8390 drivers (lne390, es3210, smc-ultra32) 2) The existing myriad of other Linux 8390 drivers by Donald Becker. 3) Info for getting IRQ and sh-mem gleaned from the EISA cfg file The NE3210 is an EISA shared memory NS8390 implementation. Shared memory address > 1MB should work with this driver. Note that the .cfg file (3/11/93, v1.0) has AUI and BNC switched around (or perhaps there are some defective/backwards cards ???) This driver WILL NOT WORK FOR THE NE3200 - it is completely different and does not use an 8390 at all. Updated to EISA probing API 5/2003 by Marc Zyngier. */ #include <linux/module.h> #include <linux/eisa.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/mm.h> #include <asm/io.h> #include "8390.h" #define DRV_NAME "ne3210" static void ne3210_reset_8390(struct net_device *dev); static void ne3210_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page); static void ne3210_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset); static void ne3210_block_output(struct net_device *dev, int count, const unsigned char *buf, const int start_page); #define NE3210_START_PG 0x00 /* First page of TX buffer */ #define NE3210_STOP_PG 0x80 /* Last page +1 of RX ring */ #define NE3210_IO_EXTENT 0x20 #define NE3210_SA_PROM 0x16 /* Start of e'net addr. */ #define NE3210_RESET_PORT 0xc84 #define NE3210_NIC_OFFSET 0x00 /* Hello, the 8390 is *here* */ #define NE3210_ADDR0 0x00 /* 3 byte vendor prefix */ #define NE3210_ADDR1 0x00 #define NE3210_ADDR2 0x1b #define NE3210_CFG1 0xc84 /* NB: 0xc84 is also "reset" port. */ #define NE3210_CFG2 0xc90 #define NE3210_CFG_EXTENT (NE3210_CFG2 - NE3210_CFG1 + 1) /* * You can OR any of the following bits together and assign it * to NE3210_DEBUG to get verbose driver info during operation. * Currently only the probe one is implemented. */ #define NE3210_D_PROBE 0x01 #define NE3210_D_RX_PKT 0x02 #define NE3210_D_TX_PKT 0x04 #define NE3210_D_IRQ 0x08 #define NE3210_DEBUG 0x0 static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3}; static unsigned int shmem_map[] __initdata = {0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0}; static const char * const ifmap[] __initconst = {"UTP", "?", "BNC", "AUI"}; static int ifmap_val[] __initdata = { IF_PORT_10BASET, IF_PORT_UNKNOWN, IF_PORT_10BASE2, IF_PORT_AUI, }; static int __init ne3210_eisa_probe (struct device *device) { unsigned long ioaddr, phys_mem; int i, retval, port_index; struct eisa_device *edev = to_eisa_device (device); struct net_device *dev; /* Allocate dev->priv and fill in 8390 specific dev fields. 
*/ if (!(dev = alloc_ei_netdev ())) { printk ("ne3210.c: unable to allocate memory for dev!\n"); return -ENOMEM; } SET_NETDEV_DEV(dev, device); dev_set_drvdata(device, dev); ioaddr = edev->base_addr; if (!request_region(ioaddr, NE3210_IO_EXTENT, DRV_NAME)) { retval = -EBUSY; goto out; } if (!request_region(ioaddr + NE3210_CFG1, NE3210_CFG_EXTENT, DRV_NAME)) { retval = -EBUSY; goto out1; } #if NE3210_DEBUG & NE3210_D_PROBE printk("ne3210-debug: probe at %#x, ID %s\n", ioaddr, edev->id.sig); printk("ne3210-debug: config regs: %#x %#x\n", inb(ioaddr + NE3210_CFG1), inb(ioaddr + NE3210_CFG2)); #endif port_index = inb(ioaddr + NE3210_CFG2) >> 6; for (i = 0; i < ETH_ALEN; i++) dev->dev_addr[i] = inb(ioaddr + NE3210_SA_PROM + i); printk("ne3210.c: NE3210 in EISA slot %d, media: %s, addr: %pM.\n", edev->slot, ifmap[port_index], dev->dev_addr); /* Snarf the interrupt now. CFG file has them all listed as `edge' with share=NO */ dev->irq = irq_map[(inb(ioaddr + NE3210_CFG2) >> 3) & 0x07]; printk("ne3210.c: using IRQ %d, ", dev->irq); retval = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev); if (retval) { printk (" unable to get IRQ %d.\n", dev->irq); goto out2; } phys_mem = shmem_map[inb(ioaddr + NE3210_CFG2) & 0x07] * 0x1000; /* BEWARE!! Some dain-bramaged EISA SCUs will allow you to put the card mem within the region covered by `normal' RAM !!! */ if (phys_mem > 1024*1024) { /* phys addr > 1MB */ if (phys_mem < virt_to_phys(high_memory)) { printk(KERN_CRIT "ne3210.c: Card RAM overlaps with normal memory!!!\n"); printk(KERN_CRIT "ne3210.c: Use EISA SCU to set card memory below 1MB,\n"); printk(KERN_CRIT "ne3210.c: or to an address above 0x%llx.\n", (u64)virt_to_phys(high_memory)); printk(KERN_CRIT "ne3210.c: Driver NOT installed.\n"); retval = -EINVAL; goto out3; } } if (!request_mem_region (phys_mem, NE3210_STOP_PG*0x100, DRV_NAME)) { printk ("ne3210.c: Unable to request shared memory at physical address %#lx\n", phys_mem); retval = -EBUSY; goto out3; } printk("%dkB memory at physical address %#lx\n", NE3210_STOP_PG/4, phys_mem); ei_status.mem = ioremap(phys_mem, NE3210_STOP_PG*0x100); if (!ei_status.mem) { printk(KERN_ERR "ne3210.c: Unable to remap card memory !!\n"); printk(KERN_ERR "ne3210.c: Driver NOT installed.\n"); retval = -EAGAIN; goto out4; } printk("ne3210.c: remapped %dkB card memory to virtual address %p\n", NE3210_STOP_PG/4, ei_status.mem); dev->mem_start = (unsigned long)ei_status.mem; dev->mem_end = dev->mem_start + (NE3210_STOP_PG - NE3210_START_PG)*256; /* The 8390 offset is zero for the NE3210 */ dev->base_addr = ioaddr; ei_status.name = "NE3210"; ei_status.tx_start_page = NE3210_START_PG; ei_status.rx_start_page = NE3210_START_PG + TX_PAGES; ei_status.stop_page = NE3210_STOP_PG; ei_status.word16 = 1; ei_status.priv = phys_mem; if (ei_debug > 0) printk("ne3210 loaded.\n"); ei_status.reset_8390 = &ne3210_reset_8390; ei_status.block_input = &ne3210_block_input; ei_status.block_output = &ne3210_block_output; ei_status.get_8390_hdr = &ne3210_get_8390_hdr; dev->netdev_ops = &ei_netdev_ops; dev->if_port = ifmap_val[port_index]; if ((retval = register_netdev (dev))) goto out5; NS8390_init(dev, 0); return 0; out5: iounmap(ei_status.mem); out4: release_mem_region (phys_mem, NE3210_STOP_PG*0x100); out3: free_irq (dev->irq, dev); out2: release_region (ioaddr + NE3210_CFG1, NE3210_CFG_EXTENT); out1: release_region (ioaddr, NE3210_IO_EXTENT); out: free_netdev (dev); return retval; } static int __devexit ne3210_eisa_remove (struct device *device) { struct net_device *dev = dev_get_drvdata(device);
unsigned long ioaddr = to_eisa_device (device)->base_addr; unregister_netdev (dev); iounmap(ei_status.mem); release_mem_region (ei_status.priv, NE3210_STOP_PG*0x100); free_irq (dev->irq, dev); release_region (ioaddr + NE3210_CFG1, NE3210_CFG_EXTENT); release_region (ioaddr, NE3210_IO_EXTENT); free_netdev (dev); return 0; } /* * Reset by toggling the "Board Enable" bits (bit 2 and 0). */ static void ne3210_reset_8390(struct net_device *dev) { unsigned short ioaddr = dev->base_addr; outb(0x04, ioaddr + NE3210_RESET_PORT); if (ei_debug > 1) printk("%s: resetting the NE3210...", dev->name); mdelay(2); ei_status.txing = 0; outb(0x01, ioaddr + NE3210_RESET_PORT); if (ei_debug > 1) printk("reset done\n"); } /* * Note: In the following three functions is the implicit assumption * that the associated memcpy will only use "rep; movsl" as long as * we keep the counts as some multiple of doublewords. This is a * requirement of the hardware, and also prevents us from using * eth_io_copy_and_sum() since we can't guarantee it will limit * itself to doubleword access. */ /* * Grab the 8390 specific header. Similar to the block_input routine, but * we don't need to be concerned with ring wrap as the header will be at * the start of a page, so we optimize accordingly. (A single doubleword.) */ static void ne3210_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) { void __iomem *hdr_start = ei_status.mem + ((ring_page - NE3210_START_PG)<<8); memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); hdr->count = (hdr->count + 3) & ~3; /* Round up allocation. */ } /* * Block input and output are easy on shared memory ethercards, the only * complication is when the ring buffer wraps. The count will already * be rounded up to a doubleword value via ne3210_get_8390_hdr() above. */ static void ne3210_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) { void __iomem *start = ei_status.mem + ring_offset - NE3210_START_PG*256; if (ring_offset + count > NE3210_STOP_PG*256) { /* Packet wraps over end of ring buffer. */ int semi_count = NE3210_STOP_PG*256 - ring_offset; memcpy_fromio(skb->data, start, semi_count); count -= semi_count; memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES*256, count); } else { /* Packet is in one chunk. */ memcpy_fromio(skb->data, start, count); } } static void ne3210_block_output(struct net_device *dev, int count, const unsigned char *buf, int start_page) { void __iomem *shmem = ei_status.mem + ((start_page - NE3210_START_PG)<<8); count = (count + 3) & ~3; /* Round up to doubleword */ memcpy_toio(shmem, buf, count); } static struct eisa_device_id ne3210_ids[] = { { "EGL0101" }, { "NVL1801" }, { "" }, }; MODULE_DEVICE_TABLE(eisa, ne3210_ids); static struct eisa_driver ne3210_eisa_driver = { .id_table = ne3210_ids, .driver = { .name = "ne3210", .probe = ne3210_eisa_probe, .remove = __devexit_p (ne3210_eisa_remove), }, }; MODULE_DESCRIPTION("NE3210 EISA Ethernet driver"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(eisa, ne3210_ids); static int ne3210_init(void) { return eisa_driver_register (&ne3210_eisa_driver); } static void ne3210_cleanup(void) { eisa_driver_unregister (&ne3210_eisa_driver); } module_init (ne3210_init); module_exit (ne3210_cleanup);
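/*
 * Illustrative sketch, not part of the driver: the wrap handling in
 * ne3210_block_input() above, modelled in plain user-space C. A packet
 * that crosses NE3210_STOP_PG*256 is copied in two pieces; the second
 * piece restarts just past the TX pages at the beginning of the RX ring.
 * The value 12 for TX_PAGES is an assumption (it comes from 8390.h);
 * the buffers here are plain memory standing in for card shared memory.
 */
#if 0
#include <string.h>

#define START_PG	0x00
#define STOP_PG		0x80
#define TXPAGES		12	/* assumed value of TX_PAGES */

static void model_block_input(unsigned char *dst, const unsigned char *shmem,
			      int count, int ring_offset)
{
	const unsigned char *start = shmem + ring_offset - START_PG * 256;

	if (ring_offset + count > STOP_PG * 256) {
		/* packet wraps over the end of the ring buffer */
		int semi_count = STOP_PG * 256 - ring_offset;

		memcpy(dst, start, semi_count);
		count -= semi_count;
		/* remainder starts right after the TX pages */
		memcpy(dst + semi_count, shmem + TXPAGES * 256, count);
	} else {
		/* packet is in one chunk */
		memcpy(dst, start, count);
	}
}
#endif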
gpl-2.0
iamroot12CD/linux
kernel/irq/proc.c
303
12257
/* * linux/kernel/irq/proc.c * * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar * * This file contains the /proc/irq/ handling code. */ #include <linux/irq.h> #include <linux/gfp.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/interrupt.h> #include <linux/kernel_stat.h> #include "internals.h" /* * Access rules: * * procfs protects read/write of /proc/irq/N/ files against a * concurrent free of the interrupt descriptor. remove_proc_entry() * immediately prevents new read/writes to happen and waits for * already running read/write functions to complete. * * We remove the proc entries first and then delete the interrupt * descriptor from the radix tree and free it. So it is guaranteed * that irq_to_desc(N) is valid as long as the read/writes are * permitted by procfs. * * The read from /proc/interrupts is a different problem because there * is no protection. So the lookup and the access to irqdesc * information must be protected by sparse_irq_lock. */ static struct proc_dir_entry *root_irq_dir; #ifdef CONFIG_SMP static int show_irq_affinity(int type, struct seq_file *m, void *v) { struct irq_desc *desc = irq_to_desc((long)m->private); const struct cpumask *mask = desc->irq_data.affinity; #ifdef CONFIG_GENERIC_PENDING_IRQ if (irqd_is_setaffinity_pending(&desc->irq_data)) mask = desc->pending_mask; #endif if (type) seq_printf(m, "%*pbl\n", cpumask_pr_args(mask)); else seq_printf(m, "%*pb\n", cpumask_pr_args(mask)); return 0; } static int irq_affinity_hint_proc_show(struct seq_file *m, void *v) { struct irq_desc *desc = irq_to_desc((long)m->private); unsigned long flags; cpumask_var_t mask; if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) return -ENOMEM; raw_spin_lock_irqsave(&desc->lock, flags); if (desc->affinity_hint) cpumask_copy(mask, desc->affinity_hint); raw_spin_unlock_irqrestore(&desc->lock, flags); seq_printf(m, "%*pb\n", cpumask_pr_args(mask)); free_cpumask_var(mask); return 0; } #ifndef is_affinity_mask_valid #define is_affinity_mask_valid(val) 1 #endif int no_irq_affinity; static int irq_affinity_proc_show(struct seq_file *m, void *v) { return show_irq_affinity(0, m, v); } static int irq_affinity_list_proc_show(struct seq_file *m, void *v) { return show_irq_affinity(1, m, v); } static ssize_t write_irq_affinity(int type, struct file *file, const char __user *buffer, size_t count, loff_t *pos) { unsigned int irq = (int)(long)PDE_DATA(file_inode(file)); cpumask_var_t new_value; int err; if (!irq_can_set_affinity(irq) || no_irq_affinity) return -EIO; if (!alloc_cpumask_var(&new_value, GFP_KERNEL)) return -ENOMEM; if (type) err = cpumask_parselist_user(buffer, count, new_value); else err = cpumask_parse_user(buffer, count, new_value); if (err) goto free_cpumask; if (!is_affinity_mask_valid(new_value)) { err = -EINVAL; goto free_cpumask; } /* * Do not allow disabling IRQs completely - it's a too easy * way to make the system unusable accidentally :-) At least * one online CPU still has to be targeted. */ if (!cpumask_intersects(new_value, cpu_online_mask)) { /* Special case for empty set - allow the architecture code to set default SMP affinity. */ err = irq_select_affinity_usr(irq, new_value) ? 
-EINVAL : count; } else { irq_set_affinity(irq, new_value); err = count; } free_cpumask: free_cpumask_var(new_value); return err; } static ssize_t irq_affinity_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos) { return write_irq_affinity(0, file, buffer, count, pos); } static ssize_t irq_affinity_list_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos) { return write_irq_affinity(1, file, buffer, count, pos); } static int irq_affinity_proc_open(struct inode *inode, struct file *file) { return single_open(file, irq_affinity_proc_show, PDE_DATA(inode)); } static int irq_affinity_list_proc_open(struct inode *inode, struct file *file) { return single_open(file, irq_affinity_list_proc_show, PDE_DATA(inode)); } static int irq_affinity_hint_proc_open(struct inode *inode, struct file *file) { return single_open(file, irq_affinity_hint_proc_show, PDE_DATA(inode)); } static const struct file_operations irq_affinity_proc_fops = { .open = irq_affinity_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = irq_affinity_proc_write, }; static const struct file_operations irq_affinity_hint_proc_fops = { .open = irq_affinity_hint_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations irq_affinity_list_proc_fops = { .open = irq_affinity_list_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = irq_affinity_list_proc_write, }; static int default_affinity_show(struct seq_file *m, void *v) { seq_printf(m, "%*pb\n", cpumask_pr_args(irq_default_affinity)); return 0; } static ssize_t default_affinity_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { cpumask_var_t new_value; int err; if (!alloc_cpumask_var(&new_value, GFP_KERNEL)) return -ENOMEM; err = cpumask_parse_user(buffer, count, new_value); if (err) goto out; if (!is_affinity_mask_valid(new_value)) { err = -EINVAL; goto out; } /* * Do not allow disabling IRQs completely - it's a too easy * way to make the system unusable accidentally :-) At least * one online CPU still has to be targeted. 
*/ if (!cpumask_intersects(new_value, cpu_online_mask)) { err = -EINVAL; goto out; } cpumask_copy(irq_default_affinity, new_value); err = count; out: free_cpumask_var(new_value); return err; } static int default_affinity_open(struct inode *inode, struct file *file) { return single_open(file, default_affinity_show, PDE_DATA(inode)); } static const struct file_operations default_affinity_proc_fops = { .open = default_affinity_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = default_affinity_write, }; static int irq_node_proc_show(struct seq_file *m, void *v) { struct irq_desc *desc = irq_to_desc((long) m->private); seq_printf(m, "%d\n", desc->irq_data.node); return 0; } static int irq_node_proc_open(struct inode *inode, struct file *file) { return single_open(file, irq_node_proc_show, PDE_DATA(inode)); } static const struct file_operations irq_node_proc_fops = { .open = irq_node_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; #endif static int irq_spurious_proc_show(struct seq_file *m, void *v) { struct irq_desc *desc = irq_to_desc((long) m->private); seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n", desc->irq_count, desc->irqs_unhandled, jiffies_to_msecs(desc->last_unhandled)); return 0; } static int irq_spurious_proc_open(struct inode *inode, struct file *file) { return single_open(file, irq_spurious_proc_show, PDE_DATA(inode)); } static const struct file_operations irq_spurious_proc_fops = { .open = irq_spurious_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; #define MAX_NAMELEN 128 static int name_unique(unsigned int irq, struct irqaction *new_action) { struct irq_desc *desc = irq_to_desc(irq); struct irqaction *action; unsigned long flags; int ret = 1; raw_spin_lock_irqsave(&desc->lock, flags); for (action = desc->action ; action; action = action->next) { if ((action != new_action) && action->name && !strcmp(new_action->name, action->name)) { ret = 0; break; } } raw_spin_unlock_irqrestore(&desc->lock, flags); return ret; } void register_handler_proc(unsigned int irq, struct irqaction *action) { char name [MAX_NAMELEN]; struct irq_desc *desc = irq_to_desc(irq); if (!desc->dir || action->dir || !action->name || !name_unique(irq, action)) return; memset(name, 0, MAX_NAMELEN); snprintf(name, MAX_NAMELEN, "%s", action->name); /* create /proc/irq/1234/handler/ */ action->dir = proc_mkdir(name, desc->dir); } #undef MAX_NAMELEN #define MAX_NAMELEN 10 void register_irq_proc(unsigned int irq, struct irq_desc *desc) { char name [MAX_NAMELEN]; if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir) return; memset(name, 0, MAX_NAMELEN); sprintf(name, "%d", irq); /* create /proc/irq/1234 */ desc->dir = proc_mkdir(name, root_irq_dir); if (!desc->dir) return; #ifdef CONFIG_SMP /* create /proc/irq/<irq>/smp_affinity */ proc_create_data("smp_affinity", 0644, desc->dir, &irq_affinity_proc_fops, (void *)(long)irq); /* create /proc/irq/<irq>/affinity_hint */ proc_create_data("affinity_hint", 0444, desc->dir, &irq_affinity_hint_proc_fops, (void *)(long)irq); /* create /proc/irq/<irq>/smp_affinity_list */ proc_create_data("smp_affinity_list", 0644, desc->dir, &irq_affinity_list_proc_fops, (void *)(long)irq); proc_create_data("node", 0444, desc->dir, &irq_node_proc_fops, (void *)(long)irq); #endif proc_create_data("spurious", 0444, desc->dir, &irq_spurious_proc_fops, (void *)(long)irq); } void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) { char name [MAX_NAMELEN]; 
if (!root_irq_dir || !desc->dir) return; #ifdef CONFIG_SMP remove_proc_entry("smp_affinity", desc->dir); remove_proc_entry("affinity_hint", desc->dir); remove_proc_entry("smp_affinity_list", desc->dir); remove_proc_entry("node", desc->dir); #endif remove_proc_entry("spurious", desc->dir); memset(name, 0, MAX_NAMELEN); sprintf(name, "%u", irq); remove_proc_entry(name, root_irq_dir); } #undef MAX_NAMELEN void unregister_handler_proc(unsigned int irq, struct irqaction *action) { proc_remove(action->dir); } static void register_default_affinity_proc(void) { #ifdef CONFIG_SMP proc_create("irq/default_smp_affinity", 0644, NULL, &default_affinity_proc_fops); #endif } void init_irq_proc(void) { unsigned int irq; struct irq_desc *desc; /* create /proc/irq */ root_irq_dir = proc_mkdir("irq", NULL); if (!root_irq_dir) return; register_default_affinity_proc(); /* * Create entries for all existing IRQs. */ for_each_irq_desc(irq, desc) { if (!desc) continue; register_irq_proc(irq, desc); } } #ifdef CONFIG_GENERIC_IRQ_SHOW int __weak arch_show_interrupts(struct seq_file *p, int prec) { return 0; } #ifndef ACTUAL_NR_IRQS # define ACTUAL_NR_IRQS nr_irqs #endif int show_interrupts(struct seq_file *p, void *v) { static int prec; unsigned long flags, any_count = 0; int i = *(loff_t *) v, j; struct irqaction *action; struct irq_desc *desc; if (i > ACTUAL_NR_IRQS) return 0; if (i == ACTUAL_NR_IRQS) return arch_show_interrupts(p, prec); /* print header and calculate the width of the first column */ if (i == 0) { for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec) j *= 10; seq_printf(p, "%*s", prec + 8, ""); for_each_online_cpu(j) seq_printf(p, "CPU%-8d", j); seq_putc(p, '\n'); } irq_lock_sparse(); desc = irq_to_desc(i); if (!desc) goto outsparse; raw_spin_lock_irqsave(&desc->lock, flags); for_each_online_cpu(j) any_count |= kstat_irqs_cpu(i, j); action = desc->action; if (!action && !any_count) goto out; seq_printf(p, "%*d: ", prec, i); for_each_online_cpu(j) seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); if (desc->irq_data.chip) { if (desc->irq_data.chip->irq_print_chip) desc->irq_data.chip->irq_print_chip(&desc->irq_data, p); else if (desc->irq_data.chip->name) seq_printf(p, " %8s", desc->irq_data.chip->name); else seq_printf(p, " %8s", "-"); } else { seq_printf(p, " %8s", "None"); } if (desc->irq_data.domain) seq_printf(p, " %*d", prec, (int) desc->irq_data.hwirq); #ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge"); #endif if (desc->name) seq_printf(p, "-%-8s", desc->name); if (action) { seq_printf(p, " %s", action->name); while ((action = action->next) != NULL) seq_printf(p, ", %s", action->name); } seq_putc(p, '\n'); out: raw_spin_unlock_irqrestore(&desc->lock, flags); outsparse: irq_unlock_sparse(); return 0; } #endif
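/*
 * Illustrative sketch, not part of the kernel: the two write formats the
 * proc handlers above accept. smp_affinity takes a hex CPU mask (parsed
 * by cpumask_parse_user), smp_affinity_list takes a CPU list such as
 * "0-1" (parsed by cpumask_parselist_user). IRQ number 19 is an
 * assumption for the example.
 */
#if 0
#include <stdio.h>

static int set_irq19_affinity(void)
{
	FILE *f = fopen("/proc/irq/19/smp_affinity", "w");

	if (!f)
		return -1;
	fprintf(f, "3\n");	/* hex mask: CPUs 0 and 1 */
	if (fclose(f))
		return -1;

	f = fopen("/proc/irq/19/smp_affinity_list", "w");
	if (!f)
		return -1;
	fprintf(f, "0-1\n");	/* same CPUs, list syntax */
	return fclose(f);
}
#endif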
gpl-2.0
danbarsor/linux_kernel-2.6.35
mm/fremap.c
815
6855
/* * linux/mm/fremap.c * * Explicit pagetable population and nonlinear (random) mappings support. * * started by Ingo Molnar, Copyright (C) 2002, 2003 */ #include <linux/backing-dev.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/file.h> #include <linux/mman.h> #include <linux/pagemap.h> #include <linux/swapops.h> #include <linux/rmap.h> #include <linux/module.h> #include <linux/syscalls.h> #include <linux/mmu_notifier.h> #include <asm/mmu_context.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include "internal.h" static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { pte_t pte = *ptep; if (pte_present(pte)) { struct page *page; flush_cache_page(vma, addr, pte_pfn(pte)); pte = ptep_clear_flush(vma, addr, ptep); page = vm_normal_page(vma, addr, pte); if (page) { if (pte_dirty(pte)) set_page_dirty(page); page_remove_rmap(page); page_cache_release(page); update_hiwater_rss(mm); dec_mm_counter(mm, MM_FILEPAGES); } } else { if (!pte_file(pte)) free_swap_and_cache(pte_to_swp_entry(pte)); pte_clear_not_present_full(mm, addr, ptep, 0); } } /* * Install a file pte to a given virtual memory address, release any * previously existing mapping. */ static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot) { int err = -ENOMEM; pte_t *pte; spinlock_t *ptl; pte = get_locked_pte(mm, addr, &ptl); if (!pte) goto out; if (!pte_none(*pte)) zap_pte(mm, vma, addr, pte); set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff)); /* * We don't need to run update_mmu_cache() here because the "file pte" * being installed by install_file_pte() is not a real pte - it's a * non-present entry (like a swap entry), noting what file offset should * be mapped there when there's a fault (in a non-linear vma where * that's not obvious). */ pte_unmap_unlock(pte, ptl); err = 0; out: return err; } static int populate_range(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long size, pgoff_t pgoff) { int err; do { err = install_file_pte(mm, vma, addr, pgoff, vma->vm_page_prot); if (err) return err; size -= PAGE_SIZE; addr += PAGE_SIZE; pgoff++; } while (size); return 0; } /** * sys_remap_file_pages - remap arbitrary pages of an existing VM_SHARED vma * @start: start of the remapped virtual memory range * @size: size of the remapped virtual memory range * @prot: new protection bits of the range (see NOTE) * @pgoff: to-be-mapped page of the backing store file * @flags: 0 or MAP_NONBLOCKED - the later will cause no IO. * * sys_remap_file_pages remaps arbitrary pages of an existing VM_SHARED vma * (shared backing store file). * * This syscall works purely via pagetables, so it's the most efficient * way to map the same (large) file into a given virtual window. Unlike * mmap()/mremap() it does not create any new vmas. The new mappings are * also safe across swapout. * * NOTE: the @prot parameter right now is ignored (but must be zero), * and the vma's default protection is used. Arbitrary protections * might be implemented in the future. 
*/ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, unsigned long, prot, unsigned long, pgoff, unsigned long, flags) { struct mm_struct *mm = current->mm; struct address_space *mapping; unsigned long end = start + size; struct vm_area_struct *vma; int err = -EINVAL; int has_write_lock = 0; if (prot) return err; /* * Sanitize the syscall parameters: */ start = start & PAGE_MASK; size = size & PAGE_MASK; /* Does the address range wrap, or is the span zero-sized? */ if (start + size <= start) return err; /* Can we represent this offset inside this architecture's pte's? */ #if PTE_FILE_MAX_BITS < BITS_PER_LONG if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS)) return err; #endif /* We need down_write() to change vma->vm_flags. */ down_read(&mm->mmap_sem); retry: vma = find_vma(mm, start); /* * Make sure the vma is shared, that it supports prefaulting, * and that the remapped range is valid and fully within * the single existing vma. vm_private_data is used as a * swapout cursor in a VM_NONLINEAR vma. */ if (!vma || !(vma->vm_flags & VM_SHARED)) goto out; if (vma->vm_private_data && !(vma->vm_flags & VM_NONLINEAR)) goto out; if (!(vma->vm_flags & VM_CAN_NONLINEAR)) goto out; if (end <= start || start < vma->vm_start || end > vma->vm_end) goto out; /* Must set VM_NONLINEAR before any pages are populated. */ if (!(vma->vm_flags & VM_NONLINEAR)) { /* Don't need a nonlinear mapping, exit success */ if (pgoff == linear_page_index(vma, start)) { err = 0; goto out; } if (!has_write_lock) { up_read(&mm->mmap_sem); down_write(&mm->mmap_sem); has_write_lock = 1; goto retry; } mapping = vma->vm_file->f_mapping; /* * page_mkclean doesn't work on nonlinear vmas, so if * dirty pages need to be accounted, emulate with linear * vmas. */ if (mapping_cap_account_dirty(mapping)) { unsigned long addr; struct file *file = vma->vm_file; flags &= MAP_NONBLOCK; get_file(file); addr = mmap_region(file, start, size, flags, vma->vm_flags, pgoff); fput(file); if (IS_ERR_VALUE(addr)) { err = addr; } else { BUG_ON(addr != start); err = 0; } goto out; } spin_lock(&mapping->i_mmap_lock); flush_dcache_mmap_lock(mapping); vma->vm_flags |= VM_NONLINEAR; vma_prio_tree_remove(vma, &mapping->i_mmap); vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear); flush_dcache_mmap_unlock(mapping); spin_unlock(&mapping->i_mmap_lock); } if (vma->vm_flags & VM_LOCKED) { /* * drop PG_Mlocked flag for over-mapped range */ unsigned int saved_flags = vma->vm_flags; munlock_vma_pages_range(vma, start, start + size); vma->vm_flags = saved_flags; } mmu_notifier_invalidate_range_start(mm, start, start + size); err = populate_range(mm, vma, start, size, pgoff); mmu_notifier_invalidate_range_end(mm, start, start + size); if (!err && !(flags & MAP_NONBLOCK)) { if (vma->vm_flags & VM_LOCKED) { /* * might be mapping previously unmapped range of file */ mlock_vma_pages_range(vma, start, start + size); } else { if (unlikely(has_write_lock)) { downgrade_write(&mm->mmap_sem); has_write_lock = 0; } make_pages_present(start, start+size); } } /* * We can't clear VM_NONLINEAR because we'd have to do * it after ->populate completes, and that would prevent * downgrading the lock. (Locks can't be upgraded). */ out: if (likely(!has_write_lock)) up_read(&mm->mmap_sem); else up_write(&mm->mmap_sem); return err; }
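/*
 * Illustrative sketch, not part of the kernel: driving the syscall above
 * from user space. A shared file mapping is created, then the first two
 * virtual pages are swapped by rewiring their file offsets with
 * remap_file_pages(), which turns the vma nonlinear. "data.bin" is an
 * assumed, pre-existing file at least two pages long.
 */
#if 0
#define _GNU_SOURCE
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	int fd = open("data.bin", O_RDWR);
	char *p;

	if (fd < 0)
		return 1;
	p = mmap(NULL, 2 * psz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	/* map file page 1 at the first virtual page, page 0 at the second */
	if (remap_file_pages(p, psz, 0, 1, 0) ||
	    remap_file_pages(p + psz, psz, 0, 0, 0))
		return 1;
	return 0;
}
#endif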
gpl-2.0
gbiyer/Sony-Aosp-Kernel
arch/x86/mm/ioremap.c
1327
15673
/* * Re-map IO memory to kernel address space so that we can access it. * This is needed for high PCI addresses that aren't mapped in the * 640k-1MB IO memory area on PC's * * (C) Copyright 1995 1996 Linus Torvalds */ #include <linux/bootmem.h> #include <linux/init.h> #include <linux/io.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/mmiotrace.h> #include <asm/cacheflush.h> #include <asm/e820.h> #include <asm/fixmap.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> #include <asm/pgalloc.h> #include <asm/pat.h> #include "physaddr.h" /* * Fix up the linear direct mapping of the kernel to avoid cache attribute * conflicts. */ int ioremap_change_attr(unsigned long vaddr, unsigned long size, unsigned long prot_val) { unsigned long nrpages = size >> PAGE_SHIFT; int err; switch (prot_val) { case _PAGE_CACHE_UC: default: err = _set_memory_uc(vaddr, nrpages); break; case _PAGE_CACHE_WC: err = _set_memory_wc(vaddr, nrpages); break; case _PAGE_CACHE_WB: err = _set_memory_wb(vaddr, nrpages); break; } return err; } static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages, void *arg) { unsigned long i; for (i = 0; i < nr_pages; ++i) if (pfn_valid(start_pfn + i) && !PageReserved(pfn_to_page(start_pfn + i))) return 1; WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn); return 0; } /* * Remap an arbitrary physical address space into the kernel virtual * address space. Needed when the kernel wants to access high addresses * directly. * * NOTE! We need to allow non-page-aligned mappings too: we will obviously * have to convert them into an offset in a page-aligned mapping, but the * caller shouldn't need to know that small detail. */ static void __iomem *__ioremap_caller(resource_size_t phys_addr, unsigned long size, unsigned long prot_val, void *caller) { unsigned long offset, vaddr; resource_size_t pfn, last_pfn, last_addr; const resource_size_t unaligned_phys_addr = phys_addr; const unsigned long unaligned_size = size; struct vm_struct *area; unsigned long new_prot_val; pgprot_t prot; int retval; void __iomem *ret_addr; /* Don't allow wraparound or zero size */ last_addr = phys_addr + size - 1; if (!size || last_addr < phys_addr) return NULL; if (!phys_addr_valid(phys_addr)) { printk(KERN_WARNING "ioremap: invalid physical address %llx\n", (unsigned long long)phys_addr); WARN_ON_ONCE(1); return NULL; } /* * Don't remap the low PCI/ISA area, it's always mapped.. */ if (is_ISA_range(phys_addr, last_addr)) return (__force void __iomem *)phys_to_virt(phys_addr); /* * Don't allow anybody to remap normal RAM that we're using.. 
*/ pfn = phys_addr >> PAGE_SHIFT; last_pfn = last_addr >> PAGE_SHIFT; if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL, __ioremap_check_ram) == 1) return NULL; /* * Mappings have to be page-aligned */ offset = phys_addr & ~PAGE_MASK; phys_addr &= PHYSICAL_PAGE_MASK; size = PAGE_ALIGN(last_addr+1) - phys_addr; retval = reserve_memtype(phys_addr, (u64)phys_addr + size, prot_val, &new_prot_val); if (retval) { printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval); return NULL; } if (prot_val != new_prot_val) { if (!is_new_memtype_allowed(phys_addr, size, prot_val, new_prot_val)) { printk(KERN_ERR "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n", (unsigned long long)phys_addr, (unsigned long long)(phys_addr + size), prot_val, new_prot_val); goto err_free_memtype; } prot_val = new_prot_val; } switch (prot_val) { case _PAGE_CACHE_UC: default: prot = PAGE_KERNEL_IO_NOCACHE; break; case _PAGE_CACHE_UC_MINUS: prot = PAGE_KERNEL_IO_UC_MINUS; break; case _PAGE_CACHE_WC: prot = PAGE_KERNEL_IO_WC; break; case _PAGE_CACHE_WB: prot = PAGE_KERNEL_IO; break; } /* * Ok, go for it.. */ area = get_vm_area_caller(size, VM_IOREMAP, caller); if (!area) goto err_free_memtype; area->phys_addr = phys_addr; vaddr = (unsigned long) area->addr; if (kernel_map_sync_memtype(phys_addr, size, prot_val)) goto err_free_area; if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) goto err_free_area; ret_addr = (void __iomem *) (vaddr + offset); mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr); /* * Check if the request spans more than any BAR in the iomem resource * tree. */ WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size), KERN_INFO "Info: mapping multiple BARs. Your kernel is fine."); return ret_addr; err_free_area: free_vm_area(area); err_free_memtype: free_memtype(phys_addr, phys_addr + size); return NULL; } /** * ioremap_nocache - map bus memory into CPU space * @phys_addr: bus address of the memory * @size: size of the resource to map * * ioremap_nocache performs a platform specific sequence of operations to * make bus memory CPU accessible via the readb/readw/readl/writeb/ * writew/writel functions and the other mmio helpers. The returned * address is not guaranteed to be usable directly as a virtual * address. * * This version of ioremap ensures that the memory is marked uncachable * on the CPU as well as honouring existing caching rules from things like * the PCI bus. Note that there are other caches and buffers on many * busses. In particular driver authors should read up on PCI writes * * It's useful if some control registers are in such an area and * write combining or read caching is not desirable: * * Must be freed with iounmap. */ void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size) { /* * Ideally, this should be: * pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS; * * Till we fix all X drivers to use ioremap_wc(), we will use * UC MINUS. */ unsigned long val = _PAGE_CACHE_UC_MINUS; return __ioremap_caller(phys_addr, size, val, __builtin_return_address(0)); } EXPORT_SYMBOL(ioremap_nocache); /** * ioremap_wc - map memory into CPU space write combined * @phys_addr: bus address of the memory * @size: size of the resource to map * * This version of ioremap ensures that the memory is marked write combining. * Write combining allows faster writes to some hardware devices. * * Must be freed with iounmap. 
*/ void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size) { if (pat_enabled) return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC, __builtin_return_address(0)); else return ioremap_nocache(phys_addr, size); } EXPORT_SYMBOL(ioremap_wc); void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size) { return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB, __builtin_return_address(0)); } EXPORT_SYMBOL(ioremap_cache); void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size, unsigned long prot_val) { return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK), __builtin_return_address(0)); } EXPORT_SYMBOL(ioremap_prot); /** * iounmap - Free a IO remapping * @addr: virtual address from ioremap_* * * Caller must ensure there is only one unmapping for the same pointer. */ void iounmap(volatile void __iomem *addr) { struct vm_struct *p, *o; if ((void __force *)addr <= high_memory) return; /* * __ioremap special-cases the PCI/ISA range by not instantiating a * vm_area and by simply returning an address into the kernel mapping * of ISA space. So handle that here. */ if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) && (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) return; addr = (volatile void __iomem *) (PAGE_MASK & (unsigned long __force)addr); mmiotrace_iounmap(addr); /* Use the vm area unlocked, assuming the caller ensures there isn't another iounmap for the same address in parallel. Reuse of the virtual address is prevented by leaving it in the global lists until we're done with it. cpa takes care of the direct mappings. */ p = find_vm_area((void __force *)addr); if (!p) { printk(KERN_ERR "iounmap: bad address %p\n", addr); dump_stack(); return; } free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p)); /* Finally remove it */ o = remove_vm_area((void __force *)addr); BUG_ON(p != o || o == NULL); kfree(p); } EXPORT_SYMBOL(iounmap); /* * Convert a physical pointer to a virtual kernel pointer for /dev/mem * access */ void *xlate_dev_mem_ptr(unsigned long phys) { void *addr; unsigned long start = phys & PAGE_MASK; /* If page is RAM, we can use __va. Otherwise ioremap and unmap. 
*/ if (page_is_ram(start >> PAGE_SHIFT)) return __va(phys); addr = (void __force *)ioremap_cache(start, PAGE_SIZE); if (addr) addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK)); return addr; } void unxlate_dev_mem_ptr(unsigned long phys, void *addr) { if (page_is_ram(phys >> PAGE_SHIFT)) return; iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK)); return; } static int __initdata early_ioremap_debug; static int __init early_ioremap_debug_setup(char *str) { early_ioremap_debug = 1; return 0; } early_param("early_ioremap_debug", early_ioremap_debug_setup); static __initdata int after_paging_init; static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss; static inline pmd_t * __init early_ioremap_pmd(unsigned long addr) { /* Don't assume we're using swapper_pg_dir at this point */ pgd_t *base = __va(read_cr3()); pgd_t *pgd = &base[pgd_index(addr)]; pud_t *pud = pud_offset(pgd, addr); pmd_t *pmd = pmd_offset(pud, addr); return pmd; } static inline pte_t * __init early_ioremap_pte(unsigned long addr) { return &bm_pte[pte_index(addr)]; } bool __init is_early_ioremap_ptep(pte_t *ptep) { return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)]; } static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata; void __init early_ioremap_init(void) { pmd_t *pmd; int i; if (early_ioremap_debug) printk(KERN_INFO "early_ioremap_init()\n"); for (i = 0; i < FIX_BTMAPS_SLOTS; i++) slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i); pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); memset(bm_pte, 0, sizeof(bm_pte)); pmd_populate_kernel(&init_mm, pmd, bm_pte); /* * The boot-ioremap range spans multiple pmds, for which * we are not prepared: */ #define __FIXADDR_TOP (-PAGE_SIZE) BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT) != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT)); #undef __FIXADDR_TOP if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) { WARN_ON(1); printk(KERN_WARNING "pmd %p != %p\n", pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))); printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n", fix_to_virt(FIX_BTMAP_BEGIN)); printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n", fix_to_virt(FIX_BTMAP_END)); printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END); printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN); } } void __init early_ioremap_reset(void) { after_paging_init = 1; } static void __init __early_set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags) { unsigned long addr = __fix_to_virt(idx); pte_t *pte; if (idx >= __end_of_fixed_addresses) { BUG(); return; } pte = early_ioremap_pte(addr); if (pgprot_val(flags)) set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags)); else pte_clear(&init_mm, addr, pte); __flush_tlb_one(addr); } static inline void __init early_set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) { if (after_paging_init) __set_fixmap(idx, phys, prot); else __early_set_fixmap(idx, phys, prot); } static inline void __init early_clear_fixmap(enum fixed_addresses idx) { if (after_paging_init) clear_fixmap(idx); else __early_set_fixmap(idx, 0, __pgprot(0)); } static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata; static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata; void __init fixup_early_ioremap(void) { int i; for (i = 0; i < FIX_BTMAPS_SLOTS; i++) { if (prev_map[i]) { WARN_ON(1); break; } } early_ioremap_init(); } static int __init check_early_ioremap_leak(void) { int count = 0; int i; for (i = 0; i < FIX_BTMAPS_SLOTS; i++) if (prev_map[i]) count++; if 
(!count) return 0; WARN(1, KERN_WARNING "Debug warning: early ioremap leak of %d areas detected.\n", count); printk(KERN_WARNING "please boot with early_ioremap_debug and report the dmesg.\n"); return 1; } late_initcall(check_early_ioremap_leak); static void __init __iomem * __early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot) { unsigned long offset; resource_size_t last_addr; unsigned int nrpages; enum fixed_addresses idx0, idx; int i, slot; WARN_ON(system_state != SYSTEM_BOOTING); slot = -1; for (i = 0; i < FIX_BTMAPS_SLOTS; i++) { if (!prev_map[i]) { slot = i; break; } } if (slot < 0) { printk(KERN_INFO "early_iomap(%08llx, %08lx) not found slot\n", (u64)phys_addr, size); WARN_ON(1); return NULL; } if (early_ioremap_debug) { printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ", (u64)phys_addr, size, slot); dump_stack(); } /* Don't allow wraparound or zero size */ last_addr = phys_addr + size - 1; if (!size || last_addr < phys_addr) { WARN_ON(1); return NULL; } prev_size[slot] = size; /* * Mappings have to be page-aligned */ offset = phys_addr & ~PAGE_MASK; phys_addr &= PAGE_MASK; size = PAGE_ALIGN(last_addr + 1) - phys_addr; /* * Mappings have to fit in the FIX_BTMAP area. */ nrpages = size >> PAGE_SHIFT; if (nrpages > NR_FIX_BTMAPS) { WARN_ON(1); return NULL; } /* * Ok, go for it.. */ idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot; idx = idx0; while (nrpages > 0) { early_set_fixmap(idx, phys_addr, prot); phys_addr += PAGE_SIZE; --idx; --nrpages; } if (early_ioremap_debug) printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]); prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]); return prev_map[slot]; } /* Remap an IO device */ void __init __iomem * early_ioremap(resource_size_t phys_addr, unsigned long size) { return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO); } /* Remap memory */ void __init __iomem * early_memremap(resource_size_t phys_addr, unsigned long size) { return __early_ioremap(phys_addr, size, PAGE_KERNEL); } void __init early_iounmap(void __iomem *addr, unsigned long size) { unsigned long virt_addr; unsigned long offset; unsigned int nrpages; enum fixed_addresses idx; int i, slot; slot = -1; for (i = 0; i < FIX_BTMAPS_SLOTS; i++) { if (prev_map[i] == addr) { slot = i; break; } } if (slot < 0) { printk(KERN_INFO "early_iounmap(%p, %08lx) not found slot\n", addr, size); WARN_ON(1); return; } if (prev_size[slot] != size) { printk(KERN_INFO "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n", addr, size, slot, prev_size[slot]); WARN_ON(1); return; } if (early_ioremap_debug) { printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr, size, slot); dump_stack(); } virt_addr = (unsigned long)addr; if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) { WARN_ON(1); return; } offset = virt_addr & ~PAGE_MASK; nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT; idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot; while (nrpages > 0) { early_clear_fixmap(idx); --idx; --nrpages; } prev_map[slot] = NULL; }
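/*
 * Illustrative sketch, not part of this file: the usual driver-side
 * pattern served by ioremap_nocache()/iounmap() above. BAR 0 of a PCI
 * device is mapped uncached, one register is read through the mapping,
 * and the mapping is released. REG_STATUS is a hypothetical register
 * offset; a real driver would also pci_request_regions() first.
 * Needs <linux/pci.h> and <linux/io.h>.
 */
#if 0
#define REG_STATUS 0x10		/* hypothetical device register */

static int demo_read_status(struct pci_dev *pdev)
{
	resource_size_t bar = pci_resource_start(pdev, 0);
	resource_size_t len = pci_resource_len(pdev, 0);
	void __iomem *regs = ioremap_nocache(bar, len);
	u32 status;

	if (!regs)
		return -ENOMEM;
	status = readl(regs + REG_STATUS);	/* MMIO read via the mapping */
	pr_info("demo: status %#x\n", status);
	iounmap(regs);
	return 0;
}
#endif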
gpl-2.0
HSAFoundation/HSA-Drivers-Linux-AMD
src/kernel/fs/pstore/ftrace.c
1839
3086
/* * Copyright 2012 Google, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/compiler.h> #include <linux/irqflags.h> #include <linux/percpu.h> #include <linux/smp.h> #include <linux/atomic.h> #include <linux/types.h> #include <linux/mutex.h> #include <linux/ftrace.h> #include <linux/fs.h> #include <linux/debugfs.h> #include <linux/err.h> #include <linux/cache.h> #include <asm/barrier.h> #include "internal.h" static void notrace pstore_ftrace_call(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op, struct pt_regs *regs) { unsigned long flags; struct pstore_ftrace_record rec = {}; if (unlikely(oops_in_progress)) return; local_irq_save(flags); rec.ip = ip; rec.parent_ip = parent_ip; pstore_ftrace_encode_cpu(&rec, raw_smp_processor_id()); psinfo->write_buf(PSTORE_TYPE_FTRACE, 0, NULL, 0, (void *)&rec, 0, sizeof(rec), psinfo); local_irq_restore(flags); } static struct ftrace_ops pstore_ftrace_ops __read_mostly = { .func = pstore_ftrace_call, }; static DEFINE_MUTEX(pstore_ftrace_lock); static bool pstore_ftrace_enabled; static ssize_t pstore_ftrace_knob_write(struct file *f, const char __user *buf, size_t count, loff_t *ppos) { u8 on; ssize_t ret; ret = kstrtou8_from_user(buf, count, 2, &on); if (ret) return ret; mutex_lock(&pstore_ftrace_lock); if (!on ^ pstore_ftrace_enabled) goto out; if (on) ret = register_ftrace_function(&pstore_ftrace_ops); else ret = unregister_ftrace_function(&pstore_ftrace_ops); if (ret) { pr_err("%s: unable to %sregister ftrace ops: %zd\n", __func__, on ? "" : "un", ret); goto err; } pstore_ftrace_enabled = on; out: ret = count; err: mutex_unlock(&pstore_ftrace_lock); return ret; } static ssize_t pstore_ftrace_knob_read(struct file *f, char __user *buf, size_t count, loff_t *ppos) { char val[] = { '0' + pstore_ftrace_enabled, '\n' }; return simple_read_from_buffer(buf, count, ppos, val, sizeof(val)); } static const struct file_operations pstore_knob_fops = { .open = simple_open, .read = pstore_ftrace_knob_read, .write = pstore_ftrace_knob_write, }; void pstore_register_ftrace(void) { struct dentry *dir; struct dentry *file; if (!psinfo->write_buf) return; dir = debugfs_create_dir("pstore", NULL); if (!dir) { pr_err("%s: unable to create pstore directory\n", __func__); return; } file = debugfs_create_file("record_ftrace", 0600, dir, NULL, &pstore_knob_fops); if (!file) { pr_err("%s: unable to create record_ftrace file\n", __func__); goto err_file; } return; err_file: debugfs_remove(dir); }
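/*
 * Illustrative sketch, not part of this file: flipping the knob that
 * pstore_ftrace_knob_write() above parses with kstrtou8_from_user().
 * The conventional debugfs mount point /sys/kernel/debug is an
 * assumption about the running system.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int pstore_ftrace_enable(int on)
{
	int fd = open("/sys/kernel/debug/pstore/record_ftrace", O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, on ? "1" : "0", 1);	/* registers/unregisters the ftrace ops */
	close(fd);
	return n == 1 ? 0 : -1;
}
#endif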
gpl-2.0
TipsyOs-Devices/android_kernel_samsung_trlte
drivers/media/radio/radio-keene.c
2095
12323
/* * Copyright (c) 2012 Hans Verkuil <hverkuil@xs4all.nl> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* kernel includes */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-event.h> #include <linux/usb.h> #include <linux/mutex.h> /* driver and module definitions */ MODULE_AUTHOR("Hans Verkuil <hverkuil@xs4all.nl>"); MODULE_DESCRIPTION("Keene FM Transmitter driver"); MODULE_LICENSE("GPL"); /* Actually, it advertises itself as a Logitech */ #define USB_KEENE_VENDOR 0x046d #define USB_KEENE_PRODUCT 0x0a0e /* Probably USB_TIMEOUT should be modified in module parameter */ #define BUFFER_LENGTH 8 #define USB_TIMEOUT 500 /* Frequency limits in MHz */ #define FREQ_MIN 76U #define FREQ_MAX 108U #define FREQ_MUL 16000U /* USB Device ID List */ static struct usb_device_id usb_keene_device_table[] = { {USB_DEVICE_AND_INTERFACE_INFO(USB_KEENE_VENDOR, USB_KEENE_PRODUCT, USB_CLASS_HID, 0, 0) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, usb_keene_device_table); struct keene_device { struct usb_device *usbdev; struct usb_interface *intf; struct video_device vdev; struct v4l2_device v4l2_dev; struct v4l2_ctrl_handler hdl; struct mutex lock; u8 *buffer; unsigned curfreq; u8 tx; u8 pa; bool stereo; bool muted; bool preemph_75_us; }; static inline struct keene_device *to_keene_dev(struct v4l2_device *v4l2_dev) { return container_of(v4l2_dev, struct keene_device, v4l2_dev); } /* Set frequency (if non-0), PA, mute and turn on/off the FM transmitter. */ static int keene_cmd_main(struct keene_device *radio, unsigned freq, bool play) { unsigned short freq_send = freq ? (freq - 76 * 16000) / 800 : 0; int ret; radio->buffer[0] = 0x00; radio->buffer[1] = 0x50; radio->buffer[2] = (freq_send >> 8) & 0xff; radio->buffer[3] = freq_send & 0xff; radio->buffer[4] = radio->pa; /* If bit 4 is set, then tune to the frequency. If bit 3 is set, then unmute; if bit 2 is set, then mute. If bit 1 is set, then enter idle mode; if bit 0 is set, then enter transit mode. */ radio->buffer[5] = (radio->muted ? 4 : 8) | (play ? 1 : 2) | (freq ? 0x10 : 0); radio->buffer[6] = 0x00; radio->buffer[7] = 0x00; ret = usb_control_msg(radio->usbdev, usb_sndctrlpipe(radio->usbdev, 0), 9, 0x21, 0x200, 2, radio->buffer, BUFFER_LENGTH, USB_TIMEOUT); if (ret < 0) { dev_warn(&radio->vdev.dev, "%s failed (%d)\n", __func__, ret); return ret; } if (freq) radio->curfreq = freq; return 0; } /* Set TX, stereo and preemphasis mode (50 us vs 75 us). */ static int keene_cmd_set(struct keene_device *radio) { int ret; radio->buffer[0] = 0x00; radio->buffer[1] = 0x51; radio->buffer[2] = radio->tx; /* If bit 0 is set, then transmit mono, otherwise stereo. 
If bit 2 is set, then enable 75 us preemphasis, otherwise it is 50 us. */ radio->buffer[3] = (!radio->stereo) | (radio->preemph_75_us ? 4 : 0); radio->buffer[4] = 0x00; radio->buffer[5] = 0x00; radio->buffer[6] = 0x00; radio->buffer[7] = 0x00; ret = usb_control_msg(radio->usbdev, usb_sndctrlpipe(radio->usbdev, 0), 9, 0x21, 0x200, 2, radio->buffer, BUFFER_LENGTH, USB_TIMEOUT); if (ret < 0) { dev_warn(&radio->vdev.dev, "%s failed (%d)\n", __func__, ret); return ret; } return 0; } /* Handle unplugging the device. * We call video_unregister_device in any case. * The last function called in this procedure is * usb_keene_video_device_release. */ static void usb_keene_disconnect(struct usb_interface *intf) { struct keene_device *radio = to_keene_dev(usb_get_intfdata(intf)); mutex_lock(&radio->lock); usb_set_intfdata(intf, NULL); video_unregister_device(&radio->vdev); v4l2_device_disconnect(&radio->v4l2_dev); mutex_unlock(&radio->lock); v4l2_device_put(&radio->v4l2_dev); } static int usb_keene_suspend(struct usb_interface *intf, pm_message_t message) { struct keene_device *radio = to_keene_dev(usb_get_intfdata(intf)); return keene_cmd_main(radio, 0, false); } static int usb_keene_resume(struct usb_interface *intf) { struct keene_device *radio = to_keene_dev(usb_get_intfdata(intf)); mdelay(50); keene_cmd_set(radio); keene_cmd_main(radio, radio->curfreq, true); return 0; } static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *v) { struct keene_device *radio = video_drvdata(file); strlcpy(v->driver, "radio-keene", sizeof(v->driver)); strlcpy(v->card, "Keene FM Transmitter", sizeof(v->card)); usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info)); v->device_caps = V4L2_CAP_RADIO | V4L2_CAP_MODULATOR; v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; } static int vidioc_g_modulator(struct file *file, void *priv, struct v4l2_modulator *v) { struct keene_device *radio = video_drvdata(file); if (v->index > 0) return -EINVAL; strlcpy(v->name, "FM", sizeof(v->name)); v->rangelow = FREQ_MIN * FREQ_MUL; v->rangehigh = FREQ_MAX * FREQ_MUL; v->txsubchans = radio->stereo ?
V4L2_TUNER_SUB_STEREO : V4L2_TUNER_SUB_MONO; v->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO; return 0; } static int vidioc_s_modulator(struct file *file, void *priv, const struct v4l2_modulator *v) { struct keene_device *radio = video_drvdata(file); if (v->index > 0) return -EINVAL; radio->stereo = (v->txsubchans == V4L2_TUNER_SUB_STEREO); return keene_cmd_set(radio); } static int vidioc_s_frequency(struct file *file, void *priv, const struct v4l2_frequency *f) { struct keene_device *radio = video_drvdata(file); unsigned freq = f->frequency; if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO) return -EINVAL; freq = clamp(freq, FREQ_MIN * FREQ_MUL, FREQ_MAX * FREQ_MUL); return keene_cmd_main(radio, freq, true); } static int vidioc_g_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { struct keene_device *radio = video_drvdata(file); if (f->tuner != 0) return -EINVAL; f->type = V4L2_TUNER_RADIO; f->frequency = radio->curfreq; return 0; } static int keene_s_ctrl(struct v4l2_ctrl *ctrl) { static const u8 db2tx[] = { /* -15, -12, -9, -6, -3, 0 dB */ 0x03, 0x13, 0x02, 0x12, 0x22, 0x32, /* 3, 6, 9, 12, 15, 18 dB */ 0x21, 0x31, 0x20, 0x30, 0x40, 0x50 }; struct keene_device *radio = container_of(ctrl->handler, struct keene_device, hdl); switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: radio->muted = ctrl->val; return keene_cmd_main(radio, 0, true); case V4L2_CID_TUNE_POWER_LEVEL: /* To go from dBuV to the register value we apply the following formula: */ radio->pa = (ctrl->val - 71) * 100 / 62; return keene_cmd_main(radio, 0, true); case V4L2_CID_TUNE_PREEMPHASIS: radio->preemph_75_us = ctrl->val == V4L2_PREEMPHASIS_75_uS; return keene_cmd_set(radio); case V4L2_CID_AUDIO_COMPRESSION_GAIN: radio->tx = db2tx[(ctrl->val - ctrl->minimum) / ctrl->step]; return keene_cmd_set(radio); } return -EINVAL; } /* File system interface */ static const struct v4l2_file_operations usb_keene_fops = { .owner = THIS_MODULE, .open = v4l2_fh_open, .release = v4l2_fh_release, .poll = v4l2_ctrl_poll, .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ctrl_ops keene_ctrl_ops = { .s_ctrl = keene_s_ctrl, }; static const struct v4l2_ioctl_ops usb_keene_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_g_modulator = vidioc_g_modulator, .vidioc_s_modulator = vidioc_s_modulator, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_frequency = vidioc_s_frequency, .vidioc_log_status = v4l2_ctrl_log_status, .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; static void usb_keene_video_device_release(struct v4l2_device *v4l2_dev) { struct keene_device *radio = to_keene_dev(v4l2_dev); /* free rest memory */ v4l2_ctrl_handler_free(&radio->hdl); kfree(radio->buffer); kfree(radio); } /* check if the device is present and register with v4l and usb if it is */ static int usb_keene_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *dev = interface_to_usbdev(intf); struct keene_device *radio; struct v4l2_ctrl_handler *hdl; int retval = 0; /* * The Keene FM transmitter USB device has the same USB ID as * the Logitech AudioHub Speaker, but it should ignore the hid. * Check if the name is that of the Keene device. * If not, then someone connected the AudioHub and we shouldn't * attempt to handle this driver. * For reference: the product name of the AudioHub is * "AudioHub Speaker". 
*/ if (dev->product && strcmp(dev->product, "B-LINK USB Audio ")) return -ENODEV; radio = kzalloc(sizeof(struct keene_device), GFP_KERNEL); if (radio) radio->buffer = kmalloc(BUFFER_LENGTH, GFP_KERNEL); if (!radio || !radio->buffer) { dev_err(&intf->dev, "kmalloc for keene_device failed\n"); kfree(radio); retval = -ENOMEM; goto err; } hdl = &radio->hdl; v4l2_ctrl_handler_init(hdl, 4); v4l2_ctrl_new_std(hdl, &keene_ctrl_ops, V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0); v4l2_ctrl_new_std_menu(hdl, &keene_ctrl_ops, V4L2_CID_TUNE_PREEMPHASIS, V4L2_PREEMPHASIS_75_uS, 1, V4L2_PREEMPHASIS_50_uS); v4l2_ctrl_new_std(hdl, &keene_ctrl_ops, V4L2_CID_TUNE_POWER_LEVEL, 84, 118, 1, 118); v4l2_ctrl_new_std(hdl, &keene_ctrl_ops, V4L2_CID_AUDIO_COMPRESSION_GAIN, -15, 18, 3, 0); radio->pa = 118; radio->tx = 0x32; radio->stereo = true; radio->curfreq = 95.16 * FREQ_MUL; if (hdl->error) { retval = hdl->error; v4l2_ctrl_handler_free(hdl); goto err_v4l2; } retval = v4l2_device_register(&intf->dev, &radio->v4l2_dev); if (retval < 0) { dev_err(&intf->dev, "couldn't register v4l2_device\n"); goto err_v4l2; } mutex_init(&radio->lock); radio->v4l2_dev.ctrl_handler = hdl; radio->v4l2_dev.release = usb_keene_video_device_release; strlcpy(radio->vdev.name, radio->v4l2_dev.name, sizeof(radio->vdev.name)); radio->vdev.v4l2_dev = &radio->v4l2_dev; radio->vdev.fops = &usb_keene_fops; radio->vdev.ioctl_ops = &usb_keene_ioctl_ops; radio->vdev.lock = &radio->lock; radio->vdev.release = video_device_release_empty; radio->vdev.vfl_dir = VFL_DIR_TX; radio->usbdev = interface_to_usbdev(intf); radio->intf = intf; usb_set_intfdata(intf, &radio->v4l2_dev); video_set_drvdata(&radio->vdev, radio); set_bit(V4L2_FL_USE_FH_PRIO, &radio->vdev.flags); retval = video_register_device(&radio->vdev, VFL_TYPE_RADIO, -1); if (retval < 0) { dev_err(&intf->dev, "could not register video device\n"); goto err_vdev; } v4l2_ctrl_handler_setup(hdl); dev_info(&intf->dev, "V4L2 device registered as %s\n", video_device_node_name(&radio->vdev)); return 0; err_vdev: v4l2_device_unregister(&radio->v4l2_dev); err_v4l2: kfree(radio->buffer); kfree(radio); err: return retval; } /* USB subsystem interface */ static struct usb_driver usb_keene_driver = { .name = "radio-keene", .probe = usb_keene_probe, .disconnect = usb_keene_disconnect, .id_table = usb_keene_device_table, .suspend = usb_keene_suspend, .resume = usb_keene_resume, .reset_resume = usb_keene_resume, }; static int __init keene_init(void) { int retval = usb_register(&usb_keene_driver); if (retval) pr_err(KBUILD_MODNAME ": usb_register failed. Error number %d\n", retval); return retval; } static void __exit keene_exit(void) { usb_deregister(&usb_keene_driver); } module_init(keene_init); module_exit(keene_exit);
gpl-2.0
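The trickiest constant-juggling in radio-keene.c above is the frequency encoding in keene_cmd_main(): V4L2 passes the frequency in units of 1/FREQ_MUL MHz (62.5 Hz, matching V4L2_TUNER_CAP_LOW), while the device expects 50 kHz steps above a 76 MHz base, hence (freq - 76 * 16000) / 800. Below is a minimal standalone check of that arithmetic, not part of the driver; only the constants are taken from the file.

#include <assert.h>

/* Illustrative sketch: verify the 62.5 Hz -> 50 kHz step conversion used
 * by keene_cmd_main() in radio-keene.c. */
int main(void)
{
	const unsigned freq_mul = 16000;	/* V4L2 units per MHz, 62.5 Hz each */
	unsigned freq = (unsigned)(95.16 * freq_mul);	/* the driver's default curfreq */
	unsigned short freq_send = (freq - 76 * freq_mul) / 800;	/* 800 units = 50 kHz */

	/* 95.16 MHz is 19.16 MHz above the 76 MHz base: 383 full 50 kHz steps */
	assert(freq_send == 383);
	return 0;
}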
baltoboard/linux-3.14
drivers/fmc/fmc-match.c
2351
3166
/*
 * Copyright (C) 2012 CERN (www.cern.ch)
 * Author: Alessandro Rubini <rubini@gnudd.com>
 *
 * Released according to the GNU GPL, version 2 or any later version.
 *
 * This work is part of the White Rabbit project, a research effort led
 * by CERN, the European Institute for Nuclear Research.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fmc.h>
#include <linux/ipmi-fru.h>

/* The fru parser is both user and kernel capable: it needs alloc */
void *fru_alloc(size_t size)
{
	return kzalloc(size, GFP_KERNEL);
}

/* The actual match function */
int fmc_match(struct device *dev, struct device_driver *drv)
{
	struct fmc_driver *fdrv = to_fmc_driver(drv);
	struct fmc_device *fdev = to_fmc_device(dev);
	struct fmc_fru_id *fid;
	int i, matched = 0;

	/* This currently only matches the EEPROM (FRU id) */
	fid = fdrv->id_table.fru_id;
	if (!fid) {
		dev_warn(&fdev->dev, "Driver has no ID: matches all\n");
		matched = 1;
	} else {
		if (!fdev->id.manufacturer || !fdev->id.product_name)
			return 0; /* the device has no FRU information */
		for (i = 0; i < fdrv->id_table.fru_id_nr; i++, fid++) {
			if (fid->manufacturer &&
			    strcmp(fid->manufacturer, fdev->id.manufacturer))
				continue;
			if (fid->product_name &&
			    strcmp(fid->product_name, fdev->id.product_name))
				continue;
			matched = 1;
			break;
		}
	}

	/* FIXME: match SDB contents */
	return matched;
}

/* This function creates ID info for a newly registered device */
int fmc_fill_id_info(struct fmc_device *fmc)
{
	struct fru_common_header *h;
	struct fru_board_info_area *bia;
	int ret, allocated = 0;

	/* If we know the eeprom length, try to read it off the device */
	if (fmc->eeprom_len && !fmc->eeprom) {
		fmc->eeprom = kzalloc(fmc->eeprom_len, GFP_KERNEL);
		if (!fmc->eeprom)
			return -ENOMEM;
		allocated = 1;
		ret = fmc->op->read_ee(fmc, 0, fmc->eeprom, fmc->eeprom_len);
		if (ret < 0)
			goto out;
	}

	/* If no eeprom, continue with other matches */
	if (!fmc->eeprom)
		return 0;

	dev_info(fmc->hwdev, "mezzanine %i\n", fmc->slot_id); /* header */

	/* So we have the eeprom: parse the FRU part (if any) */
	h = (void *)fmc->eeprom;
	if (h->format != 1) {
		pr_info(" EEPROM has no FRU information\n");
		goto out;
	}
	if (!fru_header_cksum_ok(h)) {
		pr_info(" FRU: wrong header checksum\n");
		goto out;
	}
	bia = fru_get_board_area(h);
	if (!fru_bia_cksum_ok(bia)) {
		pr_info(" FRU: wrong board area checksum\n");
		goto out;
	}
	fmc->id.manufacturer = fru_get_board_manufacturer(h);
	fmc->id.product_name = fru_get_product_name(h);
	pr_info(" Manufacturer: %s\n", fmc->id.manufacturer);
	pr_info(" Product name: %s\n", fmc->id.product_name);

	/* Create the short name (FIXME: look in sdb as well) */
	fmc->mezzanine_name = kstrdup(fmc->id.product_name, GFP_KERNEL);

out:
	if (allocated) {
		kfree(fmc->eeprom);
		fmc->eeprom = NULL;
	}
	return 0; /* no error: let other identification work */
}

/* Some ID data is allocated using fru_alloc() above, so release it */
void fmc_free_id_info(struct fmc_device *fmc)
{
	kfree(fmc->mezzanine_name);
	kfree(fmc->id.manufacturer);
	kfree(fmc->id.product_name);
}
gpl-2.0
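fmc_match() above accepts a device when every non-NULL field of some fmc_fru_id entry equals the corresponding FRU string read from the mezzanine EEPROM; a NULL manufacturer or product_name acts as a wildcard, and a driver without a table matches everything (with a warning). Below is a sketch of how a mezzanine driver might fill in that table, assuming the fmc_driver/fmc_fru_id layout implied by the match loop; the "ACME" names are invented placeholders.

#include <linux/kernel.h>
#include <linux/fmc.h>

/* Hypothetical ID table: fmc_match() compares these strings against the
 * manufacturer and product name parsed out of the FRU board area. */
static struct fmc_fru_id acme_fru_ids[] = {
	{
		.manufacturer	= "ACME",		/* invented for illustration */
		.product_name	= "fmc-frobnicator",	/* NULL would match any product */
	},
};

static struct fmc_driver acme_fmc_driver = {
	.driver.name	= "fmc-frobnicator",
	.id_table	= {
		.fru_id		= acme_fru_ids,
		.fru_id_nr	= ARRAY_SIZE(acme_fru_ids),
	},
	/* .probe and .remove callbacks omitted from this sketch */
};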
MoKee/android_kernel_motorola_msm8960-common
scripts/kconfig/symbol.c
2863
27145
/* * Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org> * Released under the terms of the GNU GPL v2.0. */ #include <ctype.h> #include <stdlib.h> #include <string.h> #include <regex.h> #include <sys/utsname.h> #define LKC_DIRECT_LINK #include "lkc.h" struct symbol symbol_yes = { .name = "y", .curr = { "y", yes }, .flags = SYMBOL_CONST|SYMBOL_VALID, }, symbol_mod = { .name = "m", .curr = { "m", mod }, .flags = SYMBOL_CONST|SYMBOL_VALID, }, symbol_no = { .name = "n", .curr = { "n", no }, .flags = SYMBOL_CONST|SYMBOL_VALID, }, symbol_empty = { .name = "", .curr = { "", no }, .flags = SYMBOL_VALID, }; struct symbol *sym_defconfig_list; struct symbol *modules_sym; tristate modules_val; struct expr *sym_env_list; static void sym_add_default(struct symbol *sym, const char *def) { struct property *prop = prop_alloc(P_DEFAULT, sym); prop->expr = expr_alloc_symbol(sym_lookup(def, SYMBOL_CONST)); } void sym_init(void) { struct symbol *sym; struct utsname uts; static bool inited = false; if (inited) return; inited = true; uname(&uts); sym = sym_lookup("UNAME_RELEASE", 0); sym->type = S_STRING; sym->flags |= SYMBOL_AUTO; sym_add_default(sym, uts.release); } enum symbol_type sym_get_type(struct symbol *sym) { enum symbol_type type = sym->type; if (type == S_TRISTATE) { if (sym_is_choice_value(sym) && sym->visible == yes) type = S_BOOLEAN; else if (modules_val == no) type = S_BOOLEAN; } return type; } const char *sym_type_name(enum symbol_type type) { switch (type) { case S_BOOLEAN: return "boolean"; case S_TRISTATE: return "tristate"; case S_INT: return "integer"; case S_HEX: return "hex"; case S_STRING: return "string"; case S_UNKNOWN: return "unknown"; case S_OTHER: break; } return "???"; } struct property *sym_get_choice_prop(struct symbol *sym) { struct property *prop; for_all_choices(sym, prop) return prop; return NULL; } struct property *sym_get_env_prop(struct symbol *sym) { struct property *prop; for_all_properties(sym, prop, P_ENV) return prop; return NULL; } struct property *sym_get_default_prop(struct symbol *sym) { struct property *prop; for_all_defaults(sym, prop) { prop->visible.tri = expr_calc_value(prop->visible.expr); if (prop->visible.tri != no) return prop; } return NULL; } static struct property *sym_get_range_prop(struct symbol *sym) { struct property *prop; for_all_properties(sym, prop, P_RANGE) { prop->visible.tri = expr_calc_value(prop->visible.expr); if (prop->visible.tri != no) return prop; } return NULL; } static int sym_get_range_val(struct symbol *sym, int base) { sym_calc_value(sym); switch (sym->type) { case S_INT: base = 10; break; case S_HEX: base = 16; break; default: break; } return strtol(sym->curr.val, NULL, base); } static void sym_validate_range(struct symbol *sym) { struct property *prop; int base, val, val2; char str[64]; switch (sym->type) { case S_INT: base = 10; break; case S_HEX: base = 16; break; default: return; } prop = sym_get_range_prop(sym); if (!prop) return; val = strtol(sym->curr.val, NULL, base); val2 = sym_get_range_val(prop->expr->left.sym, base); if (val >= val2) { val2 = sym_get_range_val(prop->expr->right.sym, base); if (val <= val2) return; } if (sym->type == S_INT) sprintf(str, "%d", val2); else sprintf(str, "0x%x", val2); sym->curr.val = strdup(str); } static void sym_calc_visibility(struct symbol *sym) { struct property *prop; tristate tri; /* any prompt visible? 
*/ tri = no; for_all_prompts(sym, prop) { prop->visible.tri = expr_calc_value(prop->visible.expr); tri = EXPR_OR(tri, prop->visible.tri); } if (tri == mod && (sym->type != S_TRISTATE || modules_val == no)) tri = yes; if (sym->visible != tri) { sym->visible = tri; sym_set_changed(sym); } if (sym_is_choice_value(sym)) return; /* defaulting to "yes" if no explicit "depends on" are given */ tri = yes; if (sym->dir_dep.expr) tri = expr_calc_value(sym->dir_dep.expr); if (tri == mod) tri = yes; if (sym->dir_dep.tri != tri) { sym->dir_dep.tri = tri; sym_set_changed(sym); } tri = no; if (sym->rev_dep.expr) tri = expr_calc_value(sym->rev_dep.expr); if (tri == mod && sym_get_type(sym) == S_BOOLEAN) tri = yes; if (sym->rev_dep.tri != tri) { sym->rev_dep.tri = tri; sym_set_changed(sym); } } /* * Find the default symbol for a choice. * First try the default values for the choice symbol * Next locate the first visible choice value * Return NULL if none was found */ struct symbol *sym_choice_default(struct symbol *sym) { struct symbol *def_sym; struct property *prop; struct expr *e; /* any of the defaults visible? */ for_all_defaults(sym, prop) { prop->visible.tri = expr_calc_value(prop->visible.expr); if (prop->visible.tri == no) continue; def_sym = prop_get_symbol(prop); if (def_sym->visible != no) return def_sym; } /* just get the first visible value */ prop = sym_get_choice_prop(sym); expr_list_for_each_sym(prop->expr, e, def_sym) if (def_sym->visible != no) return def_sym; /* failed to locate any defaults */ return NULL; } static struct symbol *sym_calc_choice(struct symbol *sym) { struct symbol *def_sym; struct property *prop; struct expr *e; /* first calculate all choice values' visibilities */ prop = sym_get_choice_prop(sym); expr_list_for_each_sym(prop->expr, e, def_sym) sym_calc_visibility(def_sym); /* is the user choice visible? */ def_sym = sym->def[S_DEF_USER].val; if (def_sym && def_sym->visible != no) return def_sym; def_sym = sym_choice_default(sym); if (def_sym == NULL) /* no choice? reset tristate value */ sym->curr.tri = no; return def_sym; } void sym_calc_value(struct symbol *sym) { struct symbol_value newval, oldval; struct property *prop; struct expr *e; if (!sym) return; if (sym->flags & SYMBOL_VALID) return; sym->flags |= SYMBOL_VALID; oldval = sym->curr; switch (sym->type) { case S_INT: case S_HEX: case S_STRING: newval = symbol_empty.curr; break; case S_BOOLEAN: case S_TRISTATE: newval = symbol_no.curr; break; default: sym->curr.val = sym->name; sym->curr.tri = no; return; } if (!sym_is_choice_value(sym)) sym->flags &= ~SYMBOL_WRITE; sym_calc_visibility(sym); /* set default if recursively called */ sym->curr = newval; switch (sym_get_type(sym)) { case S_BOOLEAN: case S_TRISTATE: if (sym_is_choice_value(sym) && sym->visible == yes) { prop = sym_get_choice_prop(sym); newval.tri = (prop_get_symbol(prop)->curr.val == sym) ? 
yes : no; } else { if (sym->visible != no) { /* if the symbol is visible use the user value * if available, otherwise try the default value */ sym->flags |= SYMBOL_WRITE; if (sym_has_value(sym)) { newval.tri = EXPR_AND(sym->def[S_DEF_USER].tri, sym->visible); goto calc_newval; } } if (sym->rev_dep.tri != no) sym->flags |= SYMBOL_WRITE; if (!sym_is_choice(sym)) { prop = sym_get_default_prop(sym); if (prop) { sym->flags |= SYMBOL_WRITE; newval.tri = EXPR_AND(expr_calc_value(prop->expr), prop->visible.tri); } } calc_newval: if (sym->dir_dep.tri == no && sym->rev_dep.tri != no) { struct expr *e; e = expr_simplify_unmet_dep(sym->rev_dep.expr, sym->dir_dep.expr); fprintf(stderr, "warning: ("); expr_fprint(e, stderr); fprintf(stderr, ") selects %s which has unmet direct dependencies (", sym->name); expr_fprint(sym->dir_dep.expr, stderr); fprintf(stderr, ")\n"); expr_free(e); } newval.tri = EXPR_OR(newval.tri, sym->rev_dep.tri); } if (newval.tri == mod && sym_get_type(sym) == S_BOOLEAN) newval.tri = yes; break; case S_STRING: case S_HEX: case S_INT: if (sym->visible != no) { sym->flags |= SYMBOL_WRITE; if (sym_has_value(sym)) { newval.val = sym->def[S_DEF_USER].val; break; } } prop = sym_get_default_prop(sym); if (prop) { struct symbol *ds = prop_get_symbol(prop); if (ds) { sym->flags |= SYMBOL_WRITE; sym_calc_value(ds); newval.val = ds->curr.val; } } break; default: ; } sym->curr = newval; if (sym_is_choice(sym) && newval.tri == yes) sym->curr.val = sym_calc_choice(sym); sym_validate_range(sym); if (memcmp(&oldval, &sym->curr, sizeof(oldval))) { sym_set_changed(sym); if (modules_sym == sym) { sym_set_all_changed(); modules_val = modules_sym->curr.tri; } } if (sym_is_choice(sym)) { struct symbol *choice_sym; prop = sym_get_choice_prop(sym); expr_list_for_each_sym(prop->expr, e, choice_sym) { if ((sym->flags & SYMBOL_WRITE) && choice_sym->visible != no) choice_sym->flags |= SYMBOL_WRITE; if (sym->flags & SYMBOL_CHANGED) sym_set_changed(choice_sym); } } if (sym->flags & SYMBOL_AUTO) sym->flags &= ~SYMBOL_WRITE; } void sym_clear_all_valid(void) { struct symbol *sym; int i; for_all_symbols(i, sym) sym->flags &= ~SYMBOL_VALID; sym_add_change_count(1); if (modules_sym) sym_calc_value(modules_sym); } void sym_set_changed(struct symbol *sym) { struct property *prop; sym->flags |= SYMBOL_CHANGED; for (prop = sym->prop; prop; prop = prop->next) { if (prop->menu) prop->menu->flags |= MENU_CHANGED; } } void sym_set_all_changed(void) { struct symbol *sym; int i; for_all_symbols(i, sym) sym_set_changed(sym); } bool sym_tristate_within_range(struct symbol *sym, tristate val) { int type = sym_get_type(sym); if (sym->visible == no) return false; if (type != S_BOOLEAN && type != S_TRISTATE) return false; if (type == S_BOOLEAN && val == mod) return false; if (sym->visible <= sym->rev_dep.tri) return false; if (sym_is_choice_value(sym) && sym->visible == yes) return val == yes; return val >= sym->rev_dep.tri && val <= sym->visible; } bool sym_set_tristate_value(struct symbol *sym, tristate val) { tristate oldval = sym_get_tristate_value(sym); if (oldval != val && !sym_tristate_within_range(sym, val)) return false; if (!(sym->flags & SYMBOL_DEF_USER)) { sym->flags |= SYMBOL_DEF_USER; sym_set_changed(sym); } /* * setting a choice value also resets the new flag of the choice * symbol and all other choice values. 
*/ if (sym_is_choice_value(sym) && val == yes) { struct symbol *cs = prop_get_symbol(sym_get_choice_prop(sym)); struct property *prop; struct expr *e; cs->def[S_DEF_USER].val = sym; cs->flags |= SYMBOL_DEF_USER; prop = sym_get_choice_prop(cs); for (e = prop->expr; e; e = e->left.expr) { if (e->right.sym->visible != no) e->right.sym->flags |= SYMBOL_DEF_USER; } } sym->def[S_DEF_USER].tri = val; if (oldval != val) sym_clear_all_valid(); return true; } tristate sym_toggle_tristate_value(struct symbol *sym) { tristate oldval, newval; oldval = newval = sym_get_tristate_value(sym); do { switch (newval) { case no: newval = mod; break; case mod: newval = yes; break; case yes: newval = no; break; } if (sym_set_tristate_value(sym, newval)) break; } while (oldval != newval); return newval; } bool sym_string_valid(struct symbol *sym, const char *str) { signed char ch; switch (sym->type) { case S_STRING: return true; case S_INT: ch = *str++; if (ch == '-') ch = *str++; if (!isdigit(ch)) return false; if (ch == '0' && *str != 0) return false; while ((ch = *str++)) { if (!isdigit(ch)) return false; } return true; case S_HEX: if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) str += 2; ch = *str++; do { if (!isxdigit(ch)) return false; } while ((ch = *str++)); return true; case S_BOOLEAN: case S_TRISTATE: switch (str[0]) { case 'y': case 'Y': case 'm': case 'M': case 'n': case 'N': return true; } return false; default: return false; } } bool sym_string_within_range(struct symbol *sym, const char *str) { struct property *prop; int val; switch (sym->type) { case S_STRING: return sym_string_valid(sym, str); case S_INT: if (!sym_string_valid(sym, str)) return false; prop = sym_get_range_prop(sym); if (!prop) return true; val = strtol(str, NULL, 10); return val >= sym_get_range_val(prop->expr->left.sym, 10) && val <= sym_get_range_val(prop->expr->right.sym, 10); case S_HEX: if (!sym_string_valid(sym, str)) return false; prop = sym_get_range_prop(sym); if (!prop) return true; val = strtol(str, NULL, 16); return val >= sym_get_range_val(prop->expr->left.sym, 16) && val <= sym_get_range_val(prop->expr->right.sym, 16); case S_BOOLEAN: case S_TRISTATE: switch (str[0]) { case 'y': case 'Y': return sym_tristate_within_range(sym, yes); case 'm': case 'M': return sym_tristate_within_range(sym, mod); case 'n': case 'N': return sym_tristate_within_range(sym, no); } return false; default: return false; } } bool sym_set_string_value(struct symbol *sym, const char *newval) { const char *oldval; char *val; int size; switch (sym->type) { case S_BOOLEAN: case S_TRISTATE: switch (newval[0]) { case 'y': case 'Y': return sym_set_tristate_value(sym, yes); case 'm': case 'M': return sym_set_tristate_value(sym, mod); case 'n': case 'N': return sym_set_tristate_value(sym, no); } return false; default: ; } if (!sym_string_within_range(sym, newval)) return false; if (!(sym->flags & SYMBOL_DEF_USER)) { sym->flags |= SYMBOL_DEF_USER; sym_set_changed(sym); } oldval = sym->def[S_DEF_USER].val; size = strlen(newval) + 1; if (sym->type == S_HEX && (newval[0] != '0' || (newval[1] != 'x' && newval[1] != 'X'))) { size += 2; sym->def[S_DEF_USER].val = val = malloc(size); *val++ = '0'; *val++ = 'x'; } else if (!oldval || strcmp(oldval, newval)) sym->def[S_DEF_USER].val = val = malloc(size); else return true; strcpy(val, newval); free((void *)oldval); sym_clear_all_valid(); return true; } /* * Find the default value associated to a symbol. * For tristate symbol handle the modules=n case * in which case "m" becomes "y". 
* If the symbol does not have any default then fallback * to the fixed default values. */ const char *sym_get_string_default(struct symbol *sym) { struct property *prop; struct symbol *ds; const char *str; tristate val; sym_calc_visibility(sym); sym_calc_value(modules_sym); val = symbol_no.curr.tri; str = symbol_empty.curr.val; /* If symbol has a default value look it up */ prop = sym_get_default_prop(sym); if (prop != NULL) { switch (sym->type) { case S_BOOLEAN: case S_TRISTATE: /* The visibility may limit the value from yes => mod */ val = EXPR_AND(expr_calc_value(prop->expr), prop->visible.tri); break; default: /* * The following fails to handle the situation * where a default value is further limited by * the valid range. */ ds = prop_get_symbol(prop); if (ds != NULL) { sym_calc_value(ds); str = (const char *)ds->curr.val; } } } /* Handle select statements */ val = EXPR_OR(val, sym->rev_dep.tri); /* transpose mod to yes if modules are not enabled */ if (val == mod) if (!sym_is_choice_value(sym) && modules_sym->curr.tri == no) val = yes; /* transpose mod to yes if type is bool */ if (sym->type == S_BOOLEAN && val == mod) val = yes; switch (sym->type) { case S_BOOLEAN: case S_TRISTATE: switch (val) { case no: return "n"; case mod: return "m"; case yes: return "y"; } case S_INT: case S_HEX: return str; case S_STRING: return str; case S_OTHER: case S_UNKNOWN: break; } return ""; } const char *sym_get_string_value(struct symbol *sym) { tristate val; switch (sym->type) { case S_BOOLEAN: case S_TRISTATE: val = sym_get_tristate_value(sym); switch (val) { case no: return "n"; case mod: return "m"; case yes: return "y"; } break; default: ; } return (const char *)sym->curr.val; } bool sym_is_changable(struct symbol *sym) { return sym->visible > sym->rev_dep.tri; } static unsigned strhash(const char *s) { /* fnv32 hash */ unsigned hash = 2166136261U; for (; *s; s++) hash = (hash ^ *s) * 0x01000193; return hash; } struct symbol *sym_lookup(const char *name, int flags) { struct symbol *symbol; char *new_name; int hash; if (name) { if (name[0] && !name[1]) { switch (name[0]) { case 'y': return &symbol_yes; case 'm': return &symbol_mod; case 'n': return &symbol_no; } } hash = strhash(name) % SYMBOL_HASHSIZE; for (symbol = symbol_hash[hash]; symbol; symbol = symbol->next) { if (symbol->name && !strcmp(symbol->name, name) && (flags ? symbol->flags & flags : !(symbol->flags & (SYMBOL_CONST|SYMBOL_CHOICE)))) return symbol; } new_name = strdup(name); } else { new_name = NULL; hash = 0; } symbol = malloc(sizeof(*symbol)); memset(symbol, 0, sizeof(*symbol)); symbol->name = new_name; symbol->type = S_UNKNOWN; symbol->flags |= flags; symbol->next = symbol_hash[hash]; symbol_hash[hash] = symbol; return symbol; } struct symbol *sym_find(const char *name) { struct symbol *symbol = NULL; int hash = 0; if (!name) return NULL; if (name[0] && !name[1]) { switch (name[0]) { case 'y': return &symbol_yes; case 'm': return &symbol_mod; case 'n': return &symbol_no; } } hash = strhash(name) % SYMBOL_HASHSIZE; for (symbol = symbol_hash[hash]; symbol; symbol = symbol->next) { if (symbol->name && !strcmp(symbol->name, name) && !(symbol->flags & SYMBOL_CONST)) break; } return symbol; } /* * Expand symbol's names embedded in the string given in argument. Symbols' * name to be expanded shall be prefixed by a '$'. Unknown symbol expands to * the empty string. 
*/ const char *sym_expand_string_value(const char *in) { const char *src; char *res; size_t reslen; reslen = strlen(in) + 1; res = malloc(reslen); res[0] = '\0'; while ((src = strchr(in, '$'))) { char *p, name[SYMBOL_MAXLENGTH]; const char *symval = ""; struct symbol *sym; size_t newlen; strncat(res, in, src - in); src++; p = name; while (isalnum(*src) || *src == '_') *p++ = *src++; *p = '\0'; sym = sym_find(name); if (sym != NULL) { sym_calc_value(sym); symval = sym_get_string_value(sym); } newlen = strlen(res) + strlen(symval) + strlen(src) + 1; if (newlen > reslen) { reslen = newlen; res = realloc(res, reslen); } strcat(res, symval); in = src; } strcat(res, in); return res; } struct symbol **sym_re_search(const char *pattern) { struct symbol *sym, **sym_arr = NULL; int i, cnt, size; regex_t re; cnt = size = 0; /* Skip if empty */ if (strlen(pattern) == 0) return NULL; if (regcomp(&re, pattern, REG_EXTENDED|REG_NOSUB|REG_ICASE)) return NULL; for_all_symbols(i, sym) { if (sym->flags & SYMBOL_CONST || !sym->name) continue; if (regexec(&re, sym->name, 0, NULL, 0)) continue; if (cnt + 1 >= size) { void *tmp = sym_arr; size += 16; sym_arr = realloc(sym_arr, size * sizeof(struct symbol *)); if (!sym_arr) { free(tmp); return NULL; } } sym_calc_value(sym); sym_arr[cnt++] = sym; } if (sym_arr) sym_arr[cnt] = NULL; regfree(&re); return sym_arr; } /* * When we check for recursive dependencies we use a stack to save * current state so we can print out relevant info to user. * The entries are located on the call stack so no need to free memory. * Note inser() remove() must always match to properly clear the stack. */ static struct dep_stack { struct dep_stack *prev, *next; struct symbol *sym; struct property *prop; struct expr *expr; } *check_top; static void dep_stack_insert(struct dep_stack *stack, struct symbol *sym) { memset(stack, 0, sizeof(*stack)); if (check_top) check_top->next = stack; stack->prev = check_top; stack->sym = sym; check_top = stack; } static void dep_stack_remove(void) { check_top = check_top->prev; if (check_top) check_top->next = NULL; } /* * Called when we have detected a recursive dependency. * check_top point to the top of the stact so we use * the ->prev pointer to locate the bottom of the stack. */ static void sym_check_print_recursive(struct symbol *last_sym) { struct dep_stack *stack; struct symbol *sym, *next_sym; struct menu *menu = NULL; struct property *prop; struct dep_stack cv_stack; if (sym_is_choice_value(last_sym)) { dep_stack_insert(&cv_stack, last_sym); last_sym = prop_get_symbol(sym_get_choice_prop(last_sym)); } for (stack = check_top; stack != NULL; stack = stack->prev) if (stack->sym == last_sym) break; if (!stack) { fprintf(stderr, "unexpected recursive dependency error\n"); return; } for (; stack; stack = stack->next) { sym = stack->sym; next_sym = stack->next ? stack->next->sym : last_sym; prop = stack->prop; if (prop == NULL) prop = stack->sym->prop; /* for choice values find the menu entry (used below) */ if (sym_is_choice(sym) || sym_is_choice_value(sym)) { for (prop = sym->prop; prop; prop = prop->next) { menu = prop->menu; if (prop->menu) break; } } if (stack->sym == last_sym) fprintf(stderr, "%s:%d:error: recursive dependency detected!\n", prop->file->name, prop->lineno); if (stack->expr) { fprintf(stderr, "%s:%d:\tsymbol %s %s value contains %s\n", prop->file->name, prop->lineno, sym->name ? sym->name : "<choice>", prop_get_type_name(prop->type), next_sym->name ? 
next_sym->name : "<choice>"); } else if (stack->prop) { fprintf(stderr, "%s:%d:\tsymbol %s depends on %s\n", prop->file->name, prop->lineno, sym->name ? sym->name : "<choice>", next_sym->name ? next_sym->name : "<choice>"); } else if (sym_is_choice(sym)) { fprintf(stderr, "%s:%d:\tchoice %s contains symbol %s\n", menu->file->name, menu->lineno, sym->name ? sym->name : "<choice>", next_sym->name ? next_sym->name : "<choice>"); } else if (sym_is_choice_value(sym)) { fprintf(stderr, "%s:%d:\tsymbol %s is part of choice %s\n", menu->file->name, menu->lineno, sym->name ? sym->name : "<choice>", next_sym->name ? next_sym->name : "<choice>"); } else { fprintf(stderr, "%s:%d:\tsymbol %s is selected by %s\n", prop->file->name, prop->lineno, sym->name ? sym->name : "<choice>", next_sym->name ? next_sym->name : "<choice>"); } } if (check_top == &cv_stack) dep_stack_remove(); } static struct symbol *sym_check_expr_deps(struct expr *e) { struct symbol *sym; if (!e) return NULL; switch (e->type) { case E_OR: case E_AND: sym = sym_check_expr_deps(e->left.expr); if (sym) return sym; return sym_check_expr_deps(e->right.expr); case E_NOT: return sym_check_expr_deps(e->left.expr); case E_EQUAL: case E_UNEQUAL: sym = sym_check_deps(e->left.sym); if (sym) return sym; return sym_check_deps(e->right.sym); case E_SYMBOL: return sym_check_deps(e->left.sym); default: break; } printf("Oops! How to check %d?\n", e->type); return NULL; } /* return NULL when dependencies are OK */ static struct symbol *sym_check_sym_deps(struct symbol *sym) { struct symbol *sym2; struct property *prop; struct dep_stack stack; dep_stack_insert(&stack, sym); sym2 = sym_check_expr_deps(sym->rev_dep.expr); if (sym2) goto out; for (prop = sym->prop; prop; prop = prop->next) { if (prop->type == P_CHOICE || prop->type == P_SELECT) continue; stack.prop = prop; sym2 = sym_check_expr_deps(prop->visible.expr); if (sym2) break; if (prop->type != P_DEFAULT || sym_is_choice(sym)) continue; stack.expr = prop->expr; sym2 = sym_check_expr_deps(prop->expr); if (sym2) break; stack.expr = NULL; } out: dep_stack_remove(); return sym2; } static struct symbol *sym_check_choice_deps(struct symbol *choice) { struct symbol *sym, *sym2; struct property *prop; struct expr *e; struct dep_stack stack; dep_stack_insert(&stack, choice); prop = sym_get_choice_prop(choice); expr_list_for_each_sym(prop->expr, e, sym) sym->flags |= (SYMBOL_CHECK | SYMBOL_CHECKED); choice->flags |= (SYMBOL_CHECK | SYMBOL_CHECKED); sym2 = sym_check_sym_deps(choice); choice->flags &= ~SYMBOL_CHECK; if (sym2) goto out; expr_list_for_each_sym(prop->expr, e, sym) { sym2 = sym_check_sym_deps(sym); if (sym2) break; } out: expr_list_for_each_sym(prop->expr, e, sym) sym->flags &= ~SYMBOL_CHECK; if (sym2 && sym_is_choice_value(sym2) && prop_get_symbol(sym_get_choice_prop(sym2)) == choice) sym2 = choice; dep_stack_remove(); return sym2; } struct symbol *sym_check_deps(struct symbol *sym) { struct symbol *sym2; struct property *prop; if (sym->flags & SYMBOL_CHECK) { sym_check_print_recursive(sym); return sym; } if (sym->flags & SYMBOL_CHECKED) return NULL; if (sym_is_choice_value(sym)) { struct dep_stack stack; /* for choice groups start the check with main choice symbol */ dep_stack_insert(&stack, sym); prop = sym_get_choice_prop(sym); sym2 = sym_check_deps(prop_get_symbol(prop)); dep_stack_remove(); } else if (sym_is_choice(sym)) { sym2 = sym_check_choice_deps(sym); } else { sym->flags |= (SYMBOL_CHECK | SYMBOL_CHECKED); sym2 = sym_check_sym_deps(sym); sym->flags &= ~SYMBOL_CHECK; } if (sym2 && 
sym2 == sym) sym2 = NULL; return sym2; } struct property *prop_alloc(enum prop_type type, struct symbol *sym) { struct property *prop; struct property **propp; prop = malloc(sizeof(*prop)); memset(prop, 0, sizeof(*prop)); prop->type = type; prop->sym = sym; prop->file = current_file; prop->lineno = zconf_lineno(); /* append property to the prop list of symbol */ if (sym) { for (propp = &sym->prop; *propp; propp = &(*propp)->next) ; *propp = prop; } return prop; } struct symbol *prop_get_symbol(struct property *prop) { if (prop->expr && (prop->expr->type == E_SYMBOL || prop->expr->type == E_LIST)) return prop->expr->left.sym; return NULL; } const char *prop_get_type_name(enum prop_type type) { switch (type) { case P_PROMPT: return "prompt"; case P_ENV: return "env"; case P_COMMENT: return "comment"; case P_MENU: return "menu"; case P_DEFAULT: return "default"; case P_CHOICE: return "choice"; case P_SELECT: return "select"; case P_RANGE: return "range"; case P_SYMBOL: return "symbol"; case P_UNKNOWN: break; } return "unknown"; } static void prop_add_env(const char *env) { struct symbol *sym, *sym2; struct property *prop; char *p; sym = current_entry->sym; sym->flags |= SYMBOL_AUTO; for_all_properties(sym, prop, P_ENV) { sym2 = prop_get_symbol(prop); if (strcmp(sym2->name, env)) menu_warn(current_entry, "redefining environment symbol from %s", sym2->name); return; } prop = prop_alloc(P_ENV, sym); prop->expr = expr_alloc_symbol(sym_lookup(env, SYMBOL_CONST)); sym_env_list = expr_alloc_one(E_LIST, sym_env_list); sym_env_list->right.sym = sym; p = getenv(env); if (p) sym_add_default(sym, p); else menu_warn(current_entry, "environment variable %s undefined", env); }
gpl-2.0
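Much of sym_calc_value() and sym_calc_visibility() above is tristate algebra: EXPR_AND() and EXPR_OR() are defined in kconfig's expr.h as min and max over the ordering no < mod < yes. The standalone illustration below (not part of symbol.c) shows the two combinations the code leans on: a user value clipped by visibility, and a select that can only raise a value.

#include <stdio.h>

/* Same ordering and min/max semantics as kconfig's enum tristate and
 * the EXPR_AND()/EXPR_OR() macros in expr.h. */
typedef enum { no, mod, yes } tristate;

#define EXPR_OR(a, b)	((a) > (b) ? (a) : (b))	/* max: a select can only raise */
#define EXPR_AND(a, b)	((a) < (b) ? (a) : (b))	/* min: deps can only limit */

int main(void)
{
	/* newval.tri = EXPR_AND(sym->def[S_DEF_USER].tri, sym->visible):
	 * a user's 'y' on a symbol only visible as 'm' yields 'm'. */
	printf("AND(yes, mod) = %d (mod)\n", EXPR_AND(yes, mod));

	/* newval.tri = EXPR_OR(newval.tri, sym->rev_dep.tri):
	 * a reverse dependency ('select') raises 'n' to 'm'. */
	printf("OR(no, mod)   = %d (mod)\n", EXPR_OR(no, mod));
	return 0;
}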
fenggangwu/sffs
arch/x86/crypto/twofish_glue.c
2863
3286
/*
 * Glue Code for assembler optimized version of TWOFISH
 *
 * Originally Twofish for GPG
 * By Matthew Skala <mskala@ansuz.sooke.bc.ca>, July 26, 1998
 * 256-bit key length added March 20, 1999
 * Some modifications to reduce the text size by Werner Koch, April, 1998
 * Ported to the kerneli patch by Marc Mutz <Marc@Mutz.com>
 * Ported to CryptoAPI by Colin Slater <hoho@tacomeat.net>
 *
 * The original author has disclaimed all copyright interest in this
 * code and thus put it in the public domain. The subsequent authors
 * have put this under the GNU General Public License.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 *
 * This code is a "clean room" implementation, written from the paper
 * _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey,
 * Doug Whiting, David Wagner, Chris Hall, and Niels Ferguson, available
 * through http://www.counterpane.com/twofish.html
 *
 * For background information on multiplication in finite fields, used for
 * the matrix operations in the key schedule, see the book _Contemporary
 * Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the
 * Third Edition.
 */

#include <crypto/twofish.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>

asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
				const u8 *src);
EXPORT_SYMBOL_GPL(twofish_enc_blk);
asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst,
				const u8 *src);
EXPORT_SYMBOL_GPL(twofish_dec_blk);

static void twofish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	twofish_enc_blk(crypto_tfm_ctx(tfm), dst, src);
}

static void twofish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	twofish_dec_blk(crypto_tfm_ctx(tfm), dst, src);
}

static struct crypto_alg alg = {
	.cra_name		= "twofish",
	.cra_driver_name	= "twofish-asm",
	.cra_priority		= 200,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= TF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct twofish_ctx),
	.cra_alignmask		= 0,
	.cra_module		= THIS_MODULE,
	.cra_u			= {
		.cipher = {
			.cia_min_keysize	= TF_MIN_KEY_SIZE,
			.cia_max_keysize	= TF_MAX_KEY_SIZE,
			.cia_setkey		= twofish_setkey,
			.cia_encrypt		= twofish_encrypt,
			.cia_decrypt		= twofish_decrypt
		}
	}
};

static int __init init(void)
{
	return crypto_register_alg(&alg);
}

static void __exit fini(void)
{
	crypto_unregister_alg(&alg);
}

module_init(init);
module_exit(fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Twofish Cipher Algorithm, asm optimized");
MODULE_ALIAS_CRYPTO("twofish");
MODULE_ALIAS_CRYPTO("twofish-asm");
gpl-2.0
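The glue file above registers only a single-block cipher ("twofish", driver name "twofish-asm", priority 200, so it normally outranks the generic C implementation); in-kernel users reach it through the generic cipher API rather than calling twofish_enc_blk() directly. A minimal usage sketch follows, with error handling abbreviated and an arbitrary key and plaintext.

#include <crypto/twofish.h>	/* TF_BLOCK_SIZE, TF_MIN_KEY_SIZE */
#include <linux/crypto.h>
#include <linux/err.h>

/* Illustrative sketch, not part of the glue file: encrypt one 16-byte
 * block through the crypto API. */
static int twofish_one_block_demo(void)
{
	struct crypto_cipher *tfm;
	u8 key[TF_MIN_KEY_SIZE] = { 0 };	/* 128-bit all-zero demo key */
	u8 in[TF_BLOCK_SIZE] = "0123456789abcde";	/* 15 chars + NUL = one block */
	u8 out[TF_BLOCK_SIZE];
	int err;

	tfm = crypto_alloc_cipher("twofish", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	err = crypto_cipher_setkey(tfm, key, sizeof(key));
	if (!err)
		crypto_cipher_encrypt_one(tfm, out, in);
	crypto_free_cipher(tfm);
	return err;
}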
fortunave3gxx/android_kernel_samsung_fortuna-common-old
drivers/ps3/ps3av.c
4655
28405
/* * PS3 AV backend support. * * Copyright (C) 2007 Sony Computer Entertainment Inc. * Copyright 2007 Sony Corp. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/notifier.h> #include <linux/ioctl.h> #include <linux/fb.h> #include <linux/slab.h> #include <asm/firmware.h> #include <asm/ps3av.h> #include <asm/ps3.h> #include "vuart.h" #define BUFSIZE 4096 /* vuart buf size */ #define PS3AV_BUF_SIZE 512 /* max packet size */ static int safe_mode; static int timeout = 5000; /* in msec ( 5 sec ) */ module_param(timeout, int, 0644); static struct ps3av { struct mutex mutex; struct work_struct work; struct completion done; struct workqueue_struct *wq; int open_count; struct ps3_system_bus_device *dev; int region; struct ps3av_pkt_av_get_hw_conf av_hw_conf; u32 av_port[PS3AV_AV_PORT_MAX + PS3AV_OPT_PORT_MAX]; u32 opt_port[PS3AV_OPT_PORT_MAX]; u32 head[PS3AV_HEAD_MAX]; u32 audio_port; int ps3av_mode; int ps3av_mode_old; union { struct ps3av_reply_hdr reply_hdr; u8 raw[PS3AV_BUF_SIZE]; } recv_buf; } *ps3av; /* color space */ #define YUV444 PS3AV_CMD_VIDEO_CS_YUV444_8 #define RGB8 PS3AV_CMD_VIDEO_CS_RGB_8 /* format */ #define XRGB PS3AV_CMD_VIDEO_FMT_X8R8G8B8 /* aspect */ #define A_N PS3AV_CMD_AV_ASPECT_4_3 #define A_W PS3AV_CMD_AV_ASPECT_16_9 static const struct avset_video_mode { u32 cs; u32 fmt; u32 vid; u32 aspect; u32 x; u32 y; } video_mode_table[] = { { 0, }, /* auto */ {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_480I, A_N, 720, 480}, {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_480P, A_N, 720, 480}, {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_720P_60HZ, A_W, 1280, 720}, {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080I_60HZ, A_W, 1920, 1080}, {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080P_60HZ, A_W, 1920, 1080}, {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_576I, A_N, 720, 576}, {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_576P, A_N, 720, 576}, {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_720P_50HZ, A_W, 1280, 720}, {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080I_50HZ, A_W, 1920, 1080}, {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080P_50HZ, A_W, 1920, 1080}, { RGB8, XRGB, PS3AV_CMD_VIDEO_VID_WXGA, A_W, 1280, 768}, { RGB8, XRGB, PS3AV_CMD_VIDEO_VID_SXGA, A_N, 1280, 1024}, { RGB8, XRGB, PS3AV_CMD_VIDEO_VID_WUXGA, A_W, 1920, 1200}, }; /* supported CIDs */ static u32 cmd_table[] = { /* init */ PS3AV_CID_AV_INIT, PS3AV_CID_AV_FIN, PS3AV_CID_VIDEO_INIT, PS3AV_CID_AUDIO_INIT, /* set */ PS3AV_CID_AV_ENABLE_EVENT, PS3AV_CID_AV_DISABLE_EVENT, PS3AV_CID_AV_VIDEO_CS, PS3AV_CID_AV_VIDEO_MUTE, PS3AV_CID_AV_VIDEO_DISABLE_SIG, PS3AV_CID_AV_AUDIO_PARAM, PS3AV_CID_AV_AUDIO_MUTE, PS3AV_CID_AV_HDMI_MODE, PS3AV_CID_AV_TV_MUTE, PS3AV_CID_VIDEO_MODE, PS3AV_CID_VIDEO_FORMAT, PS3AV_CID_VIDEO_PITCH, PS3AV_CID_AUDIO_MODE, PS3AV_CID_AUDIO_MUTE, PS3AV_CID_AUDIO_ACTIVE, PS3AV_CID_AUDIO_INACTIVE, PS3AV_CID_AVB_PARAM, /* get */ PS3AV_CID_AV_GET_HW_CONF, PS3AV_CID_AV_GET_MONITOR_INFO, /* event */ PS3AV_CID_EVENT_UNPLUGGED, 
PS3AV_CID_EVENT_PLUGGED, PS3AV_CID_EVENT_HDCP_DONE, PS3AV_CID_EVENT_HDCP_FAIL, PS3AV_CID_EVENT_HDCP_AUTH, PS3AV_CID_EVENT_HDCP_ERROR, 0 }; #define PS3AV_EVENT_CMD_MASK 0x10000000 #define PS3AV_EVENT_ID_MASK 0x0000ffff #define PS3AV_CID_MASK 0xffffffff #define PS3AV_REPLY_BIT 0x80000000 #define ps3av_event_get_port_id(cid) ((cid >> 16) & 0xff) static u32 *ps3av_search_cmd_table(u32 cid, u32 mask) { u32 *table; int i; table = cmd_table; for (i = 0;; table++, i++) { if ((*table & mask) == (cid & mask)) break; if (*table == 0) return NULL; } return table; } static int ps3av_parse_event_packet(const struct ps3av_reply_hdr *hdr) { u32 *table; if (hdr->cid & PS3AV_EVENT_CMD_MASK) { table = ps3av_search_cmd_table(hdr->cid, PS3AV_EVENT_CMD_MASK); if (table) dev_dbg(&ps3av->dev->core, "recv event packet cid:%08x port:0x%x size:%d\n", hdr->cid, ps3av_event_get_port_id(hdr->cid), hdr->size); else printk(KERN_ERR "%s: failed event packet, cid:%08x size:%d\n", __func__, hdr->cid, hdr->size); return 1; /* receive event packet */ } return 0; } #define POLLING_INTERVAL 25 /* in msec */ static int ps3av_vuart_write(struct ps3_system_bus_device *dev, const void *buf, unsigned long size) { int error; dev_dbg(&dev->core, " -> %s:%d\n", __func__, __LINE__); error = ps3_vuart_write(dev, buf, size); dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__); return error ? error : size; } static int ps3av_vuart_read(struct ps3_system_bus_device *dev, void *buf, unsigned long size, int timeout) { int error; int loopcnt = 0; dev_dbg(&dev->core, " -> %s:%d\n", __func__, __LINE__); timeout = (timeout + POLLING_INTERVAL - 1) / POLLING_INTERVAL; while (loopcnt++ <= timeout) { error = ps3_vuart_read(dev, buf, size); if (!error) return size; if (error != -EAGAIN) { printk(KERN_ERR "%s: ps3_vuart_read failed %d\n", __func__, error); return error; } msleep(POLLING_INTERVAL); } return -EWOULDBLOCK; } static int ps3av_send_cmd_pkt(const struct ps3av_send_hdr *send_buf, struct ps3av_reply_hdr *recv_buf, int write_len, int read_len) { int res; u32 cmd; int event; if (!ps3av) return -ENODEV; /* send pkt */ res = ps3av_vuart_write(ps3av->dev, send_buf, write_len); if (res < 0) { dev_dbg(&ps3av->dev->core, "%s: ps3av_vuart_write() failed (result=%d)\n", __func__, res); return res; } /* recv pkt */ cmd = send_buf->cid; do { /* read header */ res = ps3av_vuart_read(ps3av->dev, recv_buf, PS3AV_HDR_SIZE, timeout); if (res != PS3AV_HDR_SIZE) { dev_dbg(&ps3av->dev->core, "%s: ps3av_vuart_read() failed (result=%d)\n", __func__, res); return res; } /* read body */ res = ps3av_vuart_read(ps3av->dev, &recv_buf->cid, recv_buf->size, timeout); if (res < 0) { dev_dbg(&ps3av->dev->core, "%s: ps3av_vuart_read() failed (result=%d)\n", __func__, res); return res; } res += PS3AV_HDR_SIZE; /* total len */ event = ps3av_parse_event_packet(recv_buf); /* ret > 0 event packet */ } while (event); if ((cmd | PS3AV_REPLY_BIT) != recv_buf->cid) { dev_dbg(&ps3av->dev->core, "%s: reply err (result=%x)\n", __func__, recv_buf->cid); return -EINVAL; } return 0; } static int ps3av_process_reply_packet(struct ps3av_send_hdr *cmd_buf, const struct ps3av_reply_hdr *recv_buf, int user_buf_size) { int return_len; if (recv_buf->version != PS3AV_VERSION) { dev_dbg(&ps3av->dev->core, "reply_packet invalid version:%x\n", recv_buf->version); return -EFAULT; } return_len = recv_buf->size + PS3AV_HDR_SIZE; if (return_len > user_buf_size) return_len = user_buf_size; memcpy(cmd_buf, recv_buf, return_len); return 0; /* success */ } void ps3av_set_hdr(u32 cid, u16 size, struct 
ps3av_send_hdr *hdr) { hdr->version = PS3AV_VERSION; hdr->size = size - PS3AV_HDR_SIZE; hdr->cid = cid; } int ps3av_do_pkt(u32 cid, u16 send_len, size_t usr_buf_size, struct ps3av_send_hdr *buf) { int res = 0; u32 *table; BUG_ON(!ps3av); mutex_lock(&ps3av->mutex); table = ps3av_search_cmd_table(cid, PS3AV_CID_MASK); BUG_ON(!table); BUG_ON(send_len < PS3AV_HDR_SIZE); BUG_ON(usr_buf_size < send_len); BUG_ON(usr_buf_size > PS3AV_BUF_SIZE); /* create header */ ps3av_set_hdr(cid, send_len, buf); /* send packet via vuart */ res = ps3av_send_cmd_pkt(buf, &ps3av->recv_buf.reply_hdr, send_len, usr_buf_size); if (res < 0) { printk(KERN_ERR "%s: ps3av_send_cmd_pkt() failed (result=%d)\n", __func__, res); goto err; } /* process reply packet */ res = ps3av_process_reply_packet(buf, &ps3av->recv_buf.reply_hdr, usr_buf_size); if (res < 0) { printk(KERN_ERR "%s: put_return_status() failed (result=%d)\n", __func__, res); goto err; } mutex_unlock(&ps3av->mutex); return 0; err: mutex_unlock(&ps3av->mutex); printk(KERN_ERR "%s: failed cid:%x res:%d\n", __func__, cid, res); return res; } static int ps3av_set_av_video_mute(u32 mute) { int i, num_of_av_port, res; num_of_av_port = ps3av->av_hw_conf.num_of_hdmi + ps3av->av_hw_conf.num_of_avmulti; /* video mute on */ for (i = 0; i < num_of_av_port; i++) { res = ps3av_cmd_av_video_mute(1, &ps3av->av_port[i], mute); if (res < 0) return -1; } return 0; } static int ps3av_set_video_disable_sig(void) { int i, num_of_hdmi_port, num_of_av_port, res; num_of_hdmi_port = ps3av->av_hw_conf.num_of_hdmi; num_of_av_port = ps3av->av_hw_conf.num_of_hdmi + ps3av->av_hw_conf.num_of_avmulti; /* tv mute */ for (i = 0; i < num_of_hdmi_port; i++) { res = ps3av_cmd_av_tv_mute(ps3av->av_port[i], PS3AV_CMD_MUTE_ON); if (res < 0) return -1; } msleep(100); /* video mute on */ for (i = 0; i < num_of_av_port; i++) { res = ps3av_cmd_av_video_disable_sig(ps3av->av_port[i]); if (res < 0) return -1; if (i < num_of_hdmi_port) { res = ps3av_cmd_av_tv_mute(ps3av->av_port[i], PS3AV_CMD_MUTE_OFF); if (res < 0) return -1; } } msleep(300); return 0; } static int ps3av_set_audio_mute(u32 mute) { int i, num_of_av_port, num_of_opt_port, res; num_of_av_port = ps3av->av_hw_conf.num_of_hdmi + ps3av->av_hw_conf.num_of_avmulti; num_of_opt_port = ps3av->av_hw_conf.num_of_spdif; for (i = 0; i < num_of_av_port; i++) { res = ps3av_cmd_av_audio_mute(1, &ps3av->av_port[i], mute); if (res < 0) return -1; } for (i = 0; i < num_of_opt_port; i++) { res = ps3av_cmd_audio_mute(1, &ps3av->opt_port[i], mute); if (res < 0) return -1; } return 0; } int ps3av_set_audio_mode(u32 ch, u32 fs, u32 word_bits, u32 format, u32 source) { struct ps3av_pkt_avb_param avb_param; int i, num_of_audio, vid, res; struct ps3av_pkt_audio_mode audio_mode; u32 len = 0; num_of_audio = ps3av->av_hw_conf.num_of_hdmi + ps3av->av_hw_conf.num_of_avmulti + ps3av->av_hw_conf.num_of_spdif; avb_param.num_of_video_pkt = 0; avb_param.num_of_audio_pkt = PS3AV_AVB_NUM_AUDIO; /* always 0 */ avb_param.num_of_av_video_pkt = 0; avb_param.num_of_av_audio_pkt = ps3av->av_hw_conf.num_of_hdmi; vid = video_mode_table[ps3av->ps3av_mode].vid; /* audio mute */ ps3av_set_audio_mute(PS3AV_CMD_MUTE_ON); /* audio inactive */ res = ps3av_cmd_audio_active(0, ps3av->audio_port); if (res < 0) dev_dbg(&ps3av->dev->core, "ps3av_cmd_audio_active OFF failed\n"); /* audio_pkt */ for (i = 0; i < num_of_audio; i++) { ps3av_cmd_set_audio_mode(&audio_mode, ps3av->av_port[i], ch, fs, word_bits, format, source); if (i < ps3av->av_hw_conf.num_of_hdmi) { /* hdmi only */ len += 
ps3av_cmd_set_av_audio_param(&avb_param.buf[len], ps3av->av_port[i], &audio_mode, vid); } /* audio_mode pkt should be sent separately */ res = ps3av_cmd_audio_mode(&audio_mode); if (res < 0) dev_dbg(&ps3av->dev->core, "ps3av_cmd_audio_mode failed, port:%x\n", i); } /* send command using avb pkt */ len += offsetof(struct ps3av_pkt_avb_param, buf); res = ps3av_cmd_avb_param(&avb_param, len); if (res < 0) dev_dbg(&ps3av->dev->core, "ps3av_cmd_avb_param failed\n"); /* audio mute */ ps3av_set_audio_mute(PS3AV_CMD_MUTE_OFF); /* audio active */ res = ps3av_cmd_audio_active(1, ps3av->audio_port); if (res < 0) dev_dbg(&ps3av->dev->core, "ps3av_cmd_audio_active ON failed\n"); return 0; } EXPORT_SYMBOL_GPL(ps3av_set_audio_mode); static int ps3av_set_videomode(void) { /* av video mute */ ps3av_set_av_video_mute(PS3AV_CMD_MUTE_ON); /* wake up ps3avd to do the actual video mode setting */ queue_work(ps3av->wq, &ps3av->work); return 0; } static void ps3av_set_videomode_packet(u32 id) { struct ps3av_pkt_avb_param avb_param; unsigned int i; u32 len = 0, av_video_cs; const struct avset_video_mode *video_mode; int res; video_mode = &video_mode_table[id & PS3AV_MODE_MASK]; avb_param.num_of_video_pkt = PS3AV_AVB_NUM_VIDEO; /* num of head */ avb_param.num_of_audio_pkt = 0; avb_param.num_of_av_video_pkt = ps3av->av_hw_conf.num_of_hdmi + ps3av->av_hw_conf.num_of_avmulti; avb_param.num_of_av_audio_pkt = 0; /* video_pkt */ for (i = 0; i < avb_param.num_of_video_pkt; i++) len += ps3av_cmd_set_video_mode(&avb_param.buf[len], ps3av->head[i], video_mode->vid, video_mode->fmt, id); /* av_video_pkt */ for (i = 0; i < avb_param.num_of_av_video_pkt; i++) { if (id & PS3AV_MODE_DVI || id & PS3AV_MODE_RGB) av_video_cs = RGB8; else av_video_cs = video_mode->cs; #ifndef PS3AV_HDMI_YUV if (ps3av->av_port[i] == PS3AV_CMD_AVPORT_HDMI_0 || ps3av->av_port[i] == PS3AV_CMD_AVPORT_HDMI_1) av_video_cs = RGB8; /* use RGB for HDMI */ #endif len += ps3av_cmd_set_av_video_cs(&avb_param.buf[len], ps3av->av_port[i], video_mode->vid, av_video_cs, video_mode->aspect, id); } /* send command using avb pkt */ len += offsetof(struct ps3av_pkt_avb_param, buf); res = ps3av_cmd_avb_param(&avb_param, len); if (res == PS3AV_STATUS_NO_SYNC_HEAD) printk(KERN_WARNING "%s: Command failed. Please try your request again.\n", __func__); else if (res) dev_dbg(&ps3av->dev->core, "ps3av_cmd_avb_param failed\n"); } static void ps3av_set_videomode_cont(u32 id, u32 old_id) { static int vesa; int res; /* video signal off */ ps3av_set_video_disable_sig(); /* * AV backend needs non-VESA mode setting at least one time * when VESA mode is used. 
*/ if (vesa == 0 && (id & PS3AV_MODE_MASK) >= PS3AV_MODE_WXGA) { /* vesa mode */ ps3av_set_videomode_packet(PS3AV_MODE_480P); } vesa = 1; /* Retail PS3 product doesn't support this */ if (id & PS3AV_MODE_HDCP_OFF) { res = ps3av_cmd_av_hdmi_mode(PS3AV_CMD_AV_HDMI_HDCP_OFF); if (res == PS3AV_STATUS_UNSUPPORTED_HDMI_MODE) dev_dbg(&ps3av->dev->core, "Not supported\n"); else if (res) dev_dbg(&ps3av->dev->core, "ps3av_cmd_av_hdmi_mode failed\n"); } else if (old_id & PS3AV_MODE_HDCP_OFF) { res = ps3av_cmd_av_hdmi_mode(PS3AV_CMD_AV_HDMI_MODE_NORMAL); if (res < 0 && res != PS3AV_STATUS_UNSUPPORTED_HDMI_MODE) dev_dbg(&ps3av->dev->core, "ps3av_cmd_av_hdmi_mode failed\n"); } ps3av_set_videomode_packet(id); msleep(1500); /* av video mute */ ps3av_set_av_video_mute(PS3AV_CMD_MUTE_OFF); } static void ps3avd(struct work_struct *work) { ps3av_set_videomode_cont(ps3av->ps3av_mode, ps3av->ps3av_mode_old); complete(&ps3av->done); } #define SHIFT_50 0 #define SHIFT_60 4 #define SHIFT_VESA 8 static const struct { unsigned mask:19; unsigned id:4; } ps3av_preferred_modes[] = { { PS3AV_RESBIT_WUXGA << SHIFT_VESA, PS3AV_MODE_WUXGA }, { PS3AV_RESBIT_1920x1080P << SHIFT_60, PS3AV_MODE_1080P60 }, { PS3AV_RESBIT_1920x1080P << SHIFT_50, PS3AV_MODE_1080P50 }, { PS3AV_RESBIT_1920x1080I << SHIFT_60, PS3AV_MODE_1080I60 }, { PS3AV_RESBIT_1920x1080I << SHIFT_50, PS3AV_MODE_1080I50 }, { PS3AV_RESBIT_SXGA << SHIFT_VESA, PS3AV_MODE_SXGA }, { PS3AV_RESBIT_WXGA << SHIFT_VESA, PS3AV_MODE_WXGA }, { PS3AV_RESBIT_1280x720P << SHIFT_60, PS3AV_MODE_720P60 }, { PS3AV_RESBIT_1280x720P << SHIFT_50, PS3AV_MODE_720P50 }, { PS3AV_RESBIT_720x480P << SHIFT_60, PS3AV_MODE_480P }, { PS3AV_RESBIT_720x576P << SHIFT_50, PS3AV_MODE_576P }, }; static enum ps3av_mode_num ps3av_resbit2id(u32 res_50, u32 res_60, u32 res_vesa) { unsigned int i; u32 res_all; /* * We mask off the resolution bits we care about and combine the * results in one bitfield, so make sure there's no overlap */ BUILD_BUG_ON(PS3AV_RES_MASK_50 << SHIFT_50 & PS3AV_RES_MASK_60 << SHIFT_60); BUILD_BUG_ON(PS3AV_RES_MASK_50 << SHIFT_50 & PS3AV_RES_MASK_VESA << SHIFT_VESA); BUILD_BUG_ON(PS3AV_RES_MASK_60 << SHIFT_60 & PS3AV_RES_MASK_VESA << SHIFT_VESA); res_all = (res_50 & PS3AV_RES_MASK_50) << SHIFT_50 | (res_60 & PS3AV_RES_MASK_60) << SHIFT_60 | (res_vesa & PS3AV_RES_MASK_VESA) << SHIFT_VESA; if (!res_all) return 0; for (i = 0; i < ARRAY_SIZE(ps3av_preferred_modes); i++) if (res_all & ps3av_preferred_modes[i].mask) return ps3av_preferred_modes[i].id; return 0; } static enum ps3av_mode_num ps3av_hdmi_get_id(struct ps3av_info_monitor *info) { enum ps3av_mode_num id; if (safe_mode) return PS3AV_DEFAULT_HDMI_MODE_ID_REG_60; /* check native resolution */ id = ps3av_resbit2id(info->res_50.native, info->res_60.native, info->res_vesa.native); if (id) { pr_debug("%s: Using native mode %d\n", __func__, id); return id; } /* check supported resolutions */ id = ps3av_resbit2id(info->res_50.res_bits, info->res_60.res_bits, info->res_vesa.res_bits); if (id) { pr_debug("%s: Using supported mode %d\n", __func__, id); return id; } if (ps3av->region & PS3AV_REGION_60) id = PS3AV_DEFAULT_HDMI_MODE_ID_REG_60; else id = PS3AV_DEFAULT_HDMI_MODE_ID_REG_50; pr_debug("%s: Using default mode %d\n", __func__, id); return id; } static void ps3av_monitor_info_dump( const struct ps3av_pkt_av_get_monitor_info *monitor_info) { const struct ps3av_info_monitor *info = &monitor_info->info; const struct ps3av_info_audio *audio = info->audio; char id[sizeof(info->monitor_id)*3+1]; int i; pr_debug("Monitor Info: size %u\n", 
monitor_info->send_hdr.size); pr_debug("avport: %02x\n", info->avport); for (i = 0; i < sizeof(info->monitor_id); i++) sprintf(&id[i*3], " %02x", info->monitor_id[i]); pr_debug("monitor_id: %s\n", id); pr_debug("monitor_type: %02x\n", info->monitor_type); pr_debug("monitor_name: %.*s\n", (int)sizeof(info->monitor_name), info->monitor_name); /* resolution */ pr_debug("resolution_60: bits: %08x native: %08x\n", info->res_60.res_bits, info->res_60.native); pr_debug("resolution_50: bits: %08x native: %08x\n", info->res_50.res_bits, info->res_50.native); pr_debug("resolution_other: bits: %08x native: %08x\n", info->res_other.res_bits, info->res_other.native); pr_debug("resolution_vesa: bits: %08x native: %08x\n", info->res_vesa.res_bits, info->res_vesa.native); /* color space */ pr_debug("color space rgb: %02x\n", info->cs.rgb); pr_debug("color space yuv444: %02x\n", info->cs.yuv444); pr_debug("color space yuv422: %02x\n", info->cs.yuv422); /* color info */ pr_debug("color info red: X %04x Y %04x\n", info->color.red_x, info->color.red_y); pr_debug("color info green: X %04x Y %04x\n", info->color.green_x, info->color.green_y); pr_debug("color info blue: X %04x Y %04x\n", info->color.blue_x, info->color.blue_y); pr_debug("color info white: X %04x Y %04x\n", info->color.white_x, info->color.white_y); pr_debug("color info gamma: %08x\n", info->color.gamma); /* other info */ pr_debug("supported_AI: %02x\n", info->supported_ai); pr_debug("speaker_info: %02x\n", info->speaker_info); pr_debug("num of audio: %02x\n", info->num_of_audio_block); /* audio block */ for (i = 0; i < info->num_of_audio_block; i++) { pr_debug( "audio[%d] type: %02x max_ch: %02x fs: %02x sbit: %02x\n", i, audio->type, audio->max_num_of_ch, audio->fs, audio->sbit); audio++; } } static const struct ps3av_monitor_quirk { const char *monitor_name; u32 clear_60; } ps3av_monitor_quirks[] = { { .monitor_name = "DELL 2007WFP", .clear_60 = PS3AV_RESBIT_1920x1080I }, { .monitor_name = "L226WTQ", .clear_60 = PS3AV_RESBIT_1920x1080I | PS3AV_RESBIT_1920x1080P }, { .monitor_name = "SyncMaster", .clear_60 = PS3AV_RESBIT_1920x1080I } }; static void ps3av_fixup_monitor_info(struct ps3av_info_monitor *info) { unsigned int i; const struct ps3av_monitor_quirk *quirk; for (i = 0; i < ARRAY_SIZE(ps3av_monitor_quirks); i++) { quirk = &ps3av_monitor_quirks[i]; if (!strncmp(info->monitor_name, quirk->monitor_name, sizeof(info->monitor_name))) { pr_info("%s: Applying quirk for %s\n", __func__, quirk->monitor_name); info->res_60.res_bits &= ~quirk->clear_60; info->res_60.native &= ~quirk->clear_60; break; } } } static int ps3av_auto_videomode(struct ps3av_pkt_av_get_hw_conf *av_hw_conf) { int i, res, id = 0, dvi = 0, rgb = 0; struct ps3av_pkt_av_get_monitor_info monitor_info; struct ps3av_info_monitor *info; /* get mode id for hdmi */ for (i = 0; i < av_hw_conf->num_of_hdmi && !id; i++) { res = ps3av_cmd_video_get_monitor_info(&monitor_info, PS3AV_CMD_AVPORT_HDMI_0 + i); if (res < 0) return -1; ps3av_monitor_info_dump(&monitor_info); info = &monitor_info.info; ps3av_fixup_monitor_info(info); switch (info->monitor_type) { case PS3AV_MONITOR_TYPE_DVI: dvi = PS3AV_MODE_DVI; /* fall through */ case PS3AV_MONITOR_TYPE_HDMI: id = ps3av_hdmi_get_id(info); break; } } if (!id) { /* no HDMI interface or HDMI is off */ if (ps3av->region & PS3AV_REGION_60) id = PS3AV_DEFAULT_AVMULTI_MODE_ID_REG_60; else id = PS3AV_DEFAULT_AVMULTI_MODE_ID_REG_50; if (ps3av->region & PS3AV_REGION_RGB) rgb = PS3AV_MODE_RGB; pr_debug("%s: Using avmulti mode %d\n", __func__, id); } return 
id | dvi | rgb; } static int ps3av_get_hw_conf(struct ps3av *ps3av) { int i, j, k, res; const struct ps3av_pkt_av_get_hw_conf *hw_conf; /* get av_hw_conf */ res = ps3av_cmd_av_get_hw_conf(&ps3av->av_hw_conf); if (res < 0) return -1; hw_conf = &ps3av->av_hw_conf; pr_debug("av_h_conf: num of hdmi: %u\n", hw_conf->num_of_hdmi); pr_debug("av_h_conf: num of avmulti: %u\n", hw_conf->num_of_avmulti); pr_debug("av_h_conf: num of spdif: %u\n", hw_conf->num_of_spdif); for (i = 0; i < PS3AV_HEAD_MAX; i++) ps3av->head[i] = PS3AV_CMD_VIDEO_HEAD_A + i; for (i = 0; i < PS3AV_OPT_PORT_MAX; i++) ps3av->opt_port[i] = PS3AV_CMD_AVPORT_SPDIF_0 + i; for (i = 0; i < hw_conf->num_of_hdmi; i++) ps3av->av_port[i] = PS3AV_CMD_AVPORT_HDMI_0 + i; for (j = 0; j < hw_conf->num_of_avmulti; j++) ps3av->av_port[i + j] = PS3AV_CMD_AVPORT_AVMULTI_0 + j; for (k = 0; k < hw_conf->num_of_spdif; k++) ps3av->av_port[i + j + k] = PS3AV_CMD_AVPORT_SPDIF_0 + k; /* set all audio port */ ps3av->audio_port = PS3AV_CMD_AUDIO_PORT_HDMI_0 | PS3AV_CMD_AUDIO_PORT_HDMI_1 | PS3AV_CMD_AUDIO_PORT_AVMULTI_0 | PS3AV_CMD_AUDIO_PORT_SPDIF_0 | PS3AV_CMD_AUDIO_PORT_SPDIF_1; return 0; } /* set mode using id */ int ps3av_set_video_mode(int id) { int size; u32 option; size = ARRAY_SIZE(video_mode_table); if ((id & PS3AV_MODE_MASK) > size - 1 || id < 0) { dev_dbg(&ps3av->dev->core, "%s: error id :%d\n", __func__, id); return -EINVAL; } /* auto mode */ option = id & ~PS3AV_MODE_MASK; if ((id & PS3AV_MODE_MASK) == PS3AV_MODE_AUTO) { id = ps3av_auto_videomode(&ps3av->av_hw_conf); if (id < 1) { printk(KERN_ERR "%s: invalid id :%d\n", __func__, id); return -EINVAL; } id |= option; } /* set videomode */ wait_for_completion(&ps3av->done); ps3av->ps3av_mode_old = ps3av->ps3av_mode; ps3av->ps3av_mode = id; if (ps3av_set_videomode()) ps3av->ps3av_mode = ps3av->ps3av_mode_old; return 0; } EXPORT_SYMBOL_GPL(ps3av_set_video_mode); int ps3av_get_auto_mode(void) { return ps3av_auto_videomode(&ps3av->av_hw_conf); } EXPORT_SYMBOL_GPL(ps3av_get_auto_mode); int ps3av_get_mode(void) { return ps3av ? ps3av->ps3av_mode : 0; } EXPORT_SYMBOL_GPL(ps3av_get_mode); /* get resolution by video_mode */ int ps3av_video_mode2res(u32 id, u32 *xres, u32 *yres) { int size; id = id & PS3AV_MODE_MASK; size = ARRAY_SIZE(video_mode_table); if (id > size - 1 || id < 0) { printk(KERN_ERR "%s: invalid mode %d\n", __func__, id); return -EINVAL; } *xres = video_mode_table[id].x; *yres = video_mode_table[id].y; return 0; } EXPORT_SYMBOL_GPL(ps3av_video_mode2res); /* mute */ int ps3av_video_mute(int mute) { return ps3av_set_av_video_mute(mute ? PS3AV_CMD_MUTE_ON : PS3AV_CMD_MUTE_OFF); } EXPORT_SYMBOL_GPL(ps3av_video_mute); /* mute analog output only */ int ps3av_audio_mute_analog(int mute) { int i, res; for (i = 0; i < ps3av->av_hw_conf.num_of_avmulti; i++) { res = ps3av_cmd_av_audio_mute(1, &ps3av->av_port[i + ps3av->av_hw_conf.num_of_hdmi], mute); if (res < 0) return -1; } return 0; } EXPORT_SYMBOL_GPL(ps3av_audio_mute_analog); int ps3av_audio_mute(int mute) { return ps3av_set_audio_mute(mute ? 
PS3AV_CMD_MUTE_ON : PS3AV_CMD_MUTE_OFF); } EXPORT_SYMBOL_GPL(ps3av_audio_mute); static int ps3av_probe(struct ps3_system_bus_device *dev) { int res; int id; dev_dbg(&dev->core, " -> %s:%d\n", __func__, __LINE__); dev_dbg(&dev->core, " timeout=%d\n", timeout); if (ps3av) { dev_err(&dev->core, "Only one ps3av device is supported\n"); return -EBUSY; } ps3av = kzalloc(sizeof(*ps3av), GFP_KERNEL); if (!ps3av) return -ENOMEM; mutex_init(&ps3av->mutex); ps3av->ps3av_mode = PS3AV_MODE_AUTO; ps3av->dev = dev; INIT_WORK(&ps3av->work, ps3avd); init_completion(&ps3av->done); complete(&ps3av->done); ps3av->wq = create_singlethread_workqueue("ps3avd"); if (!ps3av->wq) { res = -ENOMEM; goto fail; } switch (ps3_os_area_get_av_multi_out()) { case PS3_PARAM_AV_MULTI_OUT_NTSC: ps3av->region = PS3AV_REGION_60; break; case PS3_PARAM_AV_MULTI_OUT_PAL_YCBCR: case PS3_PARAM_AV_MULTI_OUT_SECAM: ps3av->region = PS3AV_REGION_50; break; case PS3_PARAM_AV_MULTI_OUT_PAL_RGB: ps3av->region = PS3AV_REGION_50 | PS3AV_REGION_RGB; break; default: ps3av->region = PS3AV_REGION_60; break; } /* init avsetting modules */ res = ps3av_cmd_init(); if (res < 0) printk(KERN_ERR "%s: ps3av_cmd_init failed %d\n", __func__, res); ps3av_get_hw_conf(ps3av); #ifdef CONFIG_FB if (fb_mode_option && !strcmp(fb_mode_option, "safe")) safe_mode = 1; #endif /* CONFIG_FB */ id = ps3av_auto_videomode(&ps3av->av_hw_conf); if (id < 0) { printk(KERN_ERR "%s: invalid id :%d\n", __func__, id); res = -EINVAL; goto fail; } safe_mode = 0; mutex_lock(&ps3av->mutex); ps3av->ps3av_mode = id; mutex_unlock(&ps3av->mutex); dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__); return 0; fail: kfree(ps3av); ps3av = NULL; return res; } static int ps3av_remove(struct ps3_system_bus_device *dev) { dev_dbg(&dev->core, " -> %s:%d\n", __func__, __LINE__); if (ps3av) { ps3av_cmd_fin(); if (ps3av->wq) destroy_workqueue(ps3av->wq); kfree(ps3av); ps3av = NULL; } dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__); return 0; } static void ps3av_shutdown(struct ps3_system_bus_device *dev) { dev_dbg(&dev->core, " -> %s:%d\n", __func__, __LINE__); ps3av_remove(dev); dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__); } static struct ps3_vuart_port_driver ps3av_driver = { .core.match_id = PS3_MATCH_ID_AV_SETTINGS, .core.core.name = "ps3_av", .probe = ps3av_probe, .remove = ps3av_remove, .shutdown = ps3av_shutdown, }; static int __init ps3av_module_init(void) { int error; if (!firmware_has_feature(FW_FEATURE_PS3_LV1)) return -ENODEV; pr_debug(" -> %s:%d\n", __func__, __LINE__); error = ps3_vuart_port_driver_register(&ps3av_driver); if (error) { printk(KERN_ERR "%s: ps3_vuart_port_driver_register failed %d\n", __func__, error); return error; } pr_debug(" <- %s:%d\n", __func__, __LINE__); return error; } static void __exit ps3av_module_exit(void) { pr_debug(" -> %s:%d\n", __func__, __LINE__); ps3_vuart_port_driver_unregister(&ps3av_driver); pr_debug(" <- %s:%d\n", __func__, __LINE__); } subsys_initcall(ps3av_module_init); module_exit(ps3av_module_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("PS3 AV Settings Driver"); MODULE_AUTHOR("Sony Computer Entertainment Inc."); MODULE_ALIAS(PS3_MODULE_ALIAS_AV_SETTINGS);
gpl-2.0
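/*
 * Illustrative sketch, not part of the ps3av driver above: its quirk
 * fixup matches a fixed-size, not necessarily NUL-terminated monitor
 * name against a table and clears broken 60 Hz resolution bits. The
 * standalone program below models that pattern; the RESBIT_* values and
 * MON_NAME_LEN are stand-ins, the real bit assignments live in the
 * driver's headers.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define RESBIT_1920x1080I (1u << 0)  /* stand-in for PS3AV_RESBIT_1920x1080I */
#define RESBIT_1920x1080P (1u << 1)  /* stand-in for PS3AV_RESBIT_1920x1080P */
#define MON_NAME_LEN 16              /* assumed size of info->monitor_name */

struct monitor_quirk {
	const char *name;
	uint32_t clear_60;
};

static const struct monitor_quirk quirks[] = {
	{ "DELL 2007WFP", RESBIT_1920x1080I },
	{ "L226WTQ",      RESBIT_1920x1080I | RESBIT_1920x1080P },
};

/* Clear quirky bits; returns nonzero if a quirk was applied. */
static int fixup_res_bits(const char name[MON_NAME_LEN], uint32_t *res_60)
{
	size_t i;

	for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
		/* strncmp() bounded by the field size, as in the driver:
		 * this matches whole names only, not substrings. */
		if (!strncmp(name, quirks[i].name, MON_NAME_LEN)) {
			*res_60 &= ~quirks[i].clear_60;
			return 1;
		}
	}
	return 0;
}

int main(void)
{
	char name[MON_NAME_LEN] = "DELL 2007WFP";
	uint32_t res_60 = RESBIT_1920x1080I | RESBIT_1920x1080P;

	if (fixup_res_bits(name, &res_60))
		printf("quirk applied, res_60 now 0x%08x\n", res_60);
	return 0;
}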
boa19861105/B2_UHL
arch/arm/plat-mxc/cpu.c
4911
1078
#include <linux/module.h> #include <linux/io.h> #include <mach/hardware.h> unsigned int __mxc_cpu_type; EXPORT_SYMBOL(__mxc_cpu_type); void mxc_set_cpu_type(unsigned int type) { __mxc_cpu_type = type; } void imx_print_silicon_rev(const char *cpu, int srev) { if (srev == IMX_CHIP_REVISION_UNKNOWN) pr_info("CPU identified as %s, unknown revision\n", cpu); else pr_info("CPU identified as %s, silicon rev %d.%d\n", cpu, (srev >> 4) & 0xf, srev & 0xf); } void __init imx_set_aips(void __iomem *base) { unsigned int reg; /* * Set all MPROTx to be non-bufferable, trusted for R/W, * not forced to user-mode. */ __raw_writel(0x77777777, base + 0x0); __raw_writel(0x77777777, base + 0x4); /* * Set all OPACRx to be non-bufferable, to not require * supervisor privilege level for access, allow for * write access and untrusted master access. */ __raw_writel(0x0, base + 0x40); __raw_writel(0x0, base + 0x44); __raw_writel(0x0, base + 0x48); __raw_writel(0x0, base + 0x4C); reg = __raw_readl(base + 0x50) & 0x00FFFFFF; __raw_writel(reg, base + 0x50); }
gpl-2.0
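/*
 * Illustrative sketch, not from the kernel tree: imx_print_silicon_rev()
 * above treats the srev byte as a major.minor pair packed in two nibbles.
 * The standalone helper below shows the same decode; the input value 0x21
 * is just an example.
 */
#include <stdio.h>

static void print_silicon_rev(const char *cpu, int srev)
{
	if (srev < 0)
		printf("CPU identified as %s, unknown revision\n", cpu);
	else
		printf("CPU identified as %s, silicon rev %d.%d\n",
		       cpu, (srev >> 4) & 0xf, srev & 0xf);
}

int main(void)
{
	print_silicon_rev("i.MX53", 0x21);	/* prints "silicon rev 2.1" */
	return 0;
}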
RJDTWO/android_kernel_oneplus_msm8974
drivers/hid/hid-a4tech.c
8239
3731
/* * HID driver for some a4tech "special" devices * * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina * Copyright (c) 2007 Paul Walmsley * Copyright (c) 2008 Jiri Slaby */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ #include <linux/device.h> #include <linux/input.h> #include <linux/hid.h> #include <linux/module.h> #include <linux/slab.h> #include "hid-ids.h" #define A4_2WHEEL_MOUSE_HACK_7 0x01 #define A4_2WHEEL_MOUSE_HACK_B8 0x02 struct a4tech_sc { unsigned long quirks; unsigned int hw_wheel; __s32 delayed_value; }; static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { struct a4tech_sc *a4 = hid_get_drvdata(hdev); if (usage->type == EV_REL && usage->code == REL_WHEEL) set_bit(REL_HWHEEL, *bit); if ((a4->quirks & A4_2WHEEL_MOUSE_HACK_7) && usage->hid == 0x00090007) return -1; return 0; } static int a4_event(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { struct a4tech_sc *a4 = hid_get_drvdata(hdev); struct input_dev *input; if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput || !usage->type) return 0; input = field->hidinput->input; if (a4->quirks & A4_2WHEEL_MOUSE_HACK_B8) { if (usage->type == EV_REL && usage->code == REL_WHEEL) { a4->delayed_value = value; return 1; } if (usage->hid == 0x000100b8) { input_event(input, EV_REL, value ? REL_HWHEEL : REL_WHEEL, a4->delayed_value); return 1; } } if ((a4->quirks & A4_2WHEEL_MOUSE_HACK_7) && usage->hid == 0x00090007) { a4->hw_wheel = !!value; return 1; } if (usage->code == REL_WHEEL && a4->hw_wheel) { input_event(input, usage->type, REL_HWHEEL, value); return 1; } return 0; } static int a4_probe(struct hid_device *hdev, const struct hid_device_id *id) { struct a4tech_sc *a4; int ret; a4 = kzalloc(sizeof(*a4), GFP_KERNEL); if (a4 == NULL) { hid_err(hdev, "can't alloc device descriptor\n"); ret = -ENOMEM; goto err_free; } a4->quirks = id->driver_data; hid_set_drvdata(hdev, a4); ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); goto err_free; } ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (ret) { hid_err(hdev, "hw start failed\n"); goto err_free; } return 0; err_free: kfree(a4); return ret; } static void a4_remove(struct hid_device *hdev) { struct a4tech_sc *a4 = hid_get_drvdata(hdev); hid_hw_stop(hdev); kfree(a4); } static const struct hid_device_id a4_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU), .driver_data = A4_2WHEEL_MOUSE_HACK_7 }, { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D), .driver_data = A4_2WHEEL_MOUSE_HACK_B8 }, { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649), .driver_data = A4_2WHEEL_MOUSE_HACK_B8 }, { } }; MODULE_DEVICE_TABLE(hid, a4_devices); static struct hid_driver a4_driver = { .name = "a4tech", .id_table = a4_devices, .input_mapped = a4_input_mapped, .event = a4_event, .probe = a4_probe, .remove = a4_remove, }; static int __init a4_init(void) { return hid_register_driver(&a4_driver); } static void __exit a4_exit(void) { hid_unregister_driver(&a4_driver); } module_init(a4_init); module_exit(a4_exit); 
MODULE_LICENSE("GPL");
gpl-2.0
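/*
 * Illustrative sketch, not the HID driver itself: the A4_2WHEEL_MOUSE_HACK_B8
 * path above buffers the wheel delta and only commits it when the follow-up
 * selector usage (0x000100b8) says which wheel produced it. The standalone
 * model below replays that two-step protocol; the event names are stand-ins
 * for the input-layer REL_WHEEL/REL_HWHEEL codes.
 */
#include <stdio.h>

struct wheel_state {
	int delayed_value;	/* last buffered wheel delta */
};

/* First event: buffer the delta instead of emitting it. */
static void on_wheel(struct wheel_state *s, int value)
{
	s->delayed_value = value;
}

/* Second event: a nonzero selector routes the delta to the horizontal
 * wheel, mirroring input_event(..., value ? REL_HWHEEL : REL_WHEEL, ...). */
static void on_selector(const struct wheel_state *s, int is_hwheel)
{
	printf("emit %s delta %d\n",
	       is_hwheel ? "REL_HWHEEL" : "REL_WHEEL", s->delayed_value);
}

int main(void)
{
	struct wheel_state s = { 0 };

	on_wheel(&s, -1);	/* device reports a wheel tick... */
	on_selector(&s, 1);	/* ...then flags it as the horizontal wheel */
	return 0;
}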
juston-li/flo
tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
9775
8644
/* * (C) 2010,2011 Thomas Renninger <trenn@suse.de>, Novell Inc. * * Licensed under the terms of the GNU GPL License version 2. * * PCI initialization based on example code from: * Andreas Herrmann <andreas.herrmann3@amd.com> */ #if defined(__i386__) || defined(__x86_64__) #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <time.h> #include <string.h> #include <pci/pci.h> #include "idle_monitor/cpupower-monitor.h" #include "helpers/helpers.h" #define PCI_NON_PC0_OFFSET 0xb0 #define PCI_PC1_OFFSET 0xb4 #define PCI_PC6_OFFSET 0xb8 #define PCI_MONITOR_ENABLE_REG 0xe0 #define PCI_NON_PC0_ENABLE_BIT 0 #define PCI_PC1_ENABLE_BIT 1 #define PCI_PC6_ENABLE_BIT 2 #define PCI_NBP1_STAT_OFFSET 0x98 #define PCI_NBP1_ACTIVE_BIT 2 #define PCI_NBP1_ENTERED_BIT 1 #define PCI_NBP1_CAP_OFFSET 0x90 #define PCI_NBP1_CAPABLE_BIT 31 #define OVERFLOW_MS 343597 /* 32 bit register filled at 12500 HZ (1 tick per 80ns) */ enum amd_fam14h_states {NON_PC0 = 0, PC1, PC6, NBP1, AMD_FAM14H_STATE_NUM}; static int fam14h_get_count_percent(unsigned int self_id, double *percent, unsigned int cpu); static int fam14h_nbp1_count(unsigned int id, unsigned long long *count, unsigned int cpu); static cstate_t amd_fam14h_cstates[AMD_FAM14H_STATE_NUM] = { { .name = "!PC0", .desc = N_("Package in sleep state (PC1 or deeper)"), .id = NON_PC0, .range = RANGE_PACKAGE, .get_count_percent = fam14h_get_count_percent, }, { .name = "PC1", .desc = N_("Processor Package C1"), .id = PC1, .range = RANGE_PACKAGE, .get_count_percent = fam14h_get_count_percent, }, { .name = "PC6", .desc = N_("Processor Package C6"), .id = PC6, .range = RANGE_PACKAGE, .get_count_percent = fam14h_get_count_percent, }, { .name = "NBP1", .desc = N_("North Bridge P1 boolean counter (returns 0 or 1)"), .id = NBP1, .range = RANGE_PACKAGE, .get_count = fam14h_nbp1_count, }, }; static struct pci_access *pci_acc; static struct pci_dev *amd_fam14h_pci_dev; static int nbp1_entered; struct timespec start_time; static unsigned long long timediff; #ifdef DEBUG struct timespec dbg_time; long dbg_timediff; #endif static unsigned long long *previous_count[AMD_FAM14H_STATE_NUM]; static unsigned long long *current_count[AMD_FAM14H_STATE_NUM]; static int amd_fam14h_get_pci_info(struct cstate *state, unsigned int *pci_offset, unsigned int *enable_bit, unsigned int cpu) { switch (state->id) { case NON_PC0: *enable_bit = PCI_NON_PC0_ENABLE_BIT; *pci_offset = PCI_NON_PC0_OFFSET; break; case PC1: *enable_bit = PCI_PC1_ENABLE_BIT; *pci_offset = PCI_PC1_OFFSET; break; case PC6: *enable_bit = PCI_PC6_ENABLE_BIT; *pci_offset = PCI_PC6_OFFSET; break; case NBP1: *enable_bit = PCI_NBP1_ENTERED_BIT; *pci_offset = PCI_NBP1_STAT_OFFSET; break; default: return -1; }; return 0; } static int amd_fam14h_init(cstate_t *state, unsigned int cpu) { int enable_bit, pci_offset, ret; uint32_t val; ret = amd_fam14h_get_pci_info(state, &pci_offset, &enable_bit, cpu); if (ret) return ret; /* NBP1 needs extra treating -> write 1 to D18F6x98 bit 1 for init */ if (state->id == NBP1) { val = pci_read_long(amd_fam14h_pci_dev, pci_offset); val |= 1 << enable_bit; val = pci_write_long(amd_fam14h_pci_dev, pci_offset, val); return ret; } /* Enable monitor */ val = pci_read_long(amd_fam14h_pci_dev, PCI_MONITOR_ENABLE_REG); dprint("Init %s: read at offset: 0x%x val: %u\n", state->name, PCI_MONITOR_ENABLE_REG, (unsigned int) val); val |= 1 << enable_bit; pci_write_long(amd_fam14h_pci_dev, PCI_MONITOR_ENABLE_REG, val); dprint("Init %s: offset: 0x%x enable_bit: %d - val: %u (%u)\n", state->name, 
PCI_MONITOR_ENABLE_REG, enable_bit, (unsigned int) val, cpu); /* Set counter to zero */ pci_write_long(amd_fam14h_pci_dev, pci_offset, 0); previous_count[state->id][cpu] = 0; return 0; } static int amd_fam14h_disable(cstate_t *state, unsigned int cpu) { int enable_bit, pci_offset, ret; uint32_t val; ret = amd_fam14h_get_pci_info(state, &pci_offset, &enable_bit, cpu); if (ret) return ret; val = pci_read_long(amd_fam14h_pci_dev, pci_offset); dprint("%s: offset: 0x%x %u\n", state->name, pci_offset, val); if (state->id == NBP1) { /* was the bit whether NBP1 got entered set? */ nbp1_entered = (val & (1 << PCI_NBP1_ACTIVE_BIT)) | (val & (1 << PCI_NBP1_ENTERED_BIT)); dprint("NBP1 was %sentered - 0x%x - enable_bit: " "%d - pci_offset: 0x%x\n", nbp1_entered ? "" : "not ", val, enable_bit, pci_offset); return ret; } current_count[state->id][cpu] = val; dprint("%s: Current - %llu (%u)\n", state->name, current_count[state->id][cpu], cpu); dprint("%s: Previous - %llu (%u)\n", state->name, previous_count[state->id][cpu], cpu); val = pci_read_long(amd_fam14h_pci_dev, PCI_MONITOR_ENABLE_REG); val &= ~(1 << enable_bit); pci_write_long(amd_fam14h_pci_dev, PCI_MONITOR_ENABLE_REG, val); return 0; } static int fam14h_nbp1_count(unsigned int id, unsigned long long *count, unsigned int cpu) { if (id == NBP1) { if (nbp1_entered) *count = 1; else *count = 0; return 0; } return -1; } static int fam14h_get_count_percent(unsigned int id, double *percent, unsigned int cpu) { unsigned long diff; if (id >= AMD_FAM14H_STATE_NUM) return -1; /* residency count in 80ns -> divide through 12.5 to get us residency */ diff = current_count[id][cpu] - previous_count[id][cpu]; if (timediff == 0) *percent = 0.0; else *percent = 100.0 * diff / timediff / 12.5; dprint("Timediff: %llu - res~: %lu us - percent: %.2f %%\n", timediff, diff * 10 / 125, *percent); return 0; } static int amd_fam14h_start(void) { int num, cpu; clock_gettime(CLOCK_REALTIME, &start_time); for (num = 0; num < AMD_FAM14H_STATE_NUM; num++) { for (cpu = 0; cpu < cpu_count; cpu++) amd_fam14h_init(&amd_fam14h_cstates[num], cpu); } #ifdef DEBUG clock_gettime(CLOCK_REALTIME, &dbg_time); dbg_timediff = timespec_diff_us(start_time, dbg_time); dprint("Enabling counters took: %lu us\n", dbg_timediff); #endif return 0; } static int amd_fam14h_stop(void) { int num, cpu; struct timespec end_time; clock_gettime(CLOCK_REALTIME, &end_time); for (num = 0; num < AMD_FAM14H_STATE_NUM; num++) { for (cpu = 0; cpu < cpu_count; cpu++) amd_fam14h_disable(&amd_fam14h_cstates[num], cpu); } #ifdef DEBUG clock_gettime(CLOCK_REALTIME, &dbg_time); dbg_timediff = timespec_diff_us(end_time, dbg_time); dprint("Disabling counters took: %lu ns\n", dbg_timediff); #endif timediff = timespec_diff_us(start_time, end_time); if (timediff / 1000 > OVERFLOW_MS) print_overflow_err((unsigned int)timediff / 1000000, OVERFLOW_MS / 1000); return 0; } static int is_nbp1_capable(void) { uint32_t val; val = pci_read_long(amd_fam14h_pci_dev, PCI_NBP1_CAP_OFFSET); return val & (1 << 31); } struct cpuidle_monitor *amd_fam14h_register(void) { int num; if (cpupower_cpu_info.vendor != X86_VENDOR_AMD) return NULL; if (cpupower_cpu_info.family == 0x14) strncpy(amd_fam14h_monitor.name, "Fam_14h", MONITOR_NAME_LEN - 1); else if (cpupower_cpu_info.family == 0x12) strncpy(amd_fam14h_monitor.name, "Fam_12h", MONITOR_NAME_LEN - 1); else return NULL; /* We do not alloc for nbp1 machine wide counter */ for (num = 0; num < AMD_FAM14H_STATE_NUM - 1; num++) { previous_count[num] = calloc(cpu_count, sizeof(unsigned long long)); 
current_count[num] = calloc(cpu_count, sizeof(unsigned long long)); } /* We need PCI device: Slot 18, Func 6, compare with BKDG for fam 12h/14h */ amd_fam14h_pci_dev = pci_slot_func_init(&pci_acc, 0x18, 6); if (amd_fam14h_pci_dev == NULL || pci_acc == NULL) return NULL; if (!is_nbp1_capable()) amd_fam14h_monitor.hw_states_num = AMD_FAM14H_STATE_NUM - 1; amd_fam14h_monitor.name_len = strlen(amd_fam14h_monitor.name); return &amd_fam14h_monitor; } static void amd_fam14h_unregister(void) { int num; for (num = 0; num < AMD_FAM14H_STATE_NUM - 1; num++) { free(previous_count[num]); free(current_count[num]); } pci_cleanup(pci_acc); } struct cpuidle_monitor amd_fam14h_monitor = { .name = "", .hw_states = amd_fam14h_cstates, .hw_states_num = AMD_FAM14H_STATE_NUM, .start = amd_fam14h_start, .stop = amd_fam14h_stop, .do_register = amd_fam14h_register, .unregister = amd_fam14h_unregister, .needs_root = 1, .overflow_s = OVERFLOW_MS / 1000, }; #endif /* #if defined(__i386__) || defined(__x86_64__) */
gpl-2.0
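/*
 * Illustrative standalone sketch, not cpupower code: the monitor above
 * counts residency in 80 ns ticks, so a delta of N ticks over T us of
 * wall time is 100 * N / T / 12.5 percent, and a 32-bit counter wraps
 * after 2^32 * 80 ns, about 343597 ms, which is where OVERFLOW_MS
 * comes from. The inputs below are example numbers.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long diff = 6250000ULL;	/* example: ticks counted */
	unsigned long long timediff = 1000000;	/* example: 1 s, in us */
	double percent = 100.0 * (double)diff / (double)timediff / 12.5;
	double overflow_ms = 4294967296.0 * 80e-9 * 1000.0;

	/* 6250000 ticks * 80 ns = 0.5 s busy out of 1 s -> 50% */
	printf("residency: %.2f%%\n", percent);
	printf("counter overflow window: %.0f ms\n", overflow_ms);
	return 0;
}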
adegroote/linux
arch/cris/arch-v10/drivers/eeprom.c
11311
22088
/*!***************************************************************************** *! *! Implements an interface for i2c compatible eeproms to run under Linux. *! Supports 2k, 8k(?) and 16k. Uses adaptive timing adjustments by *! Johan.Adolfsson@axis.com *! *! Probing results: *! 8k or not is detected (the assumes 2k or 16k) *! 2k or 16k detected using test reads and writes. *! *!------------------------------------------------------------------------ *! HISTORY *! *! DATE NAME CHANGES *! ---- ---- ------- *! Aug 28 1999 Edgar Iglesias Initial Version *! Aug 31 1999 Edgar Iglesias Allow simultaneous users. *! Sep 03 1999 Edgar Iglesias Updated probe. *! Sep 03 1999 Edgar Iglesias Added bail-out stuff if we get interrupted *! in the spin-lock. *! *! (c) 1999 Axis Communications AB, Lund, Sweden *!*****************************************************************************/ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/wait.h> #include <asm/uaccess.h> #include "i2c.h" #define D(x) /* If we should use adaptive timing or not: */ /* #define EEPROM_ADAPTIVE_TIMING */ #define EEPROM_MAJOR_NR 122 /* use a LOCAL/EXPERIMENTAL major for now */ #define EEPROM_MINOR_NR 0 /* Empirical sane initial value of the delay, the value will be adapted to * what the chip needs when using EEPROM_ADAPTIVE_TIMING. */ #define INITIAL_WRITEDELAY_US 4000 #define MAX_WRITEDELAY_US 10000 /* 10 ms according to spec for 2KB EEPROM */ /* This one defines how many times to try when eeprom fails. */ #define EEPROM_RETRIES 10 #define EEPROM_2KB (2 * 1024) /*#define EEPROM_4KB (4 * 1024)*/ /* Exists but not used in Axis products */ #define EEPROM_8KB (8 * 1024 - 1 ) /* Last byte has write protection bit */ #define EEPROM_16KB (16 * 1024) #define i2c_delay(x) udelay(x) /* * This structure describes the attached eeprom chip. * The values are probed for. */ struct eeprom_type { unsigned long size; unsigned long sequential_write_pagesize; unsigned char select_cmd; unsigned long usec_delay_writecycles; /* Min time between write cycles (up to 10ms for some models) */ unsigned long usec_delay_step; /* For adaptive algorithm */ int adapt_state; /* 1 = To high , 0 = Even, -1 = To low */ /* this one is to keep the read/write operations atomic */ struct mutex lock; int retry_cnt_addr; /* Used to keep track of number of retries for adaptive timing adjustments */ int retry_cnt_read; }; static int eeprom_open(struct inode * inode, struct file * file); static loff_t eeprom_lseek(struct file * file, loff_t offset, int orig); static ssize_t eeprom_read(struct file * file, char * buf, size_t count, loff_t *off); static ssize_t eeprom_write(struct file * file, const char * buf, size_t count, loff_t *off); static int eeprom_close(struct inode * inode, struct file * file); static int eeprom_address(unsigned long addr); static int read_from_eeprom(char * buf, int count); static int eeprom_write_buf(loff_t addr, const char * buf, int count); static int eeprom_read_buf(loff_t addr, char * buf, int count); static void eeprom_disable_write_protect(void); static const char eeprom_name[] = "eeprom"; /* chip description */ static struct eeprom_type eeprom; /* This is the exported file-operations structure for this device. */ const struct file_operations eeprom_fops = { .llseek = eeprom_lseek, .read = eeprom_read, .write = eeprom_write, .open = eeprom_open, .release = eeprom_close }; /* eeprom init call. Probes for different eeprom models. 
*/ int __init eeprom_init(void) { mutex_init(&eeprom.lock); #ifdef CONFIG_ETRAX_I2C_EEPROM_PROBE #define EETEXT "Found" #else #define EETEXT "Assuming" #endif if (register_chrdev(EEPROM_MAJOR_NR, eeprom_name, &eeprom_fops)) { printk(KERN_INFO "%s: unable to get major %d for eeprom device\n", eeprom_name, EEPROM_MAJOR_NR); return -1; } printk("EEPROM char device v0.3, (c) 2000 Axis Communications AB\n"); /* * Note: Most of this probing method was taken from the printserver (5470e) * codebase. It did not contain a way of finding the 16kB chips * (M24128 or variants). The method used here might not work * for all models. If you encounter problems the easiest way * is probably to define your model within #ifdef's, and hard- * code it. */ eeprom.size = 0; eeprom.usec_delay_writecycles = INITIAL_WRITEDELAY_US; eeprom.usec_delay_step = 128; eeprom.adapt_state = 0; #ifdef CONFIG_ETRAX_I2C_EEPROM_PROBE i2c_start(); i2c_outbyte(0x80); if(!i2c_getack()) { /* It's not 8k.. */ int success = 0; unsigned char buf_2k_start[16]; /* Im not sure this will work... :) */ /* assume 2kB, if failure go for 16kB */ /* Test with 16kB settings.. */ /* If it's a 2kB EEPROM and we address it outside it's range * it will mirror the address space: * 1. We read two locations (that are mirrored), * if the content differs * it's a 16kB EEPROM. * 2. if it doesn't differ - write different value to one of the locations, * check the other - if content still is the same it's a 2k EEPROM, * restore original data. */ #define LOC1 8 #define LOC2 (0x1fb) /*1fb, 3ed, 5df, 7d1 */ /* 2k settings */ i2c_stop(); eeprom.size = EEPROM_2KB; eeprom.select_cmd = 0xA0; eeprom.sequential_write_pagesize = 16; if( eeprom_read_buf( 0, buf_2k_start, 16 ) == 16 ) { D(printk("2k start: '%16.16s'\n", buf_2k_start)); } else { printk(KERN_INFO "%s: Failed to read in 2k mode!\n", eeprom_name); } /* 16k settings */ eeprom.size = EEPROM_16KB; eeprom.select_cmd = 0xA0; eeprom.sequential_write_pagesize = 64; { unsigned char loc1[4], loc2[4], tmp[4]; if( eeprom_read_buf(LOC2, loc2, 4) == 4) { if( eeprom_read_buf(LOC1, loc1, 4) == 4) { D(printk("0 loc1: (%i) '%4.4s' loc2 (%i) '%4.4s'\n", LOC1, loc1, LOC2, loc2)); #if 0 if (memcmp(loc1, loc2, 4) != 0 ) { /* It's 16k */ printk(KERN_INFO "%s: 16k detected in step 1\n", eeprom_name); eeprom.size = EEPROM_16KB; success = 1; } else #endif { /* Do step 2 check */ /* Invert value */ loc1[0] = ~loc1[0]; if (eeprom_write_buf(LOC1, loc1, 1) == 1) { /* If 2k EEPROM this write will actually write 10 bytes * from pos 0 */ D(printk("1 loc1: (%i) '%4.4s' loc2 (%i) '%4.4s'\n", LOC1, loc1, LOC2, loc2)); if( eeprom_read_buf(LOC1, tmp, 4) == 4) { D(printk("2 loc1: (%i) '%4.4s' tmp '%4.4s'\n", LOC1, loc1, tmp)); if (memcmp(loc1, tmp, 4) != 0 ) { printk(KERN_INFO "%s: read and write differs! 
Not 16kB\n", eeprom_name); loc1[0] = ~loc1[0]; if (eeprom_write_buf(LOC1, loc1, 1) == 1) { success = 1; } else { printk(KERN_INFO "%s: Restore 2k failed during probe," " EEPROM might be corrupt!\n", eeprom_name); } i2c_stop(); /* Go to 2k mode and write original data */ eeprom.size = EEPROM_2KB; eeprom.select_cmd = 0xA0; eeprom.sequential_write_pagesize = 16; if( eeprom_write_buf(0, buf_2k_start, 16) == 16) { } else { printk(KERN_INFO "%s: Failed to write back 2k start!\n", eeprom_name); } eeprom.size = EEPROM_2KB; } } if(!success) { if( eeprom_read_buf(LOC2, loc2, 1) == 1) { D(printk("0 loc1: (%i) '%4.4s' loc2 (%i) '%4.4s'\n", LOC1, loc1, LOC2, loc2)); if (memcmp(loc1, loc2, 4) == 0 ) { /* Data the same, must be mirrored -> 2k */ /* Restore data */ printk(KERN_INFO "%s: 2k detected in step 2\n", eeprom_name); loc1[0] = ~loc1[0]; if (eeprom_write_buf(LOC1, loc1, 1) == 1) { success = 1; } else { printk(KERN_INFO "%s: Restore 2k failed during probe," " EEPROM might be corrupt!\n", eeprom_name); } eeprom.size = EEPROM_2KB; } else { printk(KERN_INFO "%s: 16k detected in step 2\n", eeprom_name); loc1[0] = ~loc1[0]; /* Data differs, assume 16k */ /* Restore data */ if (eeprom_write_buf(LOC1, loc1, 1) == 1) { success = 1; } else { printk(KERN_INFO "%s: Restore 16k failed during probe," " EEPROM might be corrupt!\n", eeprom_name); } eeprom.size = EEPROM_16KB; } } } } } /* read LOC1 */ } /* address LOC1 */ if (!success) { printk(KERN_INFO "%s: Probing failed!, using 2KB!\n", eeprom_name); eeprom.size = EEPROM_2KB; } } /* read */ } } else { i2c_outbyte(0x00); if(!i2c_getack()) { /* No 8k */ eeprom.size = EEPROM_2KB; } else { i2c_start(); i2c_outbyte(0x81); if (!i2c_getack()) { eeprom.size = EEPROM_2KB; } else { /* It's a 8kB */ i2c_inbyte(); eeprom.size = EEPROM_8KB; } } } i2c_stop(); #elif defined(CONFIG_ETRAX_I2C_EEPROM_16KB) eeprom.size = EEPROM_16KB; #elif defined(CONFIG_ETRAX_I2C_EEPROM_8KB) eeprom.size = EEPROM_8KB; #elif defined(CONFIG_ETRAX_I2C_EEPROM_2KB) eeprom.size = EEPROM_2KB; #endif switch(eeprom.size) { case (EEPROM_2KB): printk("%s: " EETEXT " i2c compatible 2kB eeprom.\n", eeprom_name); eeprom.sequential_write_pagesize = 16; eeprom.select_cmd = 0xA0; break; case (EEPROM_8KB): printk("%s: " EETEXT " i2c compatible 8kB eeprom.\n", eeprom_name); eeprom.sequential_write_pagesize = 16; eeprom.select_cmd = 0x80; break; case (EEPROM_16KB): printk("%s: " EETEXT " i2c compatible 16kB eeprom.\n", eeprom_name); eeprom.sequential_write_pagesize = 64; eeprom.select_cmd = 0xA0; break; default: eeprom.size = 0; printk("%s: Did not find a supported eeprom\n", eeprom_name); break; } eeprom_disable_write_protect(); return 0; } /* Opens the device. */ static int eeprom_open(struct inode * inode, struct file * file) { if(iminor(inode) != EEPROM_MINOR_NR) return -ENXIO; if(imajor(inode) != EEPROM_MAJOR_NR) return -ENXIO; if( eeprom.size > 0 ) { /* OK */ return 0; } /* No EEprom found */ return -EFAULT; } /* Changes the current file position. 
*/ static loff_t eeprom_lseek(struct file * file, loff_t offset, int orig) { /* * orig 0: position from begning of eeprom * orig 1: relative from current position * orig 2: position from last eeprom address */ switch (orig) { case 0: file->f_pos = offset; break; case 1: file->f_pos += offset; break; case 2: file->f_pos = eeprom.size - offset; break; default: return -EINVAL; } /* truncate position */ if (file->f_pos < 0) { file->f_pos = 0; return(-EOVERFLOW); } if (file->f_pos >= eeprom.size) { file->f_pos = eeprom.size - 1; return(-EOVERFLOW); } return ( file->f_pos ); } /* Reads data from eeprom. */ static int eeprom_read_buf(loff_t addr, char * buf, int count) { return eeprom_read(NULL, buf, count, &addr); } /* Reads data from eeprom. */ static ssize_t eeprom_read(struct file * file, char * buf, size_t count, loff_t *off) { int read=0; unsigned long p = *off; unsigned char page; if(p >= eeprom.size) /* Address i 0 - (size-1) */ { return -EFAULT; } if (mutex_lock_interruptible(&eeprom.lock)) return -EINTR; page = (unsigned char) (p >> 8); if(!eeprom_address(p)) { printk(KERN_INFO "%s: Read failed to address the eeprom: " "0x%08X (%i) page: %i\n", eeprom_name, (int)p, (int)p, page); i2c_stop(); /* don't forget to wake them up */ mutex_unlock(&eeprom.lock); return -EFAULT; } if( (p + count) > eeprom.size) { /* truncate count */ count = eeprom.size - p; } /* stop dummy write op and initiate the read op */ i2c_start(); /* special case for small eeproms */ if(eeprom.size < EEPROM_16KB) { i2c_outbyte( eeprom.select_cmd | 1 | (page << 1) ); } /* go on with the actual read */ read = read_from_eeprom( buf, count); if(read > 0) { *off += read; } mutex_unlock(&eeprom.lock); return read; } /* Writes data to eeprom. */ static int eeprom_write_buf(loff_t addr, const char * buf, int count) { return eeprom_write(NULL, buf, count, &addr); } /* Writes data to eeprom. 
*/ static ssize_t eeprom_write(struct file * file, const char * buf, size_t count, loff_t *off) { int i, written, restart=1; unsigned long p; if (!access_ok(VERIFY_READ, buf, count)) { return -EFAULT; } /* bail out if we get interrupted */ if (mutex_lock_interruptible(&eeprom.lock)) return -EINTR; for(i = 0; (i < EEPROM_RETRIES) && (restart > 0); i++) { restart = 0; written = 0; p = *off; while( (written < count) && (p < eeprom.size)) { /* address the eeprom */ if(!eeprom_address(p)) { printk(KERN_INFO "%s: Write failed to address the eeprom: " "0x%08X (%i) \n", eeprom_name, (int)p, (int)p); i2c_stop(); /* don't forget to wake them up */ mutex_unlock(&eeprom.lock); return -EFAULT; } #ifdef EEPROM_ADAPTIVE_TIMING /* Adaptive algorithm to adjust timing */ if (eeprom.retry_cnt_addr > 0) { /* To Low now */ D(printk(">D=%i d=%i\n", eeprom.usec_delay_writecycles, eeprom.usec_delay_step)); if (eeprom.usec_delay_step < 4) { eeprom.usec_delay_step++; eeprom.usec_delay_writecycles += eeprom.usec_delay_step; } else { if (eeprom.adapt_state > 0) { /* To Low before */ eeprom.usec_delay_step *= 2; if (eeprom.usec_delay_step > 2) { eeprom.usec_delay_step--; } eeprom.usec_delay_writecycles += eeprom.usec_delay_step; } else if (eeprom.adapt_state < 0) { /* To High before (toggle dir) */ eeprom.usec_delay_writecycles += eeprom.usec_delay_step; if (eeprom.usec_delay_step > 1) { eeprom.usec_delay_step /= 2; eeprom.usec_delay_step--; } } } eeprom.adapt_state = 1; } else { /* To High (or good) now */ D(printk("<D=%i d=%i\n", eeprom.usec_delay_writecycles, eeprom.usec_delay_step)); if (eeprom.adapt_state < 0) { /* To High before */ if (eeprom.usec_delay_step > 1) { eeprom.usec_delay_step *= 2; eeprom.usec_delay_step--; if (eeprom.usec_delay_writecycles > eeprom.usec_delay_step) { eeprom.usec_delay_writecycles -= eeprom.usec_delay_step; } } } else if (eeprom.adapt_state > 0) { /* To Low before (toggle dir) */ if (eeprom.usec_delay_writecycles > eeprom.usec_delay_step) { eeprom.usec_delay_writecycles -= eeprom.usec_delay_step; } if (eeprom.usec_delay_step > 1) { eeprom.usec_delay_step /= 2; eeprom.usec_delay_step--; } eeprom.adapt_state = -1; } if (eeprom.adapt_state > -100) { eeprom.adapt_state--; } else { /* Restart adaption */ D(printk("#Restart\n")); eeprom.usec_delay_step++; } } #endif /* EEPROM_ADAPTIVE_TIMING */ /* write until we hit a page boundary or count */ do { i2c_outbyte(buf[written]); if(!i2c_getack()) { restart=1; printk(KERN_INFO "%s: write error, retrying. %d\n", eeprom_name, i); i2c_stop(); break; } written++; p++; } while( written < count && ( p % eeprom.sequential_write_pagesize )); /* end write cycle */ i2c_stop(); i2c_delay(eeprom.usec_delay_writecycles); } /* while */ } /* for */ mutex_unlock(&eeprom.lock); if (written == 0 && p >= eeprom.size){ return -ENOSPC; } *off = p; return written; } /* Closes the device. */ static int eeprom_close(struct inode * inode, struct file * file) { /* do nothing for now */ return 0; } /* Sets the current address of the eeprom. */ static int eeprom_address(unsigned long addr) { int i; unsigned char page, offset; page = (unsigned char) (addr >> 8); offset = (unsigned char) addr; for(i = 0; i < EEPROM_RETRIES; i++) { /* start a dummy write for addressing */ i2c_start(); if(eeprom.size == EEPROM_16KB) { i2c_outbyte( eeprom.select_cmd ); i2c_getack(); i2c_outbyte(page); } else { i2c_outbyte( eeprom.select_cmd | (page << 1) ); } if(!i2c_getack()) { /* retry */ i2c_stop(); /* Must have a delay here.. 
500 works, >50, 100->works 5th time*/ i2c_delay(MAX_WRITEDELAY_US / EEPROM_RETRIES * i); /* The chip needs up to 10 ms from write stop to next start */ } else { i2c_outbyte(offset); if(!i2c_getack()) { /* retry */ i2c_stop(); } else break; } } eeprom.retry_cnt_addr = i; D(printk("%i\n", eeprom.retry_cnt_addr)); if(eeprom.retry_cnt_addr == EEPROM_RETRIES) { /* failed */ return 0; } return 1; } /* Reads from current address. */ static int read_from_eeprom(char * buf, int count) { int i, read=0; for(i = 0; i < EEPROM_RETRIES; i++) { if(eeprom.size == EEPROM_16KB) { i2c_outbyte( eeprom.select_cmd | 1 ); } if(i2c_getack()) { break; } } if(i == EEPROM_RETRIES) { printk(KERN_INFO "%s: failed to read from eeprom\n", eeprom_name); i2c_stop(); return -EFAULT; } while( (read < count)) { if (put_user(i2c_inbyte(), &buf[read++])) { i2c_stop(); return -EFAULT; } /* * make sure we don't ack last byte or you will get very strange * results! */ if(read < count) { i2c_sendack(); } } /* stop the operation */ i2c_stop(); return read; } /* Disables write protection if applicable. */ #define DBP_SAVE(x) #define ax_printf printk static void eeprom_disable_write_protect(void) { /* Disable write protect */ if (eeprom.size == EEPROM_8KB) { /* Step 1 Set WEL = 1 (write 00000010 to address 1FFFh */ i2c_start(); i2c_outbyte(0xbe); if(!i2c_getack()) { DBP_SAVE(ax_printf("Get ack returns false\n")); } i2c_outbyte(0xFF); if(!i2c_getack()) { DBP_SAVE(ax_printf("Get ack returns false 2\n")); } i2c_outbyte(0x02); if(!i2c_getack()) { DBP_SAVE(ax_printf("Get ack returns false 3\n")); } i2c_stop(); i2c_delay(1000); /* Step 2 Set RWEL = 1 (write 00000110 to address 1FFFh */ i2c_start(); i2c_outbyte(0xbe); if(!i2c_getack()) { DBP_SAVE(ax_printf("Get ack returns false 55\n")); } i2c_outbyte(0xFF); if(!i2c_getack()) { DBP_SAVE(ax_printf("Get ack returns false 52\n")); } i2c_outbyte(0x06); if(!i2c_getack()) { DBP_SAVE(ax_printf("Get ack returns false 53\n")); } i2c_stop(); /* Step 3 Set BP1, BP0, and/or WPEN bits (write 00000110 to address 1FFFh */ i2c_start(); i2c_outbyte(0xbe); if(!i2c_getack()) { DBP_SAVE(ax_printf("Get ack returns false 56\n")); } i2c_outbyte(0xFF); if(!i2c_getack()) { DBP_SAVE(ax_printf("Get ack returns false 57\n")); } i2c_outbyte(0x06); if(!i2c_getack()) { DBP_SAVE(ax_printf("Get ack returns false 58\n")); } i2c_stop(); /* Write protect disabled */ } } module_init(eeprom_init);
gpl-2.0
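/*
 * Illustrative host-side model, not the CRIS driver: the probe above
 * tells a 2 kB part from a 16 kB part by exploiting address mirroring.
 * On the small chip two "different" addresses alias the same cell, so
 * inverting a byte through one address changes what a read through the
 * other returns. The simulation below models a chip whose address space
 * wraps modulo its real size; the alias pair used here is simplified,
 * the driver's real LOC1/LOC2 choice depends on its page-addressing
 * details.
 */
#include <stdio.h>
#include <stdint.h>

#define SMALL_SIZE 2048

static uint8_t cells[SMALL_SIZE];
static unsigned chip_size;	/* real capacity of the simulated chip */

static uint8_t ee_read(unsigned addr)
{
	return cells[addr % chip_size];
}

static void ee_write(unsigned addr, uint8_t v)
{
	cells[addr % chip_size] = v;
}

/* Returns 1 if loc1 and loc2 alias (small, mirrored part), else 0. */
static int is_mirrored(unsigned loc1, unsigned loc2)
{
	uint8_t orig1 = ee_read(loc1);
	uint8_t orig2 = ee_read(loc2);
	int mirrored;

	ee_write(loc1, (uint8_t)~orig1);	/* invert one location */
	mirrored = (ee_read(loc2) != orig2);	/* did the alias follow? */
	ee_write(loc1, orig1);			/* restore original data */
	return mirrored;
}

int main(void)
{
	chip_size = SMALL_SIZE;
	/* loc2 = loc1 + SMALL_SIZE aliases loc1 on the 2 kB part only */
	printf("2k chip mirrors: %d\n", is_mirrored(8, 8 + SMALL_SIZE));
	return 0;
}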
ptmr3/smdk4412
drivers/input/keyboard/gpio_keys.c
48
22558
/* * Driver for keys on GPIO lines capable of generating interrupts. * * Copyright 2005 Phil Blundell * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/sched.h> #include <linux/pm.h> #include <linux/slab.h> #include <linux/sysctl.h> #include <linux/proc_fs.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/input.h> #include <linux/gpio_keys.h> #include <linux/workqueue.h> #include <linux/gpio.h> #include <linux/irqdesc.h> extern struct class *sec_class; struct gpio_button_data { struct gpio_keys_button *button; struct input_dev *input; struct timer_list timer; struct work_struct work; int timer_debounce; /* in msecs */ bool disabled; bool key_state; bool wakeup; }; struct gpio_keys_drvdata { struct input_dev *input; struct device *sec_key; struct mutex disable_lock; unsigned int n_buttons; int (*enable)(struct device *dev); void (*disable)(struct device *dev); struct gpio_button_data data[0]; /* WARNING: this area can be expanded. Do NOT add any member! */ }; /* * SYSFS interface for enabling/disabling keys and switches: * * There are 4 attributes under /sys/devices/platform/gpio-keys/ * keys [ro] - bitmap of keys (EV_KEY) which can be * disabled * switches [ro] - bitmap of switches (EV_SW) which can be * disabled * disabled_keys [rw] - bitmap of keys currently disabled * disabled_switches [rw] - bitmap of switches currently disabled * * Userland can change these values and hence disable event generation * for each key (or switch). Disabling a key means its interrupt line * is disabled. * * For example, if we have following switches set up as gpio-keys: * SW_DOCK = 5 * SW_CAMERA_LENS_COVER = 9 * SW_KEYPAD_SLIDE = 10 * SW_FRONT_PROXIMITY = 11 * This is read from switches: * 11-9,5 * Next we want to disable proximity (11) and dock (5), we write: * 11,5 * to file disabled_switches. Now proximity and dock IRQs are disabled. * This can be verified by reading the file disabled_switches: * 11,5 * If we now want to enable proximity (11) switch we write: * 5 * to disabled_switches. * * We can disable only those keys which don't allow sharing the irq. */ /** * get_n_events_by_type() - returns maximum number of events per @type * @type: type of button (%EV_KEY, %EV_SW) * * Return value of this function can be used to allocate bitmap * large enough to hold all bits for given type. */ static inline int get_n_events_by_type(int type) { BUG_ON(type != EV_SW && type != EV_KEY); return (type == EV_KEY) ? KEY_CNT : SW_CNT; } /** * gpio_keys_disable_button() - disables given GPIO button * @bdata: button data for button to be disabled * * Disables button pointed by @bdata. This is done by masking * IRQ line. After this function is called, button won't generate * input events anymore. Note that one can only disable buttons * that don't share IRQs. * * Make sure that @bdata->disable_lock is locked when entering * this function to avoid races when concurrent threads are * disabling buttons at the same time. */ static void gpio_keys_disable_button(struct gpio_button_data *bdata) { if (!bdata->disabled) { /* * Disable IRQ and possible debouncing timer. 
*/ disable_irq(gpio_to_irq(bdata->button->gpio)); if (bdata->timer_debounce) del_timer_sync(&bdata->timer); bdata->disabled = true; } } /** * gpio_keys_enable_button() - enables given GPIO button * @bdata: button data for button to be disabled * * Enables given button pointed by @bdata. * * Make sure that @bdata->disable_lock is locked when entering * this function to avoid races with concurrent threads trying * to enable the same button at the same time. */ static void gpio_keys_enable_button(struct gpio_button_data *bdata) { if (bdata->disabled) { enable_irq(gpio_to_irq(bdata->button->gpio)); bdata->disabled = false; } } /** * gpio_keys_attr_show_helper() - fill in stringified bitmap of buttons * @ddata: pointer to drvdata * @buf: buffer where stringified bitmap is written * @type: button type (%EV_KEY, %EV_SW) * @only_disabled: does caller want only those buttons that are * currently disabled or all buttons that can be * disabled * * This function writes buttons that can be disabled to @buf. If * @only_disabled is true, then @buf contains only those buttons * that are currently disabled. Returns 0 on success or negative * errno on failure. */ static ssize_t gpio_keys_attr_show_helper(struct gpio_keys_drvdata *ddata, char *buf, unsigned int type, bool only_disabled) { int n_events = get_n_events_by_type(type); unsigned long *bits; ssize_t ret; int i; bits = kcalloc(BITS_TO_LONGS(n_events), sizeof(*bits), GFP_KERNEL); if (!bits) return -ENOMEM; for (i = 0; i < ddata->n_buttons; i++) { struct gpio_button_data *bdata = &ddata->data[i]; if (bdata->button->type != type) continue; if (only_disabled && !bdata->disabled) continue; __set_bit(bdata->button->code, bits); } ret = bitmap_scnlistprintf(buf, PAGE_SIZE - 2, bits, n_events); buf[ret++] = '\n'; buf[ret] = '\0'; kfree(bits); return ret; } /** * gpio_keys_attr_store_helper() - enable/disable buttons based on given bitmap * @ddata: pointer to drvdata * @buf: buffer from userspace that contains stringified bitmap * @type: button type (%EV_KEY, %EV_SW) * * This function parses stringified bitmap from @buf and disables/enables * GPIO buttons accordinly. Returns 0 on success and negative error * on failure. 
*/ static ssize_t gpio_keys_attr_store_helper(struct gpio_keys_drvdata *ddata, const char *buf, unsigned int type) { int n_events = get_n_events_by_type(type); unsigned long *bits; ssize_t error; int i; bits = kcalloc(BITS_TO_LONGS(n_events), sizeof(*bits), GFP_KERNEL); if (!bits) return -ENOMEM; error = bitmap_parselist(buf, bits, n_events); if (error) goto out; /* First validate */ for (i = 0; i < ddata->n_buttons; i++) { struct gpio_button_data *bdata = &ddata->data[i]; if (bdata->button->type != type) continue; if (test_bit(bdata->button->code, bits) && !bdata->button->can_disable) { error = -EINVAL; goto out; } } mutex_lock(&ddata->disable_lock); for (i = 0; i < ddata->n_buttons; i++) { struct gpio_button_data *bdata = &ddata->data[i]; if (bdata->button->type != type) continue; if (test_bit(bdata->button->code, bits)) gpio_keys_disable_button(bdata); else gpio_keys_enable_button(bdata); } mutex_unlock(&ddata->disable_lock); out: kfree(bits); return error; } #define ATTR_SHOW_FN(name, type, only_disabled) \ static ssize_t gpio_keys_show_##name(struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ struct platform_device *pdev = to_platform_device(dev); \ struct gpio_keys_drvdata *ddata = platform_get_drvdata(pdev); \ \ return gpio_keys_attr_show_helper(ddata, buf, \ type, only_disabled); \ } ATTR_SHOW_FN(keys, EV_KEY, false); ATTR_SHOW_FN(switches, EV_SW, false); ATTR_SHOW_FN(disabled_keys, EV_KEY, true); ATTR_SHOW_FN(disabled_switches, EV_SW, true); /* * ATTRIBUTES: * * /sys/devices/platform/gpio-keys/keys [ro] * /sys/devices/platform/gpio-keys/switches [ro] */ static DEVICE_ATTR(keys, S_IRUGO, gpio_keys_show_keys, NULL); static DEVICE_ATTR(switches, S_IRUGO, gpio_keys_show_switches, NULL); #define ATTR_STORE_FN(name, type) \ static ssize_t gpio_keys_store_##name(struct device *dev, \ struct device_attribute *attr, \ const char *buf, \ size_t count) \ { \ struct platform_device *pdev = to_platform_device(dev); \ struct gpio_keys_drvdata *ddata = platform_get_drvdata(pdev); \ ssize_t error; \ \ error = gpio_keys_attr_store_helper(ddata, buf, type); \ if (error) \ return error; \ \ return count; \ } ATTR_STORE_FN(disabled_keys, EV_KEY); ATTR_STORE_FN(disabled_switches, EV_SW); /* * ATTRIBUTES: * * /sys/devices/platform/gpio-keys/disabled_keys [rw] * /sys/devices/platform/gpio-keys/disables_switches [rw] */ static DEVICE_ATTR(disabled_keys, S_IWUSR | S_IRUGO, gpio_keys_show_disabled_keys, gpio_keys_store_disabled_keys); static DEVICE_ATTR(disabled_switches, S_IWUSR | S_IRUGO, gpio_keys_show_disabled_switches, gpio_keys_store_disabled_switches); static struct attribute *gpio_keys_attrs[] = { &dev_attr_keys.attr, &dev_attr_switches.attr, &dev_attr_disabled_keys.attr, &dev_attr_disabled_switches.attr, NULL, }; static struct attribute_group gpio_keys_attr_group = { .attrs = gpio_keys_attrs, }; static ssize_t key_pressed_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev); int i; int keystate = 0; for (i = 0; i < ddata->n_buttons; i++) { struct gpio_button_data *bdata = &ddata->data[i]; keystate |= bdata->key_state; } if (keystate) sprintf(buf, "PRESS"); else sprintf(buf, "RELEASE"); return strlen(buf); } /* the volume keys can be the wakeup keys in special case */ static ssize_t wakeup_enable(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev); int n_events = get_n_events_by_type(EV_KEY); unsigned long *bits; ssize_t 
error; int i; bits = kcalloc(BITS_TO_LONGS(n_events), sizeof(*bits), GFP_KERNEL); if (!bits) return -ENOMEM; error = bitmap_parselist(buf, bits, n_events); if (error) goto out; for (i = 0; i < ddata->n_buttons; i++) { struct gpio_button_data *button = &ddata->data[i]; if (test_bit(button->button->code, bits)) button->button->wakeup = 1; else button->button->wakeup = 0; } out: kfree(bits); return count; } static DEVICE_ATTR(sec_key_pressed, 0664, key_pressed_show, NULL); static DEVICE_ATTR(wakeup_keys, 0664, NULL, wakeup_enable); static struct attribute *sec_key_attrs[] = { &dev_attr_sec_key_pressed.attr, &dev_attr_wakeup_keys.attr, NULL, }; static struct attribute_group sec_key_attr_group = { .attrs = sec_key_attrs, }; #ifdef CONFIG_MACH_GC1 void gpio_keys_check_zoom_exception(unsigned int code, bool *zoomkey, unsigned int *hotkey, unsigned int *index) { switch (code) { case KEY_CAMERA_ZOOMIN: *hotkey = 0x221; *index = 5; break; case KEY_CAMERA_ZOOMOUT: *hotkey = 0x222; *index = 6; break; case 0x221: *hotkey = KEY_CAMERA_ZOOMIN; *index = 3; break; case 0x222: *hotkey = KEY_CAMERA_ZOOMOUT; *index = 4; break; default: *zoomkey = false; return; } *zoomkey = true; } #endif #ifdef CONFIG_FAST_BOOT extern bool fake_shut_down; struct timer_list fake_timer; bool fake_pressed; static void gpio_keys_fake_off_check(unsigned long _data) { struct input_dev *input = (struct input_dev *)_data; unsigned int type = EV_KEY; if (fake_pressed == false) return ; printk(KERN_DEBUG"[Keys] make event\n"); input_event(input, type, KEY_FAKE_PWR, 1); input_sync(input); input_event(input, type, KEY_FAKE_PWR, 0); input_sync(input); } #endif static void gpio_keys_report_event(struct gpio_button_data *bdata) { struct gpio_keys_button *button = bdata->button; struct input_dev *input = bdata->input; unsigned int type = button->type ?: EV_KEY; int state = (gpio_get_value_cansleep(button->gpio) ? 1 : 0) ^ button->active_low; #ifdef CONFIG_MACH_GC1 struct gpio_keys_drvdata *ddata = input_get_drvdata(input); struct gpio_button_data *tmp_bdata; static bool overlapped; static unsigned int hotkey; unsigned int index_hotkey = 0; bool zoomkey = false; #ifdef CONFIG_FAST_BOOT /*Fake pwr off control*/ if (fake_shut_down) { if (button->code == KEY_POWER) { if (!!state) { printk(KERN_DEBUG"[Keys] start fake check\n"); fake_pressed = true; mod_timer(&fake_timer, jiffies + msecs_to_jiffies(1000)); } else { printk(KERN_DEBUG"[Keys] end fake checkPwr 0\n"); fake_pressed = false; } } return ; } #endif if (system_rev < 6 && system_rev >= 2) { if (overlapped) { if (hotkey == button->code && !state) { bdata->key_state = !!state; bdata->wakeup = false; overlapped = false; #ifdef CONFIG_SAMSUNG_PRODUCT_SHIP printk(KERN_DEBUG"[KEYS] Ignored\n"); #else printk(KERN_DEBUG"[KEYS] Ignore %d %d\n", hotkey, state); #endif return; } } gpio_keys_check_zoom_exception(button->code, &zoomkey, &hotkey, &index_hotkey); } #endif if (type == EV_ABS) { if (state) { input_event(input, type, button->code, button->value); input_sync(input); } } else { if (bdata->wakeup && !state) { input_event(input, type, button->code, !state); input_sync(input); if (button->code == KEY_POWER) printk(KERN_DEBUG"[keys] f PWR %d\n", !state); } bdata->key_state = !!state; bdata->wakeup = false; #ifdef CONFIG_MACH_GC1 if (system_rev < 6 && system_rev >= 2 && zoomkey && state) { tmp_bdata = &ddata->data[index_hotkey]; if (tmp_bdata->key_state) { #ifdef CONFIG_SAMSUNG_PRODUCT_SHIP printk(KERN_DEBUG"[KEYS] overlapped\n"); #else printk(KERN_DEBUG"[KEYS] overlapped. 
Forced release c %d h %d\n", tmp_bdata->button->code, hotkey); #endif input_event(input, type, hotkey, 0); input_sync(input); overlapped = true; } } #endif input_event(input, type, button->code, !!state); input_sync(input); if (button->code == KEY_POWER) printk(KERN_DEBUG"[keys]PWR %d\n", !!state); } } static void gpio_keys_work_func(struct work_struct *work) { struct gpio_button_data *bdata = container_of(work, struct gpio_button_data, work); gpio_keys_report_event(bdata); } static void gpio_keys_timer(unsigned long _data) { struct gpio_button_data *data = (struct gpio_button_data *)_data; schedule_work(&data->work); } static irqreturn_t gpio_keys_isr(int irq, void *dev_id) { struct gpio_button_data *bdata = dev_id; struct gpio_keys_button *button = bdata->button; struct irq_desc *desc = irq_to_desc(irq); int state = (gpio_get_value_cansleep(button->gpio) ? 1 : 0) ^ button->active_low; BUG_ON(irq != gpio_to_irq(button->gpio)); if (desc) { if (0 < desc->wake_depth) { bdata->wakeup = true; printk(KERN_DEBUG"[keys] in the sleep\n"); } } if (bdata->timer_debounce) mod_timer(&bdata->timer, jiffies + msecs_to_jiffies(bdata->timer_debounce)); else schedule_work(&bdata->work); if (button->isr_hook) button->isr_hook(button->code, state); return IRQ_HANDLED; } static int __devinit gpio_keys_setup_key(struct platform_device *pdev, struct gpio_button_data *bdata, struct gpio_keys_button *button) { const char *desc = button->desc ? button->desc : "gpio_keys"; struct device *dev = &pdev->dev; unsigned long irqflags; int irq, error; setup_timer(&bdata->timer, gpio_keys_timer, (unsigned long)bdata); INIT_WORK(&bdata->work, gpio_keys_work_func); error = gpio_request(button->gpio, desc); if (error < 0) { dev_err(dev, "failed to request GPIO %d, error %d\n", button->gpio, error); goto fail2; } error = gpio_direction_input(button->gpio); if (error < 0) { dev_err(dev, "failed to configure" " direction for GPIO %d, error %d\n", button->gpio, error); goto fail3; } if (button->debounce_interval) { error = gpio_set_debounce(button->gpio, button->debounce_interval * 1000); /* use timer if gpiolib doesn't provide debounce */ if (error < 0) bdata->timer_debounce = button->debounce_interval; } irq = gpio_to_irq(button->gpio); if (irq < 0) { error = irq; dev_err(dev, "Unable to get irq number for GPIO %d, error %d\n", button->gpio, error); goto fail3; } irqflags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING; /* * If platform has specified that the button can be disabled, * we don't want it to share the interrupt line. */ if (!button->can_disable) irqflags |= IRQF_SHARED; if (button->wakeup) irqflags |= IRQF_NO_SUSPEND; error = request_any_context_irq(irq, gpio_keys_isr, irqflags, desc, bdata); if (error < 0) { dev_err(dev, "Unable to claim irq %d; error %d\n", irq, error); goto fail3; } return 0; fail3: gpio_free(button->gpio); fail2: return error; } static int gpio_keys_open(struct input_dev *input) { struct gpio_keys_drvdata *ddata = input_get_drvdata(input); return ddata->enable ? 
ddata->enable(input->dev.parent) : 0; } static void gpio_keys_close(struct input_dev *input) { struct gpio_keys_drvdata *ddata = input_get_drvdata(input); if (ddata->disable) ddata->disable(input->dev.parent); } static int __devinit gpio_keys_probe(struct platform_device *pdev) { struct gpio_keys_platform_data *pdata = pdev->dev.platform_data; struct gpio_keys_drvdata *ddata; struct device *dev = &pdev->dev; struct input_dev *input; int i, error; int wakeup = 0; ddata = kzalloc(sizeof(struct gpio_keys_drvdata) + pdata->nbuttons * sizeof(struct gpio_button_data), GFP_KERNEL); input = input_allocate_device(); if (!ddata || !input) { dev_err(dev, "failed to allocate state\n"); error = -ENOMEM; goto fail1; } ddata->input = input; ddata->n_buttons = pdata->nbuttons; ddata->enable = pdata->enable; ddata->disable = pdata->disable; mutex_init(&ddata->disable_lock); platform_set_drvdata(pdev, ddata); input_set_drvdata(input, ddata); input->name = pdata->name ? : pdev->name; input->phys = "gpio-keys/input0"; input->dev.parent = &pdev->dev; input->open = gpio_keys_open; input->close = gpio_keys_close; input->id.bustype = BUS_HOST; input->id.vendor = 0x0001; input->id.product = 0x0001; input->id.version = 0x0100; /* Enable auto repeat feature of Linux input subsystem */ if (pdata->rep) __set_bit(EV_REP, input->evbit); for (i = 0; i < pdata->nbuttons; i++) { struct gpio_keys_button *button = &pdata->buttons[i]; struct gpio_button_data *bdata = &ddata->data[i]; unsigned int type = button->type ?: EV_KEY; bdata->input = input; bdata->button = button; error = gpio_keys_setup_key(pdev, bdata, button); if (error) goto fail2; if (button->wakeup) wakeup = 1; input_set_capability(input, type, button->code); } error = sysfs_create_group(&pdev->dev.kobj, &gpio_keys_attr_group); if (error) { dev_err(dev, "Unable to export keys/switches, error: %d\n", error); goto fail2; } ddata->sec_key = device_create(sec_class, NULL, 0, ddata, "sec_key"); if (IS_ERR(ddata->sec_key)) dev_err(dev, "Failed to create sec_key device\n"); error = sysfs_create_group(&ddata->sec_key->kobj, &sec_key_attr_group); if (error) { dev_err(dev, "Failed to create the test sysfs: %d\n", error); goto fail2; } error = input_register_device(input); if (error) { dev_err(dev, "Unable to register input device, error: %d\n", error); goto fail3; } /* get current state of buttons */ for (i = 0; i < pdata->nbuttons; i++) gpio_keys_report_event(&ddata->data[i]); input_sync(input); #ifdef CONFIG_FAST_BOOT /*Fake power off*/ input_set_capability(input, EV_KEY, KEY_FAKE_PWR); setup_timer(&fake_timer, gpio_keys_fake_off_check, (unsigned long)input); #endif device_init_wakeup(&pdev->dev, wakeup); return 0; fail3: sysfs_remove_group(&pdev->dev.kobj, &gpio_keys_attr_group); sysfs_remove_group(&ddata->sec_key->kobj, &sec_key_attr_group); fail2: while (--i >= 0) { free_irq(gpio_to_irq(pdata->buttons[i].gpio), &ddata->data[i]); if (ddata->data[i].timer_debounce) del_timer_sync(&ddata->data[i].timer); cancel_work_sync(&ddata->data[i].work); gpio_free(pdata->buttons[i].gpio); } platform_set_drvdata(pdev, NULL); fail1: input_free_device(input); kfree(ddata); return error; } static int __devexit gpio_keys_remove(struct platform_device *pdev) { struct gpio_keys_platform_data *pdata = pdev->dev.platform_data; struct gpio_keys_drvdata *ddata = platform_get_drvdata(pdev); struct input_dev *input = ddata->input; int i; sysfs_remove_group(&pdev->dev.kobj, &gpio_keys_attr_group); device_init_wakeup(&pdev->dev, 0); for (i = 0; i < pdata->nbuttons; i++) { int irq = 
gpio_to_irq(pdata->buttons[i].gpio); free_irq(irq, &ddata->data[i]); if (ddata->data[i].timer_debounce) del_timer_sync(&ddata->data[i].timer); cancel_work_sync(&ddata->data[i].work); gpio_free(pdata->buttons[i].gpio); } input_unregister_device(input); return 0; } #ifdef CONFIG_PM static int gpio_keys_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct gpio_keys_platform_data *pdata = pdev->dev.platform_data; int i; if (device_may_wakeup(&pdev->dev)) { for (i = 0; i < pdata->nbuttons; i++) { struct gpio_keys_button *button = &pdata->buttons[i]; if (button->wakeup) { int irq = gpio_to_irq(button->gpio); enable_irq_wake(irq); } } } return 0; } static int gpio_keys_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct gpio_keys_drvdata *ddata = platform_get_drvdata(pdev); struct gpio_keys_platform_data *pdata = pdev->dev.platform_data; int i; for (i = 0; i < pdata->nbuttons; i++) { struct gpio_keys_button *button = &pdata->buttons[i]; if (button->wakeup && device_may_wakeup(&pdev->dev)) { int irq = gpio_to_irq(button->gpio); disable_irq_wake(irq); } gpio_keys_report_event(&ddata->data[i]); } input_sync(ddata->input); return 0; } static const struct dev_pm_ops gpio_keys_pm_ops = { .suspend = gpio_keys_suspend, .resume = gpio_keys_resume, }; #endif static struct platform_driver gpio_keys_device_driver = { .probe = gpio_keys_probe, .remove = __devexit_p(gpio_keys_remove), .driver = { .name = "gpio-keys", .owner = THIS_MODULE, #ifdef CONFIG_PM .pm = &gpio_keys_pm_ops, #endif } }; static int __init gpio_keys_init(void) { return platform_driver_register(&gpio_keys_device_driver); } static void __exit gpio_keys_exit(void) { platform_driver_unregister(&gpio_keys_device_driver); } module_init(gpio_keys_init); module_exit(gpio_keys_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Phil Blundell <pb@handhelds.org>"); MODULE_DESCRIPTION("Keyboard driver for CPU GPIOs"); MODULE_ALIAS("platform:gpio-keys");
gpl-2.0
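The sysfs interface documented in the gpio_keys driver above accepts a stringified bitmap of event codes; writing masks the IRQ line of each listed button or switch, and only entries registered with can_disable set are accepted. A small userspace sketch of the exact walkthrough in the driver's comment block (switch codes 11 and 5, i.e. SW_FRONT_PROXIMITY and SW_DOCK, taken from that example):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path =
		"/sys/devices/platform/gpio-keys/disabled_switches";
	char list[] = "11,5";	/* codes from the driver's own example */
	char buf[64];
	ssize_t n;
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return 1;
	/* disable both switches in one write; the store helper rejects
	 * codes whose buttons do not allow disabling with -EINVAL */
	if (write(fd, list, strlen(list)) < 0)
		return 1;
	/* read back the stringified bitmap; expect "11,5" */
	n = pread(fd, buf, sizeof(buf) - 1, 0);
	if (n > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);	/* show helper appends '\n' */
	}
	close(fd);
	return 0;
}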
CyanHacker-Lollipop/kernel_google_msm
drivers/staging/android/binder.c
48
105277
/* binder.c * * Android IPC Subsystem * * Copyright (C) 2007-2008 Google, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <asm/cacheflush.h> #include <linux/fdtable.h> #include <linux/file.h> #include <linux/freezer.h> #include <linux/fs.h> #include <linux/list.h> #include <linux/miscdevice.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/nsproxy.h> #include <linux/poll.h> #include <linux/debugfs.h> #include <linux/rbtree.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/security.h> #include "binder.h" #include "binder_trace.h" static DEFINE_MUTEX(binder_main_lock); static DEFINE_MUTEX(binder_deferred_lock); static DEFINE_MUTEX(binder_mmap_lock); static HLIST_HEAD(binder_procs); static HLIST_HEAD(binder_deferred_list); static HLIST_HEAD(binder_dead_nodes); static struct dentry *binder_debugfs_dir_entry_root; static struct dentry *binder_debugfs_dir_entry_proc; static struct binder_node *binder_context_mgr_node; static uid_t binder_context_mgr_uid = -1; static int binder_last_id; static struct workqueue_struct *binder_deferred_workqueue; #define BINDER_DEBUG_ENTRY(name) \ static int binder_##name##_open(struct inode *inode, struct file *file) \ { \ return single_open(file, binder_##name##_show, inode->i_private); \ } \ \ static const struct file_operations binder_##name##_fops = { \ .owner = THIS_MODULE, \ .open = binder_##name##_open, \ .read = seq_read, \ .llseek = seq_lseek, \ .release = single_release, \ } static int binder_proc_show(struct seq_file *m, void *unused); BINDER_DEBUG_ENTRY(proc); /* This is only defined in include/asm-arm/sizes.h */ #ifndef SZ_1K #define SZ_1K 0x400 #endif #ifndef SZ_4M #define SZ_4M 0x400000 #endif #define FORBIDDEN_MMAP_FLAGS (VM_WRITE) #define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64) enum { BINDER_DEBUG_USER_ERROR = 1U << 0, BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1, BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2, BINDER_DEBUG_OPEN_CLOSE = 1U << 3, BINDER_DEBUG_DEAD_BINDER = 1U << 4, BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5, BINDER_DEBUG_READ_WRITE = 1U << 6, BINDER_DEBUG_USER_REFS = 1U << 7, BINDER_DEBUG_THREADS = 1U << 8, BINDER_DEBUG_TRANSACTION = 1U << 9, BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10, BINDER_DEBUG_FREE_BUFFER = 1U << 11, BINDER_DEBUG_INTERNAL_REFS = 1U << 12, BINDER_DEBUG_BUFFER_ALLOC = 1U << 13, BINDER_DEBUG_PRIORITY_CAP = 1U << 14, BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15, }; static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR | BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION; module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO); static bool binder_debug_no_lock; module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO); static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait); static int binder_stop_on_user_error; static int binder_set_stop_on_user_error(const char *val, struct kernel_param *kp) { int ret; ret = param_set_int(val, kp); if (binder_stop_on_user_error < 2) wake_up(&binder_user_error_wait); return ret; } 
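/*
 * Exposition, not part of the original file: the setter above backs the
 * stop_on_user_error module parameter registered just below. With the
 * S_IWUSR permission it can be armed at runtime, e.g.
 *
 *   echo 1 > /sys/module/binder/parameters/stop_on_user_error
 *
 * binder_user_error() (defined below) then bumps the value to 2 on the
 * next user error, and writing any value below 2 wakes whoever is
 * blocked on binder_user_error_wait.
 */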
module_param_call(stop_on_user_error, binder_set_stop_on_user_error, param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO); #define binder_debug(mask, x...) \ do { \ if (binder_debug_mask & mask) \ printk(KERN_INFO x); \ } while (0) #define binder_user_error(x...) \ do { \ if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \ printk(KERN_INFO x); \ if (binder_stop_on_user_error) \ binder_stop_on_user_error = 2; \ } while (0) enum binder_stat_types { BINDER_STAT_PROC, BINDER_STAT_THREAD, BINDER_STAT_NODE, BINDER_STAT_REF, BINDER_STAT_DEATH, BINDER_STAT_TRANSACTION, BINDER_STAT_TRANSACTION_COMPLETE, BINDER_STAT_COUNT }; struct binder_stats { int br[_IOC_NR(BR_FAILED_REPLY) + 1]; int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1]; int obj_created[BINDER_STAT_COUNT]; int obj_deleted[BINDER_STAT_COUNT]; }; static struct binder_stats binder_stats; static inline void binder_stats_deleted(enum binder_stat_types type) { binder_stats.obj_deleted[type]++; } static inline void binder_stats_created(enum binder_stat_types type) { binder_stats.obj_created[type]++; } struct binder_transaction_log_entry { int debug_id; int call_type; int from_proc; int from_thread; int target_handle; int to_proc; int to_thread; int to_node; int data_size; int offsets_size; }; struct binder_transaction_log { int next; int full; struct binder_transaction_log_entry entry[32]; }; static struct binder_transaction_log binder_transaction_log; static struct binder_transaction_log binder_transaction_log_failed; static struct binder_transaction_log_entry *binder_transaction_log_add( struct binder_transaction_log *log) { struct binder_transaction_log_entry *e; e = &log->entry[log->next]; memset(e, 0, sizeof(*e)); log->next++; if (log->next == ARRAY_SIZE(log->entry)) { log->next = 0; log->full = 1; } return e; } struct binder_work { struct list_head entry; enum { BINDER_WORK_TRANSACTION = 1, BINDER_WORK_TRANSACTION_COMPLETE, BINDER_WORK_NODE, BINDER_WORK_DEAD_BINDER, BINDER_WORK_DEAD_BINDER_AND_CLEAR, BINDER_WORK_CLEAR_DEATH_NOTIFICATION, } type; }; struct binder_node { int debug_id; struct binder_work work; union { struct rb_node rb_node; struct hlist_node dead_node; }; struct binder_proc *proc; struct hlist_head refs; int internal_strong_refs; int local_weak_refs; int local_strong_refs; void __user *ptr; void __user *cookie; unsigned has_strong_ref:1; unsigned pending_strong_ref:1; unsigned has_weak_ref:1; unsigned pending_weak_ref:1; unsigned has_async_transaction:1; unsigned accept_fds:1; unsigned min_priority:8; struct list_head async_todo; }; struct binder_ref_death { struct binder_work work; void __user *cookie; }; struct binder_ref { /* Lookups needed: */ /* node + proc => ref (transaction) */ /* desc + proc => ref (transaction, inc/dec ref) */ /* node => refs + procs (proc exit) */ int debug_id; struct rb_node rb_node_desc; struct rb_node rb_node_node; struct hlist_node node_entry; struct binder_proc *proc; struct binder_node *node; uint32_t desc; int strong; int weak; struct binder_ref_death *death; }; struct binder_buffer { struct list_head entry; /* free and allocated entries by address */ struct rb_node rb_node; /* free entry by size or allocated entry */ /* by address */ unsigned free:1; unsigned allow_user_free:1; unsigned async_transaction:1; unsigned debug_id:29; struct binder_transaction *transaction; struct binder_node *target_node; size_t data_size; size_t offsets_size; uint8_t data[0]; }; enum binder_deferred_state { BINDER_DEFERRED_PUT_FILES = 0x01, BINDER_DEFERRED_FLUSH = 0x02, BINDER_DEFERRED_RELEASE = 0x04, }; 
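/*
 * Illustrative sketch, not part of the original driver: binder_alloc_buf()
 * and binder_free_buf() further down both derive the effective payload
 * size of a transaction buffer the same way, by rounding the data and
 * offsets areas up to pointer alignment (ALIGN() from <linux/kernel.h>):
 */
static inline size_t binder_payload_size(size_t data_size,
					 size_t offsets_size)
{
	return ALIGN(data_size, sizeof(void *)) +
	       ALIGN(offsets_size, sizeof(void *));
}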
struct binder_proc { struct hlist_node proc_node; struct rb_root threads; struct rb_root nodes; struct rb_root refs_by_desc; struct rb_root refs_by_node; int pid; struct vm_area_struct *vma; struct mm_struct *vma_vm_mm; struct task_struct *tsk; struct files_struct *files; struct hlist_node deferred_work_node; int deferred_work; void *buffer; ptrdiff_t user_buffer_offset; struct list_head buffers; struct rb_root free_buffers; struct rb_root allocated_buffers; size_t free_async_space; struct page **pages; size_t buffer_size; uint32_t buffer_free; struct list_head todo; wait_queue_head_t wait; struct binder_stats stats; struct list_head delivered_death; int max_threads; int requested_threads; int requested_threads_started; int ready_threads; long default_priority; struct dentry *debugfs_entry; }; enum { BINDER_LOOPER_STATE_REGISTERED = 0x01, BINDER_LOOPER_STATE_ENTERED = 0x02, BINDER_LOOPER_STATE_EXITED = 0x04, BINDER_LOOPER_STATE_INVALID = 0x08, BINDER_LOOPER_STATE_WAITING = 0x10, BINDER_LOOPER_STATE_NEED_RETURN = 0x20 }; struct binder_thread { struct binder_proc *proc; struct rb_node rb_node; int pid; int looper; struct binder_transaction *transaction_stack; struct list_head todo; uint32_t return_error; /* Write failed, return error code in read buf */ uint32_t return_error2; /* Write failed, return error code in read */ /* buffer. Used when sending a reply to a dead process that */ /* we are also waiting on */ wait_queue_head_t wait; struct binder_stats stats; }; struct binder_transaction { int debug_id; struct binder_work work; struct binder_thread *from; struct binder_transaction *from_parent; struct binder_proc *to_proc; struct binder_thread *to_thread; struct binder_transaction *to_parent; unsigned need_reply:1; /* unsigned is_dead:1; */ /* not used at the moment */ struct binder_buffer *buffer; unsigned int code; unsigned int flags; long priority; long saved_priority; uid_t sender_euid; }; static void binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer); /* * copied from get_unused_fd_flags */ int task_get_unused_fd_flags(struct binder_proc *proc, int flags) { struct files_struct *files = proc->files; int fd, error; struct fdtable *fdt; unsigned long rlim_cur; unsigned long irqs; if (files == NULL) return -ESRCH; error = -EMFILE; spin_lock(&files->file_lock); repeat: fdt = files_fdtable(files); fd = find_next_zero_bit(fdt->open_fds, fdt->max_fds, files->next_fd); /* * N.B. For clone tasks sharing a files structure, this test * will limit the total number of files that can be opened. */ rlim_cur = 0; if (lock_task_sighand(proc->tsk, &irqs)) { rlim_cur = proc->tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur; unlock_task_sighand(proc->tsk, &irqs); } if (fd >= rlim_cur) goto out; /* Do we need to expand the fd array or fd set? */ error = expand_files(files, fd); if (error < 0) goto out; if (error) { /* * If we needed to expand the fs array we * might have blocked - try again. 
*/ error = -EMFILE; goto repeat; } __set_open_fd(fd, fdt); if (flags & O_CLOEXEC) __set_close_on_exec(fd, fdt); else __clear_close_on_exec(fd, fdt); files->next_fd = fd + 1; #if 1 /* Sanity check */ if (fdt->fd[fd] != NULL) { printk(KERN_WARNING "get_unused_fd: slot %d not NULL!\n", fd); fdt->fd[fd] = NULL; } #endif error = fd; out: spin_unlock(&files->file_lock); return error; } /* * copied from fd_install */ static void task_fd_install( struct binder_proc *proc, unsigned int fd, struct file *file) { struct files_struct *files = proc->files; struct fdtable *fdt; if (files == NULL) return; spin_lock(&files->file_lock); fdt = files_fdtable(files); BUG_ON(fdt->fd[fd] != NULL); rcu_assign_pointer(fdt->fd[fd], file); spin_unlock(&files->file_lock); } /* * copied from __put_unused_fd in open.c */ static void __put_unused_fd(struct files_struct *files, unsigned int fd) { struct fdtable *fdt = files_fdtable(files); __clear_open_fd(fd, fdt); if (fd < files->next_fd) files->next_fd = fd; } /* * copied from sys_close */ static long task_close_fd(struct binder_proc *proc, unsigned int fd) { struct file *filp; struct files_struct *files = proc->files; struct fdtable *fdt; int retval; if (files == NULL) return -ESRCH; spin_lock(&files->file_lock); fdt = files_fdtable(files); if (fd >= fdt->max_fds) goto out_unlock; filp = fdt->fd[fd]; if (!filp) goto out_unlock; rcu_assign_pointer(fdt->fd[fd], NULL); __clear_close_on_exec(fd, fdt); __put_unused_fd(files, fd); spin_unlock(&files->file_lock); retval = filp_close(filp, files); /* can't restart close syscall because file table entry was cleared */ if (unlikely(retval == -ERESTARTSYS || retval == -ERESTARTNOINTR || retval == -ERESTARTNOHAND || retval == -ERESTART_RESTARTBLOCK)) retval = -EINTR; return retval; out_unlock: spin_unlock(&files->file_lock); return -EBADF; } static inline void binder_lock(const char *tag) { trace_binder_lock(tag); mutex_lock(&binder_main_lock); trace_binder_locked(tag); } static inline void binder_unlock(const char *tag) { trace_binder_unlock(tag); mutex_unlock(&binder_main_lock); } static void binder_set_nice(long nice) { long min_nice; if (can_nice(current, nice)) { set_user_nice(current, nice); return; } min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur; binder_debug(BINDER_DEBUG_PRIORITY_CAP, "binder: %d: nice value %ld not allowed use " "%ld instead\n", current->pid, nice, min_nice); set_user_nice(current, min_nice); if (min_nice < 20) return; binder_user_error("binder: %d RLIMIT_NICE not set\n", current->pid); } static size_t binder_buffer_size(struct binder_proc *proc, struct binder_buffer *buffer) { if (list_is_last(&buffer->entry, &proc->buffers)) return proc->buffer + proc->buffer_size - (void *)buffer->data; else return (size_t)list_entry(buffer->entry.next, struct binder_buffer, entry) - (size_t)buffer->data; } static void binder_insert_free_buffer(struct binder_proc *proc, struct binder_buffer *new_buffer) { struct rb_node **p = &proc->free_buffers.rb_node; struct rb_node *parent = NULL; struct binder_buffer *buffer; size_t buffer_size; size_t new_buffer_size; BUG_ON(!new_buffer->free); new_buffer_size = binder_buffer_size(proc, new_buffer); binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "binder: %d: add free buffer, size %zd, " "at %p\n", proc->pid, new_buffer_size, new_buffer); while (*p) { parent = *p; buffer = rb_entry(parent, struct binder_buffer, rb_node); BUG_ON(!buffer->free); buffer_size = binder_buffer_size(proc, buffer); if (new_buffer_size < buffer_size) p = &parent->rb_left; else p = 
&parent->rb_right; } rb_link_node(&new_buffer->rb_node, parent, p); rb_insert_color(&new_buffer->rb_node, &proc->free_buffers); } static void binder_insert_allocated_buffer(struct binder_proc *proc, struct binder_buffer *new_buffer) { struct rb_node **p = &proc->allocated_buffers.rb_node; struct rb_node *parent = NULL; struct binder_buffer *buffer; BUG_ON(new_buffer->free); while (*p) { parent = *p; buffer = rb_entry(parent, struct binder_buffer, rb_node); BUG_ON(buffer->free); if (new_buffer < buffer) p = &parent->rb_left; else if (new_buffer > buffer) p = &parent->rb_right; else BUG(); } rb_link_node(&new_buffer->rb_node, parent, p); rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers); } static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc, void __user *user_ptr) { struct rb_node *n = proc->allocated_buffers.rb_node; struct binder_buffer *buffer; struct binder_buffer *kern_ptr; kern_ptr = user_ptr - proc->user_buffer_offset - offsetof(struct binder_buffer, data); while (n) { buffer = rb_entry(n, struct binder_buffer, rb_node); BUG_ON(buffer->free); if (kern_ptr < buffer) n = n->rb_left; else if (kern_ptr > buffer) n = n->rb_right; else return buffer; } return NULL; } static int binder_update_page_range(struct binder_proc *proc, int allocate, void *start, void *end, struct vm_area_struct *vma) { void *page_addr; unsigned long user_page_addr; struct vm_struct tmp_area; struct page **page; struct mm_struct *mm; binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "binder: %d: %s pages %p-%p\n", proc->pid, allocate ? "allocate" : "free", start, end); if (end <= start) return 0; trace_binder_update_page_range(proc, allocate, start, end); if (vma) mm = NULL; else mm = get_task_mm(proc->tsk); if (mm) { down_write(&mm->mmap_sem); vma = proc->vma; if (vma && mm != proc->vma_vm_mm) { pr_err("binder: %d: vma mm and task mm mismatch\n", proc->pid); vma = NULL; } } if (allocate == 0) goto free_range; if (vma == NULL) { printk(KERN_ERR "binder: %d: binder_alloc_buf failed to " "map pages in userspace, no vma\n", proc->pid); goto err_no_vma; } for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { int ret; struct page **page_array_ptr; page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; BUG_ON(*page); *page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (*page == NULL) { printk(KERN_ERR "binder: %d: binder_alloc_buf failed " "for page at %p\n", proc->pid, page_addr); goto err_alloc_page_failed; } tmp_area.addr = page_addr; tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? 
*/; page_array_ptr = page; ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr); if (ret) { printk(KERN_ERR "binder: %d: binder_alloc_buf failed " "to map page at %p in kernel\n", proc->pid, page_addr); goto err_map_kernel_failed; } user_page_addr = (uintptr_t)page_addr + proc->user_buffer_offset; ret = vm_insert_page(vma, user_page_addr, page[0]); if (ret) { printk(KERN_ERR "binder: %d: binder_alloc_buf failed " "to map page at %lx in userspace\n", proc->pid, user_page_addr); goto err_vm_insert_page_failed; } /* vm_insert_page does not seem to increment the refcount */ } if (mm) { up_write(&mm->mmap_sem); mmput(mm); } return 0; free_range: for (page_addr = end - PAGE_SIZE; page_addr >= start; page_addr -= PAGE_SIZE) { page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; if (vma) zap_page_range(vma, (uintptr_t)page_addr + proc->user_buffer_offset, PAGE_SIZE, NULL); err_vm_insert_page_failed: unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); err_map_kernel_failed: __free_page(*page); *page = NULL; err_alloc_page_failed: ; } err_no_vma: if (mm) { up_write(&mm->mmap_sem); mmput(mm); } return -ENOMEM; } static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc, size_t data_size, size_t offsets_size, int is_async) { struct rb_node *n = proc->free_buffers.rb_node; struct binder_buffer *buffer; size_t buffer_size; struct rb_node *best_fit = NULL; void *has_page_addr; void *end_page_addr; size_t size; if (proc->vma == NULL) { printk(KERN_ERR "binder: %d: binder_alloc_buf, no vma\n", proc->pid); return NULL; } size = ALIGN(data_size, sizeof(void *)) + ALIGN(offsets_size, sizeof(void *)); if (size < data_size || size < offsets_size) { binder_user_error("binder: %d: got transaction with invalid " "size %zd-%zd\n", proc->pid, data_size, offsets_size); return NULL; } if (is_async && proc->free_async_space < size + sizeof(struct binder_buffer)) { binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "binder: %d: binder_alloc_buf size %zd" "failed, no async space left\n", proc->pid, size); return NULL; } while (n) { buffer = rb_entry(n, struct binder_buffer, rb_node); BUG_ON(!buffer->free); buffer_size = binder_buffer_size(proc, buffer); if (size < buffer_size) { best_fit = n; n = n->rb_left; } else if (size > buffer_size) n = n->rb_right; else { best_fit = n; break; } } if (best_fit == NULL) { printk(KERN_ERR "binder: %d: binder_alloc_buf size %zd failed, " "no address space\n", proc->pid, size); return NULL; } if (n == NULL) { buffer = rb_entry(best_fit, struct binder_buffer, rb_node); buffer_size = binder_buffer_size(proc, buffer); } binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "binder: %d: binder_alloc_buf size %zd got buff" "er %p size %zd\n", proc->pid, size, buffer, buffer_size); has_page_addr = (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK); if (n == NULL) { if (size + sizeof(struct binder_buffer) + 4 >= buffer_size) buffer_size = size; /* no room for other buffers */ else buffer_size = size + sizeof(struct binder_buffer); } end_page_addr = (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size); if (end_page_addr > has_page_addr) end_page_addr = has_page_addr; if (binder_update_page_range(proc, 1, (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL)) return NULL; rb_erase(best_fit, &proc->free_buffers); buffer->free = 0; binder_insert_allocated_buffer(proc, buffer); if (buffer_size != size) { struct binder_buffer *new_buffer = (void *)buffer->data + size; list_add(&new_buffer->entry, &buffer->entry); new_buffer->free = 1; 
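/* hand the unused tail of the best-fit buffer back to the free rb-tree */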
binder_insert_free_buffer(proc, new_buffer); } binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "binder: %d: binder_alloc_buf size %zd got " "%p\n", proc->pid, size, buffer); buffer->data_size = data_size; buffer->offsets_size = offsets_size; buffer->async_transaction = is_async; if (is_async) { proc->free_async_space -= size + sizeof(struct binder_buffer); binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, "binder: %d: binder_alloc_buf size %zd " "async free %zd\n", proc->pid, size, proc->free_async_space); } return buffer; } static void *buffer_start_page(struct binder_buffer *buffer) { return (void *)((uintptr_t)buffer & PAGE_MASK); } static void *buffer_end_page(struct binder_buffer *buffer) { return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK); } static void binder_delete_free_buffer(struct binder_proc *proc, struct binder_buffer *buffer) { struct binder_buffer *prev, *next = NULL; int free_page_end = 1; int free_page_start = 1; BUG_ON(proc->buffers.next == &buffer->entry); prev = list_entry(buffer->entry.prev, struct binder_buffer, entry); BUG_ON(!prev->free); if (buffer_end_page(prev) == buffer_start_page(buffer)) { free_page_start = 0; if (buffer_end_page(prev) == buffer_end_page(buffer)) free_page_end = 0; binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "binder: %d: merge free, buffer %p " "share page with %p\n", proc->pid, buffer, prev); } if (!list_is_last(&buffer->entry, &proc->buffers)) { next = list_entry(buffer->entry.next, struct binder_buffer, entry); if (buffer_start_page(next) == buffer_end_page(buffer)) { free_page_end = 0; if (buffer_start_page(next) == buffer_start_page(buffer)) free_page_start = 0; binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "binder: %d: merge free, buffer" " %p share page with %p\n", proc->pid, buffer, prev); } } list_del(&buffer->entry); if (free_page_start || free_page_end) { binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "binder: %d: merge free, buffer %p do " "not share page%s%s with with %p or %p\n", proc->pid, buffer, free_page_start ? "" : " end", free_page_end ? "" : " start", prev, next); binder_update_page_range(proc, 0, free_page_start ? buffer_start_page(buffer) : buffer_end_page(buffer), (free_page_end ? 
buffer_end_page(buffer) : buffer_start_page(buffer)) + PAGE_SIZE, NULL); } } static void binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer) { size_t size, buffer_size; buffer_size = binder_buffer_size(proc, buffer); size = ALIGN(buffer->data_size, sizeof(void *)) + ALIGN(buffer->offsets_size, sizeof(void *)); binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "binder: %d: binder_free_buf %p size %zd buffer" "_size %zd\n", proc->pid, buffer, size, buffer_size); BUG_ON(buffer->free); BUG_ON(size > buffer_size); BUG_ON(buffer->transaction != NULL); BUG_ON((void *)buffer < proc->buffer); BUG_ON((void *)buffer > proc->buffer + proc->buffer_size); if (buffer->async_transaction) { proc->free_async_space += size + sizeof(struct binder_buffer); binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, "binder: %d: binder_free_buf size %zd " "async free %zd\n", proc->pid, size, proc->free_async_space); } binder_update_page_range(proc, 0, (void *)PAGE_ALIGN((uintptr_t)buffer->data), (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK), NULL); rb_erase(&buffer->rb_node, &proc->allocated_buffers); buffer->free = 1; if (!list_is_last(&buffer->entry, &proc->buffers)) { struct binder_buffer *next = list_entry(buffer->entry.next, struct binder_buffer, entry); if (next->free) { rb_erase(&next->rb_node, &proc->free_buffers); binder_delete_free_buffer(proc, next); } } if (proc->buffers.next != &buffer->entry) { struct binder_buffer *prev = list_entry(buffer->entry.prev, struct binder_buffer, entry); if (prev->free) { binder_delete_free_buffer(proc, buffer); rb_erase(&prev->rb_node, &proc->free_buffers); buffer = prev; } } binder_insert_free_buffer(proc, buffer); } static struct binder_node *binder_get_node(struct binder_proc *proc, void __user *ptr) { struct rb_node *n = proc->nodes.rb_node; struct binder_node *node; while (n) { node = rb_entry(n, struct binder_node, rb_node); if (ptr < node->ptr) n = n->rb_left; else if (ptr > node->ptr) n = n->rb_right; else return node; } return NULL; } static struct binder_node *binder_new_node(struct binder_proc *proc, void __user *ptr, void __user *cookie) { struct rb_node **p = &proc->nodes.rb_node; struct rb_node *parent = NULL; struct binder_node *node; while (*p) { parent = *p; node = rb_entry(parent, struct binder_node, rb_node); if (ptr < node->ptr) p = &(*p)->rb_left; else if (ptr > node->ptr) p = &(*p)->rb_right; else return NULL; } node = kzalloc(sizeof(*node), GFP_KERNEL); if (node == NULL) return NULL; binder_stats_created(BINDER_STAT_NODE); rb_link_node(&node->rb_node, parent, p); rb_insert_color(&node->rb_node, &proc->nodes); node->debug_id = ++binder_last_id; node->proc = proc; node->ptr = ptr; node->cookie = cookie; node->work.type = BINDER_WORK_NODE; INIT_LIST_HEAD(&node->work.entry); INIT_LIST_HEAD(&node->async_todo); binder_debug(BINDER_DEBUG_INTERNAL_REFS, "binder: %d:%d node %d u%p c%p created\n", proc->pid, current->pid, node->debug_id, node->ptr, node->cookie); return node; } static int binder_inc_node(struct binder_node *node, int strong, int internal, struct list_head *target_list) { if (strong) { if (internal) { if (target_list == NULL && node->internal_strong_refs == 0 && !(node == binder_context_mgr_node && node->has_strong_ref)) { printk(KERN_ERR "binder: invalid inc strong " "node for %d\n", node->debug_id); return -EINVAL; } node->internal_strong_refs++; } else node->local_strong_refs++; if (!node->has_strong_ref && target_list) { list_del_init(&node->work.entry); list_add_tail(&node->work.entry, target_list); } } else { if 
(!internal) node->local_weak_refs++; if (!node->has_weak_ref && list_empty(&node->work.entry)) { if (target_list == NULL) { printk(KERN_ERR "binder: invalid inc weak node " "for %d\n", node->debug_id); return -EINVAL; } list_add_tail(&node->work.entry, target_list); } } return 0; } static int binder_dec_node(struct binder_node *node, int strong, int internal) { if (strong) { if (internal) node->internal_strong_refs--; else node->local_strong_refs--; if (node->local_strong_refs || node->internal_strong_refs) return 0; } else { if (!internal) node->local_weak_refs--; if (node->local_weak_refs || !hlist_empty(&node->refs)) return 0; } if (node->proc && (node->has_strong_ref || node->has_weak_ref)) { if (list_empty(&node->work.entry)) { list_add_tail(&node->work.entry, &node->proc->todo); wake_up_interruptible(&node->proc->wait); } } else { if (hlist_empty(&node->refs) && !node->local_strong_refs && !node->local_weak_refs) { list_del_init(&node->work.entry); if (node->proc) { rb_erase(&node->rb_node, &node->proc->nodes); binder_debug(BINDER_DEBUG_INTERNAL_REFS, "binder: refless node %d deleted\n", node->debug_id); } else { hlist_del(&node->dead_node); binder_debug(BINDER_DEBUG_INTERNAL_REFS, "binder: dead node %d deleted\n", node->debug_id); } kfree(node); binder_stats_deleted(BINDER_STAT_NODE); } } return 0; } static struct binder_ref *binder_get_ref(struct binder_proc *proc, uint32_t desc) { struct rb_node *n = proc->refs_by_desc.rb_node; struct binder_ref *ref; while (n) { ref = rb_entry(n, struct binder_ref, rb_node_desc); if (desc < ref->desc) n = n->rb_left; else if (desc > ref->desc) n = n->rb_right; else return ref; } return NULL; } static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc, struct binder_node *node) { struct rb_node *n; struct rb_node **p = &proc->refs_by_node.rb_node; struct rb_node *parent = NULL; struct binder_ref *ref, *new_ref; while (*p) { parent = *p; ref = rb_entry(parent, struct binder_ref, rb_node_node); if (node < ref->node) p = &(*p)->rb_left; else if (node > ref->node) p = &(*p)->rb_right; else return ref; } new_ref = kzalloc(sizeof(*ref), GFP_KERNEL); if (new_ref == NULL) return NULL; binder_stats_created(BINDER_STAT_REF); new_ref->debug_id = ++binder_last_id; new_ref->proc = proc; new_ref->node = node; rb_link_node(&new_ref->rb_node_node, parent, p); rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node); new_ref->desc = (node == binder_context_mgr_node) ? 
0 : 1; for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { ref = rb_entry(n, struct binder_ref, rb_node_desc); if (ref->desc > new_ref->desc) break; new_ref->desc = ref->desc + 1; } p = &proc->refs_by_desc.rb_node; while (*p) { parent = *p; ref = rb_entry(parent, struct binder_ref, rb_node_desc); if (new_ref->desc < ref->desc) p = &(*p)->rb_left; else if (new_ref->desc > ref->desc) p = &(*p)->rb_right; else BUG(); } rb_link_node(&new_ref->rb_node_desc, parent, p); rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc); if (node) { hlist_add_head(&new_ref->node_entry, &node->refs); binder_debug(BINDER_DEBUG_INTERNAL_REFS, "binder: %d new ref %d desc %d for " "node %d\n", proc->pid, new_ref->debug_id, new_ref->desc, node->debug_id); } else { binder_debug(BINDER_DEBUG_INTERNAL_REFS, "binder: %d new ref %d desc %d for " "dead node\n", proc->pid, new_ref->debug_id, new_ref->desc); } return new_ref; } static void binder_delete_ref(struct binder_ref *ref) { binder_debug(BINDER_DEBUG_INTERNAL_REFS, "binder: %d delete ref %d desc %d for " "node %d\n", ref->proc->pid, ref->debug_id, ref->desc, ref->node->debug_id); rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc); rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node); if (ref->strong) binder_dec_node(ref->node, 1, 1); hlist_del(&ref->node_entry); binder_dec_node(ref->node, 0, 1); if (ref->death) { binder_debug(BINDER_DEBUG_DEAD_BINDER, "binder: %d delete ref %d desc %d " "has death notification\n", ref->proc->pid, ref->debug_id, ref->desc); list_del(&ref->death->work.entry); kfree(ref->death); binder_stats_deleted(BINDER_STAT_DEATH); } kfree(ref); binder_stats_deleted(BINDER_STAT_REF); } static int binder_inc_ref(struct binder_ref *ref, int strong, struct list_head *target_list) { int ret; if (strong) { if (ref->strong == 0) { ret = binder_inc_node(ref->node, 1, 1, target_list); if (ret) return ret; } ref->strong++; } else { if (ref->weak == 0) { ret = binder_inc_node(ref->node, 0, 1, target_list); if (ret) return ret; } ref->weak++; } return 0; } static int binder_dec_ref(struct binder_ref *ref, int strong) { if (strong) { if (ref->strong == 0) { binder_user_error("binder: %d invalid dec strong, " "ref %d desc %d s %d w %d\n", ref->proc->pid, ref->debug_id, ref->desc, ref->strong, ref->weak); return -EINVAL; } ref->strong--; if (ref->strong == 0) { int ret; ret = binder_dec_node(ref->node, strong, 1); if (ret) return ret; } } else { if (ref->weak == 0) { binder_user_error("binder: %d invalid dec weak, " "ref %d desc %d s %d w %d\n", ref->proc->pid, ref->debug_id, ref->desc, ref->strong, ref->weak); return -EINVAL; } ref->weak--; } if (ref->strong == 0 && ref->weak == 0) binder_delete_ref(ref); return 0; } static void binder_pop_transaction(struct binder_thread *target_thread, struct binder_transaction *t) { if (target_thread) { BUG_ON(target_thread->transaction_stack != t); BUG_ON(target_thread->transaction_stack->from != target_thread); target_thread->transaction_stack = target_thread->transaction_stack->from_parent; t->from = NULL; } t->need_reply = 0; if (t->buffer) t->buffer->transaction = NULL; kfree(t); binder_stats_deleted(BINDER_STAT_TRANSACTION); } static void binder_send_failed_reply(struct binder_transaction *t, uint32_t error_code) { struct binder_thread *target_thread; BUG_ON(t->flags & TF_ONE_WAY); while (1) { target_thread = t->from; if (target_thread) { if (target_thread->return_error != BR_OK && target_thread->return_error2 == BR_OK) { target_thread->return_error2 = target_thread->return_error; 
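/* park the pending error in the secondary slot so this reply's
 * error code can still be delivered */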
target_thread->return_error = BR_OK; } if (target_thread->return_error == BR_OK) { binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, "binder: send failed reply for " "transaction %d to %d:%d\n", t->debug_id, target_thread->proc->pid, target_thread->pid); binder_pop_transaction(target_thread, t); target_thread->return_error = error_code; wake_up_interruptible(&target_thread->wait); } else { printk(KERN_ERR "binder: reply failed, target " "thread, %d:%d, has error code %d " "already\n", target_thread->proc->pid, target_thread->pid, target_thread->return_error); } return; } else { struct binder_transaction *next = t->from_parent; binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, "binder: send failed reply " "for transaction %d, target dead\n", t->debug_id); binder_pop_transaction(target_thread, t); if (next == NULL) { binder_debug(BINDER_DEBUG_DEAD_BINDER, "binder: reply failed," " no target thread at root\n"); return; } t = next; binder_debug(BINDER_DEBUG_DEAD_BINDER, "binder: reply failed, no target " "thread -- retry %d\n", t->debug_id); } } } static void binder_transaction_buffer_release(struct binder_proc *proc, struct binder_buffer *buffer, size_t *failed_at) { size_t *offp, *off_end; int debug_id = buffer->debug_id; binder_debug(BINDER_DEBUG_TRANSACTION, "binder: %d buffer release %d, size %zd-%zd, failed at %p\n", proc->pid, buffer->debug_id, buffer->data_size, buffer->offsets_size, failed_at); if (buffer->target_node) binder_dec_node(buffer->target_node, 1, 0); offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *))); if (failed_at) off_end = failed_at; else off_end = (void *)offp + buffer->offsets_size; for (; offp < off_end; offp++) { struct flat_binder_object *fp; if (*offp > buffer->data_size - sizeof(*fp) || buffer->data_size < sizeof(*fp) || !IS_ALIGNED(*offp, sizeof(void *))) { printk(KERN_ERR "binder: transaction release %d bad" "offset %zd, size %zd\n", debug_id, *offp, buffer->data_size); continue; } fp = (struct flat_binder_object *)(buffer->data + *offp); switch (fp->type) { case BINDER_TYPE_BINDER: case BINDER_TYPE_WEAK_BINDER: { struct binder_node *node = binder_get_node(proc, fp->binder); if (node == NULL) { printk(KERN_ERR "binder: transaction release %d" " bad node %p\n", debug_id, fp->binder); break; } binder_debug(BINDER_DEBUG_TRANSACTION, " node %d u%p\n", node->debug_id, node->ptr); binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0); } break; case BINDER_TYPE_HANDLE: case BINDER_TYPE_WEAK_HANDLE: { struct binder_ref *ref = binder_get_ref(proc, fp->handle); if (ref == NULL) { printk(KERN_ERR "binder: transaction release %d" " bad handle %ld\n", debug_id, fp->handle); break; } binder_debug(BINDER_DEBUG_TRANSACTION, " ref %d desc %d (node %d)\n", ref->debug_id, ref->desc, ref->node->debug_id); binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE); } break; case BINDER_TYPE_FD: binder_debug(BINDER_DEBUG_TRANSACTION, " fd %ld\n", fp->handle); if (failed_at) task_close_fd(proc, fp->handle); break; default: printk(KERN_ERR "binder: transaction release %d bad " "object type %lx\n", debug_id, fp->type); break; } } } static void binder_transaction(struct binder_proc *proc, struct binder_thread *thread, struct binder_transaction_data *tr, int reply) { struct binder_transaction *t; struct binder_work *tcomplete; size_t *offp, *off_end; struct binder_proc *target_proc; struct binder_thread *target_thread = NULL; struct binder_node *target_node = NULL; struct list_head *target_list; wait_queue_head_t *target_wait; struct binder_transaction *in_reply_to = NULL; 
struct binder_transaction_log_entry *e; uint32_t return_error; e = binder_transaction_log_add(&binder_transaction_log); e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY); e->from_proc = proc->pid; e->from_thread = thread->pid; e->target_handle = tr->target.handle; e->data_size = tr->data_size; e->offsets_size = tr->offsets_size; if (reply) { in_reply_to = thread->transaction_stack; if (in_reply_to == NULL) { binder_user_error("binder: %d:%d got reply transaction " "with no transaction stack\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; goto err_empty_call_stack; } binder_set_nice(in_reply_to->saved_priority); if (in_reply_to->to_thread != thread) { binder_user_error("binder: %d:%d got reply transaction " "with bad transaction stack," " transaction %d has target %d:%d\n", proc->pid, thread->pid, in_reply_to->debug_id, in_reply_to->to_proc ? in_reply_to->to_proc->pid : 0, in_reply_to->to_thread ? in_reply_to->to_thread->pid : 0); return_error = BR_FAILED_REPLY; in_reply_to = NULL; goto err_bad_call_stack; } thread->transaction_stack = in_reply_to->to_parent; target_thread = in_reply_to->from; if (target_thread == NULL) { return_error = BR_DEAD_REPLY; goto err_dead_binder; } if (target_thread->transaction_stack != in_reply_to) { binder_user_error("binder: %d:%d got reply transaction " "with bad target transaction stack %d, " "expected %d\n", proc->pid, thread->pid, target_thread->transaction_stack ? target_thread->transaction_stack->debug_id : 0, in_reply_to->debug_id); return_error = BR_FAILED_REPLY; in_reply_to = NULL; target_thread = NULL; goto err_dead_binder; } target_proc = target_thread->proc; } else { if (tr->target.handle) { struct binder_ref *ref; ref = binder_get_ref(proc, tr->target.handle); if (ref == NULL) { binder_user_error("binder: %d:%d got " "transaction to invalid handle\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; goto err_invalid_target_handle; } target_node = ref->node; } else { target_node = binder_context_mgr_node; if (target_node == NULL) { return_error = BR_DEAD_REPLY; goto err_no_context_mgr_node; } } e->to_node = target_node->debug_id; target_proc = target_node->proc; if (target_proc == NULL) { return_error = BR_DEAD_REPLY; goto err_dead_binder; } if (security_binder_transaction(proc->tsk, target_proc->tsk) < 0) { return_error = BR_FAILED_REPLY; goto err_invalid_target_handle; } if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { struct binder_transaction *tmp; tmp = thread->transaction_stack; if (tmp->to_thread != thread) { binder_user_error("binder: %d:%d got new " "transaction with bad transaction stack" ", transaction %d has target %d:%d\n", proc->pid, thread->pid, tmp->debug_id, tmp->to_proc ? tmp->to_proc->pid : 0, tmp->to_thread ? 
tmp->to_thread->pid : 0); return_error = BR_FAILED_REPLY; goto err_bad_call_stack; } while (tmp) { if (tmp->from && tmp->from->proc == target_proc) target_thread = tmp->from; tmp = tmp->from_parent; } } } if (target_thread) { e->to_thread = target_thread->pid; target_list = &target_thread->todo; target_wait = &target_thread->wait; } else { target_list = &target_proc->todo; target_wait = &target_proc->wait; } e->to_proc = target_proc->pid; /* TODO: reuse incoming transaction for reply */ t = kzalloc(sizeof(*t), GFP_KERNEL); if (t == NULL) { return_error = BR_FAILED_REPLY; goto err_alloc_t_failed; } binder_stats_created(BINDER_STAT_TRANSACTION); tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); if (tcomplete == NULL) { return_error = BR_FAILED_REPLY; goto err_alloc_tcomplete_failed; } binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); t->debug_id = ++binder_last_id; e->debug_id = t->debug_id; if (reply) binder_debug(BINDER_DEBUG_TRANSACTION, "binder: %d:%d BC_REPLY %d -> %d:%d, " "data %p-%p size %zd-%zd\n", proc->pid, thread->pid, t->debug_id, target_proc->pid, target_thread->pid, tr->data.ptr.buffer, tr->data.ptr.offsets, tr->data_size, tr->offsets_size); else binder_debug(BINDER_DEBUG_TRANSACTION, "binder: %d:%d BC_TRANSACTION %d -> " "%d - node %d, data %p-%p size %zd-%zd\n", proc->pid, thread->pid, t->debug_id, target_proc->pid, target_node->debug_id, tr->data.ptr.buffer, tr->data.ptr.offsets, tr->data_size, tr->offsets_size); if (!reply && !(tr->flags & TF_ONE_WAY)) t->from = thread; else t->from = NULL; t->sender_euid = proc->tsk->cred->euid; t->to_proc = target_proc; t->to_thread = target_thread; t->code = tr->code; t->flags = tr->flags; t->priority = task_nice(current); trace_binder_transaction(reply, t, target_node); t->buffer = binder_alloc_buf(target_proc, tr->data_size, tr->offsets_size, !reply && (t->flags & TF_ONE_WAY)); if (t->buffer == NULL) { return_error = BR_FAILED_REPLY; goto err_binder_alloc_buf_failed; } t->buffer->allow_user_free = 0; t->buffer->debug_id = t->debug_id; t->buffer->transaction = t; t->buffer->target_node = target_node; trace_binder_transaction_alloc_buf(t->buffer); if (target_node) binder_inc_node(target_node, 1, 0, NULL); offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *))); if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) { binder_user_error("binder: %d:%d got transaction with invalid " "data ptr\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; goto err_copy_data_failed; } if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) { binder_user_error("binder: %d:%d got transaction with invalid " "offsets ptr\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; goto err_copy_data_failed; } if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) { binder_user_error("binder: %d:%d got transaction with " "invalid offsets size, %zd\n", proc->pid, thread->pid, tr->offsets_size); return_error = BR_FAILED_REPLY; goto err_bad_offset; } off_end = (void *)offp + tr->offsets_size; for (; offp < off_end; offp++) { struct flat_binder_object *fp; if (*offp > t->buffer->data_size - sizeof(*fp) || t->buffer->data_size < sizeof(*fp) || !IS_ALIGNED(*offp, sizeof(void *))) { binder_user_error("binder: %d:%d got transaction with " "invalid offset, %zd\n", proc->pid, thread->pid, *offp); return_error = BR_FAILED_REPLY; goto err_bad_offset; } fp = (struct flat_binder_object *)(t->buffer->data + *offp); switch (fp->type) { case BINDER_TYPE_BINDER: case BINDER_TYPE_WEAK_BINDER: { struct binder_ref 
*ref; struct binder_node *node = binder_get_node(proc, fp->binder); if (node == NULL) { node = binder_new_node(proc, fp->binder, fp->cookie); if (node == NULL) { return_error = BR_FAILED_REPLY; goto err_binder_new_node_failed; } node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK; node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); } if (fp->cookie != node->cookie) { binder_user_error("binder: %d:%d sending u%p " "node %d, cookie mismatch %p != %p\n", proc->pid, thread->pid, fp->binder, node->debug_id, fp->cookie, node->cookie); goto err_binder_get_ref_for_node_failed; } if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { return_error = BR_FAILED_REPLY; goto err_binder_get_ref_for_node_failed; } ref = binder_get_ref_for_node(target_proc, node); if (ref == NULL) { return_error = BR_FAILED_REPLY; goto err_binder_get_ref_for_node_failed; } if (fp->type == BINDER_TYPE_BINDER) fp->type = BINDER_TYPE_HANDLE; else fp->type = BINDER_TYPE_WEAK_HANDLE; fp->handle = ref->desc; binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, &thread->todo); trace_binder_transaction_node_to_ref(t, node, ref); binder_debug(BINDER_DEBUG_TRANSACTION, " node %d u%p -> ref %d desc %d\n", node->debug_id, node->ptr, ref->debug_id, ref->desc); } break; case BINDER_TYPE_HANDLE: case BINDER_TYPE_WEAK_HANDLE: { struct binder_ref *ref = binder_get_ref(proc, fp->handle); if (ref == NULL) { binder_user_error("binder: %d:%d got " "transaction with invalid " "handle, %ld\n", proc->pid, thread->pid, fp->handle); return_error = BR_FAILED_REPLY; goto err_binder_get_ref_failed; } if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { return_error = BR_FAILED_REPLY; goto err_binder_get_ref_failed; } if (ref->node->proc == target_proc) { if (fp->type == BINDER_TYPE_HANDLE) fp->type = BINDER_TYPE_BINDER; else fp->type = BINDER_TYPE_WEAK_BINDER; fp->binder = ref->node->ptr; fp->cookie = ref->node->cookie; binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL); trace_binder_transaction_ref_to_node(t, ref); binder_debug(BINDER_DEBUG_TRANSACTION, " ref %d desc %d -> node %d u%p\n", ref->debug_id, ref->desc, ref->node->debug_id, ref->node->ptr); } else { struct binder_ref *new_ref; new_ref = binder_get_ref_for_node(target_proc, ref->node); if (new_ref == NULL) { return_error = BR_FAILED_REPLY; goto err_binder_get_ref_for_node_failed; } fp->handle = new_ref->desc; binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL); trace_binder_transaction_ref_to_ref(t, ref, new_ref); binder_debug(BINDER_DEBUG_TRANSACTION, " ref %d desc %d -> ref %d desc %d (node %d)\n", ref->debug_id, ref->desc, new_ref->debug_id, new_ref->desc, ref->node->debug_id); } } break; case BINDER_TYPE_FD: { int target_fd; struct file *file; if (reply) { if (!(in_reply_to->flags & TF_ACCEPT_FDS)) { binder_user_error("binder: %d:%d got reply with fd, %ld, but target does not allow fds\n", proc->pid, thread->pid, fp->handle); return_error = BR_FAILED_REPLY; goto err_fd_not_allowed; } } else if (!target_node->accept_fds) { binder_user_error("binder: %d:%d got transaction with fd, %ld, but target does not allow fds\n", proc->pid, thread->pid, fp->handle); return_error = BR_FAILED_REPLY; goto err_fd_not_allowed; } file = fget(fp->handle); if (file == NULL) { binder_user_error("binder: %d:%d got transaction with invalid fd, %ld\n", proc->pid, thread->pid, fp->handle); return_error = BR_FAILED_REPLY; goto err_fget_failed; } if (security_binder_transfer_file(proc->tsk, target_proc->tsk, file) < 0) { fput(file); 
return_error = BR_FAILED_REPLY; goto err_get_unused_fd_failed; } target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); if (target_fd < 0) { fput(file); return_error = BR_FAILED_REPLY; goto err_get_unused_fd_failed; } task_fd_install(target_proc, target_fd, file); trace_binder_transaction_fd(t, fp->handle, target_fd); binder_debug(BINDER_DEBUG_TRANSACTION, " fd %ld -> %d\n", fp->handle, target_fd); /* TODO: fput? */ fp->handle = target_fd; } break; default: binder_user_error("binder: %d:%d got transactio" "n with invalid object type, %lx\n", proc->pid, thread->pid, fp->type); return_error = BR_FAILED_REPLY; goto err_bad_object_type; } } if (reply) { BUG_ON(t->buffer->async_transaction != 0); binder_pop_transaction(target_thread, in_reply_to); } else if (!(t->flags & TF_ONE_WAY)) { BUG_ON(t->buffer->async_transaction != 0); t->need_reply = 1; t->from_parent = thread->transaction_stack; thread->transaction_stack = t; } else { BUG_ON(target_node == NULL); BUG_ON(t->buffer->async_transaction != 1); if (target_node->has_async_transaction) { target_list = &target_node->async_todo; target_wait = NULL; } else target_node->has_async_transaction = 1; } t->work.type = BINDER_WORK_TRANSACTION; list_add_tail(&t->work.entry, target_list); tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; list_add_tail(&tcomplete->entry, &thread->todo); if (target_wait) wake_up_interruptible(target_wait); return; err_get_unused_fd_failed: err_fget_failed: err_fd_not_allowed: err_binder_get_ref_for_node_failed: err_binder_get_ref_failed: err_binder_new_node_failed: err_bad_object_type: err_bad_offset: err_copy_data_failed: trace_binder_transaction_failed_buffer_release(t->buffer); binder_transaction_buffer_release(target_proc, t->buffer, offp); t->buffer->transaction = NULL; binder_free_buf(target_proc, t->buffer); err_binder_alloc_buf_failed: kfree(tcomplete); binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); err_alloc_tcomplete_failed: kfree(t); binder_stats_deleted(BINDER_STAT_TRANSACTION); err_alloc_t_failed: err_bad_call_stack: err_empty_call_stack: err_dead_binder: err_invalid_target_handle: err_no_context_mgr_node: binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, "binder: %d:%d transaction failed %d, size %zd-%zd\n", proc->pid, thread->pid, return_error, tr->data_size, tr->offsets_size); { struct binder_transaction_log_entry *fe; fe = binder_transaction_log_add(&binder_transaction_log_failed); *fe = *e; } BUG_ON(thread->return_error != BR_OK); if (in_reply_to) { thread->return_error = BR_TRANSACTION_COMPLETE; binder_send_failed_reply(in_reply_to, return_error); } else thread->return_error = return_error; } int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, void __user *buffer, int size, signed long *consumed) { uint32_t cmd; void __user *ptr = buffer + *consumed; void __user *end = buffer + size; while (ptr < end && thread->return_error == BR_OK) { if (get_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); trace_binder_command(cmd); if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { binder_stats.bc[_IOC_NR(cmd)]++; proc->stats.bc[_IOC_NR(cmd)]++; thread->stats.bc[_IOC_NR(cmd)]++; } switch (cmd) { case BC_INCREFS: case BC_ACQUIRE: case BC_RELEASE: case BC_DECREFS: { uint32_t target; struct binder_ref *ref; const char *debug_string; if (get_user(target, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); if (target == 0 && binder_context_mgr_node && (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) { ref = binder_get_ref_for_node(proc, 
binder_context_mgr_node); if (ref->desc != target) { binder_user_error("binder: %d:" "%d tried to acquire " "reference to desc 0, " "got %d instead\n", proc->pid, thread->pid, ref->desc); } } else ref = binder_get_ref(proc, target); if (ref == NULL) { binder_user_error("binder: %d:%d refcou" "nt change on invalid ref %d\n", proc->pid, thread->pid, target); break; } switch (cmd) { case BC_INCREFS: debug_string = "IncRefs"; binder_inc_ref(ref, 0, NULL); break; case BC_ACQUIRE: debug_string = "Acquire"; binder_inc_ref(ref, 1, NULL); break; case BC_RELEASE: debug_string = "Release"; binder_dec_ref(ref, 1); break; case BC_DECREFS: default: debug_string = "DecRefs"; binder_dec_ref(ref, 0); break; } binder_debug(BINDER_DEBUG_USER_REFS, "binder: %d:%d %s ref %d desc %d s %d w %d for node %d\n", proc->pid, thread->pid, debug_string, ref->debug_id, ref->desc, ref->strong, ref->weak, ref->node->debug_id); break; } case BC_INCREFS_DONE: case BC_ACQUIRE_DONE: { void __user *node_ptr; void *cookie; struct binder_node *node; if (get_user(node_ptr, (void * __user *)ptr)) return -EFAULT; ptr += sizeof(void *); if (get_user(cookie, (void * __user *)ptr)) return -EFAULT; ptr += sizeof(void *); node = binder_get_node(proc, node_ptr); if (node == NULL) { binder_user_error("binder: %d:%d " "%s u%p no match\n", proc->pid, thread->pid, cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", node_ptr); break; } if (cookie != node->cookie) { binder_user_error("binder: %d:%d %s u%p node %d" " cookie mismatch %p != %p\n", proc->pid, thread->pid, cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", node_ptr, node->debug_id, cookie, node->cookie); break; } if (cmd == BC_ACQUIRE_DONE) { if (node->pending_strong_ref == 0) { binder_user_error("binder: %d:%d " "BC_ACQUIRE_DONE node %d has " "no pending acquire request\n", proc->pid, thread->pid, node->debug_id); break; } node->pending_strong_ref = 0; } else { if (node->pending_weak_ref == 0) { binder_user_error("binder: %d:%d " "BC_INCREFS_DONE node %d has " "no pending increfs request\n", proc->pid, thread->pid, node->debug_id); break; } node->pending_weak_ref = 0; } binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0); binder_debug(BINDER_DEBUG_USER_REFS, "binder: %d:%d %s node %d ls %d lw %d\n", proc->pid, thread->pid, cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", node->debug_id, node->local_strong_refs, node->local_weak_refs); break; } case BC_ATTEMPT_ACQUIRE: printk(KERN_ERR "binder: BC_ATTEMPT_ACQUIRE not supported\n"); return -EINVAL; case BC_ACQUIRE_RESULT: printk(KERN_ERR "binder: BC_ACQUIRE_RESULT not supported\n"); return -EINVAL; case BC_FREE_BUFFER: { void __user *data_ptr; struct binder_buffer *buffer; if (get_user(data_ptr, (void * __user *)ptr)) return -EFAULT; ptr += sizeof(void *); buffer = binder_buffer_lookup(proc, data_ptr); if (buffer == NULL) { binder_user_error("binder: %d:%d " "BC_FREE_BUFFER u%p no match\n", proc->pid, thread->pid, data_ptr); break; } if (!buffer->allow_user_free) { binder_user_error("binder: %d:%d " "BC_FREE_BUFFER u%p matched " "unreturned buffer\n", proc->pid, thread->pid, data_ptr); break; } binder_debug(BINDER_DEBUG_FREE_BUFFER, "binder: %d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n", proc->pid, thread->pid, data_ptr, buffer->debug_id, buffer->transaction ? 
"active" : "finished"); if (buffer->transaction) { buffer->transaction->buffer = NULL; buffer->transaction = NULL; } if (buffer->async_transaction && buffer->target_node) { BUG_ON(!buffer->target_node->has_async_transaction); if (list_empty(&buffer->target_node->async_todo)) buffer->target_node->has_async_transaction = 0; else list_move_tail(buffer->target_node->async_todo.next, &thread->todo); } trace_binder_transaction_buffer_release(buffer); binder_transaction_buffer_release(proc, buffer, NULL); binder_free_buf(proc, buffer); break; } case BC_TRANSACTION: case BC_REPLY: { struct binder_transaction_data tr; if (copy_from_user(&tr, ptr, sizeof(tr))) return -EFAULT; ptr += sizeof(tr); binder_transaction(proc, thread, &tr, cmd == BC_REPLY); break; } case BC_REGISTER_LOOPER: binder_debug(BINDER_DEBUG_THREADS, "binder: %d:%d BC_REGISTER_LOOPER\n", proc->pid, thread->pid); if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { thread->looper |= BINDER_LOOPER_STATE_INVALID; binder_user_error("binder: %d:%d ERROR:" " BC_REGISTER_LOOPER called " "after BC_ENTER_LOOPER\n", proc->pid, thread->pid); } else if (proc->requested_threads == 0) { thread->looper |= BINDER_LOOPER_STATE_INVALID; binder_user_error("binder: %d:%d ERROR:" " BC_REGISTER_LOOPER called " "without request\n", proc->pid, thread->pid); } else { proc->requested_threads--; proc->requested_threads_started++; } thread->looper |= BINDER_LOOPER_STATE_REGISTERED; break; case BC_ENTER_LOOPER: binder_debug(BINDER_DEBUG_THREADS, "binder: %d:%d BC_ENTER_LOOPER\n", proc->pid, thread->pid); if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { thread->looper |= BINDER_LOOPER_STATE_INVALID; binder_user_error("binder: %d:%d ERROR:" " BC_ENTER_LOOPER called after " "BC_REGISTER_LOOPER\n", proc->pid, thread->pid); } thread->looper |= BINDER_LOOPER_STATE_ENTERED; break; case BC_EXIT_LOOPER: binder_debug(BINDER_DEBUG_THREADS, "binder: %d:%d BC_EXIT_LOOPER\n", proc->pid, thread->pid); thread->looper |= BINDER_LOOPER_STATE_EXITED; break; case BC_REQUEST_DEATH_NOTIFICATION: case BC_CLEAR_DEATH_NOTIFICATION: { uint32_t target; void __user *cookie; struct binder_ref *ref; struct binder_ref_death *death; if (get_user(target, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); if (get_user(cookie, (void __user * __user *)ptr)) return -EFAULT; ptr += sizeof(void *); ref = binder_get_ref(proc, target); if (ref == NULL) { binder_user_error("binder: %d:%d %s " "invalid ref %d\n", proc->pid, thread->pid, cmd == BC_REQUEST_DEATH_NOTIFICATION ? "BC_REQUEST_DEATH_NOTIFICATION" : "BC_CLEAR_DEATH_NOTIFICATION", target); break; } binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, "binder: %d:%d %s %p ref %d desc %d s %d w %d for node %d\n", proc->pid, thread->pid, cmd == BC_REQUEST_DEATH_NOTIFICATION ? 
"BC_REQUEST_DEATH_NOTIFICATION" : "BC_CLEAR_DEATH_NOTIFICATION", cookie, ref->debug_id, ref->desc, ref->strong, ref->weak, ref->node->debug_id); if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { if (ref->death) { binder_user_error("binder: %d:%" "d BC_REQUEST_DEATH_NOTI" "FICATION death notific" "ation already set\n", proc->pid, thread->pid); break; } death = kzalloc(sizeof(*death), GFP_KERNEL); if (death == NULL) { thread->return_error = BR_ERROR; binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, "binder: %d:%d " "BC_REQUEST_DEATH_NOTIFICATION failed\n", proc->pid, thread->pid); break; } binder_stats_created(BINDER_STAT_DEATH); INIT_LIST_HEAD(&death->work.entry); death->cookie = cookie; ref->death = death; if (ref->node->proc == NULL) { ref->death->work.type = BINDER_WORK_DEAD_BINDER; if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { list_add_tail(&ref->death->work.entry, &thread->todo); } else { list_add_tail(&ref->death->work.entry, &proc->todo); wake_up_interruptible(&proc->wait); } } } else { if (ref->death == NULL) { binder_user_error("binder: %d:%" "d BC_CLEAR_DEATH_NOTIFI" "CATION death notificat" "ion not active\n", proc->pid, thread->pid); break; } death = ref->death; if (death->cookie != cookie) { binder_user_error("binder: %d:%" "d BC_CLEAR_DEATH_NOTIFI" "CATION death notificat" "ion cookie mismatch " "%p != %p\n", proc->pid, thread->pid, death->cookie, cookie); break; } ref->death = NULL; if (list_empty(&death->work.entry)) { death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { list_add_tail(&death->work.entry, &thread->todo); } else { list_add_tail(&death->work.entry, &proc->todo); wake_up_interruptible(&proc->wait); } } else { BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER); death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; } } } break; case BC_DEAD_BINDER_DONE: { struct binder_work *w; void __user *cookie; struct binder_ref_death *death = NULL; if (get_user(cookie, (void __user * __user *)ptr)) return -EFAULT; ptr += sizeof(void *); list_for_each_entry(w, &proc->delivered_death, entry) { struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work); if (tmp_death->cookie == cookie) { death = tmp_death; break; } } binder_debug(BINDER_DEBUG_DEAD_BINDER, "binder: %d:%d BC_DEAD_BINDER_DONE %p found %p\n", proc->pid, thread->pid, cookie, death); if (death == NULL) { binder_user_error("binder: %d:%d BC_DEAD" "_BINDER_DONE %p not found\n", proc->pid, thread->pid, cookie); break; } list_del_init(&death->work.entry); if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { list_add_tail(&death->work.entry, &thread->todo); } else { list_add_tail(&death->work.entry, &proc->todo); wake_up_interruptible(&proc->wait); } } } break; default: printk(KERN_ERR "binder: %d:%d unknown command %d\n", proc->pid, thread->pid, cmd); return -EINVAL; } *consumed = ptr - buffer; } return 0; } void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread, uint32_t cmd) { trace_binder_return(cmd); if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { binder_stats.br[_IOC_NR(cmd)]++; proc->stats.br[_IOC_NR(cmd)]++; thread->stats.br[_IOC_NR(cmd)]++; } } static int binder_has_proc_work(struct binder_proc *proc, struct binder_thread *thread) { return !list_empty(&proc->todo) || (thread->looper & 
BINDER_LOOPER_STATE_NEED_RETURN); } static int binder_has_thread_work(struct binder_thread *thread) { return !list_empty(&thread->todo) || thread->return_error != BR_OK || (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); } static int binder_thread_read(struct binder_proc *proc, struct binder_thread *thread, void __user *buffer, int size, signed long *consumed, int non_block) { void __user *ptr = buffer + *consumed; void __user *end = buffer + size; int ret = 0; int wait_for_proc_work; if (*consumed == 0) { if (put_user(BR_NOOP, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); } retry: wait_for_proc_work = thread->transaction_stack == NULL && list_empty(&thread->todo); if (thread->return_error != BR_OK && ptr < end) { if (thread->return_error2 != BR_OK) { if (put_user(thread->return_error2, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); binder_stat_br(proc, thread, thread->return_error2); if (ptr == end) goto done; thread->return_error2 = BR_OK; } if (put_user(thread->return_error, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); binder_stat_br(proc, thread, thread->return_error); thread->return_error = BR_OK; goto done; } thread->looper |= BINDER_LOOPER_STATE_WAITING; if (wait_for_proc_work) proc->ready_threads++; binder_unlock(__func__); trace_binder_wait_for_work(wait_for_proc_work, !!thread->transaction_stack, !list_empty(&thread->todo)); if (wait_for_proc_work) { if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED))) { binder_user_error("binder: %d:%d ERROR: Thread waiting " "for process work before calling BC_REGISTER_" "LOOPER or BC_ENTER_LOOPER (state %x)\n", proc->pid, thread->pid, thread->looper); wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); } binder_set_nice(proc->default_priority); if (non_block) { if (!binder_has_proc_work(proc, thread)) ret = -EAGAIN; } else ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread)); } else { if (non_block) { if (!binder_has_thread_work(thread)) ret = -EAGAIN; } else ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread)); } binder_lock(__func__); if (wait_for_proc_work) proc->ready_threads--; thread->looper &= ~BINDER_LOOPER_STATE_WAITING; if (ret) return ret; while (1) { uint32_t cmd; struct binder_transaction_data tr; struct binder_work *w; struct binder_transaction *t = NULL; if (!list_empty(&thread->todo)) w = list_first_entry(&thread->todo, struct binder_work, entry); else if (!list_empty(&proc->todo) && wait_for_proc_work) w = list_first_entry(&proc->todo, struct binder_work, entry); else { if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */ goto retry; break; } if (end - ptr < sizeof(tr) + 4) break; switch (w->type) { case BINDER_WORK_TRANSACTION: { t = container_of(w, struct binder_transaction, work); } break; case BINDER_WORK_TRANSACTION_COMPLETE: { cmd = BR_TRANSACTION_COMPLETE; if (put_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); binder_stat_br(proc, thread, cmd); binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, "binder: %d:%d BR_TRANSACTION_COMPLETE\n", proc->pid, thread->pid); list_del(&w->entry); kfree(w); binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); } break; case BINDER_WORK_NODE: { struct binder_node *node = container_of(w, struct binder_node, work); uint32_t cmd = BR_NOOP; const char *cmd_name; int strong = node->internal_strong_refs || node->local_strong_refs; int weak = 
!hlist_empty(&node->refs) || node->local_weak_refs || strong; if (weak && !node->has_weak_ref) { cmd = BR_INCREFS; cmd_name = "BR_INCREFS"; node->has_weak_ref = 1; node->pending_weak_ref = 1; node->local_weak_refs++; } else if (strong && !node->has_strong_ref) { cmd = BR_ACQUIRE; cmd_name = "BR_ACQUIRE"; node->has_strong_ref = 1; node->pending_strong_ref = 1; node->local_strong_refs++; } else if (!strong && node->has_strong_ref) { cmd = BR_RELEASE; cmd_name = "BR_RELEASE"; node->has_strong_ref = 0; } else if (!weak && node->has_weak_ref) { cmd = BR_DECREFS; cmd_name = "BR_DECREFS"; node->has_weak_ref = 0; } if (cmd != BR_NOOP) { if (put_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); if (put_user(node->ptr, (void * __user *)ptr)) return -EFAULT; ptr += sizeof(void *); if (put_user(node->cookie, (void * __user *)ptr)) return -EFAULT; ptr += sizeof(void *); binder_stat_br(proc, thread, cmd); binder_debug(BINDER_DEBUG_USER_REFS, "binder: %d:%d %s %d u%p c%p\n", proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie); } else { list_del_init(&w->entry); if (!weak && !strong) { binder_debug(BINDER_DEBUG_INTERNAL_REFS, "binder: %d:%d node %d u%p c%p deleted\n", proc->pid, thread->pid, node->debug_id, node->ptr, node->cookie); rb_erase(&node->rb_node, &proc->nodes); kfree(node); binder_stats_deleted(BINDER_STAT_NODE); } else { binder_debug(BINDER_DEBUG_INTERNAL_REFS, "binder: %d:%d node %d u%p c%p state unchanged\n", proc->pid, thread->pid, node->debug_id, node->ptr, node->cookie); } } } break; case BINDER_WORK_DEAD_BINDER: case BINDER_WORK_DEAD_BINDER_AND_CLEAR: case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { struct binder_ref_death *death; uint32_t cmd; death = container_of(w, struct binder_ref_death, work); if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; else cmd = BR_DEAD_BINDER; if (put_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); if (put_user(death->cookie, (void * __user *)ptr)) return -EFAULT; ptr += sizeof(void *); binder_stat_br(proc, thread, cmd); binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, "binder: %d:%d %s %p\n", proc->pid, thread->pid, cmd == BR_DEAD_BINDER ? 
"BR_DEAD_BINDER" : "BR_CLEAR_DEATH_NOTIFICATION_DONE", death->cookie); if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { list_del(&w->entry); kfree(death); binder_stats_deleted(BINDER_STAT_DEATH); } else list_move(&w->entry, &proc->delivered_death); if (cmd == BR_DEAD_BINDER) goto done; /* DEAD_BINDER notifications can cause transactions */ } break; } if (!t) continue; BUG_ON(t->buffer == NULL); if (t->buffer->target_node) { struct binder_node *target_node = t->buffer->target_node; tr.target.ptr = target_node->ptr; tr.cookie = target_node->cookie; t->saved_priority = task_nice(current); if (t->priority < target_node->min_priority && !(t->flags & TF_ONE_WAY)) binder_set_nice(t->priority); else if (!(t->flags & TF_ONE_WAY) || t->saved_priority > target_node->min_priority) binder_set_nice(target_node->min_priority); cmd = BR_TRANSACTION; } else { tr.target.ptr = NULL; tr.cookie = NULL; cmd = BR_REPLY; } tr.code = t->code; tr.flags = t->flags; tr.sender_euid = t->sender_euid; if (t->from) { struct task_struct *sender = t->from->proc->tsk; tr.sender_pid = task_tgid_nr_ns(sender, current->nsproxy->pid_ns); } else { tr.sender_pid = 0; } tr.data_size = t->buffer->data_size; tr.offsets_size = t->buffer->offsets_size; tr.data.ptr.buffer = (void *)t->buffer->data + proc->user_buffer_offset; tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *)); if (put_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); if (copy_to_user(ptr, &tr, sizeof(tr))) return -EFAULT; ptr += sizeof(tr); trace_binder_transaction_received(t); binder_stat_br(proc, thread, cmd); binder_debug(BINDER_DEBUG_TRANSACTION, "binder: %d:%d %s %d %d:%d, cmd %d" "size %zd-%zd ptr %p-%p\n", proc->pid, thread->pid, (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : "BR_REPLY", t->debug_id, t->from ? t->from->proc->pid : 0, t->from ? 
t->from->pid : 0, cmd, t->buffer->data_size, t->buffer->offsets_size, tr.data.ptr.buffer, tr.data.ptr.offsets); list_del(&t->work.entry); t->buffer->allow_user_free = 1; if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) { t->to_parent = thread->transaction_stack; t->to_thread = thread; thread->transaction_stack = t; } else { t->buffer->transaction = NULL; kfree(t); binder_stats_deleted(BINDER_STAT_TRANSACTION); } break; } done: *consumed = ptr - buffer; if (proc->requested_threads + proc->ready_threads == 0 && proc->requested_threads_started < proc->max_threads && (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */ /*spawn a new thread if we leave this out */) { proc->requested_threads++; binder_debug(BINDER_DEBUG_THREADS, "binder: %d:%d BR_SPAWN_LOOPER\n", proc->pid, thread->pid); if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) return -EFAULT; binder_stat_br(proc, thread, BR_SPAWN_LOOPER); } return 0; } static void binder_release_work(struct list_head *list) { struct binder_work *w; while (!list_empty(list)) { w = list_first_entry(list, struct binder_work, entry); list_del_init(&w->entry); switch (w->type) { case BINDER_WORK_TRANSACTION: { struct binder_transaction *t; t = container_of(w, struct binder_transaction, work); if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) { binder_send_failed_reply(t, BR_DEAD_REPLY); } else { binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, "binder: undelivered transaction %d\n", t->debug_id); t->buffer->transaction = NULL; kfree(t); binder_stats_deleted(BINDER_STAT_TRANSACTION); } } break; case BINDER_WORK_TRANSACTION_COMPLETE: { binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, "binder: undelivered TRANSACTION_COMPLETE\n"); kfree(w); binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); } break; case BINDER_WORK_DEAD_BINDER_AND_CLEAR: case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { struct binder_ref_death *death; death = container_of(w, struct binder_ref_death, work); binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, "binder: undelivered death notification, %p\n", death->cookie); kfree(death); binder_stats_deleted(BINDER_STAT_DEATH); } break; default: pr_err("binder: unexpected work type, %d, not freed\n", w->type); break; } } } static struct binder_thread *binder_get_thread(struct binder_proc *proc) { struct binder_thread *thread = NULL; struct rb_node *parent = NULL; struct rb_node **p = &proc->threads.rb_node; while (*p) { parent = *p; thread = rb_entry(parent, struct binder_thread, rb_node); if (current->pid < thread->pid) p = &(*p)->rb_left; else if (current->pid > thread->pid) p = &(*p)->rb_right; else break; } if (*p == NULL) { thread = kzalloc(sizeof(*thread), GFP_KERNEL); if (thread == NULL) return NULL; binder_stats_created(BINDER_STAT_THREAD); thread->proc = proc; thread->pid = current->pid; init_waitqueue_head(&thread->wait); INIT_LIST_HEAD(&thread->todo); rb_link_node(&thread->rb_node, parent, p); rb_insert_color(&thread->rb_node, &proc->threads); thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; thread->return_error = BR_OK; thread->return_error2 = BR_OK; } return thread; } static int binder_free_thread(struct binder_proc *proc, struct binder_thread *thread) { struct binder_transaction *t; struct binder_transaction *send_reply = NULL; int active_transactions = 0; rb_erase(&thread->rb_node, &proc->threads); t = thread->transaction_stack; if (t && t->to_thread == thread) send_reply = t; while (t) { active_transactions++; binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, "binder: 
release %d:%d transaction %d " "%s, still active\n", proc->pid, thread->pid, t->debug_id, (t->to_thread == thread) ? "in" : "out"); if (t->to_thread == thread) { t->to_proc = NULL; t->to_thread = NULL; if (t->buffer) { t->buffer->transaction = NULL; t->buffer = NULL; } t = t->to_parent; } else if (t->from == thread) { t->from = NULL; t = t->from_parent; } else BUG(); } if (send_reply) binder_send_failed_reply(send_reply, BR_DEAD_REPLY); binder_release_work(&thread->todo); kfree(thread); binder_stats_deleted(BINDER_STAT_THREAD); return active_transactions; } static unsigned int binder_poll(struct file *filp, struct poll_table_struct *wait) { struct binder_proc *proc = filp->private_data; struct binder_thread *thread = NULL; int wait_for_proc_work; binder_lock(__func__); thread = binder_get_thread(proc); wait_for_proc_work = thread->transaction_stack == NULL && list_empty(&thread->todo) && thread->return_error == BR_OK; binder_unlock(__func__); if (wait_for_proc_work) { if (binder_has_proc_work(proc, thread)) return POLLIN; poll_wait(filp, &proc->wait, wait); if (binder_has_proc_work(proc, thread)) return POLLIN; } else { if (binder_has_thread_work(thread)) return POLLIN; poll_wait(filp, &thread->wait, wait); if (binder_has_thread_work(thread)) return POLLIN; } return 0; } static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { int ret; struct binder_proc *proc = filp->private_data; struct binder_thread *thread; unsigned int size = _IOC_SIZE(cmd); void __user *ubuf = (void __user *)arg; /*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/ trace_binder_ioctl(cmd, arg); ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); if (ret) goto err_unlocked; binder_lock(__func__); thread = binder_get_thread(proc); if (thread == NULL) { ret = -ENOMEM; goto err; } switch (cmd) { case BINDER_WRITE_READ: { struct binder_write_read bwr; if (size != sizeof(struct binder_write_read)) { ret = -EINVAL; goto err; } if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { ret = -EFAULT; goto err; } binder_debug(BINDER_DEBUG_READ_WRITE, "binder: %d:%d write %ld at %08lx, read %ld at %08lx\n", proc->pid, thread->pid, bwr.write_size, bwr.write_buffer, bwr.read_size, bwr.read_buffer); if (bwr.write_size > 0) { ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed); trace_binder_write_done(ret); if (ret < 0) { bwr.read_consumed = 0; if (copy_to_user(ubuf, &bwr, sizeof(bwr))) ret = -EFAULT; goto err; } } if (bwr.read_size > 0) { ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK); trace_binder_read_done(ret); if (!list_empty(&proc->todo)) wake_up_interruptible(&proc->wait); if (ret < 0) { if (copy_to_user(ubuf, &bwr, sizeof(bwr))) ret = -EFAULT; goto err; } } binder_debug(BINDER_DEBUG_READ_WRITE, "binder: %d:%d wrote %ld of %ld, read return %ld of %ld\n", proc->pid, thread->pid, bwr.write_consumed, bwr.write_size, bwr.read_consumed, bwr.read_size); if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { ret = -EFAULT; goto err; } break; } case BINDER_SET_MAX_THREADS: if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) { ret = -EINVAL; goto err; } break; case BINDER_SET_CONTEXT_MGR: if (binder_context_mgr_node != NULL) { printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR already set\n"); ret = -EBUSY; goto err; } ret = security_binder_set_context_mgr(proc->tsk); if (ret < 0) goto err; if 
(binder_context_mgr_uid != -1) { if (binder_context_mgr_uid != current->cred->euid) { printk(KERN_ERR "binder: BINDER_SET_" "CONTEXT_MGR bad uid %d != %d\n", current->cred->euid, binder_context_mgr_uid); ret = -EPERM; goto err; } } else binder_context_mgr_uid = current->cred->euid; binder_context_mgr_node = binder_new_node(proc, NULL, NULL); if (binder_context_mgr_node == NULL) { ret = -ENOMEM; goto err; } binder_context_mgr_node->local_weak_refs++; binder_context_mgr_node->local_strong_refs++; binder_context_mgr_node->has_strong_ref = 1; binder_context_mgr_node->has_weak_ref = 1; break; case BINDER_THREAD_EXIT: binder_debug(BINDER_DEBUG_THREADS, "binder: %d:%d exit\n", proc->pid, thread->pid); binder_free_thread(proc, thread); thread = NULL; break; case BINDER_VERSION: if (size != sizeof(struct binder_version)) { ret = -EINVAL; goto err; } if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) { ret = -EINVAL; goto err; } break; default: ret = -EINVAL; goto err; } ret = 0; err: if (thread) thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN; binder_unlock(__func__); wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); if (ret && ret != -ERESTARTSYS) printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); err_unlocked: trace_binder_ioctl_done(ret); return ret; } static void binder_vma_open(struct vm_area_struct *vma) { struct binder_proc *proc = vma->vm_private_data; binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder: %d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, (unsigned long)pgprot_val(vma->vm_page_prot)); } static void binder_vma_close(struct vm_area_struct *vma) { struct binder_proc *proc = vma->vm_private_data; binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder: %d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, (unsigned long)pgprot_val(vma->vm_page_prot)); proc->vma = NULL; proc->vma_vm_mm = NULL; binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); } static struct vm_operations_struct binder_vm_ops = { .open = binder_vma_open, .close = binder_vma_close, }; static int binder_mmap(struct file *filp, struct vm_area_struct *vma) { int ret; struct vm_struct *area; struct binder_proc *proc = filp->private_data; const char *failure_string; struct binder_buffer *buffer; if ((vma->vm_end - vma->vm_start) > SZ_4M) vma->vm_end = vma->vm_start + SZ_4M; binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, (unsigned long)pgprot_val(vma->vm_page_prot)); if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { ret = -EPERM; failure_string = "bad vm_flags"; goto err_bad_arg; } vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; mutex_lock(&binder_mmap_lock); if (proc->buffer) { ret = -EBUSY; failure_string = "already mapped"; goto err_already_mapped; } area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); if (area == NULL) { ret = -ENOMEM; failure_string = "get_vm_area"; goto err_get_vm_area_failed; } proc->buffer = area->addr; proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer; mutex_unlock(&binder_mmap_lock); #ifdef CONFIG_CPU_CACHE_VIPT if (cache_is_vipt_aliasing()) { while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) { 
printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer); vma->vm_start += PAGE_SIZE; } } #endif proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL); if (proc->pages == NULL) { ret = -ENOMEM; failure_string = "alloc page array"; goto err_alloc_pages_failed; } proc->buffer_size = vma->vm_end - vma->vm_start; vma->vm_ops = &binder_vm_ops; vma->vm_private_data = proc; if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) { ret = -ENOMEM; failure_string = "alloc small buf"; goto err_alloc_small_buf_failed; } buffer = proc->buffer; INIT_LIST_HEAD(&proc->buffers); list_add(&buffer->entry, &proc->buffers); buffer->free = 1; binder_insert_free_buffer(proc, buffer); proc->free_async_space = proc->buffer_size / 2; barrier(); proc->files = get_files_struct(proc->tsk); proc->vma = vma; proc->vma_vm_mm = vma->vm_mm; /*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/ return 0; err_alloc_small_buf_failed: kfree(proc->pages); proc->pages = NULL; err_alloc_pages_failed: mutex_lock(&binder_mmap_lock); vfree(proc->buffer); proc->buffer = NULL; err_get_vm_area_failed: err_already_mapped: mutex_unlock(&binder_mmap_lock); err_bad_arg: printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n", proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); return ret; } static int binder_open(struct inode *nodp, struct file *filp) { struct binder_proc *proc; binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n", current->group_leader->pid, current->pid); proc = kzalloc(sizeof(*proc), GFP_KERNEL); if (proc == NULL) return -ENOMEM; get_task_struct(current); proc->tsk = current; INIT_LIST_HEAD(&proc->todo); init_waitqueue_head(&proc->wait); proc->default_priority = task_nice(current); binder_lock(__func__); binder_stats_created(BINDER_STAT_PROC); hlist_add_head(&proc->proc_node, &binder_procs); proc->pid = current->group_leader->pid; INIT_LIST_HEAD(&proc->delivered_death); filp->private_data = proc; binder_unlock(__func__); if (binder_debugfs_dir_entry_proc) { char strbuf[11]; snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO, binder_debugfs_dir_entry_proc, proc, &binder_proc_fops); } return 0; } static int binder_flush(struct file *filp, fl_owner_t id) { struct binder_proc *proc = filp->private_data; binder_defer_work(proc, BINDER_DEFERRED_FLUSH); return 0; } static void binder_deferred_flush(struct binder_proc *proc) { struct rb_node *n; int wake_count = 0; for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; if (thread->looper & BINDER_LOOPER_STATE_WAITING) { wake_up_interruptible(&thread->wait); wake_count++; } } wake_up_interruptible_all(&proc->wait); binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_flush: %d woke %d threads\n", proc->pid, wake_count); } static int binder_release(struct inode *nodp, struct file *filp) { struct binder_proc *proc = filp->private_data; debugfs_remove(proc->debugfs_entry); binder_defer_work(proc, BINDER_DEFERRED_RELEASE); return 0; } static void binder_deferred_release(struct binder_proc *proc) { struct hlist_node *pos; struct binder_transaction *t; struct rb_node *n; int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count; BUG_ON(proc->vma); 
BUG_ON(proc->files); hlist_del(&proc->proc_node); if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) { binder_debug(BINDER_DEBUG_DEAD_BINDER, "binder_release: %d context_mgr_node gone\n", proc->pid); binder_context_mgr_node = NULL; } threads = 0; active_transactions = 0; while ((n = rb_first(&proc->threads))) { struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); threads++; active_transactions += binder_free_thread(proc, thread); } nodes = 0; incoming_refs = 0; while ((n = rb_first(&proc->nodes))) { struct binder_node *node = rb_entry(n, struct binder_node, rb_node); nodes++; rb_erase(&node->rb_node, &proc->nodes); list_del_init(&node->work.entry); binder_release_work(&node->async_todo); if (hlist_empty(&node->refs)) { kfree(node); binder_stats_deleted(BINDER_STAT_NODE); } else { struct binder_ref *ref; int death = 0; node->proc = NULL; node->local_strong_refs = 0; node->local_weak_refs = 0; hlist_add_head(&node->dead_node, &binder_dead_nodes); hlist_for_each_entry(ref, pos, &node->refs, node_entry) { incoming_refs++; if (ref->death) { death++; if (list_empty(&ref->death->work.entry)) { ref->death->work.type = BINDER_WORK_DEAD_BINDER; list_add_tail(&ref->death->work.entry, &ref->proc->todo); wake_up_interruptible(&ref->proc->wait); } else BUG(); } } binder_debug(BINDER_DEBUG_DEAD_BINDER, "binder: node %d now dead, " "refs %d, death %d\n", node->debug_id, incoming_refs, death); } } outgoing_refs = 0; while ((n = rb_first(&proc->refs_by_desc))) { struct binder_ref *ref = rb_entry(n, struct binder_ref, rb_node_desc); outgoing_refs++; binder_delete_ref(ref); } binder_release_work(&proc->todo); binder_release_work(&proc->delivered_death); buffers = 0; while ((n = rb_first(&proc->allocated_buffers))) { struct binder_buffer *buffer = rb_entry(n, struct binder_buffer, rb_node); t = buffer->transaction; if (t) { t->buffer = NULL; buffer->transaction = NULL; printk(KERN_ERR "binder: release proc %d, " "transaction %d, not freed\n", proc->pid, t->debug_id); /*BUG();*/ } binder_free_buf(proc, buffer); buffers++; } binder_stats_deleted(BINDER_STAT_PROC); page_count = 0; if (proc->pages) { int i; for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) { if (proc->pages[i]) { void *page_addr = proc->buffer + i * PAGE_SIZE; binder_debug(BINDER_DEBUG_BUFFER_ALLOC, "binder_release: %d: " "page %d at %p not freed\n", proc->pid, i, page_addr); unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); __free_page(proc->pages[i]); page_count++; } } kfree(proc->pages); vfree(proc->buffer); } put_task_struct(proc->tsk); binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_release: %d threads %d, nodes %d (ref %d), " "refs %d, active transactions %d, buffers %d, " "pages %d\n", proc->pid, threads, nodes, incoming_refs, outgoing_refs, active_transactions, buffers, page_count); kfree(proc); } static void binder_deferred_func(struct work_struct *work) { struct binder_proc *proc; struct files_struct *files; int defer; do { binder_lock(__func__); mutex_lock(&binder_deferred_lock); if (!hlist_empty(&binder_deferred_list)) { proc = hlist_entry(binder_deferred_list.first, struct binder_proc, deferred_work_node); hlist_del_init(&proc->deferred_work_node); defer = proc->deferred_work; proc->deferred_work = 0; } else { proc = NULL; defer = 0; } mutex_unlock(&binder_deferred_lock); files = NULL; if (defer & BINDER_DEFERRED_PUT_FILES) { files = proc->files; if (files) proc->files = NULL; } if (defer & BINDER_DEFERRED_FLUSH) binder_deferred_flush(proc); if (defer & BINDER_DEFERRED_RELEASE) 
binder_deferred_release(proc); /* frees proc */ binder_unlock(__func__); if (files) put_files_struct(files); } while (proc); } static DECLARE_WORK(binder_deferred_work, binder_deferred_func); static void binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) { mutex_lock(&binder_deferred_lock); proc->deferred_work |= defer; if (hlist_unhashed(&proc->deferred_work_node)) { hlist_add_head(&proc->deferred_work_node, &binder_deferred_list); queue_work(binder_deferred_workqueue, &binder_deferred_work); } mutex_unlock(&binder_deferred_lock); } static void print_binder_transaction(struct seq_file *m, const char *prefix, struct binder_transaction *t) { seq_printf(m, "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", prefix, t->debug_id, t, t->from ? t->from->proc->pid : 0, t->from ? t->from->pid : 0, t->to_proc ? t->to_proc->pid : 0, t->to_thread ? t->to_thread->pid : 0, t->code, t->flags, t->priority, t->need_reply); if (t->buffer == NULL) { seq_puts(m, " buffer free\n"); return; } if (t->buffer->target_node) seq_printf(m, " node %d", t->buffer->target_node->debug_id); seq_printf(m, " size %zd:%zd data %p\n", t->buffer->data_size, t->buffer->offsets_size, t->buffer->data); } static void print_binder_buffer(struct seq_file *m, const char *prefix, struct binder_buffer *buffer) { seq_printf(m, "%s %d: %p size %zd:%zd %s\n", prefix, buffer->debug_id, buffer->data, buffer->data_size, buffer->offsets_size, buffer->transaction ? "active" : "delivered"); } static void print_binder_work(struct seq_file *m, const char *prefix, const char *transaction_prefix, struct binder_work *w) { struct binder_node *node; struct binder_transaction *t; switch (w->type) { case BINDER_WORK_TRANSACTION: t = container_of(w, struct binder_transaction, work); print_binder_transaction(m, transaction_prefix, t); break; case BINDER_WORK_TRANSACTION_COMPLETE: seq_printf(m, "%stransaction complete\n", prefix); break; case BINDER_WORK_NODE: node = container_of(w, struct binder_node, work); seq_printf(m, "%snode work %d: u%p c%p\n", prefix, node->debug_id, node->ptr, node->cookie); break; case BINDER_WORK_DEAD_BINDER: seq_printf(m, "%shas dead binder\n", prefix); break; case BINDER_WORK_DEAD_BINDER_AND_CLEAR: seq_printf(m, "%shas cleared dead binder\n", prefix); break; case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: seq_printf(m, "%shas cleared death notification\n", prefix); break; default: seq_printf(m, "%sunknown work: type %d\n", prefix, w->type); break; } } static void print_binder_thread(struct seq_file *m, struct binder_thread *thread, int print_always) { struct binder_transaction *t; struct binder_work *w; size_t start_pos = m->count; size_t header_pos; seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper); header_pos = m->count; t = thread->transaction_stack; while (t) { if (t->from == thread) { print_binder_transaction(m, " outgoing transaction", t); t = t->from_parent; } else if (t->to_thread == thread) { print_binder_transaction(m, " incoming transaction", t); t = t->to_parent; } else { print_binder_transaction(m, " bad transaction", t); t = NULL; } } list_for_each_entry(w, &thread->todo, entry) { print_binder_work(m, " ", " pending transaction", w); } if (!print_always && m->count == header_pos) m->count = start_pos; } static void print_binder_node(struct seq_file *m, struct binder_node *node) { struct binder_ref *ref; struct hlist_node *pos; struct binder_work *w; int count; count = 0; hlist_for_each_entry(ref, pos, &node->refs, node_entry) count++; seq_printf(m, " node %d: u%p 
c%p hs %d hw %d ls %d lw %d is %d iw %d", node->debug_id, node->ptr, node->cookie, node->has_strong_ref, node->has_weak_ref, node->local_strong_refs, node->local_weak_refs, node->internal_strong_refs, count); if (count) { seq_puts(m, " proc"); hlist_for_each_entry(ref, pos, &node->refs, node_entry) seq_printf(m, " %d", ref->proc->pid); } seq_puts(m, "\n"); list_for_each_entry(w, &node->async_todo, entry) print_binder_work(m, " ", " pending async transaction", w); } static void print_binder_ref(struct seq_file *m, struct binder_ref *ref) { seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n", ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ", ref->node->debug_id, ref->strong, ref->weak, ref->death); } static void print_binder_proc(struct seq_file *m, struct binder_proc *proc, int print_all) { struct binder_work *w; struct rb_node *n; size_t start_pos = m->count; size_t header_pos; seq_printf(m, "proc %d\n", proc->pid); header_pos = m->count; for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) print_binder_thread(m, rb_entry(n, struct binder_thread, rb_node), print_all); for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { struct binder_node *node = rb_entry(n, struct binder_node, rb_node); if (print_all || node->has_async_transaction) print_binder_node(m, node); } if (print_all) { for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) print_binder_ref(m, rb_entry(n, struct binder_ref, rb_node_desc)); } for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) print_binder_buffer(m, " buffer", rb_entry(n, struct binder_buffer, rb_node)); list_for_each_entry(w, &proc->todo, entry) print_binder_work(m, " ", " pending transaction", w); list_for_each_entry(w, &proc->delivered_death, entry) { seq_puts(m, " has delivered dead binder\n"); break; } if (!print_all && m->count == header_pos) m->count = start_pos; } static const char *binder_return_strings[] = { "BR_ERROR", "BR_OK", "BR_TRANSACTION", "BR_REPLY", "BR_ACQUIRE_RESULT", "BR_DEAD_REPLY", "BR_TRANSACTION_COMPLETE", "BR_INCREFS", "BR_ACQUIRE", "BR_RELEASE", "BR_DECREFS", "BR_ATTEMPT_ACQUIRE", "BR_NOOP", "BR_SPAWN_LOOPER", "BR_FINISHED", "BR_DEAD_BINDER", "BR_CLEAR_DEATH_NOTIFICATION_DONE", "BR_FAILED_REPLY" }; static const char *binder_command_strings[] = { "BC_TRANSACTION", "BC_REPLY", "BC_ACQUIRE_RESULT", "BC_FREE_BUFFER", "BC_INCREFS", "BC_ACQUIRE", "BC_RELEASE", "BC_DECREFS", "BC_INCREFS_DONE", "BC_ACQUIRE_DONE", "BC_ATTEMPT_ACQUIRE", "BC_REGISTER_LOOPER", "BC_ENTER_LOOPER", "BC_EXIT_LOOPER", "BC_REQUEST_DEATH_NOTIFICATION", "BC_CLEAR_DEATH_NOTIFICATION", "BC_DEAD_BINDER_DONE" }; static const char *binder_objstat_strings[] = { "proc", "thread", "node", "ref", "death", "transaction", "transaction_complete" }; static void print_binder_stats(struct seq_file *m, const char *prefix, struct binder_stats *stats) { int i; BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != ARRAY_SIZE(binder_command_strings)); for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { if (stats->bc[i]) seq_printf(m, "%s%s: %d\n", prefix, binder_command_strings[i], stats->bc[i]); } BUILD_BUG_ON(ARRAY_SIZE(stats->br) != ARRAY_SIZE(binder_return_strings)); for (i = 0; i < ARRAY_SIZE(stats->br); i++) { if (stats->br[i]) seq_printf(m, "%s%s: %d\n", prefix, binder_return_strings[i], stats->br[i]); } BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != ARRAY_SIZE(binder_objstat_strings)); BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != ARRAY_SIZE(stats->obj_deleted)); for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { if 
(stats->obj_created[i] || stats->obj_deleted[i]) seq_printf(m, "%s%s: active %d total %d\n", prefix, binder_objstat_strings[i], stats->obj_created[i] - stats->obj_deleted[i], stats->obj_created[i]); } } static void print_binder_proc_stats(struct seq_file *m, struct binder_proc *proc) { struct binder_work *w; struct rb_node *n; int count, strong, weak; seq_printf(m, "proc %d\n", proc->pid); count = 0; for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) count++; seq_printf(m, " threads: %d\n", count); seq_printf(m, " requested threads: %d+%d/%d\n" " ready threads %d\n" " free async space %zd\n", proc->requested_threads, proc->requested_threads_started, proc->max_threads, proc->ready_threads, proc->free_async_space); count = 0; for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) count++; seq_printf(m, " nodes: %d\n", count); count = 0; strong = 0; weak = 0; for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { struct binder_ref *ref = rb_entry(n, struct binder_ref, rb_node_desc); count++; strong += ref->strong; weak += ref->weak; } seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak); count = 0; for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) count++; seq_printf(m, " buffers: %d\n", count); count = 0; list_for_each_entry(w, &proc->todo, entry) { switch (w->type) { case BINDER_WORK_TRANSACTION: count++; break; default: break; } } seq_printf(m, " pending transactions: %d\n", count); print_binder_stats(m, " ", &proc->stats); } static int binder_state_show(struct seq_file *m, void *unused) { struct binder_proc *proc; struct hlist_node *pos; struct binder_node *node; int do_lock = !binder_debug_no_lock; if (do_lock) binder_lock(__func__); seq_puts(m, "binder state:\n"); if (!hlist_empty(&binder_dead_nodes)) seq_puts(m, "dead nodes:\n"); hlist_for_each_entry(node, pos, &binder_dead_nodes, dead_node) print_binder_node(m, node); hlist_for_each_entry(proc, pos, &binder_procs, proc_node) print_binder_proc(m, proc, 1); if (do_lock) binder_unlock(__func__); return 0; } static int binder_stats_show(struct seq_file *m, void *unused) { struct binder_proc *proc; struct hlist_node *pos; int do_lock = !binder_debug_no_lock; if (do_lock) binder_lock(__func__); seq_puts(m, "binder stats:\n"); print_binder_stats(m, "", &binder_stats); hlist_for_each_entry(proc, pos, &binder_procs, proc_node) print_binder_proc_stats(m, proc); if (do_lock) binder_unlock(__func__); return 0; } static int binder_transactions_show(struct seq_file *m, void *unused) { struct binder_proc *proc; struct hlist_node *pos; int do_lock = !binder_debug_no_lock; if (do_lock) binder_lock(__func__); seq_puts(m, "binder transactions:\n"); hlist_for_each_entry(proc, pos, &binder_procs, proc_node) print_binder_proc(m, proc, 0); if (do_lock) binder_unlock(__func__); return 0; } static int binder_proc_show(struct seq_file *m, void *unused) { struct binder_proc *proc = m->private; int do_lock = !binder_debug_no_lock; if (do_lock) binder_lock(__func__); seq_puts(m, "binder proc state:\n"); print_binder_proc(m, proc, 1); if (do_lock) binder_unlock(__func__); return 0; } static void print_binder_transaction_log_entry(struct seq_file *m, struct binder_transaction_log_entry *e) { seq_printf(m, "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n", e->debug_id, (e->call_type == 2) ? "reply" : ((e->call_type == 1) ? 
"async" : "call "), e->from_proc, e->from_thread, e->to_proc, e->to_thread, e->to_node, e->target_handle, e->data_size, e->offsets_size); } static int binder_transaction_log_show(struct seq_file *m, void *unused) { struct binder_transaction_log *log = m->private; int i; if (log->full) { for (i = log->next; i < ARRAY_SIZE(log->entry); i++) print_binder_transaction_log_entry(m, &log->entry[i]); } for (i = 0; i < log->next; i++) print_binder_transaction_log_entry(m, &log->entry[i]); return 0; } static const struct file_operations binder_fops = { .owner = THIS_MODULE, .poll = binder_poll, .unlocked_ioctl = binder_ioctl, .mmap = binder_mmap, .open = binder_open, .flush = binder_flush, .release = binder_release, }; static struct miscdevice binder_miscdev = { .minor = MISC_DYNAMIC_MINOR, .name = "binder", .fops = &binder_fops }; BINDER_DEBUG_ENTRY(state); BINDER_DEBUG_ENTRY(stats); BINDER_DEBUG_ENTRY(transactions); BINDER_DEBUG_ENTRY(transaction_log); static int __init binder_init(void) { int ret; binder_deferred_workqueue = create_singlethread_workqueue("binder"); if (!binder_deferred_workqueue) return -ENOMEM; binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); if (binder_debugfs_dir_entry_root) binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", binder_debugfs_dir_entry_root); ret = misc_register(&binder_miscdev); if (binder_debugfs_dir_entry_root) { debugfs_create_file("state", S_IRUGO, binder_debugfs_dir_entry_root, NULL, &binder_state_fops); debugfs_create_file("stats", S_IRUGO, binder_debugfs_dir_entry_root, NULL, &binder_stats_fops); debugfs_create_file("transactions", S_IRUGO, binder_debugfs_dir_entry_root, NULL, &binder_transactions_fops); debugfs_create_file("transaction_log", S_IRUGO, binder_debugfs_dir_entry_root, &binder_transaction_log, &binder_transaction_log_fops); debugfs_create_file("failed_transaction_log", S_IRUGO, binder_debugfs_dir_entry_root, &binder_transaction_log_failed, &binder_transaction_log_fops); } return ret; } device_initcall(binder_init); #define CREATE_TRACE_POINTS #include "binder_trace.h" MODULE_LICENSE("GPL v2");
gpl-2.0
tuxillo/aarch64-dragonfly-gcc
gcc/testsuite/gcc.dg/vect/vect-strided-u32-i8.c
48
1771
/* { dg-require-effective-target vect_int } */

#include <stdarg.h>
#include "tree-vect.h"

#define N 128

typedef struct {
   int a;
   int b;
   int c;
   int d;
   int e;
   int f;
   int g;
   int h;
} s;

volatile int y = 0;

__attribute__ ((noinline)) int
main1 (s *arr)
{
  int i;
  s *ptr = arr;
  s res[N];

  for (i = 0; i < N; i++)
    {
      res[i].c = ptr->b - ptr->a + ptr->d - ptr->c;
      res[i].a = ptr->a + ptr->g + ptr->b + ptr->d;
      res[i].d = ptr->b - ptr->a + ptr->d - ptr->c;
      res[i].b = ptr->h - ptr->a + ptr->d - ptr->c;
      res[i].f = ptr->f + ptr->h;
      res[i].e = ptr->b - ptr->e;
      res[i].h = ptr->d - ptr->g;
      res[i].g = ptr->b - ptr->a + ptr->d - ptr->c;
      ptr++;
    }

  /* check results:  */
  for (i = 0; i < N; i++)
    {
      if (res[i].c != arr[i].b - arr[i].a + arr[i].d - arr[i].c
          || res[i].a != arr[i].a + arr[i].g + arr[i].b + arr[i].d
          || res[i].d != arr[i].b - arr[i].a + arr[i].d - arr[i].c
          || res[i].b != arr[i].h - arr[i].a + arr[i].d - arr[i].c
          || res[i].f != arr[i].f + arr[i].h
          || res[i].e != arr[i].b - arr[i].e
          || res[i].h != arr[i].d - arr[i].g
          || res[i].g != arr[i].b - arr[i].a + arr[i].d - arr[i].c)
        abort ();
    }
}

int main (void)
{
  int i;
  s arr[N];

  check_vect ();

  for (i = 0; i < N; i++)
    {
      arr[i].a = i;
      arr[i].b = i * 2;
      arr[i].c = 17;
      arr[i].d = i + 34;
      arr[i].e = i * 3 + 5;
      arr[i].f = i * 5;
      arr[i].g = i - 3;
      arr[i].h = 56;
      if (y) /* Avoid vectorization.  */
        abort ();
    }

  main1 (arr);

  return 0;
}

/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { target vect_strided8 } } } */
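/* Note (added): the struct above has eight int fields, so each iteration
   loads and stores interleaved groups with a stride of 8 elements - which
   is why the dump check above is gated on the vect_strided8 target
   selector. */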
gpl-2.0
shesselba/linux-berlin
lib/percpu-refcount.c
48
6443
#define pr_fmt(fmt) "%s: " fmt "\n", __func__

#include <linux/kernel.h>
#include <linux/percpu-refcount.h>

/*
 * Initially, a percpu refcount is just a set of percpu counters. Initially, we
 * don't try to detect the ref hitting 0 - which means that get/put can just
 * increment or decrement the local counter. Note that the counter on a
 * particular cpu can (and will) wrap - this is fine; when we go to shutdown,
 * the percpu counters will all sum to the correct value.
 *
 * (More precisely: because modular arithmetic is commutative, the sum of all
 * the pcpu_count vars will be equal to what it would have been if all the gets
 * and puts were done to a single integer, even if some of the percpu integers
 * overflow or underflow).
 *
 * The real trick to implementing percpu refcounts is shutdown. We can't detect
 * the ref hitting 0 on every put - this would require global synchronization
 * and defeat the whole purpose of using percpu refs.
 *
 * What we do is require the user to keep track of the initial refcount; we know
 * the ref can't hit 0 before the user drops the initial ref, so as long as we
 * convert to non percpu mode before the initial ref is dropped everything
 * works.
 *
 * Converting to non percpu mode is done with some RCUish stuff in
 * percpu_ref_kill. Additionally, we need a bias value so that the atomic_t
 * can't hit 0 before we've added up all the percpu refs.
 */

#define PCPU_COUNT_BIAS		(1U << 31)

static unsigned __percpu *pcpu_count_ptr(struct percpu_ref *ref)
{
	return (unsigned __percpu *)(ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
}

/**
 * percpu_ref_init - initialize a percpu refcount
 * @ref: percpu_ref to initialize
 * @release: function which will be called when refcount hits 0
 *
 * Initializes the refcount in single atomic counter mode with a refcount of 1;
 * analogous to atomic_set(ref, 1).
 *
 * Note that @release must not sleep - it may potentially be called from RCU
 * callback context by percpu_ref_kill().
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
{
	atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);

	ref->pcpu_count_ptr = (unsigned long)alloc_percpu(unsigned);
	if (!ref->pcpu_count_ptr)
		return -ENOMEM;

	ref->release = release;
	return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);

/**
 * percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
 *
 * Re-initialize @ref so that it's in the same state as when it finished
 * percpu_ref_init(). @ref must have been initialized successfully, killed
 * and reached 0 but not exited.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_reinit(struct percpu_ref *ref)
{
	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
	int cpu;

	BUG_ON(!pcpu_count);
	WARN_ON(!percpu_ref_is_zero(ref));

	atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);

	/*
	 * Restore per-cpu operation. smp_store_release() is paired with
	 * smp_read_barrier_depends() in __pcpu_ref_alive() and guarantees
	 * that the zeroing is visible to all percpu accesses which can see
	 * the following PCPU_REF_DEAD clearing.
	 */
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(pcpu_count, cpu) = 0;

	smp_store_release(&ref->pcpu_count_ptr,
			  ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);

/**
 * percpu_ref_exit - undo percpu_ref_init()
 * @ref: percpu_ref to exit
 *
 * This function exits @ref. The caller is responsible for ensuring that
 * @ref is no longer in active use. The usual places to invoke this
 * function from are the @ref->release() callback or in the init failure path
 * where percpu_ref_init() succeeded but other parts of the initialization
 * of the embedding object failed.
 */
void percpu_ref_exit(struct percpu_ref *ref)
{
	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);

	if (pcpu_count) {
		free_percpu(pcpu_count);
		ref->pcpu_count_ptr = PCPU_REF_DEAD;
	}
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);

static void percpu_ref_kill_rcu(struct rcu_head *rcu)
{
	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
	unsigned count = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		count += *per_cpu_ptr(pcpu_count, cpu);

	pr_debug("global %i pcpu %i", atomic_read(&ref->count), (int) count);

	/*
	 * It's crucial that we sum the percpu counters _before_ adding the sum
	 * to &ref->count; since gets could be happening on one cpu while puts
	 * happen on another, adding a single cpu's count could cause
	 * @ref->count to hit 0 before we've got a consistent value - but the
	 * sum of all the counts will be consistent and correct.
	 *
	 * Subtracting the bias value then has to happen _after_ adding count to
	 * &ref->count; we need the bias value to prevent &ref->count from
	 * reaching 0 before we add the percpu counts. But doing it at the same
	 * time is equivalent and saves us atomic operations:
	 */
	atomic_add((int) count - PCPU_COUNT_BIAS, &ref->count);

	WARN_ONCE(atomic_read(&ref->count) <= 0, "percpu ref <= 0 (%i)",
		  atomic_read(&ref->count));

	/* @ref is viewed as dead on all CPUs, send out kill confirmation */
	if (ref->confirm_kill)
		ref->confirm_kill(ref);

	/*
	 * Now we're in single atomic_t mode with a consistent refcount, so it's
	 * safe to drop our initial ref:
	 */
	percpu_ref_put(ref);
}

/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback
 *
 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
 * @confirm_kill is not NULL. @confirm_kill, which may not block, will be
 * called after @ref is seen as dead from all CPUs - all further
 * invocations of percpu_ref_tryget() will fail. See percpu_ref_tryget()
 * for more details.
 *
 * Due to the way percpu_ref is implemented, @confirm_kill will be called
 * after at least one full RCU grace period has passed but this is an
 * implementation detail and callers must not depend on it.
 */
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill)
{
	WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
		  "percpu_ref_kill() called more than once!\n");

	ref->pcpu_count_ptr |= PCPU_REF_DEAD;
	ref->confirm_kill = confirm_kill;

	call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
gpl-2.0
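Editor's aside: the modular-arithmetic claim in the comment block of lib/percpu-refcount.c above is easy to verify outside the kernel. A minimal userspace sketch (plain C, not kernel code; the four counters and the iteration count are arbitrary illustration): the individual counters underflow freely, yet their wrapped sum still equals what a single shared counter would hold.

#include <stdio.h>

int main(void)
{
	unsigned pcpu[4] = { 0, 0, 0, 0 };	/* stand-ins for per-cpu counters */
	unsigned single = 0;			/* reference single counter */
	unsigned sum;
	int i;

	for (i = 0; i < 1000; i++) {
		pcpu[0]++;		single++;	/* "get" on cpu 0 */
		pcpu[1 + i % 3]--;	single--;	/* "put" elsewhere; underflows */
	}

	sum = pcpu[0] + pcpu[1] + pcpu[2] + pcpu[3];
	printf("sum=%u single=%u\n", sum, single);	/* both print 0 */
	return 0;
}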
darrengarvey/gst_plugins_bad_patches
ext/ladspa/gstladspa.c
48
15522
/* GStreamer LADSPA plugin * Copyright (C) 1999 Erik Walthinsen <omega@cse.ogi.edu> * 2001 Steve Baker <stevebaker_org@yahoo.co.uk> * 2003 Andy Wingo <wingo at pobox.com> * Copyright (C) 2013 Juan Manuel Borges Caño <juanmabcmail@gmail.com> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public * License along with this library; if not, write to the * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, * Boston, MA 02110-1301, USA. */ /** * SECTION:element-ladspa * @short_description: bridge for LADSPA (Linux Audio Developer's Simple Plugin API) * @see_also: #GstAudioConvert, #GstAudioResample, #GstAudioTestSrc, #GstAutoAudioSink * * The LADSPA (Linux Audio Developer's Simple Plugin API) element is a bridge * for plugins using the <ulink url="http://www.ladspa.org/">LADSPA</ulink> API. * It scans all installed LADSPA plugins and registers them as gstreamer * elements. If available, it can also parse LRDF files and use the metadata for * element classification. The functionality you get depends on the LADSPA plugins * you have installed. * * <refsect2> * <title>Example LADSPA line without this plugin</title> * |[ * (padsp) listplugins * (padsp) analyseplugin cmt.so amp_mono * gst-launch -e filesrc location="$myfile" ! decodebin ! audioconvert ! audioresample ! "audio/x-raw,format=S16LE,rate=48000,channels=1" ! wavenc ! filesink location="testin.wav" * (padsp) applyplugin testin.wav testout.wav cmt.so amp_mono 2 * gst-launch playbin uri=file://"$PWD"/testout.wav * ]| Decode any audio file into wav with the format expected for the specific ladspa plugin to be applied, apply the ladspa filter and play it. * </refsect2> * * Now with this plugin: * * <refsect2> * <title>Example LADSPA line with this plugin</title> * |[ * gst-launch autoaudiosrc ! ladspa-cmt-so-amp-mono gain=2 ! ladspa-caps-so-plate ! ladspa-tap-echo-so-tap-stereo-echo l-delay=500 r-haas-delay=500 ! tee name=myT myT. ! queue ! autoaudiosink myT. ! queue ! audioconvert ! goom ! videoconvert ! xvimagesink pixel-aspect-ratio=3/4 * ]| Get audio input, filter it through CAPS Plate and TAP Stereo Echo, play it and show a visualization (headphones recommended). * </refsect2> * * In case you wonder about the plugin naming scheme, quoting ladspa.h: * "Plugin types should be identified by file and label rather than by * index or plugin name, which may be changed in new plugin versions." * This is really the best way then, as it is less prone to conflicts. * * Also it is worth noting that LADSPA provides a control in and out interface, * on top of the audio in and out one, so some parameters are readable too. * * You can see the listing of plugins available with: * <refsect2> * <title>Inspecting the plugins list</title> * |[ * gst-inspect ladspa * ]| List available LADSPA plugins on gstreamer. 
* </refsect2> * * You can see the parameters of any plugin with: * <refsect2> * <title>Inspecting the plugins</title> * |[ * gst-inspect ladspa-retro-flange-1208-so-retroflange * ]| List details of the plugin: parameters, ranges and defaults included. * </refsect2> * * The elements are categorized as: * <itemizedlist> * <listitem><para>Filter/Effect/Audio/LADSPA:</para> * <refsect2> * <title>Example Filter/Effect/Audio/LADSPA line with this plugin</title> * |[ * gst-launch filesrc location="$myfile" ! decodebin ! audioconvert ! audioresample ! ladspa-calf-so-reverb decay-time=15 high-frq-damp=20000 room-size=5 diffusion=1 wet-amount=2 dry-amount=2 pre-delay=50 bass-cut=20000 treble-cut=20000 ! ladspa-tap-echo-so-tap-stereo-echo l-delay=500 r-haas-delay=500 ! autoaudiosink * ]| Decode any audio file, filter it through Calf Reverb LADSPA then TAP Stereo Echo, and play it. * </refsect2> * </listitem> * <listitem><para>Source/Audio/LADSPA:</para> * <refsect2> * <title>Example Source/Audio/LADSPA line with this plugin</title> * |[ * gst-launch ladspasrc-sine-so-sine-fcac frequency=220 amplitude=100 ! audioconvert ! autoaudiosink * ]| Generate a sine wave with Sine Oscillator (Freq:control, Amp:control) and play it. * </refsect2> * <refsect2> * <title>Example Source/Audio/LADSPA line with this plugin</title> * |[ * gst-launch ladspasrc-caps-so-click bpm=240 volume=1 ! autoaudiosink * ]| Generate clicks with CAPS Click - Metronome at 240 beats per minute and play it. * </refsect2> * <refsect2> * <title>Example Source/Audio/LADSPA line with this plugin</title> * |[ * gst-launch ladspasrc-random-1661-so-random-fcsc-oa ! ladspa-cmt-so-amp-mono gain=1.5 ! ladspa-caps-so-plate ! tee name=myT myT. ! queue ! autoaudiosink myT. ! queue ! audioconvert ! wavescope ! videoconvert ! autovideosink * ]| Generate a random wave, filter it through Mono Amplifier and Versatile Plate Reverb, and play it while showing it. * </refsect2> * </listitem> * <listitem><para>Sink/Audio/LADSPA:</para> * <refsect2> * <title>Example Sink/Audio/LADSPA line with this plugin</title> * |[ * gst-launch autoaudiosrc ! ladspa-cmt-so-amp-mono gain=2 ! ladspa-caps-so-plate ! ladspa-tap-echo-so-tap-stereo-echo l-delay=500 r-haas-delay=500 ! tee name=myT myT. ! audioconvert ! audioresample ! queue ! ladspasink-cmt-so-null-ai myT. ! audioconvert ! audioresample ! queue ! goom ! videoconvert ! xvimagesink pixel-aspect-ratio=3/4 * ]| Get audio input, filter it through Mono Amplifier, CAPS Plate LADSPA and TAP Stereo Echo, explicitly nullify the audio with Null (Audio Output), and play a visualization (headphones recommended). 
* </refsect2> * </listitem> * </itemizedlist> */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "gstladspautils.h" #include "gstladspafilter.h" #include "gstladspasource.h" #include "gstladspasink.h" #include <gst/gst-i18n-plugin.h> #include <string.h> #include <ladspa.h> #ifdef HAVE_LRDF #include <lrdf.h> #endif GST_DEBUG_CATEGORY (ladspa_debug); #define GST_CAT_DEFAULT ladspa_debug /* * 1.0 and the 1.1 preliminary headers don't define a version, but * 1.1 finally does */ #ifndef LADSPA_VERSION #define LADSPA_VERSION "1.0" #endif #define GST_LADSPA_DEFAULT_PATH \ "/usr/lib/ladspa" G_SEARCHPATH_SEPARATOR_S \ "/usr/local/lib/ladspa" G_SEARCHPATH_SEPARATOR_S \ LIBDIR "/ladspa" GstStructure *ladspa_meta_all = NULL; static void ladspa_plugin_register_element (GstPlugin * plugin, GstStructure * ladspa_meta) { guint audio_in, audio_out; gst_structure_get_uint (ladspa_meta, "audio-in", &audio_in); gst_structure_get_uint (ladspa_meta, "audio-out", &audio_out); if (audio_in == 0) { ladspa_register_source_element (plugin, ladspa_meta); } else if (audio_out == 0) { ladspa_register_sink_element (plugin, ladspa_meta); } else { ladspa_register_filter_element (plugin, ladspa_meta); } } static void ladspa_count_ports (const LADSPA_Descriptor * descriptor, guint * audio_in, guint * audio_out, guint * control_in, guint * control_out) { guint i; *audio_in = *audio_out = *control_in = *control_out = 0; for (i = 0; i < descriptor->PortCount; i++) { LADSPA_PortDescriptor p = descriptor->PortDescriptors[i]; if (LADSPA_IS_PORT_AUDIO (p)) { if (LADSPA_IS_PORT_INPUT (p)) (*audio_in)++; else (*audio_out)++; } else if (LADSPA_IS_PORT_CONTROL (p)) { if (LADSPA_IS_PORT_INPUT (p)) (*control_in)++; else (*control_out)++; } } } static void ladspa_describe_plugin (const gchar * file_name, const gchar * entry_name, LADSPA_Descriptor_Function descriptor_function) { const LADSPA_Descriptor *desc; guint i; /* walk through all the plugins in this plugin library */ for (i = 0; (desc = descriptor_function (i)); i++) { GstStructure *ladspa_meta = NULL; GValue value = { 0, }; gchar *tmp; gchar *type_name; guint audio_in, audio_out, control_in, control_out; /* count ports of this plugin */ ladspa_count_ports (desc, &audio_in, &audio_out, &control_in, &control_out); /* categorize */ if (audio_in == 0 && audio_out == 0) { GST_WARNING ("Skipping control only element (%s:%lu/%s)", entry_name, desc->UniqueID, desc->Label); continue; } else if (audio_in == 0) { tmp = g_strdup_printf ("ladspasrc-%s-%s", entry_name, desc->Label); } else if (audio_out == 0) { tmp = g_strdup_printf ("ladspasink-%s-%s", entry_name, desc->Label); } else { tmp = g_strdup_printf ("ladspa-%s-%s", entry_name, desc->Label); } type_name = g_ascii_strdown (tmp, -1); g_free (tmp); g_strcanon (type_name, G_CSET_A_2_Z G_CSET_a_2_z G_CSET_DIGITS "-+", '-'); /* check if the type is already registered */ if (g_type_from_name (type_name)) { GST_WARNING ("Plugin identifier collision for %s (%s:%lu/%s)", type_name, entry_name, desc->UniqueID, desc->Label); g_free (type_name); continue; } ladspa_meta = gst_structure_new_empty ("ladspa"); gst_structure_set (ladspa_meta, "plugin-filename", G_TYPE_STRING, file_name, "element-ix", G_TYPE_UINT, i, "element-type-name", G_TYPE_STRING, type_name, "audio-in", G_TYPE_UINT, audio_in, "audio-out", G_TYPE_UINT, audio_out, "control-in", G_TYPE_UINT, control_in, "control-out", G_TYPE_UINT, control_out, NULL); g_value_init (&value, GST_TYPE_STRUCTURE); g_value_set_boxed (&value, ladspa_meta); gst_structure_set_value (ladspa_meta_all, 
type_name, &value); g_value_unset (&value); } } #ifdef HAVE_LRDF static gboolean ladspa_rdf_directory_search (const char *dir_name) { GDir *dir; gchar *file_name, *file_uri; const gchar *entry_name; gint ok; GST_INFO ("scanning directory for rdfs \"%s\"", dir_name); dir = g_dir_open (dir_name, 0, NULL); if (!dir) return FALSE; while ((entry_name = g_dir_read_name (dir))) { file_name = g_build_filename (dir_name, entry_name, NULL); file_uri = g_strconcat ("file://", file_name, NULL); ok = lrdf_read_file (file_uri); GST_INFO ("read %s : %d", file_uri, ok); g_free (file_uri); g_free (file_name); } g_dir_close (dir); return TRUE; } #endif /* search just the one directory */ static gboolean ladspa_plugin_directory_search (GstPlugin * ladspa_plugin, const char *dir_name) { GDir *dir; gchar *file_name; const gchar *entry_name; LADSPA_Descriptor_Function descriptor_function; GModule *plugin; gboolean ok = FALSE; GST_INFO ("scanning directory for plugins \"%s\"", dir_name); dir = g_dir_open (dir_name, 0, NULL); if (!dir) return FALSE; while ((entry_name = g_dir_read_name (dir))) { file_name = g_build_filename (dir_name, entry_name, NULL); plugin = g_module_open (file_name, G_MODULE_BIND_LAZY | G_MODULE_BIND_LOCAL); if (plugin) { /* the file is a shared library */ if (g_module_symbol (plugin, "ladspa_descriptor", (gpointer *) & descriptor_function)) { /* we've found a ladspa_descriptor function, now introspect it. */ GST_INFO ("describe %s", file_name); ladspa_describe_plugin (file_name, entry_name, descriptor_function); ok = TRUE; } else { /* it was a library, but not a LADSPA one. Unload it. */ g_module_close (plugin); } } g_free (file_name); } g_dir_close (dir); return ok; } /* search the plugin path */ static gboolean ladspa_plugin_path_search (GstPlugin * plugin) { const gchar *search_path; gchar *ladspa_path; gchar **paths; gint i, j, path_entries; gboolean res = FALSE, skip; #ifdef HAVE_LRDF gchar *pos, *prefix, *rdf_path; #endif search_path = g_getenv ("LADSPA_PATH"); if (search_path) { ladspa_path = g_strdup_printf ("%s" G_SEARCHPATH_SEPARATOR_S GST_LADSPA_DEFAULT_PATH, search_path); } else { ladspa_path = g_strdup (GST_LADSPA_DEFAULT_PATH); } paths = g_strsplit (ladspa_path, G_SEARCHPATH_SEPARATOR_S, 0); path_entries = g_strv_length (paths); GST_INFO ("%d dirs in search paths \"%s\"", path_entries, ladspa_path); #ifdef HAVE_LRDF for (i = 0; i < path_entries; i++) { skip = FALSE; for (j = 0; j < i; j++) { if (!strcmp (paths[i], paths[j])) { skip = TRUE; break; } } if (skip) /* duplicate path entry, check the next one */ continue; /* * transform path: /usr/lib/ladspa -> /usr/share/ladspa/rdf/ * yes, this is ugly, but lrdf has no search path */ if ((pos = strstr (paths[i], "/lib/ladspa"))) { prefix = g_strndup (paths[i], (pos - paths[i])); rdf_path = g_build_filename (prefix, "share", "ladspa", "rdf", NULL); ladspa_rdf_directory_search (rdf_path); g_free (rdf_path); g_free (prefix); } } #endif for (i = 0; i < path_entries; i++) { skip = FALSE; for (j = 0; j < i; j++) { if (!strcmp (paths[i], paths[j])) { skip = TRUE; break; } } if (skip) /* duplicate path entry, check the next one */ continue; res |= ladspa_plugin_directory_search (plugin, paths[i]); } g_strfreev (paths); g_free (ladspa_path); return res; } static gboolean plugin_init (GstPlugin * plugin) { gboolean res = FALSE; gint n = 0; GST_DEBUG_CATEGORY_INIT (ladspa_debug, "ladspa", 0, "LADSPA plugins"); #ifdef ENABLE_NLS GST_DEBUG ("binding text domain %s to locale dir %s", GETTEXT_PACKAGE, LOCALEDIR); bindtextdomain (GETTEXT_PACKAGE, LOCALEDIR); bind_textdomain_codeset (GETTEXT_PACKAGE, "UTF-8"); #endif 
gst_plugin_add_dependency_simple (plugin, "LADSPA_PATH", GST_LADSPA_DEFAULT_PATH, NULL, GST_PLUGIN_DEPENDENCY_FLAG_NONE); #ifdef HAVE_LRDF lrdf_init (); #endif ladspa_meta_all = (GstStructure *) gst_plugin_get_cache_data (plugin); if (ladspa_meta_all) { n = gst_structure_n_fields (ladspa_meta_all); } GST_INFO ("%d entries in cache", n); if (!n) { ladspa_meta_all = gst_structure_new_empty ("ladspa"); res = ladspa_plugin_path_search (plugin); if (res) { n = gst_structure_n_fields (ladspa_meta_all); GST_INFO ("%d entries after scanning", n); gst_plugin_set_cache_data (plugin, ladspa_meta_all); } } else { res = TRUE; } if (n) { gint i; const gchar *name; const GValue *value; GST_INFO ("register types"); for (i = 0; i < n; i++) { name = gst_structure_nth_field_name (ladspa_meta_all, i); value = gst_structure_get_value (ladspa_meta_all, name); if (G_VALUE_TYPE (value) == GST_TYPE_STRUCTURE) { GstStructure *ladspa_meta = g_value_get_boxed (value); ladspa_plugin_register_element (plugin, ladspa_meta); } } } if (!res) { GST_WARNING ("no LADSPA plugins found, check LADSPA_PATH"); } /* we don't want to fail, even if there are no elements registered */ return TRUE; } GST_PLUGIN_DEFINE (GST_VERSION_MAJOR, GST_VERSION_MINOR, ladspa, "LADSPA plugin", plugin_init, VERSION, GST_LICENSE, GST_PACKAGE_NAME, GST_PACKAGE_ORIGIN)
gpl-2.0
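Editor's aside: the element-naming scheme described in the gstladspa.c documentation above can be reproduced with the same three GLib calls ladspa_describe_plugin() uses. A small sketch; the cmt.so/amp_mono pair is the example from the docs, everything else is illustration.

#include <glib.h>

int
main (void)
{
  const gchar *entry_name = "cmt.so";   /* file name from the plugin dir */
  const gchar *label = "amp_mono";      /* LADSPA_Descriptor.Label */
  gchar *tmp, *type_name;

  tmp = g_strdup_printf ("ladspa-%s-%s", entry_name, label);
  type_name = g_ascii_strdown (tmp, -1);
  g_free (tmp);
  /* anything outside [A-Za-z0-9-+] ('.', '_', ...) becomes '-' */
  g_strcanon (type_name, G_CSET_A_2_Z G_CSET_a_2_z G_CSET_DIGITS "-+", '-');
  g_print ("%s\n", type_name);          /* -> ladspa-cmt-so-amp-mono */
  g_free (type_name);
  return 0;
}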
fredericgermain/linux-sunxi
drivers/media/platform/exynos4-is/fimc-isp-video.c
48
17328
/* * Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver * * FIMC-IS ISP video input and video output DMA interface driver * * Copyright (C) 2013 Samsung Electronics Co., Ltd. * Author: Sylwester Nawrocki <s.nawrocki@samsung.com> * * The hardware handling code derived from a driver written by * Younghwan Joo <yhwan.joo@samsung.com>. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/bitops.h> #include <linux/device.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <linux/printk.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/videobuf2-core.h> #include <media/videobuf2-dma-contig.h> #include <media/s5p_fimc.h> #include "common.h" #include "media-dev.h" #include "fimc-is.h" #include "fimc-isp-video.h" #include "fimc-is-param.h" static int isp_video_capture_queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], void *allocators[]) { struct fimc_isp *isp = vb2_get_drv_priv(vq); struct v4l2_pix_format_mplane *vid_fmt = &isp->video_capture.pixfmt; const struct v4l2_pix_format_mplane *pixm = NULL; const struct fimc_fmt *fmt; unsigned int wh, i; if (pfmt) { pixm = &pfmt->fmt.pix_mp; fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, -1); wh = pixm->width * pixm->height; } else { fmt = isp->video_capture.format; wh = vid_fmt->width * vid_fmt->height; } if (fmt == NULL) return -EINVAL; *num_buffers = clamp_t(u32, *num_buffers, FIMC_ISP_REQ_BUFS_MIN, FIMC_ISP_REQ_BUFS_MAX); *num_planes = fmt->memplanes; for (i = 0; i < fmt->memplanes; i++) { unsigned int size = (wh * fmt->depth[i]) / 8; if (pixm) sizes[i] = max(size, pixm->plane_fmt[i].sizeimage); else sizes[i] = size; allocators[i] = isp->alloc_ctx; } return 0; } static inline struct param_dma_output *__get_isp_dma2(struct fimc_is *is) { return &__get_curr_is_config(is)->isp.dma2_output; } static int isp_video_capture_start_streaming(struct vb2_queue *q, unsigned int count) { struct fimc_isp *isp = vb2_get_drv_priv(q); struct fimc_is *is = fimc_isp_to_is(isp); struct param_dma_output *dma = __get_isp_dma2(is); struct fimc_is_video *video = &isp->video_capture; int ret; if (!test_bit(ST_ISP_VID_CAP_BUF_PREP, &isp->state) || test_bit(ST_ISP_VID_CAP_STREAMING, &isp->state)) return 0; dma->cmd = DMA_OUTPUT_COMMAND_ENABLE; dma->notify_dma_done = DMA_OUTPUT_NOTIFY_DMA_DONE_ENABLE; dma->buffer_address = is->is_dma_p_region + DMA2_OUTPUT_ADDR_ARRAY_OFFS; dma->buffer_number = video->reqbufs_count; dma->dma_out_mask = video->buf_mask; isp_dbg(2, &video->ve.vdev, "buf_count: %d, planes: %d, dma addr table: %#x\n", video->buf_count, video->format->memplanes, dma->buffer_address); fimc_is_mem_barrier(); fimc_is_set_param_bit(is, PARAM_ISP_DMA2_OUTPUT); __fimc_is_hw_update_param(is, PARAM_ISP_DMA2_OUTPUT); ret = fimc_is_itf_s_param(is, false); if (ret < 0) return ret; ret = fimc_pipeline_call(&video->ve, set_stream, 1); if (ret < 0) return ret; set_bit(ST_ISP_VID_CAP_STREAMING, &isp->state); return ret; } static int isp_video_capture_stop_streaming(struct vb2_queue *q) { struct fimc_isp *isp = vb2_get_drv_priv(q); struct fimc_is *is = fimc_isp_to_is(isp); struct param_dma_output *dma = __get_isp_dma2(is); int ret; ret = 
fimc_pipeline_call(&isp->video_capture.ve, set_stream, 0); if (ret < 0) return ret; dma->cmd = DMA_OUTPUT_COMMAND_DISABLE; dma->notify_dma_done = DMA_OUTPUT_NOTIFY_DMA_DONE_DISABLE; dma->buffer_number = 0; dma->buffer_address = 0; dma->dma_out_mask = 0; fimc_is_set_param_bit(is, PARAM_ISP_DMA2_OUTPUT); __fimc_is_hw_update_param(is, PARAM_ISP_DMA2_OUTPUT); ret = fimc_is_itf_s_param(is, false); if (ret < 0) dev_warn(&is->pdev->dev, "%s: DMA stop failed\n", __func__); fimc_is_hw_set_isp_buf_mask(is, 0); clear_bit(ST_ISP_VID_CAP_BUF_PREP, &isp->state); clear_bit(ST_ISP_VID_CAP_STREAMING, &isp->state); isp->video_capture.buf_count = 0; return 0; } static int isp_video_capture_buffer_prepare(struct vb2_buffer *vb) { struct fimc_isp *isp = vb2_get_drv_priv(vb->vb2_queue); struct fimc_is_video *video = &isp->video_capture; int i; if (video->format == NULL) return -EINVAL; for (i = 0; i < video->format->memplanes; i++) { unsigned long size = video->pixfmt.plane_fmt[i].sizeimage; if (vb2_plane_size(vb, i) < size) { v4l2_err(&video->ve.vdev, "User buffer too small (%ld < %ld)\n", vb2_plane_size(vb, i), size); return -EINVAL; } vb2_set_plane_payload(vb, i, size); } /* Check if we get one of the already known buffers. */ if (test_bit(ST_ISP_VID_CAP_BUF_PREP, &isp->state)) { dma_addr_t dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0); int i; for (i = 0; i < video->buf_count; i++) if (video->buffers[i]->dma_addr[0] == dma_addr) return 0; return -ENXIO; } return 0; } static void isp_video_capture_buffer_queue(struct vb2_buffer *vb) { struct fimc_isp *isp = vb2_get_drv_priv(vb->vb2_queue); struct fimc_is_video *video = &isp->video_capture; struct fimc_is *is = fimc_isp_to_is(isp); struct isp_video_buf *ivb = to_isp_video_buf(vb); unsigned long flags; unsigned int i; if (test_bit(ST_ISP_VID_CAP_BUF_PREP, &isp->state)) { spin_lock_irqsave(&is->slock, flags); video->buf_mask |= BIT(ivb->index); spin_unlock_irqrestore(&is->slock, flags); } else { unsigned int num_planes = video->format->memplanes; ivb->index = video->buf_count; video->buffers[ivb->index] = ivb; for (i = 0; i < num_planes; i++) { int buf_index = ivb->index * num_planes + i; ivb->dma_addr[i] = vb2_dma_contig_plane_dma_addr(vb, i); is->is_p_region->shared[32 + buf_index] = ivb->dma_addr[i]; isp_dbg(2, &video->ve.vdev, "dma_buf %d (%d/%d/%d) addr: %#x\n", buf_index, ivb->index, i, vb->v4l2_buf.index, ivb->dma_addr[i]); } if (++video->buf_count < video->reqbufs_count) return; video->buf_mask = (1UL << video->buf_count) - 1; set_bit(ST_ISP_VID_CAP_BUF_PREP, &isp->state); } if (!test_bit(ST_ISP_VID_CAP_STREAMING, &isp->state)) isp_video_capture_start_streaming(vb->vb2_queue, 0); } /* * FIMC-IS ISP input and output DMA interface interrupt handler. * Locking: called with is->slock spinlock held. 
*/ void fimc_isp_video_irq_handler(struct fimc_is *is) { struct fimc_is_video *video = &is->isp.video_capture; struct vb2_buffer *vb; int buf_index; /* TODO: Ensure the DMA is really stopped in stop_streaming callback */ if (!test_bit(ST_ISP_VID_CAP_STREAMING, &is->isp.state)) return; buf_index = (is->i2h_cmd.args[1] - 1) % video->buf_count; vb = &video->buffers[buf_index]->vb; v4l2_get_timestamp(&vb->v4l2_buf.timestamp); vb2_buffer_done(vb, VB2_BUF_STATE_DONE); video->buf_mask &= ~BIT(buf_index); fimc_is_hw_set_isp_buf_mask(is, video->buf_mask); } static const struct vb2_ops isp_video_capture_qops = { .queue_setup = isp_video_capture_queue_setup, .buf_prepare = isp_video_capture_buffer_prepare, .buf_queue = isp_video_capture_buffer_queue, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, .start_streaming = isp_video_capture_start_streaming, .stop_streaming = isp_video_capture_stop_streaming, }; static int isp_video_open(struct file *file) { struct fimc_isp *isp = video_drvdata(file); struct exynos_video_entity *ve = &isp->video_capture.ve; struct media_entity *me = &ve->vdev.entity; int ret; if (mutex_lock_interruptible(&isp->video_lock)) return -ERESTARTSYS; ret = v4l2_fh_open(file); if (ret < 0) goto unlock; ret = pm_runtime_get_sync(&isp->pdev->dev); if (ret < 0) goto rel_fh; if (v4l2_fh_is_singular_file(file)) { mutex_lock(&me->parent->graph_mutex); ret = fimc_pipeline_call(ve, open, me, true); /* Mark the video pipeline as in use. */ if (ret == 0) me->use_count++; mutex_unlock(&me->parent->graph_mutex); } if (!ret) goto unlock; rel_fh: v4l2_fh_release(file); unlock: mutex_unlock(&isp->video_lock); return ret; } static int isp_video_release(struct file *file) { struct fimc_isp *isp = video_drvdata(file); struct fimc_is_video *ivc = &isp->video_capture; struct media_entity *entity = &ivc->ve.vdev.entity; struct media_device *mdev = entity->parent; int ret = 0; mutex_lock(&isp->video_lock); if (v4l2_fh_is_singular_file(file) && ivc->streaming) { media_entity_pipeline_stop(entity); ivc->streaming = 0; } vb2_fop_release(file); if (v4l2_fh_is_singular_file(file)) { fimc_pipeline_call(&ivc->ve, close); mutex_lock(&mdev->graph_mutex); entity->use_count--; mutex_unlock(&mdev->graph_mutex); } pm_runtime_put(&isp->pdev->dev); mutex_unlock(&isp->video_lock); return ret; } static const struct v4l2_file_operations isp_video_fops = { .owner = THIS_MODULE, .open = isp_video_open, .release = isp_video_release, .poll = vb2_fop_poll, .unlocked_ioctl = video_ioctl2, .mmap = vb2_fop_mmap, }; /* * Video node ioctl operations */ static int isp_video_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct fimc_isp *isp = video_drvdata(file); __fimc_vidioc_querycap(&isp->pdev->dev, cap, V4L2_CAP_STREAMING); return 0; } static int isp_video_enum_fmt_mplane(struct file *file, void *priv, struct v4l2_fmtdesc *f) { const struct fimc_fmt *fmt; if (f->index >= FIMC_ISP_NUM_FORMATS) return -EINVAL; fmt = fimc_isp_find_format(NULL, NULL, f->index); if (WARN_ON(fmt == NULL)) return -EINVAL; strlcpy(f->description, fmt->name, sizeof(f->description)); f->pixelformat = fmt->fourcc; return 0; } static int isp_video_g_fmt_mplane(struct file *file, void *fh, struct v4l2_format *f) { struct fimc_isp *isp = video_drvdata(file); f->fmt.pix_mp = isp->video_capture.pixfmt; return 0; } static void __isp_video_try_fmt(struct fimc_isp *isp, struct v4l2_pix_format_mplane *pixm, const struct fimc_fmt **fmt) { *fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, 2); pixm->colorspace = 
V4L2_COLORSPACE_SRGB; pixm->field = V4L2_FIELD_NONE; pixm->num_planes = (*fmt)->memplanes; pixm->pixelformat = (*fmt)->fourcc; /* * TODO: double-check with the documentation that these width/height * constraints are correct. */ v4l_bound_align_image(&pixm->width, FIMC_ISP_SOURCE_WIDTH_MIN, FIMC_ISP_SOURCE_WIDTH_MAX, 3, &pixm->height, FIMC_ISP_SOURCE_HEIGHT_MIN, FIMC_ISP_SOURCE_HEIGHT_MAX, 0, 0); } static int isp_video_try_fmt_mplane(struct file *file, void *fh, struct v4l2_format *f) { struct fimc_isp *isp = video_drvdata(file); __isp_video_try_fmt(isp, &f->fmt.pix_mp, NULL); return 0; } static int isp_video_s_fmt_mplane(struct file *file, void *priv, struct v4l2_format *f) { struct fimc_isp *isp = video_drvdata(file); struct fimc_is *is = fimc_isp_to_is(isp); struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp; const struct fimc_fmt *ifmt = NULL; struct param_dma_output *dma = __get_isp_dma2(is); __isp_video_try_fmt(isp, pixm, &ifmt); if (WARN_ON(ifmt == NULL)) return -EINVAL; dma->format = DMA_OUTPUT_FORMAT_BAYER; dma->order = DMA_OUTPUT_ORDER_GB_BG; dma->plane = ifmt->memplanes; dma->bitwidth = ifmt->depth[0]; dma->width = pixm->width; dma->height = pixm->height; fimc_is_mem_barrier(); isp->video_capture.format = ifmt; isp->video_capture.pixfmt = *pixm; return 0; } /* * Check for source/sink format differences at each link. * Return 0 if the formats match or -EPIPE otherwise. */ static int isp_video_pipeline_validate(struct fimc_isp *isp) { struct v4l2_subdev *sd = &isp->subdev; struct v4l2_subdev_format sink_fmt, src_fmt; struct media_pad *pad; int ret; while (1) { /* Retrieve format at the sink pad */ pad = &sd->entity.pads[0]; if (!(pad->flags & MEDIA_PAD_FL_SINK)) break; sink_fmt.pad = pad->index; sink_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sink_fmt); if (ret < 0 && ret != -ENOIOCTLCMD) return -EPIPE; /* Retrieve format at the source pad */ pad = media_entity_remote_pad(pad); if (pad == NULL || media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV) break; sd = media_entity_to_v4l2_subdev(pad->entity); src_fmt.pad = pad->index; src_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &src_fmt); if (ret < 0 && ret != -ENOIOCTLCMD) return -EPIPE; if (src_fmt.format.width != sink_fmt.format.width || src_fmt.format.height != sink_fmt.format.height || src_fmt.format.code != sink_fmt.format.code) return -EPIPE; } return 0; } static int isp_video_streamon(struct file *file, void *priv, enum v4l2_buf_type type) { struct fimc_isp *isp = video_drvdata(file); struct exynos_video_entity *ve = &isp->video_capture.ve; struct media_entity *me = &ve->vdev.entity; int ret; ret = media_entity_pipeline_start(me, &ve->pipe->mp); if (ret < 0) return ret; ret = isp_video_pipeline_validate(isp); if (ret < 0) goto p_stop; ret = vb2_ioctl_streamon(file, priv, type); if (ret < 0) goto p_stop; isp->video_capture.streaming = 1; return 0; p_stop: media_entity_pipeline_stop(me); return ret; } static int isp_video_streamoff(struct file *file, void *priv, enum v4l2_buf_type type) { struct fimc_isp *isp = video_drvdata(file); struct fimc_is_video *video = &isp->video_capture; int ret; ret = vb2_ioctl_streamoff(file, priv, type); if (ret < 0) return ret; media_entity_pipeline_stop(&video->ve.vdev.entity); video->streaming = 0; return 0; } static int isp_video_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *rb) { struct fimc_isp *isp = video_drvdata(file); int ret; ret = vb2_ioctl_reqbufs(file, priv, rb); if (ret < 0) 
return ret; if (rb->count && rb->count < FIMC_ISP_REQ_BUFS_MIN) { rb->count = 0; vb2_ioctl_reqbufs(file, priv, rb); ret = -ENOMEM; } isp->video_capture.reqbufs_count = rb->count; return ret; } static const struct v4l2_ioctl_ops isp_video_ioctl_ops = { .vidioc_querycap = isp_video_querycap, .vidioc_enum_fmt_vid_cap_mplane = isp_video_enum_fmt_mplane, .vidioc_try_fmt_vid_cap_mplane = isp_video_try_fmt_mplane, .vidioc_s_fmt_vid_cap_mplane = isp_video_s_fmt_mplane, .vidioc_g_fmt_vid_cap_mplane = isp_video_g_fmt_mplane, .vidioc_reqbufs = isp_video_reqbufs, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_prepare_buf = vb2_ioctl_prepare_buf, .vidioc_create_bufs = vb2_ioctl_create_bufs, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_streamon = isp_video_streamon, .vidioc_streamoff = isp_video_streamoff, }; int fimc_isp_video_device_register(struct fimc_isp *isp, struct v4l2_device *v4l2_dev, enum v4l2_buf_type type) { struct vb2_queue *q = &isp->video_capture.vb_queue; struct fimc_is_video *iv; struct video_device *vdev; int ret; if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) iv = &isp->video_capture; else return -ENOSYS; mutex_init(&isp->video_lock); INIT_LIST_HEAD(&iv->pending_buf_q); INIT_LIST_HEAD(&iv->active_buf_q); iv->format = fimc_isp_find_format(NULL, NULL, 0); iv->pixfmt.width = IS_DEFAULT_WIDTH; iv->pixfmt.height = IS_DEFAULT_HEIGHT; iv->pixfmt.pixelformat = iv->format->fourcc; iv->pixfmt.colorspace = V4L2_COLORSPACE_SRGB; iv->reqbufs_count = 0; memset(q, 0, sizeof(*q)); q->type = type; q->io_modes = VB2_MMAP | VB2_USERPTR; q->ops = &isp_video_capture_qops; q->mem_ops = &vb2_dma_contig_memops; q->buf_struct_size = sizeof(struct isp_video_buf); q->drv_priv = isp; q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q->lock = &isp->video_lock; ret = vb2_queue_init(q); if (ret < 0) return ret; vdev = &iv->ve.vdev; memset(vdev, 0, sizeof(*vdev)); snprintf(vdev->name, sizeof(vdev->name), "fimc-is-isp.%s", type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ? "capture" : "output"); vdev->queue = q; vdev->fops = &isp_video_fops; vdev->ioctl_ops = &isp_video_ioctl_ops; vdev->v4l2_dev = v4l2_dev; vdev->minor = -1; vdev->release = video_device_release_empty; vdev->lock = &isp->video_lock; iv->pad.flags = MEDIA_PAD_FL_SINK; ret = media_entity_init(&vdev->entity, 1, &iv->pad, 0); if (ret < 0) return ret; video_set_drvdata(vdev, isp); ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1); if (ret < 0) { media_entity_cleanup(&vdev->entity); return ret; } v4l2_info(v4l2_dev, "Registered %s as /dev/%s\n", vdev->name, video_device_node_name(vdev)); return 0; } void fimc_isp_video_device_unregister(struct fimc_isp *isp, enum v4l2_buf_type type) { struct exynos_video_entity *ve; if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) ve = &isp->video_capture.ve; else return; mutex_lock(&isp->video_lock); if (video_is_registered(&ve->vdev)) { video_unregister_device(&ve->vdev); media_entity_cleanup(&ve->vdev.entity); ve->pipe = NULL; } mutex_unlock(&isp->video_lock); }
gpl-2.0
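Editor's aside: a userspace sketch of the buffer bookkeeping in fimc-isp-video.c above. The interrupt handler maps the firmware's 1-based frame counter onto a buffer index and clears that buffer's bit in the hardware-owned mask, and the buffer is set again once userspace requeues it; the buffer count and frame numbers here are made up for illustration.

#include <stdio.h>

#define BUF_COUNT 4

int main(void)
{
	unsigned long buf_mask = (1UL << BUF_COUNT) - 1; /* all buffers queued */
	unsigned int frame;

	for (frame = 1; frame <= 6; frame++) {
		int buf_index = (frame - 1) % BUF_COUNT; /* as in the irq handler */

		buf_mask &= ~(1UL << buf_index);	/* done, handed to userspace */
		printf("frame %u -> buffer %d, mask %#lx\n",
		       frame, buf_index, buf_mask);
		buf_mask |= 1UL << buf_index;		/* requeued by userspace */
	}
	return 0;
}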
kmobs/htc-kernel-pyramid
drivers/scsi/bfa/bfa_iocfc.c
816
21519
/* * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com * * Linux driver for Brocade Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as * published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <cs/bfa_debug.h> #include <bfa_priv.h> #include <log/bfa_log_hal.h> #include <bfi/bfi_boot.h> #include <bfi/bfi_cbreg.h> #include <aen/bfa_aen_ioc.h> #include <defs/bfa_defs_iocfc.h> #include <defs/bfa_defs_pci.h> #include "bfa_callback_priv.h" #include "bfad_drv.h" BFA_TRC_FILE(HAL, IOCFC); /** * IOC local definitions */ #define BFA_IOCFC_TOV 5000 /* msecs */ enum { BFA_IOCFC_ACT_NONE = 0, BFA_IOCFC_ACT_INIT = 1, BFA_IOCFC_ACT_STOP = 2, BFA_IOCFC_ACT_DISABLE = 3, }; /* * forward declarations */ static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status); static void bfa_iocfc_disable_cbfn(void *bfa_arg); static void bfa_iocfc_hbfail_cbfn(void *bfa_arg); static void bfa_iocfc_reset_cbfn(void *bfa_arg); static void bfa_iocfc_stats_clear(void *bfa_arg); static void bfa_iocfc_stats_swap(struct bfa_fw_stats_s *d, struct bfa_fw_stats_s *s); static void bfa_iocfc_stats_clr_cb(void *bfa_arg, bfa_boolean_t complete); static void bfa_iocfc_stats_clr_timeout(void *bfa_arg); static void bfa_iocfc_stats_cb(void *bfa_arg, bfa_boolean_t complete); static void bfa_iocfc_stats_timeout(void *bfa_arg); static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn; /** * bfa_ioc_pvt BFA IOC private functions */ static void bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len) { int i, per_reqq_sz, per_rspq_sz; per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ), BFA_DMA_ALIGN_SZ); per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ), BFA_DMA_ALIGN_SZ); /* * Calculate CQ size */ for (i = 0; i < cfg->fwcfg.num_cqs; i++) { *dm_len = *dm_len + per_reqq_sz; *dm_len = *dm_len + per_rspq_sz; } /* * Calculate Shadow CI/PI size */ for (i = 0; i < cfg->fwcfg.num_cqs; i++) *dm_len += (2 * BFA_CACHELINE_SZ); } static void bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len) { *dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ); *dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s), BFA_CACHELINE_SZ); *dm_len += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ); } /** * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ */ static void bfa_iocfc_send_cfg(void *bfa_arg) { struct bfa_s *bfa = bfa_arg; struct bfa_iocfc_s *iocfc = &bfa->iocfc; struct bfi_iocfc_cfg_req_s cfg_req; struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo; struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg; int i; bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS); bfa_trc(bfa, cfg->fwcfg.num_cqs); iocfc->cfgdone = BFA_FALSE; bfa_iocfc_reset_queues(bfa); /** * initialize IOC configuration info */ cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG; cfg_info->num_cqs = cfg->fwcfg.num_cqs; bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa); bfa_dma_be_addr_set(cfg_info->stats_addr, iocfc->stats_pa); /** * dma map REQ and RSP circular queues and shadow pointers */ for (i = 0; i < cfg->fwcfg.num_cqs; i++) { bfa_dma_be_addr_set(cfg_info->req_cq_ba[i], iocfc->req_cq_ba[i].pa); 
bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i], iocfc->req_cq_shadow_ci[i].pa); cfg_info->req_cq_elems[i] = bfa_os_htons(cfg->drvcfg.num_reqq_elems); bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i], iocfc->rsp_cq_ba[i].pa); bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i], iocfc->rsp_cq_shadow_pi[i].pa); cfg_info->rsp_cq_elems[i] = bfa_os_htons(cfg->drvcfg.num_rspq_elems); } /** * dma map IOC configuration itself */ bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ, bfa_lpuid(bfa)); bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa); bfa_ioc_mbox_send(&bfa->ioc, &cfg_req, sizeof(struct bfi_iocfc_cfg_req_s)); } static void bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev) { struct bfa_iocfc_s *iocfc = &bfa->iocfc; bfa->bfad = bfad; iocfc->bfa = bfa; iocfc->action = BFA_IOCFC_ACT_NONE; bfa_os_assign(iocfc->cfg, *cfg); /** * Initialize chip specific handlers. */ if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT) { iocfc->hwif.hw_reginit = bfa_hwct_reginit; iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack; iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack; iocfc->hwif.hw_msix_init = bfa_hwct_msix_init; iocfc->hwif.hw_msix_install = bfa_hwct_msix_install; iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall; iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set; iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs; } else { iocfc->hwif.hw_reginit = bfa_hwcb_reginit; iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack; iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack; iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init; iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install; iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall; iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set; iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs; } iocfc->hwif.hw_reginit(bfa); bfa->msix.nvecs = 0; } static void bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo) { u8 *dm_kva; u64 dm_pa; int i, per_reqq_sz, per_rspq_sz; struct bfa_iocfc_s *iocfc = &bfa->iocfc; int dbgsz; dm_kva = bfa_meminfo_dma_virt(meminfo); dm_pa = bfa_meminfo_dma_phys(meminfo); /* * First allocate dma memory for IOC. 
*/ bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa); dm_kva += bfa_ioc_meminfo(); dm_pa += bfa_ioc_meminfo(); /* * Claim DMA-able memory for the request/response queues and for shadow * ci/pi registers */ per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ), BFA_DMA_ALIGN_SZ); per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ), BFA_DMA_ALIGN_SZ); for (i = 0; i < cfg->fwcfg.num_cqs; i++) { iocfc->req_cq_ba[i].kva = dm_kva; iocfc->req_cq_ba[i].pa = dm_pa; bfa_os_memset(dm_kva, 0, per_reqq_sz); dm_kva += per_reqq_sz; dm_pa += per_reqq_sz; iocfc->rsp_cq_ba[i].kva = dm_kva; iocfc->rsp_cq_ba[i].pa = dm_pa; bfa_os_memset(dm_kva, 0, per_rspq_sz); dm_kva += per_rspq_sz; dm_pa += per_rspq_sz; } for (i = 0; i < cfg->fwcfg.num_cqs; i++) { iocfc->req_cq_shadow_ci[i].kva = dm_kva; iocfc->req_cq_shadow_ci[i].pa = dm_pa; dm_kva += BFA_CACHELINE_SZ; dm_pa += BFA_CACHELINE_SZ; iocfc->rsp_cq_shadow_pi[i].kva = dm_kva; iocfc->rsp_cq_shadow_pi[i].pa = dm_pa; dm_kva += BFA_CACHELINE_SZ; dm_pa += BFA_CACHELINE_SZ; } /* * Claim DMA-able memory for the config info page */ bfa->iocfc.cfg_info.kva = dm_kva; bfa->iocfc.cfg_info.pa = dm_pa; bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva; dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ); dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ); /* * Claim DMA-able memory for the config response */ bfa->iocfc.cfgrsp_dma.kva = dm_kva; bfa->iocfc.cfgrsp_dma.pa = dm_pa; bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva; dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s), BFA_CACHELINE_SZ); dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s), BFA_CACHELINE_SZ); /* * Claim DMA-able memory for iocfc stats */ bfa->iocfc.stats_kva = dm_kva; bfa->iocfc.stats_pa = dm_pa; bfa->iocfc.fw_stats = (struct bfa_fw_stats_s *) dm_kva; dm_kva += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ); dm_pa += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ); bfa_meminfo_dma_virt(meminfo) = dm_kva; bfa_meminfo_dma_phys(meminfo) = dm_pa; dbgsz = bfa_ioc_debug_trcsz(bfa_auto_recover); if (dbgsz > 0) { bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo)); bfa_meminfo_kva(meminfo) += dbgsz; } } /** * BFA submodules initialization completion notification. */ static void bfa_iocfc_initdone_submod(struct bfa_s *bfa) { int i; for (i = 0; hal_mods[i]; i++) hal_mods[i]->initdone(bfa); } /** * Start BFA submodules. */ static void bfa_iocfc_start_submod(struct bfa_s *bfa) { int i; bfa->rme_process = BFA_TRUE; for (i = 0; hal_mods[i]; i++) hal_mods[i]->start(bfa); } /** * Disable BFA submodules. */ static void bfa_iocfc_disable_submod(struct bfa_s *bfa) { int i; for (i = 0; hal_mods[i]; i++) hal_mods[i]->iocdisable(bfa); } static void bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete) { struct bfa_s *bfa = bfa_arg; if (complete) { if (bfa->iocfc.cfgdone) bfa_cb_init(bfa->bfad, BFA_STATUS_OK); else bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED); } else { if (bfa->iocfc.cfgdone) bfa->iocfc.action = BFA_IOCFC_ACT_NONE; } } static void bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl) { struct bfa_s *bfa = bfa_arg; struct bfad_s *bfad = bfa->bfad; if (compl) complete(&bfad->comp); else bfa->iocfc.action = BFA_IOCFC_ACT_NONE; } static void bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl) { struct bfa_s *bfa = bfa_arg; struct bfad_s *bfad = bfa->bfad; if (compl) complete(&bfad->disable_comp); } /** * Update BFA configuration from firmware configuration. 
*/ static void bfa_iocfc_cfgrsp(struct bfa_s *bfa) { struct bfa_iocfc_s *iocfc = &bfa->iocfc; struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg; struct bfi_iocfc_cfg_s *cfginfo = iocfc->cfginfo; fwcfg->num_cqs = fwcfg->num_cqs; fwcfg->num_ioim_reqs = bfa_os_ntohs(fwcfg->num_ioim_reqs); fwcfg->num_tskim_reqs = bfa_os_ntohs(fwcfg->num_tskim_reqs); fwcfg->num_fcxp_reqs = bfa_os_ntohs(fwcfg->num_fcxp_reqs); fwcfg->num_uf_bufs = bfa_os_ntohs(fwcfg->num_uf_bufs); fwcfg->num_rports = bfa_os_ntohs(fwcfg->num_rports); cfginfo->intr_attr.coalesce = cfgrsp->intr_attr.coalesce; cfginfo->intr_attr.delay = bfa_os_ntohs(cfgrsp->intr_attr.delay); cfginfo->intr_attr.latency = bfa_os_ntohs(cfgrsp->intr_attr.latency); iocfc->cfgdone = BFA_TRUE; /** * Configuration is complete - initialize/start submodules */ if (iocfc->action == BFA_IOCFC_ACT_INIT) bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa); else bfa_iocfc_start_submod(bfa); } static void bfa_iocfc_stats_clear(void *bfa_arg) { struct bfa_s *bfa = bfa_arg; struct bfa_iocfc_s *iocfc = &bfa->iocfc; struct bfi_iocfc_stats_req_s stats_req; bfa_timer_start(bfa, &iocfc->stats_timer, bfa_iocfc_stats_clr_timeout, bfa, BFA_IOCFC_TOV); bfi_h2i_set(stats_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CLEAR_STATS_REQ, bfa_lpuid(bfa)); bfa_ioc_mbox_send(&bfa->ioc, &stats_req, sizeof(struct bfi_iocfc_stats_req_s)); } static void bfa_iocfc_stats_swap(struct bfa_fw_stats_s *d, struct bfa_fw_stats_s *s) { u32 *dip = (u32 *) d; u32 *sip = (u32 *) s; int i; for (i = 0; i < (sizeof(struct bfa_fw_stats_s) / sizeof(u32)); i++) dip[i] = bfa_os_ntohl(sip[i]); } static void bfa_iocfc_stats_clr_cb(void *bfa_arg, bfa_boolean_t complete) { struct bfa_s *bfa = bfa_arg; struct bfa_iocfc_s *iocfc = &bfa->iocfc; if (complete) { bfa_ioc_clr_stats(&bfa->ioc); iocfc->stats_cbfn(iocfc->stats_cbarg, iocfc->stats_status); } else { iocfc->stats_busy = BFA_FALSE; iocfc->stats_status = BFA_STATUS_OK; } } static void bfa_iocfc_stats_clr_timeout(void *bfa_arg) { struct bfa_s *bfa = bfa_arg; struct bfa_iocfc_s *iocfc = &bfa->iocfc; bfa_trc(bfa, 0); iocfc->stats_status = BFA_STATUS_ETIMER; bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_clr_cb, bfa); } static void bfa_iocfc_stats_cb(void *bfa_arg, bfa_boolean_t complete) { struct bfa_s *bfa = bfa_arg; struct bfa_iocfc_s *iocfc = &bfa->iocfc; if (complete) { if (iocfc->stats_status == BFA_STATUS_OK) { bfa_os_memset(iocfc->stats_ret, 0, sizeof(*iocfc->stats_ret)); bfa_iocfc_stats_swap(&iocfc->stats_ret->fw_stats, iocfc->fw_stats); } iocfc->stats_cbfn(iocfc->stats_cbarg, iocfc->stats_status); } else { iocfc->stats_busy = BFA_FALSE; iocfc->stats_status = BFA_STATUS_OK; } } static void bfa_iocfc_stats_timeout(void *bfa_arg) { struct bfa_s *bfa = bfa_arg; struct bfa_iocfc_s *iocfc = &bfa->iocfc; bfa_trc(bfa, 0); iocfc->stats_status = BFA_STATUS_ETIMER; bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_cb, bfa); } static void bfa_iocfc_stats_query(struct bfa_s *bfa) { struct bfa_iocfc_s *iocfc = &bfa->iocfc; struct bfi_iocfc_stats_req_s stats_req; bfa_timer_start(bfa, &iocfc->stats_timer, bfa_iocfc_stats_timeout, bfa, BFA_IOCFC_TOV); bfi_h2i_set(stats_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_GET_STATS_REQ, bfa_lpuid(bfa)); bfa_ioc_mbox_send(&bfa->ioc, &stats_req, sizeof(struct bfi_iocfc_stats_req_s)); } void bfa_iocfc_reset_queues(struct bfa_s *bfa) { int q; for (q = 0; q < BFI_IOC_MAX_CQS; q++) { bfa_reqq_ci(bfa, q) = 0; bfa_reqq_pi(bfa, q) = 0; bfa_rspq_ci(bfa, q) = 0; bfa_rspq_pi(bfa, q) = 
0; } } /** * IOC enable request is complete */ static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status) { struct bfa_s *bfa = bfa_arg; if (status != BFA_STATUS_OK) { bfa_isr_disable(bfa); if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb, bfa); return; } bfa_iocfc_initdone_submod(bfa); bfa_iocfc_send_cfg(bfa); } /** * IOC disable request is complete */ static void bfa_iocfc_disable_cbfn(void *bfa_arg) { struct bfa_s *bfa = bfa_arg; bfa_isr_disable(bfa); bfa_iocfc_disable_submod(bfa); if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP) bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb, bfa); else { bfa_assert(bfa->iocfc.action == BFA_IOCFC_ACT_DISABLE); bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb, bfa); } } /** * Notify sub-modules of hardware failure. */ static void bfa_iocfc_hbfail_cbfn(void *bfa_arg) { struct bfa_s *bfa = bfa_arg; bfa->rme_process = BFA_FALSE; bfa_isr_disable(bfa); bfa_iocfc_disable_submod(bfa); if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb, bfa); } /** * Actions on chip-reset completion. */ static void bfa_iocfc_reset_cbfn(void *bfa_arg) { struct bfa_s *bfa = bfa_arg; bfa_iocfc_reset_queues(bfa); bfa_isr_enable(bfa); } /** * bfa_ioc_public */ /** * Query IOC memory requirement information. */ void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, u32 *dm_len) { /* dma memory for IOC */ *dm_len += bfa_ioc_meminfo(); bfa_iocfc_fw_cfg_sz(cfg, dm_len); bfa_iocfc_cqs_sz(cfg, dm_len); *km_len += bfa_ioc_debug_trcsz(bfa_auto_recover); } /** * Attach and initialize the IOCFC module. */ void bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) { int i; bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn; bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn; bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn; bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn; bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod, bfa->trcmod, bfa->aen, bfa->logm); /** * Choose FC (ssid: 0x1C) vs. FCoE (ssid: 0x14) mode. */ if (0) bfa_ioc_set_fcmode(&bfa->ioc); bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC); bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs); bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev); bfa_iocfc_mem_claim(bfa, cfg, meminfo); bfa_timer_init(&bfa->timer_mod); INIT_LIST_HEAD(&bfa->comp_q); for (i = 0; i < BFI_IOC_MAX_CQS; i++) INIT_LIST_HEAD(&bfa->reqq_waitq[i]); } /** * Detach the IOCFC module. */ void bfa_iocfc_detach(struct bfa_s *bfa) { bfa_ioc_detach(&bfa->ioc); } /** * Start IOC initialization; configuration continues from the enable callback. */ void bfa_iocfc_init(struct bfa_s *bfa) { bfa->iocfc.action = BFA_IOCFC_ACT_INIT; bfa_ioc_enable(&bfa->ioc); } /** * IOC start called from bfa_start(). Called to start IOC operations * at driver instantiation for this instance. */ void bfa_iocfc_start(struct bfa_s *bfa) { if (bfa->iocfc.cfgdone) bfa_iocfc_start_submod(bfa); } /** * IOC stop called from bfa_stop(). Called only when driver is unloaded * for this instance. 
*/ void bfa_iocfc_stop(struct bfa_s *bfa) { bfa->iocfc.action = BFA_IOCFC_ACT_STOP; bfa->rme_process = BFA_FALSE; bfa_ioc_disable(&bfa->ioc); } void bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m) { struct bfa_s *bfa = bfaarg; struct bfa_iocfc_s *iocfc = &bfa->iocfc; union bfi_iocfc_i2h_msg_u *msg; msg = (union bfi_iocfc_i2h_msg_u *) m; bfa_trc(bfa, msg->mh.msg_id); switch (msg->mh.msg_id) { case BFI_IOCFC_I2H_CFG_REPLY: iocfc->cfg_reply = &msg->cfg_reply; bfa_iocfc_cfgrsp(bfa); break; case BFI_IOCFC_I2H_GET_STATS_RSP: if (iocfc->stats_busy == BFA_FALSE || iocfc->stats_status == BFA_STATUS_ETIMER) break; bfa_timer_stop(&iocfc->stats_timer); iocfc->stats_status = BFA_STATUS_OK; bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_cb, bfa); break; case BFI_IOCFC_I2H_CLEAR_STATS_RSP: /* * check for timer pop before processing the rsp */ if (iocfc->stats_busy == BFA_FALSE || iocfc->stats_status == BFA_STATUS_ETIMER) break; bfa_timer_stop(&iocfc->stats_timer); iocfc->stats_status = BFA_STATUS_OK; bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_clr_cb, bfa); break; case BFI_IOCFC_I2H_UPDATEQ_RSP: iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK); break; default: bfa_assert(0); } } #ifndef BFA_BIOS_BUILD void bfa_adapter_get_attr(struct bfa_s *bfa, struct bfa_adapter_attr_s *ad_attr) { bfa_ioc_get_adapter_attr(&bfa->ioc, ad_attr); } u64 bfa_adapter_get_id(struct bfa_s *bfa) { return bfa_ioc_get_adid(&bfa->ioc); } void bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr) { struct bfa_iocfc_s *iocfc = &bfa->iocfc; attr->intr_attr = iocfc->cfginfo->intr_attr; attr->config = iocfc->cfg; } bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr) { struct bfa_iocfc_s *iocfc = &bfa->iocfc; struct bfi_iocfc_set_intr_req_s *m; iocfc->cfginfo->intr_attr = *attr; if (!bfa_iocfc_is_operational(bfa)) return BFA_STATUS_OK; m = bfa_reqq_next(bfa, BFA_REQQ_IOC); if (!m) return BFA_STATUS_DEVBUSY; bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ, bfa_lpuid(bfa)); m->coalesce = attr->coalesce; m->delay = bfa_os_htons(attr->delay); m->latency = bfa_os_htons(attr->latency); bfa_trc(bfa, attr->delay); bfa_trc(bfa, attr->latency); bfa_reqq_produce(bfa, BFA_REQQ_IOC); return BFA_STATUS_OK; } void bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa) { struct bfa_iocfc_s *iocfc = &bfa->iocfc; iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1); bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa); } bfa_status_t bfa_iocfc_get_stats(struct bfa_s *bfa, struct bfa_iocfc_stats_s *stats, bfa_cb_ioc_t cbfn, void *cbarg) { struct bfa_iocfc_s *iocfc = &bfa->iocfc; if (iocfc->stats_busy) { bfa_trc(bfa, iocfc->stats_busy); return BFA_STATUS_DEVBUSY; } if (!bfa_iocfc_is_operational(bfa)) { bfa_trc(bfa, 0); return BFA_STATUS_IOC_NON_OP; } iocfc->stats_busy = BFA_TRUE; iocfc->stats_ret = stats; iocfc->stats_cbfn = cbfn; iocfc->stats_cbarg = cbarg; bfa_iocfc_stats_query(bfa); return BFA_STATUS_OK; } bfa_status_t bfa_iocfc_clear_stats(struct bfa_s *bfa, bfa_cb_ioc_t cbfn, void *cbarg) { struct bfa_iocfc_s *iocfc = &bfa->iocfc; if (iocfc->stats_busy) { bfa_trc(bfa, iocfc->stats_busy); return BFA_STATUS_DEVBUSY; } if (!bfa_iocfc_is_operational(bfa)) { bfa_trc(bfa, 0); return BFA_STATUS_IOC_NON_OP; } iocfc->stats_busy = BFA_TRUE; iocfc->stats_cbfn = cbfn; iocfc->stats_cbarg = cbarg; bfa_iocfc_stats_clear(bfa); return BFA_STATUS_OK; } /** * Enable IOC after it is disabled. 
*/ void bfa_iocfc_enable(struct bfa_s *bfa) { bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0, "IOC Enable"); bfa_ioc_enable(&bfa->ioc); } void bfa_iocfc_disable(struct bfa_s *bfa) { bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0, "IOC Disable"); bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE; bfa->rme_process = BFA_FALSE; bfa_ioc_disable(&bfa->ioc); } bfa_boolean_t bfa_iocfc_is_operational(struct bfa_s *bfa) { return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone; } /** * Return boot target port wwns -- read from boot information in flash. */ void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t **wwns) { struct bfa_iocfc_s *iocfc = &bfa->iocfc; struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; *nwwns = cfgrsp->bootwwns.nwwns; *wwns = cfgrsp->bootwwns.wwn; } #endif
gpl-2.0
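Editor's aside: the DMA sizing arithmetic in bfa_iocfc_cqs_sz() above, redone as a standalone sketch. The constants (message size 128, DMA alignment 256, cacheline 64) and the queue geometry are assumptions for illustration only, not values taken from the real bfi headers.

#include <stdio.h>

#define ROUNDUP(x, a)	((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
	unsigned num_cqs = 4, num_reqq_elems = 256, num_rspq_elems = 64;
	unsigned lmsg_sz = 128, dma_align = 256, cacheline = 64;
	unsigned per_reqq_sz, per_rspq_sz, dm_len = 0, i;

	per_reqq_sz = ROUNDUP(num_reqq_elems * lmsg_sz, dma_align);
	per_rspq_sz = ROUNDUP(num_rspq_elems * lmsg_sz, dma_align);

	for (i = 0; i < num_cqs; i++)		/* request + response rings */
		dm_len += per_reqq_sz + per_rspq_sz;
	for (i = 0; i < num_cqs; i++)		/* shadow CI/PI, a cacheline each */
		dm_len += 2 * cacheline;

	printf("per_reqq=%u per_rspq=%u total=%u bytes\n",
	       per_reqq_sz, per_rspq_sz, dm_len);
	return 0;
}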
agrabren/android_kernel_htc_shooter_u
kernel/time/clocksource.c
1840
25943
/* * linux/kernel/time/clocksource.c * * This file contains the functions which manage clocksource drivers. * * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * TODO WishList: * o Allow clocksource drivers to be unregistered */ #include <linux/clocksource.h> #include <linux/sysdev.h> #include <linux/init.h> #include <linux/module.h> #include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */ #include <linux/tick.h> #include <linux/kthread.h> void timecounter_init(struct timecounter *tc, const struct cyclecounter *cc, u64 start_tstamp) { tc->cc = cc; tc->cycle_last = cc->read(cc); tc->nsec = start_tstamp; } EXPORT_SYMBOL_GPL(timecounter_init); /** * timecounter_read_delta - get nanoseconds since last call of this function * @tc: Pointer to time counter * * When the underlying cycle counter runs over, this will be handled * correctly as long as it does not run over more than once between * calls. * * The first call to this function for a new time counter initializes * the time tracking and returns an undefined result. */ static u64 timecounter_read_delta(struct timecounter *tc) { cycle_t cycle_now, cycle_delta; u64 ns_offset; /* read cycle counter: */ cycle_now = tc->cc->read(tc->cc); /* calculate the delta since the last timecounter_read_delta(): */ cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask; /* convert to nanoseconds: */ ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta); /* update time stamp of timecounter_read_delta() call: */ tc->cycle_last = cycle_now; return ns_offset; } u64 timecounter_read(struct timecounter *tc) { u64 nsec; /* increment time by nanoseconds since last call */ nsec = timecounter_read_delta(tc); nsec += tc->nsec; tc->nsec = nsec; return nsec; } EXPORT_SYMBOL_GPL(timecounter_read); u64 timecounter_cyc2time(struct timecounter *tc, cycle_t cycle_tstamp) { u64 cycle_delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask; u64 nsec; /* * Instead of always treating cycle_tstamp as more recent * than tc->cycle_last, detect when it is too far in the * future and treat it as old time stamp instead. */ if (cycle_delta > tc->cc->mask / 2) { cycle_delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask; nsec = tc->nsec - cyclecounter_cyc2ns(tc->cc, cycle_delta); } else { nsec = cyclecounter_cyc2ns(tc->cc, cycle_delta) + tc->nsec; } return nsec; } EXPORT_SYMBOL_GPL(timecounter_cyc2time); /** * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks * @mult: pointer to mult variable * @shift: pointer to shift variable * @from: frequency to convert from * @to: frequency to convert to * @maxsec: guaranteed runtime conversion range in seconds * * The function evaluates the shift/mult pair for the scaled math * operations of clocksources and clockevents. * * @to and @from are frequency values in HZ. 
For clock sources @to is * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock * event @to is the counter frequency and @from is NSEC_PER_SEC. * * The @maxsec conversion range argument controls the time frame in * seconds which must be covered by the runtime conversion with the * calculated mult and shift factors. This guarantees that no 64bit * overflow happens when the input value of the conversion is * multiplied with the calculated mult factor. Larger ranges may * reduce the conversion accuracy by chosing smaller mult and shift * factors. */ void clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec) { u64 tmp; u32 sft, sftacc= 32; /* * Calculate the shift factor which is limiting the conversion * range: */ tmp = ((u64)maxsec * from) >> 32; while (tmp) { tmp >>=1; sftacc--; } /* * Find the conversion shift/mult pair which has the best * accuracy and fits the maxsec conversion range: */ for (sft = 32; sft > 0; sft--) { tmp = (u64) to << sft; tmp += from / 2; do_div(tmp, from); if ((tmp >> sftacc) == 0) break; } *mult = tmp; *shift = sft; } /*[Clocksource internal variables]--------- * curr_clocksource: * currently selected clocksource. * clocksource_list: * linked list with the registered clocksources * clocksource_mutex: * protects manipulations to curr_clocksource and the clocksource_list * override_name: * Name of the user-specified clocksource. */ static struct clocksource *curr_clocksource; static LIST_HEAD(clocksource_list); static DEFINE_MUTEX(clocksource_mutex); static char override_name[32]; static int finished_booting; #ifdef CONFIG_CLOCKSOURCE_WATCHDOG static void clocksource_watchdog_work(struct work_struct *work); static LIST_HEAD(watchdog_list); static struct clocksource *watchdog; static struct timer_list watchdog_timer; static DECLARE_WORK(watchdog_work, clocksource_watchdog_work); static DEFINE_SPINLOCK(watchdog_lock); static int watchdog_running; static int clocksource_watchdog_kthread(void *data); static void __clocksource_change_rating(struct clocksource *cs, int rating); /* * Interval: 0.5sec Threshold: 0.0625s */ #define WATCHDOG_INTERVAL (HZ >> 1) #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4) static void clocksource_watchdog_work(struct work_struct *work) { /* * If kthread_run fails the next watchdog scan over the * watchdog_list will find the unstable clock again. */ kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog"); } static void __clocksource_unstable(struct clocksource *cs) { cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG); cs->flags |= CLOCK_SOURCE_UNSTABLE; if (finished_booting) schedule_work(&watchdog_work); } static void clocksource_unstable(struct clocksource *cs, int64_t delta) { printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n", cs->name, delta); __clocksource_unstable(cs); } /** * clocksource_mark_unstable - mark clocksource unstable via watchdog * @cs: clocksource to be marked unstable * * This function is called instead of clocksource_change_rating from * cpu hotplug code to avoid a deadlock between the clocksource mutex * and the cpu hotplug mutex. It defers the update of the clocksource * to the watchdog thread. 
*/ void clocksource_mark_unstable(struct clocksource *cs) { unsigned long flags; spin_lock_irqsave(&watchdog_lock, flags); if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) { if (list_empty(&cs->wd_list)) list_add(&cs->wd_list, &watchdog_list); __clocksource_unstable(cs); } spin_unlock_irqrestore(&watchdog_lock, flags); } static void clocksource_watchdog(unsigned long data) { struct clocksource *cs; cycle_t csnow, wdnow; int64_t wd_nsec, cs_nsec; int next_cpu; spin_lock(&watchdog_lock); if (!watchdog_running) goto out; list_for_each_entry(cs, &watchdog_list, wd_list) { /* Clocksource already marked unstable? */ if (cs->flags & CLOCK_SOURCE_UNSTABLE) { if (finished_booting) schedule_work(&watchdog_work); continue; } local_irq_disable(); csnow = cs->read(cs); wdnow = watchdog->read(watchdog); local_irq_enable(); /* Clocksource initialized ? */ if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) { cs->flags |= CLOCK_SOURCE_WATCHDOG; cs->wd_last = wdnow; cs->cs_last = csnow; continue; } wd_nsec = clocksource_cyc2ns((wdnow - cs->wd_last) & watchdog->mask, watchdog->mult, watchdog->shift); cs_nsec = clocksource_cyc2ns((csnow - cs->cs_last) & cs->mask, cs->mult, cs->shift); cs->cs_last = csnow; cs->wd_last = wdnow; /* Check the deviation from the watchdog clocksource. */ if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) { clocksource_unstable(cs, cs_nsec - wd_nsec); continue; } if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) && (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) { cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; /* * We just marked the clocksource as highres-capable, * notify the rest of the system as well so that we * transition into high-res mode: */ tick_clock_notify(); } } /* * Cycle through CPUs to check if the CPUs stay synchronized * to each other. */ next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask); if (next_cpu >= nr_cpu_ids) next_cpu = cpumask_first(cpu_online_mask); watchdog_timer.expires += WATCHDOG_INTERVAL; add_timer_on(&watchdog_timer, next_cpu); out: spin_unlock(&watchdog_lock); } static inline void clocksource_start_watchdog(void) { if (watchdog_running || !watchdog || list_empty(&watchdog_list)) return; init_timer(&watchdog_timer); watchdog_timer.function = clocksource_watchdog; watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL; add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask)); watchdog_running = 1; } static inline void clocksource_stop_watchdog(void) { if (!watchdog_running || (watchdog && !list_empty(&watchdog_list))) return; del_timer(&watchdog_timer); watchdog_running = 0; } static inline void clocksource_reset_watchdog(void) { struct clocksource *cs; list_for_each_entry(cs, &watchdog_list, wd_list) cs->flags &= ~CLOCK_SOURCE_WATCHDOG; } static void clocksource_resume_watchdog(void) { unsigned long flags; /* * We use trylock here to avoid a potential dead lock when * kgdb calls this code after the kernel has been stopped with * watchdog_lock held. When watchdog_lock is held we just * return and accept, that the watchdog might trigger and mark * the monitored clock source (usually TSC) unstable. * * This does not affect the other caller clocksource_resume() * because at this point the kernel is UP, interrupts are * disabled and nothing can hold watchdog_lock. 
*/ if (!spin_trylock_irqsave(&watchdog_lock, flags)) return; clocksource_reset_watchdog(); spin_unlock_irqrestore(&watchdog_lock, flags); } static void clocksource_enqueue_watchdog(struct clocksource *cs) { unsigned long flags; spin_lock_irqsave(&watchdog_lock, flags); if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) { /* cs is a clocksource to be watched. */ list_add(&cs->wd_list, &watchdog_list); cs->flags &= ~CLOCK_SOURCE_WATCHDOG; } else { /* cs is a watchdog. */ if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; /* Pick the best watchdog. */ if (!watchdog || cs->rating > watchdog->rating) { watchdog = cs; /* Reset watchdog cycles */ clocksource_reset_watchdog(); } } /* Check if the watchdog timer needs to be started. */ clocksource_start_watchdog(); spin_unlock_irqrestore(&watchdog_lock, flags); } static void clocksource_dequeue_watchdog(struct clocksource *cs) { struct clocksource *tmp; unsigned long flags; spin_lock_irqsave(&watchdog_lock, flags); if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) { /* cs is a watched clocksource. */ list_del_init(&cs->wd_list); } else if (cs == watchdog) { /* Reset watchdog cycles */ clocksource_reset_watchdog(); /* Current watchdog is removed. Find an alternative. */ watchdog = NULL; list_for_each_entry(tmp, &clocksource_list, list) { if (tmp == cs || tmp->flags & CLOCK_SOURCE_MUST_VERIFY) continue; if (!watchdog || tmp->rating > watchdog->rating) watchdog = tmp; } } cs->flags &= ~CLOCK_SOURCE_WATCHDOG; /* Check if the watchdog timer needs to be stopped. */ clocksource_stop_watchdog(); spin_unlock_irqrestore(&watchdog_lock, flags); } static int clocksource_watchdog_kthread(void *data) { struct clocksource *cs, *tmp; unsigned long flags; LIST_HEAD(unstable); mutex_lock(&clocksource_mutex); spin_lock_irqsave(&watchdog_lock, flags); list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) if (cs->flags & CLOCK_SOURCE_UNSTABLE) { list_del_init(&cs->wd_list); list_add(&cs->wd_list, &unstable); } /* Check if the watchdog timer needs to be stopped. */ clocksource_stop_watchdog(); spin_unlock_irqrestore(&watchdog_lock, flags); /* Needs to be done outside of watchdog lock */ list_for_each_entry_safe(cs, tmp, &unstable, wd_list) { list_del_init(&cs->wd_list); __clocksource_change_rating(cs, 0); } mutex_unlock(&clocksource_mutex); return 0; } #else /* CONFIG_CLOCKSOURCE_WATCHDOG */ static void clocksource_enqueue_watchdog(struct clocksource *cs) { if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; } static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { } static inline void clocksource_resume_watchdog(void) { } static inline int clocksource_watchdog_kthread(void *data) { return 0; } #endif /* CONFIG_CLOCKSOURCE_WATCHDOG */ /** * clocksource_suspend - suspend the clocksource(s) */ void clocksource_suspend(void) { struct clocksource *cs; list_for_each_entry_reverse(cs, &clocksource_list, list) if (cs->suspend) cs->suspend(cs); } /** * clocksource_resume - resume the clocksource(s) */ void clocksource_resume(void) { struct clocksource *cs; list_for_each_entry(cs, &clocksource_list, list) if (cs->resume) cs->resume(cs); clocksource_resume_watchdog(); } /** * clocksource_touch_watchdog - Update watchdog * * Update the watchdog after exception contexts such as kgdb so as not * to incorrectly trip the watchdog. This might fail when the kernel * was stopped in code which holds watchdog_lock. 
*/ void clocksource_touch_watchdog(void) { clocksource_resume_watchdog(); } /** * clocksource_max_deferment - Returns max time the clocksource can be deferred * @cs: Pointer to clocksource * */ static u64 clocksource_max_deferment(struct clocksource *cs) { u64 max_nsecs, max_cycles; /* * Calculate the maximum number of cycles that we can pass to the * cyc2ns function without overflowing a 64-bit signed result. The * maximum number of cycles is equal to ULLONG_MAX/cs->mult which * is equivalent to the below. * max_cycles < (2^63)/cs->mult * max_cycles < 2^(log2((2^63)/cs->mult)) * max_cycles < 2^(log2(2^63) - log2(cs->mult)) * max_cycles < 2^(63 - log2(cs->mult)) * max_cycles < 1 << (63 - log2(cs->mult)) * Please note that we add 1 to the result of the log2 to account for * any rounding errors, ensure the above inequality is satisfied and * no overflow will occur. */ max_cycles = 1ULL << (63 - (ilog2(cs->mult) + 1)); /* * The actual maximum number of cycles we can defer the clocksource is * determined by the minimum of max_cycles and cs->mask. */ max_cycles = min_t(u64, max_cycles, (u64) cs->mask); max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult, cs->shift); /* * To ensure that the clocksource does not wrap whilst we are idle, * limit the time the clocksource can be deferred by 12.5%. Please * note a margin of 12.5% is used because this can be computed with * a shift, versus say 10% which would require division. */ return max_nsecs - (max_nsecs >> 3); } #ifndef CONFIG_ARCH_USES_GETTIMEOFFSET /** * clocksource_select - Select the best clocksource available * * Private function. Must hold clocksource_mutex when called. * * Select the clocksource with the best rating, or the clocksource, * which is selected by userspace override. */ static void clocksource_select(void) { struct clocksource *best, *cs; if (!finished_booting || list_empty(&clocksource_list)) return; /* First clocksource on the list has the best rating. */ best = list_first_entry(&clocksource_list, struct clocksource, list); /* Check for the override clocksource. */ list_for_each_entry(cs, &clocksource_list, list) { if (strcmp(cs->name, override_name) != 0) continue; /* * Check to make sure we don't switch to a non-highres * capable clocksource if the tick code is in oneshot * mode (highres or nohz) */ if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && tick_oneshot_mode_active()) { /* Override clocksource cannot be used. */ printk(KERN_WARNING "Override clocksource %s is not " "HRT compatible. Cannot switch while in " "HRT/NOHZ mode\n", cs->name); override_name[0] = 0; } else /* Override clocksource can be used. */ best = cs; break; } if (curr_clocksource != best) { printk(KERN_INFO "Switching to clocksource %s\n", best->name); curr_clocksource = best; timekeeping_notify(curr_clocksource); } } #else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */ static inline void clocksource_select(void) { } #endif /* * clocksource_done_booting - Called near the end of core bootup * * Hack to avoid lots of clocksource churn at boot time. * We use fs_initcall because we want this to start before * device_initcall but after subsys_initcall. 
*/ static int __init clocksource_done_booting(void) { mutex_lock(&clocksource_mutex); curr_clocksource = clocksource_default_clock(); mutex_unlock(&clocksource_mutex); finished_booting = 1; /* * Run the watchdog first to eliminate unstable clock sources */ clocksource_watchdog_kthread(NULL); mutex_lock(&clocksource_mutex); clocksource_select(); mutex_unlock(&clocksource_mutex); return 0; } fs_initcall(clocksource_done_booting); /* * Enqueue the clocksource sorted by rating */ static void clocksource_enqueue(struct clocksource *cs) { struct list_head *entry = &clocksource_list; struct clocksource *tmp; list_for_each_entry(tmp, &clocksource_list, list) /* Keep track of the place, where to insert */ if (tmp->rating >= cs->rating) entry = &tmp->list; list_add(&cs->list, entry); } /** * __clocksource_updatefreq_scale - Used to update clocksource with new freq * @cs: clocksource to be registered * @scale: Scale factor multiplied against freq to get clocksource hz * @freq: clocksource frequency (cycles per second) divided by scale * * This should only be called from the clocksource->enable() method. * * This *SHOULD NOT* be called directly! Please use the * clocksource_updatefreq_hz() or clocksource_updatefreq_khz() helper functions. */ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq) { u64 sec; /* * Calc the maximum number of seconds which we can run before * wrapping around. For clocksources which have a mask > 32bit * we need to limit the max sleep time to have a good * conversion precision. 10 minutes is still a reasonable * amount. That results in a shift value of 24 for a * clocksource with mask >= 40bit and f >= 4GHz. That maps to * ~ 0.06ppm granularity for NTP. We apply the same 12.5% * margin as we do in clocksource_max_deferment() */ sec = (cs->mask - (cs->mask >> 3)); do_div(sec, freq); do_div(sec, scale); if (!sec) sec = 1; else if (sec > 600 && cs->mask > UINT_MAX) sec = 600; clocks_calc_mult_shift(&cs->mult, &cs->shift, freq, NSEC_PER_SEC / scale, sec * scale); cs->max_idle_ns = clocksource_max_deferment(cs); } EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale); /** * __clocksource_register_scale - Used to install new clocksources * @cs: clocksource to be registered * @scale: Scale factor multiplied against freq to get clocksource hz * @freq: clocksource frequency (cycles per second) divided by scale * * Returns -EBUSY if registration fails, zero otherwise. * * This *SHOULD NOT* be called directly! Please use the * clocksource_register_hz() or clocksource_register_khz() helper functions. */ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq) { /* Initialize mult/shift and max_idle_ns */ __clocksource_updatefreq_scale(cs, scale, freq); /* Add clocksource to the clocksource list */ mutex_lock(&clocksource_mutex); clocksource_enqueue(cs); clocksource_enqueue_watchdog(cs); clocksource_select(); mutex_unlock(&clocksource_mutex); return 0; } EXPORT_SYMBOL_GPL(__clocksource_register_scale); /** * clocksource_register - Used to install new clocksources * @cs: clocksource to be registered * * Returns -EBUSY if registration fails, zero otherwise. 
*/ int clocksource_register(struct clocksource *cs) { /* calculate max idle time permitted for this clocksource */ cs->max_idle_ns = clocksource_max_deferment(cs); mutex_lock(&clocksource_mutex); clocksource_enqueue(cs); clocksource_enqueue_watchdog(cs); clocksource_select(); mutex_unlock(&clocksource_mutex); return 0; } EXPORT_SYMBOL(clocksource_register); static void __clocksource_change_rating(struct clocksource *cs, int rating) { list_del(&cs->list); cs->rating = rating; clocksource_enqueue(cs); clocksource_select(); } /** * clocksource_change_rating - Change the rating of a registered clocksource */ void clocksource_change_rating(struct clocksource *cs, int rating) { mutex_lock(&clocksource_mutex); __clocksource_change_rating(cs, rating); mutex_unlock(&clocksource_mutex); } EXPORT_SYMBOL(clocksource_change_rating); /** * clocksource_unregister - remove a registered clocksource */ void clocksource_unregister(struct clocksource *cs) { mutex_lock(&clocksource_mutex); clocksource_dequeue_watchdog(cs); list_del(&cs->list); clocksource_select(); mutex_unlock(&clocksource_mutex); } EXPORT_SYMBOL(clocksource_unregister); #ifdef CONFIG_SYSFS /** * sysfs_show_current_clocksources - sysfs interface for current clocksource * @dev: unused * @buf: char buffer to be filled with clocksource list * * Provides sysfs interface for listing current clocksource. */ static ssize_t sysfs_show_current_clocksources(struct sys_device *dev, struct sysdev_attribute *attr, char *buf) { ssize_t count = 0; mutex_lock(&clocksource_mutex); count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name); mutex_unlock(&clocksource_mutex); return count; } /** * sysfs_override_clocksource - interface for manually overriding clocksource * @dev: unused * @buf: name of override clocksource * @count: length of buffer * * Takes input from sysfs interface for manually overriding the default * clocksource selection. */ static ssize_t sysfs_override_clocksource(struct sys_device *dev, struct sysdev_attribute *attr, const char *buf, size_t count) { size_t ret = count; /* strings from sysfs write are not 0 terminated! 
*/ if (count >= sizeof(override_name)) return -EINVAL; /* strip of \n: */ if (buf[count-1] == '\n') count--; mutex_lock(&clocksource_mutex); if (count > 0) memcpy(override_name, buf, count); override_name[count] = 0; clocksource_select(); mutex_unlock(&clocksource_mutex); return ret; } /** * sysfs_show_available_clocksources - sysfs interface for listing clocksource * @dev: unused * @buf: char buffer to be filled with clocksource list * * Provides sysfs interface for listing registered clocksources */ static ssize_t sysfs_show_available_clocksources(struct sys_device *dev, struct sysdev_attribute *attr, char *buf) { struct clocksource *src; ssize_t count = 0; mutex_lock(&clocksource_mutex); list_for_each_entry(src, &clocksource_list, list) { /* * Don't show non-HRES clocksource if the tick code is * in one shot mode (highres=on or nohz=on) */ if (!tick_oneshot_mode_active() || (src->flags & CLOCK_SOURCE_VALID_FOR_HRES)) count += snprintf(buf + count, max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "%s ", src->name); } mutex_unlock(&clocksource_mutex); count += snprintf(buf + count, max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n"); return count; } /* * Sysfs setup bits: */ static SYSDEV_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources, sysfs_override_clocksource); static SYSDEV_ATTR(available_clocksource, 0444, sysfs_show_available_clocksources, NULL); static struct sysdev_class clocksource_sysclass = { .name = "clocksource", }; static struct sys_device device_clocksource = { .id = 0, .cls = &clocksource_sysclass, }; static int __init init_clocksource_sysfs(void) { int error = sysdev_class_register(&clocksource_sysclass); if (!error) error = sysdev_register(&device_clocksource); if (!error) error = sysdev_create_file( &device_clocksource, &attr_current_clocksource); if (!error) error = sysdev_create_file( &device_clocksource, &attr_available_clocksource); return error; } device_initcall(init_clocksource_sysfs); #endif /* CONFIG_SYSFS */ /** * boot_override_clocksource - boot clock override * @str: override name * * Takes a clocksource= boot argument and uses it * as the clocksource override name. */ static int __init boot_override_clocksource(char* str) { mutex_lock(&clocksource_mutex); if (str) strlcpy(override_name, str, sizeof(override_name)); mutex_unlock(&clocksource_mutex); return 1; } __setup("clocksource=", boot_override_clocksource); /** * boot_override_clock - Compatibility layer for deprecated boot option * @str: override name * * DEPRECATED! Takes a clock= boot argument and uses it * as the clocksource override name */ static int __init boot_override_clock(char* str) { if (!strcmp(str, "pmtmr")) { printk("Warning: clock=pmtmr is deprecated. " "Use clocksource=acpi_pm.\n"); return boot_override_clocksource("acpi_pm"); } printk("Warning! clock= boot option is deprecated. " "Use clocksource=xyz\n"); return boot_override_clocksource(str); } __setup("clock=", boot_override_clock);
gpl-2.0
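The two core tricks in clocksource.c — choosing a mult/shift pair so that ns = (cycles * mult) >> shift never overflows 64 bits for the guaranteed range, and the masked subtraction in timecounter_read_delta() that survives counter wrap-around — are easy to check in isolation. Below is a minimal standalone userspace sketch of both ideas; the helper names (calc_mult_shift, cyc2ns) and the 19.2 MHz example counter are illustrative assumptions, not taken from the file above.

/* Userspace sketch of the clocksource scaled-math idea:
 * ns = (cycles * mult) >> shift, with mult/shift chosen so the
 * product stays within 64 bits for a guaranteed number of seconds.
 * Compile: gcc -O2 demo.c && ./a.out
 */
#include <stdint.h>
#include <stdio.h>

/* Simplified version of clocks_calc_mult_shift(): convert "from" Hz
 * to "to" Hz without overflowing 64-bit math for maxsec seconds. */
static void calc_mult_shift(uint32_t *mult, uint32_t *shift,
                            uint32_t from, uint32_t to, uint32_t maxsec)
{
    uint64_t tmp;
    uint32_t sft, sftacc = 32;

    /* Shift budget consumed by the maxsec * from product. */
    tmp = ((uint64_t)maxsec * from) >> 32;
    while (tmp) {
        tmp >>= 1;
        sftacc--;
    }
    /* Largest shift (best accuracy) whose mult fits the budget. */
    for (sft = 32; sft > 0; sft--) {
        tmp = (uint64_t)to << sft;
        tmp += from / 2;
        tmp /= from;                 /* do_div() in the kernel */
        if ((tmp >> sftacc) == 0)
            break;
    }
    *mult = (uint32_t)tmp;
    *shift = sft;
}

/* clocksource_cyc2ns() equivalent. */
static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
    return (cycles * mult) >> shift;
}

int main(void)
{
    uint32_t mult, shift;
    const uint32_t freq = 19200000;         /* illustrative 19.2 MHz counter */
    const uint64_t mask = (1ULL << 32) - 1; /* 32-bit counter */

    calc_mult_shift(&mult, &shift, freq, 1000000000u, 600);
    printf("mult=%u shift=%u\n", mult, shift);
    printf("1 second of cycles -> %llu ns\n",
           (unsigned long long)cyc2ns(freq, mult, shift));

    /* Wrap-safe delta as in timecounter_read_delta(): subtracting and
     * masking is correct even when the counter rolled over, as long
     * as it wrapped at most once between reads. */
    uint64_t last = mask - 100, now = 50;
    printf("delta across wrap = %llu cycles\n",
           (unsigned long long)((now - last) & mask));
    return 0;
}

With these inputs the sketch settles on shift = 24, matching the "shift value of 24" the comment in __clocksource_updatefreq_scale() mentions for a 600-second range.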
ptmr3/GalaxyNote2_Kernel2
arch/sparc/kernel/process_64.c
2608
20656
/* arch/sparc64/kernel/process.c * * Copyright (C) 1995, 1996, 2008 David S. Miller (davem@davemloft.net) * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) * Copyright (C) 1997, 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) */ /* * This file handles the architecture-dependent parts of process handling.. */ #include <stdarg.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/user.h> #include <linux/delay.h> #include <linux/compat.h> #include <linux/tick.h> #include <linux/init.h> #include <linux/cpu.h> #include <linux/elfcore.h> #include <linux/sysrq.h> #include <linux/nmi.h> #include <asm/uaccess.h> #include <asm/system.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/pstate.h> #include <asm/elf.h> #include <asm/fpumacro.h> #include <asm/head.h> #include <asm/cpudata.h> #include <asm/mmu_context.h> #include <asm/unistd.h> #include <asm/hypervisor.h> #include <asm/syscalls.h> #include <asm/irq_regs.h> #include <asm/smp.h> #include "kstack.h" static void sparc64_yield(int cpu) { if (tlb_type != hypervisor) { touch_nmi_watchdog(); return; } clear_thread_flag(TIF_POLLING_NRFLAG); smp_mb__after_clear_bit(); while (!need_resched() && !cpu_is_offline(cpu)) { unsigned long pstate; /* Disable interrupts. */ __asm__ __volatile__( "rdpr %%pstate, %0\n\t" "andn %0, %1, %0\n\t" "wrpr %0, %%g0, %%pstate" : "=&r" (pstate) : "i" (PSTATE_IE)); if (!need_resched() && !cpu_is_offline(cpu)) sun4v_cpu_yield(); /* Re-enable interrupts. */ __asm__ __volatile__( "rdpr %%pstate, %0\n\t" "or %0, %1, %0\n\t" "wrpr %0, %%g0, %%pstate" : "=&r" (pstate) : "i" (PSTATE_IE)); } set_thread_flag(TIF_POLLING_NRFLAG); } /* The idle loop on sparc64. 
*/ void cpu_idle(void) { int cpu = smp_processor_id(); set_thread_flag(TIF_POLLING_NRFLAG); while(1) { tick_nohz_stop_sched_tick(1); while (!need_resched() && !cpu_is_offline(cpu)) sparc64_yield(cpu); tick_nohz_restart_sched_tick(); preempt_enable_no_resched(); #ifdef CONFIG_HOTPLUG_CPU if (cpu_is_offline(cpu)) cpu_play_dead(); #endif schedule(); preempt_disable(); } } #ifdef CONFIG_COMPAT static void show_regwindow32(struct pt_regs *regs) { struct reg_window32 __user *rw; struct reg_window32 r_w; mm_segment_t old_fs; __asm__ __volatile__ ("flushw"); rw = compat_ptr((unsigned)regs->u_regs[14]); old_fs = get_fs(); set_fs (USER_DS); if (copy_from_user (&r_w, rw, sizeof(r_w))) { set_fs (old_fs); return; } set_fs (old_fs); printk("l0: %08x l1: %08x l2: %08x l3: %08x " "l4: %08x l5: %08x l6: %08x l7: %08x\n", r_w.locals[0], r_w.locals[1], r_w.locals[2], r_w.locals[3], r_w.locals[4], r_w.locals[5], r_w.locals[6], r_w.locals[7]); printk("i0: %08x i1: %08x i2: %08x i3: %08x " "i4: %08x i5: %08x i6: %08x i7: %08x\n", r_w.ins[0], r_w.ins[1], r_w.ins[2], r_w.ins[3], r_w.ins[4], r_w.ins[5], r_w.ins[6], r_w.ins[7]); } #else #define show_regwindow32(regs) do { } while (0) #endif static void show_regwindow(struct pt_regs *regs) { struct reg_window __user *rw; struct reg_window *rwk; struct reg_window r_w; mm_segment_t old_fs; if ((regs->tstate & TSTATE_PRIV) || !(test_thread_flag(TIF_32BIT))) { __asm__ __volatile__ ("flushw"); rw = (struct reg_window __user *) (regs->u_regs[14] + STACK_BIAS); rwk = (struct reg_window *) (regs->u_regs[14] + STACK_BIAS); if (!(regs->tstate & TSTATE_PRIV)) { old_fs = get_fs(); set_fs (USER_DS); if (copy_from_user (&r_w, rw, sizeof(r_w))) { set_fs (old_fs); return; } rwk = &r_w; set_fs (old_fs); } } else { show_regwindow32(regs); return; } printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n", rwk->locals[0], rwk->locals[1], rwk->locals[2], rwk->locals[3]); printk("l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n", rwk->locals[4], rwk->locals[5], rwk->locals[6], rwk->locals[7]); printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n", rwk->ins[0], rwk->ins[1], rwk->ins[2], rwk->ins[3]); printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n", rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]); if (regs->tstate & TSTATE_PRIV) printk("I7: <%pS>\n", (void *) rwk->ins[7]); } void show_regs(struct pt_regs *regs) { printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate, regs->tpc, regs->tnpc, regs->y, print_tainted()); printk("TPC: <%pS>\n", (void *) regs->tpc); printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n", regs->u_regs[0], regs->u_regs[1], regs->u_regs[2], regs->u_regs[3]); printk("g4: %016lx g5: %016lx g6: %016lx g7: %016lx\n", regs->u_regs[4], regs->u_regs[5], regs->u_regs[6], regs->u_regs[7]); printk("o0: %016lx o1: %016lx o2: %016lx o3: %016lx\n", regs->u_regs[8], regs->u_regs[9], regs->u_regs[10], regs->u_regs[11]); printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n", regs->u_regs[12], regs->u_regs[13], regs->u_regs[14], regs->u_regs[15]); printk("RPC: <%pS>\n", (void *) regs->u_regs[15]); show_regwindow(regs); show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]); } struct global_reg_snapshot global_reg_snapshot[NR_CPUS]; static DEFINE_SPINLOCK(global_reg_snapshot_lock); static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs, int this_cpu) { flushw_all(); global_reg_snapshot[this_cpu].tstate = regs->tstate; global_reg_snapshot[this_cpu].tpc = regs->tpc; global_reg_snapshot[this_cpu].tnpc = regs->tnpc; 
global_reg_snapshot[this_cpu].o7 = regs->u_regs[UREG_I7]; if (regs->tstate & TSTATE_PRIV) { struct reg_window *rw; rw = (struct reg_window *) (regs->u_regs[UREG_FP] + STACK_BIAS); if (kstack_valid(tp, (unsigned long) rw)) { global_reg_snapshot[this_cpu].i7 = rw->ins[7]; rw = (struct reg_window *) (rw->ins[6] + STACK_BIAS); if (kstack_valid(tp, (unsigned long) rw)) global_reg_snapshot[this_cpu].rpc = rw->ins[7]; } } else { global_reg_snapshot[this_cpu].i7 = 0; global_reg_snapshot[this_cpu].rpc = 0; } global_reg_snapshot[this_cpu].thread = tp; } /* In order to avoid hangs we do not try to synchronize with the * global register dump client cpus. The last store they make is to * the thread pointer, so do a short poll waiting for that to become * non-NULL. */ static void __global_reg_poll(struct global_reg_snapshot *gp) { int limit = 0; while (!gp->thread && ++limit < 100) { barrier(); udelay(1); } } void arch_trigger_all_cpu_backtrace(void) { struct thread_info *tp = current_thread_info(); struct pt_regs *regs = get_irq_regs(); unsigned long flags; int this_cpu, cpu; if (!regs) regs = tp->kregs; spin_lock_irqsave(&global_reg_snapshot_lock, flags); memset(global_reg_snapshot, 0, sizeof(global_reg_snapshot)); this_cpu = raw_smp_processor_id(); __global_reg_self(tp, regs, this_cpu); smp_fetch_global_regs(); for_each_online_cpu(cpu) { struct global_reg_snapshot *gp = &global_reg_snapshot[cpu]; __global_reg_poll(gp); tp = gp->thread; printk("%c CPU[%3d]: TSTATE[%016lx] TPC[%016lx] TNPC[%016lx] TASK[%s:%d]\n", (cpu == this_cpu ? '*' : ' '), cpu, gp->tstate, gp->tpc, gp->tnpc, ((tp && tp->task) ? tp->task->comm : "NULL"), ((tp && tp->task) ? tp->task->pid : -1)); if (gp->tstate & TSTATE_PRIV) { printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n", (void *) gp->tpc, (void *) gp->o7, (void *) gp->i7, (void *) gp->rpc); } else { printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n", gp->tpc, gp->o7, gp->i7, gp->rpc); } } memset(global_reg_snapshot, 0, sizeof(global_reg_snapshot)); spin_unlock_irqrestore(&global_reg_snapshot_lock, flags); } #ifdef CONFIG_MAGIC_SYSRQ static void sysrq_handle_globreg(int key) { arch_trigger_all_cpu_backtrace(); } static struct sysrq_key_op sparc_globalreg_op = { .handler = sysrq_handle_globreg, .help_msg = "Globalregs", .action_msg = "Show Global CPU Regs", }; static int __init sparc_globreg_init(void) { return register_sysrq_key('y', &sparc_globalreg_op); } core_initcall(sparc_globreg_init); #endif unsigned long thread_saved_pc(struct task_struct *tsk) { struct thread_info *ti = task_thread_info(tsk); unsigned long ret = 0xdeadbeefUL; if (ti && ti->ksp) { unsigned long *sp; sp = (unsigned long *)(ti->ksp + STACK_BIAS); if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL && sp[14]) { unsigned long *fp; fp = (unsigned long *)(sp[14] + STACK_BIAS); if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL) ret = fp[15]; } } return ret; } /* Free current thread data structures etc.. */ void exit_thread(void) { struct thread_info *t = current_thread_info(); if (t->utraps) { if (t->utraps[0] < 2) kfree (t->utraps); else t->utraps[0]--; } } void flush_thread(void) { struct thread_info *t = current_thread_info(); struct mm_struct *mm; mm = t->task->mm; if (mm) tsb_context_switch(mm); set_thread_wsaved(0); /* Clear FPU register state. */ t->fpsaved[0] = 0; if (get_thread_current_ds() != ASI_AIUS) set_fs(USER_DS); } /* It's a bit more tricky when 64-bit tasks are involved... 
*/ static unsigned long clone_stackframe(unsigned long csp, unsigned long psp) { unsigned long fp, distance, rval; if (!(test_thread_flag(TIF_32BIT))) { csp += STACK_BIAS; psp += STACK_BIAS; __get_user(fp, &(((struct reg_window __user *)psp)->ins[6])); fp += STACK_BIAS; } else __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6])); /* Now align the stack as this is mandatory in the Sparc ABI * due to how register windows work. This hides the * restriction from thread libraries etc. */ csp &= ~15UL; distance = fp - psp; rval = (csp - distance); if (copy_in_user((void __user *) rval, (void __user *) psp, distance)) rval = 0; else if (test_thread_flag(TIF_32BIT)) { if (put_user(((u32)csp), &(((struct reg_window32 __user *)rval)->ins[6]))) rval = 0; } else { if (put_user(((u64)csp - STACK_BIAS), &(((struct reg_window __user *)rval)->ins[6]))) rval = 0; else rval = rval - STACK_BIAS; } return rval; } /* Standard stuff. */ static inline void shift_window_buffer(int first_win, int last_win, struct thread_info *t) { int i; for (i = first_win; i < last_win; i++) { t->rwbuf_stkptrs[i] = t->rwbuf_stkptrs[i+1]; memcpy(&t->reg_window[i], &t->reg_window[i+1], sizeof(struct reg_window)); } } void synchronize_user_stack(void) { struct thread_info *t = current_thread_info(); unsigned long window; flush_user_windows(); if ((window = get_thread_wsaved()) != 0) { int winsize = sizeof(struct reg_window); int bias = 0; if (test_thread_flag(TIF_32BIT)) winsize = sizeof(struct reg_window32); else bias = STACK_BIAS; window -= 1; do { unsigned long sp = (t->rwbuf_stkptrs[window] + bias); struct reg_window *rwin = &t->reg_window[window]; if (!copy_to_user((char __user *)sp, rwin, winsize)) { shift_window_buffer(window, get_thread_wsaved() - 1, t); set_thread_wsaved(get_thread_wsaved() - 1); } } while (window--); } } static void stack_unaligned(unsigned long sp) { siginfo_t info; info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRALN; info.si_addr = (void __user *) sp; info.si_trapno = 0; force_sig_info(SIGBUS, &info, current); } void fault_in_user_windows(void) { struct thread_info *t = current_thread_info(); unsigned long window; int winsize = sizeof(struct reg_window); int bias = 0; if (test_thread_flag(TIF_32BIT)) winsize = sizeof(struct reg_window32); else bias = STACK_BIAS; flush_user_windows(); window = get_thread_wsaved(); if (likely(window != 0)) { window -= 1; do { unsigned long sp = (t->rwbuf_stkptrs[window] + bias); struct reg_window *rwin = &t->reg_window[window]; if (unlikely(sp & 0x7UL)) stack_unaligned(sp); if (unlikely(copy_to_user((char __user *)sp, rwin, winsize))) goto barf; } while (window--); } set_thread_wsaved(0); return; barf: set_thread_wsaved(window + 1); do_exit(SIGILL); } asmlinkage long sparc_do_fork(unsigned long clone_flags, unsigned long stack_start, struct pt_regs *regs, unsigned long stack_size) { int __user *parent_tid_ptr, *child_tid_ptr; unsigned long orig_i1 = regs->u_regs[UREG_I1]; long ret; #ifdef CONFIG_COMPAT if (test_thread_flag(TIF_32BIT)) { parent_tid_ptr = compat_ptr(regs->u_regs[UREG_I2]); child_tid_ptr = compat_ptr(regs->u_regs[UREG_I4]); } else #endif { parent_tid_ptr = (int __user *) regs->u_regs[UREG_I2]; child_tid_ptr = (int __user *) regs->u_regs[UREG_I4]; } ret = do_fork(clone_flags, stack_start, regs, stack_size, parent_tid_ptr, child_tid_ptr); /* If we get an error and potentially restart the system * call, we're screwed because copy_thread() clobbered * the parent's %o1. So detect that case and restore it * here. 
*/ if ((unsigned long)ret >= -ERESTART_RESTARTBLOCK) regs->u_regs[UREG_I1] = orig_i1; return ret; } /* Copy a Sparc thread. The fork() return value conventions * under SunOS are nothing short of bletcherous: * Parent --> %o0 == childs pid, %o1 == 0 * Child --> %o0 == parents pid, %o1 == 1 */ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long unused, struct task_struct *p, struct pt_regs *regs) { struct thread_info *t = task_thread_info(p); struct sparc_stackf *parent_sf; unsigned long child_stack_sz; char *child_trap_frame; int kernel_thread; kernel_thread = (regs->tstate & TSTATE_PRIV) ? 1 : 0; parent_sf = ((struct sparc_stackf *) regs) - 1; /* Calculate offset to stack_frame & pt_regs */ child_stack_sz = ((STACKFRAME_SZ + TRACEREG_SZ) + (kernel_thread ? STACKFRAME_SZ : 0)); child_trap_frame = (task_stack_page(p) + (THREAD_SIZE - child_stack_sz)); memcpy(child_trap_frame, parent_sf, child_stack_sz); t->flags = (t->flags & ~((0xffUL << TI_FLAG_CWP_SHIFT) | (0xffUL << TI_FLAG_CURRENT_DS_SHIFT))) | (((regs->tstate + 1) & TSTATE_CWP) << TI_FLAG_CWP_SHIFT); t->new_child = 1; t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS; t->kregs = (struct pt_regs *) (child_trap_frame + sizeof(struct sparc_stackf)); t->fpsaved[0] = 0; if (kernel_thread) { struct sparc_stackf *child_sf = (struct sparc_stackf *) (child_trap_frame + (STACKFRAME_SZ + TRACEREG_SZ)); /* Zero terminate the stack backtrace. */ child_sf->fp = NULL; t->kregs->u_regs[UREG_FP] = ((unsigned long) child_sf) - STACK_BIAS; t->flags |= ((long)ASI_P << TI_FLAG_CURRENT_DS_SHIFT); t->kregs->u_regs[UREG_G6] = (unsigned long) t; t->kregs->u_regs[UREG_G4] = (unsigned long) t->task; } else { if (t->flags & _TIF_32BIT) { sp &= 0x00000000ffffffffUL; regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL; } t->kregs->u_regs[UREG_FP] = sp; t->flags |= ((long)ASI_AIUS << TI_FLAG_CURRENT_DS_SHIFT); if (sp != regs->u_regs[UREG_FP]) { unsigned long csp; csp = clone_stackframe(sp, regs->u_regs[UREG_FP]); if (!csp) return -EFAULT; t->kregs->u_regs[UREG_FP] = csp; } if (t->utraps) t->utraps[0]++; } /* Set the return value for the child. */ t->kregs->u_regs[UREG_I0] = current->pid; t->kregs->u_regs[UREG_I1] = 1; /* Set the second return value for the parent. */ regs->u_regs[UREG_I1] = 0; if (clone_flags & CLONE_SETTLS) t->kregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3]; return 0; } /* * This is the mechanism for creating a new kernel thread. * * NOTE! Only a kernel-only process(ie the swapper or direct descendants * who haven't done an "execve()") should use this: it will work within * a system call from a "real" process, but the process memory space will * not be freed until both the parent and the child have exited. */ pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) { long retval; /* If the parent runs before fn(arg) is called by the child, * the input registers of this function can be clobbered. * So we stash 'fn' and 'arg' into global registers which * will not be modified by the parent. */ __asm__ __volatile__("mov %4, %%g2\n\t" /* Save FN into global */ "mov %5, %%g3\n\t" /* Save ARG into global */ "mov %1, %%g1\n\t" /* Clone syscall nr. */ "mov %2, %%o0\n\t" /* Clone flags. */ "mov 0, %%o1\n\t" /* usp arg == 0 */ "t 0x6d\n\t" /* Linux/Sparc clone(). */ "brz,a,pn %%o1, 1f\n\t" /* Parent, just return. */ " mov %%o0, %0\n\t" "jmpl %%g2, %%o7\n\t" /* Call the function. */ " mov %%g3, %%o0\n\t" /* Set arg in delay. */ "mov %3, %%g1\n\t" "t 0x6d\n\t" /* Linux/Sparc exit(). */ /* Notreached by child. 
*/ "1:" : "=r" (retval) : "i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED), "i" (__NR_exit), "r" (fn), "r" (arg) : "g1", "g2", "g3", "o0", "o1", "memory", "cc"); return retval; } EXPORT_SYMBOL(kernel_thread); typedef struct { union { unsigned int pr_regs[32]; unsigned long pr_dregs[16]; } pr_fr; unsigned int __unused; unsigned int pr_fsr; unsigned char pr_qcnt; unsigned char pr_q_entrysize; unsigned char pr_en; unsigned int pr_q[64]; } elf_fpregset_t32; /* * fill in the fpu structure for a core dump. */ int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs) { unsigned long *kfpregs = current_thread_info()->fpregs; unsigned long fprs = current_thread_info()->fpsaved[0]; if (test_thread_flag(TIF_32BIT)) { elf_fpregset_t32 *fpregs32 = (elf_fpregset_t32 *)fpregs; if (fprs & FPRS_DL) memcpy(&fpregs32->pr_fr.pr_regs[0], kfpregs, sizeof(unsigned int) * 32); else memset(&fpregs32->pr_fr.pr_regs[0], 0, sizeof(unsigned int) * 32); fpregs32->pr_qcnt = 0; fpregs32->pr_q_entrysize = 8; memset(&fpregs32->pr_q[0], 0, (sizeof(unsigned int) * 64)); if (fprs & FPRS_FEF) { fpregs32->pr_fsr = (unsigned int) current_thread_info()->xfsr[0]; fpregs32->pr_en = 1; } else { fpregs32->pr_fsr = 0; fpregs32->pr_en = 0; } } else { if(fprs & FPRS_DL) memcpy(&fpregs->pr_regs[0], kfpregs, sizeof(unsigned int) * 32); else memset(&fpregs->pr_regs[0], 0, sizeof(unsigned int) * 32); if(fprs & FPRS_DU) memcpy(&fpregs->pr_regs[16], kfpregs+16, sizeof(unsigned int) * 32); else memset(&fpregs->pr_regs[16], 0, sizeof(unsigned int) * 32); if(fprs & FPRS_FEF) { fpregs->pr_fsr = current_thread_info()->xfsr[0]; fpregs->pr_gsr = current_thread_info()->gsr[0]; } else { fpregs->pr_fsr = fpregs->pr_gsr = 0; } fpregs->pr_fprs = fprs; } return 1; } EXPORT_SYMBOL(dump_fpu); /* * sparc_execve() executes a new program after the asm stub has set * things up for us. This should basically do what I want it to. */ asmlinkage int sparc_execve(struct pt_regs *regs) { int error, base = 0; char *filename; /* User register window flush is done by entry.S */ /* Check for indirect call. */ if (regs->u_regs[UREG_G1] == 0) base = 1; filename = getname((char __user *)regs->u_regs[base + UREG_I0]); error = PTR_ERR(filename); if (IS_ERR(filename)) goto out; error = do_execve(filename, (const char __user *const __user *) regs->u_regs[base + UREG_I1], (const char __user *const __user *) regs->u_regs[base + UREG_I2], regs); putname(filename); if (!error) { fprs_write(0); current_thread_info()->xfsr[0] = 0; current_thread_info()->fpsaved[0] = 0; regs->tstate &= ~TSTATE_PEF; } out: return error; } unsigned long get_wchan(struct task_struct *task) { unsigned long pc, fp, bias = 0; struct thread_info *tp; struct reg_window *rw; unsigned long ret = 0; int count = 0; if (!task || task == current || task->state == TASK_RUNNING) goto out; tp = task_thread_info(task); bias = STACK_BIAS; fp = task_thread_info(task)->ksp + bias; do { if (!kstack_valid(tp, fp)) break; rw = (struct reg_window *) fp; pc = rw->ins[7]; if (!in_sched_functions(pc)) { ret = pc; goto out; } fp = rw->ins[6] + bias; } while (++count < 16); out: return ret; }
gpl-2.0
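copy_thread() above hand-builds the SunOS fork return convention (pid in %o0 on both sides, a parent/child flag in %o1). As a point of comparison, plain POSIX fork() folds that discrimination into a single return value; the standalone sketch below is only an analogy to make the convention concrete, not code from the kernel.

/* POSIX analogue of the fork return convention set up in
 * copy_thread() above: SunOS gives both sides the pid in %o0 plus a
 * parent/child flag in %o1, while POSIX folds the distinction into
 * the single return value (0 in the child, child pid in the parent).
 * Compile: gcc demo.c && ./a.out
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
    pid_t pid = fork();

    if (pid < 0) {
        perror("fork");
        return 1;
    }
    if (pid == 0) {
        /* Child: corresponds to %o1 == 1 in the SunOS convention. */
        printf("child: my pid is %d\n", (int)getpid());
        _exit(0);
    }
    /* Parent: corresponds to %o1 == 0; pid holds the child's pid,
     * as %o0 would. */
    printf("parent: child pid is %d\n", (int)pid);
    waitpid(pid, NULL, 0);
    return 0;
}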
p2pjack/Virtuous-Beastmode
sound/pci/vx222/vx222.c
3632
7678
/* * Driver for Digigram VX222 V2/Mic PCI soundcards * * Copyright (c) 2002 by Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/moduleparam.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/tlv.h> #include "vx222.h" #define CARD_NAME "VX222" MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); MODULE_DESCRIPTION("Digigram VX222 V2/Mic"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Digigram," CARD_NAME "}}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */ static int mic[SNDRV_CARDS]; /* microphone */ static int ibl[SNDRV_CARDS]; /* capture IBL size */ module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for Digigram " CARD_NAME " soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for Digigram " CARD_NAME " soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable Digigram " CARD_NAME " soundcard."); module_param_array(mic, bool, NULL, 0444); MODULE_PARM_DESC(mic, "Enable Microphone."); module_param_array(ibl, int, NULL, 0444); MODULE_PARM_DESC(ibl, "Capture IBL size."); /* */ enum { VX_PCI_VX222_OLD, VX_PCI_VX222_NEW }; static DEFINE_PCI_DEVICE_TABLE(snd_vx222_ids) = { { 0x10b5, 0x9050, 0x1369, PCI_ANY_ID, 0, 0, VX_PCI_VX222_OLD, }, /* PLX */ { 0x10b5, 0x9030, 0x1369, PCI_ANY_ID, 0, 0, VX_PCI_VX222_NEW, }, /* PLX */ { 0, } }; MODULE_DEVICE_TABLE(pci, snd_vx222_ids); /* */ static const DECLARE_TLV_DB_SCALE(db_scale_old_vol, -11350, 50, 0); static const DECLARE_TLV_DB_SCALE(db_scale_akm, -7350, 50, 0); static struct snd_vx_hardware vx222_old_hw = { .name = "VX222/Old", .type = VX_TYPE_BOARD, /* hw specs */ .num_codecs = 1, .num_ins = 1, .num_outs = 1, .output_level_max = VX_ANALOG_OUT_LEVEL_MAX, .output_level_db_scale = db_scale_old_vol, }; static struct snd_vx_hardware vx222_v2_hw = { .name = "VX222/v2", .type = VX_TYPE_V2, /* hw specs */ .num_codecs = 1, .num_ins = 1, .num_outs = 1, .output_level_max = VX2_AKM_LEVEL_MAX, .output_level_db_scale = db_scale_akm, }; static struct snd_vx_hardware vx222_mic_hw = { .name = "VX222/Mic", .type = VX_TYPE_MIC, /* hw specs */ .num_codecs = 1, .num_ins = 1, .num_outs = 1, .output_level_max = VX2_AKM_LEVEL_MAX, .output_level_db_scale = db_scale_akm, }; /* */ static int snd_vx222_free(struct vx_core *chip) { struct snd_vx222 *vx = (struct snd_vx222 *)chip; if (chip->irq >= 0) free_irq(chip->irq, (void*)chip); if (vx->port[0]) pci_release_regions(vx->pci); pci_disable_device(vx->pci); kfree(chip); return 0; } static int snd_vx222_dev_free(struct snd_device *device) { struct vx_core *chip 
= device->device_data; return snd_vx222_free(chip); } static int __devinit snd_vx222_create(struct snd_card *card, struct pci_dev *pci, struct snd_vx_hardware *hw, struct snd_vx222 **rchip) { struct vx_core *chip; struct snd_vx222 *vx; int i, err; static struct snd_device_ops ops = { .dev_free = snd_vx222_dev_free, }; struct snd_vx_ops *vx_ops; /* enable PCI device */ if ((err = pci_enable_device(pci)) < 0) return err; pci_set_master(pci); vx_ops = hw->type == VX_TYPE_BOARD ? &vx222_old_ops : &vx222_ops; chip = snd_vx_create(card, hw, vx_ops, sizeof(struct snd_vx222) - sizeof(struct vx_core)); if (! chip) { pci_disable_device(pci); return -ENOMEM; } vx = (struct snd_vx222 *)chip; vx->pci = pci; if ((err = pci_request_regions(pci, CARD_NAME)) < 0) { snd_vx222_free(chip); return err; } for (i = 0; i < 2; i++) vx->port[i] = pci_resource_start(pci, i + 1); if (request_irq(pci->irq, snd_vx_irq_handler, IRQF_SHARED, CARD_NAME, chip)) { snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq); snd_vx222_free(chip); return -EBUSY; } chip->irq = pci->irq; if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { snd_vx222_free(chip); return err; } snd_card_set_dev(card, &pci->dev); *rchip = vx; return 0; } static int __devinit snd_vx222_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; struct snd_vx_hardware *hw; struct snd_vx222 *vx; int err; if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) return err; switch ((int)pci_id->driver_data) { case VX_PCI_VX222_OLD: hw = &vx222_old_hw; break; case VX_PCI_VX222_NEW: default: if (mic[dev]) hw = &vx222_mic_hw; else hw = &vx222_v2_hw; break; } if ((err = snd_vx222_create(card, pci, hw, &vx)) < 0) { snd_card_free(card); return err; } card->private_data = vx; vx->core.ibl.size = ibl[dev]; sprintf(card->longname, "%s at 0x%lx & 0x%lx, irq %i", card->shortname, vx->port[0], vx->port[1], vx->core.irq); snd_printdd("%s at 0x%lx & 0x%lx, irq %i\n", card->shortname, vx->port[0], vx->port[1], vx->core.irq); #ifdef SND_VX_FW_LOADER vx->core.dev = &pci->dev; #endif if ((err = snd_vx_setup_firmware(&vx->core)) < 0) { snd_card_free(card); return err; } if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } pci_set_drvdata(pci, card); dev++; return 0; } static void __devexit snd_vx222_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } #ifdef CONFIG_PM static int snd_vx222_suspend(struct pci_dev *pci, pm_message_t state) { struct snd_card *card = pci_get_drvdata(pci); struct snd_vx222 *vx = card->private_data; int err; err = snd_vx_suspend(&vx->core, state); pci_disable_device(pci); pci_save_state(pci); pci_set_power_state(pci, pci_choose_state(pci, state)); return err; } static int snd_vx222_resume(struct pci_dev *pci) { struct snd_card *card = pci_get_drvdata(pci); struct snd_vx222 *vx = card->private_data; pci_set_power_state(pci, PCI_D0); pci_restore_state(pci); if (pci_enable_device(pci) < 0) { printk(KERN_ERR "vx222: pci_enable_device failed, " "disabling device\n"); snd_card_disconnect(card); return -EIO; } pci_set_master(pci); return snd_vx_resume(&vx->core); } #endif static struct pci_driver driver = { .name = "Digigram VX222", .id_table = snd_vx222_ids, .probe = snd_vx222_probe, .remove = __devexit_p(snd_vx222_remove), #ifdef CONFIG_PM .suspend = snd_vx222_suspend, .resume = snd_vx222_resume, #endif }; static int 
__init alsa_card_vx222_init(void) { return pci_register_driver(&driver); } static void __exit alsa_card_vx222_exit(void) { pci_unregister_driver(&driver); } module_init(alsa_card_vx222_init) module_exit(alsa_card_vx222_exit)
gpl-2.0
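snd_vx222_create() above acquires its resources in order (PCI device, regions, IRQ, sound device) and routes every failure through snd_vx222_free(), which tolerates partially constructed state and releases in reverse order. A minimal userspace sketch of that unwind-on-error pattern follows; the struct and resource names are hypothetical stand-ins, not the driver's API.

/* Sketch of the unwind-on-error pattern used by snd_vx222_create():
 * acquire resources in order; on any failure, release everything
 * obtained so far and propagate the error. Resource names here are
 * hypothetical. Compile: gcc demo.c && ./a.out
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct device {
    char *regions;   /* stands in for pci_request_regions() */
    int irq;         /* stands in for request_irq(); -1 = none */
};

static void device_free(struct device *dev)
{
    /* Mirror snd_vx222_free(): release in reverse order of
     * acquisition, tolerating partially constructed state. */
    if (!dev)
        return;
    if (dev->irq >= 0)
        printf("released irq %d\n", dev->irq);
    free(dev->regions);
    free(dev);
}

static int device_create(int irq, struct device **out)
{
    struct device *dev = calloc(1, sizeof(*dev));

    if (!dev)
        return -1;
    dev->irq = -1;

    dev->regions = malloc(64);
    if (!dev->regions) {
        device_free(dev);        /* frees only dev itself */
        return -1;
    }
    memset(dev->regions, 0, 64);

    if (irq < 0) {               /* simulated request_irq() failure */
        device_free(dev);        /* frees regions, then dev */
        return -1;
    }
    dev->irq = irq;

    *out = dev;
    return 0;
}

int main(void)
{
    struct device *dev;

    if (device_create(-1, &dev) != 0)
        printf("create failed, nothing leaked\n");
    if (device_create(11, &dev) == 0) {
        printf("created with irq %d\n", dev->irq);
        device_free(dev);
    }
    return 0;
}

The payoff of funnelling every error path through one teardown function, as the driver does, is that adding a new resource means touching exactly two places: the acquisition step and the release step.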
tarunkapadia93/android_kernel_xiaomi_cancro
arch/x86/kernel/process_32.c
4656
8888
/* * Copyright (C) 1995 Linus Torvalds * * Pentium III FXSR, SSE support * Gareth Hughes <gareth@valinux.com>, May 2000 */ /* * This file handles the architecture-dependent parts of process handling.. */ #include <linux/cpu.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/elfcore.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/user.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/reboot.h> #include <linux/init.h> #include <linux/mc146818rtc.h> #include <linux/module.h> #include <linux/kallsyms.h> #include <linux/ptrace.h> #include <linux/personality.h> #include <linux/percpu.h> #include <linux/prctl.h> #include <linux/ftrace.h> #include <linux/uaccess.h> #include <linux/io.h> #include <linux/kdebug.h> #include <asm/pgtable.h> #include <asm/ldt.h> #include <asm/processor.h> #include <asm/i387.h> #include <asm/fpu-internal.h> #include <asm/desc.h> #ifdef CONFIG_MATH_EMULATION #include <asm/math_emu.h> #endif #include <linux/err.h> #include <asm/tlbflush.h> #include <asm/cpu.h> #include <asm/idle.h> #include <asm/syscalls.h> #include <asm/debugreg.h> #include <asm/switch_to.h> asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); /* * Return saved PC of a blocked thread. */ unsigned long thread_saved_pc(struct task_struct *tsk) { return ((unsigned long *)tsk->thread.sp)[3]; } void __show_regs(struct pt_regs *regs, int all) { unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; unsigned long d0, d1, d2, d3, d6, d7; unsigned long sp; unsigned short ss, gs; if (user_mode_vm(regs)) { sp = regs->sp; ss = regs->ss & 0xffff; gs = get_user_gs(regs); } else { sp = kernel_stack_pointer(regs); savesegment(ss, ss); savesegment(gs, gs); } show_regs_common(); printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n", (u16)regs->cs, regs->ip, regs->flags, smp_processor_id()); print_symbol("EIP is at %s\n", regs->ip); printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n", regs->ax, regs->bx, regs->cx, regs->dx); printk(KERN_DEFAULT "ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n", regs->si, regs->di, regs->bp, sp); printk(KERN_DEFAULT " DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n", (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss); if (!all) return; cr0 = read_cr0(); cr2 = read_cr2(); cr3 = read_cr3(); cr4 = read_cr4_safe(); printk(KERN_DEFAULT "CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4); get_debugreg(d0, 0); get_debugreg(d1, 1); get_debugreg(d2, 2); get_debugreg(d3, 3); printk(KERN_DEFAULT "DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n", d0, d1, d2, d3); get_debugreg(d6, 6); get_debugreg(d7, 7); printk(KERN_DEFAULT "DR6: %08lx DR7: %08lx\n", d6, d7); } void release_thread(struct task_struct *dead_task) { BUG_ON(dead_task->mm); release_vm86_irqs(dead_task); } /* * This gets called before we allocate a new thread and copy * the current task into it. 
*/ void prepare_to_copy(struct task_struct *tsk) { unlazy_fpu(tsk); } int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long unused, struct task_struct *p, struct pt_regs *regs) { struct pt_regs *childregs; struct task_struct *tsk; int err; childregs = task_pt_regs(p); *childregs = *regs; childregs->ax = 0; childregs->sp = sp; p->thread.sp = (unsigned long) childregs; p->thread.sp0 = (unsigned long) (childregs+1); p->thread.ip = (unsigned long) ret_from_fork; task_user_gs(p) = get_user_gs(regs); p->fpu_counter = 0; p->thread.io_bitmap_ptr = NULL; tsk = current; err = -ENOMEM; memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) { p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr, IO_BITMAP_BYTES, GFP_KERNEL); if (!p->thread.io_bitmap_ptr) { p->thread.io_bitmap_max = 0; return -ENOMEM; } set_tsk_thread_flag(p, TIF_IO_BITMAP); } err = 0; /* * Set a new TLS for the child thread? */ if (clone_flags & CLONE_SETTLS) err = do_set_thread_area(p, -1, (struct user_desc __user *)childregs->si, 0); if (err && p->thread.io_bitmap_ptr) { kfree(p->thread.io_bitmap_ptr); p->thread.io_bitmap_max = 0; } return err; } void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) { set_user_gs(regs, 0); regs->fs = 0; regs->ds = __USER_DS; regs->es = __USER_DS; regs->ss = __USER_DS; regs->cs = __USER_CS; regs->ip = new_ip; regs->sp = new_sp; /* * Free the old FP and other extended state */ free_thread_xstate(current); } EXPORT_SYMBOL_GPL(start_thread); /* * switch_to(x,y) should switch tasks from x to y. * * We fsave/fwait so that an exception goes off at the right time * (as a call from the fsave or fwait in effect) rather than to * the wrong process. Lazy FP saving no longer makes any sense * with modern CPU's, and this simplifies a lot of things (SMP * and UP become the same). * * NOTE! We used to use the x86 hardware context switching. The * reason for not using it any more becomes apparent when you * try to recover gracefully from saved state that is no longer * valid (stale segment register values in particular). With the * hardware task-switch, there is no way to fix up bad state in * a reasonable manner. * * The fact that Intel documents the hardware task-switching to * be slow is a fairly red herring - this code is not noticeably * faster. However, there _is_ some room for improvement here, * so the performance issues may eventually be a valid point. * More important, however, is the fact that this allows us much * more flexibility. * * The return value (in %ax) will be the "prev" task after * the task-switch, and shows up in ret_from_fork in entry.S, * for example. */ __notrace_funcgraph struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p) { struct thread_struct *prev = &prev_p->thread, *next = &next_p->thread; int cpu = smp_processor_id(); struct tss_struct *tss = &per_cpu(init_tss, cpu); fpu_switch_t fpu; /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ fpu = switch_fpu_prepare(prev_p, next_p, cpu); /* * Reload esp0. */ load_sp0(tss, next); /* * Save away %gs. No need to save %fs, as it was saved on the * stack on entry. No need to save %es and %ds, as those are * always kernel segments while inside the kernel. Doing this * before setting the new TLS descriptors avoids the situation * where we temporarily have non-reloadable segments in %fs * and %gs. 
This could be an issue if the NMI handler ever * used %fs or %gs (it does not today), or if the kernel is * running inside of a hypervisor layer. */ lazy_save_gs(prev->gs); /* * Load the per-thread Thread-Local Storage descriptor. */ load_TLS(next, cpu); /* * Restore IOPL if needed. In normal use, the flags restore * in the switch assembly will handle this. But if the kernel * is running virtualized at a non-zero CPL, the popf will * not restore flags, so it must be done in a separate step. */ if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl)) set_iopl_mask(next->iopl); /* * Now maybe handle debug registers and/or IO bitmaps */ if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV || task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) __switch_to_xtra(prev_p, next_p, tss); /* * Leave lazy mode, flushing any hypercalls made here. * This must be done before restoring TLS segments so * the GDT and LDT are properly updated, and must be * done before math_state_restore, so the TS bit is up * to date. */ arch_end_context_switch(next_p); /* * Restore %gs if needed (which is common) */ if (prev->gs | next->gs) lazy_load_gs(next->gs); switch_fpu_finish(next_p, fpu); percpu_write(current_task, next_p); return prev_p; } #define top_esp (THREAD_SIZE - sizeof(unsigned long)) #define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long)) unsigned long get_wchan(struct task_struct *p) { unsigned long bp, sp, ip; unsigned long stack_page; int count = 0; if (!p || p == current || p->state == TASK_RUNNING) return 0; stack_page = (unsigned long)task_stack_page(p); sp = p->thread.sp; if (!stack_page || sp < stack_page || sp > top_esp+stack_page) return 0; /* include/asm-i386/system.h:switch_to() pushes bp last. */ bp = *(unsigned long *) sp; do { if (bp < stack_page || bp > top_ebp+stack_page) return 0; ip = *(unsigned long *) (bp+4); if (!in_sched_functions(ip)) return ip; bp = *(unsigned long *) bp; } while (count++ < 16); return 0; }
gpl-2.0
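get_wchan() above walks the saved frame-pointer chain — each i386 frame stores the caller's %ebp at offset 0 and the return address one word above it — and bounds-checks every pointer against the task's stack page before dereferencing it. The standalone model below reproduces that walk on a hand-built fake stack; the layout mirrors the i386 convention the function relies on, and the addresses are illustrative.

/* Model of the frame-pointer walk in get_wchan(): each frame holds
 * [saved bp][return ip], and every bp is range-checked against the
 * stack before use. The fake stack below is hand-built to emulate
 * three nested calls. Compile: gcc demo.c && ./a.out
 */
#include <stdint.h>
#include <stdio.h>

#define STACK_WORDS 16

int main(void)
{
    uintptr_t stack[STACK_WORDS] = {0};
    uintptr_t base = (uintptr_t)stack;
    uintptr_t top  = (uintptr_t)&stack[STACK_WORDS];

    /* Frame at index 2: saved bp -> index 6, return ip 0x1111. */
    stack[2] = (uintptr_t)&stack[6];
    stack[3] = 0x1111;
    /* Frame at index 6: saved bp -> index 10, return ip 0x2222. */
    stack[6] = (uintptr_t)&stack[10];
    stack[7] = 0x2222;
    /* Frame at index 10: saved bp == 0 terminates the walk. */
    stack[10] = 0;
    stack[11] = 0x3333;

    uintptr_t bp = (uintptr_t)&stack[2];
    int count = 0;

    /* Same loop shape as get_wchan(): validate bp, read ip one word
     * above it, follow the saved bp, and give up after a fixed number
     * of frames. */
    while (count++ < 16) {
        if (bp < base || bp >= top)   /* bounds check, as in get_wchan */
            break;
        uintptr_t ip = *(uintptr_t *)(bp + sizeof(uintptr_t));
        printf("frame %d: return ip = 0x%lx\n", count, (unsigned long)ip);
        bp = *(uintptr_t *)bp;        /* follow saved frame pointer */
        if (!bp)
            break;
    }
    return 0;
}

In the real function the loop additionally tests in_sched_functions(ip) and returns the first ip outside the scheduler, which is exactly the "where is this task waiting" answer wchan reports.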
andip71/boeffla-kernel-samsung-s5
drivers/usb/storage/realtek_cr.c
4912
27996
/* Driver for Realtek RTS51xx USB card reader * * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, see <http://www.gnu.org/licenses/>. * * Author: * wwang (wei_wang@realsil.com.cn) * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China */ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/kthread.h> #include <linux/sched.h> #include <linux/kernel.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <linux/cdrom.h> #include <linux/usb.h> #include <linux/slab.h> #include <linux/usb_usual.h> #include "usb.h" #include "transport.h" #include "protocol.h" #include "debug.h" MODULE_DESCRIPTION("Driver for Realtek USB Card Reader"); MODULE_AUTHOR("wwang <wei_wang@realsil.com.cn>"); MODULE_LICENSE("GPL"); MODULE_VERSION("1.03"); static int auto_delink_en = 1; module_param(auto_delink_en, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(auto_delink_en, "enable auto delink"); #ifdef CONFIG_REALTEK_AUTOPM static int ss_en = 1; module_param(ss_en, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ss_en, "enable selective suspend"); static int ss_delay = 50; module_param(ss_delay, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ss_delay, "seconds to delay before entering selective suspend"); enum RTS51X_STAT { RTS51X_STAT_INIT, RTS51X_STAT_IDLE, RTS51X_STAT_RUN, RTS51X_STAT_SS }; #define POLLING_INTERVAL 50 #define rts51x_set_stat(chip, stat) \ ((chip)->state = (enum RTS51X_STAT)(stat)) #define rts51x_get_stat(chip) ((chip)->state) #define SET_LUN_READY(chip, lun) ((chip)->lun_ready |= ((u8)1 << (lun))) #define CLR_LUN_READY(chip, lun) ((chip)->lun_ready &= ~((u8)1 << (lun))) #define TST_LUN_READY(chip, lun) ((chip)->lun_ready & ((u8)1 << (lun))) #endif struct rts51x_status { u16 vid; u16 pid; u8 cur_lun; u8 card_type; u8 total_lun; u16 fw_ver; u8 phy_exist; u8 multi_flag; u8 multi_card; u8 log_exist; union { u8 detailed_type1; u8 detailed_type2; } detailed_type; u8 function[2]; }; struct rts51x_chip { u16 vendor_id; u16 product_id; char max_lun; struct rts51x_status *status; int status_len; u32 flag; #ifdef CONFIG_REALTEK_AUTOPM struct us_data *us; struct timer_list rts51x_suspend_timer; unsigned long timer_expires; int pwr_state; u8 lun_ready; enum RTS51X_STAT state; int support_auto_delink; #endif /* used to back up the protocol chosen in probe1 phase */ proto_cmnd proto_handler_backup; }; /* flag definition */ #define FLIDX_AUTO_DELINK 0x01 #define SCSI_LUN(srb) ((srb)->device->lun) /* Bit Operation */ #define SET_BIT(data, idx) ((data) |= 1 << (idx)) #define CLR_BIT(data, idx) ((data) &= ~(1 << (idx))) #define CHK_BIT(data, idx) ((data) & (1 << (idx))) #define SET_AUTO_DELINK(chip) ((chip)->flag |= FLIDX_AUTO_DELINK) #define CLR_AUTO_DELINK(chip) ((chip)->flag &= ~FLIDX_AUTO_DELINK) #define CHK_AUTO_DELINK(chip) ((chip)->flag & FLIDX_AUTO_DELINK) #define RTS51X_GET_VID(chip) ((chip)->vendor_id) #define RTS51X_GET_PID(chip) ((chip)->product_id) #define VENDOR_ID(chip) 
((chip)->status[0].vid) #define PRODUCT_ID(chip) ((chip)->status[0].pid) #define FW_VERSION(chip) ((chip)->status[0].fw_ver) #define STATUS_LEN(chip) ((chip)->status_len) #define STATUS_SUCCESS 0 #define STATUS_FAIL 1 /* Check card reader function */ #define SUPPORT_DETAILED_TYPE1(chip) \ CHK_BIT((chip)->status[0].function[0], 1) #define SUPPORT_OT(chip) \ CHK_BIT((chip)->status[0].function[0], 2) #define SUPPORT_OC(chip) \ CHK_BIT((chip)->status[0].function[0], 3) #define SUPPORT_AUTO_DELINK(chip) \ CHK_BIT((chip)->status[0].function[0], 4) #define SUPPORT_SDIO(chip) \ CHK_BIT((chip)->status[0].function[1], 0) #define SUPPORT_DETAILED_TYPE2(chip) \ CHK_BIT((chip)->status[0].function[1], 1) #define CHECK_PID(chip, pid) (RTS51X_GET_PID(chip) == (pid)) #define CHECK_FW_VER(chip, fw_ver) (FW_VERSION(chip) == (fw_ver)) #define CHECK_ID(chip, pid, fw_ver) \ (CHECK_PID((chip), (pid)) && CHECK_FW_VER((chip), (fw_ver))) static int init_realtek_cr(struct us_data *us); /* * The table of devices */ #define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ vendorName, productName, useProtocol, useTransport, \ initFunction, flags) \ {\ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags)|(USB_US_TYPE_STOR<<24)\ } static const struct usb_device_id realtek_cr_ids[] = { # include "unusual_realtek.h" {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, realtek_cr_ids); #undef UNUSUAL_DEV /* * The flags table */ #define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \ vendor_name, product_name, use_protocol, use_transport, \ init_function, Flags) \ { \ .vendorName = vendor_name, \ .productName = product_name, \ .useProtocol = use_protocol, \ .useTransport = use_transport, \ .initFunction = init_function, \ } static struct us_unusual_dev realtek_cr_unusual_dev_list[] = { # include "unusual_realtek.h" {} /* Terminating entry */ }; #undef UNUSUAL_DEV static int rts51x_bulk_transport(struct us_data *us, u8 lun, u8 *cmd, int cmd_len, u8 *buf, int buf_len, enum dma_data_direction dir, int *act_len) { struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *)us->iobuf; struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *)us->iobuf; int result; unsigned int residue; unsigned int cswlen; unsigned int cbwlen = US_BULK_CB_WRAP_LEN; /* set up the command wrapper */ bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = cpu_to_le32(buf_len); bcb->Flags = (dir == DMA_FROM_DEVICE) ? US_BULK_FLAG_IN : 0; bcb->Tag = ++us->tag; bcb->Lun = lun; bcb->Length = cmd_len; /* copy the command payload */ memset(bcb->CDB, 0, sizeof(bcb->CDB)); memcpy(bcb->CDB, cmd, bcb->Length); /* send it to out endpoint */ result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb, cbwlen, NULL); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; /* DATA STAGE */ /* send/receive data payload, if there is any */ if (buf && buf_len) { unsigned int pipe = (dir == DMA_FROM_DEVICE) ? 
us->recv_bulk_pipe : us->send_bulk_pipe; result = usb_stor_bulk_transfer_buf(us, pipe, buf, buf_len, NULL); if (result == USB_STOR_XFER_ERROR) return USB_STOR_TRANSPORT_ERROR; } /* get CSW for device status */ result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, bcs, US_BULK_CS_WRAP_LEN, &cswlen); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; /* check bulk status */ if (bcs->Signature != cpu_to_le32(US_BULK_CS_SIGN)) { US_DEBUGP("Signature mismatch: got %08X, expecting %08X\n", le32_to_cpu(bcs->Signature), US_BULK_CS_SIGN); return USB_STOR_TRANSPORT_ERROR; } residue = bcs->Residue; if (bcs->Tag != us->tag) return USB_STOR_TRANSPORT_ERROR; /* try to compute the actual residue, based on how much data * was really transferred and what the device tells us */ if (residue) residue = residue < buf_len ? residue : buf_len; if (act_len) *act_len = buf_len - residue; /* based on the status code, we report good or bad */ switch (bcs->Status) { case US_BULK_STAT_OK: /* command good -- note that data could be short */ return USB_STOR_TRANSPORT_GOOD; case US_BULK_STAT_FAIL: /* command failed */ return USB_STOR_TRANSPORT_FAILED; case US_BULK_STAT_PHASE: /* phase error -- note that a transport reset will be * invoked by the invoke_transport() function */ return USB_STOR_TRANSPORT_ERROR; } /* we should never get here, but if we do, we're in trouble */ return USB_STOR_TRANSPORT_ERROR; } static int rts51x_bulk_transport_special(struct us_data *us, u8 lun, u8 *cmd, int cmd_len, u8 *buf, int buf_len, enum dma_data_direction dir, int *act_len) { struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *) us->iobuf; int result; unsigned int cswlen; unsigned int cbwlen = US_BULK_CB_WRAP_LEN; /* set up the command wrapper */ bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = cpu_to_le32(buf_len); bcb->Flags = (dir == DMA_FROM_DEVICE) ? US_BULK_FLAG_IN : 0; bcb->Tag = ++us->tag; bcb->Lun = lun; bcb->Length = cmd_len; /* copy the command payload */ memset(bcb->CDB, 0, sizeof(bcb->CDB)); memcpy(bcb->CDB, cmd, bcb->Length); /* send it to out endpoint */ result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcb, cbwlen, NULL); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; /* DATA STAGE */ /* send/receive data payload, if there is any */ if (buf && buf_len) { unsigned int pipe = (dir == DMA_FROM_DEVICE) ? 
us->recv_bulk_pipe : us->send_bulk_pipe; result = usb_stor_bulk_transfer_buf(us, pipe, buf, buf_len, NULL); if (result == USB_STOR_XFER_ERROR) return USB_STOR_TRANSPORT_ERROR; } /* get CSW for device status */ result = usb_bulk_msg(us->pusb_dev, us->recv_bulk_pipe, bcs, US_BULK_CS_WRAP_LEN, &cswlen, 250); return result; } /* Determine what the maximum LUN supported is */ static int rts51x_get_max_lun(struct us_data *us) { int result; /* issue the command */ us->iobuf[0] = 0; result = usb_stor_control_msg(us, us->recv_ctrl_pipe, US_BULK_GET_MAX_LUN, USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, us->ifnum, us->iobuf, 1, 10 * HZ); US_DEBUGP("GetMaxLUN command result is %d, data is %d\n", result, us->iobuf[0]); /* if we have a successful request, return the result */ if (result > 0) return us->iobuf[0]; return 0; } static int rts51x_read_mem(struct us_data *us, u16 addr, u8 *data, u16 len) { int retval; u8 cmnd[12] = { 0 }; u8 *buf; buf = kmalloc(len, GFP_NOIO); if (buf == NULL) return USB_STOR_TRANSPORT_ERROR; US_DEBUGP("%s, addr = 0x%x, len = %d\n", __func__, addr, len); cmnd[0] = 0xF0; cmnd[1] = 0x0D; cmnd[2] = (u8) (addr >> 8); cmnd[3] = (u8) addr; cmnd[4] = (u8) (len >> 8); cmnd[5] = (u8) len; retval = rts51x_bulk_transport(us, 0, cmnd, 12, buf, len, DMA_FROM_DEVICE, NULL); if (retval != USB_STOR_TRANSPORT_GOOD) { kfree(buf); return -EIO; } memcpy(data, buf, len); kfree(buf); return 0; } static int rts51x_write_mem(struct us_data *us, u16 addr, u8 *data, u16 len) { int retval; u8 cmnd[12] = { 0 }; u8 *buf; buf = kmemdup(data, len, GFP_NOIO); if (buf == NULL) return USB_STOR_TRANSPORT_ERROR; US_DEBUGP("%s, addr = 0x%x, len = %d\n", __func__, addr, len); cmnd[0] = 0xF0; cmnd[1] = 0x0E; cmnd[2] = (u8) (addr >> 8); cmnd[3] = (u8) addr; cmnd[4] = (u8) (len >> 8); cmnd[5] = (u8) len; retval = rts51x_bulk_transport(us, 0, cmnd, 12, buf, len, DMA_TO_DEVICE, NULL); kfree(buf); if (retval != USB_STOR_TRANSPORT_GOOD) return -EIO; return 0; } static int rts51x_read_status(struct us_data *us, u8 lun, u8 *status, int len, int *actlen) { int retval; u8 cmnd[12] = { 0 }; u8 *buf; buf = kmalloc(len, GFP_NOIO); if (buf == NULL) return USB_STOR_TRANSPORT_ERROR; US_DEBUGP("%s, lun = %d\n", __func__, lun); cmnd[0] = 0xF0; cmnd[1] = 0x09; retval = rts51x_bulk_transport(us, lun, cmnd, 12, buf, len, DMA_FROM_DEVICE, actlen); if (retval != USB_STOR_TRANSPORT_GOOD) { kfree(buf); return -EIO; } memcpy(status, buf, len); kfree(buf); return 0; } static int rts51x_check_status(struct us_data *us, u8 lun) { struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra); int retval; u8 buf[16]; retval = rts51x_read_status(us, lun, buf, 16, &(chip->status_len)); if (retval < 0) return -EIO; US_DEBUGP("chip->status_len = %d\n", chip->status_len); chip->status[lun].vid = ((u16) buf[0] << 8) | buf[1]; chip->status[lun].pid = ((u16) buf[2] << 8) | buf[3]; chip->status[lun].cur_lun = buf[4]; chip->status[lun].card_type = buf[5]; chip->status[lun].total_lun = buf[6]; chip->status[lun].fw_ver = ((u16) buf[7] << 8) | buf[8]; chip->status[lun].phy_exist = buf[9]; chip->status[lun].multi_flag = buf[10]; chip->status[lun].multi_card = buf[11]; chip->status[lun].log_exist = buf[12]; if (chip->status_len == 16) { chip->status[lun].detailed_type.detailed_type1 = buf[13]; chip->status[lun].function[0] = buf[14]; chip->status[lun].function[1] = buf[15]; } return 0; } static int enable_oscillator(struct us_data *us) { int retval; u8 value; retval = rts51x_read_mem(us, 0xFE77, &value, 1); if (retval < 0) return -EIO; value |= 0x04; 
retval = rts51x_write_mem(us, 0xFE77, &value, 1); if (retval < 0) return -EIO; retval = rts51x_read_mem(us, 0xFE77, &value, 1); if (retval < 0) return -EIO; if (!(value & 0x04)) return -EIO; return 0; } static int __do_config_autodelink(struct us_data *us, u8 *data, u16 len) { int retval; u8 cmnd[12] = {0}; u8 *buf; US_DEBUGP("%s, addr = 0xfe47, len = %d\n", __FUNCTION__, len); buf = kmemdup(data, len, GFP_NOIO); if (!buf) return USB_STOR_TRANSPORT_ERROR; cmnd[0] = 0xF0; cmnd[1] = 0x0E; cmnd[2] = 0xfe; cmnd[3] = 0x47; cmnd[4] = (u8)(len >> 8); cmnd[5] = (u8)len; retval = rts51x_bulk_transport_special(us, 0, cmnd, 12, buf, len, DMA_TO_DEVICE, NULL); kfree(buf); if (retval != USB_STOR_TRANSPORT_GOOD) { return -EIO; } return 0; } static int do_config_autodelink(struct us_data *us, int enable, int force) { int retval; u8 value; retval = rts51x_read_mem(us, 0xFE47, &value, 1); if (retval < 0) return -EIO; if (enable) { if (force) value |= 0x03; else value |= 0x01; } else { value &= ~0x03; } US_DEBUGP("In %s,set 0xfe47 to 0x%x\n", __func__, value); /* retval = rts51x_write_mem(us, 0xFE47, &value, 1); */ retval = __do_config_autodelink(us, &value, 1); if (retval < 0) return -EIO; return 0; } static int config_autodelink_after_power_on(struct us_data *us) { struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra); int retval; u8 value; US_DEBUGP("%s: <---\n", __func__); if (!CHK_AUTO_DELINK(chip)) return 0; retval = rts51x_read_mem(us, 0xFE47, &value, 1); if (retval < 0) return -EIO; if (auto_delink_en) { CLR_BIT(value, 0); CLR_BIT(value, 1); SET_BIT(value, 2); if (CHECK_ID(chip, 0x0138, 0x3882)) CLR_BIT(value, 2); SET_BIT(value, 7); /* retval = rts51x_write_mem(us, 0xFE47, &value, 1); */ retval = __do_config_autodelink(us, &value, 1); if (retval < 0) return -EIO; retval = enable_oscillator(us); if (retval == 0) (void)do_config_autodelink(us, 1, 0); } else { /* Autodelink controlled by firmware */ SET_BIT(value, 2); if (CHECK_ID(chip, 0x0138, 0x3882)) CLR_BIT(value, 2); if (CHECK_ID(chip, 0x0159, 0x5889) || CHECK_ID(chip, 0x0138, 0x3880)) { CLR_BIT(value, 0); CLR_BIT(value, 7); } /* retval = rts51x_write_mem(us, 0xFE47, &value, 1); */ retval = __do_config_autodelink(us, &value, 1); if (retval < 0) return -EIO; if (CHECK_ID(chip, 0x0159, 0x5888)) { value = 0xFF; retval = rts51x_write_mem(us, 0xFE79, &value, 1); if (retval < 0) return -EIO; value = 0x01; retval = rts51x_write_mem(us, 0x48, &value, 1); if (retval < 0) return -EIO; } } US_DEBUGP("%s: --->\n", __func__); return 0; } static int config_autodelink_before_power_down(struct us_data *us) { struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra); int retval; u8 value; US_DEBUGP("%s: <---\n", __func__); if (!CHK_AUTO_DELINK(chip)) return 0; if (auto_delink_en) { retval = rts51x_read_mem(us, 0xFE77, &value, 1); if (retval < 0) return -EIO; SET_BIT(value, 2); retval = rts51x_write_mem(us, 0xFE77, &value, 1); if (retval < 0) return -EIO; if (CHECK_ID(chip, 0x0159, 0x5888)) { value = 0x01; retval = rts51x_write_mem(us, 0x48, &value, 1); if (retval < 0) return -EIO; } retval = rts51x_read_mem(us, 0xFE47, &value, 1); if (retval < 0) return -EIO; SET_BIT(value, 0); if (CHECK_ID(chip, 0x0138, 0x3882)) SET_BIT(value, 2); retval = rts51x_write_mem(us, 0xFE77, &value, 1); if (retval < 0) return -EIO; } else { if (CHECK_ID(chip, 0x0159, 0x5889) || CHECK_ID(chip, 0x0138, 0x3880) || CHECK_ID(chip, 0x0138, 0x3882)) { retval = rts51x_read_mem(us, 0xFE47, &value, 1); if (retval < 0) return -EIO; if (CHECK_ID(chip, 0x0159, 0x5889) || CHECK_ID(chip, 
0x0138, 0x3880)) { SET_BIT(value, 0); SET_BIT(value, 7); } if (CHECK_ID(chip, 0x0138, 0x3882)) SET_BIT(value, 2); /* retval = rts51x_write_mem(us, 0xFE47, &value, 1); */ retval = __do_config_autodelink(us, &value, 1); if (retval < 0) return -EIO; } if (CHECK_ID(chip, 0x0159, 0x5888)) { value = 0x01; retval = rts51x_write_mem(us, 0x48, &value, 1); if (retval < 0) return -EIO; } } US_DEBUGP("%s: --->\n", __func__); return 0; } static void fw5895_init(struct us_data *us) { struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra); int retval; u8 val; US_DEBUGP("%s: <---\n", __func__); if ((PRODUCT_ID(chip) != 0x0158) || (FW_VERSION(chip) != 0x5895)) { US_DEBUGP("Not the specified device, return immediately!\n"); } else { retval = rts51x_read_mem(us, 0xFD6F, &val, 1); if (retval == STATUS_SUCCESS && (val & 0x1F) == 0) { val = 0x1F; retval = rts51x_write_mem(us, 0xFD70, &val, 1); if (retval != STATUS_SUCCESS) US_DEBUGP("Write memory fail\n"); } else { US_DEBUGP("Read memory fail, OR (val & 0x1F) != 0\n"); } } US_DEBUGP("%s: --->\n", __func__); } #ifdef CONFIG_REALTEK_AUTOPM static void fw5895_set_mmc_wp(struct us_data *us) { struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra); int retval; u8 buf[13]; US_DEBUGP("%s: <---\n", __func__); if ((PRODUCT_ID(chip) != 0x0158) || (FW_VERSION(chip) != 0x5895)) { US_DEBUGP("Not the specified device, return immediately!\n"); } else { retval = rts51x_read_mem(us, 0xFD6F, buf, 1); if (retval == STATUS_SUCCESS && (buf[0] & 0x24) == 0x24) { /* SD Exist and SD WP */ retval = rts51x_read_mem(us, 0xD04E, buf, 1); if (retval == STATUS_SUCCESS) { buf[0] |= 0x04; retval = rts51x_write_mem(us, 0xFD70, buf, 1); if (retval != STATUS_SUCCESS) US_DEBUGP("Write memory fail\n"); } else { US_DEBUGP("Read memory fail\n"); } } else { US_DEBUGP("Read memory fail, OR (buf[0]&0x24)!=0x24\n"); } } US_DEBUGP("%s: --->\n", __func__); } static void rts51x_modi_suspend_timer(struct rts51x_chip *chip) { US_DEBUGP("%s: <---, state:%d\n", __func__, rts51x_get_stat(chip)); chip->timer_expires = jiffies + msecs_to_jiffies(1000*ss_delay); mod_timer(&chip->rts51x_suspend_timer, chip->timer_expires); US_DEBUGP("%s: --->\n", __func__); } static void rts51x_suspend_timer_fn(unsigned long data) { struct rts51x_chip *chip = (struct rts51x_chip *)data; struct us_data *us = chip->us; US_DEBUGP("%s: <---\n", __func__); switch (rts51x_get_stat(chip)) { case RTS51X_STAT_INIT: case RTS51X_STAT_RUN: rts51x_modi_suspend_timer(chip); break; case RTS51X_STAT_IDLE: case RTS51X_STAT_SS: US_DEBUGP("%s: RTS51X_STAT_SS, intf->pm_usage_cnt:%d," "power.usage:%d\n", __func__, atomic_read(&us->pusb_intf->pm_usage_cnt), atomic_read(&us->pusb_intf->dev.power.usage_count)); if (atomic_read(&us->pusb_intf->pm_usage_cnt) > 0) { US_DEBUGP("%s: Ready to enter SS state.\n", __func__); rts51x_set_stat(chip, RTS51X_STAT_SS); /* ignore mass storage interface's children */ pm_suspend_ignore_children(&us->pusb_intf->dev, true); usb_autopm_put_interface_async(us->pusb_intf); US_DEBUGP("%s: RTS51X_STAT_SS 01," "intf->pm_usage_cnt:%d, power.usage:%d\n", __func__, atomic_read(&us->pusb_intf->pm_usage_cnt), atomic_read( &us->pusb_intf->dev.power.usage_count)); } break; default: US_DEBUGP("%s: Unknonwn state !!!\n", __func__); break; } US_DEBUGP("%s: --->\n", __func__); } static inline int working_scsi(struct scsi_cmnd *srb) { if ((srb->cmnd[0] == TEST_UNIT_READY) || (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL)) { return 0; } return 1; } static void rts51x_invoke_transport(struct scsi_cmnd *srb, struct us_data *us) { struct 
rts51x_chip *chip = (struct rts51x_chip *)(us->extra); static int card_first_show = 1; static u8 media_not_present[] = { 0x70, 0, 0x02, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0x3A, 0, 0, 0, 0, 0 }; static u8 invalid_cmd_field[] = { 0x70, 0, 0x05, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0x24, 0, 0, 0, 0, 0 }; int ret; US_DEBUGP("%s: <---\n", __func__); if (working_scsi(srb)) { US_DEBUGP("%s: working scsi, intf->pm_usage_cnt:%d," "power.usage:%d\n", __func__, atomic_read(&us->pusb_intf->pm_usage_cnt), atomic_read(&us->pusb_intf->dev.power.usage_count)); if (atomic_read(&us->pusb_intf->pm_usage_cnt) <= 0) { ret = usb_autopm_get_interface(us->pusb_intf); US_DEBUGP("%s: working scsi, ret=%d\n", __func__, ret); } if (rts51x_get_stat(chip) != RTS51X_STAT_RUN) rts51x_set_stat(chip, RTS51X_STAT_RUN); chip->proto_handler_backup(srb, us); } else { if (rts51x_get_stat(chip) == RTS51X_STAT_SS) { US_DEBUGP("%s: NOT working scsi\n", __func__); if ((srb->cmnd[0] == TEST_UNIT_READY) && (chip->pwr_state == US_SUSPEND)) { if (TST_LUN_READY(chip, srb->device->lun)) { srb->result = SAM_STAT_GOOD; } else { srb->result = SAM_STAT_CHECK_CONDITION; memcpy(srb->sense_buffer, media_not_present, US_SENSE_SIZE); } US_DEBUGP("%s: TEST_UNIT_READY--->\n", __func__); goto out; } if (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL) { int prevent = srb->cmnd[4] & 0x1; if (prevent) { srb->result = SAM_STAT_CHECK_CONDITION; memcpy(srb->sense_buffer, invalid_cmd_field, US_SENSE_SIZE); } else { srb->result = SAM_STAT_GOOD; } US_DEBUGP("%s: ALLOW_MEDIUM_REMOVAL--->\n", __func__); goto out; } } else { US_DEBUGP("%s: NOT working scsi, not SS\n", __func__); chip->proto_handler_backup(srb, us); /* Check wether card is plugged in */ if (srb->cmnd[0] == TEST_UNIT_READY) { if (srb->result == SAM_STAT_GOOD) { SET_LUN_READY(chip, srb->device->lun); if (card_first_show) { card_first_show = 0; fw5895_set_mmc_wp(us); } } else { CLR_LUN_READY(chip, srb->device->lun); card_first_show = 1; } } if (rts51x_get_stat(chip) != RTS51X_STAT_IDLE) rts51x_set_stat(chip, RTS51X_STAT_IDLE); } } out: US_DEBUGP("%s: state:%d\n", __func__, rts51x_get_stat(chip)); if (rts51x_get_stat(chip) == RTS51X_STAT_RUN) rts51x_modi_suspend_timer(chip); US_DEBUGP("%s: --->\n", __func__); } static int realtek_cr_autosuspend_setup(struct us_data *us) { struct rts51x_chip *chip; struct rts51x_status *status = NULL; u8 buf[16]; int retval; chip = (struct rts51x_chip *)us->extra; chip->support_auto_delink = 0; chip->pwr_state = US_RESUME; chip->lun_ready = 0; rts51x_set_stat(chip, RTS51X_STAT_INIT); retval = rts51x_read_status(us, 0, buf, 16, &(chip->status_len)); if (retval != STATUS_SUCCESS) { US_DEBUGP("Read status fail\n"); return -EIO; } status = chip->status; status->vid = ((u16) buf[0] << 8) | buf[1]; status->pid = ((u16) buf[2] << 8) | buf[3]; status->cur_lun = buf[4]; status->card_type = buf[5]; status->total_lun = buf[6]; status->fw_ver = ((u16) buf[7] << 8) | buf[8]; status->phy_exist = buf[9]; status->multi_flag = buf[10]; status->multi_card = buf[11]; status->log_exist = buf[12]; if (chip->status_len == 16) { status->detailed_type.detailed_type1 = buf[13]; status->function[0] = buf[14]; status->function[1] = buf[15]; } /* back up the proto_handler in us->extra */ chip = (struct rts51x_chip *)(us->extra); chip->proto_handler_backup = us->proto_handler; /* Set the autosuspend_delay to 0 */ pm_runtime_set_autosuspend_delay(&us->pusb_dev->dev, 0); /* override us->proto_handler setted in get_protocol() */ us->proto_handler = rts51x_invoke_transport; chip->timer_expires = 0; 
setup_timer(&chip->rts51x_suspend_timer, rts51x_suspend_timer_fn, (unsigned long)chip); fw5895_init(us); /* enable autosuspend function of the usb device */ usb_enable_autosuspend(us->pusb_dev); return 0; } #endif static void realtek_cr_destructor(void *extra) { struct rts51x_chip *chip = (struct rts51x_chip *)extra; US_DEBUGP("%s: <---\n", __func__); if (!chip) return; #ifdef CONFIG_REALTEK_AUTOPM if (ss_en) { del_timer(&chip->rts51x_suspend_timer); chip->timer_expires = 0; } #endif kfree(chip->status); } #ifdef CONFIG_PM static int realtek_cr_suspend(struct usb_interface *iface, pm_message_t message) { struct us_data *us = usb_get_intfdata(iface); US_DEBUGP("%s: <---\n", __func__); /* wait until no command is running */ mutex_lock(&us->dev_mutex); config_autodelink_before_power_down(us); mutex_unlock(&us->dev_mutex); US_DEBUGP("%s: --->\n", __func__); return 0; } static int realtek_cr_resume(struct usb_interface *iface) { struct us_data *us = usb_get_intfdata(iface); US_DEBUGP("%s: <---\n", __func__); fw5895_init(us); config_autodelink_after_power_on(us); US_DEBUGP("%s: --->\n", __func__); return 0; } #else #define realtek_cr_suspend NULL #define realtek_cr_resume NULL #endif static int init_realtek_cr(struct us_data *us) { struct rts51x_chip *chip; int size, i, retval; chip = kzalloc(sizeof(struct rts51x_chip), GFP_KERNEL); if (!chip) return -ENOMEM; us->extra = chip; us->extra_destructor = realtek_cr_destructor; us->max_lun = chip->max_lun = rts51x_get_max_lun(us); US_DEBUGP("chip->max_lun = %d\n", chip->max_lun); size = (chip->max_lun + 1) * sizeof(struct rts51x_status); chip->status = kzalloc(size, GFP_KERNEL); if (!chip->status) goto INIT_FAIL; for (i = 0; i <= (int)(chip->max_lun); i++) { retval = rts51x_check_status(us, (u8) i); if (retval < 0) goto INIT_FAIL; } if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) || CHECK_FW_VER(chip, 0x5901)) SET_AUTO_DELINK(chip); if (STATUS_LEN(chip) == 16) { if (SUPPORT_AUTO_DELINK(chip)) SET_AUTO_DELINK(chip); } #ifdef CONFIG_REALTEK_AUTOPM if (ss_en) { chip->us = us; realtek_cr_autosuspend_setup(us); } #endif US_DEBUGP("chip->flag = 0x%x\n", chip->flag); (void)config_autodelink_after_power_on(us); return 0; INIT_FAIL: if (us->extra) { kfree(chip->status); kfree(us->extra); us->extra = NULL; } return -EIO; } static int realtek_cr_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct us_data *us; int result; US_DEBUGP("Probe Realtek Card Reader!\n"); result = usb_stor_probe1(&us, intf, id, (id - realtek_cr_ids) + realtek_cr_unusual_dev_list); if (result) return result; result = usb_stor_probe2(us); return result; } static struct usb_driver realtek_cr_driver = { .name = "ums-realtek", .probe = realtek_cr_probe, .disconnect = usb_stor_disconnect, /* .suspend = usb_stor_suspend, */ /* .resume = usb_stor_resume, */ .reset_resume = usb_stor_reset_resume, .suspend = realtek_cr_suspend, .resume = realtek_cr_resume, .pre_reset = usb_stor_pre_reset, .post_reset = usb_stor_post_reset, .id_table = realtek_cr_ids, .soft_unbind = 1, .supports_autosuspend = 1, .no_dynamic_id = 1, }; module_usb_driver(realtek_cr_driver);
gpl-2.0
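The two rts51x_bulk_transport() variants above hand-build USB Bulk-Only Transport wrappers in us->iobuf: a 31-byte Command Block Wrapper (CBW) out, a 13-byte Command Status Wrapper (CSW) back, with a tag match and a residue calculation in between. As a reference for that wire format, here is a minimal standalone sketch in plain C; the helper names (put_le32, build_cbw, parse_csw) and the sample 0xF0/0x09 vendor CDB are invented for illustration and are not the kernel's bulk_cb_wrap/bulk_cs_wrap definitions.

/* Minimal sketch of Bulk-Only Transport framing, userspace C99. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define CBW_SIGNATURE 0x43425355u /* "USBC" when laid out little-endian */
#define CSW_SIGNATURE 0x53425355u /* "USBS" */
#define CBW_FLAG_DATA_IN 0x80

static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v & 0xff; p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff; p[3] = (v >> 24) & 0xff;
}

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* Build a 31-byte CBW, e.g. for a 0xF0-class vendor command as above. */
static void build_cbw(uint8_t cbw[31], uint32_t tag, uint32_t xfer_len,
		      int data_in, uint8_t lun, const uint8_t *cdb, uint8_t cdb_len)
{
	memset(cbw, 0, 31);
	put_le32(cbw + 0, CBW_SIGNATURE);
	put_le32(cbw + 4, tag);
	put_le32(cbw + 8, xfer_len);		/* dCBWDataTransferLength */
	cbw[12] = data_in ? CBW_FLAG_DATA_IN : 0;
	cbw[13] = lun;
	cbw[14] = cdb_len;
	memcpy(cbw + 15, cdb, cdb_len);
}

/* Returns the CSW status (0 good, 1 failed, 2 phase error), -1 on framing error. */
static int parse_csw(const uint8_t csw[13], uint32_t expected_tag,
		     uint32_t *residue)
{
	if (get_le32(csw + 0) != CSW_SIGNATURE ||
	    get_le32(csw + 4) != expected_tag)
		return -1;
	*residue = get_le32(csw + 8);	/* device's claim; clamp to buf_len as above */
	return csw[12];
}

int main(void)
{
	uint8_t cbw[31], cdb[12] = { 0xF0, 0x09 };	/* read-status-style CDB */
	build_cbw(cbw, 1, 16, 1, 0, cdb, 12);
	printf("CBW[0..3] = %02x %02x %02x %02x\n",
	       cbw[0], cbw[1], cbw[2], cbw[3]);
	return 0;
}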
sloanyang/android_kernel_huawei_mediapad10fhd
drivers/staging/comedi/drivers/pcl711.c
8240
15276
/* comedi/drivers/pcl711.c hardware driver for PC-LabCard PCL-711 and AdSys ACL-8112 and compatibles COMEDI - Linux Control and Measurement Device Interface Copyright (C) 1998 David A. Schleef <ds@schleef.org> Janne Jalkanen <jalkanen@cs.hut.fi> Eric Bunn <ebu@cs.hut.fi> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: pcl711 Description: Advantech PCL-711 and 711b, ADLink ACL-8112 Author: ds, Janne Jalkanen <jalkanen@cs.hut.fi>, Eric Bunn <ebu@cs.hut.fi> Status: mostly complete Devices: [Advantech] PCL-711 (pcl711), PCL-711B (pcl711b), [AdLink] ACL-8112HG (acl8112hg), ACL-8112DG (acl8112dg) Since these boards do not have DMA or FIFOs, only immediate mode is supported. */ /* Dave Andruczyk <dave@tech.buffalostate.edu> also wrote a driver for the PCL-711. I used a few ideas from his driver here. His driver also has more comments, if you are interested in understanding how this driver works. http://tech.buffalostate.edu/~dave/driver/ The ACL-8112 driver was hacked from the sources of the PCL-711 driver (the 744 chip used on the 8112 is almost the same as the 711b chip, but it has more I/O channels) by Janne Jalkanen (jalkanen@cs.hut.fi) and Erik Bunn (ebu@cs.hut.fi). Remerged with the PCL-711 driver by ds. [acl-8112] This driver supports both TRIGNOW and TRIGCLK, but does not yet support DMA transfers. It also supports both high (HG) and low (DG) versions of the card, though the HG version has been untested. 
*/ #include <linux/interrupt.h> #include "../comedidev.h" #include <linux/ioport.h> #include <linux/delay.h> #include "8253.h" #define PCL711_SIZE 16 #define PCL711_CTR0 0 #define PCL711_CTR1 1 #define PCL711_CTR2 2 #define PCL711_CTRCTL 3 #define PCL711_AD_LO 4 #define PCL711_DA0_LO 4 #define PCL711_AD_HI 5 #define PCL711_DA0_HI 5 #define PCL711_DI_LO 6 #define PCL711_DA1_LO 6 #define PCL711_DI_HI 7 #define PCL711_DA1_HI 7 #define PCL711_CLRINTR 8 #define PCL711_GAIN 9 #define PCL711_MUX 10 #define PCL711_MODE 11 #define PCL711_SOFTTRIG 12 #define PCL711_DO_LO 13 #define PCL711_DO_HI 14 static const struct comedi_lrange range_pcl711b_ai = { 5, { BIP_RANGE(5), BIP_RANGE(2.5), BIP_RANGE(1.25), BIP_RANGE(0.625), BIP_RANGE(0.3125) } }; static const struct comedi_lrange range_acl8112hg_ai = { 12, { BIP_RANGE(5), BIP_RANGE(0.5), BIP_RANGE(0.05), BIP_RANGE(0.005), UNI_RANGE(10), UNI_RANGE(1), UNI_RANGE(0.1), UNI_RANGE(0.01), BIP_RANGE(10), BIP_RANGE(1), BIP_RANGE(0.1), BIP_RANGE(0.01) } }; static const struct comedi_lrange range_acl8112dg_ai = { 9, { BIP_RANGE(5), BIP_RANGE(2.5), BIP_RANGE(1.25), BIP_RANGE(0.625), UNI_RANGE(10), UNI_RANGE(5), UNI_RANGE(2.5), UNI_RANGE(1.25), BIP_RANGE(10) } }; /* * flags */ #define PCL711_TIMEOUT 100 #define PCL711_DRDY 0x10 static const int i8253_osc_base = 500; /* 2 Mhz */ struct pcl711_board { const char *name; int is_pcl711b; int is_8112; int is_dg; int n_ranges; int n_aichan; int n_aochan; int maxirq; const struct comedi_lrange *ai_range_type; }; static const struct pcl711_board boardtypes[] = { {"pcl711", 0, 0, 0, 5, 8, 1, 0, &range_bipolar5}, {"pcl711b", 1, 0, 0, 5, 8, 1, 7, &range_pcl711b_ai}, {"acl8112hg", 0, 1, 0, 12, 16, 2, 15, &range_acl8112hg_ai}, {"acl8112dg", 0, 1, 1, 9, 16, 2, 15, &range_acl8112dg_ai}, }; #define n_boardtypes (sizeof(boardtypes)/sizeof(struct pcl711_board)) #define this_board ((const struct pcl711_board *)dev->board_ptr) static int pcl711_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int pcl711_detach(struct comedi_device *dev); static struct comedi_driver driver_pcl711 = { .driver_name = "pcl711", .module = THIS_MODULE, .attach = pcl711_attach, .detach = pcl711_detach, .board_name = &boardtypes[0].name, .num_names = n_boardtypes, .offset = sizeof(struct pcl711_board), }; static int __init driver_pcl711_init_module(void) { return comedi_driver_register(&driver_pcl711); } static void __exit driver_pcl711_cleanup_module(void) { comedi_driver_unregister(&driver_pcl711); } module_init(driver_pcl711_init_module); module_exit(driver_pcl711_cleanup_module); struct pcl711_private { int board; int adchan; int ntrig; int aip[8]; int mode; unsigned int ao_readback[2]; unsigned int divisor1; unsigned int divisor2; }; #define devpriv ((struct pcl711_private *)dev->private) static irqreturn_t pcl711_interrupt(int irq, void *d) { int lo, hi; int data; struct comedi_device *dev = d; struct comedi_subdevice *s = dev->subdevices + 0; if (!dev->attached) { comedi_error(dev, "spurious interrupt"); return IRQ_HANDLED; } hi = inb(dev->iobase + PCL711_AD_HI); lo = inb(dev->iobase + PCL711_AD_LO); outb(0, dev->iobase + PCL711_CLRINTR); data = (hi << 8) | lo; /* FIXME! Nothing else sets ntrig! 
*/ if (!(--devpriv->ntrig)) { if (this_board->is_8112) outb(1, dev->iobase + PCL711_MODE); else outb(0, dev->iobase + PCL711_MODE); s->async->events |= COMEDI_CB_EOA; } comedi_event(dev, s); return IRQ_HANDLED; } static void pcl711_set_changain(struct comedi_device *dev, int chan) { int chan_register; outb(CR_RANGE(chan), dev->iobase + PCL711_GAIN); chan_register = CR_CHAN(chan); if (this_board->is_8112) { /* * Set the correct channel. The two channel banks are switched * using the mask value. * NB: To use differential channels, you should use * mask = 0x30, but I haven't written the support for this * yet. /JJ */ if (chan_register >= 8) chan_register = 0x20 | (chan_register & 0x7); else chan_register |= 0x10; } else { outb(chan_register, dev->iobase + PCL711_MUX); } } static int pcl711_ai_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i, n; int hi, lo; pcl711_set_changain(dev, insn->chanspec); for (n = 0; n < insn->n; n++) { /* * Write the correct mode (software polling) and start polling * by writing to the trigger register */ outb(1, dev->iobase + PCL711_MODE); if (!this_board->is_8112) outb(0, dev->iobase + PCL711_SOFTTRIG); i = PCL711_TIMEOUT; while (--i) { hi = inb(dev->iobase + PCL711_AD_HI); if (!(hi & PCL711_DRDY)) goto ok; udelay(1); } printk(KERN_ERR "comedi%d: pcl711: A/D timeout\n", dev->minor); return -ETIME; ok: lo = inb(dev->iobase + PCL711_AD_LO); data[n] = ((hi & 0xf) << 8) | lo; } return n; } static int pcl711_ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { int tmp; int err = 0; /* step 1 */ tmp = cmd->start_src; cmd->start_src &= TRIG_NOW; if (!cmd->start_src || tmp != cmd->start_src) err++; tmp = cmd->scan_begin_src; cmd->scan_begin_src &= TRIG_TIMER | TRIG_EXT; if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src) err++; tmp = cmd->convert_src; cmd->convert_src &= TRIG_NOW; if (!cmd->convert_src || tmp != cmd->convert_src) err++; tmp = cmd->scan_end_src; cmd->scan_end_src &= TRIG_COUNT; if (!cmd->scan_end_src || tmp != cmd->scan_end_src) err++; tmp = cmd->stop_src; cmd->stop_src &= TRIG_COUNT | TRIG_NONE; if (!cmd->stop_src || tmp != cmd->stop_src) err++; if (err) return 1; /* step 2 */ if (cmd->scan_begin_src != TRIG_TIMER && cmd->scan_begin_src != TRIG_EXT) err++; if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE) err++; if (err) return 2; /* step 3 */ if (cmd->start_arg != 0) { cmd->start_arg = 0; err++; } if (cmd->scan_begin_src == TRIG_EXT) { if (cmd->scan_begin_arg != 0) { cmd->scan_begin_arg = 0; err++; } } else { #define MAX_SPEED 1000 #define TIMER_BASE 100 if (cmd->scan_begin_arg < MAX_SPEED) { cmd->scan_begin_arg = MAX_SPEED; err++; } } if (cmd->convert_arg != 0) { cmd->convert_arg = 0; err++; } if (cmd->scan_end_arg != cmd->chanlist_len) { cmd->scan_end_arg = cmd->chanlist_len; err++; } if (cmd->stop_src == TRIG_NONE) { if (cmd->stop_arg != 0) { cmd->stop_arg = 0; err++; } } else { /* ignore */ } if (err) return 3; /* step 4 */ if (cmd->scan_begin_src == TRIG_TIMER) { tmp = cmd->scan_begin_arg; i8253_cascade_ns_to_timer_2div(TIMER_BASE, &devpriv->divisor1, &devpriv->divisor2, &cmd->scan_begin_arg, cmd->flags & TRIG_ROUND_MASK); if (tmp != cmd->scan_begin_arg) err++; } if (err) return 4; return 0; } static int pcl711_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { int timer1, timer2; struct comedi_cmd *cmd = &s->async->cmd; pcl711_set_changain(dev, cmd->chanlist[0]); if (cmd->scan_begin_src == TRIG_TIMER) { /* * Set 
timers * timer chip is an 8253, with timers 1 and 2 * cascaded * 0x74 = Select Counter 1 | LSB/MSB | Mode=2 | Binary * Mode 2 = Rate generator * * 0xb4 = Select Counter 2 | LSB/MSB | Mode=2 | Binary */ timer1 = timer2 = 0; i8253_cascade_ns_to_timer(i8253_osc_base, &timer1, &timer2, &cmd->scan_begin_arg, TRIG_ROUND_NEAREST); outb(0x74, dev->iobase + PCL711_CTRCTL); outb(timer1 & 0xff, dev->iobase + PCL711_CTR1); outb((timer1 >> 8) & 0xff, dev->iobase + PCL711_CTR1); outb(0xb4, dev->iobase + PCL711_CTRCTL); outb(timer2 & 0xff, dev->iobase + PCL711_CTR2); outb((timer2 >> 8) & 0xff, dev->iobase + PCL711_CTR2); /* clear pending interrupts (just in case) */ outb(0, dev->iobase + PCL711_CLRINTR); /* * Set mode to IRQ transfer */ outb(devpriv->mode | 6, dev->iobase + PCL711_MODE); } else { /* external trigger */ outb(devpriv->mode | 3, dev->iobase + PCL711_MODE); } return 0; } /* analog output */ static int pcl711_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int n; int chan = CR_CHAN(insn->chanspec); for (n = 0; n < insn->n; n++) { outb((data[n] & 0xff), dev->iobase + (chan ? PCL711_DA1_LO : PCL711_DA0_LO)); outb((data[n] >> 8), dev->iobase + (chan ? PCL711_DA1_HI : PCL711_DA0_HI)); devpriv->ao_readback[chan] = data[n]; } return n; } static int pcl711_ao_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int n; int chan = CR_CHAN(insn->chanspec); for (n = 0; n < insn->n; n++) data[n] = devpriv->ao_readback[chan]; return n; } /* Digital port read - Untested on 8112 */ static int pcl711_di_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { if (insn->n != 2) return -EINVAL; data[1] = inb(dev->iobase + PCL711_DI_LO) | (inb(dev->iobase + PCL711_DI_HI) << 8); return 2; } /* Digital port write - Untested on 8112 */ static int pcl711_do_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { if (insn->n != 2) return -EINVAL; if (data[0]) { s->state &= ~data[0]; s->state |= data[0] & data[1]; } if (data[0] & 0x00ff) outb(s->state & 0xff, dev->iobase + PCL711_DO_LO); if (data[0] & 0xff00) outb((s->state >> 8), dev->iobase + PCL711_DO_HI); data[1] = s->state; return 2; } /* Free any resources that we have claimed */ static int pcl711_detach(struct comedi_device *dev) { printk(KERN_INFO "comedi%d: pcl711: remove\n", dev->minor); if (dev->irq) free_irq(dev->irq, dev); if (dev->iobase) release_region(dev->iobase, PCL711_SIZE); return 0; } /* Initialization */ static int pcl711_attach(struct comedi_device *dev, struct comedi_devconfig *it) { int ret; unsigned long iobase; unsigned int irq; struct comedi_subdevice *s; /* claim our I/O space */ iobase = it->options[0]; printk(KERN_INFO "comedi%d: pcl711: 0x%04lx ", dev->minor, iobase); if (!request_region(iobase, PCL711_SIZE, "pcl711")) { printk("I/O port conflict\n"); return -EIO; } dev->iobase = iobase; /* there should be a sanity check here */ /* set up some name stuff */ dev->board_name = this_board->name; /* grab our IRQ */ irq = it->options[1]; if (irq > this_board->maxirq) { printk(KERN_ERR "irq out of range\n"); return -EINVAL; } if (irq) { if (request_irq(irq, pcl711_interrupt, 0, "pcl711", dev)) { printk(KERN_ERR "unable to allocate irq %u\n", irq); return -EINVAL; } else { printk(KERN_INFO "( irq = %u )\n", irq); } } dev->irq = irq; ret = alloc_subdevices(dev, 4); if (ret < 0) return ret; ret = 
alloc_private(dev, sizeof(struct pcl711_private)); if (ret < 0) return ret; s = dev->subdevices + 0; /* AI subdevice */ s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_GROUND; s->n_chan = this_board->n_aichan; s->maxdata = 0xfff; s->len_chanlist = 1; s->range_table = this_board->ai_range_type; s->insn_read = pcl711_ai_insn; if (irq) { dev->read_subdev = s; s->subdev_flags |= SDF_CMD_READ; s->do_cmdtest = pcl711_ai_cmdtest; s->do_cmd = pcl711_ai_cmd; } s++; /* AO subdevice */ s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITABLE; s->n_chan = this_board->n_aochan; s->maxdata = 0xfff; s->len_chanlist = 1; s->range_table = &range_bipolar5; s->insn_write = pcl711_ao_insn; s->insn_read = pcl711_ao_insn_read; s++; /* 16-bit digital input */ s->type = COMEDI_SUBD_DI; s->subdev_flags = SDF_READABLE; s->n_chan = 16; s->maxdata = 1; s->len_chanlist = 16; s->range_table = &range_digital; s->insn_bits = pcl711_di_insn_bits; s++; /* 16-bit digital out */ s->type = COMEDI_SUBD_DO; s->subdev_flags = SDF_WRITABLE; s->n_chan = 16; s->maxdata = 1; s->len_chanlist = 16; s->range_table = &range_digital; s->state = 0; s->insn_bits = pcl711_do_insn_bits; /* this is the "base value" for the mode register, which is used for the irq on the PCL711 */ if (this_board->is_pcl711b) devpriv->mode = (dev->irq << 4); /* clear DAC */ outb(0, dev->iobase + PCL711_DA0_LO); outb(0, dev->iobase + PCL711_DA0_HI); outb(0, dev->iobase + PCL711_DA1_LO); outb(0, dev->iobase + PCL711_DA1_HI); printk(KERN_INFO "\n"); return 0; } MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
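The pcl711 command path above delegates the timing math to i8253_cascade_ns_to_timer(): two cascaded 16-bit 8253 counters must be loaded with divisors whose product approximates the requested scan period divided by the 500 ns clock base. Below is a simplified, standalone sketch of what such a helper has to compute; the brute-force search and the cascade_ns_to_timer() name are illustrative only, and the real kernel helper handles rounding modes and edge cases that this sketch does not.

/* Split one large divisor into two 16-bit counter values. C99. */
#include <stdio.h>
#include <stdint.h>

static void cascade_ns_to_timer(unsigned int base_ns, unsigned int *div1,
				unsigned int *div2, unsigned int *ns)
{
	uint64_t target = *ns / base_ns;	/* total divisor wanted */
	uint64_t best_err = UINT64_MAX;
	unsigned int d1;

	for (d1 = 2; d1 <= 65535; d1++) {	/* mode-2 counters need >= 2 */
		uint64_t d2 = target / d1;
		if (d2 < 2)
			break;
		if (d2 > 65535)
			continue;
		uint64_t got = (uint64_t)d1 * d2;
		uint64_t err = target > got ? target - got : got - target;
		if (err < best_err) {
			best_err = err;
			*div1 = d1;
			*div2 = (unsigned int)d2;
		}
	}
	/* report the period actually achievable with these divisors */
	*ns = (unsigned int)((uint64_t)*div1 * *div2 * base_ns);
}

int main(void)
{
	unsigned int d1 = 0, d2 = 0, ns = 1000000;	/* ask for a 1 ms scan period */
	cascade_ns_to_timer(500, &d1, &d2, &ns);	/* 500 ns base = 2 MHz clock */
	printf("div1=%u div2=%u actual=%u ns\n", d1, d2, ns);
	return 0;
}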
meyskld/samsung-kernel-galaxysii
arch/mips/sni/setup.c
8496
5672
/* * Setup pointers to hardware-dependent routines. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1996, 97, 98, 2000, 03, 04, 06 Ralf Baechle (ralf@linux-mips.org) * Copyright (C) 2006,2007 Thomas Bogendoerfer (tsbogend@alpha.franken.de) */ #include <linux/eisa.h> #include <linux/init.h> #include <linux/console.h> #include <linux/fb.h> #include <linux/screen_info.h> #ifdef CONFIG_ARC #include <asm/fw/arc/types.h> #include <asm/sgialib.h> #endif #ifdef CONFIG_SNIPROM #include <asm/mipsprom.h> #endif #include <asm/bootinfo.h> #include <asm/io.h> #include <asm/reboot.h> #include <asm/sni.h> unsigned int sni_brd_type; EXPORT_SYMBOL(sni_brd_type); extern void sni_machine_restart(char *command); extern void sni_machine_power_off(void); static void __init sni_display_setup(void) { #if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE) && defined(CONFIG_ARC) struct screen_info *si = &screen_info; DISPLAY_STATUS *di; di = ArcGetDisplayStatus(1); if (di) { si->orig_x = di->CursorXPosition; si->orig_y = di->CursorYPosition; si->orig_video_cols = di->CursorMaxXPosition; si->orig_video_lines = di->CursorMaxYPosition; si->orig_video_isVGA = VIDEO_TYPE_VGAC; si->orig_video_points = 16; } #endif } static void __init sni_console_setup(void) { #ifndef CONFIG_ARC char *ctype; char *cdev; char *baud; int port; static char options[8] __initdata; cdev = prom_getenv("console_dev"); if (strncmp(cdev, "tty", 3) == 0) { ctype = prom_getenv("console"); switch (*ctype) { default: case 'l': port = 0; baud = prom_getenv("lbaud"); break; case 'r': port = 1; baud = prom_getenv("rbaud"); break; } if (baud) strcpy(options, baud); if (strncmp(cdev, "tty552", 6) == 0) add_preferred_console("ttyS", port, baud ? options : NULL); else add_preferred_console("ttySC", port, baud ? 
options : NULL); } #endif } #ifdef DEBUG static void __init sni_idprom_dump(void) { int i; pr_debug("SNI IDProm dump:\n"); for (i = 0; i < 256; i++) { if (i%16 == 0) pr_debug("%04x ", i); printk("%02x ", *(unsigned char *) (SNI_IDPROM_BASE + i)); if (i % 16 == 15) printk("\n"); } } #endif void __init plat_mem_setup(void) { int cputype; set_io_port_base(SNI_PORT_BASE); // ioport_resource.end = sni_io_resource.end; /* * Setup (E)ISA I/O memory access stuff */ #ifdef CONFIG_EISA EISA_bus = 1; #endif sni_brd_type = *(unsigned char *)SNI_IDPROM_BRDTYPE; cputype = *(unsigned char *)SNI_IDPROM_CPUTYPE; switch (sni_brd_type) { case SNI_BRD_TOWER_OASIC: switch (cputype) { case SNI_CPU_M8030: system_type = "RM400-330"; break; case SNI_CPU_M8031: system_type = "RM400-430"; break; case SNI_CPU_M8037: system_type = "RM400-530"; break; case SNI_CPU_M8034: system_type = "RM400-730"; break; default: system_type = "RM400-xxx"; break; } break; case SNI_BRD_MINITOWER: switch (cputype) { case SNI_CPU_M8021: case SNI_CPU_M8043: system_type = "RM400-120"; break; case SNI_CPU_M8040: system_type = "RM400-220"; break; case SNI_CPU_M8053: system_type = "RM400-225"; break; case SNI_CPU_M8050: system_type = "RM400-420"; break; default: system_type = "RM400-xxx"; break; } break; case SNI_BRD_PCI_TOWER: system_type = "RM400-Cxx"; break; case SNI_BRD_RM200: system_type = "RM200-xxx"; break; case SNI_BRD_PCI_MTOWER: system_type = "RM300-Cxx"; break; case SNI_BRD_PCI_DESKTOP: switch (read_c0_prid() & 0xff00) { case PRID_IMP_R4600: case PRID_IMP_R4700: system_type = "RM200-C20"; break; case PRID_IMP_R5000: system_type = "RM200-C40"; break; default: system_type = "RM200-Cxx"; break; } break; case SNI_BRD_PCI_TOWER_CPLUS: system_type = "RM400-Exx"; break; case SNI_BRD_PCI_MTOWER_CPLUS: system_type = "RM300-Exx"; break; } pr_debug("Found SNI brdtype %02x name %s\n", sni_brd_type, system_type); #ifdef DEBUG sni_idprom_dump(); #endif switch (sni_brd_type) { case SNI_BRD_10: case SNI_BRD_10NEW: case SNI_BRD_TOWER_OASIC: case SNI_BRD_MINITOWER: sni_a20r_init(); break; case SNI_BRD_PCI_TOWER: case SNI_BRD_PCI_TOWER_CPLUS: sni_pcit_init(); break; case SNI_BRD_RM200: sni_rm200_init(); break; case SNI_BRD_PCI_MTOWER: case SNI_BRD_PCI_DESKTOP: case SNI_BRD_PCI_MTOWER_CPLUS: sni_pcimt_init(); break; } _machine_restart = sni_machine_restart; pm_power_off = sni_machine_power_off; sni_display_setup(); sni_console_setup(); } #ifdef CONFIG_PCI #include <linux/pci.h> #include <video/vga.h> #include <video/cirrus.h> static void __devinit quirk_cirrus_ram_size(struct pci_dev *dev) { u16 cmd; /* * firmware doesn't set the ram size correct, so we * need to do it here, otherwise we get screen corruption * on older Cirrus chips */ pci_read_config_word(dev, PCI_COMMAND, &cmd); if ((cmd & (PCI_COMMAND_IO|PCI_COMMAND_MEMORY)) == (PCI_COMMAND_IO|PCI_COMMAND_MEMORY)) { vga_wseq(NULL, CL_SEQR6, 0x12); /* unlock all extension registers */ vga_wseq(NULL, CL_SEQRF, 0x18); } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5434_8, quirk_cirrus_ram_size); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5436, quirk_cirrus_ram_size); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, quirk_cirrus_ram_size); #endif
gpl-2.0
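The sni_console_setup() routine above chooses the preferred console from firmware environment strings: "console_dev" selects serial vs. nothing, "console" picks the local or remote port, and "lbaud"/"rbaud" supply the options string. A standalone sketch of that selection logic follows; fake_getenv() and its invented values stand in for prom_getenv(), which is firmware-specific.

/* Console-selection sketch with a hypothetical firmware environment. */
#include <stdio.h>
#include <string.h>

static const char *fake_getenv(const char *name)
{
	/* invented values for illustration only */
	if (!strcmp(name, "console_dev")) return "tty552";
	if (!strcmp(name, "console"))     return "l9600";
	if (!strcmp(name, "lbaud"))       return "9600";
	if (!strcmp(name, "rbaud"))       return "38400";
	return NULL;
}

int main(void)
{
	const char *cdev = fake_getenv("console_dev");
	const char *ctype, *baud;
	int port;

	if (!cdev || strncmp(cdev, "tty", 3) != 0)
		return 0;	/* not a serial console */

	ctype = fake_getenv("console");
	switch (ctype ? *ctype : 'l') {
	default:
	case 'l': port = 0; baud = fake_getenv("lbaud"); break;	/* local */
	case 'r': port = 1; baud = fake_getenv("rbaud"); break;	/* remote */
	}

	/* tty552 boards use the 8250-style "ttyS" name, others "ttySC" */
	printf("add_preferred_console(\"%s\", %d, %s)\n",
	       strncmp(cdev, "tty552", 6) == 0 ? "ttyS" : "ttySC",
	       port, baud ? baud : "NULL");
	return 0;
}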
Abhinav1997/kernel-diff
arch/powerpc/platforms/amigaone/setup.c
10544
4379
/* * AmigaOne platform setup * * Copyright 2008 Gerhard Pircher (gerhard_pircher@gmx.net) * * Based on original amigaone_setup.c source code * Copyright 2003 by Hans-Joerg Frieden and Thomas Frieden * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/seq_file.h> #include <generated/utsrelease.h> #include <asm/machdep.h> #include <asm/cputable.h> #include <asm/pci-bridge.h> #include <asm/i8259.h> #include <asm/time.h> #include <asm/udbg.h> extern void __flush_disable_L1(void); void amigaone_show_cpuinfo(struct seq_file *m) { seq_printf(m, "vendor\t\t: Eyetech Ltd.\n"); } static int __init amigaone_add_bridge(struct device_node *dev) { const u32 *cfg_addr, *cfg_data; int len; const int *bus_range; struct pci_controller *hose; printk(KERN_INFO "Adding PCI host bridge %s\n", dev->full_name); cfg_addr = of_get_address(dev, 0, NULL, NULL); cfg_data = of_get_address(dev, 1, NULL, NULL); if ((cfg_addr == NULL) || (cfg_data == NULL)) return -ENODEV; bus_range = of_get_property(dev, "bus-range", &len); if ((bus_range == NULL) || (len < 2 * sizeof(int))) printk(KERN_WARNING "Can't get bus-range for %s, assume" " bus 0\n", dev->full_name); hose = pcibios_alloc_controller(dev); if (hose == NULL) return -ENOMEM; hose->first_busno = bus_range ? bus_range[0] : 0; hose->last_busno = bus_range ? bus_range[1] : 0xff; setup_indirect_pci(hose, cfg_addr[0], cfg_data[0], 0); /* Interpret the "ranges" property */ /* This also maps the I/O region and sets isa_io/mem_base */ pci_process_bridge_OF_ranges(hose, dev, 1); return 0; } void __init amigaone_setup_arch(void) { struct device_node *np; int phb = -ENODEV; /* Lookup PCI host bridges. */ for_each_compatible_node(np, "pci", "mai-logic,articia-s") phb = amigaone_add_bridge(np); BUG_ON(phb != 0); if (ppc_md.progress) ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0); } void __init amigaone_init_IRQ(void) { struct device_node *pic, *np = NULL; const unsigned long *prop = NULL; unsigned long int_ack = 0; /* Search for ISA interrupt controller. */ pic = of_find_compatible_node(NULL, "interrupt-controller", "pnpPNP,000"); BUG_ON(pic == NULL); /* Look for interrupt acknowledge address in the PCI root node. */ np = of_find_compatible_node(NULL, "pci", "mai-logic,articia-s"); if (np) { prop = of_get_property(np, "8259-interrupt-acknowledge", NULL); if (prop) int_ack = prop[0]; of_node_put(np); } if (int_ack == 0) printk(KERN_WARNING "Cannot find PCI interrupt acknowledge" " address, polling\n"); i8259_init(pic, int_ack); ppc_md.get_irq = i8259_irq; irq_set_default_host(i8259_get_host()); } static int __init request_isa_regions(void) { request_region(0x00, 0x20, "dma1"); request_region(0x40, 0x20, "timer"); request_region(0x80, 0x10, "dma page reg"); request_region(0xc0, 0x20, "dma2"); return 0; } machine_device_initcall(amigaone, request_isa_regions); void amigaone_restart(char *cmd) { local_irq_disable(); /* Flush and disable caches. */ __flush_disable_L1(); /* Set SRR0 to the reset vector and turn on MSR_IP. */ mtspr(SPRN_SRR0, 0xfff00100); mtspr(SPRN_SRR1, MSR_IP); /* Do an rfi to jump back to firmware. */ __asm__ __volatile__("rfi" : : : "memory"); /* Not reached. 
*/ while (1); } static int __init amigaone_probe(void) { unsigned long root = of_get_flat_dt_root(); if (of_flat_dt_is_compatible(root, "eyetech,amigaone")) { /* * Coherent memory access causes a complete system lockup! Thus * disable this CPU feature, even if the CPU needs it. */ cur_cpu_spec->cpu_features &= ~CPU_FTR_NEED_COHERENT; ISA_DMA_THRESHOLD = 0x00ffffff; DMA_MODE_READ = 0x44; DMA_MODE_WRITE = 0x48; return 1; } return 0; } define_machine(amigaone) { .name = "AmigaOne", .probe = amigaone_probe, .setup_arch = amigaone_setup_arch, .show_cpuinfo = amigaone_show_cpuinfo, .init_IRQ = amigaone_init_IRQ, .restart = amigaone_restart, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, };
gpl-2.0
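amigaone_add_bridge() above tolerates a missing or truncated "bus-range" device-tree property by warning and assuming bus 0 (first_busno 0, last_busno 0xff). That defaulting logic is isolated below as a standalone sketch; the struct name, decode_bus_range(), and the fake property array replace of_get_property() purely for illustration.

/* Decode a "bus-range"-style property with a pessimistic fallback. */
#include <stdio.h>
#include <stddef.h>

struct pci_bus_range { int first, last; };

/* prop points at two 32-bit cells, len in bytes; either may be missing. */
static struct pci_bus_range decode_bus_range(const int *prop, int len)
{
	struct pci_bus_range r = { 0, 0xff };	/* scan the whole bus space */

	if (prop == NULL || len < 2 * (int)sizeof(int)) {
		fprintf(stderr, "Can't get bus-range, assume bus 0\n");
		return r;
	}
	r.first = prop[0];
	r.last  = prop[1];
	return r;
}

int main(void)
{
	int good[2] = { 0, 3 };
	struct pci_bus_range a = decode_bus_range(good, sizeof(good));
	struct pci_bus_range b = decode_bus_range(NULL, 0);

	printf("good: %d..%d, missing: %d..%d\n", a.first, a.last, b.first, b.last);
	return 0;
}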
Jason-Lam/linux-am335x
arch/sh/boards/mach-microdev/io.c
12336
4159
/* * linux/arch/sh/boards/superh/microdev/io.c * * Copyright (C) 2003 Sean McGoogan (Sean.McGoogan@superh.com) * Copyright (C) 2003, 2004 SuperH, Inc. * Copyright (C) 2004 Paul Mundt * * SuperH SH4-202 MicroDev board support. * * May be copied or modified under the terms of the GNU General Public * License. See linux/COPYING for more information. */ #include <linux/init.h> #include <linux/pci.h> #include <linux/wait.h> #include <asm/io.h> #include <mach/microdev.h> /* * we need to have a 'safe' address to re-direct all I/O requests * that we do not explicitly wish to handle. This safe address * must have the following properies: * * * writes are ignored (no exception) * * reads are benign (no side-effects) * * accesses of width 1, 2 and 4-bytes are all valid. * * The Processor Version Register (PVR) has these properties. */ #define PVR 0xff000030 /* Processor Version Register */ #define IO_IDE2_BASE 0x170ul /* I/O base for SMSC FDC37C93xAPM IDE #2 */ #define IO_IDE1_BASE 0x1f0ul /* I/O base for SMSC FDC37C93xAPM IDE #1 */ #define IO_ISP1161_BASE 0x290ul /* I/O port for Philips ISP1161x USB chip */ #define IO_SERIAL2_BASE 0x2f8ul /* I/O base for SMSC FDC37C93xAPM Serial #2 */ #define IO_LAN91C111_BASE 0x300ul /* I/O base for SMSC LAN91C111 Ethernet chip */ #define IO_IDE2_MISC 0x376ul /* I/O misc for SMSC FDC37C93xAPM IDE #2 */ #define IO_SUPERIO_BASE 0x3f0ul /* I/O base for SMSC FDC37C93xAPM SuperIO chip */ #define IO_IDE1_MISC 0x3f6ul /* I/O misc for SMSC FDC37C93xAPM IDE #1 */ #define IO_SERIAL1_BASE 0x3f8ul /* I/O base for SMSC FDC37C93xAPM Serial #1 */ #define IO_ISP1161_EXTENT 0x04ul /* I/O extent for Philips ISP1161x USB chip */ #define IO_LAN91C111_EXTENT 0x10ul /* I/O extent for SMSC LAN91C111 Ethernet chip */ #define IO_SUPERIO_EXTENT 0x02ul /* I/O extent for SMSC FDC37C93xAPM SuperIO chip */ #define IO_IDE_EXTENT 0x08ul /* I/O extent for IDE Task Register set */ #define IO_SERIAL_EXTENT 0x10ul #define IO_LAN91C111_PHYS 0xa7500000ul /* Physical address of SMSC LAN91C111 Ethernet chip */ #define IO_ISP1161_PHYS 0xa7700000ul /* Physical address of Philips ISP1161x USB chip */ #define IO_SUPERIO_PHYS 0xa7800000ul /* Physical address of SMSC FDC37C93xAPM SuperIO chip */ /* * map I/O ports to memory-mapped addresses */ void __iomem *microdev_ioport_map(unsigned long offset, unsigned int len) { unsigned long result; if ((offset >= IO_LAN91C111_BASE) && (offset < IO_LAN91C111_BASE + IO_LAN91C111_EXTENT)) { /* * SMSC LAN91C111 Ethernet chip */ result = IO_LAN91C111_PHYS + offset - IO_LAN91C111_BASE; } else if ((offset >= IO_SUPERIO_BASE) && (offset < IO_SUPERIO_BASE + IO_SUPERIO_EXTENT)) { /* * SMSC FDC37C93xAPM SuperIO chip * * Configuration Registers */ result = IO_SUPERIO_PHYS + (offset << 1); } else if (((offset >= IO_IDE1_BASE) && (offset < IO_IDE1_BASE + IO_IDE_EXTENT)) || (offset == IO_IDE1_MISC)) { /* * SMSC FDC37C93xAPM SuperIO chip * * IDE #1 */ result = IO_SUPERIO_PHYS + (offset << 1); } else if (((offset >= IO_IDE2_BASE) && (offset < IO_IDE2_BASE + IO_IDE_EXTENT)) || (offset == IO_IDE2_MISC)) { /* * SMSC FDC37C93xAPM SuperIO chip * * IDE #2 */ result = IO_SUPERIO_PHYS + (offset << 1); } else if ((offset >= IO_SERIAL1_BASE) && (offset < IO_SERIAL1_BASE + IO_SERIAL_EXTENT)) { /* * SMSC FDC37C93xAPM SuperIO chip * * Serial #1 */ result = IO_SUPERIO_PHYS + (offset << 1); } else if ((offset >= IO_SERIAL2_BASE) && (offset < IO_SERIAL2_BASE + IO_SERIAL_EXTENT)) { /* * SMSC FDC37C93xAPM SuperIO chip * * Serial #2 */ result = IO_SUPERIO_PHYS + (offset << 1); } else if ((offset >= 
IO_ISP1161_BASE) && (offset < IO_ISP1161_BASE + IO_ISP1161_EXTENT)) { /* * Philips USB ISP1161x chip */ result = IO_ISP1161_PHYS + offset - IO_ISP1161_BASE; } else { /* * safe default. */ printk("Warning: unexpected port in %s( offset = 0x%lx )\n", __func__, offset); result = PVR; } return (void __iomem *)result; }
gpl-2.0
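microdev_ioport_map() above is a chain of range checks, where most SuperIO windows share the same rule: registers sit on 16-bit strides, so the physical address is IO_SUPERIO_PHYS + (offset << 1), while the Ethernet window is a plain linear remap. The same dispatch can be written as a table of windows, sketched below; the constants are copied from the file's #defines, only three of the board's windows are shown, and the table form is an illustrative refactoring, not the kernel code.

/* Table-driven port-to-physical mapping sketch. C99. */
#include <stdio.h>

#define IO_LAN91C111_BASE   0x300ul
#define IO_LAN91C111_EXTENT 0x10ul
#define IO_LAN91C111_PHYS   0xa7500000ul
#define IO_SUPERIO_PHYS     0xa7800000ul
#define PVR                 0xff000030ul	/* safe fallback: benign reads, ignored writes */

struct io_window {
	unsigned long base, extent;
	unsigned long phys;
	int shift;	/* 1 = SuperIO-style 16-bit stride: phys + (offset << 1) */
};

static const struct io_window windows[] = {
	{ IO_LAN91C111_BASE, IO_LAN91C111_EXTENT, IO_LAN91C111_PHYS, 0 },
	{ 0x3f0ul, 0x02ul, IO_SUPERIO_PHYS, 1 },	/* SuperIO config */
	{ 0x1f0ul, 0x08ul, IO_SUPERIO_PHYS, 1 },	/* IDE #1 task file */
};

static unsigned long map_port(unsigned long offset)
{
	for (unsigned int i = 0; i < sizeof(windows) / sizeof(windows[0]); i++) {
		const struct io_window *w = &windows[i];

		if (offset >= w->base && offset < w->base + w->extent)
			return w->shift ? w->phys + (offset << 1)
					: w->phys + offset - w->base;
	}
	return PVR;
}

int main(void)
{
	printf("0x300 -> %#lx\n", map_port(0x300));	/* LAN91C111 */
	printf("0x3f0 -> %#lx\n", map_port(0x3f0));	/* SuperIO, shifted */
	printf("0x123 -> %#lx\n", map_port(0x123));	/* unmapped -> PVR */
	return 0;
}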
cs-willian-silva/vlc
modules/mux/mpeg/tsutil.c
49
3514
/***************************************************************************** * tsutil.c ***************************************************************************** * Copyright (C) 2001-2005, 2015 VLC authors and VideoLAN * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA. *****************************************************************************/ #ifdef HAVE_CONFIG_H # include "config.h" #endif #include <vlc_common.h> #include <vlc_block.h> #include "tsutil.h" void PEStoTS( void *p_opaque, PEStoTSCallback pf_callback, block_t *p_pes, int i_pid, bool *pb_discontinuity, int *pi_continuity_counter ) { /* get PES total size */ uint8_t *p_data = p_pes->p_buffer; int i_size = p_pes->i_buffer; bool b_new_pes = true; for (;;) { /* write header * 8b 0x47 sync byte * 1b transport_error_indicator * 1b payload_unit_start * 1b transport_priority * 13b pid * 2b transport_scrambling_control * 2b if adaptation_field 0x03 else 0x01 * 4b continuity_counter */ int i_copy = __MIN( i_size, 184 ); bool b_adaptation_field = i_size < 184; block_t *p_ts = block_Alloc( 188 ); p_ts->p_buffer[0] = 0x47; p_ts->p_buffer[1] = ( b_new_pes ? 0x40 : 0x00 )| ( ( i_pid >> 8 )&0x1f ); p_ts->p_buffer[2] = i_pid & 0xff; p_ts->p_buffer[3] = ( b_adaptation_field ? 0x30 : 0x10 )| *pi_continuity_counter; b_new_pes = false; *pi_continuity_counter = (*pi_continuity_counter+1)%16; if( b_adaptation_field ) { int i_stuffing = 184 - i_copy; p_ts->p_buffer[4] = i_stuffing - 1; if( i_stuffing > 1 ) { p_ts->p_buffer[5] = 0x00; if( *pb_discontinuity ) { p_ts->p_buffer[5] |= 0x80; *pb_discontinuity = false; } for (int i = 6; i < 6 + i_stuffing - 2; i++ ) { p_ts->p_buffer[i] = 0xff; } } } /* copy payload */ memcpy( &p_ts->p_buffer[188 - i_copy], p_data, i_copy ); p_data += i_copy; i_size -= i_copy; pf_callback( p_opaque, p_ts ); if( i_size <= 0 ) { block_t *p_next = p_pes->p_next; p_pes->p_next = NULL; block_Release( p_pes ); if( p_next == NULL ) return; b_new_pes = true; p_pes = p_next; i_size = p_pes->i_buffer; p_data = p_pes->p_buffer; } } }
gpl-2.0
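PEStoTS() above slices a PES chain into 188-byte TS packets: a 4-byte header with sync byte, PID, and a 4-bit continuity counter, plus an adaptation field full of 0xFF stuffing whenever fewer than 184 payload bytes remain. The header and stuffing layout are isolated below as a standalone sketch; build_ts_header() and the sample PID are invented names, and payload copying is left to the caller exactly as the comment notes.

/* TS header / adaptation-field layout sketch. C99. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TS_PACKET_SIZE 188
#define TS_PAYLOAD_MAX 184

static void build_ts_header(uint8_t pkt[TS_PACKET_SIZE], int pid,
			    int payload_len, int unit_start, int *cc)
{
	int adaptation = payload_len < TS_PAYLOAD_MAX;

	pkt[0] = 0x47;					/* sync byte */
	pkt[1] = (unit_start ? 0x40 : 0x00) | ((pid >> 8) & 0x1f);
	pkt[2] = pid & 0xff;
	pkt[3] = (adaptation ? 0x30 : 0x10) | *cc;	/* AF+payload : payload only */
	*cc = (*cc + 1) % 16;				/* continuity counter wraps at 16 */

	if (adaptation) {
		int stuffing = TS_PAYLOAD_MAX - payload_len;

		pkt[4] = stuffing - 1;			/* adaptation_field_length */
		if (stuffing > 1) {
			pkt[5] = 0x00;			/* flags; 0x80 would mark a discontinuity */
			memset(&pkt[6], 0xff, stuffing - 2);
		}
	}
	/* caller then copies payload_len bytes to &pkt[188 - payload_len] */
}

int main(void)
{
	uint8_t pkt[TS_PACKET_SIZE];
	int cc = 0;

	build_ts_header(pkt, 0x100, 100, 1, &cc);
	printf("header: %02x %02x %02x %02x, AF len %d, cc now %d\n",
	       pkt[0], pkt[1], pkt[2], pkt[3], pkt[4], cc);
	return 0;
}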
WarheadsSE/OX820-2.6-linux
drivers/staging/winbond/wb35tx.c
49
7795
//============================================================================ // Copyright (c) 1996-2002 Winbond Electronic Corporation // // Module Name: // Wb35Tx.c // // Abstract: // Processing the Tx message and put into down layer // //============================================================================ #include <linux/usb.h> #include "wb35tx_f.h" #include "mds_f.h" #include "sysdef.h" unsigned char Wb35Tx_get_tx_buffer(struct hw_data * pHwData, u8 **pBuffer) { struct wb35_tx *pWb35Tx = &pHwData->Wb35Tx; *pBuffer = pWb35Tx->TxBuffer[0]; return true; } static void Wb35Tx(struct wbsoft_priv *adapter); static void Wb35Tx_complete(struct urb * pUrb) { struct wbsoft_priv *adapter = pUrb->context; struct hw_data * pHwData = &adapter->sHwData; struct wb35_tx *pWb35Tx = &pHwData->Wb35Tx; PMDS pMds = &adapter->Mds; printk("wb35: tx complete\n"); // Variable setting pWb35Tx->EP4vm_state = VM_COMPLETED; pWb35Tx->EP4VM_status = pUrb->status; //Store the last result of Irp pMds->TxOwner[ pWb35Tx->TxSendIndex ] = 0;// Set the owner. Free the owner bit always. pWb35Tx->TxSendIndex++; pWb35Tx->TxSendIndex %= MAX_USB_TX_BUFFER_NUMBER; if (pHwData->SurpriseRemove || pHwData->HwStop) // Let WbWlanHalt to handle surprise remove goto error; if (pWb35Tx->tx_halt) goto error; // The URB is completed, check the result if (pWb35Tx->EP4VM_status != 0) { printk("URB submission failed\n"); pWb35Tx->EP4vm_state = VM_STOP; goto error; } Mds_Tx(adapter); Wb35Tx(adapter); return; error: atomic_dec(&pWb35Tx->TxFireCounter); pWb35Tx->EP4vm_state = VM_STOP; } static void Wb35Tx(struct wbsoft_priv *adapter) { struct hw_data * pHwData = &adapter->sHwData; struct wb35_tx *pWb35Tx = &pHwData->Wb35Tx; u8 *pTxBufferAddress; PMDS pMds = &adapter->Mds; struct urb * pUrb = (struct urb *)pWb35Tx->Tx4Urb; int retv; u32 SendIndex; if (pHwData->SurpriseRemove || pHwData->HwStop) goto cleanup; if (pWb35Tx->tx_halt) goto cleanup; // Ownership checking SendIndex = pWb35Tx->TxSendIndex; if (!pMds->TxOwner[SendIndex]) //No more data need to be sent, return immediately goto cleanup; pTxBufferAddress = pWb35Tx->TxBuffer[SendIndex]; // // Issuing URB // usb_fill_bulk_urb(pUrb, pHwData->WbUsb.udev, usb_sndbulkpipe(pHwData->WbUsb.udev, 4), pTxBufferAddress, pMds->TxBufferSize[ SendIndex ], Wb35Tx_complete, adapter); pWb35Tx->EP4vm_state = VM_RUNNING; retv = usb_submit_urb(pUrb, GFP_ATOMIC); if (retv<0) { printk("EP4 Tx Irp sending error\n"); goto cleanup; } // Check if driver needs issue Irp for EP2 pWb35Tx->TxFillCount += pMds->TxCountInBuffer[SendIndex]; if (pWb35Tx->TxFillCount > 12) Wb35Tx_EP2VM_start(adapter); pWb35Tx->ByteTransfer += pMds->TxBufferSize[SendIndex]; return; cleanup: pWb35Tx->EP4vm_state = VM_STOP; atomic_dec(&pWb35Tx->TxFireCounter); } void Wb35Tx_start(struct wbsoft_priv *adapter) { struct hw_data * pHwData = &adapter->sHwData; struct wb35_tx *pWb35Tx = &pHwData->Wb35Tx; // Allow only one thread to run into function if (atomic_inc_return(&pWb35Tx->TxFireCounter) == 1) { pWb35Tx->EP4vm_state = VM_RUNNING; Wb35Tx(adapter); } else atomic_dec(&pWb35Tx->TxFireCounter); } unsigned char Wb35Tx_initial(struct hw_data * pHwData) { struct wb35_tx *pWb35Tx = &pHwData->Wb35Tx; pWb35Tx->Tx4Urb = usb_alloc_urb(0, GFP_ATOMIC); if (!pWb35Tx->Tx4Urb) return false; pWb35Tx->Tx2Urb = usb_alloc_urb(0, GFP_ATOMIC); if (!pWb35Tx->Tx2Urb) { usb_free_urb( pWb35Tx->Tx4Urb ); return false; } return true; } //====================================================== void Wb35Tx_stop(struct hw_data * pHwData) { struct wb35_tx *pWb35Tx = 
&pHwData->Wb35Tx; // Try to cancel the URB of EP2 if (pWb35Tx->EP2vm_state == VM_RUNNING) usb_unlink_urb( pWb35Tx->Tx2Urb ); // Only use unlink; let Wb35Tx_destroy free them #ifdef _PE_TX_DUMP_ printk("EP2 Tx stop\n"); #endif // Try to cancel the URB of EP4 if (pWb35Tx->EP4vm_state == VM_RUNNING) usb_unlink_urb( pWb35Tx->Tx4Urb ); // Only use unlink; let Wb35Tx_destroy free them #ifdef _PE_TX_DUMP_ printk("EP4 Tx stop\n"); #endif } //====================================================== void Wb35Tx_destroy(struct hw_data * pHwData) { struct wb35_tx *pWb35Tx = &pHwData->Wb35Tx; // Wait for VM stop do { msleep(10); // Delay for waiting function enter 940623.1.a } while( (pWb35Tx->EP2vm_state != VM_STOP) && (pWb35Tx->EP4vm_state != VM_STOP) ); msleep(10); // Delay for waiting function enter 940623.1.b if (pWb35Tx->Tx4Urb) usb_free_urb( pWb35Tx->Tx4Urb ); if (pWb35Tx->Tx2Urb) usb_free_urb( pWb35Tx->Tx2Urb ); #ifdef _PE_TX_DUMP_ printk("Wb35Tx_destroy OK\n"); #endif } void Wb35Tx_CurrentTime(struct wbsoft_priv *adapter, u32 TimeCount) { struct hw_data * pHwData = &adapter->sHwData; struct wb35_tx *pWb35Tx = &pHwData->Wb35Tx; unsigned char Trigger = false; if (pWb35Tx->TxTimer > TimeCount) Trigger = true; else if (TimeCount > (pWb35Tx->TxTimer+500)) Trigger = true; if (Trigger) { pWb35Tx->TxTimer = TimeCount; Wb35Tx_EP2VM_start(adapter); } } static void Wb35Tx_EP2VM(struct wbsoft_priv *adapter); static void Wb35Tx_EP2VM_complete(struct urb * pUrb) { struct wbsoft_priv *adapter = pUrb->context; struct hw_data * pHwData = &adapter->sHwData; T02_DESCRIPTOR T02, TSTATUS; struct wb35_tx *pWb35Tx = &pHwData->Wb35Tx; u32 * pltmp = (u32 *)pWb35Tx->EP2_buf; u32 i; u16 InterruptInLength; // Variable setting pWb35Tx->EP2vm_state = VM_COMPLETED; pWb35Tx->EP2VM_status = pUrb->status; // For Linux 2.4. Interrupt will always trigger if (pHwData->SurpriseRemove || pHwData->HwStop) // Let WbWlanHalt to handle surprise remove goto error; if (pWb35Tx->tx_halt) goto error; // The URB is completed, check the result if (pWb35Tx->EP2VM_status != 0) { printk("EP2 IoCompleteRoutine returned error\n"); pWb35Tx->EP2vm_state = VM_STOP; goto error; } // Update the Tx result InterruptInLength = pUrb->actual_length; // Modify for minimum memory access and DWORD alignment.
T02.value = cpu_to_le32(pltmp[0]) >> 8; // [31:8] -> [24:0] InterruptInLength -= 1; // 20051221.1.c Modified as follows for better stability InterruptInLength >>= 2; // InterruptInLength/4 for (i = 1; i <= InterruptInLength; i++) { T02.value |= ((cpu_to_le32(pltmp[i]) & 0xff) << 24); TSTATUS.value = T02.value; // 20061009 anson's endian fix Mds_SendComplete( adapter, &TSTATUS ); T02.value = cpu_to_le32(pltmp[i]) >> 8; } return; error: atomic_dec(&pWb35Tx->TxResultCount); pWb35Tx->EP2vm_state = VM_STOP; } static void Wb35Tx_EP2VM(struct wbsoft_priv *adapter) { struct hw_data * pHwData = &adapter->sHwData; struct wb35_tx *pWb35Tx = &pHwData->Wb35Tx; struct urb * pUrb = (struct urb *)pWb35Tx->Tx2Urb; u32 * pltmp = (u32 *)pWb35Tx->EP2_buf; int retv; if (pHwData->SurpriseRemove || pHwData->HwStop) goto error; if (pWb35Tx->tx_halt) goto error; // // Issuing URB // usb_fill_int_urb( pUrb, pHwData->WbUsb.udev, usb_rcvintpipe(pHwData->WbUsb.udev,2), pltmp, MAX_INTERRUPT_LENGTH, Wb35Tx_EP2VM_complete, adapter, 32); pWb35Tx->EP2vm_state = VM_RUNNING; retv = usb_submit_urb(pUrb, GFP_ATOMIC); if (retv < 0) { #ifdef _PE_TX_DUMP_ printk("EP2 Tx Irp sending error\n"); #endif goto error; } return; error: pWb35Tx->EP2vm_state = VM_STOP; atomic_dec(&pWb35Tx->TxResultCount); } void Wb35Tx_EP2VM_start(struct wbsoft_priv *adapter) { struct hw_data * pHwData = &adapter->sHwData; struct wb35_tx *pWb35Tx = &pHwData->Wb35Tx; // Allow only one thread to enter this function if (atomic_inc_return(&pWb35Tx->TxResultCount) == 1) { pWb35Tx->EP2vm_state = VM_RUNNING; Wb35Tx_EP2VM(adapter); } else atomic_dec(&pWb35Tx->TxResultCount); }
gpl-2.0
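The two entry points above, Wb35Tx_start() and Wb35Tx_EP2VM_start(), both serialize access with the same idiom: atomic_inc_return(&counter) == 1 admits exactly the caller that takes the counter from 0 to 1, and every other caller undoes its increment and leaves. Below is a minimal userspace sketch of that guard using C11 atomics; try_start(), vm_stopped() and fire_counter are names invented for this illustration, not driver symbols.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int fire_counter;

/* Mirrors the driver's gate: only the caller that moves the counter
 * from 0 to 1 may run the state machine; later callers back off. */
static int try_start(void)
{
    if (atomic_fetch_add(&fire_counter, 1) == 0)
        return 1;                       /* we won: run the VM */
    atomic_fetch_sub(&fire_counter, 1); /* already running */
    return 0;
}

/* The completion/error path drops the reference exactly once,
 * as the atomic_dec() calls in the driver's error labels do. */
static void vm_stopped(void)
{
    atomic_fetch_sub(&fire_counter, 1);
}

int main(void)
{
    printf("first caller starts:  %d\n", try_start());  /* 1 */
    printf("second caller starts: %d\n", try_start());  /* 0 */
    vm_stopped();
    printf("restart after stop:   %d\n", try_start());  /* 1 */
    return 0;
}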
fards/ainol_elfii_kernel
drivers/leds/leds-ss4200.c
49
14775
/* * SS4200-E Hardware API * Copyright (c) 2009, Intel Corporation. * Copyright IBM Corporation, 2009 * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * * Author: Dave Hansen <dave@sr71.net> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/dmi.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/leds.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/types.h> #include <linux/uaccess.h> MODULE_AUTHOR("Rodney Girod <rgirod@confocus.com>, Dave Hansen <dave@sr71.net>"); MODULE_DESCRIPTION("Intel NAS/Home Server ICH7 GPIO Driver"); MODULE_LICENSE("GPL"); /* * ICH7 LPC/GPIO PCI Config register offsets */ #define PMBASE 0x040 #define GPIO_BASE 0x048 #define GPIO_CTRL 0x04c #define GPIO_EN 0x010 /* * The ICH7 GPIO register block is 64 bytes in size. */ #define ICH7_GPIO_SIZE 64 /* * Define register offsets within the ICH7 register block. */ #define GPIO_USE_SEL 0x000 #define GP_IO_SEL 0x004 #define GP_LVL 0x00c #define GPO_BLINK 0x018 #define GPI_INV 0x030 #define GPIO_USE_SEL2 0x034 #define GP_IO_SEL2 0x038 #define GP_LVL2 0x03c /* * PCI ID of the Intel ICH7 LPC Device within which the GPIO block lives. */ static const struct pci_device_id ich7_lpc_pci_id[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_30) }, { } /* NULL entry */ }; MODULE_DEVICE_TABLE(pci, ich7_lpc_pci_id); static int __init ss4200_led_dmi_callback(const struct dmi_system_id *id) { pr_info("detected '%s'\n", id->ident); return 1; } static unsigned int __initdata nodetect; module_param_named(nodetect, nodetect, bool, 0); MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection"); /* * struct nas_led_whitelist - List of known good models * * Contains the known good models this driver is compatible with. * When adding a new model try to be as strict as possible. This * makes it possible to keep the false positives (the model is * detected as working, but in reality it is not) as low as * possible. */ static struct dmi_system_id __initdata nas_led_whitelist[] = { { .callback = ss4200_led_dmi_callback, .ident = "Intel SS4200-E", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Intel"), DMI_MATCH(DMI_PRODUCT_NAME, "SS4200-E"), DMI_MATCH(DMI_PRODUCT_VERSION, "1.00.00") } }, }; /* * Base I/O address assigned to the Power Management register block */ static u32 g_pm_io_base; /* * Base I/O address assigned to the ICH7 GPIO register block */ static u32 nas_gpio_io_base; /* * When we successfully register a region, we are returned a resource. * We use these to identify which regions we need to release on our way * back out. 
*/ static struct resource *gp_gpio_resource; struct nasgpio_led { char *name; u32 gpio_bit; struct led_classdev led_cdev; }; /* * gpio_bit(s) are the ICH7 GPIO bit assignments */ static struct nasgpio_led nasgpio_leds[] = { { .name = "hdd1:blue:sata", .gpio_bit = 0 }, { .name = "hdd1:amber:sata", .gpio_bit = 1 }, { .name = "hdd2:blue:sata", .gpio_bit = 2 }, { .name = "hdd2:amber:sata", .gpio_bit = 3 }, { .name = "hdd3:blue:sata", .gpio_bit = 4 }, { .name = "hdd3:amber:sata", .gpio_bit = 5 }, { .name = "hdd4:blue:sata", .gpio_bit = 6 }, { .name = "hdd4:amber:sata", .gpio_bit = 7 }, { .name = "power:blue:power", .gpio_bit = 27}, { .name = "power:amber:power", .gpio_bit = 28}, }; #define NAS_RECOVERY 0x00000400 /* GPIO10 */ static struct nasgpio_led * led_classdev_to_nasgpio_led(struct led_classdev *led_cdev) { return container_of(led_cdev, struct nasgpio_led, led_cdev); } static struct nasgpio_led *get_led_named(char *name) { int i; for (i = 0; i < ARRAY_SIZE(nasgpio_leds); i++) { if (strcmp(nasgpio_leds[i].name, name)) continue; return &nasgpio_leds[i]; } return NULL; } /* * This protects access to the gpio ports. */ static DEFINE_SPINLOCK(nasgpio_gpio_lock); /* * There are two gpio ports, one for blinking and the other * for power. @port tells us if we're doing blinking or * power control. * * Caller must hold nasgpio_gpio_lock */ static void __nasgpio_led_set_attr(struct led_classdev *led_cdev, u32 port, u32 value) { struct nasgpio_led *led = led_classdev_to_nasgpio_led(led_cdev); u32 gpio_out; gpio_out = inl(nas_gpio_io_base + port); if (value) gpio_out |= (1<<led->gpio_bit); else gpio_out &= ~(1<<led->gpio_bit); outl(gpio_out, nas_gpio_io_base + port); } static void nasgpio_led_set_attr(struct led_classdev *led_cdev, u32 port, u32 value) { spin_lock(&nasgpio_gpio_lock); __nasgpio_led_set_attr(led_cdev, port, value); spin_unlock(&nasgpio_gpio_lock); } u32 nasgpio_led_get_attr(struct led_classdev *led_cdev, u32 port) { struct nasgpio_led *led = led_classdev_to_nasgpio_led(led_cdev); u32 gpio_in; spin_lock(&nasgpio_gpio_lock); gpio_in = inl(nas_gpio_io_base + port); spin_unlock(&nasgpio_gpio_lock); if (gpio_in & (1<<led->gpio_bit)) return 1; return 0; } /* * There is actual brightness control in the hardware, * but it is via smbus commands and not implemented * in this driver. */ static void nasgpio_led_set_brightness(struct led_classdev *led_cdev, enum led_brightness brightness) { u32 setting = 0; if (brightness >= LED_HALF) setting = 1; /* * Hold the lock across both operations. This ensures * consistency so that both the "turn off blinking" * and "turn light off" operations complete as a set. */ spin_lock(&nasgpio_gpio_lock); /* * LED class documentation asks that past blink state * be disabled when brightness is turned to zero. */ if (brightness == 0) __nasgpio_led_set_attr(led_cdev, GPO_BLINK, 0); __nasgpio_led_set_attr(led_cdev, GP_LVL, setting); spin_unlock(&nasgpio_gpio_lock); } static int nasgpio_led_set_blink(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { u32 setting = 1; if (!(*delay_on == 0 && *delay_off == 0) && !(*delay_on == 500 && *delay_off == 500)) return -EINVAL; /* * These are very approximate. */ *delay_on = 500; *delay_off = 500; nasgpio_led_set_attr(led_cdev, GPO_BLINK, setting); return 0; } /* * Initialize the ICH7 GPIO registers for NAS usage. The BIOS should have * already taken care of this, but we will do so in a non destructive manner * so that we have what we need whether the BIOS did it or not. 
*/ static int __devinit ich7_gpio_init(struct device *dev) { int i; u32 config_data = 0; u32 all_nas_led = 0; for (i = 0; i < ARRAY_SIZE(nasgpio_leds); i++) all_nas_led |= (1<<nasgpio_leds[i].gpio_bit); spin_lock(&nasgpio_gpio_lock); /* * We need to enable all of the GPIO lines used by the NAS box, * so we will read the current Use Selection and add our usage * to it. This should be benign with regard to the original * BIOS configuration. */ config_data = inl(nas_gpio_io_base + GPIO_USE_SEL); dev_dbg(dev, ": Data read from GPIO_USE_SEL = 0x%08x\n", config_data); config_data |= all_nas_led + NAS_RECOVERY; outl(config_data, nas_gpio_io_base + GPIO_USE_SEL); config_data = inl(nas_gpio_io_base + GPIO_USE_SEL); dev_dbg(dev, ": GPIO_USE_SEL = 0x%08x\n\n", config_data); /* * The LED GPIO outputs need to be configured for output, so we * will ensure that all LED lines are cleared for output and the * RECOVERY line ready for input. This too should be benign with * regard to BIOS configuration. */ config_data = inl(nas_gpio_io_base + GP_IO_SEL); dev_dbg(dev, ": Data read from GP_IO_SEL = 0x%08x\n", config_data); config_data &= ~all_nas_led; config_data |= NAS_RECOVERY; outl(config_data, nas_gpio_io_base + GP_IO_SEL); config_data = inl(nas_gpio_io_base + GP_IO_SEL); dev_dbg(dev, ": GP_IO_SEL = 0x%08x\n", config_data); /* * In our final system, the BIOS will initialize the state of all * of the LEDs. For now, we turn them all off (or Low). */ config_data = inl(nas_gpio_io_base + GP_LVL); dev_dbg(dev, ": Data read from GP_LVL = 0x%08x\n", config_data); /* * In our final system, the BIOS will initialize the blink state of all * of the LEDs. For now, we turn blink off for all of them. */ config_data = inl(nas_gpio_io_base + GPO_BLINK); dev_dbg(dev, ": Data read from GPO_BLINK = 0x%08x\n", config_data); /* * At this moment, I am unsure if anything needs to happen with GPI_INV */ config_data = inl(nas_gpio_io_base + GPI_INV); dev_dbg(dev, ": Data read from GPI_INV = 0x%08x\n", config_data); spin_unlock(&nasgpio_gpio_lock); return 0; } static void ich7_lpc_cleanup(struct device *dev) { /* * If we were given exclusive use of the GPIO * I/O Address range, we must return it. */ if (gp_gpio_resource) { dev_dbg(dev, ": Releasing GPIO I/O addresses\n"); release_region(nas_gpio_io_base, ICH7_GPIO_SIZE); gp_gpio_resource = NULL; } } /* * The OS has determined that the LPC of the Intel ICH7 Southbridge is present * so we can retrive the required operational information and prepare the GPIO. */ static struct pci_dev *nas_gpio_pci_dev; static int __devinit ich7_lpc_probe(struct pci_dev *dev, const struct pci_device_id *id) { int status; u32 gc = 0; status = pci_enable_device(dev); if (status) { dev_err(&dev->dev, "pci_enable_device failed\n"); return -EIO; } nas_gpio_pci_dev = dev; status = pci_read_config_dword(dev, PMBASE, &g_pm_io_base); if (status) goto out; g_pm_io_base &= 0x00000ff80; status = pci_read_config_dword(dev, GPIO_CTRL, &gc); if (!(GPIO_EN & gc)) { status = -EEXIST; dev_info(&dev->dev, "ERROR: The LPC GPIO Block has not been enabled.\n"); goto out; } status = pci_read_config_dword(dev, GPIO_BASE, &nas_gpio_io_base); if (0 > status) { dev_info(&dev->dev, "Unable to read GPIOBASE.\n"); goto out; } dev_dbg(&dev->dev, ": GPIOBASE = 0x%08x\n", nas_gpio_io_base); nas_gpio_io_base &= 0x00000ffc0; /* * Insure that we have exclusive access to the GPIO I/O address range. 
*/ gp_gpio_resource = request_region(nas_gpio_io_base, ICH7_GPIO_SIZE, KBUILD_MODNAME); if (NULL == gp_gpio_resource) { dev_info(&dev->dev, "ERROR Unable to register GPIO I/O addresses.\n"); status = -1; goto out; } /* * Initialize the GPIO for NAS/Home Server Use */ ich7_gpio_init(&dev->dev); out: if (status) { ich7_lpc_cleanup(&dev->dev); pci_disable_device(dev); } return status; } static void ich7_lpc_remove(struct pci_dev *dev) { ich7_lpc_cleanup(&dev->dev); pci_disable_device(dev); } /* * pci_driver structure passed to the PCI modules */ static struct pci_driver nas_gpio_pci_driver = { .name = KBUILD_MODNAME, .id_table = ich7_lpc_pci_id, .probe = ich7_lpc_probe, .remove = ich7_lpc_remove, }; static struct led_classdev *get_classdev_for_led_nr(int nr) { struct nasgpio_led *nas_led = &nasgpio_leds[nr]; struct led_classdev *led = &nas_led->led_cdev; return led; } static void set_power_light_amber_noblink(void) { struct nasgpio_led *amber = get_led_named("power:amber:power"); struct nasgpio_led *blue = get_led_named("power:blue:power"); if (!amber || !blue) return; /* * LED_OFF implies disabling future blinking */ pr_debug("setting blue off and amber on\n"); nasgpio_led_set_brightness(&blue->led_cdev, LED_OFF); nasgpio_led_set_brightness(&amber->led_cdev, LED_FULL); } static ssize_t nas_led_blink_show(struct device *dev, struct device_attribute *attr, char *buf) { struct led_classdev *led = dev_get_drvdata(dev); int blinking = 0; if (nasgpio_led_get_attr(led, GPO_BLINK)) blinking = 1; return sprintf(buf, "%u\n", blinking); } static ssize_t nas_led_blink_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { int ret; struct led_classdev *led = dev_get_drvdata(dev); unsigned long blink_state; ret = strict_strtoul(buf, 10, &blink_state); if (ret) return ret; nasgpio_led_set_attr(led, GPO_BLINK, blink_state); return size; } static DEVICE_ATTR(blink, 0644, nas_led_blink_show, nas_led_blink_store); static int register_nasgpio_led(int led_nr) { int ret; struct nasgpio_led *nas_led = &nasgpio_leds[led_nr]; struct led_classdev *led = get_classdev_for_led_nr(led_nr); led->name = nas_led->name; led->brightness = LED_OFF; if (nasgpio_led_get_attr(led, GP_LVL)) led->brightness = LED_FULL; led->brightness_set = nasgpio_led_set_brightness; led->blink_set = nasgpio_led_set_blink; ret = led_classdev_register(&nas_gpio_pci_dev->dev, led); if (ret) return ret; ret = device_create_file(led->dev, &dev_attr_blink); if (ret) led_classdev_unregister(led); return ret; } static void unregister_nasgpio_led(int led_nr) { struct led_classdev *led = get_classdev_for_led_nr(led_nr); led_classdev_unregister(led); device_remove_file(led->dev, &dev_attr_blink); } /* * module load/initialization */ static int __init nas_gpio_init(void) { int i; int ret = 0; int nr_devices = 0; nr_devices = dmi_check_system(nas_led_whitelist); if (nodetect) { pr_info("skipping hardware autodetection\n"); pr_info("Please send 'dmidecode' output to dave@sr71.net\n"); nr_devices++; } if (nr_devices <= 0) { pr_info("no LED devices found\n"); return -ENODEV; } pr_info("registering PCI driver\n"); ret = pci_register_driver(&nas_gpio_pci_driver); if (ret) return ret; for (i = 0; i < ARRAY_SIZE(nasgpio_leds); i++) { ret = register_nasgpio_led(i); if (ret) goto out_err; } /* * When the system powers on, the BIOS leaves the power * light blue and blinking. This will turn it solid * amber once the driver is loaded. 
*/ set_power_light_amber_noblink(); return 0; out_err: for (; i >= 0; i--) unregister_nasgpio_led(i); pci_unregister_driver(&nas_gpio_pci_driver); return ret; } /* * module unload */ static void __exit nas_gpio_exit(void) { int i; pr_info("Unregistering driver\n"); for (i = 0; i < ARRAY_SIZE(nasgpio_leds); i++) unregister_nasgpio_led(i); pci_unregister_driver(&nas_gpio_pci_driver); } module_init(nas_gpio_init); module_exit(nas_gpio_exit);
gpl-2.0
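Every LED operation in the driver above reduces to the same read-modify-write in __nasgpio_led_set_attr(): read the 32-bit register with inl(), set or clear one GPIO bit, and write it back with outl(), all under nasgpio_gpio_lock. A self-contained sketch of that bit manipulation against a simulated register follows; gp_lvl and set_led() are illustrative stand-ins, not driver symbols, and real port I/O would additionally need the spinlock and I/O privileges.

#include <stdint.h>
#include <stdio.h>

/* Simulated 32-bit level register; stands in for the word at
 * nas_gpio_io_base + GP_LVL that the driver reads with inl(). */
static uint32_t gp_lvl;

static void set_led(unsigned int bit, int on)
{
    uint32_t v = gp_lvl;    /* inl(...) in the driver */

    if (on)
        v |= 1u << bit;
    else
        v &= ~(1u << bit);
    gp_lvl = v;             /* outl(...) in the driver */
}

int main(void)
{
    set_led(27, 1);         /* power:blue:power on  */
    set_led(28, 1);         /* power:amber:power on */
    set_led(27, 0);         /* blue off again       */
    printf("GP_LVL = 0x%08x\n", gp_lvl);    /* 0x10000000 */
    return 0;
}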
carz2/cm-kernel
drivers/net/irda/au1k_ir.c
49
20044
/* * Alchemy Semi Au1000 IrDA driver * * Copyright 2001 MontaVista Software Inc. * Author: MontaVista Software, Inc. * ppopov@mvista.com or source@mvista.com * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. */ #include <linux/module.h> #include <linux/types.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/slab.h> #include <linux/rtnetlink.h> #include <linux/interrupt.h> #include <linux/pm.h> #include <linux/bitops.h> #include <asm/irq.h> #include <asm/io.h> #include <asm/au1000.h> #if defined(CONFIG_MIPS_PB1000) || defined(CONFIG_MIPS_PB1100) #include <asm/pb1000.h> #elif defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100) #include <asm/db1x00.h> #include <asm/mach-db1x00/bcsr.h> #else #error au1k_ir: unsupported board #endif #include <net/irda/irda.h> #include <net/irda/irmod.h> #include <net/irda/wrapper.h> #include <net/irda/irda_device.h> #include "au1000_ircc.h" static int au1k_irda_net_init(struct net_device *); static int au1k_irda_start(struct net_device *); static int au1k_irda_stop(struct net_device *dev); static int au1k_irda_hard_xmit(struct sk_buff *, struct net_device *); static int au1k_irda_rx(struct net_device *); static void au1k_irda_interrupt(int, void *); static void au1k_tx_timeout(struct net_device *); static int au1k_irda_ioctl(struct net_device *, struct ifreq *, int); static int au1k_irda_set_speed(struct net_device *dev, int speed); static void *dma_alloc(size_t, dma_addr_t *); static void dma_free(void *, size_t); static int qos_mtt_bits = 0x07; /* 1 ms or more */ static struct net_device *ir_devs[NUM_IR_IFF]; static char version[] __devinitdata = "au1k_ircc:1.2 ppopov@mvista.com\n"; #define RUN_AT(x) (jiffies + (x)) static DEFINE_SPINLOCK(ir_lock); /* * IrDA peripheral bug. You have to read the register * twice to get the right value. */ u32 read_ir_reg(u32 addr) { readl(addr); return readl(addr); } /* * Buffer allocation/deallocation routines. The buffer descriptor returned * has the virtual and dma address of a buffer suitable for * both, receive and transmit operations. */ static db_dest_t *GetFreeDB(struct au1k_private *aup) { db_dest_t *pDB; pDB = aup->pDBfree; if (pDB) { aup->pDBfree = pDB->pnext; } return pDB; } static void ReleaseDB(struct au1k_private *aup, db_dest_t *pDB) { db_dest_t *pDBfree = aup->pDBfree; if (pDBfree) pDBfree->pnext = pDB; aup->pDBfree = pDB; } /* DMA memory allocation, derived from pci_alloc_consistent. However, the Au1000 data cache is coherent (when programmed so), therefore we return KSEG0 address, not KSEG1. 
*/ static void *dma_alloc(size_t size, dma_addr_t * dma_handle) { void *ret; int gfp = GFP_ATOMIC | GFP_DMA; ret = (void *) __get_free_pages(gfp, get_order(size)); if (ret != NULL) { memset(ret, 0, size); *dma_handle = virt_to_bus(ret); ret = (void *)KSEG0ADDR(ret); } return ret; } static void dma_free(void *vaddr, size_t size) { vaddr = (void *)KSEG0ADDR(vaddr); free_pages((unsigned long) vaddr, get_order(size)); } static void setup_hw_rings(struct au1k_private *aup, u32 rx_base, u32 tx_base) { int i; for (i=0; i<NUM_IR_DESC; i++) { aup->rx_ring[i] = (volatile ring_dest_t *) (rx_base + sizeof(ring_dest_t)*i); } for (i=0; i<NUM_IR_DESC; i++) { aup->tx_ring[i] = (volatile ring_dest_t *) (tx_base + sizeof(ring_dest_t)*i); } } static int au1k_irda_init(void) { static unsigned version_printed = 0; struct au1k_private *aup; struct net_device *dev; int err; if (version_printed++ == 0) printk(version); dev = alloc_irdadev(sizeof(struct au1k_private)); if (!dev) return -ENOMEM; dev->irq = AU1000_IRDA_RX_INT; /* TX has its own interrupt */ err = au1k_irda_net_init(dev); if (err) goto out; err = register_netdev(dev); if (err) goto out1; ir_devs[0] = dev; printk(KERN_INFO "IrDA: Registered device %s\n", dev->name); return 0; out1: aup = netdev_priv(dev); dma_free((void *)aup->db[0].vaddr, MAX_BUF_SIZE * 2*NUM_IR_DESC); dma_free((void *)aup->rx_ring[0], 2 * MAX_NUM_IR_DESC*(sizeof(ring_dest_t))); kfree(aup->rx_buff.head); out: free_netdev(dev); return err; } static int au1k_irda_init_iobuf(iobuff_t *io, int size) { io->head = kmalloc(size, GFP_KERNEL); if (io->head != NULL) { io->truesize = size; io->in_frame = FALSE; io->state = OUTSIDE_FRAME; io->data = io->head; } return io->head ? 0 : -ENOMEM; } static const struct net_device_ops au1k_irda_netdev_ops = { .ndo_open = au1k_irda_start, .ndo_stop = au1k_irda_stop, .ndo_start_xmit = au1k_irda_hard_xmit, .ndo_tx_timeout = au1k_tx_timeout, .ndo_do_ioctl = au1k_irda_ioctl, }; static int au1k_irda_net_init(struct net_device *dev) { struct au1k_private *aup = netdev_priv(dev); int i, retval = 0, err; db_dest_t *pDB, *pDBfree; dma_addr_t temp; err = au1k_irda_init_iobuf(&aup->rx_buff, 14384); if (err) goto out1; dev->netdev_ops = &au1k_irda_netdev_ops; irda_init_max_qos_capabilies(&aup->qos); /* The only value we must override it the baudrate */ aup->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600| IR_115200|IR_576000 |(IR_4000000 << 8); aup->qos.min_turn_time.bits = qos_mtt_bits; irda_qos_bits_to_value(&aup->qos); retval = -ENOMEM; /* Tx ring follows rx ring + 512 bytes */ /* we need a 1k aligned buffer */ aup->rx_ring[0] = (ring_dest_t *) dma_alloc(2*MAX_NUM_IR_DESC*(sizeof(ring_dest_t)), &temp); if (!aup->rx_ring[0]) goto out2; /* allocate the data buffers */ aup->db[0].vaddr = (void *)dma_alloc(MAX_BUF_SIZE * 2*NUM_IR_DESC, &temp); if (!aup->db[0].vaddr) goto out3; setup_hw_rings(aup, (u32)aup->rx_ring[0], (u32)aup->rx_ring[0] + 512); pDBfree = NULL; pDB = aup->db; for (i=0; i<(2*NUM_IR_DESC); i++) { pDB->pnext = pDBfree; pDBfree = pDB; pDB->vaddr = (u32 *)((unsigned)aup->db[0].vaddr + MAX_BUF_SIZE*i); pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr); pDB++; } aup->pDBfree = pDBfree; /* attach a data buffer to each descriptor */ for (i=0; i<NUM_IR_DESC; i++) { pDB = GetFreeDB(aup); if (!pDB) goto out; aup->rx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff); aup->rx_ring[i]->addr_1 = (u8)((pDB->dma_addr>>8) & 0xff); aup->rx_ring[i]->addr_2 = (u8)((pDB->dma_addr>>16) & 0xff); aup->rx_ring[i]->addr_3 = (u8)((pDB->dma_addr>>24) & 0xff); 
aup->rx_db_inuse[i] = pDB; } for (i=0; i<NUM_IR_DESC; i++) { pDB = GetFreeDB(aup); if (!pDB) goto out; aup->tx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff); aup->tx_ring[i]->addr_1 = (u8)((pDB->dma_addr>>8) & 0xff); aup->tx_ring[i]->addr_2 = (u8)((pDB->dma_addr>>16) & 0xff); aup->tx_ring[i]->addr_3 = (u8)((pDB->dma_addr>>24) & 0xff); aup->tx_ring[i]->count_0 = 0; aup->tx_ring[i]->count_1 = 0; aup->tx_ring[i]->flags = 0; aup->tx_db_inuse[i] = pDB; } #if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100) /* power on */ bcsr_mod(BCSR_RESETS, BCSR_RESETS_IRDA_MODE_MASK, BCSR_RESETS_IRDA_MODE_FULL); #endif return 0; out3: dma_free((void *)aup->rx_ring[0], 2 * MAX_NUM_IR_DESC*(sizeof(ring_dest_t))); out2: kfree(aup->rx_buff.head); out1: printk(KERN_ERR "au1k_init_module failed. Returns %d\n", retval); return retval; } static int au1k_init(struct net_device *dev) { struct au1k_private *aup = netdev_priv(dev); int i; u32 control; u32 ring_address; /* bring the device out of reset */ control = 0xe; /* coherent, clock enable, one half system clock */ #ifndef CONFIG_CPU_LITTLE_ENDIAN control |= 1; #endif aup->tx_head = 0; aup->tx_tail = 0; aup->rx_head = 0; for (i=0; i<NUM_IR_DESC; i++) { aup->rx_ring[i]->flags = AU_OWN; } writel(control, IR_INTERFACE_CONFIG); au_sync_delay(10); writel(read_ir_reg(IR_ENABLE) & ~0x8000, IR_ENABLE); /* disable PHY */ au_sync_delay(1); writel(MAX_BUF_SIZE, IR_MAX_PKT_LEN); ring_address = (u32)virt_to_phys((void *)aup->rx_ring[0]); writel(ring_address >> 26, IR_RING_BASE_ADDR_H); writel((ring_address >> 10) & 0xffff, IR_RING_BASE_ADDR_L); writel(RING_SIZE_64<<8 | RING_SIZE_64<<12, IR_RING_SIZE); writel(1<<2 | IR_ONE_PIN, IR_CONFIG_2); /* 48MHz */ writel(0, IR_RING_ADDR_CMPR); au1k_irda_set_speed(dev, 9600); return 0; } static int au1k_irda_start(struct net_device *dev) { int retval; char hwname[32]; struct au1k_private *aup = netdev_priv(dev); if ((retval = au1k_init(dev))) { printk(KERN_ERR "%s: error in au1k_init\n", dev->name); return retval; } if ((retval = request_irq(AU1000_IRDA_TX_INT, au1k_irda_interrupt, 0, dev->name, dev))) { printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name, dev->irq); return retval; } if ((retval = request_irq(AU1000_IRDA_RX_INT, au1k_irda_interrupt, 0, dev->name, dev))) { free_irq(AU1000_IRDA_TX_INT, dev); printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name, dev->irq); return retval; } /* Give self a hardware name */ sprintf(hwname, "Au1000 SIR/FIR"); aup->irlap = irlap_open(dev, &aup->qos, hwname); netif_start_queue(dev); writel(read_ir_reg(IR_CONFIG_2) | 1<<8, IR_CONFIG_2); /* int enable */ aup->timer.expires = RUN_AT((3*HZ)); aup->timer.data = (unsigned long)dev; return 0; } static int au1k_irda_stop(struct net_device *dev) { struct au1k_private *aup = netdev_priv(dev); /* disable interrupts */ writel(read_ir_reg(IR_CONFIG_2) & ~(1<<8), IR_CONFIG_2); writel(0, IR_CONFIG_1); writel(0, IR_INTERFACE_CONFIG); /* disable clock */ au_sync(); if (aup->irlap) { irlap_close(aup->irlap); aup->irlap = NULL; } netif_stop_queue(dev); del_timer(&aup->timer); /* disable the interrupt */ free_irq(AU1000_IRDA_TX_INT, dev); free_irq(AU1000_IRDA_RX_INT, dev); return 0; } static void __exit au1k_irda_exit(void) { struct net_device *dev = ir_devs[0]; struct au1k_private *aup = netdev_priv(dev); unregister_netdev(dev); dma_free((void *)aup->db[0].vaddr, MAX_BUF_SIZE * 2*NUM_IR_DESC); dma_free((void *)aup->rx_ring[0], 2 * MAX_NUM_IR_DESC*(sizeof(ring_dest_t))); kfree(aup->rx_buff.head); free_netdev(dev); } static inline void 
update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len) { struct au1k_private *aup = netdev_priv(dev); struct net_device_stats *ps = &aup->stats; ps->tx_packets++; ps->tx_bytes += pkt_len; if (status & IR_TX_ERROR) { ps->tx_errors++; ps->tx_aborted_errors++; } } static void au1k_tx_ack(struct net_device *dev) { struct au1k_private *aup = netdev_priv(dev); volatile ring_dest_t *ptxd; ptxd = aup->tx_ring[aup->tx_tail]; while (!(ptxd->flags & AU_OWN) && (aup->tx_tail != aup->tx_head)) { update_tx_stats(dev, ptxd->flags, ptxd->count_1<<8 | ptxd->count_0); ptxd->count_0 = 0; ptxd->count_1 = 0; au_sync(); aup->tx_tail = (aup->tx_tail + 1) & (NUM_IR_DESC - 1); ptxd = aup->tx_ring[aup->tx_tail]; if (aup->tx_full) { aup->tx_full = 0; netif_wake_queue(dev); } } if (aup->tx_tail == aup->tx_head) { if (aup->newspeed) { au1k_irda_set_speed(dev, aup->newspeed); aup->newspeed = 0; } else { writel(read_ir_reg(IR_CONFIG_1) & ~IR_TX_ENABLE, IR_CONFIG_1); au_sync(); writel(read_ir_reg(IR_CONFIG_1) | IR_RX_ENABLE, IR_CONFIG_1); writel(0, IR_RING_PROMPT); au_sync(); } } } /* * Au1000 transmit routine. */ static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev) { struct au1k_private *aup = netdev_priv(dev); int speed = irda_get_next_speed(skb); volatile ring_dest_t *ptxd; u32 len; u32 flags; db_dest_t *pDB; if (speed != aup->speed && speed != -1) { aup->newspeed = speed; } if ((skb->len == 0) && (aup->newspeed)) { if (aup->tx_tail == aup->tx_head) { au1k_irda_set_speed(dev, speed); aup->newspeed = 0; } dev_kfree_skb(skb); return NETDEV_TX_OK; } ptxd = aup->tx_ring[aup->tx_head]; flags = ptxd->flags; if (flags & AU_OWN) { printk(KERN_DEBUG "%s: tx_full\n", dev->name); netif_stop_queue(dev); aup->tx_full = 1; return NETDEV_TX_BUSY; } else if (((aup->tx_head + 1) & (NUM_IR_DESC - 1)) == aup->tx_tail) { printk(KERN_DEBUG "%s: tx_full\n", dev->name); netif_stop_queue(dev); aup->tx_full = 1; return NETDEV_TX_BUSY; } pDB = aup->tx_db_inuse[aup->tx_head]; #if 0 if (read_ir_reg(IR_RX_BYTE_CNT) != 0) { printk("tx warning: rx byte cnt %x\n", read_ir_reg(IR_RX_BYTE_CNT)); } #endif if (aup->speed == 4000000) { /* FIR */ skb_copy_from_linear_data(skb, pDB->vaddr, skb->len); ptxd->count_0 = skb->len & 0xff; ptxd->count_1 = (skb->len >> 8) & 0xff; } else { /* SIR */ len = async_wrap_skb(skb, (u8 *)pDB->vaddr, MAX_BUF_SIZE); ptxd->count_0 = len & 0xff; ptxd->count_1 = (len >> 8) & 0xff; ptxd->flags |= IR_DIS_CRC; au_writel(au_readl(0xae00000c) & ~(1<<13), 0xae00000c); } ptxd->flags |= AU_OWN; au_sync(); writel(read_ir_reg(IR_CONFIG_1) | IR_TX_ENABLE, IR_CONFIG_1); writel(0, IR_RING_PROMPT); au_sync(); dev_kfree_skb(skb); aup->tx_head = (aup->tx_head + 1) & (NUM_IR_DESC - 1); dev->trans_start = jiffies; return NETDEV_TX_OK; } static inline void update_rx_stats(struct net_device *dev, u32 status, u32 count) { struct au1k_private *aup = netdev_priv(dev); struct net_device_stats *ps = &aup->stats; ps->rx_packets++; if (status & IR_RX_ERROR) { ps->rx_errors++; if (status & (IR_PHY_ERROR|IR_FIFO_OVER)) ps->rx_missed_errors++; if (status & IR_MAX_LEN) ps->rx_length_errors++; if (status & IR_CRC_ERROR) ps->rx_crc_errors++; } else ps->rx_bytes += count; } /* * Au1000 receive routine. 
*/ static int au1k_irda_rx(struct net_device *dev) { struct au1k_private *aup = netdev_priv(dev); struct sk_buff *skb; volatile ring_dest_t *prxd; u32 flags, count; db_dest_t *pDB; prxd = aup->rx_ring[aup->rx_head]; flags = prxd->flags; while (!(flags & AU_OWN)) { pDB = aup->rx_db_inuse[aup->rx_head]; count = prxd->count_1<<8 | prxd->count_0; if (!(flags & IR_RX_ERROR)) { /* good frame */ update_rx_stats(dev, flags, count); skb=alloc_skb(count+1,GFP_ATOMIC); if (skb == NULL) { aup->netdev->stats.rx_dropped++; continue; } skb_reserve(skb, 1); if (aup->speed == 4000000) skb_put(skb, count); else skb_put(skb, count-2); skb_copy_to_linear_data(skb, pDB->vaddr, count - 2); skb->dev = dev; skb_reset_mac_header(skb); skb->protocol = htons(ETH_P_IRDA); netif_rx(skb); prxd->count_0 = 0; prxd->count_1 = 0; } prxd->flags |= AU_OWN; aup->rx_head = (aup->rx_head + 1) & (NUM_IR_DESC - 1); writel(0, IR_RING_PROMPT); au_sync(); /* next descriptor */ prxd = aup->rx_ring[aup->rx_head]; flags = prxd->flags; } return 0; } static irqreturn_t au1k_irda_interrupt(int dummy, void *dev_id) { struct net_device *dev = dev_id; writel(0, IR_INT_CLEAR); /* ack irda interrupts */ au1k_irda_rx(dev); au1k_tx_ack(dev); return IRQ_HANDLED; } /* * The Tx ring has been full longer than the watchdog timeout * value. The transmitter must be hung? */ static void au1k_tx_timeout(struct net_device *dev) { u32 speed; struct au1k_private *aup = netdev_priv(dev); printk(KERN_ERR "%s: tx timeout\n", dev->name); speed = aup->speed; aup->speed = 0; au1k_irda_set_speed(dev, speed); aup->tx_full = 0; netif_wake_queue(dev); } /* * Set the IrDA communications speed. */ static int au1k_irda_set_speed(struct net_device *dev, int speed) { unsigned long flags; struct au1k_private *aup = netdev_priv(dev); u32 control; int ret = 0, timeout = 10, i; volatile ring_dest_t *ptxd; #if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100) unsigned long irda_resets; #endif if (speed == aup->speed) return ret; spin_lock_irqsave(&ir_lock, flags); /* disable PHY first */ writel(read_ir_reg(IR_ENABLE) & ~0x8000, IR_ENABLE); /* disable RX/TX */ writel(read_ir_reg(IR_CONFIG_1) & ~(IR_RX_ENABLE|IR_TX_ENABLE), IR_CONFIG_1); au_sync_delay(1); while (read_ir_reg(IR_ENABLE) & (IR_RX_STATUS | IR_TX_STATUS)) { mdelay(1); if (!timeout--) { printk(KERN_ERR "%s: rx/tx disable timeout\n", dev->name); break; } } /* disable DMA */ writel(read_ir_reg(IR_CONFIG_1) & ~IR_DMA_ENABLE, IR_CONFIG_1); au_sync_delay(1); /* * After we disable tx/rx. the index pointers * go back to zero. 
*/ aup->tx_head = aup->tx_tail = aup->rx_head = 0; for (i=0; i<NUM_IR_DESC; i++) { ptxd = aup->tx_ring[i]; ptxd->flags = 0; ptxd->count_0 = 0; ptxd->count_1 = 0; } for (i=0; i<NUM_IR_DESC; i++) { ptxd = aup->rx_ring[i]; ptxd->count_0 = 0; ptxd->count_1 = 0; ptxd->flags = AU_OWN; } if (speed == 4000000) { #if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100) bcsr_mod(BCSR_RESETS, 0, BCSR_RESETS_FIR_SEL); #else /* Pb1000 and Pb1100 */ writel(1<<13, CPLD_AUX1); #endif } else { #if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100) bcsr_mod(BCSR_RESETS, BCSR_RESETS_FIR_SEL, 0); #else /* Pb1000 and Pb1100 */ writel(readl(CPLD_AUX1) & ~(1<<13), CPLD_AUX1); #endif } switch (speed) { case 9600: writel(11<<10 | 12<<5, IR_WRITE_PHY_CONFIG); writel(IR_SIR_MODE, IR_CONFIG_1); break; case 19200: writel(5<<10 | 12<<5, IR_WRITE_PHY_CONFIG); writel(IR_SIR_MODE, IR_CONFIG_1); break; case 38400: writel(2<<10 | 12<<5, IR_WRITE_PHY_CONFIG); writel(IR_SIR_MODE, IR_CONFIG_1); break; case 57600: writel(1<<10 | 12<<5, IR_WRITE_PHY_CONFIG); writel(IR_SIR_MODE, IR_CONFIG_1); break; case 115200: writel(12<<5, IR_WRITE_PHY_CONFIG); writel(IR_SIR_MODE, IR_CONFIG_1); break; case 4000000: writel(0xF, IR_WRITE_PHY_CONFIG); writel(IR_FIR|IR_DMA_ENABLE|IR_RX_ENABLE, IR_CONFIG_1); break; default: printk(KERN_ERR "%s unsupported speed %x\n", dev->name, speed); ret = -EINVAL; break; } aup->speed = speed; writel(read_ir_reg(IR_ENABLE) | 0x8000, IR_ENABLE); au_sync(); control = read_ir_reg(IR_ENABLE); writel(0, IR_RING_PROMPT); au_sync(); if (control & (1<<14)) { printk(KERN_ERR "%s: configuration error\n", dev->name); } else { if (control & (1<<11)) printk(KERN_DEBUG "%s Valid SIR config\n", dev->name); if (control & (1<<12)) printk(KERN_DEBUG "%s Valid MIR config\n", dev->name); if (control & (1<<13)) printk(KERN_DEBUG "%s Valid FIR config\n", dev->name); if (control & (1<<10)) printk(KERN_DEBUG "%s TX enabled\n", dev->name); if (control & (1<<9)) printk(KERN_DEBUG "%s RX enabled\n", dev->name); } spin_unlock_irqrestore(&ir_lock, flags); return ret; } static int au1k_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd) { struct if_irda_req *rq = (struct if_irda_req *)ifreq; struct au1k_private *aup = netdev_priv(dev); int ret = -EOPNOTSUPP; switch (cmd) { case SIOCSBANDWIDTH: if (capable(CAP_NET_ADMIN)) { /* * We are unable to set the speed if the * device is not running. */ if (aup->open) ret = au1k_irda_set_speed(dev, rq->ifr_baudrate); else { printk(KERN_ERR "%s ioctl: !netif_running\n", dev->name); ret = 0; } } break; case SIOCSMEDIABUSY: ret = -EPERM; if (capable(CAP_NET_ADMIN)) { irda_device_set_media_busy(dev, TRUE); ret = 0; } break; case SIOCGRECEIVING: rq->ifr_receiving = 0; break; default: break; } return ret; } MODULE_AUTHOR("Pete Popov <ppopov@mvista.com>"); MODULE_DESCRIPTION("Au1000 IrDA Device Driver"); module_init(au1k_irda_init); module_exit(au1k_irda_exit);
gpl-2.0
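The transmit path above is a textbook ownership-bit descriptor ring: au1k_irda_hard_xmit() fills the descriptor at tx_head, sets AU_OWN to hand it to the DMA engine, and wraps with (tx_head + 1) & (NUM_IR_DESC - 1); au1k_tx_ack() reaps from tx_tail once the hardware has cleared the flag. The toy model below reproduces that handshake in plain C; every name in it (ring, submit, reap, complete_all) is invented for the sketch, with complete_all() standing in for the hardware.

#include <stdio.h>

#define NUM_DESC 4      /* power of two, like NUM_IR_DESC */
#define OWN      0x80   /* "hardware owns this descriptor" */

struct desc { unsigned int flags; unsigned int len; };

static struct desc ring[NUM_DESC];
static unsigned int head, tail;     /* producer / consumer indices */

static int submit(unsigned int len)
{
    struct desc *d = &ring[head];

    if (d->flags & OWN)
        return -1;                      /* ring full, hw still owns it */
    d->len = len;
    d->flags |= OWN;                    /* hand off to "hardware" */
    head = (head + 1) & (NUM_DESC - 1); /* same wrap as the driver */
    return 0;
}

static void complete_all(void)          /* pretend the hw finished */
{
    for (unsigned int i = 0; i < NUM_DESC; i++)
        ring[i].flags &= ~OWN;
}

static void reap(void)                  /* cf. au1k_tx_ack() */
{
    while (!(ring[tail].flags & OWN) && tail != head) {
        printf("sent %u bytes\n", ring[tail].len);
        tail = (tail + 1) & (NUM_DESC - 1);
    }
}

int main(void)
{
    submit(64);
    submit(128);
    reap();             /* nothing yet: hw still owns both */
    complete_all();
    reap();             /* now both complete */
    return 0;
}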
nemomobile/kernel-adaptation-n950-n9
net/ipv6/icmp.c
49
22500
/* * Internet Control Message Protocol (ICMPv6) * Linux INET6 implementation * * Authors: * Pedro Roque <roque@di.fc.ul.pt> * * Based on net/ipv4/icmp.c * * RFC 1885 * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ /* * Changes: * * Andi Kleen : exception handling * Andi Kleen add rate limits. never reply to a icmp. * add more length checks and other fixes. * yoshfuji : ensure to sent parameter problem for * fragments. * YOSHIFUJI Hideaki @USAGI: added sysctl for icmp rate limit. * Randy Dunlap and * YOSHIFUJI Hideaki @USAGI: Per-interface statistics support * Kazunori MIYAZAWA @USAGI: change output process to use ip6_append_data */ #define pr_fmt(fmt) "IPv6: " fmt #include <linux/module.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/skbuff.h> #include <linux/init.h> #include <linux/netfilter.h> #include <linux/slab.h> #ifdef CONFIG_SYSCTL #include <linux/sysctl.h> #endif #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/icmpv6.h> #include <net/ip.h> #include <net/sock.h> #include <net/ipv6.h> #include <net/ip6_checksum.h> #include <net/protocol.h> #include <net/raw.h> #include <net/rawv6.h> #include <net/transp_v6.h> #include <net/ip6_route.h> #include <net/addrconf.h> #include <net/icmp.h> #include <net/xfrm.h> #include <net/inet_common.h> #include <asm/uaccess.h> /* * The ICMP socket(s). This is the most convenient way to flow control * our ICMP output as well as maintain a clean interface throughout * all layers. All Socketless IP sends will soon be gone. * * On SMP we have one ICMP socket per-cpu. */ static inline struct sock *icmpv6_sk(struct net *net) { return net->ipv6.icmp_sk[smp_processor_id()]; } static int icmpv6_rcv(struct sk_buff *skb); static const struct inet6_protocol icmpv6_protocol = { .handler = icmpv6_rcv, .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, }; static __inline__ struct sock *icmpv6_xmit_lock(struct net *net) { struct sock *sk; local_bh_disable(); sk = icmpv6_sk(net); if (unlikely(!spin_trylock(&sk->sk_lock.slock))) { /* This can happen if the output path (f.e. SIT or * ip6ip6 tunnel) signals dst_link_failure() for an * outgoing ICMP6 packet. */ local_bh_enable(); return NULL; } return sk; } static __inline__ void icmpv6_xmit_unlock(struct sock *sk) { spin_unlock_bh(&sk->sk_lock.slock); } /* * Slightly more convenient version of icmpv6_send. */ void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos) { icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos); kfree_skb(skb); } /* * Figure out, may we reply to this packet with icmp error. * * We do not reply, if: * - it was icmp error message. * - it is truncated, so that it is known, that protocol is ICMPV6 * (i.e. 
in the middle of some exthdr) * * --ANK (980726) */ static bool is_ineligible(const struct sk_buff *skb) { int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data; int len = skb->len - ptr; __u8 nexthdr = ipv6_hdr(skb)->nexthdr; __be16 frag_off; if (len < 0) return true; ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr, &frag_off); if (ptr < 0) return false; if (nexthdr == IPPROTO_ICMPV6) { u8 _type, *tp; tp = skb_header_pointer(skb, ptr+offsetof(struct icmp6hdr, icmp6_type), sizeof(_type), &_type); if (tp == NULL || !(*tp & ICMPV6_INFOMSG_MASK)) return true; } return false; } /* * Check the ICMP output rate limit */ static inline bool icmpv6_xrlim_allow(struct sock *sk, u8 type, struct flowi6 *fl6) { struct dst_entry *dst; struct net *net = sock_net(sk); bool res = false; /* Informational messages are not limited. */ if (type & ICMPV6_INFOMSG_MASK) return true; /* Do not limit pmtu discovery, it would break it. */ if (type == ICMPV6_PKT_TOOBIG) return true; /* * Look up the output route. * XXX: perhaps the expire for routing entries cloned by * this lookup should be more aggressive (not longer than timeout). */ dst = ip6_route_output(net, sk, fl6); if (dst->error) { IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); } else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) { res = true; } else { struct rt6_info *rt = (struct rt6_info *)dst; int tmo = net->ipv6.sysctl.icmpv6_time; /* Give more bandwidth to wider prefixes. */ if (rt->rt6i_dst.plen < 128) tmo >>= ((128 - rt->rt6i_dst.plen)>>5); if (!rt->rt6i_peer) rt6_bind_peer(rt, 1); res = inet_peer_xrlim_allow(rt->rt6i_peer, tmo); } dst_release(dst); return res; } /* * an inline helper for the "simple" if statement below * checks if parameter problem report is caused by an * unrecognized IPv6 option that has the Option Type * highest-order two bits set to 10 */ static bool opt_unrec(struct sk_buff *skb, __u32 offset) { u8 _optval, *op; offset += skb_network_offset(skb); op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval); if (op == NULL) return true; return (*op & 0xC0) == 0x80; } static int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6, struct icmp6hdr *thdr, int len) { struct sk_buff *skb; struct icmp6hdr *icmp6h; int err = 0; if ((skb = skb_peek(&sk->sk_write_queue)) == NULL) goto out; icmp6h = icmp6_hdr(skb); memcpy(icmp6h, thdr, sizeof(struct icmp6hdr)); icmp6h->icmp6_cksum = 0; if (skb_queue_len(&sk->sk_write_queue) == 1) { skb->csum = csum_partial(icmp6h, sizeof(struct icmp6hdr), skb->csum); icmp6h->icmp6_cksum = csum_ipv6_magic(&fl6->saddr, &fl6->daddr, len, fl6->flowi6_proto, skb->csum); } else { __wsum tmp_csum = 0; skb_queue_walk(&sk->sk_write_queue, skb) { tmp_csum = csum_add(tmp_csum, skb->csum); } tmp_csum = csum_partial(icmp6h, sizeof(struct icmp6hdr), tmp_csum); icmp6h->icmp6_cksum = csum_ipv6_magic(&fl6->saddr, &fl6->daddr, len, fl6->flowi6_proto, tmp_csum); } ip6_push_pending_frames(sk); out: return err; } struct icmpv6_msg { struct sk_buff *skb; int offset; uint8_t type; }; static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb) { struct icmpv6_msg *msg = (struct icmpv6_msg *) from; struct sk_buff *org_skb = msg->skb; __wsum csum = 0; csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset, to, len, csum); skb->csum = csum_block_add(skb->csum, csum, odd); if (!(msg->type & ICMPV6_INFOMSG_MASK)) nf_ct_attach(skb, org_skb); return 0; } #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) static void mip6_addr_swap(struct sk_buff *skb) { 
struct ipv6hdr *iph = ipv6_hdr(skb); struct inet6_skb_parm *opt = IP6CB(skb); struct ipv6_destopt_hao *hao; struct in6_addr tmp; int off; if (opt->dsthao) { off = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO); if (likely(off >= 0)) { hao = (struct ipv6_destopt_hao *) (skb_network_header(skb) + off); tmp = iph->saddr; iph->saddr = hao->addr; hao->addr = tmp; } } } #else static inline void mip6_addr_swap(struct sk_buff *skb) {} #endif static struct dst_entry *icmpv6_route_lookup(struct net *net, struct sk_buff *skb, struct sock *sk, struct flowi6 *fl6) { struct dst_entry *dst, *dst2; struct flowi6 fl2; int err; err = ip6_dst_lookup(sk, &dst, fl6); if (err) return ERR_PTR(err); /* * We won't send icmp if the destination is known * anycast. */ if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) { LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: acast source\n"); dst_release(dst); return ERR_PTR(-EINVAL); } /* No need to clone since we're just using its address. */ dst2 = dst; dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), sk, 0); if (!IS_ERR(dst)) { if (dst != dst2) return dst; } else { if (PTR_ERR(dst) == -EPERM) dst = NULL; else return dst; } err = xfrm_decode_session_reverse(skb, flowi6_to_flowi(&fl2), AF_INET6); if (err) goto relookup_failed; err = ip6_dst_lookup(sk, &dst2, &fl2); if (err) goto relookup_failed; dst2 = xfrm_lookup(net, dst2, flowi6_to_flowi(&fl2), sk, XFRM_LOOKUP_ICMP); if (!IS_ERR(dst2)) { dst_release(dst); dst = dst2; } else { err = PTR_ERR(dst2); if (err == -EPERM) { dst_release(dst); return dst2; } else goto relookup_failed; } relookup_failed: if (dst) return dst; return ERR_PTR(err); } /* * Send an ICMP message in response to a packet in error */ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) { struct net *net = dev_net(skb->dev); struct inet6_dev *idev = NULL; struct ipv6hdr *hdr = ipv6_hdr(skb); struct sock *sk; struct ipv6_pinfo *np; const struct in6_addr *saddr = NULL; struct dst_entry *dst; struct icmp6hdr tmp_hdr; struct flowi6 fl6; struct icmpv6_msg msg; int iif = 0; int addr_type = 0; int len; int hlimit; int err = 0; if ((u8 *)hdr < skb->head || (skb->network_header + sizeof(*hdr)) > skb->tail) return; /* * Make sure we respect the rules * i.e. RFC 1885 2.4(e) * Rule (e.1) is enforced by not using icmpv6_send * in any code that processes icmp errors. */ addr_type = ipv6_addr_type(&hdr->daddr); if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0)) saddr = &hdr->daddr; /* * Dest addr check */ if ((addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST)) { if (type != ICMPV6_PKT_TOOBIG && !(type == ICMPV6_PARAMPROB && code == ICMPV6_UNK_OPTION && (opt_unrec(skb, info)))) return; saddr = NULL; } addr_type = ipv6_addr_type(&hdr->saddr); /* * Source addr check */ if (addr_type & IPV6_ADDR_LINKLOCAL) iif = skb->dev->ifindex; /* * Must not send error if the source does not uniquely * identify a single node (RFC2463 Section 2.4). * We check unspecified / multicast addresses here, * and anycast addresses will be checked later. */ if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) { LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: addr_any/mcast source\n"); return; } /* * Never answer to a ICMP packet. 
*/ if (is_ineligible(skb)) { LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: no reply to icmp error\n"); return; } mip6_addr_swap(skb); memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_ICMPV6; fl6.daddr = hdr->saddr; if (saddr) fl6.saddr = *saddr; fl6.flowi6_oif = iif; fl6.fl6_icmp_type = type; fl6.fl6_icmp_code = code; security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); sk = icmpv6_xmit_lock(net); if (sk == NULL) return; np = inet6_sk(sk); if (!icmpv6_xrlim_allow(sk, type, &fl6)) goto out; tmp_hdr.icmp6_type = type; tmp_hdr.icmp6_code = code; tmp_hdr.icmp6_cksum = 0; tmp_hdr.icmp6_pointer = htonl(info); if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) fl6.flowi6_oif = np->mcast_oif; else if (!fl6.flowi6_oif) fl6.flowi6_oif = np->ucast_oif; dst = icmpv6_route_lookup(net, skb, sk, &fl6); if (IS_ERR(dst)) goto out; if (ipv6_addr_is_multicast(&fl6.daddr)) hlimit = np->mcast_hops; else hlimit = np->hop_limit; if (hlimit < 0) hlimit = ip6_dst_hoplimit(dst); msg.skb = skb; msg.offset = skb_network_offset(skb); msg.type = type; len = skb->len - msg.offset; len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) -sizeof(struct icmp6hdr)); if (len < 0) { LIMIT_NETDEBUG(KERN_DEBUG "icmp: len problem\n"); goto out_dst_release; } rcu_read_lock(); idev = __in6_dev_get(skb->dev); err = ip6_append_data(sk, icmpv6_getfrag, &msg, len + sizeof(struct icmp6hdr), sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl6, (struct rt6_info *)dst, MSG_DONTWAIT, np->dontfrag); if (err) { ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS); ip6_flush_pending_frames(sk); } else { err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr, len + sizeof(struct icmp6hdr)); } rcu_read_unlock(); out_dst_release: dst_release(dst); out: icmpv6_xmit_unlock(sk); } EXPORT_SYMBOL(icmpv6_send); static void icmpv6_echo_reply(struct sk_buff *skb) { struct net *net = dev_net(skb->dev); struct sock *sk; struct inet6_dev *idev; struct ipv6_pinfo *np; const struct in6_addr *saddr = NULL; struct icmp6hdr *icmph = icmp6_hdr(skb); struct icmp6hdr tmp_hdr; struct flowi6 fl6; struct icmpv6_msg msg; struct dst_entry *dst; int err = 0; int hlimit; saddr = &ipv6_hdr(skb)->daddr; if (!ipv6_unicast_destination(skb)) saddr = NULL; memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr)); tmp_hdr.icmp6_type = ICMPV6_ECHO_REPLY; memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_ICMPV6; fl6.daddr = ipv6_hdr(skb)->saddr; if (saddr) fl6.saddr = *saddr; fl6.flowi6_oif = skb->dev->ifindex; fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY; security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); sk = icmpv6_xmit_lock(net); if (sk == NULL) return; np = inet6_sk(sk); if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) fl6.flowi6_oif = np->mcast_oif; else if (!fl6.flowi6_oif) fl6.flowi6_oif = np->ucast_oif; err = ip6_dst_lookup(sk, &dst, &fl6); if (err) goto out; dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0); if (IS_ERR(dst)) goto out; if (ipv6_addr_is_multicast(&fl6.daddr)) hlimit = np->mcast_hops; else hlimit = np->hop_limit; if (hlimit < 0) hlimit = ip6_dst_hoplimit(dst); idev = __in6_dev_get(skb->dev); msg.skb = skb; msg.offset = 0; msg.type = ICMPV6_ECHO_REPLY; err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr), sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl6, (struct rt6_info *)dst, MSG_DONTWAIT, np->dontfrag); if (err) { ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS); ip6_flush_pending_frames(sk); } else { err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr, skb->len + sizeof(struct 
icmp6hdr)); } dst_release(dst); out: icmpv6_xmit_unlock(sk); } static void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info) { const struct inet6_protocol *ipprot; int inner_offset; int hash; u8 nexthdr; __be16 frag_off; if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) return; nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr; if (ipv6_ext_hdr(nexthdr)) { /* now skip over extension headers */ inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, &frag_off); if (inner_offset<0) return; } else { inner_offset = sizeof(struct ipv6hdr); } /* Checkin header including 8 bytes of inner protocol header. */ if (!pskb_may_pull(skb, inner_offset+8)) return; /* BUGGG_FUTURE: we should try to parse exthdrs in this packet. Without this we will not able f.e. to make source routed pmtu discovery. Corresponding argument (opt) to notifiers is already added. --ANK (980726) */ hash = nexthdr & (MAX_INET_PROTOS - 1); rcu_read_lock(); ipprot = rcu_dereference(inet6_protos[hash]); if (ipprot && ipprot->err_handler) ipprot->err_handler(skb, NULL, type, code, inner_offset, info); rcu_read_unlock(); raw6_icmp_error(skb, nexthdr, type, code, inner_offset, info); } /* * Handle icmp messages */ static int icmpv6_rcv(struct sk_buff *skb) { struct net_device *dev = skb->dev; struct inet6_dev *idev = __in6_dev_get(dev); const struct in6_addr *saddr, *daddr; const struct ipv6hdr *orig_hdr; struct icmp6hdr *hdr; u8 type; if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { struct sec_path *sp = skb_sec_path(skb); int nh; if (!(sp && sp->xvec[sp->len - 1]->props.flags & XFRM_STATE_ICMP)) goto drop_no_count; if (!pskb_may_pull(skb, sizeof(*hdr) + sizeof(*orig_hdr))) goto drop_no_count; nh = skb_network_offset(skb); skb_set_network_header(skb, sizeof(*hdr)); if (!xfrm6_policy_check_reverse(NULL, XFRM_POLICY_IN, skb)) goto drop_no_count; skb_set_network_header(skb, nh); } ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_INMSGS); saddr = &ipv6_hdr(skb)->saddr; daddr = &ipv6_hdr(skb)->daddr; /* Perform checksum. */ switch (skb->ip_summed) { case CHECKSUM_COMPLETE: if (!csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6, skb->csum)) break; /* fall through */ case CHECKSUM_NONE: skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6, 0)); if (__skb_checksum_complete(skb)) { LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [%pI6 > %pI6]\n", saddr, daddr); goto discard_it; } } if (!pskb_pull(skb, sizeof(*hdr))) goto discard_it; hdr = icmp6_hdr(skb); type = hdr->icmp6_type; ICMP6MSGIN_INC_STATS_BH(dev_net(dev), idev, type); switch (type) { case ICMPV6_ECHO_REQUEST: icmpv6_echo_reply(skb); break; case ICMPV6_ECHO_REPLY: /* we couldn't care less */ break; case ICMPV6_PKT_TOOBIG: /* BUGGG_FUTURE: if packet contains rthdr, we cannot update standard destination cache. 
Seems, only "advanced" destination cache will allow to solve this problem --ANK (980726) */ if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) goto discard_it; hdr = icmp6_hdr(skb); orig_hdr = (struct ipv6hdr *) (hdr + 1); rt6_pmtu_discovery(&orig_hdr->daddr, &orig_hdr->saddr, dev, ntohl(hdr->icmp6_mtu)); /* * Drop through to notify */ case ICMPV6_DEST_UNREACH: case ICMPV6_TIME_EXCEED: case ICMPV6_PARAMPROB: icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu); break; case NDISC_ROUTER_SOLICITATION: case NDISC_ROUTER_ADVERTISEMENT: case NDISC_NEIGHBOUR_SOLICITATION: case NDISC_NEIGHBOUR_ADVERTISEMENT: case NDISC_REDIRECT: ndisc_rcv(skb); break; case ICMPV6_MGM_QUERY: igmp6_event_query(skb); break; case ICMPV6_MGM_REPORT: igmp6_event_report(skb); break; case ICMPV6_MGM_REDUCTION: case ICMPV6_NI_QUERY: case ICMPV6_NI_REPLY: case ICMPV6_MLD2_REPORT: case ICMPV6_DHAAD_REQUEST: case ICMPV6_DHAAD_REPLY: case ICMPV6_MOBILE_PREFIX_SOL: case ICMPV6_MOBILE_PREFIX_ADV: break; default: LIMIT_NETDEBUG(KERN_DEBUG "icmpv6: msg of unknown type\n"); /* informational */ if (type & ICMPV6_INFOMSG_MASK) break; /* * error of unknown type. * must pass to upper level */ icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu); } kfree_skb(skb); return 0; discard_it: ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_INERRORS); drop_no_count: kfree_skb(skb); return 0; } void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6, u8 type, const struct in6_addr *saddr, const struct in6_addr *daddr, int oif) { memset(fl6, 0, sizeof(*fl6)); fl6->saddr = *saddr; fl6->daddr = *daddr; fl6->flowi6_proto = IPPROTO_ICMPV6; fl6->fl6_icmp_type = type; fl6->fl6_icmp_code = 0; fl6->flowi6_oif = oif; security_sk_classify_flow(sk, flowi6_to_flowi(fl6)); } /* * Special lock-class for __icmpv6_sk: */ static struct lock_class_key icmpv6_socket_sk_dst_lock_key; static int __net_init icmpv6_sk_init(struct net *net) { struct sock *sk; int err, i, j; net->ipv6.icmp_sk = kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL); if (net->ipv6.icmp_sk == NULL) return -ENOMEM; for_each_possible_cpu(i) { err = inet_ctl_sock_create(&sk, PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, net); if (err < 0) { pr_err("Failed to initialize the ICMP6 control socket (err %d)\n", err); goto fail; } net->ipv6.icmp_sk[i] = sk; /* * Split off their lock-class, because sk->sk_dst_lock * gets used from softirqs, which is safe for * __icmpv6_sk (because those never get directly used * via userspace syscalls), but unsafe for normal sockets. */ lockdep_set_class(&sk->sk_dst_lock, &icmpv6_socket_sk_dst_lock_key); /* Enough space for 2 64K ICMP packets, including * sk_buff struct overhead. 
*/ sk->sk_sndbuf = 2 * SKB_TRUESIZE(64 * 1024); } return 0; fail: for (j = 0; j < i; j++) inet_ctl_sock_destroy(net->ipv6.icmp_sk[j]); kfree(net->ipv6.icmp_sk); return err; } static void __net_exit icmpv6_sk_exit(struct net *net) { int i; for_each_possible_cpu(i) { inet_ctl_sock_destroy(net->ipv6.icmp_sk[i]); } kfree(net->ipv6.icmp_sk); } static struct pernet_operations icmpv6_sk_ops = { .init = icmpv6_sk_init, .exit = icmpv6_sk_exit, }; int __init icmpv6_init(void) { int err; err = register_pernet_subsys(&icmpv6_sk_ops); if (err < 0) return err; err = -EAGAIN; if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0) goto fail; return 0; fail: pr_err("Failed to register ICMP6 protocol\n"); unregister_pernet_subsys(&icmpv6_sk_ops); return err; } void icmpv6_cleanup(void) { unregister_pernet_subsys(&icmpv6_sk_ops); inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6); } static const struct icmp6_err { int err; int fatal; } tab_unreach[] = { { /* NOROUTE */ .err = ENETUNREACH, .fatal = 0, }, { /* ADM_PROHIBITED */ .err = EACCES, .fatal = 1, }, { /* Was NOT_NEIGHBOUR, now reserved */ .err = EHOSTUNREACH, .fatal = 0, }, { /* ADDR_UNREACH */ .err = EHOSTUNREACH, .fatal = 0, }, { /* PORT_UNREACH */ .err = ECONNREFUSED, .fatal = 1, }, }; int icmpv6_err_convert(u8 type, u8 code, int *err) { int fatal = 0; *err = EPROTO; switch (type) { case ICMPV6_DEST_UNREACH: fatal = 1; if (code <= ICMPV6_PORT_UNREACH) { *err = tab_unreach[code].err; fatal = tab_unreach[code].fatal; } break; case ICMPV6_PKT_TOOBIG: *err = EMSGSIZE; break; case ICMPV6_PARAMPROB: *err = EPROTO; fatal = 1; break; case ICMPV6_TIME_EXCEED: *err = EHOSTUNREACH; break; } return fatal; } EXPORT_SYMBOL(icmpv6_err_convert); #ifdef CONFIG_SYSCTL ctl_table ipv6_icmp_table_template[] = { { .procname = "ratelimit", .data = &init_net.ipv6.sysctl.icmpv6_time, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_ms_jiffies, }, { }, }; struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net) { struct ctl_table *table; table = kmemdup(ipv6_icmp_table_template, sizeof(ipv6_icmp_table_template), GFP_KERNEL); if (table) table[0].data = &net->ipv6.sysctl.icmpv6_time; return table; } #endif
gpl-2.0
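One detail worth calling out in icmpv6_xrlim_allow() above is the prefix-width scaling: tmo >>= (128 - rt->rt6i_dst.plen) >> 5, so a host route (/128) keeps the full icmpv6_time interval while a default route (/0) has it divided by 16 — destinations that aggregate more hosts are granted a proportionally larger ICMP error budget. The snippet below just reproduces that arithmetic; scaled_timeout() is an illustrative helper, not a kernel function.

#include <stdio.h>

/* Same shift as icmpv6_xrlim_allow(): shorter (wider) prefixes
 * divide the base rate-limit interval down, granting more budget. */
static int scaled_timeout(int base_ms, int plen)
{
    int tmo = base_ms;

    if (plen < 128)
        tmo >>= (128 - plen) >> 5;
    return tmo;
}

int main(void)
{
    const int plens[] = { 128, 96, 64, 48, 0 };

    for (int i = 0; i < 5; i++)
        printf("plen %3d -> interval %4d ms\n",
               plens[i], scaled_timeout(1000, plens[i]));
    return 0;
}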
mk01/linux-fslc
drivers/staging/lustre/lnet/libcfs/fail.c
49
3745
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see http://www.gnu.org/licenses
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Oracle Corporation, Inc.
 */

#include "../../include/linux/libcfs/libcfs.h"

unsigned long cfs_fail_loc;
EXPORT_SYMBOL(cfs_fail_loc);

unsigned int cfs_fail_val;
EXPORT_SYMBOL(cfs_fail_val);

int cfs_fail_err;
EXPORT_SYMBOL(cfs_fail_err);

DECLARE_WAIT_QUEUE_HEAD(cfs_race_waitq);
EXPORT_SYMBOL(cfs_race_waitq);

int cfs_race_state;
EXPORT_SYMBOL(cfs_race_state);

int __cfs_fail_check_set(__u32 id, __u32 value, int set)
{
    static atomic_t cfs_fail_count = ATOMIC_INIT(0);

    LASSERT(!(id & CFS_FAIL_ONCE));

    if ((cfs_fail_loc & (CFS_FAILED | CFS_FAIL_ONCE)) ==
        (CFS_FAILED | CFS_FAIL_ONCE)) {
        atomic_set(&cfs_fail_count, 0); /* paranoia */
        return 0;
    }

    /* Fail 1/cfs_fail_val times */
    if (cfs_fail_loc & CFS_FAIL_RAND) {
        if (cfs_fail_val < 2 || cfs_rand() % cfs_fail_val > 0)
            return 0;
    }

    /* Skip the first cfs_fail_val, then fail */
    if (cfs_fail_loc & CFS_FAIL_SKIP) {
        if (atomic_inc_return(&cfs_fail_count) <= cfs_fail_val)
            return 0;
    }

    /* check cfs_fail_val... */
    if (set == CFS_FAIL_LOC_VALUE) {
        if (cfs_fail_val != -1 && cfs_fail_val != value)
            return 0;
    }

    /* Fail cfs_fail_val times, overridden by FAIL_ONCE */
    if (cfs_fail_loc & CFS_FAIL_SOME &&
        (!(cfs_fail_loc & CFS_FAIL_ONCE) || cfs_fail_val <= 1)) {
        int count = atomic_inc_return(&cfs_fail_count);

        if (count >= cfs_fail_val) {
            set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
            atomic_set(&cfs_fail_count, 0);
            /* we lost the race to increase */
            if (count > cfs_fail_val)
                return 0;
        }
    }

    if ((set == CFS_FAIL_LOC_ORSET || set == CFS_FAIL_LOC_RESET) &&
        (value & CFS_FAIL_ONCE))
        set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
    /* Lost race to set CFS_FAILED_BIT. */
    if (test_and_set_bit(CFS_FAILED_BIT, &cfs_fail_loc)) {
        /* If CFS_FAIL_ONCE is valid, only one process can fail,
         * otherwise multi-process can fail at the same time.
         */
        if (cfs_fail_loc & CFS_FAIL_ONCE)
            return 0;
    }

    switch (set) {
    case CFS_FAIL_LOC_NOSET:
    case CFS_FAIL_LOC_VALUE:
        break;
    case CFS_FAIL_LOC_ORSET:
        cfs_fail_loc |= value & ~(CFS_FAILED | CFS_FAIL_ONCE);
        break;
    case CFS_FAIL_LOC_RESET:
        cfs_fail_loc = value;
        break;
    default:
        LASSERTF(0, "called with bad set %u\n", set);
        break;
    }

    return 1;
}
EXPORT_SYMBOL(__cfs_fail_check_set);

int __cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
{
    int ret;

    ret = __cfs_fail_check_set(id, value, set);
    if (ret && likely(ms > 0)) {
        CERROR("cfs_fail_timeout id %x sleeping for %dms\n", id, ms);
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_timeout(cfs_time_seconds(ms) / 1000);
        CERROR("cfs_fail_timeout id %x awake\n", id);
    }
    return ret;
}
EXPORT_SYMBOL(__cfs_fail_timeout_set);
gpl-2.0
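The counting semantics of CFS_FAIL_SKIP and CFS_FAIL_SOME in __cfs_fail_check_set() above are easy to get backwards, so here is a small stand-alone simulation of just that logic. The FAIL_* flag values and the fail_check() helper are illustrative stand-ins, not the real libcfs constants or API.

#include <stdio.h>

/* Illustrative stand-ins -- not the real libcfs bit values. */
#define FAIL_SKIP 0x1	/* pass the first fail_val hits, then always fail */
#define FAIL_SOME 0x2	/* fail the first fail_val hits, then stop */

static unsigned long fail_loc;
static unsigned int fail_val;
static int hit_count;

static int fail_check(void)
{
	if (fail_loc & FAIL_SKIP)
		return ++hit_count > (int)fail_val;	/* skip N, then fail */
	if (fail_loc & FAIL_SOME)
		return ++hit_count <= (int)fail_val;	/* fail N, then pass */
	return 0;
}

int main(void)
{
	int i;

	fail_loc = FAIL_SKIP;
	fail_val = 3;
	for (i = 1; i <= 5; i++)
		printf("hit %d -> %s\n", i, fail_check() ? "inject" : "pass");
	return 0;
}

With fail_val = 3 under FAIL_SKIP, the first three hits pass and every later hit injects the failure, matching the "Skip the first cfs_fail_val, then fail" comment in the file; FAIL_SOME is the mirror image.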
Fagulhas/android_kernel_huawei_u8815
drivers/video/msm/mddi_nt35560.c
49
20627
/* drivers\video\msm\mddi_nt35560.c * NT35560 LCD driver for 7x30 platform * * Copyright (C) 2010 HUAWEI Technology Co., ltd. * * Date: 2011/03/08 * By jiaoshuangwei * */ #include "msm_fb.h" #include "mddihost.h" #include "mddihosti.h" #include <linux/mfd/pmic8058.h> #include <mach/gpio.h> #include <mach/vreg.h> #include <linux/kernel.h> #include <linux/gpio.h> #include <linux/hardware_self_adapt.h> #include <linux/pwm.h> #include <mach/pmic.h> #include "hw_backlight.h" #include "hw_lcd_common.h" #include "lcd_hw_debug.h" #include <asm/mach-types.h> struct sequence* nt35560_fwvga_init_table = NULL; static lcd_panel_type lcd_panel_fwvga = LCD_NONE; #ifdef CONFIG_FB_AUTO_CABC /* NT35560 CABC registers default value */ #define DEFAULT_VAL_ABC_CTRL2 0x0080 #define DEFAULT_VAL_ABC_CTRL6 0x0021 #define DEFAULT_VAL_ABC_CTRL7 0x0040 #define DEFAULT_VAL_ABC_CTRL14 0x0000 #define DEFAULT_VAL_MOV_CTRL1 0x0000 /* Value for NT35560 CABC registers parameters */ #define VAL_BIT_DMCT (0x01 << 1) #define VAL_BIT_DD_C (0x01 << 7) #define VAL_DIM_STEP_STILL(x) ((x & 0x07) << 0) #define VAL_DMST_C(x) ((x & 0x0F) << 0) #define VAL_MOVDET(x) ((x & 0x7F) << 0) /* Bit mask for parameter */ #define MASK_DIM_STEP_STILL (0x07 << 0) #define MASK_DMST_C (0x0F << 0) #define MASK_MOVDET (0x7F << 0) /* CABC state macro */ #define STATE_OFF 0 #define STATE_ON 1 static struct sequence nt35560_fwvga_write_cabc_mode_table[] = { /* Write CABC mode */ {0x5500,0x00,0} }; static struct sequence nt35560_fwvga_abc_ctrl_2_table[] = { /* ABC CTRL 2 */ {0x19C0,0x80,0} }; static struct sequence nt35560_fwvga_abc_ctrl_6_table[] = { /* ABC CTRL 6 */ {0x1DC0,0x21,0} }; static struct sequence nt35560_fwvga_abc_ctrl_7_table[] = { /* ABC CTRL 7 */ {0x1EC0,0x40,0} }; static struct sequence nt35560_fwvga_abc_ctrl_14_table[] = { /* ABC CTRL 14 */ {0x25C0,0x00,0} }; static struct sequence nt35560_fwvga_automatic_moving_selection_table[] = { /* MOV CTRL 1 */ {0x72C0,0x00,0} }; #endif // CONFIG_FB_AUTO_CABC static struct sequence nt35560_fwvga_write_cabc_brightness_table[] = { /* Write CABC brightness */ {0x5100,0x00,0} }; /* reload CABC frequency register ,because sleep out ,it recover default value * U8860 and C8860 use 300Hz */ static const struct sequence nt35560_fwvga_standby_exit_table[] = { {0x1100,0x00,120}, {0x22C0,0xFF,0}, }; /* reload CABC frequency register ,because sleep out ,it recover default value * low power board is 22kHz */ static const struct sequence nt35560_fwvga_standby_exit_tablelp[] = { {0x1100,0x00,120}, {0x22C0,0x04,0}, }; static const struct sequence nt35560_fwvga_standby_enter_table[]= { {0x1000,0x00,120} }; /* add the code for dynamic gamma function */ #ifdef CONFIG_FB_DYNAMIC_GAMMA //gamma 2.2 /* Revise some spelling mistake */ static const struct sequence nt35560_fwvga_dynamic_gamma22_table[] = { {0XC980,0X01,0}, {0x5500,0x00,0}, {0X0180,0X14,0}, {0X0280,0X00,0}, {0X0380,0X33,0}, {0X0480,0X48,0}, {0X0780,0X00,0}, {0X0880,0X44,0}, {0X0980,0X54,0}, {0X0A80,0X12,0}, {0X1280,0X00,0}, {0X1380,0X10,0}, {0X1480,0X0d,0}, {0X1580,0XA0,0}, {0X1A80,0X67,0}, {0X1F80,0X00,0}, {0X2080,0X01,0}, {0X2180,0X63,0}, {0X2480,0X09,0}, {0X2580,0X1E,0}, {0X2680,0X4B,0}, {0X2780,0X68,0}, {0X2880,0X1F,0}, {0X2980,0X37,0}, {0X2A80,0X64,0}, {0X2B80,0X84,0}, {0X2D80,0X20,0}, {0X2F80,0X2B,0}, {0X3080,0XBD,0}, {0X3180,0X1B,0}, {0X3280,0X3A,0}, {0X3380,0X4E,0}, {0X3480,0X9B,0}, {0X3580,0XBF,0}, {0X3680,0XD9,0}, {0X3780,0X76,0}, {0X3880,0X09,0}, {0X3980,0X26,0}, {0X3A80,0X40,0}, {0X3B80,0X64,0}, {0X3D80,0X31,0}, {0X3F80,0X45,0}, {0X4080,0X64,0}, 
{0X4180,0X42,0}, {0X4280,0X14,0}, {0X4380,0X1F,0}, {0X4480,0X7B,0}, {0X4580,0X1B,0}, {0X4680,0X48,0}, {0X4780,0X60,0}, {0X4880,0X97,0}, {0X4980,0XB4,0}, {0X4A80,0XE1,0}, {0X4B80,0X76,0}, {0X4C80,0X09,0}, {0X4D80,0X1E,0}, {0X4E80,0X4B,0}, {0X4F80,0X68,0}, {0X5080,0X1F,0}, {0X5180,0X37,0}, {0X5280,0X64,0}, {0X5380,0X8D,0}, {0X5480,0X20,0}, {0X5580,0X2B,0}, {0X5680,0XC2,0}, {0X5780,0X1E,0}, {0X5880,0X40,0}, {0X5980,0X54,0}, {0X5A80,0X98,0}, {0X5B80,0XB0,0}, {0X5C80,0XD0,0}, {0X5D80,0X76,0}, {0X5E80,0X09,0}, {0X5F80,0X2F,0}, {0X6080,0X4F,0}, {0X6180,0X67,0}, {0X6280,0X2B,0}, {0X6380,0X3F,0}, {0X6480,0X61,0}, {0X6580,0X3D,0}, {0X6680,0X14,0}, {0X6780,0X1F,0}, {0X6880,0X72,0}, {0X6980,0X1B,0}, {0X6A80,0X48,0}, {0X6B80,0X60,0}, {0X6C80,0X97,0}, {0X6D80,0XB4,0}, {0X6E80,0XE1,0}, {0X6F80,0X76,0}, {0X7080,0X19,0}, {0X7180,0X1E,0}, {0X7280,0X4B,0}, {0X7380,0X90,0}, {0X7480,0X35,0}, {0X7580,0X46,0}, {0X7680,0X69,0}, {0X7780,0XA4,0}, {0X7880,0X20,0}, {0X7980,0X2B,0}, {0X7A80,0XCC,0}, {0X7B80,0X19,0}, {0X7C80,0X3C,0}, {0X7D80,0X51,0}, {0X7E80,0XA5,0}, {0X7F80,0XD0,0}, {0X8080,0XD0,0}, {0X8180,0X76,0}, {0X8280,0X09,0}, {0X8380,0X2F,0}, {0X8480,0X2F,0}, {0X8580,0X5A,0}, {0X8680,0X2E,0}, {0X8780,0X43,0}, {0X8880,0X66,0}, {0X8980,0X33,0}, {0X8A80,0X14,0}, {0X8B80,0X1F,0}, {0X8C80,0X5B,0}, {0X8D80,0X16,0}, {0X8E80,0X39,0}, {0X8F80,0X4A,0}, {0X9080,0X6F,0}, {0X9180,0XB4,0}, {0X9280,0XE1,0}, {0X9380,0X66,0}, {0X9480,0XBF,0}, {0X9580,0X00,0}, {0X9680,0X00,0}, {0X9780,0XB4,0}, {0X9880,0X0D,0}, {0X9980,0X2C,0}, {0X9A80,0X0A,0}, {0X9B80,0X01,0}, {0X9C80,0X01,0}, {0X9D80,0X00,0}, {0X9E80,0X00,0}, {0X9F80,0X00,0}, {0XA080,0X0A,0}, {0XA280,0X06,0}, {0XA380,0X2E,0}, {0XA480,0X0E,0}, {0XA580,0XC0,0}, {0XA680,0X01,0}, {0XA780,0X00,0}, {0XA980,0X00,0}, {0XAA80,0X00,0}, {0XE780,0X00,0}, {0XED80,0X00,0}, {0XF380,0XCC,0}, {0XFB80,0X00,0}, {0X3500,0X00,0}, /* delete two lines */ }; //gamma1.9 /* Revise some spelling mistake */ static const struct sequence nt35560_fwvga_dynamic_gamma19_table[] = { {0x1100,0x00,120} }; //gamma2.5 /* Revise some spelling mistake */ static const struct sequence nt35560_fwvga_dynamic_gamma25_table[] = { /*there is 2.5 GAMA initialization sequence */ {0XC980,0X01,0}, {0x5500,0x00,0}, {0X0180,0X14,0}, {0X0280,0X00,0}, {0X0380,0X33,0}, {0X0480,0X48,0}, {0X0780,0X00,0}, {0X0880,0X44,0}, {0X0980,0X54,0}, {0X0A80,0X12,0}, {0X1280,0X00,0}, {0X1380,0X10,0}, {0X1480,0X0d,0}, {0X1580,0XA0,0}, {0X1A80,0X67,0}, {0X1F80,0X00,0}, {0X2080,0X01,0}, {0X2180,0X63,0}, {0X2480,0X09,0}, {0X2580,0X1E,0}, {0X2680,0X54,0}, {0X2780,0X73,0}, {0X2880,0X1F,0}, {0X2980,0X36,0}, {0X2A80,0X64,0}, {0X2B80,0X8B,0}, {0X2D80,0X20,0}, {0X2F80,0X29,0}, {0X3080,0XC5,0}, {0X3180,0X16,0}, {0X3280,0X38,0}, {0X3380,0X4D,0}, {0X3480,0XAB,0}, {0X3580,0XCB,0}, {0X3680,0XD9,0}, {0X3780,0X76,0}, {0X3880,0X09,0}, {0X3980,0X26,0}, {0X3A80,0X34,0}, {0X3B80,0X54,0}, {0X3D80,0X32,0}, {0X3F80,0X47,0}, {0X4080,0X69,0}, {0X4180,0X3A,0}, {0X4280,0X16,0}, {0X4380,0X1F,0}, {0X4480,0X74,0}, {0X4580,0X1B,0}, {0X4680,0X49,0}, {0X4780,0X60,0}, {0X4880,0X8C,0}, {0X4980,0XAB,0}, {0X4A80,0XE1,0}, {0X4B80,0X76,0}, {0X4C80,0X09,0}, {0X4D80,0X1E,0}, {0X4E80,0X54,0}, {0X4F80,0X73,0}, {0X5080,0X1F,0}, {0X5180,0X36,0}, {0X5280,0X64,0}, {0X5380,0X94,0}, {0X5480,0X20,0}, {0X5580,0X29,0}, {0X5680,0XCA,0}, {0X5780,0X19,0}, {0X5880,0X3E,0}, {0X5980,0X53,0}, {0X5A80,0XA8,0}, {0X5B80,0XBC,0}, {0X5C80,0XD0,0}, {0X5D80,0X76,0}, {0X5E80,0X09,0}, {0X5F80,0X2F,0}, {0X6080,0X43,0}, {0X6180,0X57,0}, {0X6280,0X2C,0}, {0X6380,0X41,0}, {0X6480,0X66,0}, {0X6580,0X35,0}, 
{0X6680,0X16,0}, {0X6780,0X1F,0}, {0X6880,0X6B,0}, {0X6980,0X1B,0}, {0X6A80,0X49,0}, {0X6B80,0X60,0}, {0X6C80,0X8C,0}, {0X6D80,0XAB,0}, {0X6E80,0XE1,0}, {0X6F80,0X76,0}, {0X7080,0X19,0}, {0X7180,0X1E,0}, {0X7280,0X54,0}, {0X7380,0X9B,0}, {0X7480,0X35,0}, {0X7580,0X45,0}, {0X7680,0X69,0}, {0X7780,0XAB,0}, {0X7880,0X20,0}, {0X7980,0X29,0}, {0X7A80,0XD4,0}, {0X7B80,0X14,0}, {0X7C80,0X3A,0}, {0X7D80,0X50,0}, {0X7E80,0XB5,0}, {0X7F80,0XDC,0}, {0X8080,0XD0,0}, {0X8180,0X76,0}, {0X8280,0X09,0}, {0X8380,0X2F,0}, {0X8480,0X23,0}, {0X8580,0X4A,0}, {0X8680,0X2F,0}, {0X8780,0X45,0}, {0X8880,0X6B,0}, {0X8980,0X2B,0}, {0X8A80,0X16,0}, {0X8B80,0X1F,0}, {0X8C80,0X54,0}, {0X8D80,0X16,0}, {0X8E80,0X3A,0}, {0X8F80,0X4A,0}, {0X9080,0X64,0}, {0X9180,0XAB,0}, {0X9280,0XE1,0}, {0X9380,0X66,0}, {0X9480,0XBF,0}, {0X9580,0X00,0}, {0X9680,0X00,0}, {0X9780,0XB4,0}, {0X9880,0X0D,0}, {0X9980,0X2C,0}, {0X9A80,0X0A,0}, {0X9B80,0X01,0}, {0X9C80,0X01,0}, {0X9D80,0X00,0}, {0X9E80,0X00,0}, {0X9F80,0X00,0}, {0XA080,0X0A,0}, {0XA280,0X06,0}, {0XA380,0X2E,0}, {0XA480,0X0E,0}, {0XA580,0XC0,0}, {0XA680,0X01,0}, {0XA780,0X00,0}, {0XA980,0X00,0}, {0XAA80,0X00,0}, {0XE780,0X00,0}, {0XED80,0X00,0}, {0XF380,0XCC,0}, {0XFB80,0X00,0}, {0X3500,0X00,0}, }; /* add the function to set different gama by different mode */ /* Revise some spelling mistakes */ int nt35560_set_dynamic_gamma(enum danymic_gamma_mode gamma_mode) { int ret = 0; if (LOW_LIGHT == gamma_mode) { printk(KERN_ERR "the dynamic_gamma_setting is wrong\n"); } switch(gamma_mode) { case GAMMA25: ret = process_mddi_table((struct sequence*)&nt35560_fwvga_dynamic_gamma25_table, ARRAY_SIZE(nt35560_fwvga_dynamic_gamma25_table), lcd_panel_fwvga); break ; case GAMMA22: ret = process_mddi_table((struct sequence*)&nt35560_fwvga_dynamic_gamma22_table, ARRAY_SIZE(nt35560_fwvga_dynamic_gamma22_table), lcd_panel_fwvga); break; case HIGH_LIGHT: ret = process_mddi_table((struct sequence*)&nt35560_fwvga_dynamic_gamma19_table, ARRAY_SIZE(nt35560_fwvga_dynamic_gamma19_table), lcd_panel_fwvga); break; default: ret= -1; break; } LCD_DEBUG("%s: change gamma mode to %d\n",__func__,gamma_mode); return ret; } #endif #ifdef CONFIG_FB_AUTO_CABC /*************************************************************** Function: nt35560_set_cabc_moving_detect Description: Set CABC moving detect function on or off Parameters: uint32 state: 0 for off, 1 for on Return: 0: success ***************************************************************/ static int nt35560_set_cabc_moving_detect(uint32 state) { int ret = 0; if (state == STATE_OFF) { /* Turn off automatic moving mode selection */ nt35560_fwvga_automatic_moving_selection_table[0].value = DEFAULT_VAL_MOV_CTRL1; ret = process_mddi_table((struct sequence*)&nt35560_fwvga_automatic_moving_selection_table, ARRAY_SIZE(nt35560_fwvga_automatic_moving_selection_table), lcd_panel_fwvga); } else { /* Automatic moving mode selection * If host's frame RAM update rate is 20 frames per second, * the CABC mode will be changed from still mode to moving mode. * This function is only available in normal display mode with CABC mode is set still mode. 
*/ nt35560_fwvga_automatic_moving_selection_table[0].value = (DEFAULT_VAL_MOV_CTRL1 & (~MASK_MOVDET)) | VAL_MOVDET(0x13); ret = process_mddi_table((struct sequence*)&nt35560_fwvga_automatic_moving_selection_table, ARRAY_SIZE(nt35560_fwvga_automatic_moving_selection_table), lcd_panel_fwvga); } LCD_DEBUG("%s: set cabc moving detect: %d\n", __func__, state); return ret; } /*************************************************************** Function: nt35560_set_cabc_dimming Description: Set CABC dimming function on or off Parameters: uint32 state: 0 for off, 1 for on Return: 0: success ***************************************************************/ static int nt35560_set_cabc_dimming(uint32 state) { int ret = 0; /* Set DMCT bit to 1, then the CABC dimming function is controlled by DD_C */ nt35560_fwvga_abc_ctrl_14_table[0].value = VAL_BIT_DMCT | DEFAULT_VAL_ABC_CTRL14; ret = process_mddi_table((struct sequence*)&nt35560_fwvga_abc_ctrl_14_table, ARRAY_SIZE(nt35560_fwvga_abc_ctrl_14_table), lcd_panel_fwvga); if (state == STATE_OFF) { /* Turn off the CABC dimming function */ nt35560_fwvga_abc_ctrl_2_table[0].value = (~VAL_BIT_DD_C) & DEFAULT_VAL_ABC_CTRL2; ret = process_mddi_table((struct sequence*)&nt35560_fwvga_abc_ctrl_2_table, ARRAY_SIZE(nt35560_fwvga_abc_ctrl_2_table), lcd_panel_fwvga); } else { /* Turn on the CABC dimming function */ nt35560_fwvga_abc_ctrl_2_table[0].value = VAL_BIT_DD_C | DEFAULT_VAL_ABC_CTRL2; ret = process_mddi_table((struct sequence*)&nt35560_fwvga_abc_ctrl_2_table, ARRAY_SIZE(nt35560_fwvga_abc_ctrl_2_table), lcd_panel_fwvga); /* DIM_STEP_STILL, 8 steps */ nt35560_fwvga_abc_ctrl_6_table[0].value = (DEFAULT_VAL_ABC_CTRL6 & (~MASK_DIM_STEP_STILL)) | VAL_DIM_STEP_STILL(0x02); ret = process_mddi_table((struct sequence*)&nt35560_fwvga_abc_ctrl_6_table, ARRAY_SIZE(nt35560_fwvga_abc_ctrl_6_table), lcd_panel_fwvga); /* DMST_C, 4 frames per step */ nt35560_fwvga_abc_ctrl_7_table[0].value = (DEFAULT_VAL_ABC_CTRL7 & (~MASK_DMST_C)) | VAL_DMST_C(0x3); ret = process_mddi_table((struct sequence*)&nt35560_fwvga_abc_ctrl_7_table, ARRAY_SIZE(nt35560_fwvga_abc_ctrl_7_table), lcd_panel_fwvga); } LCD_DEBUG("%s: set cabc dimming: %d\n", __func__, state); return ret; } /*************************************************************** Function: nt35560_set_cabc_mode Description: Set CABC mode Parameters: uint32 mode: 0 for off, 1 for UI mode, 2 for still mode, 3 for moving mode Return: 0: success ***************************************************************/ static int nt35560_set_cabc_mode(uint32 mode) { int ret = 0; switch (mode) { case CABC_MODE_OFF: case CABC_MODE_UI: case CABC_MODE_STILL: case CABC_MODE_MOVING: /* Set CABC mode, 0 for off, 1 for UI mode, 2 for still mode, 3 for moving mode */ nt35560_fwvga_write_cabc_mode_table[0].value = mode; ret = process_mddi_table((struct sequence*)&nt35560_fwvga_write_cabc_mode_table, ARRAY_SIZE(nt35560_fwvga_write_cabc_mode_table), lcd_panel_fwvga); LCD_DEBUG("%s: set cabc mode to %d\n", __func__, mode); break; default: LCD_DEBUG("%s: invalid cabc mode: %d\n", __func__, mode); ret = -EINVAL; break; } return ret; } /*************************************************************** Function: nt35560_config_cabc Description: Set CABC configuration Parameters: struct msmfb_cabc_config cabc_cfg: CABC configuration struct Return: 0: success ***************************************************************/ static int nt35560_config_cabc(struct msmfb_cabc_config cabc_cfg) { int ret = 0; /* Enable/Disable CABC dimming function */ 
nt35560_set_cabc_dimming(cabc_cfg.dimming_on); /* Enable/Disable CABC moving detect function */ nt35560_set_cabc_moving_detect(cabc_cfg.mov_det_on); /* Set CABC mode */ nt35560_set_cabc_mode(cabc_cfg.mode); return ret; } #endif // CONFIG_FB_AUTO_CABC /*************************************************************** Function: nt35560_set_cabc_brightness Description: Set CABC brightness Parameters: uint32 brightness: backlight brightness value Return: 0: success ***************************************************************/ static void nt35560_set_cabc_brightness(struct msm_fb_data_type *mfd,uint32 bl_level) { nt35560_fwvga_write_cabc_brightness_table[0].value = bl_level; process_mddi_table((struct sequence*)&nt35560_fwvga_write_cabc_brightness_table, ARRAY_SIZE(nt35560_fwvga_write_cabc_brightness_table), lcd_panel_fwvga); } static int nt35560_lcd_on(struct platform_device *pdev) { int ret = 0; boolean para_debug_flag = FALSE; uint32 para_num = 0; /* open debug file and read the para */ switch(lcd_panel_fwvga) { case LCD_NT35560_TOSHIBA_FWVGA: para_debug_flag = lcd_debug_malloc_get_para( "nt35560_toshiba_fwvga_init_table", (void**)&nt35560_fwvga_init_table,&para_num); break; default: break; } /* If exist the init file ,then init lcd with it for debug */ if( (TRUE == para_debug_flag)&&(NULL != nt35560_fwvga_init_table)) { ret = process_mddi_table(nt35560_fwvga_init_table, para_num, lcd_panel_fwvga); } else { if(machine_is_msm8255_u8860lp() || machine_is_msm8255_u8860_r() || machine_is_msm8255_u8860_51()) { /* Exit Standby Mode */ ret = process_mddi_table((struct sequence*)&nt35560_fwvga_standby_exit_tablelp, ARRAY_SIZE(nt35560_fwvga_standby_exit_tablelp), lcd_panel_fwvga); } else { /* Exit Standby Mode */ ret = process_mddi_table((struct sequence*)&nt35560_fwvga_standby_exit_table, ARRAY_SIZE(nt35560_fwvga_standby_exit_table), lcd_panel_fwvga); } } /* Must malloc before,then you can call free */ if((TRUE == para_debug_flag)&&(NULL != nt35560_fwvga_init_table)) { lcd_debug_free_para((void *)nt35560_fwvga_init_table); } LCD_DEBUG("%s: nt35560_lcd exit sleep mode ,on_ret=%d\n",__func__,ret); return ret; } static int nt35560_lcd_off(struct platform_device *pdev) { int ret = 0; /*enter sleep mode*/ ret = process_mddi_table((struct sequence*)&nt35560_fwvga_standby_enter_table, ARRAY_SIZE(nt35560_fwvga_standby_enter_table), lcd_panel_fwvga); LCD_DEBUG("%s: nt35560_lcd enter sleep mode ,off_ret=%d\n",__func__,ret); return ret; } static int __devinit nt35560_probe(struct platform_device *pdev) { msm_fb_add_device(pdev); return 0; } static struct platform_driver this_driver = { .probe = nt35560_probe, .driver = { .name = "mddi_nt35560_fwvga", }, }; static struct msm_fb_panel_data nt35560_panel_data = { .on = nt35560_lcd_on, .off = nt35560_lcd_off, .set_backlight = pwm_set_backlight, #ifdef CONFIG_FB_DYNAMIC_GAMMA .set_dynamic_gamma = nt35560_set_dynamic_gamma, #endif #ifdef CONFIG_FB_AUTO_CABC .config_cabc = nt35560_config_cabc, #endif .set_cabc_brightness = nt35560_set_cabc_brightness, }; static struct platform_device this_device = { .name = "mddi_nt35560_fwvga", .id = 0, .dev = { .platform_data = &nt35560_panel_data, } }; static int __init nt35560_init(void) { int ret = 0; struct msm_panel_info *pinfo = NULL; bpp_type bpp = MDDI_OUT_16BPP; hw_lcd_interface_type mddi_port_type = get_hw_lcd_interface_type(); lcd_panel_fwvga=get_lcd_panel_type(); if(LCD_NT35560_TOSHIBA_FWVGA != lcd_panel_fwvga) { return 0; } LCD_DEBUG("%s:------nt35560_init------\n",__func__); /* Select which bpp accroding MDDI port 
type */ if(LCD_IS_MDDI_TYPE1 == mddi_port_type) { bpp = MDDI_OUT_16BPP; } else if(LCD_IS_MDDI_TYPE2 == mddi_port_type) { bpp = MDDI_OUT_24BPP; } else { bpp = MDDI_OUT_16BPP; } ret = platform_driver_register(&this_driver); if (!ret) { pinfo = &nt35560_panel_data.panel_info; pinfo->xres = 480; pinfo->yres = 854; pinfo->type = MDDI_PANEL; pinfo->pdest = DISPLAY_1; pinfo->mddi.vdopkt = MDDI_DEFAULT_PRIM_PIX_ATTR; pinfo->wait_cycle = 0; pinfo->bpp = (uint32)bpp; pinfo->fb_num = 2; pinfo->clk_rate = 192000000; pinfo->clk_min = 192000000; pinfo->clk_max = 192000000; pinfo->lcd.vsync_enable = TRUE; pinfo->lcd.refx100 = 6000; pinfo->lcd.v_back_porch = 0; pinfo->lcd.v_front_porch = 0; pinfo->lcd.v_pulse_width = 22; pinfo->lcd.hw_vsync_mode = TRUE; pinfo->lcd.vsync_notifier_period = 0; pinfo->bl_max = 255; ret = platform_device_register(&this_device); if (ret) { platform_driver_unregister(&this_driver); } } return ret; } module_init(nt35560_init);
gpl-2.0
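The nt35560 driver above is mostly data: tables of {register, value, delay} triples that process_mddi_table() walks in order. Below is a minimal sketch of such a walker, assuming the field layout implied by the table initializers; mddi_write() and the delay stub are hypothetical placeholders for the platform's MDDI register write and msleep primitives.

#include <stdio.h>

/* Mirrors the shape of the driver's tables: a register address, the
 * value to write, and a post-write delay (inferred from initializers
 * such as {0x1100,0x00,120}). */
struct sequence {
	unsigned int reg;
	unsigned int value;
	unsigned int delay_ms;
};

static void mddi_write(unsigned int reg, unsigned int value)
{
	printf("write 0x%04X <- 0x%02X\n", reg, value);
}

static void delay_stub(unsigned int ms)
{
	if (ms)
		printf("delay %u ms\n", ms);
}

/* Walk the table in order: write each register, then honor its delay. */
static void process_sequence(const struct sequence *seq, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		mddi_write(seq[i].reg, seq[i].value);
		delay_stub(seq[i].delay_ms);
	}
}

int main(void)
{
	/* Same shape as nt35560_fwvga_standby_exit_table: exit sleep
	 * (0x1100), wait 120 ms, then restore the CABC frequency
	 * register (0x22C0). */
	static const struct sequence standby_exit[] = {
		{ 0x1100, 0x00, 120 },
		{ 0x22C0, 0xFF, 0 },
	};

	process_sequence(standby_exit, 2);
	return 0;
}

This is why the driver can swap whole panel behaviors (gamma curves, CABC modes, standby entry/exit) just by pointing the walker at a different table.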
clemsyn/asusOC
net/bluetooth/l2cap.c
49
112338
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org> Copyright (C) 2010 Google Inc. Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* Bluetooth L2CAP core and sockets. */ #include <linux/module.h> #include <linux/types.h> #include <linux/capability.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/fcntl.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/socket.h> #include <linux/skbuff.h> #include <linux/list.h> #include <linux/device.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/uaccess.h> #include <linux/crc16.h> #include <net/sock.h> #include <asm/system.h> #include <asm/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> #define VERSION "2.15" static int disable_ertm = 0; static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN; static u8 l2cap_fixed_chan[8] = { 0x02, }; static const struct proto_ops l2cap_sock_ops; static struct workqueue_struct *_busy_wq; static struct bt_sock_list l2cap_sk_list = { .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock) }; static void l2cap_busy_work(struct work_struct *work); static void __l2cap_sock_close(struct sock *sk, int reason); static void l2cap_sock_close(struct sock *sk); static void l2cap_sock_kill(struct sock *sk); static int l2cap_build_conf_req(struct sock *sk, void *data); static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code, u8 ident, u16 dlen, void *data); static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb); /* ---- L2CAP timers ---- */ static void l2cap_sock_timeout(unsigned long arg) { struct sock *sk = (struct sock *) arg; int reason; BT_DBG("sock %p state %d", sk, sk->sk_state); bh_lock_sock(sk); if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG) reason = ECONNREFUSED; else if (sk->sk_state == BT_CONNECT && l2cap_pi(sk)->sec_level != BT_SECURITY_SDP) reason = ECONNREFUSED; else reason = ETIMEDOUT; __l2cap_sock_close(sk, reason); bh_unlock_sock(sk); l2cap_sock_kill(sk); sock_put(sk); } static void l2cap_sock_set_timer(struct sock *sk, long timeout) { BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout); sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout); } static void l2cap_sock_clear_timer(struct sock *sk) { BT_DBG("sock %p state %d", sk, sk->sk_state); sk_stop_timer(sk, &sk->sk_timer); } /* ---- L2CAP channels ---- */ static struct sock *__l2cap_get_chan_by_dcid(struct 
l2cap_chan_list *l, u16 cid) { struct sock *s; for (s = l->head; s; s = l2cap_pi(s)->next_c) { if (l2cap_pi(s)->dcid == cid) break; } return s; } static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid) { struct sock *s; for (s = l->head; s; s = l2cap_pi(s)->next_c) { if (l2cap_pi(s)->scid == cid) break; } return s; } /* Find channel with given SCID. * Returns locked socket */ static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid) { struct sock *s; read_lock(&l->lock); s = __l2cap_get_chan_by_scid(l, cid); if (s) bh_lock_sock(s); read_unlock(&l->lock); return s; } static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident) { struct sock *s; for (s = l->head; s; s = l2cap_pi(s)->next_c) { if (l2cap_pi(s)->ident == ident) break; } return s; } static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident) { struct sock *s; read_lock(&l->lock); s = __l2cap_get_chan_by_ident(l, ident); if (s) bh_lock_sock(s); read_unlock(&l->lock); return s; } static u16 l2cap_alloc_cid(struct l2cap_chan_list *l) { u16 cid = L2CAP_CID_DYN_START; for (; cid < L2CAP_CID_DYN_END; cid++) { if (!__l2cap_get_chan_by_scid(l, cid)) return cid; } return 0; } static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk) { sock_hold(sk); if (l->head) l2cap_pi(l->head)->prev_c = sk; l2cap_pi(sk)->next_c = l->head; l2cap_pi(sk)->prev_c = NULL; l->head = sk; } static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk) { struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c; write_lock_bh(&l->lock); if (sk == l->head) l->head = next; if (next) l2cap_pi(next)->prev_c = prev; if (prev) l2cap_pi(prev)->next_c = next; write_unlock_bh(&l->lock); __sock_put(sk); } static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent) { struct l2cap_chan_list *l = &conn->chan_list; BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid); conn->disc_reason = 0x13; l2cap_pi(sk)->conn = conn; if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) { /* Alloc CID for connection-oriented socket */ l2cap_pi(sk)->scid = l2cap_alloc_cid(l); } else if (sk->sk_type == SOCK_DGRAM) { /* Connectionless socket */ l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS; l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS; l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU; } else { /* Raw socket can send/recv signalling messages only */ l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING; l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING; l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU; } __l2cap_chan_link(l, sk); if (parent) bt_accept_enqueue(parent, sk); } /* Delete channel. * Must be called on the locked socket. 
*/ static void l2cap_chan_del(struct sock *sk, int err) { struct l2cap_conn *conn = l2cap_pi(sk)->conn; struct sock *parent = bt_sk(sk)->parent; l2cap_sock_clear_timer(sk); BT_DBG("sk %p, conn %p, err %d", sk, conn, err); if (conn) { /* Unlink from channel list */ l2cap_chan_unlink(&conn->chan_list, sk); l2cap_pi(sk)->conn = NULL; hci_conn_put(conn->hcon); } sk->sk_state = BT_CLOSED; sock_set_flag(sk, SOCK_ZAPPED); if (err) sk->sk_err = err; if (parent) { bt_accept_unlink(sk); parent->sk_data_ready(parent, 0); } else sk->sk_state_change(sk); skb_queue_purge(TX_QUEUE(sk)); if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) { struct srej_list *l, *tmp; del_timer(&l2cap_pi(sk)->retrans_timer); del_timer(&l2cap_pi(sk)->monitor_timer); del_timer(&l2cap_pi(sk)->ack_timer); skb_queue_purge(SREJ_QUEUE(sk)); skb_queue_purge(BUSY_QUEUE(sk)); list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) { list_del(&l->list); kfree(l); } } } /* Service level security */ static inline int l2cap_check_security(struct sock *sk) { struct l2cap_conn *conn = l2cap_pi(sk)->conn; __u8 auth_type; if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) { if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH) auth_type = HCI_AT_NO_BONDING_MITM; else auth_type = HCI_AT_NO_BONDING; if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW) l2cap_pi(sk)->sec_level = BT_SECURITY_SDP; } else { switch (l2cap_pi(sk)->sec_level) { case BT_SECURITY_HIGH: auth_type = HCI_AT_GENERAL_BONDING_MITM; break; case BT_SECURITY_MEDIUM: auth_type = HCI_AT_GENERAL_BONDING; break; default: auth_type = HCI_AT_NO_BONDING; break; } } return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level, auth_type); } static inline u8 l2cap_get_ident(struct l2cap_conn *conn) { u8 id; /* Get next available identifier. * 1 - 128 are used by the kernel. * 129 - 199 are reserved. * 200 - 254 are used by utilities like l2ping, etc. 
*/ spin_lock_bh(&conn->lock); if (++conn->tx_ident > 128) conn->tx_ident = 1; id = conn->tx_ident; spin_unlock_bh(&conn->lock); return id; } static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data) { struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data); u8 flags; BT_DBG("code 0x%2.2x", code); if (!skb) return; if (lmp_no_flush_capable(conn->hcon->hdev)) flags = ACL_START_NO_FLUSH; else flags = ACL_START; bt_cb(skb)->force_active = 1; hci_send_acl(conn->hcon, skb, flags); } static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control) { struct sk_buff *skb; struct l2cap_hdr *lh; struct l2cap_conn *conn = pi->conn; struct sock *sk = (struct sock *)pi; int count, hlen = L2CAP_HDR_SIZE + 2; if (sk->sk_state != BT_CONNECTED) return; if (pi->fcs == L2CAP_FCS_CRC16) hlen += 2; BT_DBG("pi %p, control 0x%2.2x", pi, control); count = min_t(unsigned int, conn->mtu, hlen); control |= L2CAP_CTRL_FRAME_TYPE; if (pi->conn_state & L2CAP_CONN_SEND_FBIT) { control |= L2CAP_CTRL_FINAL; pi->conn_state &= ~L2CAP_CONN_SEND_FBIT; } if (pi->conn_state & L2CAP_CONN_SEND_PBIT) { control |= L2CAP_CTRL_POLL; pi->conn_state &= ~L2CAP_CONN_SEND_PBIT; } skb = bt_skb_alloc(count, GFP_ATOMIC); if (!skb) return; lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE); lh->cid = cpu_to_le16(pi->dcid); put_unaligned_le16(control, skb_put(skb, 2)); if (pi->fcs == L2CAP_FCS_CRC16) { u16 fcs = crc16(0, (u8 *)lh, count - 2); put_unaligned_le16(fcs, skb_put(skb, 2)); } bt_cb(skb)->force_active = l2cap_pi(sk)->force_active; hci_send_acl(pi->conn->hcon, skb, 0); } static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control) { if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) { control |= L2CAP_SUPER_RCV_NOT_READY; pi->conn_state |= L2CAP_CONN_RNR_SENT; } else control |= L2CAP_SUPER_RCV_READY; control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; l2cap_send_sframe(pi, control); } static inline int __l2cap_no_conn_pending(struct sock *sk) { return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND); } static void l2cap_do_start(struct sock *sk) { struct l2cap_conn *conn = l2cap_pi(sk)->conn; if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) { if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) return; if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) { struct l2cap_conn_req req; req.scid = cpu_to_le16(l2cap_pi(sk)->scid); req.psm = l2cap_pi(sk)->psm; l2cap_pi(sk)->ident = l2cap_get_ident(conn); l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND; l2cap_send_cmd(conn, l2cap_pi(sk)->ident, L2CAP_CONN_REQ, sizeof(req), &req); } } else { struct l2cap_info_req req; req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; conn->info_ident = l2cap_get_ident(conn); mod_timer(&conn->info_timer, jiffies + msecs_to_jiffies(L2CAP_INFO_TIMEOUT)); l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ, sizeof(req), &req); } } static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask) { u32 local_feat_mask = l2cap_feat_mask; if (!disable_ertm) local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING; switch (mode) { case L2CAP_MODE_ERTM: return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask; case L2CAP_MODE_STREAMING: return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask; default: return 0x00; } } static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err) { struct l2cap_disconn_req req; if (!conn) return; 
skb_queue_purge(TX_QUEUE(sk)); if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) { del_timer(&l2cap_pi(sk)->retrans_timer); del_timer(&l2cap_pi(sk)->monitor_timer); del_timer(&l2cap_pi(sk)->ack_timer); } req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid); req.scid = cpu_to_le16(l2cap_pi(sk)->scid); l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ, sizeof(req), &req); sk->sk_state = BT_DISCONN; sk->sk_err = err; } /* ---- L2CAP connections ---- */ static void l2cap_conn_start(struct l2cap_conn *conn) { struct l2cap_chan_list *l = &conn->chan_list; struct sock_del_list del, *tmp1, *tmp2; struct sock *sk; BT_DBG("conn %p", conn); INIT_LIST_HEAD(&del.list); read_lock(&l->lock); for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { bh_lock_sock(sk); if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM) { bh_unlock_sock(sk); continue; } if (sk->sk_state == BT_CONNECT) { struct l2cap_conn_req req; if (!l2cap_check_security(sk) || !__l2cap_no_conn_pending(sk)) { bh_unlock_sock(sk); continue; } if (!l2cap_mode_supported(l2cap_pi(sk)->mode, conn->feat_mask) && l2cap_pi(sk)->conf_state & L2CAP_CONF_STATE2_DEVICE) { tmp1 = kzalloc(sizeof(struct sock_del_list), GFP_ATOMIC); tmp1->sk = sk; list_add_tail(&tmp1->list, &del.list); bh_unlock_sock(sk); continue; } req.scid = cpu_to_le16(l2cap_pi(sk)->scid); req.psm = l2cap_pi(sk)->psm; l2cap_pi(sk)->ident = l2cap_get_ident(conn); l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND; l2cap_send_cmd(conn, l2cap_pi(sk)->ident, L2CAP_CONN_REQ, sizeof(req), &req); } else if (sk->sk_state == BT_CONNECT2) { struct l2cap_conn_rsp rsp; char buf[128]; rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); if (l2cap_check_security(sk)) { if (bt_sk(sk)->defer_setup) { struct sock *parent = bt_sk(sk)->parent; rsp.result = cpu_to_le16(L2CAP_CR_PEND); rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND); if (parent) parent->sk_data_ready(parent, 0); } else { sk->sk_state = BT_CONFIG; rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); } } else { rsp.result = cpu_to_le16(L2CAP_CR_PEND); rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND); } l2cap_send_cmd(conn, l2cap_pi(sk)->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT || rsp.result != L2CAP_CR_SUCCESS) { bh_unlock_sock(sk); continue; } l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT; l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, l2cap_build_conf_req(sk, buf), buf); l2cap_pi(sk)->num_conf_req++; } bh_unlock_sock(sk); } read_unlock(&l->lock); list_for_each_entry_safe(tmp1, tmp2, &del.list, list) { bh_lock_sock(tmp1->sk); __l2cap_sock_close(tmp1->sk, ECONNRESET); bh_unlock_sock(tmp1->sk); list_del(&tmp1->list); kfree(tmp1); } } static void l2cap_conn_ready(struct l2cap_conn *conn) { struct l2cap_chan_list *l = &conn->chan_list; struct sock *sk; BT_DBG("conn %p", conn); read_lock(&l->lock); for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { bh_lock_sock(sk); if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM) { l2cap_sock_clear_timer(sk); sk->sk_state = BT_CONNECTED; sk->sk_state_change(sk); } else if (sk->sk_state == BT_CONNECT) l2cap_do_start(sk); bh_unlock_sock(sk); } read_unlock(&l->lock); } /* Notify sockets that we cannot guaranty reliability anymore */ static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err) { struct l2cap_chan_list *l = &conn->chan_list; struct sock *sk; BT_DBG("conn %p", conn); read_lock(&l->lock); for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { if 
(l2cap_pi(sk)->force_reliable) sk->sk_err = err; } read_unlock(&l->lock); } static void l2cap_info_timeout(unsigned long arg) { struct l2cap_conn *conn = (void *) arg; conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; conn->info_ident = 0; l2cap_conn_start(conn); } static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) { struct l2cap_conn *conn = hcon->l2cap_data; if (conn || status) return conn; conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC); if (!conn) return NULL; hcon->l2cap_data = conn; conn->hcon = hcon; BT_DBG("hcon %p conn %p", hcon, conn); conn->mtu = hcon->hdev->acl_mtu; conn->src = &hcon->hdev->bdaddr; conn->dst = &hcon->dst; conn->feat_mask = 0; spin_lock_init(&conn->lock); rwlock_init(&conn->chan_list.lock); setup_timer(&conn->info_timer, l2cap_info_timeout, (unsigned long) conn); conn->disc_reason = 0x13; return conn; } static void l2cap_conn_del(struct hci_conn *hcon, int err) { struct l2cap_conn *conn = hcon->l2cap_data; struct sock *sk; if (!conn) return; BT_DBG("hcon %p conn %p, err %d", hcon, conn, err); kfree_skb(conn->rx_skb); /* Kill channels */ while ((sk = conn->chan_list.head)) { bh_lock_sock(sk); l2cap_chan_del(sk, err); bh_unlock_sock(sk); l2cap_sock_kill(sk); } if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) del_timer_sync(&conn->info_timer); hcon->l2cap_data = NULL; kfree(conn); } static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent) { struct l2cap_chan_list *l = &conn->chan_list; write_lock_bh(&l->lock); __l2cap_chan_add(conn, sk, parent); write_unlock_bh(&l->lock); } /* ---- Socket interface ---- */ static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src) { struct sock *sk; struct hlist_node *node; sk_for_each(sk, node, &l2cap_sk_list.head) if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src)) goto found; sk = NULL; found: return sk; } /* Find socket with psm and source bdaddr. * Returns closest match. */ static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src) { struct sock *sk = NULL, *sk1 = NULL; struct hlist_node *node; sk_for_each(sk, node, &l2cap_sk_list.head) { if (state && sk->sk_state != state) continue; if (l2cap_pi(sk)->psm == psm) { /* Exact match. */ if (!bacmp(&bt_sk(sk)->src, src)) break; /* Closest match */ if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) sk1 = sk; } } return node ? sk : sk1; } /* Find socket with given address (psm, src). * Returns locked socket */ static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src) { struct sock *s; read_lock(&l2cap_sk_list.lock); s = __l2cap_get_sock_by_psm(state, psm, src); if (s) bh_lock_sock(s); read_unlock(&l2cap_sk_list.lock); return s; } static void l2cap_sock_destruct(struct sock *sk) { BT_DBG("sk %p", sk); skb_queue_purge(&sk->sk_receive_queue); skb_queue_purge(&sk->sk_write_queue); } static void l2cap_sock_cleanup_listen(struct sock *parent) { struct sock *sk; BT_DBG("parent %p", parent); /* Close not yet accepted channels */ while ((sk = bt_accept_dequeue(parent, NULL))) l2cap_sock_close(sk); parent->sk_state = BT_CLOSED; sock_set_flag(parent, SOCK_ZAPPED); } /* Kill socket (only if zapped and orphan) * Must be called on unlocked socket. 
*/ static void l2cap_sock_kill(struct sock *sk) { if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) return; BT_DBG("sk %p state %d", sk, sk->sk_state); /* Kill poor orphan */ bt_sock_unlink(&l2cap_sk_list, sk); sock_set_flag(sk, SOCK_DEAD); sock_put(sk); } static void __l2cap_sock_close(struct sock *sk, int reason) { BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket); switch (sk->sk_state) { case BT_LISTEN: l2cap_sock_cleanup_listen(sk); break; case BT_CONNECTED: case BT_CONFIG: if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) { struct l2cap_conn *conn = l2cap_pi(sk)->conn; l2cap_sock_set_timer(sk, sk->sk_sndtimeo); l2cap_send_disconn_req(conn, sk, reason); } else l2cap_chan_del(sk, reason); break; case BT_CONNECT2: if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) { struct l2cap_conn *conn = l2cap_pi(sk)->conn; struct l2cap_conn_rsp rsp; __u16 result; if (bt_sk(sk)->defer_setup) result = L2CAP_CR_SEC_BLOCK; else result = L2CAP_CR_BAD_PSM; rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); rsp.result = cpu_to_le16(result); rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); l2cap_send_cmd(conn, l2cap_pi(sk)->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); } else l2cap_chan_del(sk, reason); break; case BT_CONNECT: case BT_DISCONN: l2cap_chan_del(sk, reason); break; default: sock_set_flag(sk, SOCK_ZAPPED); break; } } /* Must be called on unlocked socket. */ static void l2cap_sock_close(struct sock *sk) { l2cap_sock_clear_timer(sk); lock_sock(sk); __l2cap_sock_close(sk, ECONNRESET); release_sock(sk); l2cap_sock_kill(sk); } static void l2cap_sock_init(struct sock *sk, struct sock *parent) { struct l2cap_pinfo *pi = l2cap_pi(sk); BT_DBG("sk %p", sk); if (parent) { sk->sk_type = parent->sk_type; bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup; pi->imtu = l2cap_pi(parent)->imtu; pi->omtu = l2cap_pi(parent)->omtu; pi->conf_state = l2cap_pi(parent)->conf_state; pi->mode = l2cap_pi(parent)->mode; pi->fcs = l2cap_pi(parent)->fcs; pi->max_tx = l2cap_pi(parent)->max_tx; pi->tx_win = l2cap_pi(parent)->tx_win; pi->sec_level = l2cap_pi(parent)->sec_level; pi->role_switch = l2cap_pi(parent)->role_switch; pi->force_reliable = l2cap_pi(parent)->force_reliable; pi->flushable = l2cap_pi(parent)->flushable; pi->force_active = l2cap_pi(parent)->force_active; } else { pi->imtu = L2CAP_DEFAULT_MTU; pi->omtu = 0; if (!disable_ertm && sk->sk_type == SOCK_STREAM) { pi->mode = L2CAP_MODE_ERTM; pi->conf_state |= L2CAP_CONF_STATE2_DEVICE; } else { pi->mode = L2CAP_MODE_BASIC; } pi->max_tx = L2CAP_DEFAULT_MAX_TX; pi->fcs = L2CAP_FCS_CRC16; pi->tx_win = L2CAP_DEFAULT_TX_WINDOW; pi->sec_level = BT_SECURITY_LOW; pi->role_switch = 0; pi->force_reliable = 0; pi->flushable = 0; pi->force_active = 1; } /* Default config options */ pi->conf_len = 0; pi->flush_to = L2CAP_DEFAULT_FLUSH_TO; skb_queue_head_init(TX_QUEUE(sk)); skb_queue_head_init(SREJ_QUEUE(sk)); skb_queue_head_init(BUSY_QUEUE(sk)); INIT_LIST_HEAD(SREJ_LIST(sk)); } static struct proto l2cap_proto = { .name = "L2CAP", .owner = THIS_MODULE, .obj_size = sizeof(struct l2cap_pinfo) }; static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio) { struct sock *sk; sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto); if (!sk) return NULL; sock_init_data(sock, sk); INIT_LIST_HEAD(&bt_sk(sk)->accept_q); sk->sk_destruct = l2cap_sock_destruct; sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT); sock_reset_flag(sk, SOCK_ZAPPED); sk->sk_protocol = proto; 
sk->sk_state = BT_OPEN; setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk); bt_sock_link(&l2cap_sk_list, sk); return sk; } static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; BT_DBG("sock %p", sock); sock->state = SS_UNCONNECTED; if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM && sock->type != SOCK_DGRAM && sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW)) return -EPERM; sock->ops = &l2cap_sock_ops; sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC); if (!sk) return -ENOMEM; l2cap_sock_init(sk, NULL); return 0; } static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) { struct sock *sk = sock->sk; struct sockaddr_l2 la; int len, err = 0; BT_DBG("sk %p", sk); if (!addr || addr->sa_family != AF_BLUETOOTH) return -EINVAL; memset(&la, 0, sizeof(la)); len = min_t(unsigned int, sizeof(la), alen); memcpy(&la, addr, len); if (la.l2_cid) return -EINVAL; lock_sock(sk); if (sk->sk_state != BT_OPEN) { err = -EBADFD; goto done; } if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) { err = -EACCES; goto done; } write_lock_bh(&l2cap_sk_list.lock); if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) { err = -EADDRINUSE; } else { /* Save source address */ bacpy(&bt_sk(sk)->src, &la.l2_bdaddr); l2cap_pi(sk)->psm = la.l2_psm; l2cap_pi(sk)->sport = la.l2_psm; sk->sk_state = BT_BOUND; if (__le16_to_cpu(la.l2_psm) == 0x0001 || __le16_to_cpu(la.l2_psm) == 0x0003) l2cap_pi(sk)->sec_level = BT_SECURITY_SDP; } write_unlock_bh(&l2cap_sk_list.lock); done: release_sock(sk); return err; } static int l2cap_do_connect(struct sock *sk) { bdaddr_t *src = &bt_sk(sk)->src; bdaddr_t *dst = &bt_sk(sk)->dst; struct l2cap_conn *conn; struct hci_conn *hcon; struct hci_dev *hdev; __u8 auth_type; int err; BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm); hdev = hci_get_route(dst, src); if (!hdev) return -EHOSTUNREACH; hci_dev_lock_bh(hdev); err = -ENOMEM; if (sk->sk_type == SOCK_RAW) { switch (l2cap_pi(sk)->sec_level) { case BT_SECURITY_HIGH: auth_type = HCI_AT_DEDICATED_BONDING_MITM; break; case BT_SECURITY_MEDIUM: auth_type = HCI_AT_DEDICATED_BONDING; break; default: auth_type = HCI_AT_NO_BONDING; break; } } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) { if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH) auth_type = HCI_AT_NO_BONDING_MITM; else auth_type = HCI_AT_NO_BONDING; if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW) l2cap_pi(sk)->sec_level = BT_SECURITY_SDP; } else { switch (l2cap_pi(sk)->sec_level) { case BT_SECURITY_HIGH: auth_type = HCI_AT_GENERAL_BONDING_MITM; break; case BT_SECURITY_MEDIUM: auth_type = HCI_AT_GENERAL_BONDING; break; default: auth_type = HCI_AT_NO_BONDING; break; } } hcon = hci_connect(hdev, ACL_LINK, 0, dst, l2cap_pi(sk)->sec_level, auth_type); if (!hcon) goto done; conn = l2cap_conn_add(hcon, 0); if (!conn) { hci_conn_put(hcon); goto done; } err = 0; /* Update source addr of the socket */ bacpy(src, conn->src); l2cap_chan_add(conn, sk, NULL); sk->sk_state = BT_CONNECT; l2cap_sock_set_timer(sk, sk->sk_sndtimeo); if (hcon->state == BT_CONNECTED) { if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM) { l2cap_sock_clear_timer(sk); sk->sk_state = BT_CONNECTED; } else l2cap_do_start(sk); } done: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); return err; } static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int 
flags) { struct sock *sk = sock->sk; struct sockaddr_l2 la; int len, err = 0; BT_DBG("sk %p", sk); if (!addr || alen < sizeof(addr->sa_family) || addr->sa_family != AF_BLUETOOTH) return -EINVAL; memset(&la, 0, sizeof(la)); len = min_t(unsigned int, sizeof(la), alen); memcpy(&la, addr, len); if (la.l2_cid) return -EINVAL; lock_sock(sk); if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) && !la.l2_psm) { err = -EINVAL; goto done; } switch (l2cap_pi(sk)->mode) { case L2CAP_MODE_BASIC: break; case L2CAP_MODE_ERTM: case L2CAP_MODE_STREAMING: if (!disable_ertm) break; /* fall through */ default: err = -ENOTSUPP; goto done; } switch (sk->sk_state) { case BT_CONNECT: case BT_CONNECT2: case BT_CONFIG: /* Already connecting */ goto wait; case BT_CONNECTED: /* Already connected */ err = -EISCONN; goto done; case BT_OPEN: case BT_BOUND: /* Can connect */ break; default: err = -EBADFD; goto done; } /* Set destination address and psm */ bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr); l2cap_pi(sk)->psm = la.l2_psm; err = l2cap_do_connect(sk); if (err) goto done; wait: err = bt_sock_wait_state(sk, BT_CONNECTED, sock_sndtimeo(sk, flags & O_NONBLOCK)); done: release_sock(sk); return err; } static int l2cap_sock_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; int err = 0; BT_DBG("sk %p backlog %d", sk, backlog); lock_sock(sk); if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM) || sk->sk_state != BT_BOUND) { err = -EBADFD; goto done; } switch (l2cap_pi(sk)->mode) { case L2CAP_MODE_BASIC: break; case L2CAP_MODE_ERTM: case L2CAP_MODE_STREAMING: if (!disable_ertm) break; /* fall through */ default: err = -ENOTSUPP; goto done; } if (!l2cap_pi(sk)->psm) { bdaddr_t *src = &bt_sk(sk)->src; u16 psm; err = -EINVAL; write_lock_bh(&l2cap_sk_list.lock); for (psm = 0x1001; psm < 0x1100; psm += 2) if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) { l2cap_pi(sk)->psm = cpu_to_le16(psm); l2cap_pi(sk)->sport = cpu_to_le16(psm); err = 0; break; } write_unlock_bh(&l2cap_sk_list.lock); if (err < 0) goto done; } sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; sk->sk_state = BT_LISTEN; done: release_sock(sk); return err; } static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags) { DECLARE_WAITQUEUE(wait, current); struct sock *sk = sock->sk, *nsk; long timeo; int err = 0; lock_sock_nested(sk, SINGLE_DEPTH_NESTING); if (sk->sk_state != BT_LISTEN) { err = -EBADFD; goto done; } timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); BT_DBG("sk %p timeo %ld", sk, timeo); /* Wait for an incoming connection. (wake-one). 
*/ add_wait_queue_exclusive(sk_sleep(sk), &wait); while (!(nsk = bt_accept_dequeue(sk, newsock))) { set_current_state(TASK_INTERRUPTIBLE); if (!timeo) { err = -EAGAIN; break; } release_sock(sk); timeo = schedule_timeout(timeo); lock_sock_nested(sk, SINGLE_DEPTH_NESTING); if (sk->sk_state != BT_LISTEN) { err = -EBADFD; break; } if (signal_pending(current)) { err = sock_intr_errno(timeo); break; } } set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); if (err) goto done; newsock->state = SS_CONNECTED; BT_DBG("new socket %p", nsk); done: release_sock(sk); return err; } static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer) { struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr; struct sock *sk = sock->sk; BT_DBG("sock %p, sk %p", sock, sk); addr->sa_family = AF_BLUETOOTH; *len = sizeof(struct sockaddr_l2); if (peer) { la->l2_psm = l2cap_pi(sk)->psm; bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst); la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid); } else { la->l2_psm = l2cap_pi(sk)->sport; bacpy(&la->l2_bdaddr, &bt_sk(sk)->src); la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid); } return 0; } static int __l2cap_wait_ack(struct sock *sk) { DECLARE_WAITQUEUE(wait, current); int err = 0; int timeo = HZ/5; add_wait_queue(sk_sleep(sk), &wait); while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) { set_current_state(TASK_INTERRUPTIBLE); if (!timeo) timeo = HZ/5; if (signal_pending(current)) { err = sock_intr_errno(timeo); break; } release_sock(sk); timeo = schedule_timeout(timeo); lock_sock(sk); err = sock_error(sk); if (err) break; } set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); return err; } static void l2cap_monitor_timeout(unsigned long arg) { struct sock *sk = (void *) arg; BT_DBG("sk %p", sk); bh_lock_sock(sk); if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) { l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED); bh_unlock_sock(sk); return; } l2cap_pi(sk)->retry_count++; __mod_monitor_timer(); l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL); bh_unlock_sock(sk); } static void l2cap_retrans_timeout(unsigned long arg) { struct sock *sk = (void *) arg; BT_DBG("sk %p", sk); bh_lock_sock(sk); l2cap_pi(sk)->retry_count = 1; __mod_monitor_timer(); l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F; l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL); bh_unlock_sock(sk); } static void l2cap_drop_acked_frames(struct sock *sk) { struct sk_buff *skb; while ((skb = skb_peek(TX_QUEUE(sk))) && l2cap_pi(sk)->unacked_frames) { if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq) break; skb = skb_dequeue(TX_QUEUE(sk)); kfree_skb(skb); l2cap_pi(sk)->unacked_frames--; } if (!l2cap_pi(sk)->unacked_frames) del_timer(&l2cap_pi(sk)->retrans_timer); } static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb) { struct l2cap_pinfo *pi = l2cap_pi(sk); struct hci_conn *hcon = pi->conn->hcon; u16 flags; BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len); if (lmp_no_flush_capable(hcon->hdev) && !l2cap_pi(sk)->flushable) flags = ACL_START_NO_FLUSH; else flags = ACL_START; bt_cb(skb)->force_active = pi->force_active; hci_send_acl(hcon, skb, flags); } static void l2cap_streaming_send(struct sock *sk) { struct sk_buff *skb; struct l2cap_pinfo *pi = l2cap_pi(sk); u16 control, fcs; while ((skb = skb_dequeue(TX_QUEUE(sk)))) { control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE); control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT; put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE); if (pi->fcs == 
L2CAP_FCS_CRC16) { fcs = crc16(0, (u8 *)skb->data, skb->len - 2); put_unaligned_le16(fcs, skb->data + skb->len - 2); } l2cap_do_send(sk, skb); pi->next_tx_seq = (pi->next_tx_seq + 1) % 64; } } static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq) { struct l2cap_pinfo *pi = l2cap_pi(sk); struct sk_buff *skb, *tx_skb; u16 control, fcs; skb = skb_peek(TX_QUEUE(sk)); if (!skb) return; do { if (bt_cb(skb)->tx_seq == tx_seq) break; if (skb_queue_is_last(TX_QUEUE(sk), skb)) return; } while ((skb = skb_queue_next(TX_QUEUE(sk), skb))); if (pi->remote_max_tx && bt_cb(skb)->retries == pi->remote_max_tx) { l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED); return; } tx_skb = skb_clone(skb, GFP_ATOMIC); bt_cb(skb)->retries++; control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); if (pi->conn_state & L2CAP_CONN_SEND_FBIT) { control |= L2CAP_CTRL_FINAL; pi->conn_state &= ~L2CAP_CONN_SEND_FBIT; } control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT) | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT); put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE); if (pi->fcs == L2CAP_FCS_CRC16) { fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2); put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2); } l2cap_do_send(sk, tx_skb); } static int l2cap_ertm_send(struct sock *sk) { struct sk_buff *skb, *tx_skb; struct l2cap_pinfo *pi = l2cap_pi(sk); u16 control, fcs; int nsent = 0; if (sk->sk_state != BT_CONNECTED) return -ENOTCONN; while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) { if (pi->remote_max_tx && bt_cb(skb)->retries == pi->remote_max_tx) { l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED); break; } tx_skb = skb_clone(skb, GFP_ATOMIC); bt_cb(skb)->retries++; control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); control &= L2CAP_CTRL_SAR; if (pi->conn_state & L2CAP_CONN_SEND_FBIT) { control |= L2CAP_CTRL_FINAL; pi->conn_state &= ~L2CAP_CONN_SEND_FBIT; } control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT) | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT); put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE); if (pi->fcs == L2CAP_FCS_CRC16) { fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2); put_unaligned_le16(fcs, skb->data + tx_skb->len - 2); } l2cap_do_send(sk, tx_skb); __mod_retrans_timer(); bt_cb(skb)->tx_seq = pi->next_tx_seq; pi->next_tx_seq = (pi->next_tx_seq + 1) % 64; pi->unacked_frames++; pi->frames_sent++; if (skb_queue_is_last(TX_QUEUE(sk), skb)) sk->sk_send_head = NULL; else sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb); nsent++; } return nsent; } static int l2cap_retransmit_frames(struct sock *sk) { struct l2cap_pinfo *pi = l2cap_pi(sk); int ret; if (!skb_queue_empty(TX_QUEUE(sk))) sk->sk_send_head = TX_QUEUE(sk)->next; pi->next_tx_seq = pi->expected_ack_seq; ret = l2cap_ertm_send(sk); return ret; } static void l2cap_send_ack(struct l2cap_pinfo *pi) { struct sock *sk = (struct sock *)pi; u16 control = 0; control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) { control |= L2CAP_SUPER_RCV_NOT_READY; pi->conn_state |= L2CAP_CONN_RNR_SENT; l2cap_send_sframe(pi, control); return; } if (l2cap_ertm_send(sk) > 0) return; control |= L2CAP_SUPER_RCV_READY; l2cap_send_sframe(pi, control); } static void l2cap_send_srejtail(struct sock *sk) { struct srej_list *tail; u16 control; control = L2CAP_SUPER_SELECT_REJECT; control |= L2CAP_CTRL_FINAL; tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list); control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT; l2cap_send_sframe(l2cap_pi(sk), control); } static inline int 
l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb) { struct l2cap_conn *conn = l2cap_pi(sk)->conn; struct sk_buff **frag; int err, sent = 0; if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) return -EFAULT; sent += count; len -= count; /* Continuation fragments (no L2CAP header) */ frag = &skb_shinfo(skb)->frag_list; while (len) { count = min_t(unsigned int, conn->mtu, len); *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err); if (!*frag) return -EFAULT; if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) return -EFAULT; sent += count; len -= count; frag = &(*frag)->next; } return sent; } static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len) { struct l2cap_conn *conn = l2cap_pi(sk)->conn; struct sk_buff *skb; int err, count, hlen = L2CAP_HDR_SIZE + 2; struct l2cap_hdr *lh; BT_DBG("sk %p len %d", sk, (int)len); count = min_t(unsigned int, (conn->mtu - hlen), len); skb = bt_skb_send_alloc(sk, count + hlen, msg->msg_flags & MSG_DONTWAIT, &err); if (!skb) return ERR_PTR(-ENOMEM); /* Create L2CAP header */ lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid); lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2)); err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb); if (unlikely(err < 0)) { kfree_skb(skb); return ERR_PTR(err); } return skb; } static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len) { struct l2cap_conn *conn = l2cap_pi(sk)->conn; struct sk_buff *skb; int err, count, hlen = L2CAP_HDR_SIZE; struct l2cap_hdr *lh; BT_DBG("sk %p len %d", sk, (int)len); count = min_t(unsigned int, (conn->mtu - hlen), len); skb = bt_skb_send_alloc(sk, count + hlen, msg->msg_flags & MSG_DONTWAIT, &err); if (!skb) return ERR_PTR(-ENOMEM); /* Create L2CAP header */ lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid); lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb); if (unlikely(err < 0)) { kfree_skb(skb); return ERR_PTR(err); } return skb; } static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen) { struct l2cap_conn *conn = l2cap_pi(sk)->conn; struct sk_buff *skb; int err, count, hlen = L2CAP_HDR_SIZE + 2; struct l2cap_hdr *lh; BT_DBG("sk %p len %d", sk, (int)len); if (!conn) return ERR_PTR(-ENOTCONN); if (sdulen) hlen += 2; if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) hlen += 2; count = min_t(unsigned int, (conn->mtu - hlen), len); skb = bt_skb_send_alloc(sk, count + hlen, msg->msg_flags & MSG_DONTWAIT, &err); if (!skb) return ERR_PTR(-ENOMEM); /* Create L2CAP header */ lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid); lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); put_unaligned_le16(control, skb_put(skb, 2)); if (sdulen) put_unaligned_le16(sdulen, skb_put(skb, 2)); err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb); if (unlikely(err < 0)) { kfree_skb(skb); return ERR_PTR(err); } if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) put_unaligned_le16(0, skb_put(skb, 2)); bt_cb(skb)->retries = 0; return skb; } static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len) { struct l2cap_pinfo *pi = l2cap_pi(sk); struct sk_buff *skb; struct sk_buff_head sar_queue; u16 control; size_t size 
static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->remote_mps;
	size += pi->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > pi->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = pi->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;

	return size;
}

static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_state != BT_CONNECTED) {
		err = -ENOTCONN;
		goto done;
	}

	/* Connectionless channel */
	if (sk->sk_type == SOCK_DGRAM) {
		skb = l2cap_create_connless_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
		} else {
			l2cap_do_send(sk, skb);
			err = len;
		}
		goto done;
	}

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > pi->omtu) {
			err = -EMSGSIZE;
			goto done;
		}

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto done;
		}

		l2cap_do_send(sk, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Entire SDU fits into one PDU */
		if (len <= pi->remote_mps) {
			control = L2CAP_SDU_UNSEGMENTED;
			skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
			if (IS_ERR(skb)) {
				err = PTR_ERR(skb);
				goto done;
			}
			__skb_queue_tail(TX_QUEUE(sk), skb);

			if (sk->sk_send_head == NULL)
				sk->sk_send_head = skb;

		} else {
			/* Segment SDU into multiple PDUs */
			err = l2cap_sar_segment_sdu(sk, msg, len);
			if (err < 0)
				goto done;
		}

		if (pi->mode == L2CAP_MODE_STREAMING) {
			l2cap_streaming_send(sk);
		} else {
			/* Both flags are bit tests, not a logical AND */
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->conn_state & L2CAP_CONN_WAIT_F)) {
				err = len;
				break;
			}
			err = l2cap_ertm_send(sk);
		}

		if (err >= 0)
			err = len;
		break;

	default:
		BT_DBG("bad state %1.1x", pi->mode);
		err = -EBADFD;
	}

done:
	release_sock(sk);
	return err;
}

static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
		struct l2cap_conn_rsp rsp;
		struct l2cap_conn *conn = l2cap_pi(sk)->conn;
		u8 buf[128];

		sk->sk_state = BT_CONFIG;

		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
			release_sock(sk);
			return 0;
		}

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;

		release_sock(sk);
		return 0;
	}

	release_sock(sk);

	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
}
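/* Legacy SOL_L2CAP socket options (L2CAP_OPTIONS, L2CAP_LM). Channel
 * parameters can only be changed while the channel is not connected.
 */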
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		if (sk->sk_state == BT_CONNECTED) {
			err = -EINVAL;
			break;
		}

		opts.imtu = l2cap_pi(sk)->imtu;
		opts.omtu = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode = l2cap_pi(sk)->mode;
		opts.fcs = l2cap_pi(sk)->fcs;
		opts.max_tx = l2cap_pi(sk)->max_tx;
		opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->mode = opts.mode;
		switch (l2cap_pi(sk)->mode) {
		case L2CAP_MODE_BASIC:
			l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
			break;
		case L2CAP_MODE_ERTM:
		case L2CAP_MODE_STREAMING:
			if (!disable_ertm)
				break;
			/* fall through */
		default:
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		l2cap_pi(sk)->fcs = opts.fcs;
		l2cap_pi(sk)->max_tx = opts.max_tx;
		l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		if (opt & L2CAP_LM_AUTH)
			l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
		if (opt & L2CAP_LM_ENCRYPT)
			l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
		if (opt & L2CAP_LM_SECURE)
			l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;

		l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
		l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
		l2cap_pi(sk)->flushable = (opt & L2CAP_LM_FLUSHABLE);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}

static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	struct bt_power pwr;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		sec.level = BT_SECURITY_LOW;

		len = min_t(unsigned int, sizeof(sec), optlen);
		if (copy_from_user((char *) &sec, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (sec.level < BT_SECURITY_LOW ||
					sec.level > BT_SECURITY_HIGH) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->sec_level = sec.level;
		break;

	case BT_DEFER_SETUP:
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		bt_sk(sk)->defer_setup = opt;
		break;

	case BT_POWER:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		pwr.force_active = 1;

		len = min_t(unsigned int, sizeof(pwr), optlen);
		if (copy_from_user((char *) &pwr, optval, len)) {
			err = -EFAULT;
			break;
		}
		l2cap_pi(sk)->force_active = pwr.force_active;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
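/* Legacy SOL_L2CAP getsockopt: reports the current channel options,
 * the link mode bits derived from sec_level, and connection info.
 */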
static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	struct l2cap_conninfo cinfo;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		opts.imtu = l2cap_pi(sk)->imtu;
		opts.omtu = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode = l2cap_pi(sk)->mode;
		opts.fcs = l2cap_pi(sk)->fcs;
		opts.max_tx = l2cap_pi(sk)->max_tx;
		opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;

		len = min_t(unsigned int, len, sizeof(opts));
		if (copy_to_user(optval, (char *) &opts, len))
			err = -EFAULT;

		break;

	case L2CAP_LM:
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_LOW:
			opt = L2CAP_LM_AUTH;
			break;
		case BT_SECURITY_MEDIUM:
			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
			break;
		case BT_SECURITY_HIGH:
			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
							L2CAP_LM_SECURE;
			break;
		default:
			opt = 0;
			break;
		}

		if (l2cap_pi(sk)->role_switch)
			opt |= L2CAP_LM_MASTER;

		if (l2cap_pi(sk)->force_reliable)
			opt |= L2CAP_LM_RELIABLE;

		if (l2cap_pi(sk)->flushable)
			opt |= L2CAP_LM_FLUSHABLE;

		if (put_user(opt, (u32 __user *) optval))
			err = -EFAULT;
		break;

	case L2CAP_CONNINFO:
		if (sk->sk_state != BT_CONNECTED &&
					!(sk->sk_state == BT_CONNECT2 &&
						bt_sk(sk)->defer_setup)) {
			err = -ENOTCONN;
			break;
		}

		cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
		memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);

		len = min_t(unsigned int, len, sizeof(cinfo));
		if (copy_to_user(optval, (char *) &cinfo, len))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}

static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	struct bt_power pwr;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		sec.level = l2cap_pi(sk)->sec_level;

		len = min_t(unsigned int, len, sizeof(sec));
		if (copy_to_user(optval, (char *) &sec, len))
			err = -EFAULT;

		break;

	case BT_DEFER_SETUP:
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
			err = -EFAULT;

		break;

	case BT_POWER:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		pwr.force_active = l2cap_pi(sk)->force_active;

		len = min_t(unsigned int, len, sizeof(pwr));
		if (copy_to_user(optval, (char *) &pwr, len))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}

static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			err = __l2cap_wait_ack(sk);

		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED,
							sk->sk_lingertime);
	}

	if (!err && sk->sk_err)
		err = -sk->sk_err;

	release_sock(sk);
	return err;
}

static int l2cap_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	err = l2cap_sock_shutdown(sock, 2);

	sock_orphan(sk);
	l2cap_sock_kill(sk);
	return err;
}
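/* Called once both configuration directions are complete: clears the
 * config state and wakes up whoever is waiting in connect() or accept().
 */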
static void l2cap_chan_ready(struct sock *sk)
{
	struct sock *parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	l2cap_pi(sk)->conf_state = 0;
	l2cap_sock_clear_timer(sk);

	if (!parent) {
		/* Outgoing channel.
		 * Wake up socket sleeping on connect.
		 */
		sk->sk_state = BT_CONNECTED;
		sk->sk_state_change(sk);
	} else {
		/* Incoming channel.
		 * Wake up socket sleeping on accept.
		 */
		parent->sk_data_ready(parent, 0);
	}
}

/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sk_buff *nskb;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_type != SOCK_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&l->lock);
}

/* ---- L2CAP signalling commands ---- */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	kfree_skb(skb);
	return NULL;
}

static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = __le16_to_cpu(*((__le16 *) opt->val));
		break;

	case 4:
		*val = __le32_to_cpu(*((__le32 *) opt->val));
		break;

	default:
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}

static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		*((__le16 *) opt->val) = cpu_to_le16(val);
		break;

	case 4:
		*((__le32 *) opt->val) = cpu_to_le32(val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}

static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
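/* Reset the ERTM (Enhanced Retransmission Mode) state machine: sequence
 * counters, the retransmission/monitor/ack timers and the SREJ and busy
 * queues.
 */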
static inline void l2cap_ertm_init(struct sock *sk)
{
	l2cap_pi(sk)->expected_ack_seq = 0;
	l2cap_pi(sk)->unacked_frames = 0;
	l2cap_pi(sk)->buffer_seq = 0;
	l2cap_pi(sk)->num_acked = 0;
	l2cap_pi(sk)->frames_sent = 0;

	setup_timer(&l2cap_pi(sk)->retrans_timer,
			l2cap_retrans_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->monitor_timer,
			l2cap_monitor_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->ack_timer,
			l2cap_ack_timeout, (unsigned long) sk);

	__skb_queue_head_init(SREJ_QUEUE(sk));
	__skb_queue_head_init(BUSY_QUEUE(sk));

	INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);

	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}

static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
{
	switch (mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (l2cap_mode_supported(mode, remote_feat_mask))
			return mode;
		/* fall through */
	default:
		return L2CAP_MODE_BASIC;
	}
}

static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		if (pi->imtu != L2CAP_DEFAULT_MTU)
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//	l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
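/* Parse the peer's Configuration Request stored in conf_req and build
 * the Configuration Response in 'data'. Returns the response length or
 * a negative error if the requested mode cannot be accepted.
 */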
static int l2cap_parse_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = pi->conf_req;
	int len = pi->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;

	BT_DBG("sk %p", sk);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			pi->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
			break;

		default:
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (pi->num_conf_rsp || pi->num_conf_req > 1)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
			pi->mode = l2cap_select_mode(rfc.mode,
					pi->conn->feat_mask);
			break;
		}

		if (pi->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (pi->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = pi->mode;

		if (pi->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			pi->omtu = mtu;
			pi->conf_state |= L2CAP_CONF_MTU_DONE;
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			pi->fcs = L2CAP_FCS_NONE;
			pi->conf_state |= L2CAP_CONF_MODE_DONE;
			break;

		case L2CAP_MODE_ERTM:
			pi->remote_tx_win = rfc.txwin_size;
			pi->remote_max_tx = rfc.max_transmit;

			if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
				rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

			pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);

			rfc.retrans_timeout =
				le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);

			pi->conf_state |= L2CAP_CONF_MODE_DONE;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_MODE_STREAMING:
			if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
				rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

			pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);

			pi->conf_state |= L2CAP_CONF_MODE_DONE;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = pi->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
	}
	rsp->scid = cpu_to_le16(pi->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
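/* Walk the options of a Configuration Response and build a new
 * Configuration Request reflecting the values we can accept.
 */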
static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	/* initialize in case the response carries no RFC option */
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };

	BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				pi->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				pi->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			pi->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
							2, pi->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
							rfc.mode != pi->mode)
				return -ECONNREFUSED;

			pi->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
			break;
		}
	}

	if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
		return -ECONNREFUSED;

	pi->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			pi->mps = le16_to_cpu(rfc.max_pdu_size);
			break;
		case L2CAP_MODE_STREAMING:
			pi->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0x0000);

	return ptr - data;
}

static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

	BT_DBG("sk %p", sk);

	rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(flags);

	return ptr - data;
}

static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int type, olen;
	unsigned long val;
	/* initialize so a response without an RFC option is a no-op below */
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };

	BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);

	if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			goto done;
		}
	}

done:
	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		pi->mps = le16_to_cpu(rfc.max_pdu_size);
		break;
	case L2CAP_MODE_STREAMING:
		pi->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}

static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;

	if (rej->reason != 0x0000)
		return 0;

	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
					cmd->ident == conn->info_ident) {
		del_timer(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
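/* Handle an incoming Connection Request: find a listening socket for
 * the PSM, perform security checks, allocate the child socket and send
 * the Connection Response (and an Information Request if needed).
 */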
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

	return 0;
}

static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
			dcid, scid, result, status);

	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return -EFAULT;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return -EFAULT;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}

static inline void set_default_fcs(struct l2cap_pinfo *pi)
{
	/* FCS is enabled only in ERTM or streaming mode, if one or both
	 * sides request it.
	 */
	if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
		pi->fcs = L2CAP_FCS_NONE;
	else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
		pi->fcs = L2CAP_FCS_CRC16;
}
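/* Handle a Configuration Request. Fragments (flag bit 0 set) are
 * accumulated in conf_req until the final fragment arrives, then the
 * whole request is parsed and answered.
 */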
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	if (sk->sk_state == BT_DISCONN)
		goto unlock;

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}

static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct sock *sk;
	/* cmd->len is little-endian on the wire */
	int len = le16_to_cpu(cmd->len) - sizeof(*rsp);

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
			scid, flags, result);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(sk, rsp->data, len);
		break;

	case L2CAP_CONF_UNACCEPT:
		if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, sk, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(sk, rsp->data,
							len, req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, sk, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ, len, req);
			l2cap_pi(sk)->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}

	default:
		sk->sk_err = ECONNRESET;
		l2cap_sock_set_timer(sk, HZ * 5);
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto done;
	}

	if (flags & 0x01)
		goto done;

	l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;
		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
	}

done:
	bh_unlock_sock(sk);
	return 0;
}
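/* Disconnection handling: acknowledge the request, tear down the
 * channel and kill the socket.
 */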
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}

static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}

static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							| L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}

static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
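/* Dispatch all signalling commands contained in one C-frame on the
 * signalling channel; malformed commands abort the loop and errors are
 * answered with a Command Reject.
 */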
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}

static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size = L2CAP_HDR_SIZE + 2;

	if (pi->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - 2);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}

static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control = 0;

	pi->frames_sent = 0;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(pi, control);
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(sk);

	l2cap_ertm_send(sk);

	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
	}
}

static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(SREJ_QUEUE(sk));
	if (!next_skb) {
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						pi->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
			break;

	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));

	__skb_queue_tail(SREJ_QUEUE(sk), skb);

	return 0;
}
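/* Reassemble an ERTM SDU from its SAR fragments. A failed allocation
 * or receive-queue overflow is reported so the frame can be retried
 * once the local busy condition clears.
 */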
static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return err;

		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		pi->sdu_len = get_unaligned_le16(skb->data);

		if (pi->sdu_len > pi->imtu)
			goto disconnect;

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			goto drop;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
			pi->partial_sdu_len += skb->len;

			if (pi->partial_sdu_len > pi->imtu)
				goto drop;

			if (pi->partial_sdu_len != pi->sdu_len)
				goto drop;

			memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
		if (!_skb) {
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(pi->sdu);
		pi->sdu = NULL;
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(pi->sdu);
	pi->sdu = NULL;

disconnect:
	l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	kfree_skb(skb);
	return 0;
}

static int l2cap_try_push_rx_skb(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(sk, skb, control);
		if (err < 0) {
			skb_queue_head(BUSY_QUEUE(sk), skb);
			return -EBUSY;
		}

		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
	}

	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(pi, control);
	l2cap_pi(sk)->retry_count = 1;

	del_timer(&pi->retrans_timer);
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

done:
	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("sk %p, Exit local busy", sk);

	return 0;
}

static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, busy_work);
	struct sock *sk = (struct sock *)pi;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(pi->conn, sk, EBUSY);
			break;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		if (l2cap_try_push_rx_skb(sk) == 0)
			break;
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
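/* Deliver one I-frame payload upwards. If delivery fails the channel
 * enters the local busy state: the frame is parked on the busy queue,
 * an RNR is sent and the busy work is scheduled to retry.
 */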
static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int sctrl, err;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(BUSY_QUEUE(sk), skb);
		return l2cap_try_push_rx_skb(sk);
	}

	err = l2cap_ertm_reassembly_sdu(sk, skb, control);
	if (err >= 0) {
		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
		return err;
	}

	/* Busy Condition */
	BT_DBG("sk %p, Enter local busy", sk);

	pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(BUSY_QUEUE(sk), skb);

	sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(pi, sctrl);

	pi->conn_state |= L2CAP_CONN_RNR_SENT;

	del_timer(&pi->ack_timer);

	queue_work(_busy_wq, &pi->busy_work);

	return err;
}

static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err = -EINVAL;

	/*
	 * TODO: We have to notify the userland if some data is lost with the
	 * Streaming Mode.
	 */

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
			kfree_skb(pi->sdu);
			break;
		}

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return 0;

		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
			kfree_skb(pi->sdu);
			break;
		}

		pi->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);

		if (pi->sdu_len > pi->imtu) {
			err = -EMSGSIZE;
			break;
		}

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu) {
			err = -ENOMEM;
			break;
		}

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		err = 0;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			break;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			kfree_skb(pi->sdu);
		else
			err = 0;

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			break;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len += skb->len;

		if (pi->partial_sdu_len > pi->imtu)
			goto drop;

		if (pi->partial_sdu_len == pi->sdu_len) {
			/* guard against a failed clone before queueing */
			_skb = skb_clone(pi->sdu, GFP_ATOMIC);
			if (_skb) {
				err = sock_queue_rcv_skb(sk, _skb);
				if (err < 0)
					kfree_skb(_skb);
			}
		}
		err = 0;

drop:
		kfree_skb(pi->sdu);
		break;
	}

	kfree_skb(skb);
	return err;
}

static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
{
	struct sk_buff *skb;
	u16 control;

	while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
		if (bt_cb(skb)->tx_seq != tx_seq)
			break;

		skb = skb_dequeue(SREJ_QUEUE(sk));
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		l2cap_ertm_reassembly_sdu(sk, skb, control);
		l2cap_pi(sk)->buffer_seq_srej =
			(l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
		tx_seq = (tx_seq + 1) % 64;
	}
}

static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
		if (l->tx_seq == tx_seq) {
			list_del(&l->list);
			kfree(l);
			return;
		}
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, control);
		list_del(&l->list);
		list_add_tail(&l->list, SREJ_LIST(sk));
	}
}

static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct srej_list *new;
	u16 control;

	while (tx_seq != pi->expected_tx_seq) {
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, control);

		new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
		new->tx_seq = pi->expected_tx_seq;
		pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		list_add_tail(&new->list, SREJ_LIST(sk));
	}
	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
}
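/* Receive path for ERTM I-frames: acknowledgement handling, detection
 * of missing or duplicated tx_seq values and SREJ-based recovery.
 */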
static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_txseq(rx_control);
	u8 req_seq = __get_reqseq(rx_control);
	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
	int tx_seq_offset, expected_tx_seq_offset;
	int num_to_ack = (pi->tx_win/6) + 1;
	int err = 0;

	BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
								rx_control);

	if (L2CAP_CTRL_FINAL & rx_control &&
			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
		del_timer(&pi->monitor_timer);
		if (pi->unacked_frames > 0)
			__mod_retrans_timer();
		pi->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	pi->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(sk);

	if (tx_seq == pi->expected_tx_seq)
		goto expected;

	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	/* invalid tx_seq */
	if (tx_seq_offset >= pi->tx_win) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	/* LOCAL_BUSY is a flag in conn_state, so test the bit */
	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
		goto drop;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		struct srej_list *first;

		first = list_first_entry(SREJ_LIST(sk),
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
			l2cap_check_srej_gap(sk, tx_seq);

			list_del(&first->list);
			kfree(first);

			if (list_empty(SREJ_LIST(sk))) {
				pi->buffer_seq = pi->buffer_seq_srej;
				pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
				l2cap_send_ack(pi);
				BT_DBG("sk %p, Exit SREJ_SENT", sk);
			}
		} else {
			struct srej_list *l;

			/* duplicated tx_seq */
			if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
				goto drop;

			list_for_each_entry(l, SREJ_LIST(sk), list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(sk, tx_seq);
					return 0;
				}
			}
			l2cap_send_srejframe(sk, tx_seq);
		}
	} else {
		expected_tx_seq_offset =
			(pi->expected_tx_seq - pi->buffer_seq) % 64;
		if (expected_tx_seq_offset < 0)
			expected_tx_seq_offset += 64;

		/* duplicated tx_seq */
		if (tx_seq_offset < expected_tx_seq_offset)
			goto drop;

		pi->conn_state |= L2CAP_CONN_SREJ_SENT;

		BT_DBG("sk %p, Enter SREJ", sk);

		INIT_LIST_HEAD(SREJ_LIST(sk));
		pi->buffer_seq_srej = pi->buffer_seq;

		__skb_queue_head_init(SREJ_QUEUE(sk));
		__skb_queue_head_init(BUSY_QUEUE(sk));
		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);

		pi->conn_state |= L2CAP_CONN_SEND_PBIT;

		l2cap_send_srejframe(sk, tx_seq);

		del_timer(&pi->ack_timer);
	}
	return 0;

expected:
	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		bt_cb(skb)->tx_seq = tx_seq;
		bt_cb(skb)->sar = sar;
		__skb_queue_tail(SREJ_QUEUE(sk), skb);
		return 0;
	}

	err = l2cap_push_rx_skb(sk, skb, rx_control);
	if (err < 0)
		return 0;

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);
	}

	__mod_ack_timer();

	pi->num_acked = (pi->num_acked + 1) % num_to_ack;
	if (pi->num_acked == num_to_ack - 1)
		l2cap_send_ack(pi);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
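/* Handlers for the four supervisory frame types (RR, REJ, SREJ, RNR). */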
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
						rx_control);

	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(sk);
		} else {
			l2cap_send_i_or_rr_or_rnr(sk);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);

	} else {
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			l2cap_send_ack(pi);
		} else {
			l2cap_ertm_send(sk);
		}
	}
}

static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(sk);
	} else {
		l2cap_retransmit_frames(sk);

		if (pi->conn_state & L2CAP_CONN_WAIT_F)
			pi->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}

static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(sk, tx_seq);

		l2cap_ertm_send(sk);

		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(sk, tx_seq);
	} else {
		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}

static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
		return;
	}

	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);
	else
		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
}

static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
{
	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	if (L2CAP_CTRL_FINAL & rx_control &&
			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
		del_timer(&l2cap_pi(sk)->monitor_timer);
		if (l2cap_pi(sk)->unacked_frames > 0)
			__mod_retrans_timer();
		l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		l2cap_data_channel_rrframe(sk, rx_control);
		break;

	case L2CAP_SUPER_REJECT:
		l2cap_data_channel_rejframe(sk, rx_control);
		break;

	case L2CAP_SUPER_SELECT_REJECT:
		l2cap_data_channel_srejframe(sk, rx_control);
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		l2cap_data_channel_rnrframe(sk, rx_control);
		break;
	}

	kfree_skb(skb);
	return 0;
}
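/* Validate and dispatch one ERTM frame: FCS check, length and req_seq
 * sanity checks, then I-frame or S-frame processing.
 */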
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(pi, skb))
		goto drop;

	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	if (pi->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > pi->mps) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(pi->next_tx_seq - pi->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		if (len < 0) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(sk, control, skb);
	} else {
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(sk, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
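/* Entry point for data on a connection-oriented channel; dispatches on
 * the channel mode (Basic, ERTM or Streaming).
 */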
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 control;
	u8 tx_seq;
	int len;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		if (__is_sar_start(control))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		if (len > pi->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
		else
			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}

static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	if (!sk)
		goto drop;

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}

static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}

/* ---- L2CAP interface with lower layer (HCI) ---- */
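/* Incoming ACL connection: accept it if any socket listens on this
 * adapter (or on BDADDR_ANY) and report the accumulated link mode.
 */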
lm1 : lm2; } static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status) { struct l2cap_conn *conn; BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); if (hcon->type != ACL_LINK) return -EINVAL; if (!status) { conn = l2cap_conn_add(hcon, status); if (conn) l2cap_conn_ready(conn); } else l2cap_conn_del(hcon, bt_err(status)); return 0; } static int l2cap_disconn_ind(struct hci_conn *hcon) { struct l2cap_conn *conn = hcon->l2cap_data; BT_DBG("hcon %p", hcon); if (hcon->type != ACL_LINK || !conn) return 0x13; return conn->disc_reason; } static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason) { BT_DBG("hcon %p reason %d", hcon, reason); if (hcon->type != ACL_LINK) return -EINVAL; l2cap_conn_del(hcon, bt_err(reason)); return 0; } static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt) { if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM) return; if (encrypt == 0x00) { if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) { l2cap_sock_clear_timer(sk); l2cap_sock_set_timer(sk, HZ * 5); } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH) __l2cap_sock_close(sk, ECONNREFUSED); } else { if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) l2cap_sock_clear_timer(sk); } } static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) { struct l2cap_chan_list *l; struct l2cap_conn *conn = hcon->l2cap_data; struct sock *sk; if (!conn) return 0; l = &conn->chan_list; BT_DBG("conn %p", conn); read_lock(&l->lock); for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { bh_lock_sock(sk); if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) { bh_unlock_sock(sk); continue; } if (!status && (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)) { l2cap_check_encryption(sk, encrypt); bh_unlock_sock(sk); continue; } if (sk->sk_state == BT_CONNECT) { if (!status) { struct l2cap_conn_req req; req.scid = cpu_to_le16(l2cap_pi(sk)->scid); req.psm = l2cap_pi(sk)->psm; l2cap_pi(sk)->ident = l2cap_get_ident(conn); l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND; l2cap_send_cmd(conn, l2cap_pi(sk)->ident, L2CAP_CONN_REQ, sizeof(req), &req); } else { l2cap_sock_clear_timer(sk); l2cap_sock_set_timer(sk, HZ / 10); } } else if (sk->sk_state == BT_CONNECT2) { struct l2cap_conn_rsp rsp; __u16 result; if (!status) { sk->sk_state = BT_CONFIG; result = L2CAP_CR_SUCCESS; } else { sk->sk_state = BT_DISCONN; l2cap_sock_set_timer(sk, HZ / 10); result = L2CAP_CR_SEC_BLOCK; } rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); rsp.result = cpu_to_le16(result); rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); l2cap_send_cmd(conn, l2cap_pi(sk)->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); } bh_unlock_sock(sk); } read_unlock(&l->lock); return 0; } static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) { struct l2cap_conn *conn = hcon->l2cap_data; if (!conn && !(conn = l2cap_conn_add(hcon, 0))) goto drop; BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags); if (!(flags & ACL_CONT)) { struct l2cap_hdr *hdr; int len; if (conn->rx_len) { BT_ERR("Unexpected start frame (len %d)", skb->len); kfree_skb(conn->rx_skb); conn->rx_skb = NULL; conn->rx_len = 0; l2cap_conn_unreliable(conn, ECOMM); } if (skb->len < 2) { BT_ERR("Frame is too short (len %d)", skb->len); l2cap_conn_unreliable(conn, ECOMM); goto drop; } hdr = (struct l2cap_hdr *) skb->data; len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE; if (len == skb->len) { /* Complete frame received */ l2cap_recv_frame(conn, skb); return 0; } BT_DBG("Start: 
total len %d, frag len %d", len, skb->len); if (skb->len > len) { BT_ERR("Frame is too long (len %d, expected len %d)", skb->len, len); l2cap_conn_unreliable(conn, ECOMM); goto drop; } /* Allocate skb for the complete frame (with header) */ conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC); if (!conn->rx_skb) goto drop; skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), skb->len); conn->rx_len = len - skb->len; } else { BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len); if (!conn->rx_len) { BT_ERR("Unexpected continuation frame (len %d)", skb->len); l2cap_conn_unreliable(conn, ECOMM); goto drop; } if (skb->len > conn->rx_len) { BT_ERR("Fragment is too long (len %d, expected %d)", skb->len, conn->rx_len); kfree_skb(conn->rx_skb); conn->rx_skb = NULL; conn->rx_len = 0; l2cap_conn_unreliable(conn, ECOMM); goto drop; } skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), skb->len); conn->rx_len -= skb->len; if (!conn->rx_len) { /* Complete frame received */ l2cap_recv_frame(conn, conn->rx_skb); conn->rx_skb = NULL; } } drop: kfree_skb(skb); return 0; } static int l2cap_debugfs_show(struct seq_file *f, void *p) { struct sock *sk; struct hlist_node *node; read_lock_bh(&l2cap_sk_list.lock); sk_for_each(sk, node, &l2cap_sk_list.head) { struct l2cap_pinfo *pi = l2cap_pi(sk); seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n", batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), sk->sk_state, __le16_to_cpu(pi->psm), pi->scid, pi->dcid, pi->imtu, pi->omtu, pi->sec_level); } read_unlock_bh(&l2cap_sk_list.lock); return 0; } static int l2cap_debugfs_open(struct inode *inode, struct file *file) { return single_open(file, l2cap_debugfs_show, inode->i_private); } static const struct file_operations l2cap_debugfs_fops = { .open = l2cap_debugfs_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static struct dentry *l2cap_debugfs; static const struct proto_ops l2cap_sock_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .release = l2cap_sock_release, .bind = l2cap_sock_bind, .connect = l2cap_sock_connect, .listen = l2cap_sock_listen, .accept = l2cap_sock_accept, .getname = l2cap_sock_getname, .sendmsg = l2cap_sock_sendmsg, .recvmsg = l2cap_sock_recvmsg, .poll = bt_sock_poll, .ioctl = bt_sock_ioctl, .mmap = sock_no_mmap, .socketpair = sock_no_socketpair, .shutdown = l2cap_sock_shutdown, .setsockopt = l2cap_sock_setsockopt, .getsockopt = l2cap_sock_getsockopt }; static const struct net_proto_family l2cap_sock_family_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .create = l2cap_sock_create, }; static struct hci_proto l2cap_hci_proto = { .name = "L2CAP", .id = HCI_PROTO_L2CAP, .connect_ind = l2cap_connect_ind, .connect_cfm = l2cap_connect_cfm, .disconn_ind = l2cap_disconn_ind, .disconn_cfm = l2cap_disconn_cfm, .security_cfm = l2cap_security_cfm, .recv_acldata = l2cap_recv_acldata }; static int __init l2cap_init(void) { int err; err = proto_register(&l2cap_proto, 0); if (err < 0) return err; _busy_wq = create_singlethread_workqueue("l2cap"); if (!_busy_wq) goto error; err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops); if (err < 0) { BT_ERR("L2CAP socket registration failed"); goto error; } err = hci_register_proto(&l2cap_hci_proto); if (err < 0) { BT_ERR("L2CAP protocol registration failed"); bt_sock_unregister(BTPROTO_L2CAP); goto error; } if (bt_debugfs) { l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs, NULL, &l2cap_debugfs_fops); if (!l2cap_debugfs) BT_ERR("Failed to create L2CAP debug file"); } BT_INFO("L2CAP 
ver %s", VERSION); BT_INFO("L2CAP socket layer initialized"); return 0; error: proto_unregister(&l2cap_proto); return err; } static void __exit l2cap_exit(void) { debugfs_remove(l2cap_debugfs); flush_workqueue(_busy_wq); destroy_workqueue(_busy_wq); if (bt_sock_unregister(BTPROTO_L2CAP) < 0) BT_ERR("L2CAP socket unregistration failed"); if (hci_unregister_proto(&l2cap_hci_proto) < 0) BT_ERR("L2CAP protocol unregistration failed"); proto_unregister(&l2cap_proto); } void l2cap_load(void) { /* Dummy function to trigger automatic L2CAP module loading by * other modules that use L2CAP sockets but don't use any other * symbols from it. */ } EXPORT_SYMBOL(l2cap_load); module_init(l2cap_init); module_exit(l2cap_exit); module_param(disable_ertm, bool, 0644); MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode"); MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_ALIAS("bt-proto-0");
gpl-2.0
CyanogenMod/android_kernel_amazon_bowser-common
arch/arm/mach-omap2/board-3430sdp.c
305
17839
/* * linux/arch/arm/mach-omap2/board-3430sdp.c * * Copyright (C) 2007 Texas Instruments * * Modified from mach-omap2/board-generic.c * * Initial code: Syed Mohammed Khasim * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/input.h> #include <linux/input/matrix_keypad.h> #include <linux/spi/spi.h> #include <linux/i2c/twl.h> #include <linux/regulator/machine.h> #include <linux/io.h> #include <linux/gpio.h> #include <linux/mmc/host.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <plat/mcspi.h> #include <plat/board.h> #include <plat/usb.h> #include <plat/common.h> #include <plat/dma.h> #include <plat/gpmc.h> #include <video/omapdss.h> #include <video/omap-panel-generic-dpi.h> #include <plat/gpmc-smc91x.h> #include "board-flash.h" #include "mux.h" #include "sdram-qimonda-hyb18m512160af-6.h" #include "hsmmc.h" #include "pm.h" #include "control.h" #include "common-board-devices.h" #define CONFIG_DISABLE_HFCLK 1 #define SDP3430_TS_GPIO_IRQ_SDPV1 3 #define SDP3430_TS_GPIO_IRQ_SDPV2 2 #define ENABLE_VAUX3_DEDICATED 0x03 #define ENABLE_VAUX3_DEV_GRP 0x20 #define TWL4030_MSECURE_GPIO 22 static uint32_t board_keymap[] = { KEY(0, 0, KEY_LEFT), KEY(0, 1, KEY_RIGHT), KEY(0, 2, KEY_A), KEY(0, 3, KEY_B), KEY(0, 4, KEY_C), KEY(1, 0, KEY_DOWN), KEY(1, 1, KEY_UP), KEY(1, 2, KEY_E), KEY(1, 3, KEY_F), KEY(1, 4, KEY_G), KEY(2, 0, KEY_ENTER), KEY(2, 1, KEY_I), KEY(2, 2, KEY_J), KEY(2, 3, KEY_K), KEY(2, 4, KEY_3), KEY(3, 0, KEY_M), KEY(3, 1, KEY_N), KEY(3, 2, KEY_O), KEY(3, 3, KEY_P), KEY(3, 4, KEY_Q), KEY(4, 0, KEY_R), KEY(4, 1, KEY_4), KEY(4, 2, KEY_T), KEY(4, 3, KEY_U), KEY(4, 4, KEY_D), KEY(5, 0, KEY_V), KEY(5, 1, KEY_W), KEY(5, 2, KEY_L), KEY(5, 3, KEY_S), KEY(5, 4, KEY_H), 0 }; static struct matrix_keymap_data board_map_data = { .keymap = board_keymap, .keymap_size = ARRAY_SIZE(board_keymap), }; static struct twl4030_keypad_data sdp3430_kp_data = { .keymap_data = &board_map_data, .rows = 5, .cols = 6, .rep = 1, }; #define SDP3430_LCD_PANEL_BACKLIGHT_GPIO 8 #define SDP3430_LCD_PANEL_ENABLE_GPIO 5 static struct gpio sdp3430_dss_gpios[] __initdata = { {SDP3430_LCD_PANEL_ENABLE_GPIO, GPIOF_OUT_INIT_LOW, "LCD reset" }, {SDP3430_LCD_PANEL_BACKLIGHT_GPIO, GPIOF_OUT_INIT_LOW, "LCD Backlight"}, }; static int lcd_enabled; static int dvi_enabled; static void __init sdp3430_display_init(void) { int r; r = gpio_request_array(sdp3430_dss_gpios, ARRAY_SIZE(sdp3430_dss_gpios)); if (r) printk(KERN_ERR "failed to get LCD control GPIOs\n"); } static int sdp3430_panel_enable_lcd(struct omap_dss_device *dssdev) { if (dvi_enabled) { printk(KERN_ERR "cannot enable LCD, DVI is enabled\n"); return -EINVAL; } gpio_direction_output(SDP3430_LCD_PANEL_ENABLE_GPIO, 1); gpio_direction_output(SDP3430_LCD_PANEL_BACKLIGHT_GPIO, 1); lcd_enabled = 1; return 0; } static void sdp3430_panel_disable_lcd(struct omap_dss_device *dssdev) { lcd_enabled = 0; gpio_direction_output(SDP3430_LCD_PANEL_ENABLE_GPIO, 0); gpio_direction_output(SDP3430_LCD_PANEL_BACKLIGHT_GPIO, 0); } static int sdp3430_panel_enable_dvi(struct omap_dss_device *dssdev) { if (lcd_enabled) { printk(KERN_ERR "cannot enable DVI, LCD is enabled\n"); return -EINVAL; } dvi_enabled = 1; return 0; } static void sdp3430_panel_disable_dvi(struct omap_dss_device 
*dssdev) { dvi_enabled = 0; } static int sdp3430_panel_enable_tv(struct omap_dss_device *dssdev) { return 0; } static void sdp3430_panel_disable_tv(struct omap_dss_device *dssdev) { } static struct omap_dss_device sdp3430_lcd_device = { .name = "lcd", .driver_name = "sharp_ls_panel", .type = OMAP_DISPLAY_TYPE_DPI, .phy.dpi.data_lines = 16, .platform_enable = sdp3430_panel_enable_lcd, .platform_disable = sdp3430_panel_disable_lcd, }; static struct panel_generic_dpi_data dvi_panel = { .name = "generic", .platform_enable = sdp3430_panel_enable_dvi, .platform_disable = sdp3430_panel_disable_dvi, }; static struct omap_dss_device sdp3430_dvi_device = { .name = "dvi", .type = OMAP_DISPLAY_TYPE_DPI, .driver_name = "generic_dpi_panel", .data = &dvi_panel, .phy.dpi.data_lines = 24, }; static struct omap_dss_device sdp3430_tv_device = { .name = "tv", .driver_name = "venc", .type = OMAP_DISPLAY_TYPE_VENC, .phy.venc.type = OMAP_DSS_VENC_TYPE_SVIDEO, .platform_enable = sdp3430_panel_enable_tv, .platform_disable = sdp3430_panel_disable_tv, }; static struct omap_dss_device *sdp3430_dss_devices[] = { &sdp3430_lcd_device, &sdp3430_dvi_device, &sdp3430_tv_device, }; static struct omap_dss_board_info sdp3430_dss_data = { .num_devices = ARRAY_SIZE(sdp3430_dss_devices), .devices = sdp3430_dss_devices, .default_device = &sdp3430_lcd_device, }; static struct omap_board_config_kernel sdp3430_config[] __initdata = { }; static void __init omap_3430sdp_init_early(void) { omap2_init_common_infrastructure(); omap2_init_common_devices(hyb18m512160af6_sdrc_params, NULL); } static int sdp3430_batt_table[] = { /* 0 C*/ 30800, 29500, 28300, 27100, 26000, 24900, 23900, 22900, 22000, 21100, 20300, 19400, 18700, 17900, 17200, 16500, 15900, 15300, 14700, 14100, 13600, 13100, 12600, 12100, 11600, 11200, 10800, 10400, 10000, 9630, 9280, 8950, 8620, 8310, 8020, 7730, 7460, 7200, 6950, 6710, 6470, 6250, 6040, 5830, 5640, 5450, 5260, 5090, 4920, 4760, 4600, 4450, 4310, 4170, 4040, 3910, 3790, 3670, 3550 }; static struct twl4030_bci_platform_data sdp3430_bci_data = { .battery_tmp_tbl = sdp3430_batt_table, .tblsize = ARRAY_SIZE(sdp3430_batt_table), }; static struct omap2_hsmmc_info mmc[] = { { .mmc = 1, /* 8 bits (default) requires S6.3 == ON, * so the SIM card isn't used; else 4 bits. 
*/ .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA, .gpio_wp = 4, }, { .mmc = 2, .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA, .gpio_wp = 7, }, {} /* Terminator */ }; static int sdp3430_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio) { /* gpio + 0 is "mmc0_cd" (input/IRQ), * gpio + 1 is "mmc1_cd" (input/IRQ) */ mmc[0].gpio_cd = gpio + 0; mmc[1].gpio_cd = gpio + 1; omap2_hsmmc_init(mmc); /* gpio + 7 is "sub_lcd_en_bkl" (output/PWM1) */ gpio_request_one(gpio + 7, GPIOF_OUT_INIT_LOW, "sub_lcd_en_bkl"); /* gpio + 15 is "sub_lcd_nRST" (output) */ gpio_request_one(gpio + 15, GPIOF_OUT_INIT_LOW, "sub_lcd_nRST"); return 0; } static struct twl4030_gpio_platform_data sdp3430_gpio_data = { .gpio_base = OMAP_MAX_GPIO_LINES, .irq_base = TWL4030_GPIO_IRQ_BASE, .irq_end = TWL4030_GPIO_IRQ_END, .pulldowns = BIT(2) | BIT(6) | BIT(8) | BIT(13) | BIT(16) | BIT(17), .setup = sdp3430_twl_gpio_setup, }; static struct twl4030_usb_data sdp3430_usb_data = { .usb_mode = T2_USB_MODE_ULPI, }; static struct twl4030_madc_platform_data sdp3430_madc_data = { .irq_line = 1, }; /* regulator consumer mappings */ /* ads7846 on SPI */ static struct regulator_consumer_supply sdp3430_vaux3_supplies[] = { REGULATOR_SUPPLY("vcc", "spi1.0"), }; static struct regulator_consumer_supply sdp3430_vdda_dac_supplies[] = { REGULATOR_SUPPLY("vdda_dac", "omapdss_venc"), }; /* VPLL2 for digital video outputs */ static struct regulator_consumer_supply sdp3430_vpll2_supplies[] = { REGULATOR_SUPPLY("vdds_dsi", "omapdss"), REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi1"), }; static struct regulator_consumer_supply sdp3430_vmmc1_supplies[] = { REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"), }; static struct regulator_consumer_supply sdp3430_vsim_supplies[] = { REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.0"), }; static struct regulator_consumer_supply sdp3430_vmmc2_supplies[] = { REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"), }; /* * Apply all the fixed voltages since most versions of U-Boot * don't bother with that initialization. 
*/ /* VAUX1 for mainboard (irda and sub-lcd) */ static struct regulator_init_data sdp3430_vaux1 = { .constraints = { .min_uV = 2800000, .max_uV = 2800000, .apply_uV = true, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, }; /* VAUX2 for camera module */ static struct regulator_init_data sdp3430_vaux2 = { .constraints = { .min_uV = 2800000, .max_uV = 2800000, .apply_uV = true, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, }; /* VAUX3 for LCD board */ static struct regulator_init_data sdp3430_vaux3 = { .constraints = { .min_uV = 2800000, .max_uV = 2800000, .apply_uV = true, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = ARRAY_SIZE(sdp3430_vaux3_supplies), .consumer_supplies = sdp3430_vaux3_supplies, }; /* VAUX4 for OMAP VDD_CSI2 (camera) */ static struct regulator_init_data sdp3430_vaux4 = { .constraints = { .min_uV = 1800000, .max_uV = 1800000, .apply_uV = true, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, }; /* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */ static struct regulator_init_data sdp3430_vmmc1 = { .constraints = { .min_uV = 1850000, .max_uV = 3150000, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = ARRAY_SIZE(sdp3430_vmmc1_supplies), .consumer_supplies = sdp3430_vmmc1_supplies, }; /* VMMC2 for MMC2 card */ static struct regulator_init_data sdp3430_vmmc2 = { .constraints = { .min_uV = 1850000, .max_uV = 1850000, .apply_uV = true, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = ARRAY_SIZE(sdp3430_vmmc2_supplies), .consumer_supplies = sdp3430_vmmc2_supplies, }; /* VSIM for OMAP VDD_MMC1A (i/o for DAT4..DAT7) */ static struct regulator_init_data sdp3430_vsim = { .constraints = { .min_uV = 1800000, .max_uV = 3000000, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = ARRAY_SIZE(sdp3430_vsim_supplies), .consumer_supplies = sdp3430_vsim_supplies, }; /* VDAC for DSS driving S-Video */ static struct regulator_init_data sdp3430_vdac = { .constraints = { .min_uV = 1800000, .max_uV = 1800000, .apply_uV = true, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = ARRAY_SIZE(sdp3430_vdda_dac_supplies), .consumer_supplies = sdp3430_vdda_dac_supplies, }; static struct regulator_init_data sdp3430_vpll2 = { .constraints = { .name = "VDVI", .min_uV = 1800000, .max_uV = 1800000, .apply_uV = true, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = ARRAY_SIZE(sdp3430_vpll2_supplies), .consumer_supplies = sdp3430_vpll2_supplies, }; static struct twl4030_codec_audio_data sdp3430_audio; static struct twl4030_codec_data sdp3430_codec = { .audio_mclk = 26000000, .audio = &sdp3430_audio, }; static struct 
twl4030_platform_data sdp3430_twldata = { .irq_base = TWL4030_IRQ_BASE, .irq_end = TWL4030_IRQ_END, /* platform_data for children goes here */ .bci = &sdp3430_bci_data, .gpio = &sdp3430_gpio_data, .madc = &sdp3430_madc_data, .keypad = &sdp3430_kp_data, .usb = &sdp3430_usb_data, .codec = &sdp3430_codec, .vaux1 = &sdp3430_vaux1, .vaux2 = &sdp3430_vaux2, .vaux3 = &sdp3430_vaux3, .vaux4 = &sdp3430_vaux4, .vmmc1 = &sdp3430_vmmc1, .vmmc2 = &sdp3430_vmmc2, .vsim = &sdp3430_vsim, .vdac = &sdp3430_vdac, .vpll2 = &sdp3430_vpll2, }; static int __init omap3430_i2c_init(void) { /* i2c1 for PMIC only */ omap3_pmic_init("twl4030", &sdp3430_twldata); /* i2c2 on camera connector (for sensor control) and optional isp1301 */ omap_register_i2c_bus(2, 400, NULL, 0); /* i2c3 on display connector (for DVI, tfp410) */ omap_register_i2c_bus(3, 400, NULL, 0); return 0; } #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) static struct omap_smc91x_platform_data board_smc91x_data = { .cs = 3, .flags = GPMC_MUX_ADD_DATA | GPMC_TIMINGS_SMC91C96 | IORESOURCE_IRQ_LOWLEVEL, }; static void __init board_smc91x_init(void) { if (omap_rev() > OMAP3430_REV_ES1_0) board_smc91x_data.gpio_irq = 6; else board_smc91x_data.gpio_irq = 29; gpmc_smc91x_init(&board_smc91x_data); } #else static inline void board_smc91x_init(void) { } #endif static void enable_board_wakeup_source(void) { /* T2 interrupt line (keypad) */ omap_mux_init_signal("sys_nirq", OMAP_WAKEUP_EN | OMAP_PIN_INPUT_PULLUP); } static const struct usbhs_omap_board_data usbhs_bdata __initconst = { .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, .phy_reset = true, .reset_gpio_port[0] = 57, .reset_gpio_port[1] = 61, .reset_gpio_port[2] = -EINVAL }; #ifdef CONFIG_OMAP_MUX static struct omap_board_mux board_mux[] __initdata = { { .reg_offset = OMAP_MUX_TERMINATOR }, }; #else #define board_mux NULL #endif /* * SDP3430 V2 Board CS organization * Different from SDP3430 V1. Now 4 switches used to specify CS * * See also the Switch S8 settings in the comments. 
*/ static char chip_sel_3430[][GPMC_CS_NUM] = { {PDC_NOR, PDC_NAND, PDC_ONENAND, DBG_MPDB, 0, 0, 0, 0}, /* S8:1111 */ {PDC_ONENAND, PDC_NAND, PDC_NOR, DBG_MPDB, 0, 0, 0, 0}, /* S8:1110 */ {PDC_NAND, PDC_ONENAND, PDC_NOR, DBG_MPDB, 0, 0, 0, 0}, /* S8:1101 */ }; static struct mtd_partition sdp_nor_partitions[] = { /* bootloader (U-Boot, etc) in first sector */ { .name = "Bootloader-NOR", .offset = 0, .size = SZ_256K, .mask_flags = MTD_WRITEABLE, /* force read-only */ }, /* bootloader params in the next sector */ { .name = "Params-NOR", .offset = MTDPART_OFS_APPEND, .size = SZ_256K, .mask_flags = 0, }, /* kernel */ { .name = "Kernel-NOR", .offset = MTDPART_OFS_APPEND, .size = SZ_2M, .mask_flags = 0 }, /* file system */ { .name = "Filesystem-NOR", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, .mask_flags = 0 } }; static struct mtd_partition sdp_onenand_partitions[] = { { .name = "X-Loader-OneNAND", .offset = 0, .size = 4 * (64 * 2048), .mask_flags = MTD_WRITEABLE /* force read-only */ }, { .name = "U-Boot-OneNAND", .offset = MTDPART_OFS_APPEND, .size = 2 * (64 * 2048), .mask_flags = MTD_WRITEABLE /* force read-only */ }, { .name = "U-Boot Environment-OneNAND", .offset = MTDPART_OFS_APPEND, .size = 1 * (64 * 2048), }, { .name = "Kernel-OneNAND", .offset = MTDPART_OFS_APPEND, .size = 16 * (64 * 2048), }, { .name = "File System-OneNAND", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, }, }; static struct mtd_partition sdp_nand_partitions[] = { /* All the partition sizes are listed in terms of NAND block size */ { .name = "X-Loader-NAND", .offset = 0, .size = 4 * (64 * 2048), .mask_flags = MTD_WRITEABLE, /* force read-only */ }, { .name = "U-Boot-NAND", .offset = MTDPART_OFS_APPEND, /* Offset = 0x80000 */ .size = 10 * (64 * 2048), .mask_flags = MTD_WRITEABLE, /* force read-only */ }, { .name = "Boot Env-NAND", .offset = MTDPART_OFS_APPEND, /* Offset = 0x1c0000 */ .size = 6 * (64 * 2048), }, { .name = "Kernel-NAND", .offset = MTDPART_OFS_APPEND, /* Offset = 0x280000 */ .size = 40 * (64 * 2048), }, { .name = "File System - NAND", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND, /* Offset = 0x780000 */ }, }; static struct flash_partitions sdp_flash_partitions[] = { { .parts = sdp_nor_partitions, .nr_parts = ARRAY_SIZE(sdp_nor_partitions), }, { .parts = sdp_onenand_partitions, .nr_parts = ARRAY_SIZE(sdp_onenand_partitions), }, { .parts = sdp_nand_partitions, .nr_parts = ARRAY_SIZE(sdp_nand_partitions), }, }; static void __init omap_3430sdp_init(void) { int gpio_pendown; omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); omap_board_config = sdp3430_config; omap_board_config_size = ARRAY_SIZE(sdp3430_config); omap3430_i2c_init(); omap_display_init(&sdp3430_dss_data); if (omap_rev() > OMAP3430_REV_ES1_0) gpio_pendown = SDP3430_TS_GPIO_IRQ_SDPV2; else gpio_pendown = SDP3430_TS_GPIO_IRQ_SDPV1; omap_ads7846_init(1, gpio_pendown, 310, NULL); omap_serial_init(); usb_musb_init(NULL); board_smc91x_init(); board_flash_init(sdp_flash_partitions, chip_sel_3430, 0); sdp3430_display_init(); enable_board_wakeup_source(); usbhs_init(&usbhs_bdata); } MACHINE_START(OMAP_3430SDP, "OMAP3430 3430SDP board") /* Maintainer: Syed Khasim - Texas Instruments Inc */ .boot_params = 0x80000100, .reserve = omap_reserve, .map_io = omap3_map_io, .init_early = omap_3430sdp_init_early, .init_irq = omap_init_irq, .init_machine = omap_3430sdp_init, .timer = &omap_timer, MACHINE_END
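/*
 * A stand-alone sketch of how the KEY(row, col, code) entries in
 * board_keymap[] above pack into a single u32, mirroring the encoding in
 * <linux/input/matrix_keypad.h>: row in bits 31..24, column in bits 23..16,
 * key code in the low 16 bits.  KEY_ENTER is 28 in <linux/input.h>.
 */
#include <stdio.h>
#include <stdint.h>

#define MATRIX_MAX_ROWS	32
#define MATRIX_MAX_COLS	32
#define KEY(row, col, val) ((((row) & (MATRIX_MAX_ROWS - 1)) << 24) | \
			    (((col) & (MATRIX_MAX_COLS - 1)) << 16) | \
			    ((val) & 0xffff))
#define KEY_ENTER	28

int main(void)
{
	uint32_t k = KEY(2, 0, KEY_ENTER);	/* same entry as the keymap above */

	printf("packed: 0x%08x\n", (unsigned)k);
	printf("row %u, col %u, code %u\n",
	       (unsigned)((k >> 24) & 0xff),
	       (unsigned)((k >> 16) & 0xff),
	       (unsigned)(k & 0xffff));
	return 0;
}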
gpl-2.0
nikro56/android_kernel_acer_t30s
arch/arm/mach-at91/board-sam9263ek.c
305
10546
/* * linux/arch/arm/mach-at91/board-sam9263ek.c * * Copyright (C) 2005 SAN People * Copyright (C) 2007 Atmel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/types.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/spi/ads7846.h> #include <linux/i2c/at24.h> #include <linux/fb.h> #include <linux/gpio_keys.h> #include <linux/input.h> #include <linux/leds.h> #include <video/atmel_lcdc.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/hardware.h> #include <mach/board.h> #include <mach/gpio.h> #include <mach/at91sam9_smc.h> #include <mach/at91_shdwc.h> #include "sam9_smc.h" #include "generic.h" static void __init ek_map_io(void) { /* Initialize processor: 16.367 MHz crystal */ at91sam9263_initialize(16367660); /* DBGU on ttyS0. (Rx & Tx only) */ at91_register_uart(0, 0, 0); /* USART0 on ttyS1. (Rx, Tx, RTS, CTS) */ at91_register_uart(AT91SAM9263_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS); /* set serial console to ttyS0 (ie, DBGU) */ at91_set_serial_console(0); } static void __init ek_init_irq(void) { at91sam9263_init_interrupts(NULL); } /* * USB Host port */ static struct at91_usbh_data __initdata ek_usbh_data = { .ports = 2, .vbus_pin = { AT91_PIN_PA24, AT91_PIN_PA21 }, }; /* * USB Device port */ static struct at91_udc_data __initdata ek_udc_data = { .vbus_pin = AT91_PIN_PA25, .pullup_pin = 0, /* pull-up driven by UDC */ }; /* * ADS7846 Touchscreen */ #if defined(CONFIG_TOUCHSCREEN_ADS7846) || defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE) static int ads7843_pendown_state(void) { return !at91_get_gpio_value(AT91_PIN_PA15); /* Touchscreen PENIRQ */ } static struct ads7846_platform_data ads_info = { .model = 7843, .x_min = 150, .x_max = 3830, .y_min = 190, .y_max = 3830, .vref_delay_usecs = 100, .x_plate_ohms = 450, .y_plate_ohms = 250, .pressure_max = 15000, .debounce_max = 1, .debounce_rep = 0, .debounce_tol = (~0), .get_pendown_state = ads7843_pendown_state, }; static void __init ek_add_device_ts(void) { at91_set_B_periph(AT91_PIN_PA15, 1); /* External IRQ1, with pullup */ at91_set_gpio_input(AT91_PIN_PA31, 1); /* Touchscreen BUSY signal */ } #else static void __init ek_add_device_ts(void) {} #endif /* * SPI devices. 
*/ static struct spi_board_info ek_spi_devices[] = { #if defined(CONFIG_MTD_AT91_DATAFLASH_CARD) { /* DataFlash card */ .modalias = "mtd_dataflash", .chip_select = 0, .max_speed_hz = 15 * 1000 * 1000, .bus_num = 0, }, #endif #if defined(CONFIG_TOUCHSCREEN_ADS7846) || defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE) { .modalias = "ads7846", .chip_select = 3, .max_speed_hz = 125000 * 26, /* (max sample rate @ 3V) * (cmd + data + overhead) */ .bus_num = 0, .platform_data = &ads_info, .irq = AT91SAM9263_ID_IRQ1, }, #endif }; /* * MCI (SD/MMC) */ static struct at91_mmc_data __initdata ek_mmc_data = { .wire4 = 1, .det_pin = AT91_PIN_PE18, .wp_pin = AT91_PIN_PE19, // .vcc_pin = ... not connected }; /* * MACB Ethernet device */ static struct at91_eth_data __initdata ek_macb_data = { .phy_irq_pin = AT91_PIN_PE31, .is_rmii = 1, }; /* * NAND flash */ static struct mtd_partition __initdata ek_nand_partition[] = { { .name = "Partition 1", .offset = 0, .size = SZ_64M, }, { .name = "Partition 2", .offset = MTDPART_OFS_NXTBLK, .size = MTDPART_SIZ_FULL, }, }; static struct mtd_partition * __init nand_partitions(int size, int *num_partitions) { *num_partitions = ARRAY_SIZE(ek_nand_partition); return ek_nand_partition; } static struct atmel_nand_data __initdata ek_nand_data = { .ale = 21, .cle = 22, // .det_pin = ... not connected .rdy_pin = AT91_PIN_PA22, .enable_pin = AT91_PIN_PD15, .partition_info = nand_partitions, #if defined(CONFIG_MTD_NAND_ATMEL_BUSWIDTH_16) .bus_width_16 = 1, #else .bus_width_16 = 0, #endif }; static struct sam9_smc_config __initdata ek_nand_smc_config = { .ncs_read_setup = 0, .nrd_setup = 1, .ncs_write_setup = 0, .nwe_setup = 1, .ncs_read_pulse = 3, .nrd_pulse = 3, .ncs_write_pulse = 3, .nwe_pulse = 3, .read_cycle = 5, .write_cycle = 5, .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE, .tdf_cycles = 2, }; static void __init ek_add_device_nand(void) { /* setup bus-width (8 or 16) */ if (ek_nand_data.bus_width_16) ek_nand_smc_config.mode |= AT91_SMC_DBW_16; else ek_nand_smc_config.mode |= AT91_SMC_DBW_8; /* configure chip-select 3 (NAND) */ sam9_smc_configure(3, &ek_nand_smc_config); at91_add_device_nand(&ek_nand_data); } /* * I2C devices */ static struct at24_platform_data at24c512 = { .byte_len = SZ_512K / 8, .page_size = 128, .flags = AT24_FLAG_ADDR16, }; static struct i2c_board_info __initdata ek_i2c_devices[] = { { I2C_BOARD_INFO("24c512", 0x50), .platform_data = &at24c512, }, /* more devices can be added using expansion connectors */ }; /* * LCD Controller */ #if defined(CONFIG_FB_ATMEL) || defined(CONFIG_FB_ATMEL_MODULE) static struct fb_videomode at91_tft_vga_modes[] = { { .name = "TX09D50VM1CCA @ 60", .refresh = 60, .xres = 240, .yres = 320, .pixclock = KHZ2PICOS(4965), .left_margin = 1, .right_margin = 33, .upper_margin = 1, .lower_margin = 0, .hsync_len = 5, .vsync_len = 1, .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, .vmode = FB_VMODE_NONINTERLACED, }, }; static struct fb_monspecs at91fb_default_monspecs = { .manufacturer = "HIT", .monitor = "TX09D70VM1CCA", .modedb = at91_tft_vga_modes, .modedb_len = ARRAY_SIZE(at91_tft_vga_modes), .hfmin = 15000, .hfmax = 64000, .vfmin = 50, .vfmax = 150, }; #define AT91SAM9263_DEFAULT_LCDCON2 (ATMEL_LCDC_MEMOR_LITTLE \ | ATMEL_LCDC_DISTYPE_TFT \ | ATMEL_LCDC_CLKMOD_ALWAYSACTIVE) static void at91_lcdc_power_control(int on) { at91_set_gpio_value(AT91_PIN_PA30, on); } /* Driver data */ static struct atmel_lcdfb_info __initdata ek_lcdc_data = { .lcdcon_is_backlight = true, .default_bpp = 16, .default_dmacon =
ATMEL_LCDC_DMAEN, .default_lcdcon2 = AT91SAM9263_DEFAULT_LCDCON2, .default_monspecs = &at91fb_default_monspecs, .atmel_lcdfb_power_control = at91_lcdc_power_control, .guard_time = 1, }; #else static struct atmel_lcdfb_info __initdata ek_lcdc_data; #endif /* * GPIO Buttons */ #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) static struct gpio_keys_button ek_buttons[] = { { /* BP1, "leftclic" */ .code = BTN_LEFT, .gpio = AT91_PIN_PC5, .active_low = 1, .desc = "left_click", .wakeup = 1, }, { /* BP2, "rightclic" */ .code = BTN_RIGHT, .gpio = AT91_PIN_PC4, .active_low = 1, .desc = "right_click", .wakeup = 1, } }; static struct gpio_keys_platform_data ek_button_data = { .buttons = ek_buttons, .nbuttons = ARRAY_SIZE(ek_buttons), }; static struct platform_device ek_button_device = { .name = "gpio-keys", .id = -1, .num_resources = 0, .dev = { .platform_data = &ek_button_data, } }; static void __init ek_add_device_buttons(void) { at91_set_GPIO_periph(AT91_PIN_PC5, 1); /* left button */ at91_set_deglitch(AT91_PIN_PC5, 1); at91_set_GPIO_periph(AT91_PIN_PC4, 1); /* right button */ at91_set_deglitch(AT91_PIN_PC4, 1); platform_device_register(&ek_button_device); } #else static void __init ek_add_device_buttons(void) {} #endif /* * AC97 * reset_pin is not connected: NRST */ static struct ac97c_platform_data ek_ac97_data = { }; /* * LEDs ... these could all be PWM-driven, for variable brightness */ static struct gpio_led ek_leds[] = { { /* "right" led, green, userled2 (could be driven by pwm2) */ .name = "ds2", .gpio = AT91_PIN_PC29, .active_low = 1, .default_trigger = "nand-disk", }, { /* "power" led, yellow (could be driven by pwm0) */ .name = "ds3", .gpio = AT91_PIN_PB7, .default_trigger = "heartbeat", } }; /* * PWM Leds */ static struct gpio_led ek_pwm_led[] = { /* For now only DS1 is PWM-driven (by pwm1) */ { .name = "ds1", .gpio = 1, /* is PWM channel number */ .active_low = 1, .default_trigger = "none", } }; /* * CAN */ static void sam9263ek_transceiver_switch(int on) { if (on) { at91_set_gpio_output(AT91_PIN_PA18, 1); /* CANRXEN */ at91_set_gpio_output(AT91_PIN_PA19, 0); /* CANRS */ } else { at91_set_gpio_output(AT91_PIN_PA18, 0); /* CANRXEN */ at91_set_gpio_output(AT91_PIN_PA19, 1); /* CANRS */ } } static struct at91_can_data ek_can_data = { .transceiver_switch = sam9263ek_transceiver_switch, }; static void __init ek_board_init(void) { /* Serial */ at91_add_device_serial(); /* USB Host */ at91_add_device_usbh(&ek_usbh_data); /* USB Device */ at91_add_device_udc(&ek_udc_data); /* SPI */ at91_set_gpio_output(AT91_PIN_PE20, 1); /* select spi0 clock */ at91_add_device_spi(ek_spi_devices, ARRAY_SIZE(ek_spi_devices)); /* Touchscreen */ ek_add_device_ts(); /* MMC */ at91_add_device_mmc(1, &ek_mmc_data); /* Ethernet */ at91_add_device_eth(&ek_macb_data); /* NAND */ ek_add_device_nand(); /* I2C */ at91_add_device_i2c(ek_i2c_devices, ARRAY_SIZE(ek_i2c_devices)); /* LCD Controller */ at91_add_device_lcdc(&ek_lcdc_data); /* Push Buttons */ ek_add_device_buttons(); /* AC97 */ at91_add_device_ac97(&ek_ac97_data); /* LEDs */ at91_gpio_leds(ek_leds, ARRAY_SIZE(ek_leds)); at91_pwm_leds(ek_pwm_led, ARRAY_SIZE(ek_pwm_led)); /* CAN */ at91_add_device_can(&ek_can_data); } MACHINE_START(AT91SAM9263EK, "Atmel AT91SAM9263-EK") /* Maintainer: Atmel */ .boot_params = AT91_SDRAM_BASE + 0x100, .timer = &at91sam926x_timer, .map_io = ek_map_io, .init_irq = ek_init_irq, .init_machine = ek_board_init, MACHINE_END
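/*
 * A small stand-alone check of the ads7846 .max_speed_hz value above.  Per
 * the in-file comment, 125 kHz is the converter's maximum sample rate at
 * 3 V and 26 is the per-sample bit budget (roughly 8 command bits plus 12
 * sample bits plus turnaround overhead), so the SPI clock is their product.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int sample_rate_hz = 125000;	/* max @ 3V */
	const unsigned int bits_per_sample = 26;	/* cmd + data + overhead */

	/* prints 3250000, the value programmed as max_speed_hz */
	printf("max_speed_hz = %u\n", sample_rate_hz * bits_per_sample);
	return 0;
}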
gpl-2.0
Tiamat-AOSP/Tiamat-Xoom
fs/nfsd/stats.c
1329
2725
/* * procfs-based user access to knfsd statistics * * /proc/net/rpc/nfsd * * Format: * rc <hits> <misses> <nocache> * Statistics for the reply cache * fh <stale> <total-lookups> <anonlookups> <dir-not-in-dcache> <nondir-not-in-dcache> * statistics for filehandle lookup * io <bytes-read> <bytes-written> * statistics for IO throughput * th <threads> <fullcnt> <10%-20%> <20%-30%> ... <90%-100%> <100%> * time (seconds) that nfsd thread usage was above each threshold, * and the number of times that all threads were in use * ra cache-size <10% <20% <30% ... <100% not-found * number of times that a read-ahead entry was found that deep in * the cache. * plus generic RPC stats (see net/sunrpc/stats.c) * * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de> */ #include <linux/seq_file.h> #include <linux/module.h> #include <linux/sunrpc/stats.h> #include <linux/nfsd/stats.h> #include "nfsd.h" struct nfsd_stats nfsdstats; struct svc_stat nfsd_svcstats = { .program = &nfsd_program, }; static int nfsd_proc_show(struct seq_file *seq, void *v) { int i; seq_printf(seq, "rc %u %u %u\nfh %u %u %u %u %u\nio %u %u\n", nfsdstats.rchits, nfsdstats.rcmisses, nfsdstats.rcnocache, nfsdstats.fh_stale, nfsdstats.fh_lookup, nfsdstats.fh_anon, nfsdstats.fh_nocache_dir, nfsdstats.fh_nocache_nondir, nfsdstats.io_read, nfsdstats.io_write); /* thread usage: */ seq_printf(seq, "th %u %u", nfsdstats.th_cnt, nfsdstats.th_fullcnt); for (i=0; i<10; i++) { unsigned int jifs = nfsdstats.th_usage[i]; unsigned int sec = jifs / HZ, msec = (jifs % HZ)*1000/HZ; seq_printf(seq, " %u.%03u", sec, msec); } /* newline and ra-cache */ seq_printf(seq, "\nra %u", nfsdstats.ra_size); for (i=0; i<11; i++) seq_printf(seq, " %u", nfsdstats.ra_depth[i]); seq_putc(seq, '\n'); /* show my rpc info */ svc_seq_show(seq, &nfsd_svcstats); #ifdef CONFIG_NFSD_V4 /* Show count for individual nfsv4 operations */ /* Operation numbers 0, 1 and 2 are written as well, to keep the columns uniform */ seq_printf(seq,"proc4ops %u", LAST_NFS4_OP + 1); for (i = 0; i <= LAST_NFS4_OP; i++) seq_printf(seq, " %u", nfsdstats.nfs4_opcount[i]); seq_putc(seq, '\n'); #endif return 0; } static int nfsd_proc_open(struct inode *inode, struct file *file) { return single_open(file, nfsd_proc_show, NULL); } static const struct file_operations nfsd_proc_fops = { .owner = THIS_MODULE, .open = nfsd_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; void nfsd_stat_init(void) { svc_proc_register(&nfsd_svcstats, &nfsd_proc_fops); } void nfsd_stat_shutdown(void) { svc_proc_unregister("nfsd"); }
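/*
 * A minimal sketch of the jiffies-to-"sec.msec" conversion that
 * nfsd_proc_show() above applies to each "th" bucket.  HZ is fixed at 100
 * here purely for the example; the kernel value is configuration-dependent.
 */
#include <stdio.h>

#define HZ 100	/* illustrative assumption, not the kernel's value */

int main(void)
{
	unsigned int jifs = 1234;	/* hypothetical th_usage[i] value */
	unsigned int sec = jifs / HZ;
	unsigned int msec = (jifs % HZ) * 1000 / HZ;

	printf(" %u.%03u\n", sec, msec);	/* 1234 jiffies -> " 12.340" */
	return 0;
}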
gpl-2.0
chentz78/chentz-N4-Kernel
drivers/usb/gadget/nokia.c
2097
7109
/* * nokia.c -- Nokia Composite Gadget Driver * * Copyright (C) 2008-2010 Nokia Corporation * Contact: Felipe Balbi <felipe.balbi@nokia.com> * * This gadget driver borrows from serial.c which is: * * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com) * Copyright (C) 2008 by David Brownell * Copyright (C) 2008 by Nokia Corporation * * This software is distributed under the terms of the GNU General * Public License ("GPL") as published by the Free Software Foundation, * version 2 of that License. */ #include <linux/kernel.h> #include <linux/device.h> #include "u_serial.h" #include "u_ether.h" #include "u_phonet.h" #include "gadget_chips.h" /* Defines */ #define NOKIA_VERSION_NUM 0x0211 #define NOKIA_LONG_NAME "N900 (PC-Suite Mode)" /*-------------------------------------------------------------------------*/ /* * Kbuild is not very cooperative with respect to linking separately * compiled library objects into one module. So for now we won't use * separate compilation ... ensuring init/exit sections work to shrink * the runtime footprint, and giving us at least some parts of what * a "gcc --combine ... part1.c part2.c part3.c ... " build would. */ #define USBF_OBEX_INCLUDED #include "f_ecm.c" #include "f_obex.c" #include "f_phonet.c" #include "u_ether.c" /*-------------------------------------------------------------------------*/ USB_GADGET_COMPOSITE_OPTIONS(); #define NOKIA_VENDOR_ID 0x0421 /* Nokia */ #define NOKIA_PRODUCT_ID 0x01c8 /* Nokia Gadget */ /* string IDs are assigned dynamically */ #define STRING_DESCRIPTION_IDX USB_GADGET_FIRST_AVAIL_IDX static char manufacturer_nokia[] = "Nokia"; static const char product_nokia[] = NOKIA_LONG_NAME; static const char description_nokia[] = "PC-Suite Configuration"; static struct usb_string strings_dev[] = { [USB_GADGET_MANUFACTURER_IDX].s = manufacturer_nokia, [USB_GADGET_PRODUCT_IDX].s = NOKIA_LONG_NAME, [USB_GADGET_SERIAL_IDX].s = "", [STRING_DESCRIPTION_IDX].s = description_nokia, { } /* end of list */ }; static struct usb_gadget_strings stringtab_dev = { .language = 0x0409, /* en-us */ .strings = strings_dev, }; static struct usb_gadget_strings *dev_strings[] = { &stringtab_dev, NULL, }; static struct usb_device_descriptor device_desc = { .bLength = USB_DT_DEVICE_SIZE, .bDescriptorType = USB_DT_DEVICE, .bcdUSB = __constant_cpu_to_le16(0x0200), .bDeviceClass = USB_CLASS_COMM, .idVendor = __constant_cpu_to_le16(NOKIA_VENDOR_ID), .idProduct = __constant_cpu_to_le16(NOKIA_PRODUCT_ID), .bcdDevice = cpu_to_le16(NOKIA_VERSION_NUM), /* .iManufacturer = DYNAMIC */ /* .iProduct = DYNAMIC */ .bNumConfigurations = 1, }; /*-------------------------------------------------------------------------*/ /* Module */ MODULE_DESCRIPTION("Nokia composite gadget driver for N900"); MODULE_AUTHOR("Felipe Balbi"); MODULE_LICENSE("GPL"); /*-------------------------------------------------------------------------*/ static struct usb_function *f_acm_cfg1; static struct usb_function *f_acm_cfg2; static u8 hostaddr[ETH_ALEN]; static struct eth_dev *the_dev; enum { TTY_PORT_OBEX0, TTY_PORT_OBEX1, TTY_PORTS_MAX, }; static unsigned char tty_lines[TTY_PORTS_MAX]; static struct usb_configuration nokia_config_500ma_driver = { .label = "Bus Powered", .bConfigurationValue = 1, /* .iConfiguration = DYNAMIC */ .bmAttributes = USB_CONFIG_ATT_ONE, .MaxPower = 500, }; static struct usb_configuration nokia_config_100ma_driver = { .label = "Self Powered", .bConfigurationValue = 2, /* .iConfiguration = DYNAMIC */ .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER, 
.MaxPower = 100, }; static struct usb_function_instance *fi_acm; static int __init nokia_bind_config(struct usb_configuration *c) { struct usb_function *f_acm; int status = 0; status = phonet_bind_config(c); if (status) printk(KERN_DEBUG "could not bind phonet config\n"); status = obex_bind_config(c, tty_lines[TTY_PORT_OBEX0]); if (status) printk(KERN_DEBUG "could not bind obex config %d\n", 0); status = obex_bind_config(c, tty_lines[TTY_PORT_OBEX1]); if (status) printk(KERN_DEBUG "could not bind obex config %d\n", 1); f_acm = usb_get_function(fi_acm); if (IS_ERR(f_acm)) return PTR_ERR(f_acm); status = usb_add_function(c, f_acm); if (status) goto err_conf; status = ecm_bind_config(c, hostaddr, the_dev); if (status) { pr_debug("could not bind ecm config %d\n", status); goto err_ecm; } if (c == &nokia_config_500ma_driver) f_acm_cfg1 = f_acm; else f_acm_cfg2 = f_acm; return status; err_ecm: usb_remove_function(c, f_acm); err_conf: usb_put_function(f_acm); return status; } static int __init nokia_bind(struct usb_composite_dev *cdev) { struct usb_gadget *gadget = cdev->gadget; int status; int cur_line; status = gphonet_setup(cdev->gadget); if (status < 0) goto err_phonet; for (cur_line = 0; cur_line < TTY_PORTS_MAX; cur_line++) { status = gserial_alloc_line(&tty_lines[cur_line]); if (status) goto err_ether; } the_dev = gether_setup(cdev->gadget, hostaddr); if (IS_ERR(the_dev)) { status = PTR_ERR(the_dev); goto err_ether; } status = usb_string_ids_tab(cdev, strings_dev); if (status < 0) goto err_usb; device_desc.iManufacturer = strings_dev[USB_GADGET_MANUFACTURER_IDX].id; device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id; status = strings_dev[STRING_DESCRIPTION_IDX].id; nokia_config_500ma_driver.iConfiguration = status; nokia_config_100ma_driver.iConfiguration = status; if (!gadget_supports_altsettings(gadget)) goto err_usb; fi_acm = usb_get_function_instance("acm"); if (IS_ERR(fi_acm)) goto err_usb; /* finally register the configuration */ status = usb_add_config(cdev, &nokia_config_500ma_driver, nokia_bind_config); if (status < 0) goto err_acm_inst; status = usb_add_config(cdev, &nokia_config_100ma_driver, nokia_bind_config); if (status < 0) goto err_put_cfg1; usb_composite_overwrite_options(cdev, &coverwrite); dev_info(&gadget->dev, "%s\n", NOKIA_LONG_NAME); return 0; err_put_cfg1: usb_put_function(f_acm_cfg1); err_acm_inst: usb_put_function_instance(fi_acm); err_usb: gether_cleanup(the_dev); err_ether: cur_line--; while (cur_line >= 0) gserial_free_line(tty_lines[cur_line--]); gphonet_cleanup(); err_phonet: return status; } static int __exit nokia_unbind(struct usb_composite_dev *cdev) { int i; usb_put_function(f_acm_cfg1); usb_put_function(f_acm_cfg2); usb_put_function_instance(fi_acm); gphonet_cleanup(); for (i = 0; i < TTY_PORTS_MAX; i++) gserial_free_line(tty_lines[i]); gether_cleanup(the_dev); return 0; } static __refdata struct usb_composite_driver nokia_driver = { .name = "g_nokia", .dev = &device_desc, .strings = dev_strings, .max_speed = USB_SPEED_HIGH, .bind = nokia_bind, .unbind = __exit_p(nokia_unbind), }; static int __init nokia_init(void) { return usb_composite_probe(&nokia_driver); } module_init(nokia_init); static void __exit nokia_cleanup(void) { usb_composite_unregister(&nokia_driver); } module_exit(nokia_cleanup);
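/*
 * A condensed, user-space sketch of the error-unwind pattern that
 * nokia_bind() above relies on: acquire resources in order, and on failure
 * jump to the label that releases only what was already acquired, in
 * reverse order.  The malloc() calls are stand-ins for the gadget setup
 * calls, not the real APIs.
 */
#include <stdio.h>
#include <stdlib.h>

static int bind_all(void)
{
	char *phonet, *ether;

	phonet = malloc(16);		/* stands in for gphonet_setup() */
	if (!phonet)
		goto err_phonet;

	ether = malloc(16);		/* stands in for gether_setup() */
	if (!ether)
		goto err_ether;

	printf("all resources bound\n");
	free(ether);			/* a real bind would keep these; */
	free(phonet);			/* freed here only to end the demo */
	return 0;

err_ether:
	free(phonet);			/* undo the phonet stand-in */
err_phonet:
	return -1;
}

int main(void)
{
	return bind_all() ? EXIT_FAILURE : EXIT_SUCCESS;
}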
gpl-2.0
IngenicSemiconductor/kernel-inwatch
drivers/media/platform/soc_camera/sh_mobile_csi2.c
2097
9617
/* * Driver for the SH-Mobile MIPI CSI-2 unit * * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/delay.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/videodev2.h> #include <linux/module.h> #include <media/sh_mobile_ceu.h> #include <media/sh_mobile_csi2.h> #include <media/soc_camera.h> #include <media/soc_mediabus.h> #include <media/v4l2-common.h> #include <media/v4l2-dev.h> #include <media/v4l2-device.h> #include <media/v4l2-mediabus.h> #include <media/v4l2-subdev.h> #define SH_CSI2_TREF 0x00 #define SH_CSI2_SRST 0x04 #define SH_CSI2_PHYCNT 0x08 #define SH_CSI2_CHKSUM 0x0C #define SH_CSI2_VCDT 0x10 struct sh_csi2 { struct v4l2_subdev subdev; struct list_head list; unsigned int irq; unsigned long mipi_flags; void __iomem *base; struct platform_device *pdev; struct sh_csi2_client_config *client; }; static int sh_csi2_try_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev); struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data; if (mf->width > 8188) mf->width = 8188; else if (mf->width & 1) mf->width &= ~1; switch (pdata->type) { case SH_CSI2C: switch (mf->code) { case V4L2_MBUS_FMT_UYVY8_2X8: /* YUV422 */ case V4L2_MBUS_FMT_YUYV8_1_5X8: /* YUV420 */ case V4L2_MBUS_FMT_Y8_1X8: /* RAW8 */ case V4L2_MBUS_FMT_SBGGR8_1X8: case V4L2_MBUS_FMT_SGRBG8_1X8: break; default: /* All MIPI CSI-2 devices must support one of the primary formats */ mf->code = V4L2_MBUS_FMT_YUYV8_2X8; } break; case SH_CSI2I: switch (mf->code) { case V4L2_MBUS_FMT_Y8_1X8: /* RAW8 */ case V4L2_MBUS_FMT_SBGGR8_1X8: case V4L2_MBUS_FMT_SGRBG8_1X8: case V4L2_MBUS_FMT_SBGGR10_1X10: /* RAW10 */ case V4L2_MBUS_FMT_SBGGR12_1X12: /* RAW12 */ break; default: /* All MIPI CSI-2 devices must support one of the primary formats */ mf->code = V4L2_MBUS_FMT_SBGGR8_1X8; } break; } return 0; } /* * We have done our best in try_fmt to tell the sensor which formats * we support. If the configuration is now unsuitable for us, we can only * error out.
*/ static int sh_csi2_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev); u32 tmp = (priv->client->channel & 3) << 8; dev_dbg(sd->v4l2_dev->dev, "%s(%u)\n", __func__, mf->code); if (mf->width > 8188 || mf->width & 1) return -EINVAL; switch (mf->code) { case V4L2_MBUS_FMT_UYVY8_2X8: tmp |= 0x1e; /* YUV422 8 bit */ break; case V4L2_MBUS_FMT_YUYV8_1_5X8: tmp |= 0x18; /* YUV420 8 bit */ break; case V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE: tmp |= 0x21; /* RGB555 */ break; case V4L2_MBUS_FMT_RGB565_2X8_BE: tmp |= 0x22; /* RGB565 */ break; case V4L2_MBUS_FMT_Y8_1X8: case V4L2_MBUS_FMT_SBGGR8_1X8: case V4L2_MBUS_FMT_SGRBG8_1X8: tmp |= 0x2a; /* RAW8 */ break; default: return -EINVAL; } iowrite32(tmp, priv->base + SH_CSI2_VCDT); return 0; } static int sh_csi2_g_mbus_config(struct v4l2_subdev *sd, struct v4l2_mbus_config *cfg) { cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_MASTER | V4L2_MBUS_DATA_ACTIVE_HIGH; cfg->type = V4L2_MBUS_PARALLEL; return 0; } static int sh_csi2_s_mbus_config(struct v4l2_subdev *sd, const struct v4l2_mbus_config *cfg) { struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev); struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd); struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd); struct v4l2_mbus_config client_cfg = {.type = V4L2_MBUS_CSI2, .flags = priv->mipi_flags}; return v4l2_subdev_call(client_sd, video, s_mbus_config, &client_cfg); } static struct v4l2_subdev_video_ops sh_csi2_subdev_video_ops = { .s_mbus_fmt = sh_csi2_s_fmt, .try_mbus_fmt = sh_csi2_try_fmt, .g_mbus_config = sh_csi2_g_mbus_config, .s_mbus_config = sh_csi2_s_mbus_config, }; static void sh_csi2_hwinit(struct sh_csi2 *priv) { struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data; __u32 tmp = 0x10; /* Enable MIPI CSI clock lane */ /* Reflect registers immediately */ iowrite32(0x00000001, priv->base + SH_CSI2_TREF); /* reset the CSI2 hardware */ iowrite32(0x00000001, priv->base + SH_CSI2_SRST); udelay(5); iowrite32(0x00000000, priv->base + SH_CSI2_SRST); switch (pdata->type) { case SH_CSI2C: if (priv->client->lanes == 1) tmp |= 1; else /* Default - both lanes */ tmp |= 3; break; case SH_CSI2I: if (!priv->client->lanes || priv->client->lanes > 4) /* Default - all 4 lanes */ tmp |= 0xf; else tmp |= (1 << priv->client->lanes) - 1; } if (priv->client->phy == SH_CSI2_PHY_MAIN) tmp |= 0x8000; iowrite32(tmp, priv->base + SH_CSI2_PHYCNT); tmp = 0; if (pdata->flags & SH_CSI2_ECC) tmp |= 2; if (pdata->flags & SH_CSI2_CRC) tmp |= 1; iowrite32(tmp, priv->base + SH_CSI2_CHKSUM); } static int sh_csi2_client_connect(struct sh_csi2 *priv) { struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data; struct soc_camera_device *icd = v4l2_get_subdev_hostdata(&priv->subdev); struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd); struct device *dev = v4l2_get_subdevdata(&priv->subdev); struct v4l2_mbus_config cfg; unsigned long common_flags, csi2_flags; int i, ret; if (priv->client) return -EBUSY; for (i = 0; i < pdata->num_clients; i++) if (&pdata->clients[i].pdev->dev == icd->pdev) break; dev_dbg(dev, "%s(%p): found #%d\n", __func__, dev, i); if (i == pdata->num_clients) return -ENODEV; /* Check if we can support this camera */ csi2_flags = V4L2_MBUS_CSI2_CONTINUOUS_CLOCK | V4L2_MBUS_CSI2_1_LANE; switch (pdata->type) { case SH_CSI2C: if (pdata->clients[i].lanes != 1) csi2_flags |= V4L2_MBUS_CSI2_2_LANE; break; case SH_CSI2I: switch (pdata->clients[i].lanes) {
default: csi2_flags |= V4L2_MBUS_CSI2_4_LANE; case 3: csi2_flags |= V4L2_MBUS_CSI2_3_LANE; case 2: csi2_flags |= V4L2_MBUS_CSI2_2_LANE; } } cfg.type = V4L2_MBUS_CSI2; ret = v4l2_subdev_call(client_sd, video, g_mbus_config, &cfg); if (ret == -ENOIOCTLCMD) common_flags = csi2_flags; else if (!ret) common_flags = soc_mbus_config_compatible(&cfg, csi2_flags); else common_flags = 0; if (!common_flags) return -EINVAL; /* All good: camera MIPI configuration supported */ priv->mipi_flags = common_flags; priv->client = pdata->clients + i; pm_runtime_get_sync(dev); sh_csi2_hwinit(priv); return 0; } static void sh_csi2_client_disconnect(struct sh_csi2 *priv) { if (!priv->client) return; priv->client = NULL; pm_runtime_put(v4l2_get_subdevdata(&priv->subdev)); } static int sh_csi2_s_power(struct v4l2_subdev *sd, int on) { struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev); if (on) return sh_csi2_client_connect(priv); sh_csi2_client_disconnect(priv); return 0; } static struct v4l2_subdev_core_ops sh_csi2_subdev_core_ops = { .s_power = sh_csi2_s_power, }; static struct v4l2_subdev_ops sh_csi2_subdev_ops = { .core = &sh_csi2_subdev_core_ops, .video = &sh_csi2_subdev_video_ops, }; static int sh_csi2_probe(struct platform_device *pdev) { struct resource *res; unsigned int irq; int ret; struct sh_csi2 *priv; /* Platform data specify the PHY, lanes, ECC, CRC */ struct sh_csi2_pdata *pdata = pdev->dev.platform_data; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); /* Interrupt unused so far */ irq = platform_get_irq(pdev, 0); if (!res || (int)irq <= 0 || !pdata) { dev_err(&pdev->dev, "Not enough CSI2 platform resources.\n"); return -ENODEV; } /* TODO: Add support for CSI2I. Careful: different register layout! */ if (pdata->type != SH_CSI2C) { dev_err(&pdev->dev, "Only CSI2C supported ATM.\n"); return -EINVAL; } priv = devm_kzalloc(&pdev->dev, sizeof(struct sh_csi2), GFP_KERNEL); if (!priv) return -ENOMEM; priv->irq = irq; priv->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); priv->pdev = pdev; platform_set_drvdata(pdev, priv); v4l2_subdev_init(&priv->subdev, &sh_csi2_subdev_ops); v4l2_set_subdevdata(&priv->subdev, &pdev->dev); snprintf(priv->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s.mipi-csi", dev_name(pdata->v4l2_dev->dev)); ret = v4l2_device_register_subdev(pdata->v4l2_dev, &priv->subdev); dev_dbg(&pdev->dev, "%s(%p): ret(register_subdev) = %d\n", __func__, priv, ret); if (ret < 0) goto esdreg; pm_runtime_enable(&pdev->dev); dev_dbg(&pdev->dev, "CSI2 probed.\n"); return 0; esdreg: platform_set_drvdata(pdev, NULL); return ret; } static int sh_csi2_remove(struct platform_device *pdev) { struct sh_csi2 *priv = platform_get_drvdata(pdev); v4l2_device_unregister_subdev(&priv->subdev); pm_runtime_disable(&pdev->dev); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver __refdata sh_csi2_pdrv = { .remove = sh_csi2_remove, .probe = sh_csi2_probe, .driver = { .name = "sh-mobile-csi2", .owner = THIS_MODULE, }, }; module_platform_driver(sh_csi2_pdrv); MODULE_DESCRIPTION("SH-Mobile MIPI CSI-2 driver"); MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:sh-mobile-csi2");
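/*
 * A stand-alone sketch of the PHY lane-enable mask computed in
 * sh_csi2_hwinit() above for the CSI2I variant: lane counts 1..4 map to
 * the low bits via (1 << lanes) - 1, while 0 (or anything above 4) falls
 * back to all four lanes.  csi2i_lane_mask() is a hypothetical helper.
 */
#include <stdio.h>

static unsigned int csi2i_lane_mask(unsigned int lanes)
{
	if (!lanes || lanes > 4)
		return 0xf;		/* default: all 4 lanes */
	return (1u << lanes) - 1;
}

int main(void)
{
	unsigned int lanes;

	for (lanes = 0; lanes <= 5; lanes++)
		printf("lanes=%u -> mask=0x%x\n", lanes, csi2i_lane_mask(lanes));
	return 0;
}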
gpl-2.0
GalaxyTab4/android_kernel_samsung_matissevewifi
drivers/i2c/busses/scx200_acb.c
2609
14021
/* Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> National Semiconductor SCx200 ACCESS.bus support Also supports the AMD CS5535 and AMD CS5536 Based on i2c-keywest.c which is: Copyright (c) 2001 Benjamin Herrenschmidt <benh@kernel.crashing.org> Copyright (c) 2000 Philip Edelbrock <phil@stimpy.netroedge.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/scx200.h> MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>"); MODULE_DESCRIPTION("NatSemi SCx200 ACCESS.bus Driver"); MODULE_ALIAS("platform:cs5535-smb"); MODULE_LICENSE("GPL"); #define MAX_DEVICES 4 static int base[MAX_DEVICES] = { 0x820, 0x840 }; module_param_array(base, int, NULL, 0); MODULE_PARM_DESC(base, "Base addresses for the ACCESS.bus controllers"); #define POLL_TIMEOUT (HZ/5) enum scx200_acb_state { state_idle, state_address, state_command, state_repeat_start, state_quick, state_read, state_write, }; static const char *scx200_acb_state_name[] = { "idle", "address", "command", "repeat_start", "quick", "read", "write", }; /* Physical interface */ struct scx200_acb_iface { struct scx200_acb_iface *next; struct i2c_adapter adapter; unsigned base; struct mutex mutex; /* State machine data */ enum scx200_acb_state state; int result; u8 address_byte; u8 command; u8 *ptr; char needs_reset; unsigned len; }; /* Register Definitions */ #define ACBSDA (iface->base + 0) #define ACBST (iface->base + 1) #define ACBST_SDAST 0x40 /* SDA Status */ #define ACBST_BER 0x20 #define ACBST_NEGACK 0x10 /* Negative Acknowledge */ #define ACBST_STASTR 0x08 /* Stall After Start */ #define ACBST_MASTER 0x02 #define ACBCST (iface->base + 2) #define ACBCST_BB 0x02 #define ACBCTL1 (iface->base + 3) #define ACBCTL1_STASTRE 0x80 #define ACBCTL1_NMINTE 0x40 #define ACBCTL1_ACK 0x10 #define ACBCTL1_STOP 0x02 #define ACBCTL1_START 0x01 #define ACBADDR (iface->base + 4) #define ACBCTL2 (iface->base + 5) #define ACBCTL2_ENABLE 0x01 /************************************************************************/ static void scx200_acb_machine(struct scx200_acb_iface *iface, u8 status) { const char *errmsg; dev_dbg(&iface->adapter.dev, "state %s, status = 0x%02x\n", scx200_acb_state_name[iface->state], status); if (status & ACBST_BER) { errmsg = "bus error"; goto error; } if (!(status & ACBST_MASTER)) { errmsg = "not master"; goto error; } if (status & ACBST_NEGACK) { dev_dbg(&iface->adapter.dev, "negative ack in state %s\n", scx200_acb_state_name[iface->state]); iface->state = state_idle; iface->result = -ENXIO; outb(inb(ACBCTL1) | ACBCTL1_STOP, ACBCTL1); outb(ACBST_STASTR | ACBST_NEGACK, ACBST); /* Reset the 
status register */ outb(0, ACBST); return; } switch (iface->state) { case state_idle: dev_warn(&iface->adapter.dev, "interrupt in idle state\n"); break; case state_address: /* Do a pointer write first */ outb(iface->address_byte & ~1, ACBSDA); iface->state = state_command; break; case state_command: outb(iface->command, ACBSDA); if (iface->address_byte & 1) iface->state = state_repeat_start; else iface->state = state_write; break; case state_repeat_start: outb(inb(ACBCTL1) | ACBCTL1_START, ACBCTL1); /* fallthrough */ case state_quick: if (iface->address_byte & 1) { if (iface->len == 1) outb(inb(ACBCTL1) | ACBCTL1_ACK, ACBCTL1); else outb(inb(ACBCTL1) & ~ACBCTL1_ACK, ACBCTL1); outb(iface->address_byte, ACBSDA); iface->state = state_read; } else { outb(iface->address_byte, ACBSDA); iface->state = state_write; } break; case state_read: /* Set ACK if _next_ byte will be the last one */ if (iface->len == 2) outb(inb(ACBCTL1) | ACBCTL1_ACK, ACBCTL1); else outb(inb(ACBCTL1) & ~ACBCTL1_ACK, ACBCTL1); if (iface->len == 1) { iface->result = 0; iface->state = state_idle; outb(inb(ACBCTL1) | ACBCTL1_STOP, ACBCTL1); } *iface->ptr++ = inb(ACBSDA); --iface->len; break; case state_write: if (iface->len == 0) { iface->result = 0; iface->state = state_idle; outb(inb(ACBCTL1) | ACBCTL1_STOP, ACBCTL1); break; } outb(*iface->ptr++, ACBSDA); --iface->len; break; } return; error: dev_err(&iface->adapter.dev, "%s in state %s (addr=0x%02x, len=%d, status=0x%02x)\n", errmsg, scx200_acb_state_name[iface->state], iface->address_byte, iface->len, status); iface->state = state_idle; iface->result = -EIO; iface->needs_reset = 1; } static void scx200_acb_poll(struct scx200_acb_iface *iface) { u8 status; unsigned long timeout; timeout = jiffies + POLL_TIMEOUT; while (1) { status = inb(ACBST); /* Reset the status register to avoid the hang */ outb(0, ACBST); if ((status & (ACBST_SDAST|ACBST_BER|ACBST_NEGACK)) != 0) { scx200_acb_machine(iface, status); return; } if (time_after(jiffies, timeout)) break; cpu_relax(); cond_resched(); } dev_err(&iface->adapter.dev, "timeout in state %s\n", scx200_acb_state_name[iface->state]); iface->state = state_idle; iface->result = -EIO; iface->needs_reset = 1; } static void scx200_acb_reset(struct scx200_acb_iface *iface) { /* Disable the ACCESS.bus device and Configure the SCL frequency: 16 clock cycles */ outb(0x70, ACBCTL2); /* Polling mode */ outb(0, ACBCTL1); /* Disable slave address */ outb(0, ACBADDR); /* Enable the ACCESS.bus device */ outb(inb(ACBCTL2) | ACBCTL2_ENABLE, ACBCTL2); /* Free STALL after START */ outb(inb(ACBCTL1) & ~(ACBCTL1_STASTRE | ACBCTL1_NMINTE), ACBCTL1); /* Send a STOP */ outb(inb(ACBCTL1) | ACBCTL1_STOP, ACBCTL1); /* Clear BER, NEGACK and STASTR bits */ outb(ACBST_BER | ACBST_NEGACK | ACBST_STASTR, ACBST); /* Clear BB bit */ outb(inb(ACBCST) | ACBCST_BB, ACBCST); } static s32 scx200_acb_smbus_xfer(struct i2c_adapter *adapter, u16 address, unsigned short flags, char rw, u8 command, int size, union i2c_smbus_data *data) { struct scx200_acb_iface *iface = i2c_get_adapdata(adapter); int len; u8 *buffer; u16 cur_word; int rc; switch (size) { case I2C_SMBUS_QUICK: len = 0; buffer = NULL; break; case I2C_SMBUS_BYTE: len = 1; buffer = rw ? 
&data->byte : &command; break; case I2C_SMBUS_BYTE_DATA: len = 1; buffer = &data->byte; break; case I2C_SMBUS_WORD_DATA: len = 2; cur_word = cpu_to_le16(data->word); buffer = (u8 *)&cur_word; break; case I2C_SMBUS_I2C_BLOCK_DATA: len = data->block[0]; if (len == 0 || len > I2C_SMBUS_BLOCK_MAX) return -EINVAL; buffer = &data->block[1]; break; default: return -EINVAL; } dev_dbg(&adapter->dev, "size=%d, address=0x%x, command=0x%x, len=%d, read=%d\n", size, address, command, len, rw); if (!len && rw == I2C_SMBUS_READ) { dev_dbg(&adapter->dev, "zero length read\n"); return -EINVAL; } mutex_lock(&iface->mutex); iface->address_byte = (address << 1) | rw; iface->command = command; iface->ptr = buffer; iface->len = len; iface->result = -EINVAL; iface->needs_reset = 0; outb(inb(ACBCTL1) | ACBCTL1_START, ACBCTL1); if (size == I2C_SMBUS_QUICK || size == I2C_SMBUS_BYTE) iface->state = state_quick; else iface->state = state_address; while (iface->state != state_idle) scx200_acb_poll(iface); if (iface->needs_reset) scx200_acb_reset(iface); rc = iface->result; mutex_unlock(&iface->mutex); if (rc == 0 && size == I2C_SMBUS_WORD_DATA && rw == I2C_SMBUS_READ) data->word = le16_to_cpu(cur_word); #ifdef DEBUG dev_dbg(&adapter->dev, "transfer done, result: %d", rc); if (buffer) { int i; printk(" data:"); for (i = 0; i < len; ++i) printk(" %02x", buffer[i]); } printk("\n"); #endif return rc; } static u32 scx200_acb_func(struct i2c_adapter *adapter) { return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_I2C_BLOCK; } /* For now, we only handle combined mode (smbus) */ static const struct i2c_algorithm scx200_acb_algorithm = { .smbus_xfer = scx200_acb_smbus_xfer, .functionality = scx200_acb_func, }; static struct scx200_acb_iface *scx200_acb_list; static DEFINE_MUTEX(scx200_acb_list_mutex); static int scx200_acb_probe(struct scx200_acb_iface *iface) { u8 val; /* Disable the ACCESS.bus device and Configure the SCL frequency: 16 clock cycles */ outb(0x70, ACBCTL2); if (inb(ACBCTL2) != 0x70) { pr_debug("ACBCTL2 readback failed\n"); return -ENXIO; } outb(inb(ACBCTL1) | ACBCTL1_NMINTE, ACBCTL1); val = inb(ACBCTL1); if (val) { pr_debug("disabled, but ACBCTL1=0x%02x\n", val); return -ENXIO; } outb(inb(ACBCTL2) | ACBCTL2_ENABLE, ACBCTL2); outb(inb(ACBCTL1) | ACBCTL1_NMINTE, ACBCTL1); val = inb(ACBCTL1); if ((val & ACBCTL1_NMINTE) != ACBCTL1_NMINTE) { pr_debug("enabled, but NMINTE won't be set, ACBCTL1=0x%02x\n", val); return -ENXIO; } return 0; } static struct scx200_acb_iface *scx200_create_iface(const char *text, struct device *dev, int index) { struct scx200_acb_iface *iface; struct i2c_adapter *adapter; iface = kzalloc(sizeof(*iface), GFP_KERNEL); if (!iface) { pr_err("can't allocate memory\n"); return NULL; } adapter = &iface->adapter; i2c_set_adapdata(adapter, iface); snprintf(adapter->name, sizeof(adapter->name), "%s ACB%d", text, index); adapter->owner = THIS_MODULE; adapter->algo = &scx200_acb_algorithm; adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; adapter->dev.parent = dev; mutex_init(&iface->mutex); return iface; } static int scx200_acb_create(struct scx200_acb_iface *iface) { struct i2c_adapter *adapter; int rc; adapter = &iface->adapter; rc = scx200_acb_probe(iface); if (rc) { pr_warn("probe failed\n"); return rc; } scx200_acb_reset(iface); if (i2c_add_adapter(adapter) < 0) { pr_err("failed to register\n"); return -ENODEV; } if (!adapter->dev.parent) { /* If there's no dev, we're tracking (ISA) ifaces manually */ 
mutex_lock(&scx200_acb_list_mutex); iface->next = scx200_acb_list; scx200_acb_list = iface; mutex_unlock(&scx200_acb_list_mutex); } return 0; } static struct scx200_acb_iface *scx200_create_dev(const char *text, unsigned long base, int index, struct device *dev) { struct scx200_acb_iface *iface; int rc; iface = scx200_create_iface(text, dev, index); if (iface == NULL) return NULL; if (!request_region(base, 8, iface->adapter.name)) { pr_err("can't allocate io 0x%lx-0x%lx\n", base, base + 8 - 1); goto errout_free; } iface->base = base; rc = scx200_acb_create(iface); if (rc == 0) return iface; release_region(base, 8); errout_free: kfree(iface); return NULL; } static int scx200_probe(struct platform_device *pdev) { struct scx200_acb_iface *iface; struct resource *res; res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (!res) { dev_err(&pdev->dev, "can't fetch device resource info\n"); return -ENODEV; } iface = scx200_create_dev("CS5535", res->start, 0, &pdev->dev); if (!iface) return -EIO; dev_info(&pdev->dev, "SCx200 device '%s' registered\n", iface->adapter.name); platform_set_drvdata(pdev, iface); return 0; } static void scx200_cleanup_iface(struct scx200_acb_iface *iface) { i2c_del_adapter(&iface->adapter); release_region(iface->base, 8); kfree(iface); } static int scx200_remove(struct platform_device *pdev) { struct scx200_acb_iface *iface; iface = platform_get_drvdata(pdev); scx200_cleanup_iface(iface); return 0; } static struct platform_driver scx200_pci_driver = { .driver = { .name = "cs5535-smb", .owner = THIS_MODULE, }, .probe = scx200_probe, .remove = scx200_remove, }; static DEFINE_PCI_DEVICE_TABLE(scx200_isa) = { { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_BRIDGE) }, { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE) }, { 0, } }; static __init void scx200_scan_isa(void) { int i; if (!pci_dev_present(scx200_isa)) return; for (i = 0; i < MAX_DEVICES; ++i) { if (base[i] == 0) continue; /* XXX: should we care about failures? */ scx200_create_dev("SCx200", base[i], i, NULL); } } static int __init scx200_acb_init(void) { pr_debug("NatSemi SCx200 ACCESS.bus Driver\n"); /* First scan for ISA-based devices */ scx200_scan_isa(); /* XXX: should we care about errors? */ /* If at least one bus was created, init must succeed */ if (scx200_acb_list) return 0; /* No ISA devices; register the platform driver for PCI-based devices */ return platform_driver_register(&scx200_pci_driver); } static void __exit scx200_acb_cleanup(void) { struct scx200_acb_iface *iface; platform_driver_unregister(&scx200_pci_driver); mutex_lock(&scx200_acb_list_mutex); while ((iface = scx200_acb_list) != NULL) { scx200_acb_list = iface->next; mutex_unlock(&scx200_acb_list_mutex); scx200_cleanup_iface(iface); mutex_lock(&scx200_acb_list_mutex); } mutex_unlock(&scx200_acb_list_mutex); } module_init(scx200_acb_init); module_exit(scx200_acb_cleanup);
gpl-2.0
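
For context, the scx200_acb_smbus_xfer() path in the driver above is reachable from user space through the generic i2c-dev character device, which is a convenient way to exercise the quick/byte/word/block state machine it implements. The stand-alone sketch below issues an SMBus "read word data" transaction; the bus number (/dev/i2c-0), the 7-bit slave address (0x48) and the command byte are placeholder assumptions, not values implied by the driver.

/* Minimal i2c-dev client sketch: SMBus "read word data" transaction.
 * The bus number and slave address below are hypothetical examples. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	int fd = open("/dev/i2c-0", O_RDWR);	/* adapter registered by the driver */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, I2C_SLAVE, 0x48) < 0) {	/* 0x48: example 7-bit address */
		perror("I2C_SLAVE");
		close(fd);
		return 1;
	}

	union i2c_smbus_data data;
	struct i2c_smbus_ioctl_data args = {
		.read_write = I2C_SMBUS_READ,
		.command    = 0x00,		/* example command/register byte */
		.size       = I2C_SMBUS_WORD_DATA,
		.data       = &data,
	};
	/* On this hardware the request ends up in scx200_acb_smbus_xfer() */
	if (ioctl(fd, I2C_SMBUS, &args) < 0) {
		perror("I2C_SMBUS");
		close(fd);
		return 1;
	}
	printf("word at command 0x00: 0x%04x\n", data.word);
	close(fd);
	return 0;
}
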
Vangreen/android_kernel_lge_msm8226
arch/powerpc/kernel/sysfs.c
2865
16723
#include <linux/device.h> #include <linux/cpu.h> #include <linux/smp.h> #include <linux/percpu.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/export.h> #include <linux/nodemask.h> #include <linux/cpumask.h> #include <linux/notifier.h> #include <asm/current.h> #include <asm/processor.h> #include <asm/cputable.h> #include <asm/hvcall.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/smp.h> #include <asm/pmc.h> #include "cacheinfo.h" #ifdef CONFIG_PPC64 #include <asm/paca.h> #include <asm/lppaca.h> #endif static DEFINE_PER_CPU(struct cpu, cpu_devices); /* * SMT snooze delay stuff, 64-bit only for now */ #ifdef CONFIG_PPC64 /* Time in microseconds we delay before sleeping in the idle loop */ DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 }; static ssize_t store_smt_snooze_delay(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct cpu *cpu = container_of(dev, struct cpu, dev); ssize_t ret; long snooze; ret = sscanf(buf, "%ld", &snooze); if (ret != 1) return -EINVAL; per_cpu(smt_snooze_delay, cpu->dev.id) = snooze; update_smt_snooze_delay(snooze); return count; } static ssize_t show_smt_snooze_delay(struct device *dev, struct device_attribute *attr, char *buf) { struct cpu *cpu = container_of(dev, struct cpu, dev); return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->dev.id)); } static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay, store_smt_snooze_delay); static int __init setup_smt_snooze_delay(char *str) { unsigned int cpu; long snooze; if (!cpu_has_feature(CPU_FTR_SMT)) return 1; snooze = simple_strtol(str, NULL, 10); for_each_possible_cpu(cpu) per_cpu(smt_snooze_delay, cpu) = snooze; return 1; } __setup("smt-snooze-delay=", setup_smt_snooze_delay); #endif /* CONFIG_PPC64 */ /* * Enabling PMCs will slow partition context switch times so we only do * it the first time we write to the PMCs. 
*/ static DEFINE_PER_CPU(char, pmcs_enabled); void ppc_enable_pmcs(void) { ppc_set_pmu_inuse(1); /* Only need to enable them once */ if (__get_cpu_var(pmcs_enabled)) return; __get_cpu_var(pmcs_enabled) = 1; if (ppc_md.enable_pmcs) ppc_md.enable_pmcs(); } EXPORT_SYMBOL(ppc_enable_pmcs); #define SYSFS_PMCSETUP(NAME, ADDRESS) \ static void read_##NAME(void *val) \ { \ *(unsigned long *)val = mfspr(ADDRESS); \ } \ static void write_##NAME(void *val) \ { \ ppc_enable_pmcs(); \ mtspr(ADDRESS, *(unsigned long *)val); \ } \ static ssize_t show_##NAME(struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ struct cpu *cpu = container_of(dev, struct cpu, dev); \ unsigned long val; \ smp_call_function_single(cpu->dev.id, read_##NAME, &val, 1); \ return sprintf(buf, "%lx\n", val); \ } \ static ssize_t __used \ store_##NAME(struct device *dev, struct device_attribute *attr, \ const char *buf, size_t count) \ { \ struct cpu *cpu = container_of(dev, struct cpu, dev); \ unsigned long val; \ int ret = sscanf(buf, "%lx", &val); \ if (ret != 1) \ return -EINVAL; \ smp_call_function_single(cpu->dev.id, write_##NAME, &val, 1); \ return count; \ } /* Let's define all possible registers, we'll only hook up the ones * that are implemented on the current processor */ #if defined(CONFIG_PPC64) #define HAS_PPC_PMC_CLASSIC 1 #define HAS_PPC_PMC_IBM 1 #define HAS_PPC_PMC_PA6T 1 #elif defined(CONFIG_6xx) #define HAS_PPC_PMC_CLASSIC 1 #define HAS_PPC_PMC_IBM 1 #define HAS_PPC_PMC_G4 1 #endif #ifdef HAS_PPC_PMC_CLASSIC SYSFS_PMCSETUP(mmcr0, SPRN_MMCR0); SYSFS_PMCSETUP(mmcr1, SPRN_MMCR1); SYSFS_PMCSETUP(pmc1, SPRN_PMC1); SYSFS_PMCSETUP(pmc2, SPRN_PMC2); SYSFS_PMCSETUP(pmc3, SPRN_PMC3); SYSFS_PMCSETUP(pmc4, SPRN_PMC4); SYSFS_PMCSETUP(pmc5, SPRN_PMC5); SYSFS_PMCSETUP(pmc6, SPRN_PMC6); #ifdef HAS_PPC_PMC_G4 SYSFS_PMCSETUP(mmcr2, SPRN_MMCR2); #endif #ifdef CONFIG_PPC64 SYSFS_PMCSETUP(pmc7, SPRN_PMC7); SYSFS_PMCSETUP(pmc8, SPRN_PMC8); SYSFS_PMCSETUP(mmcra, SPRN_MMCRA); SYSFS_PMCSETUP(purr, SPRN_PURR); SYSFS_PMCSETUP(spurr, SPRN_SPURR); SYSFS_PMCSETUP(dscr, SPRN_DSCR); SYSFS_PMCSETUP(pir, SPRN_PIR); static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra); static DEVICE_ATTR(spurr, 0600, show_spurr, NULL); static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr); static DEVICE_ATTR(purr, 0600, show_purr, store_purr); static DEVICE_ATTR(pir, 0400, show_pir, NULL); unsigned long dscr_default = 0; EXPORT_SYMBOL(dscr_default); static ssize_t show_dscr_default(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%lx\n", dscr_default); } static ssize_t __used store_dscr_default(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned long val; int ret = 0; ret = sscanf(buf, "%lx", &val); if (ret != 1) return -EINVAL; dscr_default = val; return count; } static DEVICE_ATTR(dscr_default, 0600, show_dscr_default, store_dscr_default); static void sysfs_create_dscr_default(void) { int err = 0; if (cpu_has_feature(CPU_FTR_DSCR)) err = device_create_file(cpu_subsys.dev_root, &dev_attr_dscr_default); } #endif /* CONFIG_PPC64 */ #ifdef HAS_PPC_PMC_PA6T SYSFS_PMCSETUP(pa6t_pmc0, SPRN_PA6T_PMC0); SYSFS_PMCSETUP(pa6t_pmc1, SPRN_PA6T_PMC1); SYSFS_PMCSETUP(pa6t_pmc2, SPRN_PA6T_PMC2); SYSFS_PMCSETUP(pa6t_pmc3, SPRN_PA6T_PMC3); SYSFS_PMCSETUP(pa6t_pmc4, SPRN_PA6T_PMC4); SYSFS_PMCSETUP(pa6t_pmc5, SPRN_PA6T_PMC5); #ifdef CONFIG_DEBUG_KERNEL SYSFS_PMCSETUP(hid0, SPRN_HID0); SYSFS_PMCSETUP(hid1, SPRN_HID1); SYSFS_PMCSETUP(hid4, SPRN_HID4); SYSFS_PMCSETUP(hid5, SPRN_HID5); 
SYSFS_PMCSETUP(ima0, SPRN_PA6T_IMA0); SYSFS_PMCSETUP(ima1, SPRN_PA6T_IMA1); SYSFS_PMCSETUP(ima2, SPRN_PA6T_IMA2); SYSFS_PMCSETUP(ima3, SPRN_PA6T_IMA3); SYSFS_PMCSETUP(ima4, SPRN_PA6T_IMA4); SYSFS_PMCSETUP(ima5, SPRN_PA6T_IMA5); SYSFS_PMCSETUP(ima6, SPRN_PA6T_IMA6); SYSFS_PMCSETUP(ima7, SPRN_PA6T_IMA7); SYSFS_PMCSETUP(ima8, SPRN_PA6T_IMA8); SYSFS_PMCSETUP(ima9, SPRN_PA6T_IMA9); SYSFS_PMCSETUP(imaat, SPRN_PA6T_IMAAT); SYSFS_PMCSETUP(btcr, SPRN_PA6T_BTCR); SYSFS_PMCSETUP(pccr, SPRN_PA6T_PCCR); SYSFS_PMCSETUP(rpccr, SPRN_PA6T_RPCCR); SYSFS_PMCSETUP(der, SPRN_PA6T_DER); SYSFS_PMCSETUP(mer, SPRN_PA6T_MER); SYSFS_PMCSETUP(ber, SPRN_PA6T_BER); SYSFS_PMCSETUP(ier, SPRN_PA6T_IER); SYSFS_PMCSETUP(sier, SPRN_PA6T_SIER); SYSFS_PMCSETUP(siar, SPRN_PA6T_SIAR); SYSFS_PMCSETUP(tsr0, SPRN_PA6T_TSR0); SYSFS_PMCSETUP(tsr1, SPRN_PA6T_TSR1); SYSFS_PMCSETUP(tsr2, SPRN_PA6T_TSR2); SYSFS_PMCSETUP(tsr3, SPRN_PA6T_TSR3); #endif /* CONFIG_DEBUG_KERNEL */ #endif /* HAS_PPC_PMC_PA6T */ #ifdef HAS_PPC_PMC_IBM static struct device_attribute ibm_common_attrs[] = { __ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0), __ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1), }; #endif /* HAS_PPC_PMC_G4 */ #ifdef HAS_PPC_PMC_G4 static struct device_attribute g4_common_attrs[] = { __ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0), __ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1), __ATTR(mmcr2, 0600, show_mmcr2, store_mmcr2), }; #endif /* HAS_PPC_PMC_G4 */ static struct device_attribute classic_pmc_attrs[] = { __ATTR(pmc1, 0600, show_pmc1, store_pmc1), __ATTR(pmc2, 0600, show_pmc2, store_pmc2), __ATTR(pmc3, 0600, show_pmc3, store_pmc3), __ATTR(pmc4, 0600, show_pmc4, store_pmc4), __ATTR(pmc5, 0600, show_pmc5, store_pmc5), __ATTR(pmc6, 0600, show_pmc6, store_pmc6), #ifdef CONFIG_PPC64 __ATTR(pmc7, 0600, show_pmc7, store_pmc7), __ATTR(pmc8, 0600, show_pmc8, store_pmc8), #endif }; #ifdef HAS_PPC_PMC_PA6T static struct device_attribute pa6t_attrs[] = { __ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0), __ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1), __ATTR(pmc0, 0600, show_pa6t_pmc0, store_pa6t_pmc0), __ATTR(pmc1, 0600, show_pa6t_pmc1, store_pa6t_pmc1), __ATTR(pmc2, 0600, show_pa6t_pmc2, store_pa6t_pmc2), __ATTR(pmc3, 0600, show_pa6t_pmc3, store_pa6t_pmc3), __ATTR(pmc4, 0600, show_pa6t_pmc4, store_pa6t_pmc4), __ATTR(pmc5, 0600, show_pa6t_pmc5, store_pa6t_pmc5), #ifdef CONFIG_DEBUG_KERNEL __ATTR(hid0, 0600, show_hid0, store_hid0), __ATTR(hid1, 0600, show_hid1, store_hid1), __ATTR(hid4, 0600, show_hid4, store_hid4), __ATTR(hid5, 0600, show_hid5, store_hid5), __ATTR(ima0, 0600, show_ima0, store_ima0), __ATTR(ima1, 0600, show_ima1, store_ima1), __ATTR(ima2, 0600, show_ima2, store_ima2), __ATTR(ima3, 0600, show_ima3, store_ima3), __ATTR(ima4, 0600, show_ima4, store_ima4), __ATTR(ima5, 0600, show_ima5, store_ima5), __ATTR(ima6, 0600, show_ima6, store_ima6), __ATTR(ima7, 0600, show_ima7, store_ima7), __ATTR(ima8, 0600, show_ima8, store_ima8), __ATTR(ima9, 0600, show_ima9, store_ima9), __ATTR(imaat, 0600, show_imaat, store_imaat), __ATTR(btcr, 0600, show_btcr, store_btcr), __ATTR(pccr, 0600, show_pccr, store_pccr), __ATTR(rpccr, 0600, show_rpccr, store_rpccr), __ATTR(der, 0600, show_der, store_der), __ATTR(mer, 0600, show_mer, store_mer), __ATTR(ber, 0600, show_ber, store_ber), __ATTR(ier, 0600, show_ier, store_ier), __ATTR(sier, 0600, show_sier, store_sier), __ATTR(siar, 0600, show_siar, store_siar), __ATTR(tsr0, 0600, show_tsr0, store_tsr0), __ATTR(tsr1, 0600, show_tsr1, store_tsr1), __ATTR(tsr2, 0600, show_tsr2, store_tsr2), __ATTR(tsr3, 0600, show_tsr3, store_tsr3), 
#endif /* CONFIG_DEBUG_KERNEL */ }; #endif /* HAS_PPC_PMC_PA6T */ #endif /* HAS_PPC_PMC_CLASSIC */ static void __cpuinit register_cpu_online(unsigned int cpu) { struct cpu *c = &per_cpu(cpu_devices, cpu); struct device *s = &c->dev; struct device_attribute *attrs, *pmc_attrs; int i, nattrs; #ifdef CONFIG_PPC64 if (cpu_has_feature(CPU_FTR_SMT)) device_create_file(s, &dev_attr_smt_snooze_delay); #endif /* PMC stuff */ switch (cur_cpu_spec->pmc_type) { #ifdef HAS_PPC_PMC_IBM case PPC_PMC_IBM: attrs = ibm_common_attrs; nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute); pmc_attrs = classic_pmc_attrs; break; #endif /* HAS_PPC_PMC_IBM */ #ifdef HAS_PPC_PMC_G4 case PPC_PMC_G4: attrs = g4_common_attrs; nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute); pmc_attrs = classic_pmc_attrs; break; #endif /* HAS_PPC_PMC_G4 */ #ifdef HAS_PPC_PMC_PA6T case PPC_PMC_PA6T: /* PA Semi starts counting at PMC0 */ attrs = pa6t_attrs; nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute); pmc_attrs = NULL; break; #endif /* HAS_PPC_PMC_PA6T */ default: attrs = NULL; nattrs = 0; pmc_attrs = NULL; } for (i = 0; i < nattrs; i++) device_create_file(s, &attrs[i]); if (pmc_attrs) for (i = 0; i < cur_cpu_spec->num_pmcs; i++) device_create_file(s, &pmc_attrs[i]); #ifdef CONFIG_PPC64 if (cpu_has_feature(CPU_FTR_MMCRA)) device_create_file(s, &dev_attr_mmcra); if (cpu_has_feature(CPU_FTR_PURR)) device_create_file(s, &dev_attr_purr); if (cpu_has_feature(CPU_FTR_SPURR)) device_create_file(s, &dev_attr_spurr); if (cpu_has_feature(CPU_FTR_DSCR)) device_create_file(s, &dev_attr_dscr); if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2)) device_create_file(s, &dev_attr_pir); #endif /* CONFIG_PPC64 */ cacheinfo_cpu_online(cpu); } #ifdef CONFIG_HOTPLUG_CPU static void unregister_cpu_online(unsigned int cpu) { struct cpu *c = &per_cpu(cpu_devices, cpu); struct device *s = &c->dev; struct device_attribute *attrs, *pmc_attrs; int i, nattrs; BUG_ON(!c->hotpluggable); #ifdef CONFIG_PPC64 if (cpu_has_feature(CPU_FTR_SMT)) device_remove_file(s, &dev_attr_smt_snooze_delay); #endif /* PMC stuff */ switch (cur_cpu_spec->pmc_type) { #ifdef HAS_PPC_PMC_IBM case PPC_PMC_IBM: attrs = ibm_common_attrs; nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute); pmc_attrs = classic_pmc_attrs; break; #endif /* HAS_PPC_PMC_IBM */ #ifdef HAS_PPC_PMC_G4 case PPC_PMC_G4: attrs = g4_common_attrs; nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute); pmc_attrs = classic_pmc_attrs; break; #endif /* HAS_PPC_PMC_G4 */ #ifdef HAS_PPC_PMC_PA6T case PPC_PMC_PA6T: /* PA Semi starts counting at PMC0 */ attrs = pa6t_attrs; nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute); pmc_attrs = NULL; break; #endif /* HAS_PPC_PMC_PA6T */ default: attrs = NULL; nattrs = 0; pmc_attrs = NULL; } for (i = 0; i < nattrs; i++) device_remove_file(s, &attrs[i]); if (pmc_attrs) for (i = 0; i < cur_cpu_spec->num_pmcs; i++) device_remove_file(s, &pmc_attrs[i]); #ifdef CONFIG_PPC64 if (cpu_has_feature(CPU_FTR_MMCRA)) device_remove_file(s, &dev_attr_mmcra); if (cpu_has_feature(CPU_FTR_PURR)) device_remove_file(s, &dev_attr_purr); if (cpu_has_feature(CPU_FTR_SPURR)) device_remove_file(s, &dev_attr_spurr); if (cpu_has_feature(CPU_FTR_DSCR)) device_remove_file(s, &dev_attr_dscr); if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2)) device_remove_file(s, &dev_attr_pir); #endif /* CONFIG_PPC64 */ cacheinfo_cpu_offline(cpu); } #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE ssize_t arch_cpu_probe(const char *buf, size_t count) { if (ppc_md.cpu_probe) 
return ppc_md.cpu_probe(buf, count); return -EINVAL; } ssize_t arch_cpu_release(const char *buf, size_t count) { if (ppc_md.cpu_release) return ppc_md.cpu_release(buf, count); return -EINVAL; } #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ #endif /* CONFIG_HOTPLUG_CPU */ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned int)(long)hcpu; switch (action) { case CPU_ONLINE: case CPU_ONLINE_FROZEN: register_cpu_online(cpu); break; #ifdef CONFIG_HOTPLUG_CPU case CPU_DEAD: case CPU_DEAD_FROZEN: unregister_cpu_online(cpu); break; #endif } return NOTIFY_OK; } static struct notifier_block __cpuinitdata sysfs_cpu_nb = { .notifier_call = sysfs_cpu_notify, }; static DEFINE_MUTEX(cpu_mutex); int cpu_add_dev_attr(struct device_attribute *attr) { int cpu; mutex_lock(&cpu_mutex); for_each_possible_cpu(cpu) { device_create_file(get_cpu_device(cpu), attr); } mutex_unlock(&cpu_mutex); return 0; } EXPORT_SYMBOL_GPL(cpu_add_dev_attr); int cpu_add_dev_attr_group(struct attribute_group *attrs) { int cpu; struct device *dev; int ret; mutex_lock(&cpu_mutex); for_each_possible_cpu(cpu) { dev = get_cpu_device(cpu); ret = sysfs_create_group(&dev->kobj, attrs); WARN_ON(ret != 0); } mutex_unlock(&cpu_mutex); return 0; } EXPORT_SYMBOL_GPL(cpu_add_dev_attr_group); void cpu_remove_dev_attr(struct device_attribute *attr) { int cpu; mutex_lock(&cpu_mutex); for_each_possible_cpu(cpu) { device_remove_file(get_cpu_device(cpu), attr); } mutex_unlock(&cpu_mutex); } EXPORT_SYMBOL_GPL(cpu_remove_dev_attr); void cpu_remove_dev_attr_group(struct attribute_group *attrs) { int cpu; struct device *dev; mutex_lock(&cpu_mutex); for_each_possible_cpu(cpu) { dev = get_cpu_device(cpu); sysfs_remove_group(&dev->kobj, attrs); } mutex_unlock(&cpu_mutex); } EXPORT_SYMBOL_GPL(cpu_remove_dev_attr_group); /* NUMA stuff */ #ifdef CONFIG_NUMA static void register_nodes(void) { int i; for (i = 0; i < MAX_NUMNODES; i++) register_one_node(i); } int sysfs_add_device_to_node(struct device *dev, int nid) { struct node *node = &node_devices[nid]; return sysfs_create_link(&node->dev.kobj, &dev->kobj, kobject_name(&dev->kobj)); } EXPORT_SYMBOL_GPL(sysfs_add_device_to_node); void sysfs_remove_device_from_node(struct device *dev, int nid) { struct node *node = &node_devices[nid]; sysfs_remove_link(&node->dev.kobj, kobject_name(&dev->kobj)); } EXPORT_SYMBOL_GPL(sysfs_remove_device_from_node); #else static void register_nodes(void) { return; } #endif /* Only valid if CPU is present. */ static ssize_t show_physical_id(struct device *dev, struct device_attribute *attr, char *buf) { struct cpu *cpu = container_of(dev, struct cpu, dev); return sprintf(buf, "%d\n", get_hard_smp_processor_id(cpu->dev.id)); } static DEVICE_ATTR(physical_id, 0444, show_physical_id, NULL); static int __init topology_init(void) { int cpu; register_nodes(); register_cpu_notifier(&sysfs_cpu_nb); for_each_possible_cpu(cpu) { struct cpu *c = &per_cpu(cpu_devices, cpu); /* * For now, we just see if the system supports making * the RTAS calls for CPU hotplug. But, there may be a * more comprehensive way to do this for an individual * CPU. For instance, the boot cpu might never be valid * for hotplugging. 
*/ if (ppc_md.cpu_die) c->hotpluggable = 1; if (cpu_online(cpu) || c->hotpluggable) { register_cpu(c, cpu); device_create_file(&c->dev, &dev_attr_physical_id); } if (cpu_online(cpu)) register_cpu_online(cpu); } #ifdef CONFIG_PPC64 sysfs_create_dscr_default(); #endif /* CONFIG_PPC64 */ return 0; } subsys_initcall(topology_init);
gpl-2.0
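
The smt_snooze_delay attribute created by register_cpu_online() above appears under each CPU's sysfs directory on PPC64 systems with SMT. A minimal user-space sketch of reading it back (the upper bound of 4 CPUs is an arbitrary illustration, and the file simply will not exist on other architectures):

/* Read /sys/devices/system/cpu/cpuN/smt_snooze_delay for a few CPUs.
 * The CPU count is an assumption for illustration only. */
#include <stdio.h>

int main(void)
{
	char path[128];
	for (int cpu = 0; cpu < 4; cpu++) {
		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu%d/smt_snooze_delay", cpu);
		FILE *f = fopen(path, "r");
		if (!f)
			continue;	/* offline CPU, or not a PPC64/SMT kernel */
		long delay;
		if (fscanf(f, "%ld", &delay) == 1)
			printf("cpu%d: snooze delay %ld us\n", cpu, delay);
		fclose(f);
	}
	return 0;
}
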
markyzq/linux-3.14
drivers/net/phy/national.c
3377
4393
/*
 * drivers/net/phy/national.c
 *
 * Driver for National Semiconductor PHYs
 *
 * Author: Stuart Menefy <stuart.menefy@st.com>
 * Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *
 * Copyright (c) 2008 STMicroelectronics Limited
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/netdevice.h>

#define DEBUG

/* DP83865 phy identifier values */
#define DP83865_PHY_ID	0x20005c7a

#define DP83865_INT_STATUS	0x14
#define DP83865_INT_MASK	0x15
#define DP83865_INT_CLEAR	0x17

#define DP83865_INT_REMOTE_FAULT	0x0008
#define DP83865_INT_ANE_COMPLETED	0x0010
#define DP83865_INT_LINK_CHANGE		0xe000
#define DP83865_INT_MASK_DEFAULT	(DP83865_INT_REMOTE_FAULT | \
					 DP83865_INT_ANE_COMPLETED | \
					 DP83865_INT_LINK_CHANGE)

/* Advanced proprietary configuration */
#define NS_EXP_MEM_CTL	0x16
#define NS_EXP_MEM_DATA	0x1d
#define NS_EXP_MEM_ADD	0x1e

#define LED_CTRL_REG	0x13
#define AN_FALLBACK_AN	0x0001
#define AN_FALLBACK_CRC	0x0002
#define AN_FALLBACK_IE	0x0004
#define ALL_FALLBACK_ON	(AN_FALLBACK_AN | AN_FALLBACK_CRC | AN_FALLBACK_IE)

enum hdx_loopback {
	hdx_loopback_on = 0,
	hdx_loopback_off = 1,
};

static u8 ns_exp_read(struct phy_device *phydev, u16 reg)
{
	phy_write(phydev, NS_EXP_MEM_ADD, reg);
	return phy_read(phydev, NS_EXP_MEM_DATA);
}

static void ns_exp_write(struct phy_device *phydev, u16 reg, u8 data)
{
	phy_write(phydev, NS_EXP_MEM_ADD, reg);
	phy_write(phydev, NS_EXP_MEM_DATA, data);
}

static int ns_config_intr(struct phy_device *phydev)
{
	int err;

	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
		err = phy_write(phydev, DP83865_INT_MASK,
				DP83865_INT_MASK_DEFAULT);
	else
		err = phy_write(phydev, DP83865_INT_MASK, 0);

	return err;
}

static int ns_ack_interrupt(struct phy_device *phydev)
{
	int ret = phy_read(phydev, DP83865_INT_STATUS);
	if (ret < 0)
		return ret;

	/* Clear the interrupt status bit by writing a "1"
	 * to the corresponding bit in INT_CLEAR (2:0 are reserved)
	 */
	ret = phy_write(phydev, DP83865_INT_CLEAR, ret & ~0x7);

	return ret;
}

static void ns_giga_speed_fallback(struct phy_device *phydev, int mode)
{
	int bmcr = phy_read(phydev, MII_BMCR);

	phy_write(phydev, MII_BMCR, (bmcr | BMCR_PDOWN));

	/* Enable 8-bit expanded memory read/write (no auto increment) */
	phy_write(phydev, NS_EXP_MEM_CTL, 0);
	phy_write(phydev, NS_EXP_MEM_ADD, 0x1C0);
	phy_write(phydev, NS_EXP_MEM_DATA, 0x0008);
	phy_write(phydev, MII_BMCR, (bmcr & ~BMCR_PDOWN));
	phy_write(phydev, LED_CTRL_REG, mode);
}

static void ns_10_base_t_hdx_loopack(struct phy_device *phydev, int disable)
{
	if (disable)
		ns_exp_write(phydev, 0x1c0, ns_exp_read(phydev, 0x1c0) | 1);
	else
		ns_exp_write(phydev, 0x1c0,
			     ns_exp_read(phydev, 0x1c0) & 0xfffe);

	pr_debug("10BASE-T HDX loopback %s\n",
		 (ns_exp_read(phydev, 0x1c0) & 0x0001) ? "off" : "on");
}

static int ns_config_init(struct phy_device *phydev)
{
	ns_giga_speed_fallback(phydev, ALL_FALLBACK_ON);
	/* In recent MAC and switch designs, the 10 Mbps HDX loopback
	 * should be turned off.
	 */
	ns_10_base_t_hdx_loopack(phydev, hdx_loopback_off);
	return ns_ack_interrupt(phydev);
}

static struct phy_driver dp83865_driver = {
	.phy_id = DP83865_PHY_ID,
	.phy_id_mask = 0xfffffff0,
	.name = "NatSemi DP83865",
	.features = PHY_GBIT_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause,
	.flags = PHY_HAS_INTERRUPT,
	.config_init = ns_config_init,
	.config_aneg = genphy_config_aneg,
	.read_status = genphy_read_status,
	.ack_interrupt = ns_ack_interrupt,
	.config_intr = ns_config_intr,
	.driver = {.owner = THIS_MODULE,}
};

static int __init ns_init(void)
{
	return phy_driver_register(&dp83865_driver);
}

static void __exit ns_exit(void)
{
	phy_driver_unregister(&dp83865_driver);
}

MODULE_DESCRIPTION("NatSemi PHY driver");
MODULE_AUTHOR("Stuart Menefy");
MODULE_LICENSE("GPL");

module_init(ns_init);
module_exit(ns_exit);

static struct mdio_device_id __maybe_unused ns_tbl[] = {
	{ DP83865_PHY_ID, 0xfffffff0 },
	{ }
};

MODULE_DEVICE_TABLE(mdio, ns_tbl);
gpl-2.0
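
The registers this driver touches (BMCR, the DP83865 interrupt block, the expanded-memory window) all sit behind the MDIO bus, and the kernel exposes generic MDIO register access to user space through the SIOCGMIIPHY/SIOCGMIIREG ioctls. The sketch below reads the two PHY ID registers and rebuilds the 32-bit identifier that the PHY core matches against DP83865_PHY_ID; the interface name "eth0" is an assumption, and SIOCGMIIREG typically requires CAP_NET_ADMIN.

/* Read MII_PHYSID1/2 over MDIO and rebuild the 32-bit PHY id.
 * "eth0" is a placeholder interface name; run with CAP_NET_ADMIN. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	/* by convention the mii data lives inside the ifreq itself */
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	unsigned int id;

	int sk = socket(AF_INET, SOCK_DGRAM, 0);
	if (sk < 0) {
		perror("socket");
		return 1;
	}
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	if (ioctl(sk, SIOCGMIIPHY, &ifr) < 0) {	/* fills in mii->phy_id */
		perror("SIOCGMIIPHY");
		return 1;
	}
	mii->reg_num = MII_PHYSID1;
	if (ioctl(sk, SIOCGMIIREG, &ifr) < 0) {
		perror("SIOCGMIIREG");
		return 1;
	}
	id = (unsigned int)mii->val_out << 16;
	mii->reg_num = MII_PHYSID2;
	if (ioctl(sk, SIOCGMIIREG, &ifr) < 0) {
		perror("SIOCGMIIREG");
		return 1;
	}
	id |= mii->val_out;

	printf("PHY id 0x%08x (DP83865 matches 0x20005c7a)\n", id);
	close(sk);
	return 0;
}
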
oppo-source/Neo5-kernel-source
drivers/net/ethernet/sfc/mcdi.c
3377
31480
/**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2008-2011 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation, incorporated herein by reference. */ #include <linux/delay.h> #include "net_driver.h" #include "nic.h" #include "io.h" #include "regs.h" #include "mcdi_pcol.h" #include "phy.h" /************************************************************************** * * Management-Controller-to-Driver Interface * ************************************************************************** */ #define MCDI_RPC_TIMEOUT 10 /*seconds */ #define MCDI_PDU(efx) \ (efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST) #define MCDI_DOORBELL(efx) \ (efx_port_num(efx) ? MC_SMEM_P1_DOORBELL_OFST : MC_SMEM_P0_DOORBELL_OFST) #define MCDI_STATUS(efx) \ (efx_port_num(efx) ? MC_SMEM_P1_STATUS_OFST : MC_SMEM_P0_STATUS_OFST) /* A reboot/assertion causes the MCDI status word to be set after the * command word is set or a REBOOT event is sent. If we notice a reboot * via these mechanisms then wait 10ms for the status word to be set. */ #define MCDI_STATUS_DELAY_US 100 #define MCDI_STATUS_DELAY_COUNT 100 #define MCDI_STATUS_SLEEP_MS \ (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000) #define SEQ_MASK \ EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ)) static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) { struct siena_nic_data *nic_data; EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0); nic_data = efx->nic_data; return &nic_data->mcdi; } void efx_mcdi_init(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi; if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) return; mcdi = efx_mcdi(efx); init_waitqueue_head(&mcdi->wq); spin_lock_init(&mcdi->iface_lock); atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT); mcdi->mode = MCDI_MODE_POLL; (void) efx_mcdi_poll_reboot(efx); } static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, const u8 *inbuf, size_t inlen) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx); unsigned int i; efx_dword_t hdr; u32 xflags, seqno; BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); BUG_ON(inlen & 3 || inlen >= MC_SMEM_PDU_LEN); seqno = mcdi->seqno & SEQ_MASK; xflags = 0; if (mcdi->mode == MCDI_MODE_EVENTS) xflags |= MCDI_HEADER_XFLAGS_EVREQ; EFX_POPULATE_DWORD_6(hdr, MCDI_HEADER_RESPONSE, 0, MCDI_HEADER_RESYNC, 1, MCDI_HEADER_CODE, cmd, MCDI_HEADER_DATALEN, inlen, MCDI_HEADER_SEQ, seqno, MCDI_HEADER_XFLAGS, xflags); efx_writed(efx, &hdr, pdu); for (i = 0; i < inlen; i += 4) _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i); /* Ensure the payload is written out before the header */ wmb(); /* ring the doorbell with a distinctive value */ _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); } static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); int i; BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); BUG_ON(outlen & 3 || outlen >= MC_SMEM_PDU_LEN); for (i = 0; i < outlen; i += 4) *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i); } static int efx_mcdi_poll(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); unsigned int time, finish; 
unsigned int respseq, respcmd, error; unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); unsigned int rc, spins; efx_dword_t reg; /* Check for a reboot atomically with respect to efx_mcdi_copyout() */ rc = -efx_mcdi_poll_reboot(efx); if (rc) goto out; /* Poll for completion. Poll quickly (once a us) for the 1st jiffy, * because generally mcdi responses are fast. After that, back off * and poll once a jiffy (approximately) */ spins = TICK_USEC; finish = get_seconds() + MCDI_RPC_TIMEOUT; while (1) { if (spins != 0) { --spins; udelay(1); } else { schedule_timeout_uninterruptible(1); } time = get_seconds(); rmb(); efx_readd(efx, &reg, pdu); /* All 1's indicates that shared memory is in reset (and is * not a valid header). Wait for it to come out reset before * completing the command */ if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) != 0xffffffff && EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE)) break; if (time >= finish) return -ETIMEDOUT; } mcdi->resplen = EFX_DWORD_FIELD(reg, MCDI_HEADER_DATALEN); respseq = EFX_DWORD_FIELD(reg, MCDI_HEADER_SEQ); respcmd = EFX_DWORD_FIELD(reg, MCDI_HEADER_CODE); error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR); if (error && mcdi->resplen == 0) { netif_err(efx, hw, efx->net_dev, "MC rebooted\n"); rc = EIO; } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) { netif_err(efx, hw, efx->net_dev, "MC response mismatch tx seq 0x%x rx seq 0x%x\n", respseq, mcdi->seqno); rc = EIO; } else if (error) { efx_readd(efx, &reg, pdu + 4); switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { #define TRANSLATE_ERROR(name) \ case MC_CMD_ERR_ ## name: \ rc = name; \ break TRANSLATE_ERROR(ENOENT); TRANSLATE_ERROR(EINTR); TRANSLATE_ERROR(EACCES); TRANSLATE_ERROR(EBUSY); TRANSLATE_ERROR(EINVAL); TRANSLATE_ERROR(EDEADLK); TRANSLATE_ERROR(ENOSYS); TRANSLATE_ERROR(ETIME); #undef TRANSLATE_ERROR default: rc = EIO; break; } } else rc = 0; out: mcdi->resprc = rc; if (rc) mcdi->resplen = 0; /* Return rc=0 like wait_event_timeout() */ return 0; } /* Test and clear MC-rebooted flag for this port/function */ int efx_mcdi_poll_reboot(struct efx_nic *efx) { unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx); efx_dword_t reg; uint32_t value; if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) return false; efx_readd(efx, &reg, addr); value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); if (value == 0) return 0; EFX_ZERO_DWORD(reg); efx_writed(efx, &reg, addr); if (value == MC_STATUS_DWORD_ASSERT) return -EINTR; else return -EIO; } static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi) { /* Wait until the interface becomes QUIESCENT and we win the race * to mark it RUNNING. */ wait_event(mcdi->wq, atomic_cmpxchg(&mcdi->state, MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING) == MCDI_STATE_QUIESCENT); } static int efx_mcdi_await_completion(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); if (wait_event_timeout( mcdi->wq, atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED, msecs_to_jiffies(MCDI_RPC_TIMEOUT * 1000)) == 0) return -ETIMEDOUT; /* Check if efx_mcdi_set_mode() switched us back to polled completions. * In which case, poll for completions directly. If efx_mcdi_ev_cpl() * completed the request first, then we'll just end up completing the * request again, which is safe. * * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which * wait_event_timeout() implicitly provides. */ if (mcdi->mode == MCDI_MODE_POLL) return efx_mcdi_poll(efx); return 0; } static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi) { /* If the interface is RUNNING, then move to COMPLETED and wake any * waiters. 
If the interface isn't in RUNNING then we've received a * duplicate completion after we've already transitioned back to * QUIESCENT. [A subsequent invocation would increment seqno, so would * have failed the seqno check]. */ if (atomic_cmpxchg(&mcdi->state, MCDI_STATE_RUNNING, MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) { wake_up(&mcdi->wq); return true; } return false; } static void efx_mcdi_release(struct efx_mcdi_iface *mcdi) { atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT); wake_up(&mcdi->wq); } static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno, unsigned int datalen, unsigned int errno) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); bool wake = false; spin_lock(&mcdi->iface_lock); if ((seqno ^ mcdi->seqno) & SEQ_MASK) { if (mcdi->credits) /* The request has been cancelled */ --mcdi->credits; else netif_err(efx, hw, efx->net_dev, "MC response mismatch tx seq 0x%x rx " "seq 0x%x\n", seqno, mcdi->seqno); } else { mcdi->resprc = errno; mcdi->resplen = datalen; wake = true; } spin_unlock(&mcdi->iface_lock); if (wake) efx_mcdi_complete(mcdi); } /* Issue the given command by writing the data into the shared memory PDU, * ring the doorbell and wait for completion. Copyout the result. */ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen, size_t *outlen_actual) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); int rc; BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0); efx_mcdi_acquire(mcdi); /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */ spin_lock_bh(&mcdi->iface_lock); ++mcdi->seqno; spin_unlock_bh(&mcdi->iface_lock); efx_mcdi_copyin(efx, cmd, inbuf, inlen); if (mcdi->mode == MCDI_MODE_POLL) rc = efx_mcdi_poll(efx); else rc = efx_mcdi_await_completion(efx); if (rc != 0) { /* Close the race with efx_mcdi_ev_cpl() executing just too late * and completing a request we've just cancelled, by ensuring * that the seqno check therein fails. */ spin_lock_bh(&mcdi->iface_lock); ++mcdi->seqno; ++mcdi->credits; spin_unlock_bh(&mcdi->iface_lock); netif_err(efx, hw, efx->net_dev, "MC command 0x%x inlen %d mode %d timed out\n", cmd, (int)inlen, mcdi->mode); } else { size_t resplen; /* At the very least we need a memory barrier here to ensure * we pick up changes from efx_mcdi_ev_cpl(). Protect against * a spurious efx_mcdi_ev_cpl() running concurrently by * acquiring the iface_lock. */ spin_lock_bh(&mcdi->iface_lock); rc = -mcdi->resprc; resplen = mcdi->resplen; spin_unlock_bh(&mcdi->iface_lock); if (rc == 0) { efx_mcdi_copyout(efx, outbuf, min(outlen, mcdi->resplen + 3) & ~0x3); if (outlen_actual != NULL) *outlen_actual = resplen; } else if (cmd == MC_CMD_REBOOT && rc == -EIO) ; /* Don't reset if MC_CMD_REBOOT returns EIO */ else if (rc == -EIO || rc == -EINTR) { netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n", -rc); efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); } else netif_dbg(efx, hw, efx->net_dev, "MC command 0x%x inlen %d failed rc=%d\n", cmd, (int)inlen, -rc); if (rc == -EIO || rc == -EINTR) { msleep(MCDI_STATUS_SLEEP_MS); efx_mcdi_poll_reboot(efx); } } efx_mcdi_release(mcdi); return rc; } void efx_mcdi_mode_poll(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi; if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) return; mcdi = efx_mcdi(efx); if (mcdi->mode == MCDI_MODE_POLL) return; /* We can switch from event completion to polled completion, because * mcdi requests are always completed in shared memory. We do this by * switching the mode to POLL'd then completing the request. 
* efx_mcdi_await_completion() will then call efx_mcdi_poll(). * * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(), * which efx_mcdi_complete() provides for us. */ mcdi->mode = MCDI_MODE_POLL; efx_mcdi_complete(mcdi); } void efx_mcdi_mode_event(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi; if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) return; mcdi = efx_mcdi(efx); if (mcdi->mode == MCDI_MODE_EVENTS) return; /* We can't switch from polled to event completion in the middle of a * request, because the completion method is specified in the request. * So acquire the interface to serialise the requestors. We don't need * to acquire the iface_lock to change the mode here, but we do need a * write memory barrier ensure that efx_mcdi_rpc() sees it, which * efx_mcdi_acquire() provides. */ efx_mcdi_acquire(mcdi); mcdi->mode = MCDI_MODE_EVENTS; efx_mcdi_release(mcdi); } static void efx_mcdi_ev_death(struct efx_nic *efx, int rc) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); /* If there is an outstanding MCDI request, it has been terminated * either by a BADASSERT or REBOOT event. If the mcdi interface is * in polled mode, then do nothing because the MC reboot handler will * set the header correctly. However, if the mcdi interface is waiting * for a CMDDONE event it won't receive it [and since all MCDI events * are sent to the same queue, we can't be racing with * efx_mcdi_ev_cpl()] * * There's a race here with efx_mcdi_rpc(), because we might receive * a REBOOT event *before* the request has been copied out. In polled * mode (during startup) this is irrelevant, because efx_mcdi_complete() * is ignored. In event mode, this condition is just an edge-case of * receiving a REBOOT event after posting the MCDI request. Did the mc * reboot before or after the copyout? The best we can do always is * just return failure. */ spin_lock(&mcdi->iface_lock); if (efx_mcdi_complete(mcdi)) { if (mcdi->mode == MCDI_MODE_EVENTS) { mcdi->resprc = rc; mcdi->resplen = 0; ++mcdi->credits; } } else { int count; /* Nobody was waiting for an MCDI request, so trigger a reset */ efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); /* Consume the status word since efx_mcdi_rpc_finish() won't */ for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) { if (efx_mcdi_poll_reboot(efx)) break; udelay(MCDI_STATUS_DELAY_US); } } spin_unlock(&mcdi->iface_lock); } static unsigned int efx_mcdi_event_link_speed[] = { [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100, [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000, [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000, }; static void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev) { u32 flags, fcntl, speed, lpa; speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED); EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed)); speed = efx_mcdi_event_link_speed[speed]; flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS); fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL); lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP); /* efx->link_state is only modified by efx_mcdi_phy_get_link(), * which is only run after flushing the event queues. Therefore, it * is safe to modify the link state outside of the mac_lock here. 
*/ efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl); efx_mcdi_phy_check_fcntl(efx, lpa); efx_link_status_changed(efx); } /* Called from falcon_process_eventq for MCDI events */ void efx_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event) { struct efx_nic *efx = channel->efx; int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE); u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA); switch (code) { case MCDI_EVENT_CODE_BADSSERT: netif_err(efx, hw, efx->net_dev, "MC watchdog or assertion failure at 0x%x\n", data); efx_mcdi_ev_death(efx, EINTR); break; case MCDI_EVENT_CODE_PMNOTICE: netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n"); break; case MCDI_EVENT_CODE_CMDDONE: efx_mcdi_ev_cpl(efx, MCDI_EVENT_FIELD(*event, CMDDONE_SEQ), MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN), MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO)); break; case MCDI_EVENT_CODE_LINKCHANGE: efx_mcdi_process_link_change(efx, event); break; case MCDI_EVENT_CODE_SENSOREVT: efx_mcdi_sensor_event(efx, event); break; case MCDI_EVENT_CODE_SCHEDERR: netif_info(efx, hw, efx->net_dev, "MC Scheduler error address=0x%x\n", data); break; case MCDI_EVENT_CODE_REBOOT: netif_info(efx, hw, efx->net_dev, "MC Reboot\n"); efx_mcdi_ev_death(efx, EIO); break; case MCDI_EVENT_CODE_MAC_STATS_DMA: /* MAC stats are gather lazily. We can ignore this. */ break; case MCDI_EVENT_CODE_FLR: efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF)); break; default: netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n", code); } } /************************************************************************** * * Specific request functions * ************************************************************************** */ void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len) { u8 outbuf[ALIGN(MC_CMD_GET_VERSION_OUT_LEN, 4)]; size_t outlength; const __le16 *ver_words; int rc; BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0); rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0, outbuf, sizeof(outbuf), &outlength); if (rc) goto fail; if (outlength < MC_CMD_GET_VERSION_OUT_LEN) { rc = -EIO; goto fail; } ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION); snprintf(buf, len, "%u.%u.%u.%u", le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]), le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3])); return; fail: netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); buf[0] = 0; } int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, bool *was_attached) { u8 inbuf[MC_CMD_DRV_ATTACH_IN_LEN]; u8 outbuf[MC_CMD_DRV_ATTACH_OUT_LEN]; size_t outlen; int rc; MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE, driver_operating ? 
1 : 0); MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1); rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); if (rc) goto fail; if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) { rc = -EIO; goto fail; } if (was_attached != NULL) *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE); return 0; fail: netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, u16 *fw_subtype_list, u32 *capabilities) { uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMIN]; size_t outlen; int port_num = efx_port_num(efx); int offset; int rc; BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0); rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0, outbuf, sizeof(outbuf), &outlen); if (rc) goto fail; if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) { rc = -EIO; goto fail; } offset = (port_num) ? MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST; if (mac_address) memcpy(mac_address, outbuf + offset, ETH_ALEN); if (fw_subtype_list) memcpy(fw_subtype_list, outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST, MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM * sizeof(fw_subtype_list[0])); if (capabilities) { if (port_num) *capabilities = MCDI_DWORD(outbuf, GET_BOARD_CFG_OUT_CAPABILITIES_PORT1); else *capabilities = MCDI_DWORD(outbuf, GET_BOARD_CFG_OUT_CAPABILITIES_PORT0); } return 0; fail: netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n", __func__, rc, (int)outlen); return rc; } int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq) { u8 inbuf[MC_CMD_LOG_CTRL_IN_LEN]; u32 dest = 0; int rc; if (uart) dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART; if (evq) dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ; MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest); MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq); BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0); rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf), NULL, 0, NULL); if (rc) goto fail; return 0; fail: netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out) { u8 outbuf[MC_CMD_NVRAM_TYPES_OUT_LEN]; size_t outlen; int rc; BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0); rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0, outbuf, sizeof(outbuf), &outlen); if (rc) goto fail; if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) { rc = -EIO; goto fail; } *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES); return 0; fail: netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, size_t *size_out, size_t *erase_size_out, bool *protected_out) { u8 inbuf[MC_CMD_NVRAM_INFO_IN_LEN]; u8 outbuf[MC_CMD_NVRAM_INFO_OUT_LEN]; size_t outlen; int rc; MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type); rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); if (rc) goto fail; if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) { rc = -EIO; goto fail; } *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE); *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE); *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) & (1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN)); return 0; fail: netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type) { u8 
inbuf[MC_CMD_NVRAM_UPDATE_START_IN_LEN]; int rc; MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type); BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0); rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf), NULL, 0, NULL); if (rc) goto fail; return 0; fail: netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, loff_t offset, u8 *buffer, size_t length) { u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN]; u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)]; size_t outlen; int rc; MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type); MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset); MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length); rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); if (rc) goto fail; memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length); return 0; fail: netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, loff_t offset, const u8 *buffer, size_t length) { u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)]; int rc; MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type); MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset); MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length); memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length); BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0); rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4), NULL, 0, NULL); if (rc) goto fail; return 0; fail: netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, loff_t offset, size_t length) { u8 inbuf[MC_CMD_NVRAM_ERASE_IN_LEN]; int rc; MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type); MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset); MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length); BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0); rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf), NULL, 0, NULL); if (rc) goto fail; return 0; fail: netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type) { u8 inbuf[MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN]; int rc; MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type); BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0); rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf), NULL, 0, NULL); if (rc) goto fail; return 0; fail: netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type) { u8 inbuf[MC_CMD_NVRAM_TEST_IN_LEN]; u8 outbuf[MC_CMD_NVRAM_TEST_OUT_LEN]; int rc; MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type); rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), NULL); if (rc) return rc; switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) { case MC_CMD_NVRAM_TEST_PASS: case MC_CMD_NVRAM_TEST_NOTSUPP: return 0; default: return -EIO; } } int efx_mcdi_nvram_test_all(struct efx_nic *efx) { u32 nvram_types; unsigned int type; int rc; rc = efx_mcdi_nvram_types(efx, &nvram_types); if (rc) goto fail1; type = 0; while (nvram_types != 0) { if (nvram_types & 1) { rc = efx_mcdi_nvram_test(efx, type); if (rc) goto fail2; } type++; nvram_types >>= 1; } return 0; fail2: netif_err(efx, hw, efx->net_dev, "%s: 
failed type=%u\n", __func__, type); fail1: netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } static int efx_mcdi_read_assertion(struct efx_nic *efx) { u8 inbuf[MC_CMD_GET_ASSERTS_IN_LEN]; u8 outbuf[MC_CMD_GET_ASSERTS_OUT_LEN]; unsigned int flags, index, ofst; const char *reason; size_t outlen; int retry; int rc; /* Attempt to read any stored assertion state before we reboot * the mcfw out of the assertion handler. Retry twice, once * because a boot-time assertion might cause this command to fail * with EINTR. And once again because GET_ASSERTS can race with * MC_CMD_REBOOT running on the other port. */ retry = 2; do { MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1); rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS, inbuf, MC_CMD_GET_ASSERTS_IN_LEN, outbuf, sizeof(outbuf), &outlen); } while ((rc == -EINTR || rc == -EIO) && retry-- > 0); if (rc) return rc; if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN) return -EIO; /* Print out any recorded assertion state */ flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS); if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS) return 0; reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL) ? "system-level assertion" : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL) ? "thread-level assertion" : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED) ? "watchdog reset" : "unknown assertion"; netif_err(efx, hw, efx->net_dev, "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason, MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS), MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS)); /* Print out the registers */ ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST; for (index = 1; index < 32; index++) { netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", index, MCDI_DWORD2(outbuf, ofst)); ofst += sizeof(efx_dword_t); } return 0; } static void efx_mcdi_exit_assertion(struct efx_nic *efx) { u8 inbuf[MC_CMD_REBOOT_IN_LEN]; /* Atomically reboot the mcfw out of the assertion handler */ BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION); efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN, NULL, 0, NULL); } int efx_mcdi_handle_assertion(struct efx_nic *efx) { int rc; rc = efx_mcdi_read_assertion(efx); if (rc) return rc; efx_mcdi_exit_assertion(efx); return 0; } void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) { u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN]; int rc; BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF); BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON); BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT); BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0); MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode); rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf), NULL, 0, NULL); if (rc) netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); } int efx_mcdi_reset_port(struct efx_nic *efx) { int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL); if (rc) netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } int efx_mcdi_reset_mc(struct efx_nic *efx) { u8 inbuf[MC_CMD_REBOOT_IN_LEN]; int rc; BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0); rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf), NULL, 0, NULL); /* White is black, and up is down */ if (rc == -EIO) return 0; if (rc == 0) rc = -EIO; netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type, const u8 *mac, int *id_out) { u8 
inbuf[MC_CMD_WOL_FILTER_SET_IN_LEN]; u8 outbuf[MC_CMD_WOL_FILTER_SET_OUT_LEN]; size_t outlen; int rc; MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type); MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE, MC_CMD_FILTER_MODE_SIMPLE); memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN); rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); if (rc) goto fail; if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) { rc = -EIO; goto fail; } *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID); return 0; fail: *id_out = -1; netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out) { return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out); } int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out) { u8 outbuf[MC_CMD_WOL_FILTER_GET_OUT_LEN]; size_t outlen; int rc; rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0, outbuf, sizeof(outbuf), &outlen); if (rc) goto fail; if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) { rc = -EIO; goto fail; } *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID); return 0; fail: *id_out = -1; netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id) { u8 inbuf[MC_CMD_WOL_FILTER_REMOVE_IN_LEN]; int rc; MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id); rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf), NULL, 0, NULL); if (rc) goto fail; return 0; fail: netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } int efx_mcdi_flush_rxqs(struct efx_nic *efx) { struct efx_channel *channel; struct efx_rx_queue *rx_queue; __le32 *qid; int rc, count; qid = kmalloc(EFX_MAX_CHANNELS * sizeof(*qid), GFP_KERNEL); if (qid == NULL) return -ENOMEM; count = 0; efx_for_each_channel(channel, efx) { efx_for_each_channel_rx_queue(rx_queue, channel) { if (rx_queue->flush_pending) { rx_queue->flush_pending = false; atomic_dec(&efx->rxq_flush_pending); qid[count++] = cpu_to_le32( efx_rx_queue_index(rx_queue)); } } } rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)qid, count * sizeof(*qid), NULL, 0, NULL); WARN_ON(rc > 0); kfree(qid); return rc; } int efx_mcdi_wol_filter_reset(struct efx_nic *efx) { int rc; rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL); if (rc) goto fail; return 0; fail: netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; }
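/*
 * Illustrative sketch only; not part of the upstream driver. The NVRAM
 * helpers above form a bracketed transaction: UPDATE_START opens it,
 * ERASE/WRITE modify the partition, UPDATE_FINISH commits it. The function
 * below shows one plausible calling sequence; its name, the whole-partition
 * erase and the EFX_MCDI_NVRAM_LEN_MAX chunking policy are assumptions for
 * illustration (real callers must respect the partition's erase geometry).
 */
static int __maybe_unused
efx_mcdi_nvram_update_sketch(struct efx_nic *efx, unsigned int type,
			     const u8 *image, size_t len)
{
	size_t ofst, chunk;
	int rc, rc2;

	rc = efx_mcdi_nvram_update_start(efx, type);
	if (rc)
		return rc;

	/* erase the region we are about to rewrite */
	rc = efx_mcdi_nvram_erase(efx, type, 0, len);

	/* write in chunks no larger than one MCDI payload */
	for (ofst = 0; rc == 0 && ofst < len; ofst += chunk) {
		chunk = min_t(size_t, len - ofst, EFX_MCDI_NVRAM_LEN_MAX);
		rc = efx_mcdi_nvram_write(efx, type, ofst, image + ofst, chunk);
	}

	/* always close the transaction, even after a failure */
	rc2 = efx_mcdi_nvram_update_finish(efx, type);
	return rc ? rc : rc2;
}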
gpl-2.0
cybernet/rhel7-kernel
kernel/drivers/char/pcmcia/cm4000_cs.c
4401
49392
/* * A driver for the PCMCIA Smartcard Reader "Omnikey CardMan Mobile 4000" * * cm4000_cs.c support.linux@omnikey.com * * Tue Oct 23 11:32:43 GMT 2001 herp - cleaned up header files * Sun Jan 20 10:11:15 MET 2002 herp - added modversion header files * Thu Nov 14 16:34:11 GMT 2002 mh - added PPS functionality * Tue Nov 19 16:36:27 GMT 2002 mh - added SUSPEND/RESUME functionality * Wed Jul 28 12:55:01 CEST 2004 mh - kernel 2.6 adjustments * * current version: 2.4.0gm4 * * (C) 2000,2001,2002,2003,2004 Omnikey AG * * (C) 2005-2006 Harald Welte <laforge@gnumonks.org> * - Adhere to Kernel CodingStyle * - Port to 2.6.13 "new" style PCMCIA * - Check for copy_{from,to}_user return values * - Use nonseekable_open() * - add class interface for udev device creation * * All rights reserved. Licensed under dual BSD/GPL license. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/delay.h> #include <linux/bitrev.h> #include <linux/mutex.h> #include <linux/uaccess.h> #include <linux/io.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ciscode.h> #include <pcmcia/ds.h> #include <linux/cm4000_cs.h> /* #define ATR_CSUM */ #define reader_to_dev(x) (&x->p_dev->dev) /* n (debug level) is ignored */ /* additional debug output may be enabled by re-compiling with * CM4000_DEBUG set */ /* #define CM4000_DEBUG */ #define DEBUGP(n, rdr, x, args...) do { \ dev_dbg(reader_to_dev(rdr), "%s:" x, \ __func__ , ## args); \ } while (0) static DEFINE_MUTEX(cmm_mutex); #define T_1SEC (HZ) #define T_10MSEC msecs_to_jiffies(10) #define T_20MSEC msecs_to_jiffies(20) #define T_40MSEC msecs_to_jiffies(40) #define T_50MSEC msecs_to_jiffies(50) #define T_100MSEC msecs_to_jiffies(100) #define T_500MSEC msecs_to_jiffies(500) static void cm4000_release(struct pcmcia_device *link); static int major; /* major number we get from the kernel */ /* note: the first state has to have number 0 always */ #define M_FETCH_ATR 0 #define M_TIMEOUT_WAIT 1 #define M_READ_ATR_LEN 2 #define M_READ_ATR 3 #define M_ATR_PRESENT 4 #define M_BAD_CARD 5 #define M_CARDOFF 6 #define LOCK_IO 0 #define LOCK_MONITOR 1 #define IS_AUTOPPS_ACT 6 #define IS_PROCBYTE_PRESENT 7 #define IS_INVREV 8 #define IS_ANY_T0 9 #define IS_ANY_T1 10 #define IS_ATR_PRESENT 11 #define IS_ATR_VALID 12 #define IS_CMM_ABSENT 13 #define IS_BAD_LENGTH 14 #define IS_BAD_CSUM 15 #define IS_BAD_CARD 16 #define REG_FLAGS0(x) (x + 0) #define REG_FLAGS1(x) (x + 1) #define REG_NUM_BYTES(x) (x + 2) #define REG_BUF_ADDR(x) (x + 3) #define REG_BUF_DATA(x) (x + 4) #define REG_NUM_SEND(x) (x + 5) #define REG_BAUDRATE(x) (x + 6) #define REG_STOPBITS(x) (x + 7) struct cm4000_dev { struct pcmcia_device *p_dev; unsigned char atr[MAX_ATR]; unsigned char rbuf[512]; unsigned char sbuf[512]; wait_queue_head_t devq; /* when removing cardman must not be zeroed! */ wait_queue_head_t ioq; /* if IO is locked, wait on this Q */ wait_queue_head_t atrq; /* wait for ATR valid */ wait_queue_head_t readq; /* used by write to wake blk.read */ /* warning: do not move these fields. * initialising to zero depends on their position - see ZERO_DEV below. */ unsigned char atr_csum; unsigned char atr_len_retry; unsigned short atr_len; unsigned short rlen; /* bytes avail. after write */ unsigned short rpos; /* latest read pos. 
write zeroes */ unsigned char procbyte; /* T=0 procedure byte */ unsigned char mstate; /* state of card monitor */ unsigned char cwarn; /* slow down warning */ unsigned char flags0; /* cardman IO-flags 0 */ unsigned char flags1; /* cardman IO-flags 1 */ unsigned int mdelay; /* variable monitor speeds, in jiffies */ unsigned int baudv; /* baud value for speed */ unsigned char ta1; unsigned char proto; /* T=0, T=1, ... */ unsigned long flags; /* lock+flags (MONITOR,IO,ATR) * for concurrent access */ unsigned char pts[4]; struct timer_list timer; /* used to keep monitor running */ int monitor_running; }; #define ZERO_DEV(dev) \ memset(&dev->atr_csum,0, \ sizeof(struct cm4000_dev) - \ offsetof(struct cm4000_dev, atr_csum)) static struct pcmcia_device *dev_table[CM4000_MAX_DEV]; static struct class *cmm_class; /* This table doesn't use spaces after the comma between fields and thus * violates CodingStyle. However, I don't really think wrapping it around will * make it any clearer to read -HW */ static unsigned char fi_di_table[10][14] = { /*FI 00 01 02 03 04 05 06 07 08 09 10 11 12 13 */ /*DI */ /* 0 */ {0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11}, /* 1 */ {0x01,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x91,0x11,0x11,0x11,0x11}, /* 2 */ {0x02,0x12,0x22,0x32,0x11,0x11,0x11,0x11,0x11,0x92,0xA2,0xB2,0x11,0x11}, /* 3 */ {0x03,0x13,0x23,0x33,0x43,0x53,0x63,0x11,0x11,0x93,0xA3,0xB3,0xC3,0xD3}, /* 4 */ {0x04,0x14,0x24,0x34,0x44,0x54,0x64,0x11,0x11,0x94,0xA4,0xB4,0xC4,0xD4}, /* 5 */ {0x00,0x15,0x25,0x35,0x45,0x55,0x65,0x11,0x11,0x95,0xA5,0xB5,0xC5,0xD5}, /* 6 */ {0x06,0x16,0x26,0x36,0x46,0x56,0x66,0x11,0x11,0x96,0xA6,0xB6,0xC6,0xD6}, /* 7 */ {0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11,0x11}, /* 8 */ {0x08,0x11,0x28,0x38,0x48,0x58,0x68,0x11,0x11,0x98,0xA8,0xB8,0xC8,0xD8}, /* 9 */ {0x09,0x19,0x29,0x39,0x49,0x59,0x69,0x11,0x11,0x99,0xA9,0xB9,0xC9,0xD9} }; #ifndef CM4000_DEBUG #define xoutb outb #define xinb inb #else static inline void xoutb(unsigned char val, unsigned short port) { pr_debug("outb(val=%.2x,port=%.4x)\n", val, port); outb(val, port); } static inline unsigned char xinb(unsigned short port) { unsigned char val; val = inb(port); pr_debug("%.2x=inb(%.4x)\n", val, port); return val; } #endif static inline unsigned char invert_revert(unsigned char ch) { return bitrev8(~ch); } static void str_invert_revert(unsigned char *b, int len) { int i; for (i = 0; i < len; i++) b[i] = invert_revert(b[i]); } #define ATRLENCK(dev,pos) \ if (pos>=dev->atr_len || pos>=MAX_ATR) \ goto return_0; static unsigned int calc_baudv(unsigned char fidi) { unsigned int wcrcf, wbrcf, fi_rfu, di_rfu; fi_rfu = 372; di_rfu = 1; /* FI */ switch ((fidi >> 4) & 0x0F) { case 0x00: wcrcf = 372; break; case 0x01: wcrcf = 372; break; case 0x02: wcrcf = 558; break; case 0x03: wcrcf = 744; break; case 0x04: wcrcf = 1116; break; case 0x05: wcrcf = 1488; break; case 0x06: wcrcf = 1860; break; case 0x07: wcrcf = fi_rfu; break; case 0x08: wcrcf = fi_rfu; break; case 0x09: wcrcf = 512; break; case 0x0A: wcrcf = 768; break; case 0x0B: wcrcf = 1024; break; case 0x0C: wcrcf = 1536; break; case 0x0D: wcrcf = 2048; break; default: wcrcf = fi_rfu; break; } /* DI */ switch (fidi & 0x0F) { case 0x00: wbrcf = di_rfu; break; case 0x01: wbrcf = 1; break; case 0x02: wbrcf = 2; break; case 0x03: wbrcf = 4; break; case 0x04: wbrcf = 8; break; case 0x05: wbrcf = 16; break; case 0x06: wbrcf = 32; break; case 0x07: wbrcf = di_rfu; break; case 0x08: wbrcf = 12; break; case 0x09: wbrcf = 20; break; default: 
wbrcf = di_rfu; break; } return (wcrcf / wbrcf); } static unsigned short io_read_num_rec_bytes(unsigned int iobase, unsigned short *s) { unsigned short tmp; tmp = *s = 0; do { *s = tmp; tmp = inb(REG_NUM_BYTES(iobase)) | (inb(REG_FLAGS0(iobase)) & 4 ? 0x100 : 0); } while (tmp != *s); return *s; } static int parse_atr(struct cm4000_dev *dev) { unsigned char any_t1, any_t0; unsigned char ch, ifno; int ix, done; DEBUGP(3, dev, "-> parse_atr: dev->atr_len = %i\n", dev->atr_len); if (dev->atr_len < 3) { DEBUGP(5, dev, "parse_atr: atr_len < 3\n"); return 0; } if (dev->atr[0] == 0x3f) set_bit(IS_INVREV, &dev->flags); else clear_bit(IS_INVREV, &dev->flags); ix = 1; ifno = 1; ch = dev->atr[1]; dev->proto = 0; /* XXX PROTO */ any_t1 = any_t0 = done = 0; dev->ta1 = 0x11; /* defaults to 9600 baud */ do { if (ifno == 1 && (ch & 0x10)) { /* read first interface byte and TA1 is present */ dev->ta1 = dev->atr[2]; DEBUGP(5, dev, "Card says FiDi is 0x%.2x\n", dev->ta1); ifno++; } else if ((ifno == 2) && (ch & 0x10)) { /* TA(2) */ dev->ta1 = 0x11; ifno++; } DEBUGP(5, dev, "Yi=%.2x\n", ch & 0xf0); ix += ((ch & 0x10) >> 4) /* no of int.face chars */ +((ch & 0x20) >> 5) + ((ch & 0x40) >> 6) + ((ch & 0x80) >> 7); /* ATRLENCK(dev,ix); */ if (ch & 0x80) { /* TDi */ ch = dev->atr[ix]; if ((ch & 0x0f)) { any_t1 = 1; DEBUGP(5, dev, "card is capable of T=1\n"); } else { any_t0 = 1; DEBUGP(5, dev, "card is capable of T=0\n"); } } else done = 1; } while (!done); DEBUGP(5, dev, "ix=%d noHist=%d any_t1=%d\n", ix, dev->atr[1] & 15, any_t1); if (ix + 1 + (dev->atr[1] & 0x0f) + any_t1 != dev->atr_len) { DEBUGP(5, dev, "length error\n"); return 0; } if (any_t0) set_bit(IS_ANY_T0, &dev->flags); if (any_t1) { /* compute csum */ dev->atr_csum = 0; #ifdef ATR_CSUM { int i; /* loop counter local to this optional checksum block */ for (i = 1; i < dev->atr_len; i++) dev->atr_csum ^= dev->atr[i]; if (dev->atr_csum) { set_bit(IS_BAD_CSUM, &dev->flags); DEBUGP(5, dev, "bad checksum\n"); return 0; } } #endif if (any_t0 == 0) dev->proto = 1; /* XXX PROTO */ set_bit(IS_ANY_T1, &dev->flags); } return 1; } struct card_fixup { char atr[12]; u_int8_t atr_len; u_int8_t stopbits; }; static struct card_fixup card_fixups[] = { { /* ACOS */ .atr = { 0x3b, 0xb3, 0x11, 0x00, 0x00, 0x41, 0x01 }, .atr_len = 7, .stopbits = 0x03, }, { /* Motorola */ .atr = {0x3b, 0x76, 0x13, 0x00, 0x00, 0x80, 0x62, 0x07, 0x41, 0x81, 0x81 }, .atr_len = 11, .stopbits = 0x04, }, }; static void set_cardparameter(struct cm4000_dev *dev) { int i; unsigned int iobase = dev->p_dev->resource[0]->start; u_int8_t stopbits = 0x02; /* ISO default */ DEBUGP(3, dev, "-> set_cardparameter\n"); dev->flags1 = dev->flags1 | (((dev->baudv - 1) & 0x0100) >> 8); xoutb(dev->flags1, REG_FLAGS1(iobase)); DEBUGP(5, dev, "flags1 = 0x%02x\n", dev->flags1); /* set baudrate */ xoutb((unsigned char)((dev->baudv - 1) & 0xFF), REG_BAUDRATE(iobase)); DEBUGP(5, dev, "baudv = %i -> write 0x%02x\n", dev->baudv, ((dev->baudv - 1) & 0xFF)); /* set stopbits */ for (i = 0; i < ARRAY_SIZE(card_fixups); i++) { if (!memcmp(dev->atr, card_fixups[i].atr, card_fixups[i].atr_len)) stopbits = card_fixups[i].stopbits; } xoutb(stopbits, REG_STOPBITS(iobase)); DEBUGP(3, dev, "<- set_cardparameter\n"); } static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq) { unsigned long tmp, i; unsigned short num_bytes_read; unsigned char pts_reply[4]; ssize_t rc; unsigned int iobase = dev->p_dev->resource[0]->start; rc = 0; DEBUGP(3, dev, "-> set_protocol\n"); DEBUGP(5, dev, "ptsreq->Protocol = 0x%.8x, ptsreq->Flags=0x%.8x, " "ptsreq->pts1=0x%.2x, ptsreq->pts2=0x%.2x, " 
"ptsreq->pts3=0x%.2x\n", (unsigned int)ptsreq->protocol, (unsigned int)ptsreq->flags, ptsreq->pts1, ptsreq->pts2, ptsreq->pts3); /* Fill PTS structure */ dev->pts[0] = 0xff; dev->pts[1] = 0x00; tmp = ptsreq->protocol; while ((tmp = (tmp >> 1)) > 0) dev->pts[1]++; dev->proto = dev->pts[1]; /* Set new protocol */ dev->pts[1] = (0x01 << 4) | (dev->pts[1]); /* Correct Fi/Di according to CM4000 Fi/Di table */ DEBUGP(5, dev, "Ta(1) from ATR is 0x%.2x\n", dev->ta1); /* set Fi/Di according to ATR TA(1) */ dev->pts[2] = fi_di_table[dev->ta1 & 0x0F][(dev->ta1 >> 4) & 0x0F]; /* Calculate PCK character */ dev->pts[3] = dev->pts[0] ^ dev->pts[1] ^ dev->pts[2]; DEBUGP(5, dev, "pts0=%.2x, pts1=%.2x, pts2=%.2x, pts3=%.2x\n", dev->pts[0], dev->pts[1], dev->pts[2], dev->pts[3]); /* check card convention */ if (test_bit(IS_INVREV, &dev->flags)) str_invert_revert(dev->pts, 4); /* reset SM */ xoutb(0x80, REG_FLAGS0(iobase)); /* Enable access to the message buffer */ DEBUGP(5, dev, "Enable access to the messages buffer\n"); dev->flags1 = 0x20 /* T_Active */ | (test_bit(IS_INVREV, &dev->flags) ? 0x02 : 0x00) /* inv parity */ | ((dev->baudv >> 8) & 0x01); /* MSB-baud */ xoutb(dev->flags1, REG_FLAGS1(iobase)); DEBUGP(5, dev, "Enable message buffer -> flags1 = 0x%.2x\n", dev->flags1); /* write challenge to the buffer */ DEBUGP(5, dev, "Write challenge to buffer: "); for (i = 0; i < 4; i++) { xoutb(i, REG_BUF_ADDR(iobase)); xoutb(dev->pts[i], REG_BUF_DATA(iobase)); /* buf data */ #ifdef CM4000_DEBUG pr_debug("0x%.2x ", dev->pts[i]); } pr_debug("\n"); #else } #endif /* set number of bytes to write */ DEBUGP(5, dev, "Set number of bytes to write\n"); xoutb(0x04, REG_NUM_SEND(iobase)); /* Trigger CARDMAN CONTROLLER */ xoutb(0x50, REG_FLAGS0(iobase)); /* Monitor progress */ /* wait for xmit done */ DEBUGP(5, dev, "Waiting for NumRecBytes getting valid\n"); for (i = 0; i < 100; i++) { if (inb(REG_FLAGS0(iobase)) & 0x08) { DEBUGP(5, dev, "NumRecBytes is valid\n"); break; } mdelay(10); } if (i == 100) { DEBUGP(5, dev, "Timeout waiting for NumRecBytes getting " "valid\n"); rc = -EIO; goto exit_setprotocol; } DEBUGP(5, dev, "Reading NumRecBytes\n"); for (i = 0; i < 100; i++) { io_read_num_rec_bytes(iobase, &num_bytes_read); if (num_bytes_read >= 4) { DEBUGP(2, dev, "NumRecBytes = %i\n", num_bytes_read); break; } mdelay(10); } /* check whether it is a short PTS reply? 
*/ if (num_bytes_read == 3) i = 0; if (i == 100) { DEBUGP(5, dev, "Timeout reading num_bytes_read\n"); rc = -EIO; goto exit_setprotocol; } DEBUGP(5, dev, "Reset the CARDMAN CONTROLLER\n"); xoutb(0x80, REG_FLAGS0(iobase)); /* Read PPS reply */ DEBUGP(5, dev, "Read PPS reply\n"); for (i = 0; i < num_bytes_read; i++) { xoutb(i, REG_BUF_ADDR(iobase)); pts_reply[i] = inb(REG_BUF_DATA(iobase)); } #ifdef CM4000_DEBUG DEBUGP(2, dev, "PTSreply: "); for (i = 0; i < num_bytes_read; i++) { pr_debug("0x%.2x ", pts_reply[i]); } pr_debug("\n"); #endif /* CM4000_DEBUG */ DEBUGP(5, dev, "Clear Tactive in Flags1\n"); xoutb(0x20, REG_FLAGS1(iobase)); /* Compare ptsreq and ptsreply */ if ((dev->pts[0] == pts_reply[0]) && (dev->pts[1] == pts_reply[1]) && (dev->pts[2] == pts_reply[2]) && (dev->pts[3] == pts_reply[3])) { /* setcardparameter according to PPS */ dev->baudv = calc_baudv(dev->pts[2]); set_cardparameter(dev); } else if ((dev->pts[0] == pts_reply[0]) && ((dev->pts[1] & 0xef) == pts_reply[1]) && ((pts_reply[0] ^ pts_reply[1]) == pts_reply[2])) { /* short PTS reply, set card parameter to default values */ dev->baudv = calc_baudv(0x11); set_cardparameter(dev); } else rc = -EIO; exit_setprotocol: DEBUGP(3, dev, "<- set_protocol\n"); return rc; } static int io_detect_cm4000(unsigned int iobase, struct cm4000_dev *dev) { /* note: statemachine is assumed to be reset */ if (inb(REG_FLAGS0(iobase)) & 8) { clear_bit(IS_ATR_VALID, &dev->flags); set_bit(IS_CMM_ABSENT, &dev->flags); return 0; /* detect CMM = 1 -> failure */ } /* xoutb(0x40, REG_FLAGS1(iobase)); detectCMM */ xoutb(dev->flags1 | 0x40, REG_FLAGS1(iobase)); if ((inb(REG_FLAGS0(iobase)) & 8) == 0) { clear_bit(IS_ATR_VALID, &dev->flags); set_bit(IS_CMM_ABSENT, &dev->flags); return 0; /* detect CMM=0 -> failure */ } /* clear detectCMM again by restoring original flags1 */ xoutb(dev->flags1, REG_FLAGS1(iobase)); return 1; } static void terminate_monitor(struct cm4000_dev *dev) { /* tell the monitor to stop and wait until * it terminates. */ DEBUGP(3, dev, "-> terminate_monitor\n"); wait_event_interruptible(dev->devq, test_and_set_bit(LOCK_MONITOR, (void *)&dev->flags)); /* now, LOCK_MONITOR has been set. * allow a last cycle in the monitor. * the monitor will indicate that it has * finished by clearing this bit. */ DEBUGP(5, dev, "Now allow last cycle of monitor!\n"); while (test_bit(LOCK_MONITOR, (void *)&dev->flags)) msleep(25); DEBUGP(5, dev, "Delete timer\n"); del_timer_sync(&dev->timer); dev->monitor_running = 0; /* always clear, so start_monitor() can restart us (e.g. on resume) */ DEBUGP(3, dev, "<- terminate_monitor\n"); } /* * monitor the card every 50msec. as a side-effect, retrieve the * atr once a card is inserted. another side-effect of retrieving the * atr is that the card will be powered on, so there is no need to * power on the card explicitly from the application: the driver * is already doing that for you. */ static void monitor_card(unsigned long p) { struct cm4000_dev *dev = (struct cm4000_dev *) p; unsigned int iobase = dev->p_dev->resource[0]->start; unsigned short s; struct ptsreq ptsreq; int i, atrc; DEBUGP(7, dev, "-> monitor_card\n"); /* if someone has set the lock for us: we're done! */ if (test_and_set_bit(LOCK_MONITOR, &dev->flags)) { DEBUGP(4, dev, "About to stop monitor\n"); /* no */ dev->rlen = dev->rpos = dev->atr_csum = dev->atr_len_retry = dev->cwarn = 0; dev->mstate = M_FETCH_ATR; clear_bit(LOCK_MONITOR, &dev->flags); /* close et al. 
are sleeping on devq, so wake it */ wake_up_interruptible(&dev->devq); DEBUGP(2, dev, "<- monitor_card (we are done now)\n"); return; } /* try to lock io: if it is already locked, just add another timer */ if (test_and_set_bit(LOCK_IO, (void *)&dev->flags)) { DEBUGP(4, dev, "Couldn't get IO lock\n"); goto return_with_timer; } /* is a card/a reader inserted at all ? */ dev->flags0 = xinb(REG_FLAGS0(iobase)); DEBUGP(7, dev, "dev->flags0 = 0x%2x\n", dev->flags0); DEBUGP(7, dev, "smartcard present: %s\n", dev->flags0 & 1 ? "yes" : "no"); DEBUGP(7, dev, "cardman present: %s\n", dev->flags0 == 0xff ? "no" : "yes"); if ((dev->flags0 & 1) == 0 /* no smartcard inserted */ || dev->flags0 == 0xff) { /* no cardman inserted */ /* no */ dev->rlen = dev->rpos = dev->atr_csum = dev->atr_len_retry = dev->cwarn = 0; dev->mstate = M_FETCH_ATR; dev->flags &= 0x000000ff; /* only keep IO and MONITOR locks */ if (dev->flags0 == 0xff) { DEBUGP(4, dev, "set IS_CMM_ABSENT bit\n"); set_bit(IS_CMM_ABSENT, &dev->flags); } else if (test_bit(IS_CMM_ABSENT, &dev->flags)) { DEBUGP(4, dev, "clear IS_CMM_ABSENT bit " "(card is removed)\n"); clear_bit(IS_CMM_ABSENT, &dev->flags); } goto release_io; } else if ((dev->flags0 & 1) && test_bit(IS_CMM_ABSENT, &dev->flags)) { /* cardman and card present but cardman was absent before * (after suspend with inserted card) */ DEBUGP(4, dev, "clear IS_CMM_ABSENT bit (card is inserted)\n"); clear_bit(IS_CMM_ABSENT, &dev->flags); } if (test_bit(IS_ATR_VALID, &dev->flags) == 1) { DEBUGP(7, dev, "believe ATR is already valid (do nothing)\n"); goto release_io; } switch (dev->mstate) { unsigned char flags0; case M_CARDOFF: DEBUGP(4, dev, "M_CARDOFF\n"); flags0 = inb(REG_FLAGS0(iobase)); if (flags0 & 0x02) { /* wait until Flags0 indicate power is off */ dev->mdelay = T_10MSEC; } else { /* Flags0 indicate power off and no card inserted now; * Reset CARDMAN CONTROLLER */ xoutb(0x80, REG_FLAGS0(iobase)); /* prepare for fetching ATR again: after card off ATR * is read again automatically */ dev->rlen = dev->rpos = dev->atr_csum = dev->atr_len_retry = dev->cwarn = 0; dev->mstate = M_FETCH_ATR; /* minimal gap between CARDOFF and read ATR is 50msec */ dev->mdelay = T_50MSEC; } break; case M_FETCH_ATR: DEBUGP(4, dev, "M_FETCH_ATR\n"); xoutb(0x80, REG_FLAGS0(iobase)); DEBUGP(4, dev, "Reset BAUDV to 9600\n"); dev->baudv = 0x173; /* 9600 */ xoutb(0x02, REG_STOPBITS(iobase)); /* stopbits=2 */ xoutb(0x73, REG_BAUDRATE(iobase)); /* baud value */ xoutb(0x21, REG_FLAGS1(iobase)); /* T_Active=1, baud value */ /* warm start vs. power on: */ xoutb(dev->flags0 & 2 ? 
0x46 : 0x44, REG_FLAGS0(iobase)); dev->mdelay = T_40MSEC; dev->mstate = M_TIMEOUT_WAIT; break; case M_TIMEOUT_WAIT: DEBUGP(4, dev, "M_TIMEOUT_WAIT\n"); /* numRecBytes */ io_read_num_rec_bytes(iobase, &dev->atr_len); dev->mdelay = T_10MSEC; dev->mstate = M_READ_ATR_LEN; break; case M_READ_ATR_LEN: DEBUGP(4, dev, "M_READ_ATR_LEN\n"); /* infinite loop possible, since there is no timeout */ #define MAX_ATR_LEN_RETRY 100 if (dev->atr_len == io_read_num_rec_bytes(iobase, &s)) { if (dev->atr_len_retry++ >= MAX_ATR_LEN_RETRY) { /* + XX msec */ dev->mdelay = T_10MSEC; dev->mstate = M_READ_ATR; } } else { dev->atr_len = s; dev->atr_len_retry = 0; /* set new timeout */ } DEBUGP(4, dev, "Current ATR_LEN = %i\n", dev->atr_len); break; case M_READ_ATR: DEBUGP(4, dev, "M_READ_ATR\n"); xoutb(0x80, REG_FLAGS0(iobase)); /* reset SM */ for (i = 0; i < dev->atr_len; i++) { xoutb(i, REG_BUF_ADDR(iobase)); dev->atr[i] = inb(REG_BUF_DATA(iobase)); } /* Deactivate T_Active flags */ DEBUGP(4, dev, "Deactivate T_Active flags\n"); dev->flags1 = 0x01; xoutb(dev->flags1, REG_FLAGS1(iobase)); /* atr is present (which doesn't mean it's valid) */ set_bit(IS_ATR_PRESENT, &dev->flags); if (dev->atr[0] == 0x03) str_invert_revert(dev->atr, dev->atr_len); atrc = parse_atr(dev); if (atrc == 0) { /* atr invalid */ dev->mdelay = 0; dev->mstate = M_BAD_CARD; } else { dev->mdelay = T_50MSEC; dev->mstate = M_ATR_PRESENT; set_bit(IS_ATR_VALID, &dev->flags); } if (test_bit(IS_ATR_VALID, &dev->flags) == 1) { DEBUGP(4, dev, "monitor_card: ATR valid\n"); /* if ta1 == 0x11, no PPS necessary (default values) */ /* do not do PPS with multi protocol cards */ if ((test_bit(IS_AUTOPPS_ACT, &dev->flags) == 0) && (dev->ta1 != 0x11) && !(test_bit(IS_ANY_T0, &dev->flags) && test_bit(IS_ANY_T1, &dev->flags))) { DEBUGP(4, dev, "Perform AUTOPPS\n"); set_bit(IS_AUTOPPS_ACT, &dev->flags); ptsreq.protocol = (0x01 << dev->proto); ptsreq.flags = 0x01; ptsreq.pts1 = 0x00; ptsreq.pts2 = 0x00; ptsreq.pts3 = 0x00; if (set_protocol(dev, &ptsreq) == 0) { DEBUGP(4, dev, "AUTOPPS ret SUCC\n"); clear_bit(IS_AUTOPPS_ACT, &dev->flags); wake_up_interruptible(&dev->atrq); } else { DEBUGP(4, dev, "AUTOPPS failed: " "repower using defaults\n"); /* prepare for repowering */ clear_bit(IS_ATR_PRESENT, &dev->flags); clear_bit(IS_ATR_VALID, &dev->flags); dev->rlen = dev->rpos = dev->atr_csum = dev->atr_len_retry = dev->cwarn = 0; dev->mstate = M_FETCH_ATR; dev->mdelay = T_50MSEC; } } else { /* for cards which use slightly different * params (extra guard time) */ set_cardparameter(dev); if (test_bit(IS_AUTOPPS_ACT, &dev->flags) == 1) DEBUGP(4, dev, "AUTOPPS already active " "2nd try:use default values\n"); if (dev->ta1 == 0x11) DEBUGP(4, dev, "No AUTOPPS necessary " "TA(1)==0x11\n"); if (test_bit(IS_ANY_T0, &dev->flags) && test_bit(IS_ANY_T1, &dev->flags)) DEBUGP(4, dev, "Do NOT perform AUTOPPS " "with multiprotocol cards\n"); clear_bit(IS_AUTOPPS_ACT, &dev->flags); wake_up_interruptible(&dev->atrq); } } else { DEBUGP(4, dev, "ATR invalid\n"); wake_up_interruptible(&dev->atrq); } break; case M_BAD_CARD: DEBUGP(4, dev, "M_BAD_CARD\n"); /* slow down warning, but prompt immediately after insertion */ if (dev->cwarn == 0 || dev->cwarn == 10) { set_bit(IS_BAD_CARD, &dev->flags); dev_warn(&dev->p_dev->dev, MODULE_NAME ": "); if (test_bit(IS_BAD_CSUM, &dev->flags)) { DEBUGP(4, dev, "ATR checksum (0x%.2x, should " "be zero) failed\n", dev->atr_csum); } #ifdef CM4000_DEBUG else if (test_bit(IS_BAD_LENGTH, &dev->flags)) { DEBUGP(4, dev, "ATR length error\n"); } else { DEBUGP(4, dev, 
"card damaged or wrong way " "inserted\n"); } #endif dev->cwarn = 0; wake_up_interruptible(&dev->atrq); /* wake open */ } dev->cwarn++; dev->mdelay = T_100MSEC; dev->mstate = M_FETCH_ATR; break; default: DEBUGP(7, dev, "Unknown action\n"); break; /* nothing */ } release_io: DEBUGP(7, dev, "release_io\n"); clear_bit(LOCK_IO, &dev->flags); wake_up_interruptible(&dev->ioq); /* whoever needs IO */ return_with_timer: DEBUGP(7, dev, "<- monitor_card (returns with timer)\n"); mod_timer(&dev->timer, jiffies + dev->mdelay); clear_bit(LOCK_MONITOR, &dev->flags); } /* Interface to userland (file_operations) */ static ssize_t cmm_read(struct file *filp, __user char *buf, size_t count, loff_t *ppos) { struct cm4000_dev *dev = filp->private_data; unsigned int iobase = dev->p_dev->resource[0]->start; ssize_t rc; int i, j, k; DEBUGP(2, dev, "-> cmm_read(%s,%d)\n", current->comm, current->pid); if (count == 0) /* according to manpage */ return 0; if (!pcmcia_dev_present(dev->p_dev) || /* device removed */ test_bit(IS_CMM_ABSENT, &dev->flags)) return -ENODEV; if (test_bit(IS_BAD_CSUM, &dev->flags)) return -EIO; /* also see the note about this in cmm_write */ if (wait_event_interruptible (dev->atrq, ((filp->f_flags & O_NONBLOCK) || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags) != 0)))) { if (filp->f_flags & O_NONBLOCK) return -EAGAIN; return -ERESTARTSYS; } if (test_bit(IS_ATR_VALID, &dev->flags) == 0) return -EIO; /* this one implements blocking IO */ if (wait_event_interruptible (dev->readq, ((filp->f_flags & O_NONBLOCK) || (dev->rpos < dev->rlen)))) { if (filp->f_flags & O_NONBLOCK) return -EAGAIN; return -ERESTARTSYS; } /* lock io */ if (wait_event_interruptible (dev->ioq, ((filp->f_flags & O_NONBLOCK) || (test_and_set_bit(LOCK_IO, (void *)&dev->flags) == 0)))) { if (filp->f_flags & O_NONBLOCK) return -EAGAIN; return -ERESTARTSYS; } rc = 0; dev->flags0 = inb(REG_FLAGS0(iobase)); if ((dev->flags0 & 1) == 0 /* no smartcard inserted */ || dev->flags0 == 0xff) { /* no cardman inserted */ clear_bit(IS_ATR_VALID, &dev->flags); if (dev->flags0 & 1) { set_bit(IS_CMM_ABSENT, &dev->flags); rc = -ENODEV; } else { rc = -EIO; } goto release_io; } DEBUGP(4, dev, "begin read answer\n"); j = min(count, (size_t)(dev->rlen - dev->rpos)); k = dev->rpos; if (k + j > 255) j = 256 - k; DEBUGP(4, dev, "read1 j=%d\n", j); for (i = 0; i < j; i++) { xoutb(k++, REG_BUF_ADDR(iobase)); dev->rbuf[i] = xinb(REG_BUF_DATA(iobase)); } j = min(count, (size_t)(dev->rlen - dev->rpos)); if (k + j > 255) { DEBUGP(4, dev, "read2 j=%d\n", j); dev->flags1 |= 0x10; /* MSB buf addr set */ xoutb(dev->flags1, REG_FLAGS1(iobase)); for (; i < j; i++) { xoutb(k++, REG_BUF_ADDR(iobase)); dev->rbuf[i] = xinb(REG_BUF_DATA(iobase)); } } if (dev->proto == 0 && count > dev->rlen - dev->rpos && i) { DEBUGP(4, dev, "T=0 and count > buffer\n"); dev->rbuf[i] = dev->rbuf[i - 1]; dev->rbuf[i - 1] = dev->procbyte; j++; } count = j; dev->rpos = dev->rlen + 1; /* Clear T1Active */ DEBUGP(4, dev, "Clear T1Active\n"); dev->flags1 &= 0xdf; xoutb(dev->flags1, REG_FLAGS1(iobase)); xoutb(0, REG_FLAGS1(iobase)); /* clear detectCMM */ /* last check before exit */ if (!io_detect_cm4000(iobase, dev)) { rc = -ENODEV; goto release_io; } if (test_bit(IS_INVREV, &dev->flags) && count > 0) str_invert_revert(dev->rbuf, count); if (copy_to_user(buf, dev->rbuf, count)) rc = -EFAULT; release_io: clear_bit(LOCK_IO, &dev->flags); wake_up_interruptible(&dev->ioq); DEBUGP(2, dev, "<- cmm_read returns: rc = %Zi\n", (rc < 0 ? rc : count)); return rc < 0 ? 
rc : count; } static ssize_t cmm_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { struct cm4000_dev *dev = filp->private_data; unsigned int iobase = dev->p_dev->resource[0]->start; unsigned short s; unsigned char tmp; unsigned char infolen; unsigned char sendT0; unsigned short nsend; unsigned short nr; ssize_t rc; int i; DEBUGP(2, dev, "-> cmm_write(%s,%d)\n", current->comm, current->pid); if (count == 0) /* according to manpage */ return 0; if (dev->proto == 0 && count < 4) { /* T0 must have at least 4 bytes */ DEBUGP(4, dev, "T0 short write\n"); return -EIO; } nr = count & 0x1ff; /* max bytes to write */ sendT0 = dev->proto ? 0 : nr > 5 ? 0x08 : 0; if (!pcmcia_dev_present(dev->p_dev) || /* device removed */ test_bit(IS_CMM_ABSENT, &dev->flags)) return -ENODEV; if (test_bit(IS_BAD_CSUM, &dev->flags)) { DEBUGP(4, dev, "bad csum\n"); return -EIO; } /* * wait for atr to become valid. * note: it is important to lock this code. if we dont, the monitor * could be run between test_bit and the call to sleep on the * atr-queue. if *then* the monitor detects atr valid, it will wake up * any process on the atr-queue, *but* since we have been interrupted, * we do not yet sleep on this queue. this would result in a missed * wake_up and the calling process would sleep forever (until * interrupted). also, do *not* restore_flags before sleep_on, because * this could result in the same situation! */ if (wait_event_interruptible (dev->atrq, ((filp->f_flags & O_NONBLOCK) || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags) != 0)))) { if (filp->f_flags & O_NONBLOCK) return -EAGAIN; return -ERESTARTSYS; } if (test_bit(IS_ATR_VALID, &dev->flags) == 0) { /* invalid atr */ DEBUGP(4, dev, "invalid ATR\n"); return -EIO; } /* lock io */ if (wait_event_interruptible (dev->ioq, ((filp->f_flags & O_NONBLOCK) || (test_and_set_bit(LOCK_IO, (void *)&dev->flags) == 0)))) { if (filp->f_flags & O_NONBLOCK) return -EAGAIN; return -ERESTARTSYS; } if (copy_from_user(dev->sbuf, buf, ((count > 512) ? 512 : count))) return -EFAULT; rc = 0; dev->flags0 = inb(REG_FLAGS0(iobase)); if ((dev->flags0 & 1) == 0 /* no smartcard inserted */ || dev->flags0 == 0xff) { /* no cardman inserted */ clear_bit(IS_ATR_VALID, &dev->flags); if (dev->flags0 & 1) { set_bit(IS_CMM_ABSENT, &dev->flags); rc = -ENODEV; } else { DEBUGP(4, dev, "IO error\n"); rc = -EIO; } goto release_io; } xoutb(0x80, REG_FLAGS0(iobase)); /* reset SM */ if (!io_detect_cm4000(iobase, dev)) { rc = -ENODEV; goto release_io; } /* reflect T=0 send/read mode in flags1 */ dev->flags1 |= (sendT0); set_cardparameter(dev); /* dummy read, reset flag procedure received */ tmp = inb(REG_FLAGS1(iobase)); dev->flags1 = 0x20 /* T_Active */ | (sendT0) | (test_bit(IS_INVREV, &dev->flags) ? 2 : 0)/* inverse parity */ | (((dev->baudv - 1) & 0x0100) >> 8); /* MSB-Baud */ DEBUGP(1, dev, "set dev->flags1 = 0x%.2x\n", dev->flags1); xoutb(dev->flags1, REG_FLAGS1(iobase)); /* xmit data */ DEBUGP(4, dev, "Xmit data\n"); for (i = 0; i < nr; i++) { if (i >= 256) { dev->flags1 = 0x20 /* T_Active */ | (sendT0) /* SendT0 */ /* inverse parity: */ | (test_bit(IS_INVREV, &dev->flags) ? 
2 : 0) | (((dev->baudv - 1) & 0x0100) >> 8) /* MSB-Baud */ | 0x10; /* set address high */ DEBUGP(4, dev, "dev->flags = 0x%.2x - set address " "high\n", dev->flags1); xoutb(dev->flags1, REG_FLAGS1(iobase)); } if (test_bit(IS_INVREV, &dev->flags)) { DEBUGP(4, dev, "Apply inverse convention for 0x%.2x " "-> 0x%.2x\n", (unsigned char)dev->sbuf[i], invert_revert(dev->sbuf[i])); xoutb(i, REG_BUF_ADDR(iobase)); xoutb(invert_revert(dev->sbuf[i]), REG_BUF_DATA(iobase)); } else { xoutb(i, REG_BUF_ADDR(iobase)); xoutb(dev->sbuf[i], REG_BUF_DATA(iobase)); } } DEBUGP(4, dev, "Xmit done\n"); if (dev->proto == 0) { /* T=0 proto: 0 byte reply */ if (nr == 4) { DEBUGP(4, dev, "T=0 assumes 0 byte reply\n"); xoutb(i, REG_BUF_ADDR(iobase)); if (test_bit(IS_INVREV, &dev->flags)) xoutb(0xff, REG_BUF_DATA(iobase)); else xoutb(0x00, REG_BUF_DATA(iobase)); } /* numSendBytes */ if (sendT0) nsend = nr; else { if (nr == 4) nsend = 5; else { nsend = 5 + (unsigned char)dev->sbuf[4]; if (dev->sbuf[4] == 0) nsend += 0x100; } } } else nsend = nr; /* T0: output procedure byte */ if (test_bit(IS_INVREV, &dev->flags)) { DEBUGP(4, dev, "T=0 set Procedure byte (inverse-reverse) " "0x%.2x\n", invert_revert(dev->sbuf[1])); xoutb(invert_revert(dev->sbuf[1]), REG_NUM_BYTES(iobase)); } else { DEBUGP(4, dev, "T=0 set Procedure byte 0x%.2x\n", dev->sbuf[1]); xoutb(dev->sbuf[1], REG_NUM_BYTES(iobase)); } DEBUGP(1, dev, "set NumSendBytes = 0x%.2x\n", (unsigned char)(nsend & 0xff)); xoutb((unsigned char)(nsend & 0xff), REG_NUM_SEND(iobase)); DEBUGP(1, dev, "Trigger CARDMAN CONTROLLER (0x%.2x)\n", 0x40 /* SM_Active */ | (dev->flags0 & 2 ? 0 : 4) /* power on if needed */ |(dev->proto ? 0x10 : 0x08) /* T=1/T=0 */ |(nsend & 0x100) >> 8 /* MSB numSendBytes */ ); xoutb(0x40 /* SM_Active */ | (dev->flags0 & 2 ? 0 : 4) /* power on if needed */ |(dev->proto ? 0x10 : 0x08) /* T=1/T=0 */ |(nsend & 0x100) >> 8, /* MSB numSendBytes */ REG_FLAGS0(iobase)); /* wait for xmit done */ if (dev->proto == 1) { DEBUGP(4, dev, "Wait for xmit done\n"); for (i = 0; i < 1000; i++) { if (inb(REG_FLAGS0(iobase)) & 0x08) break; msleep_interruptible(10); } if (i == 1000) { DEBUGP(4, dev, "timeout waiting for xmit done\n"); rc = -EIO; goto release_io; } } /* T=1: wait for infoLen */ infolen = 0; if (dev->proto) { /* wait until infoLen is valid */ for (i = 0; i < 6000; i++) { /* max waiting time of 1 min */ io_read_num_rec_bytes(iobase, &s); if (s >= 3) { infolen = inb(REG_FLAGS1(iobase)); DEBUGP(4, dev, "infolen=%d\n", infolen); break; } msleep_interruptible(10); } if (i == 6000) { DEBUGP(4, dev, "timeout waiting for infoLen\n"); rc = -EIO; goto release_io; } } else clear_bit(IS_PROCBYTE_PRESENT, &dev->flags); /* numRecBytes | bit9 of numRecytes */ io_read_num_rec_bytes(iobase, &dev->rlen); for (i = 0; i < 600; i++) { /* max waiting time of 2 sec */ if (dev->proto) { if (dev->rlen >= infolen + 4) break; } msleep_interruptible(10); /* numRecBytes | bit9 of numRecytes */ io_read_num_rec_bytes(iobase, &s); if (s > dev->rlen) { DEBUGP(1, dev, "NumRecBytes inc (reset timeout)\n"); i = 0; /* reset timeout */ dev->rlen = s; } /* T=0: we are done when numRecBytes doesn't * increment any more and NoProcedureByte * is set and numRecBytes == bytes sent + 6 * (header bytes + data + 1 for sw2) * except when the card replies an error * which means, no data will be sent back. 
*/ else if (dev->proto == 0) { if ((inb(REG_BUF_ADDR(iobase)) & 0x80)) { /* no procedure byte received since last read */ DEBUGP(1, dev, "NoProcedure byte set\n"); /* i=0; */ } else { /* procedure byte received since last read */ DEBUGP(1, dev, "NoProcedure byte unset " "(reset timeout)\n"); dev->procbyte = inb(REG_FLAGS1(iobase)); DEBUGP(1, dev, "Read procedure byte 0x%.2x\n", dev->procbyte); i = 0; /* resettimeout */ } if (inb(REG_FLAGS0(iobase)) & 0x08) { DEBUGP(1, dev, "T0Done flag (read reply)\n"); break; } } if (dev->proto) infolen = inb(REG_FLAGS1(iobase)); } if (i == 600) { DEBUGP(1, dev, "timeout waiting for numRecBytes\n"); rc = -EIO; goto release_io; } else { if (dev->proto == 0) { DEBUGP(1, dev, "Wait for T0Done bit to be set\n"); for (i = 0; i < 1000; i++) { if (inb(REG_FLAGS0(iobase)) & 0x08) break; msleep_interruptible(10); } if (i == 1000) { DEBUGP(1, dev, "timeout waiting for T0Done\n"); rc = -EIO; goto release_io; } dev->procbyte = inb(REG_FLAGS1(iobase)); DEBUGP(4, dev, "Read procedure byte 0x%.2x\n", dev->procbyte); io_read_num_rec_bytes(iobase, &dev->rlen); DEBUGP(4, dev, "Read NumRecBytes = %i\n", dev->rlen); } } /* T=1: read offset=zero, T=0: read offset=after challenge */ dev->rpos = dev->proto ? 0 : nr == 4 ? 5 : nr > dev->rlen ? 5 : nr; DEBUGP(4, dev, "dev->rlen = %i, dev->rpos = %i, nr = %i\n", dev->rlen, dev->rpos, nr); release_io: DEBUGP(4, dev, "Reset SM\n"); xoutb(0x80, REG_FLAGS0(iobase)); /* reset SM */ if (rc < 0) { DEBUGP(4, dev, "Write failed but clear T_Active\n"); dev->flags1 &= 0xdf; xoutb(dev->flags1, REG_FLAGS1(iobase)); } clear_bit(LOCK_IO, &dev->flags); wake_up_interruptible(&dev->ioq); wake_up_interruptible(&dev->readq); /* tell read we have data */ /* ITSEC E2: clear write buffer */ memset((char *)dev->sbuf, 0, 512); /* return error or actually written bytes */ DEBUGP(2, dev, "<- cmm_write\n"); return rc < 0 ? 
rc : nr; } static void start_monitor(struct cm4000_dev *dev) { DEBUGP(3, dev, "-> start_monitor\n"); if (!dev->monitor_running) { DEBUGP(5, dev, "create, init and add timer\n"); setup_timer(&dev->timer, monitor_card, (unsigned long)dev); dev->monitor_running = 1; mod_timer(&dev->timer, jiffies); } else DEBUGP(5, dev, "monitor already running\n"); DEBUGP(3, dev, "<- start_monitor\n"); } static void stop_monitor(struct cm4000_dev *dev) { DEBUGP(3, dev, "-> stop_monitor\n"); if (dev->monitor_running) { DEBUGP(5, dev, "stopping monitor\n"); terminate_monitor(dev); /* reset monitor SM */ clear_bit(IS_ATR_VALID, &dev->flags); clear_bit(IS_ATR_PRESENT, &dev->flags); } else DEBUGP(5, dev, "monitor already stopped\n"); DEBUGP(3, dev, "<- stop_monitor\n"); } static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct cm4000_dev *dev = filp->private_data; unsigned int iobase = dev->p_dev->resource[0]->start; struct inode *inode = file_inode(filp); struct pcmcia_device *link; int size; int rc; void __user *argp = (void __user *)arg; #ifdef CM4000_DEBUG char *ioctl_names[CM_IOC_MAXNR + 1] = { [_IOC_NR(CM_IOCGSTATUS)] "CM_IOCGSTATUS", [_IOC_NR(CM_IOCGATR)] "CM_IOCGATR", [_IOC_NR(CM_IOCARDOFF)] "CM_IOCARDOFF", [_IOC_NR(CM_IOCSPTS)] "CM_IOCSPTS", [_IOC_NR(CM_IOSDBGLVL)] "CM4000_DBGLVL", }; DEBUGP(3, dev, "cmm_ioctl(device=%d.%d) %s\n", imajor(inode), iminor(inode), ioctl_names[_IOC_NR(cmd)]); #endif mutex_lock(&cmm_mutex); rc = -ENODEV; link = dev_table[iminor(inode)]; if (!pcmcia_dev_present(link)) { DEBUGP(4, dev, "DEV_OK false\n"); goto out; } if (test_bit(IS_CMM_ABSENT, &dev->flags)) { DEBUGP(4, dev, "CMM_ABSENT flag set\n"); goto out; } rc = -EINVAL; if (_IOC_TYPE(cmd) != CM_IOC_MAGIC) { DEBUGP(4, dev, "ioctype mismatch\n"); goto out; } if (_IOC_NR(cmd) > CM_IOC_MAXNR) { DEBUGP(4, dev, "iocnr mismatch\n"); goto out; } size = _IOC_SIZE(cmd); rc = -EFAULT; DEBUGP(4, dev, "iocdir=%.4x iocr=%.4x iocw=%.4x iocsize=%d cmd=%.4x\n", _IOC_DIR(cmd), _IOC_READ, _IOC_WRITE, size, cmd); if (_IOC_DIR(cmd) & _IOC_READ) { if (!access_ok(VERIFY_WRITE, argp, size)) goto out; } if (_IOC_DIR(cmd) & _IOC_WRITE) { if (!access_ok(VERIFY_READ, argp, size)) goto out; } rc = 0; switch (cmd) { case CM_IOCGSTATUS: DEBUGP(4, dev, " ... in CM_IOCGSTATUS\n"); { int status; /* clear other bits, but leave inserted & powered as * they are */ status = dev->flags0 & 3; if (test_bit(IS_ATR_PRESENT, &dev->flags)) status |= CM_ATR_PRESENT; if (test_bit(IS_ATR_VALID, &dev->flags)) status |= CM_ATR_VALID; if (test_bit(IS_CMM_ABSENT, &dev->flags)) status |= CM_NO_READER; if (test_bit(IS_BAD_CARD, &dev->flags)) status |= CM_BAD_CARD; if (copy_to_user(argp, &status, sizeof(int))) rc = -EFAULT; } break; case CM_IOCGATR: DEBUGP(4, dev, "... in CM_IOCGATR\n"); { struct atreq __user *atreq = argp; int tmp; /* allow nonblocking io and being interrupted */ if (wait_event_interruptible (dev->atrq, ((filp->f_flags & O_NONBLOCK) || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags) != 0)))) { if (filp->f_flags & O_NONBLOCK) rc = -EAGAIN; else rc = -ERESTARTSYS; break; } rc = -EFAULT; if (test_bit(IS_ATR_VALID, &dev->flags) == 0) { tmp = -1; if (copy_to_user(&(atreq->atr_len), &tmp, sizeof(int))) break; } else { if (copy_to_user(atreq->atr, dev->atr, dev->atr_len)) break; tmp = dev->atr_len; if (copy_to_user(&(atreq->atr_len), &tmp, sizeof(int))) break; } rc = 0; break; } case CM_IOCARDOFF: #ifdef CM4000_DEBUG DEBUGP(4, dev, "... 
in CM_IOCARDOFF\n"); if (dev->flags0 & 0x01) { DEBUGP(4, dev, " Card inserted\n"); } else { DEBUGP(2, dev, " No card inserted\n"); } if (dev->flags0 & 0x02) { DEBUGP(4, dev, " Card powered\n"); } else { DEBUGP(2, dev, " Card not powered\n"); } #endif /* is a card inserted and powered? */ if ((dev->flags0 & 0x01) && (dev->flags0 & 0x02)) { /* get IO lock */ if (wait_event_interruptible (dev->ioq, ((filp->f_flags & O_NONBLOCK) || (test_and_set_bit(LOCK_IO, (void *)&dev->flags) == 0)))) { if (filp->f_flags & O_NONBLOCK) rc = -EAGAIN; else rc = -ERESTARTSYS; break; } /* Set Flags0 = 0x42 */ DEBUGP(4, dev, "Set Flags0=0x42 \n"); xoutb(0x42, REG_FLAGS0(iobase)); clear_bit(IS_ATR_PRESENT, &dev->flags); clear_bit(IS_ATR_VALID, &dev->flags); dev->mstate = M_CARDOFF; clear_bit(LOCK_IO, &dev->flags); if (wait_event_interruptible (dev->atrq, ((filp->f_flags & O_NONBLOCK) || (test_bit(IS_ATR_VALID, (void *)&dev->flags) != 0)))) { if (filp->f_flags & O_NONBLOCK) rc = -EAGAIN; else rc = -ERESTARTSYS; break; } } /* release lock */ clear_bit(LOCK_IO, &dev->flags); wake_up_interruptible(&dev->ioq); rc = 0; break; case CM_IOCSPTS: { struct ptsreq krnptsreq; if (copy_from_user(&krnptsreq, argp, sizeof(struct ptsreq))) { rc = -EFAULT; break; } rc = 0; DEBUGP(4, dev, "... in CM_IOCSPTS\n"); /* wait for ATR to get valid */ if (wait_event_interruptible (dev->atrq, ((filp->f_flags & O_NONBLOCK) || (test_bit(IS_ATR_PRESENT, (void *)&dev->flags) != 0)))) { if (filp->f_flags & O_NONBLOCK) rc = -EAGAIN; else rc = -ERESTARTSYS; break; } /* get IO lock */ if (wait_event_interruptible (dev->ioq, ((filp->f_flags & O_NONBLOCK) || (test_and_set_bit(LOCK_IO, (void *)&dev->flags) == 0)))) { if (filp->f_flags & O_NONBLOCK) rc = -EAGAIN; else rc = -ERESTARTSYS; break; } if ((rc = set_protocol(dev, &krnptsreq)) != 0) { /* auto power_on again */ dev->mstate = M_FETCH_ATR; clear_bit(IS_ATR_VALID, &dev->flags); } /* release lock */ clear_bit(LOCK_IO, &dev->flags); wake_up_interruptible(&dev->ioq); } break; #ifdef CM4000_DEBUG case CM_IOSDBGLVL: rc = -ENOTTY; break; #endif default: DEBUGP(4, dev, "... in default (unknown IOCTL code)\n"); rc = -ENOTTY; } out: mutex_unlock(&cmm_mutex); return rc; } static int cmm_open(struct inode *inode, struct file *filp) { struct cm4000_dev *dev; struct pcmcia_device *link; int minor = iminor(inode); int ret; if (minor >= CM4000_MAX_DEV) return -ENODEV; mutex_lock(&cmm_mutex); link = dev_table[minor]; if (link == NULL || !pcmcia_dev_present(link)) { ret = -ENODEV; goto out; } if (link->open) { ret = -EBUSY; goto out; } dev = link->priv; filp->private_data = dev; DEBUGP(2, dev, "-> cmm_open(device=%d.%d process=%s,%d)\n", imajor(inode), minor, current->comm, current->pid); /* init device variables, they may be "polluted" after close * or, the device may never have been closed (i.e. 
open failed) */ ZERO_DEV(dev); /* opening will always block since the * monitor will be started by open, which * means we have to wait for ATR becoming * valid = block until valid (or card * inserted) */ if (filp->f_flags & O_NONBLOCK) { ret = -EAGAIN; goto out; } dev->mdelay = T_50MSEC; /* start monitoring the cardstatus */ start_monitor(dev); link->open = 1; /* only one open per device */ DEBUGP(2, dev, "<- cmm_open\n"); ret = nonseekable_open(inode, filp); out: mutex_unlock(&cmm_mutex); return ret; } static int cmm_close(struct inode *inode, struct file *filp) { struct cm4000_dev *dev; struct pcmcia_device *link; int minor = iminor(inode); if (minor >= CM4000_MAX_DEV) return -ENODEV; link = dev_table[minor]; if (link == NULL) return -ENODEV; dev = link->priv; DEBUGP(2, dev, "-> cmm_close(maj/min=%d.%d)\n", imajor(inode), minor); stop_monitor(dev); ZERO_DEV(dev); link->open = 0; /* only one open per device */ wake_up(&dev->devq); /* socket removed? */ DEBUGP(2, dev, "<- cmm_close\n"); return 0; } static void cmm_cm4000_release(struct pcmcia_device * link) { struct cm4000_dev *dev = link->priv; /* don't terminate the monitor, rather rely on * close doing that for us. */ DEBUGP(3, dev, "-> cmm_cm4000_release\n"); while (link->open) { printk(KERN_INFO MODULE_NAME ": delaying release until " "process has terminated\n"); /* note: don't interrupt us: * close the applications which own * the devices _first_ ! */ wait_event(dev->devq, (link->open == 0)); } /* dev->devq=NULL; this cannot be zeroed earlier */ DEBUGP(3, dev, "<- cmm_cm4000_release\n"); return; } /*==== Interface to PCMCIA Layer =======================================*/ static int cm4000_config_check(struct pcmcia_device *p_dev, void *priv_data) { return pcmcia_request_io(p_dev); } static int cm4000_config(struct pcmcia_device * link, int devno) { link->config_flags |= CONF_AUTO_SET_IO; /* read the config-tuples */ if (pcmcia_loop_config(link, cm4000_config_check, NULL)) goto cs_release; if (pcmcia_enable_device(link)) goto cs_release; return 0; cs_release: cm4000_release(link); return -ENODEV; } static int cm4000_suspend(struct pcmcia_device *link) { struct cm4000_dev *dev; dev = link->priv; stop_monitor(dev); return 0; } static int cm4000_resume(struct pcmcia_device *link) { struct cm4000_dev *dev; dev = link->priv; if (link->open) start_monitor(dev); return 0; } static void cm4000_release(struct pcmcia_device *link) { cmm_cm4000_release(link); /* delay release until device closed */ pcmcia_disable_device(link); } static int cm4000_probe(struct pcmcia_device *link) { struct cm4000_dev *dev; int i, ret; for (i = 0; i < CM4000_MAX_DEV; i++) if (dev_table[i] == NULL) break; if (i == CM4000_MAX_DEV) { printk(KERN_NOTICE MODULE_NAME ": all devices in use\n"); return -ENODEV; } /* create a new cm4000_cs device */ dev = kzalloc(sizeof(struct cm4000_dev), GFP_KERNEL); if (dev == NULL) return -ENOMEM; dev->p_dev = link; link->priv = dev; dev_table[i] = link; init_waitqueue_head(&dev->devq); init_waitqueue_head(&dev->ioq); init_waitqueue_head(&dev->atrq); init_waitqueue_head(&dev->readq); ret = cm4000_config(link, i); if (ret) { dev_table[i] = NULL; kfree(dev); return ret; } device_create(cmm_class, NULL, MKDEV(major, i), NULL, "cmm%d", i); return 0; } static void cm4000_detach(struct pcmcia_device *link) { struct cm4000_dev *dev = link->priv; int devno; /* find device */ for (devno = 0; devno < CM4000_MAX_DEV; devno++) if (dev_table[devno] == link) break; if (devno == CM4000_MAX_DEV) return; 
stop_monitor(dev); cm4000_release(link); dev_table[devno] = NULL; kfree(dev); device_destroy(cmm_class, MKDEV(major, devno)); return; } static const struct file_operations cm4000_fops = { .owner = THIS_MODULE, .read = cmm_read, .write = cmm_write, .unlocked_ioctl = cmm_ioctl, .open = cmm_open, .release= cmm_close, .llseek = no_llseek, }; static const struct pcmcia_device_id cm4000_ids[] = { PCMCIA_DEVICE_MANF_CARD(0x0223, 0x0002), PCMCIA_DEVICE_PROD_ID12("CardMan", "4000", 0x2FB368CA, 0xA2BD8C39), PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, cm4000_ids); static struct pcmcia_driver cm4000_driver = { .owner = THIS_MODULE, .name = "cm4000_cs", .probe = cm4000_probe, .remove = cm4000_detach, .suspend = cm4000_suspend, .resume = cm4000_resume, .id_table = cm4000_ids, }; static int __init cmm_init(void) { int rc; cmm_class = class_create(THIS_MODULE, "cardman_4000"); if (IS_ERR(cmm_class)) return PTR_ERR(cmm_class); major = register_chrdev(0, DEVICE_NAME, &cm4000_fops); if (major < 0) { printk(KERN_WARNING MODULE_NAME ": could not get major number\n"); class_destroy(cmm_class); return major; } rc = pcmcia_register_driver(&cm4000_driver); if (rc < 0) { unregister_chrdev(major, DEVICE_NAME); class_destroy(cmm_class); return rc; } return 0; } static void __exit cmm_exit(void) { pcmcia_unregister_driver(&cm4000_driver); unregister_chrdev(major, DEVICE_NAME); class_destroy(cmm_class); }; module_init(cmm_init); module_exit(cmm_exit); MODULE_LICENSE("Dual BSD/GPL");
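/*
 * Illustrative user-space sketch only; not part of this driver. The char
 * device nodes created above ("cmm%d") speak the ioctl interface from
 * <linux/cm4000_cs.h>: CM_IOCGSTATUS polls the reader/card state, and
 * CM_IOCGATR blocks (unless O_NONBLOCK) until the card monitor has fetched
 * an ATR. The device path and the error handling below are assumptions
 * made for illustration.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	#include <linux/cm4000_cs.h>
 *
 *	int main(void)
 *	{
 *		struct atreq atr;
 *		int status, fd = open("/dev/cmm0", O_RDWR);
 *
 *		if (fd < 0)
 *			return 1;
 *		if (ioctl(fd, CM_IOCGSTATUS, &status) == 0 &&
 *		    (status & CM_ATR_PRESENT))
 *			printf("card present, status 0x%x\n", status);
 *		if (ioctl(fd, CM_IOCGATR, &atr) == 0 && atr.atr_len > 0)
 *			printf("got a %d byte ATR\n", atr.atr_len);
 *		close(fd);
 *		return 0;
 *	}
 */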
gpl-2.0
nikhil18/lightning-kernel-CAF
arch/arm/mach-imx/mx31moboard-marxbot.c
4913
9527
/* * Copyright (C) 2009 Valentin Longchamp, EPFL Mobots group * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/delay.h> #include <linux/gpio.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/i2c.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/types.h> #include <linux/usb/otg.h> #include <mach/common.h> #include <mach/hardware.h> #include <mach/iomux-mx3.h> #include <mach/ulpi.h> #include <media/soc_camera.h> #include "devices-imx31.h" static unsigned int marxbot_pins[] = { /* SDHC2 */ MX31_PIN_PC_PWRON__SD2_DATA3, MX31_PIN_PC_VS1__SD2_DATA2, MX31_PIN_PC_READY__SD2_DATA1, MX31_PIN_PC_WAIT_B__SD2_DATA0, MX31_PIN_PC_CD2_B__SD2_CLK, MX31_PIN_PC_CD1_B__SD2_CMD, MX31_PIN_ATA_DIOR__GPIO3_28, MX31_PIN_ATA_DIOW__GPIO3_29, /* CSI */ MX31_PIN_CSI_D6__CSI_D6, MX31_PIN_CSI_D7__CSI_D7, MX31_PIN_CSI_D8__CSI_D8, MX31_PIN_CSI_D9__CSI_D9, MX31_PIN_CSI_D10__CSI_D10, MX31_PIN_CSI_D11__CSI_D11, MX31_PIN_CSI_D12__CSI_D12, MX31_PIN_CSI_D13__CSI_D13, MX31_PIN_CSI_D14__CSI_D14, MX31_PIN_CSI_D15__CSI_D15, MX31_PIN_CSI_HSYNC__CSI_HSYNC, MX31_PIN_CSI_MCLK__CSI_MCLK, MX31_PIN_CSI_PIXCLK__CSI_PIXCLK, MX31_PIN_CSI_VSYNC__CSI_VSYNC, MX31_PIN_CSI_D4__GPIO3_4, MX31_PIN_CSI_D5__GPIO3_5, MX31_PIN_GPIO3_0__GPIO3_0, MX31_PIN_GPIO3_1__GPIO3_1, MX31_PIN_TXD2__GPIO1_28, /* dsPIC resets */ MX31_PIN_STXD5__GPIO1_21, MX31_PIN_SRXD5__GPIO1_22, /*battery detection */ MX31_PIN_LCS0__GPIO3_23, /* USB H1 */ MX31_PIN_CSPI1_MISO__USBH1_RXDP, MX31_PIN_CSPI1_MOSI__USBH1_RXDM, MX31_PIN_CSPI1_SS0__USBH1_TXDM, MX31_PIN_CSPI1_SS1__USBH1_TXDP, MX31_PIN_CSPI1_SS2__USBH1_RCV, MX31_PIN_CSPI1_SCLK__USBH1_OEB, MX31_PIN_CSPI1_SPI_RDY__USBH1_FS, MX31_PIN_SFS6__USBH1_SUSPEND, MX31_PIN_NFRE_B__GPIO1_11, MX31_PIN_NFALE__GPIO1_12, /* SEL */ MX31_PIN_DTR_DCE1__GPIO2_8, MX31_PIN_DSR_DCE1__GPIO2_9, MX31_PIN_RI_DCE1__GPIO2_10, MX31_PIN_DCD_DCE1__GPIO2_11, }; #define SDHC2_CD IOMUX_TO_GPIO(MX31_PIN_ATA_DIOR) #define SDHC2_WP IOMUX_TO_GPIO(MX31_PIN_ATA_DIOW) static int marxbot_sdhc2_get_ro(struct device *dev) { return !gpio_get_value(SDHC2_WP); } static int marxbot_sdhc2_init(struct device *dev, irq_handler_t detect_irq, void *data) { int ret; ret = gpio_request(SDHC2_CD, "sdhc-detect"); if (ret) return ret; gpio_direction_input(SDHC2_CD); ret = gpio_request(SDHC2_WP, "sdhc-wp"); if (ret) goto err_gpio_free; gpio_direction_input(SDHC2_WP); ret = request_irq(gpio_to_irq(SDHC2_CD), detect_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, "sdhc2-card-detect", data); if (ret) goto err_gpio_free_2; return 0; err_gpio_free_2: gpio_free(SDHC2_WP); err_gpio_free: gpio_free(SDHC2_CD); return ret; } static void marxbot_sdhc2_exit(struct device *dev, void *data) { free_irq(gpio_to_irq(SDHC2_CD), data); gpio_free(SDHC2_WP); gpio_free(SDHC2_CD); } static const struct imxmmc_platform_data sdhc2_pdata __initconst = { .get_ro = marxbot_sdhc2_get_ro, .init = marxbot_sdhc2_init, .exit = marxbot_sdhc2_exit, }; #define TRSLAT_RST_B IOMUX_TO_GPIO(MX31_PIN_STXD5) #define DSPICS_RST_B IOMUX_TO_GPIO(MX31_PIN_SRXD5) static void dspics_resets_init(void) { if 
(!gpio_request(TRSLAT_RST_B, "translator-rst")) { gpio_direction_output(TRSLAT_RST_B, 0); gpio_export(TRSLAT_RST_B, false); } if (!gpio_request(DSPICS_RST_B, "dspics-rst")) { gpio_direction_output(DSPICS_RST_B, 0); gpio_export(DSPICS_RST_B, false); } } static struct spi_board_info marxbot_spi_board_info[] __initdata = { { .modalias = "spidev", .max_speed_hz = 300000, .bus_num = 1, .chip_select = 1, /* according spi1_cs[] ! */ }, }; #define TURRETCAM_POWER IOMUX_TO_GPIO(MX31_PIN_GPIO3_1) #define BASECAM_POWER IOMUX_TO_GPIO(MX31_PIN_CSI_D5) #define TURRETCAM_RST_B IOMUX_TO_GPIO(MX31_PIN_GPIO3_0) #define BASECAM_RST_B IOMUX_TO_GPIO(MX31_PIN_CSI_D4) #define CAM_CHOICE IOMUX_TO_GPIO(MX31_PIN_TXD2) static int marxbot_basecam_power(struct device *dev, int on) { gpio_set_value(BASECAM_POWER, !on); return 0; } static int marxbot_basecam_reset(struct device *dev) { gpio_set_value(BASECAM_RST_B, 0); udelay(100); gpio_set_value(BASECAM_RST_B, 1); return 0; } static struct i2c_board_info marxbot_i2c_devices[] = { { I2C_BOARD_INFO("mt9t031", 0x5d), }, }; static struct soc_camera_link base_iclink = { .bus_id = 0, /* Must match with the camera ID */ .power = marxbot_basecam_power, .reset = marxbot_basecam_reset, .board_info = &marxbot_i2c_devices[0], .i2c_adapter_id = 0, }; static struct platform_device marxbot_camera[] = { { .name = "soc-camera-pdrv", .id = 0, .dev = { .platform_data = &base_iclink, }, }, }; static struct platform_device *marxbot_cameras[] __initdata = { &marxbot_camera[0], }; static int __init marxbot_cam_init(void) { int ret = gpio_request(CAM_CHOICE, "cam-choice"); if (ret) return ret; gpio_direction_output(CAM_CHOICE, 0); ret = gpio_request(BASECAM_RST_B, "basecam-reset"); if (ret) return ret; gpio_direction_output(BASECAM_RST_B, 1); ret = gpio_request(BASECAM_POWER, "basecam-standby"); if (ret) return ret; gpio_direction_output(BASECAM_POWER, 0); ret = gpio_request(TURRETCAM_RST_B, "turretcam-reset"); if (ret) return ret; gpio_direction_output(TURRETCAM_RST_B, 1); ret = gpio_request(TURRETCAM_POWER, "turretcam-standby"); if (ret) return ret; gpio_direction_output(TURRETCAM_POWER, 0); return 0; } #define SEL0 IOMUX_TO_GPIO(MX31_PIN_DTR_DCE1) #define SEL1 IOMUX_TO_GPIO(MX31_PIN_DSR_DCE1) #define SEL2 IOMUX_TO_GPIO(MX31_PIN_RI_DCE1) #define SEL3 IOMUX_TO_GPIO(MX31_PIN_DCD_DCE1) static void marxbot_init_sel_gpios(void) { if (!gpio_request(SEL0, "sel0")) { gpio_direction_input(SEL0); gpio_export(SEL0, true); } if (!gpio_request(SEL1, "sel1")) { gpio_direction_input(SEL1); gpio_export(SEL1, true); } if (!gpio_request(SEL2, "sel2")) { gpio_direction_input(SEL2); gpio_export(SEL2, true); } if (!gpio_request(SEL3, "sel3")) { gpio_direction_input(SEL3); gpio_export(SEL3, true); } } #define USB_PAD_CFG (PAD_CTL_DRV_MAX | PAD_CTL_SRE_FAST | PAD_CTL_HYS_CMOS | \ PAD_CTL_ODE_CMOS | PAD_CTL_100K_PU) static int marxbot_usbh1_hw_init(struct platform_device *pdev) { mxc_iomux_set_gpr(MUX_PGP_USB_SUSPEND, true); mxc_iomux_set_pad(MX31_PIN_CSPI1_MISO, USB_PAD_CFG); mxc_iomux_set_pad(MX31_PIN_CSPI1_MOSI, USB_PAD_CFG); mxc_iomux_set_pad(MX31_PIN_CSPI1_SS0, USB_PAD_CFG); mxc_iomux_set_pad(MX31_PIN_CSPI1_SS1, USB_PAD_CFG); mxc_iomux_set_pad(MX31_PIN_CSPI1_SS2, USB_PAD_CFG); mxc_iomux_set_pad(MX31_PIN_CSPI1_SCLK, USB_PAD_CFG); mxc_iomux_set_pad(MX31_PIN_CSPI1_SPI_RDY, USB_PAD_CFG); mxc_iomux_set_pad(MX31_PIN_SFS6, USB_PAD_CFG); mdelay(10); return mx31_initialize_usb_hw(pdev->id, MXC_EHCI_POWER_PINS_ENABLED | MXC_EHCI_INTERFACE_SINGLE_UNI); } #define USBH1_VBUSEN_B IOMUX_TO_GPIO(MX31_PIN_NFRE_B) #define 
USBH1_MODE IOMUX_TO_GPIO(MX31_PIN_NFALE) static int marxbot_isp1105_init(struct usb_phy *otg) { int ret = gpio_request(USBH1_MODE, "usbh1-mode"); if (ret) return ret; /* single ended */ gpio_direction_output(USBH1_MODE, 0); ret = gpio_request(USBH1_VBUSEN_B, "usbh1-vbusen"); if (ret) { gpio_free(USBH1_MODE); return ret; } gpio_direction_output(USBH1_VBUSEN_B, 1); return 0; } static int marxbot_isp1105_set_vbus(struct usb_otg *otg, bool on) { if (on) gpio_set_value(USBH1_VBUSEN_B, 0); else gpio_set_value(USBH1_VBUSEN_B, 1); return 0; } static struct mxc_usbh_platform_data usbh1_pdata __initdata = { .init = marxbot_usbh1_hw_init, .portsc = MXC_EHCI_MODE_UTMI | MXC_EHCI_SERIAL, }; static int __init marxbot_usbh1_init(void) { struct usb_phy *phy; struct platform_device *pdev; phy = kzalloc(sizeof(*phy), GFP_KERNEL); if (!phy) return -ENOMEM; phy->otg = kzalloc(sizeof(struct usb_otg), GFP_KERNEL); if (!phy->otg) { kfree(phy); return -ENOMEM; } phy->label = "ISP1105"; phy->init = marxbot_isp1105_init; phy->otg->set_vbus = marxbot_isp1105_set_vbus; usbh1_pdata.otg = phy; pdev = imx31_add_mxc_ehci_hs(1, &usbh1_pdata); if (IS_ERR(pdev)) return PTR_ERR(pdev); return 0; } static const struct fsl_usb2_platform_data usb_pdata __initconst = { .operating_mode = FSL_USB2_DR_DEVICE, .phy_mode = FSL_USB2_PHY_ULPI, }; /* * system init for baseboard usage. Will be called by mx31moboard init. */ void __init mx31moboard_marxbot_init(void) { printk(KERN_INFO "Initializing mx31marxbot peripherals\n"); mxc_iomux_setup_multiple_pins(marxbot_pins, ARRAY_SIZE(marxbot_pins), "marxbot"); marxbot_init_sel_gpios(); dspics_resets_init(); imx31_add_mxc_mmc(1, &sdhc2_pdata); spi_register_board_info(marxbot_spi_board_info, ARRAY_SIZE(marxbot_spi_board_info)); marxbot_cam_init(); platform_add_devices(marxbot_cameras, ARRAY_SIZE(marxbot_cameras)); /* battery present pin */ gpio_request(IOMUX_TO_GPIO(MX31_PIN_LCS0), "bat-present"); gpio_direction_input(IOMUX_TO_GPIO(MX31_PIN_LCS0)); gpio_export(IOMUX_TO_GPIO(MX31_PIN_LCS0), false); imx31_add_fsl_usb2_udc(&usb_pdata); marxbot_usbh1_init(); }
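/*
 * Illustrative sketch only; not part of this board file. marxbot_cam_init()
 * above returns on the first failed gpio_request() without releasing the
 * GPIOs it already claimed; the sdhc2 init hook shows the usual unwind
 * pattern. A hypothetical table-driven variant with full cleanup could look
 * like this (the helper name and the table are assumptions for illustration;
 * the GPIO lines and initial levels match marxbot_cam_init() above).
 */
static int __init __maybe_unused marxbot_cam_init_sketch(void)
{
	const struct {
		unsigned gpio;
		const char *label;
		int value;
	} cam_gpios[] = {
		{ CAM_CHOICE,		"cam-choice",		0 },
		{ BASECAM_RST_B,	"basecam-reset",	1 },
		{ BASECAM_POWER,	"basecam-standby",	0 },
		{ TURRETCAM_RST_B,	"turretcam-reset",	1 },
		{ TURRETCAM_POWER,	"turretcam-standby",	0 },
	};
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(cam_gpios); i++) {
		ret = gpio_request(cam_gpios[i].gpio, cam_gpios[i].label);
		if (ret)
			goto err_free;
		gpio_direction_output(cam_gpios[i].gpio, cam_gpios[i].value);
	}
	return 0;

err_free:
	/* release everything claimed so far, newest first */
	while (--i >= 0)
		gpio_free(cam_gpios[i].gpio);
	return ret;
}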
gpl-2.0
MingoAllenII/draconis_msm8226
drivers/net/vmxnet3/vmxnet3_ethtool.c
5169
18966
/* * Linux driver for VMware's vmxnet3 ethernet NIC. * * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; version 2 of the License and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * The full GNU General Public License is included in this distribution in * the file called "COPYING". * * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> * */ #include "vmxnet3_int.h" struct vmxnet3_stat_desc { char desc[ETH_GSTRING_LEN]; int offset; }; /* per tq stats maintained by the device */ static const struct vmxnet3_stat_desc vmxnet3_tq_dev_stats[] = { /* description, offset */ { "Tx Queue#", 0 }, { " TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) }, { " TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) }, { " ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) }, { " ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) }, { " mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) }, { " mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) }, { " bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) }, { " bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) }, { " pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) }, { " pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) }, }; /* per tq stats maintained by the driver */ static const struct vmxnet3_stat_desc vmxnet3_tq_driver_stats[] = { /* description, offset */ {" drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats, drop_total) }, { " too many frags", offsetof(struct vmxnet3_tq_driver_stats, drop_too_many_frags) }, { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats, drop_oversized_hdr) }, { " hdr err", offsetof(struct vmxnet3_tq_driver_stats, drop_hdr_inspect_err) }, { " tso", offsetof(struct vmxnet3_tq_driver_stats, drop_tso) }, { " ring full", offsetof(struct vmxnet3_tq_driver_stats, tx_ring_full) }, { " pkts linearized", offsetof(struct vmxnet3_tq_driver_stats, linearized) }, { " hdr cloned", offsetof(struct vmxnet3_tq_driver_stats, copy_skb_header) }, { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats, oversized_hdr) }, }; /* per rq stats maintained by the device */ static const struct vmxnet3_stat_desc vmxnet3_rq_dev_stats[] = { { "Rx Queue#", 0 }, { " LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) }, { " LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) }, { " ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) }, { " ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) }, { " mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) }, { " mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) }, { " bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) }, { " bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) }, { " pkts rx OOB", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) }, { " pkts rx err", offsetof(struct UPT1_RxStats, 
pktsRxError) }, }; /* per rq stats maintained by the driver */ static const struct vmxnet3_stat_desc vmxnet3_rq_driver_stats[] = { /* description, offset */ { " drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats, drop_total) }, { " err", offsetof(struct vmxnet3_rq_driver_stats, drop_err) }, { " fcs", offsetof(struct vmxnet3_rq_driver_stats, drop_fcs) }, { " rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats, rx_buf_alloc_failure) }, }; /* global stats maintained by the driver */ static const struct vmxnet3_stat_desc vmxnet3_global_stats[] = { /* description, offset */ { "tx timeout count", offsetof(struct vmxnet3_adapter, tx_timeout_count) } }; struct rtnl_link_stats64 * vmxnet3_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct vmxnet3_adapter *adapter; struct vmxnet3_tq_driver_stats *drvTxStats; struct vmxnet3_rq_driver_stats *drvRxStats; struct UPT1_TxStats *devTxStats; struct UPT1_RxStats *devRxStats; unsigned long flags; int i; adapter = netdev_priv(netdev); /* Collect the dev stats into the shared area */ spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); spin_unlock_irqrestore(&adapter->cmd_lock, flags); for (i = 0; i < adapter->num_tx_queues; i++) { devTxStats = &adapter->tqd_start[i].stats; drvTxStats = &adapter->tx_queue[i].stats; stats->tx_packets += devTxStats->ucastPktsTxOK + devTxStats->mcastPktsTxOK + devTxStats->bcastPktsTxOK; stats->tx_bytes += devTxStats->ucastBytesTxOK + devTxStats->mcastBytesTxOK + devTxStats->bcastBytesTxOK; stats->tx_errors += devTxStats->pktsTxError; stats->tx_dropped += drvTxStats->drop_total; } for (i = 0; i < adapter->num_rx_queues; i++) { devRxStats = &adapter->rqd_start[i].stats; drvRxStats = &adapter->rx_queue[i].stats; stats->rx_packets += devRxStats->ucastPktsRxOK + devRxStats->mcastPktsRxOK + devRxStats->bcastPktsRxOK; stats->rx_bytes += devRxStats->ucastBytesRxOK + devRxStats->mcastBytesRxOK + devRxStats->bcastBytesRxOK; stats->rx_errors += devRxStats->pktsRxError; stats->rx_dropped += drvRxStats->drop_total; stats->multicast += devRxStats->mcastPktsRxOK; } return stats; } static int vmxnet3_get_sset_count(struct net_device *netdev, int sset) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); switch (sset) { case ETH_SS_STATS: return (ARRAY_SIZE(vmxnet3_tq_dev_stats) + ARRAY_SIZE(vmxnet3_tq_driver_stats)) * adapter->num_tx_queues + (ARRAY_SIZE(vmxnet3_rq_dev_stats) + ARRAY_SIZE(vmxnet3_rq_driver_stats)) * adapter->num_rx_queues + ARRAY_SIZE(vmxnet3_global_stats); default: return -EOPNOTSUPP; } } /* Should be multiple of 4 */ #define NUM_TX_REGS 8 #define NUM_RX_REGS 12 static int vmxnet3_get_regs_len(struct net_device *netdev) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); return (adapter->num_tx_queues * NUM_TX_REGS * sizeof(u32) + adapter->num_rx_queues * NUM_RX_REGS * sizeof(u32)); } static void vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); strlcpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT, sizeof(drvinfo->version)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), ETHTOOL_BUSINFO_LEN); drvinfo->n_stats = vmxnet3_get_sset_count(netdev, ETH_SS_STATS); drvinfo->testinfo_len = 0; drvinfo->eedump_len = 0; drvinfo->regdump_len = vmxnet3_get_regs_len(netdev); } static void vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
{ struct vmxnet3_adapter *adapter = netdev_priv(netdev); if (stringset == ETH_SS_STATS) { int i, j; for (j = 0; j < adapter->num_tx_queues; j++) { for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) { memcpy(buf, vmxnet3_tq_dev_stats[i].desc, ETH_GSTRING_LEN); buf += ETH_GSTRING_LEN; } for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) { memcpy(buf, vmxnet3_tq_driver_stats[i].desc, ETH_GSTRING_LEN); buf += ETH_GSTRING_LEN; } } for (j = 0; j < adapter->num_rx_queues; j++) { for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) { memcpy(buf, vmxnet3_rq_dev_stats[i].desc, ETH_GSTRING_LEN); buf += ETH_GSTRING_LEN; } for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) { memcpy(buf, vmxnet3_rq_driver_stats[i].desc, ETH_GSTRING_LEN); buf += ETH_GSTRING_LEN; } } for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) { memcpy(buf, vmxnet3_global_stats[i].desc, ETH_GSTRING_LEN); buf += ETH_GSTRING_LEN; } } } int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); unsigned long flags; netdev_features_t changed = features ^ netdev->features; if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_RX)) { if (features & NETIF_F_RXCSUM) adapter->shared->devRead.misc.uptFeatures |= UPT1_F_RXCSUM; else adapter->shared->devRead.misc.uptFeatures &= ~UPT1_F_RXCSUM; /* update hardware LRO capability accordingly */ if (features & NETIF_F_LRO) adapter->shared->devRead.misc.uptFeatures |= UPT1_F_LRO; else adapter->shared->devRead.misc.uptFeatures &= ~UPT1_F_LRO; if (features & NETIF_F_HW_VLAN_RX) adapter->shared->devRead.misc.uptFeatures |= UPT1_F_RXVLAN; else adapter->shared->devRead.misc.uptFeatures &= ~UPT1_F_RXVLAN; spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_FEATURE); spin_unlock_irqrestore(&adapter->cmd_lock, flags); } return 0; } static void vmxnet3_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *buf) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); unsigned long flags; u8 *base; int i; int j = 0; spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); spin_unlock_irqrestore(&adapter->cmd_lock, flags); /* this does assume each counter is 64-bit wide */ for (j = 0; j < adapter->num_tx_queues; j++) { base = (u8 *)&adapter->tqd_start[j].stats; *buf++ = (u64)j; for (i = 1; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) *buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset); base = (u8 *)&adapter->tx_queue[j].stats; for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) *buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset); } for (j = 0; j < adapter->num_rx_queues; j++) { base = (u8 *)&adapter->rqd_start[j].stats; *buf++ = (u64) j; for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) *buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset); base = (u8 *)&adapter->rx_queue[j].stats; for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) *buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset); } base = (u8 *)adapter; for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) *buf++ = *(u64 *)(base + vmxnet3_global_stats[i].offset); } static void vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); u32 *buf = p; int i = 0, j = 0; memset(p, 0, vmxnet3_get_regs_len(netdev)); regs->version = 1; /* Update vmxnet3_get_regs_len if we want to dump more
registers */ /* make each ring use multiple of 16 bytes */ for (i = 0; i < adapter->num_tx_queues; i++) { buf[j++] = adapter->tx_queue[i].tx_ring.next2fill; buf[j++] = adapter->tx_queue[i].tx_ring.next2comp; buf[j++] = adapter->tx_queue[i].tx_ring.gen; buf[j++] = 0; buf[j++] = adapter->tx_queue[i].comp_ring.next2proc; buf[j++] = adapter->tx_queue[i].comp_ring.gen; buf[j++] = adapter->tx_queue[i].stopped; buf[j++] = 0; } for (i = 0; i < adapter->num_rx_queues; i++) { buf[j++] = adapter->rx_queue[i].rx_ring[0].next2fill; buf[j++] = adapter->rx_queue[i].rx_ring[0].next2comp; buf[j++] = adapter->rx_queue[i].rx_ring[0].gen; buf[j++] = 0; buf[j++] = adapter->rx_queue[i].rx_ring[1].next2fill; buf[j++] = adapter->rx_queue[i].rx_ring[1].next2comp; buf[j++] = adapter->rx_queue[i].rx_ring[1].gen; buf[j++] = 0; buf[j++] = adapter->rx_queue[i].comp_ring.next2proc; buf[j++] = adapter->rx_queue[i].comp_ring.gen; buf[j++] = 0; buf[j++] = 0; } } static void vmxnet3_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); wol->supported = WAKE_UCAST | WAKE_ARP | WAKE_MAGIC; wol->wolopts = adapter->wol; } static int vmxnet3_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); if (wol->wolopts & (WAKE_PHY | WAKE_MCAST | WAKE_BCAST | WAKE_MAGICSECURE)) { return -EOPNOTSUPP; } adapter->wol = wol->wolopts; device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); return 0; } static int vmxnet3_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); ecmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full | SUPPORTED_TP; ecmd->advertising = ADVERTISED_TP; ecmd->port = PORT_TP; ecmd->transceiver = XCVR_INTERNAL; if (adapter->link_speed) { ethtool_cmd_speed_set(ecmd, adapter->link_speed); ecmd->duplex = DUPLEX_FULL; } else { ethtool_cmd_speed_set(ecmd, -1); ecmd->duplex = -1; } return 0; } static void vmxnet3_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *param) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE; param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE; param->rx_mini_max_pending = 0; param->rx_jumbo_max_pending = 0; param->rx_pending = adapter->rx_queue[0].rx_ring[0].size * adapter->num_rx_queues; param->tx_pending = adapter->tx_queue[0].tx_ring.size * adapter->num_tx_queues; param->rx_mini_pending = 0; param->rx_jumbo_pending = 0; } static int vmxnet3_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *param) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); u32 new_tx_ring_size, new_rx_ring_size; u32 sz; int err = 0; if (param->tx_pending == 0 || param->tx_pending > VMXNET3_TX_RING_MAX_SIZE) return -EINVAL; if (param->rx_pending == 0 || param->rx_pending > VMXNET3_RX_RING_MAX_SIZE) return -EINVAL; /* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */ new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) & ~VMXNET3_RING_SIZE_MASK; new_tx_ring_size = min_t(u32, new_tx_ring_size, VMXNET3_TX_RING_MAX_SIZE); if (new_tx_ring_size > VMXNET3_TX_RING_MAX_SIZE || (new_tx_ring_size % VMXNET3_RING_SIZE_ALIGN) != 0) return -EINVAL; /* ring0 has to be a multiple of * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN */ sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; new_rx_ring_size = (param->rx_pending + sz - 1) / sz * sz; new_rx_ring_size = min_t(u32, new_rx_ring_size, VMXNET3_RX_RING_MAX_SIZE 
/ sz * sz); if (new_rx_ring_size > VMXNET3_RX_RING_MAX_SIZE || (new_rx_ring_size % sz) != 0) return -EINVAL; if (new_tx_ring_size == adapter->tx_queue[0].tx_ring.size && new_rx_ring_size == adapter->rx_queue[0].rx_ring[0].size) { return 0; } /* * Reset_work may be in the middle of resetting the device, wait for its * completion. */ while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) msleep(1); if (netif_running(netdev)) { vmxnet3_quiesce_dev(adapter); vmxnet3_reset_dev(adapter); /* recreate the rx queue and the tx queue based on the * new sizes */ vmxnet3_tq_destroy_all(adapter); vmxnet3_rq_destroy_all(adapter); err = vmxnet3_create_queues(adapter, new_tx_ring_size, new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE); if (err) { /* failed, most likely because of OOM, try default * size */ printk(KERN_ERR "%s: failed to apply new sizes, try the" " default ones\n", netdev->name); err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE, VMXNET3_DEF_RX_RING_SIZE, VMXNET3_DEF_RX_RING_SIZE); if (err) { printk(KERN_ERR "%s: failed to create queues " "with default sizes. Closing it\n", netdev->name); goto out; } } err = vmxnet3_activate_dev(adapter); if (err) printk(KERN_ERR "%s: failed to re-activate, error %d." " Closing it\n", netdev->name, err); } out: clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); if (err) vmxnet3_force_close(adapter); return err; } static int vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info, u32 *rules) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); switch (info->cmd) { case ETHTOOL_GRXRINGS: info->data = adapter->num_rx_queues; return 0; } return -EOPNOTSUPP; } #ifdef VMXNET3_RSS static u32 vmxnet3_get_rss_indir_size(struct net_device *netdev) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); struct UPT1_RSSConf *rssConf = adapter->rss_conf; return rssConf->indTableSize; } static int vmxnet3_get_rss_indir(struct net_device *netdev, u32 *p) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); struct UPT1_RSSConf *rssConf = adapter->rss_conf; unsigned int n = rssConf->indTableSize; while (n--) p[n] = rssConf->indTable[n]; return 0; } static int vmxnet3_set_rss_indir(struct net_device *netdev, const u32 *p) { unsigned int i; unsigned long flags; struct vmxnet3_adapter *adapter = netdev_priv(netdev); struct UPT1_RSSConf *rssConf = adapter->rss_conf; for (i = 0; i < rssConf->indTableSize; i++) rssConf->indTable[i] = p[i]; spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RSSIDT); spin_unlock_irqrestore(&adapter->cmd_lock, flags); return 0; } #endif static const struct ethtool_ops vmxnet3_ethtool_ops = { .get_settings = vmxnet3_get_settings, .get_drvinfo = vmxnet3_get_drvinfo, .get_regs_len = vmxnet3_get_regs_len, .get_regs = vmxnet3_get_regs, .get_wol = vmxnet3_get_wol, .set_wol = vmxnet3_set_wol, .get_link = ethtool_op_get_link, .get_strings = vmxnet3_get_strings, .get_sset_count = vmxnet3_get_sset_count, .get_ethtool_stats = vmxnet3_get_ethtool_stats, .get_ringparam = vmxnet3_get_ringparam, .set_ringparam = vmxnet3_set_ringparam, .get_rxnfc = vmxnet3_get_rxnfc, #ifdef VMXNET3_RSS .get_rxfh_indir_size = vmxnet3_get_rss_indir_size, .get_rxfh_indir = vmxnet3_get_rss_indir, .set_rxfh_indir = vmxnet3_set_rss_indir, #endif }; void vmxnet3_set_ethtool_ops(struct net_device *netdev) { SET_ETHTOOL_OPS(netdev, &vmxnet3_ethtool_ops); }
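/*
 * Editor's note (not part of the driver): the ethtool stats code above is
 * driven entirely by {description, offsetof()} tables, so adding a counter
 * means adding one table row; the dump loop just walks base + offset. A
 * standalone C demo of the same technique, with made-up stats, that builds
 * with any hosted compiler:
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_stats {
	uint64_t pkts_ok;
	uint64_t pkts_err;
};

static const struct { const char *desc; size_t offset; } demo_desc[] = {
	{ "pkts ok",  offsetof(struct demo_stats, pkts_ok)  },
	{ "pkts err", offsetof(struct demo_stats, pkts_err) },
};

int main(void)
{
	struct demo_stats s = { .pkts_ok = 42, .pkts_err = 1 };
	const uint8_t *base = (const uint8_t *)&s;	/* byte view of the struct */
	size_t i;

	/* same walk as vmxnet3_get_ethtool_stats(): one u64 per table row */
	for (i = 0; i < sizeof(demo_desc) / sizeof(demo_desc[0]); i++)
		printf("%-8s %llu\n", demo_desc[i].desc,
		       (unsigned long long)*(const uint64_t *)(base + demo_desc[i].offset));
	return 0;
}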
gpl-2.0
tonyho/Linux-3.7-TQ2440
drivers/media/dvb-frontends/or51211.c
5169
15364
/* * Support for OR51211 (pcHDTV HD-2000) - VSB * * Copyright (C) 2005 Kirk Lapray <kirk_lapray@bigfoot.com> * * Based on code from Jack Kelliher (kelliher@xmission.com) * Copyright (C) 2002 & pcHDTV, inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ /* * This driver needs external firmware. Please use the command * "<kerneldir>/Documentation/dvb/get_dvb_firmware or51211" to * download/extract it, and then copy it to /usr/lib/hotplug/firmware * or /lib/firmware (depending on configuration of firmware hotplug). */ #define OR51211_DEFAULT_FIRMWARE "dvb-fe-or51211.fw" #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include <linux/firmware.h> #include <linux/string.h> #include <linux/slab.h> #include <asm/byteorder.h> #include "dvb_math.h" #include "dvb_frontend.h" #include "or51211.h" static int debug; #define dprintk(args...) \ do { \ if (debug) printk(KERN_DEBUG "or51211: " args); \ } while (0) static u8 run_buf[] = {0x7f,0x01}; static u8 cmd_buf[] = {0x04,0x01,0x50,0x80,0x06}; // ATSC struct or51211_state { struct i2c_adapter* i2c; /* Configuration settings */ const struct or51211_config* config; struct dvb_frontend frontend; struct bt878* bt; /* Demodulator private data */ u8 initialized:1; u32 snr; /* Result of last SNR calculation */ /* Tuner private data */ u32 current_frequency; }; static int i2c_writebytes (struct or51211_state* state, u8 reg, const u8 *buf, int len) { int err; struct i2c_msg msg; msg.addr = reg; msg.flags = 0; msg.len = len; msg.buf = (u8 *)buf; if ((err = i2c_transfer (state->i2c, &msg, 1)) != 1) { printk(KERN_WARNING "or51211: i2c_writebytes error " "(addr %02x, err == %i)\n", reg, err); return -EREMOTEIO; } return 0; } static int i2c_readbytes(struct or51211_state *state, u8 reg, u8 *buf, int len) { int err; struct i2c_msg msg; msg.addr = reg; msg.flags = I2C_M_RD; msg.len = len; msg.buf = buf; if ((err = i2c_transfer (state->i2c, &msg, 1)) != 1) { printk(KERN_WARNING "or51211: i2c_readbytes error " "(addr %02x, err == %i)\n", reg, err); return -EREMOTEIO; } return 0; } static int or51211_load_firmware (struct dvb_frontend* fe, const struct firmware *fw) { struct or51211_state* state = fe->demodulator_priv; u8 tudata[585]; int i; dprintk("Firmware is %zd bytes\n",fw->size); /* Get eprom data */ tudata[0] = 17; if (i2c_writebytes(state,0x50,tudata,1)) { printk(KERN_WARNING "or51211: load_firmware error eprom addr\n"); return -1; } if (i2c_readbytes(state,0x50,&tudata[145],192)) { printk(KERN_WARNING "or51211: load_firmware error eprom\n"); return -1; } /* Create firmware buffer */ for (i = 0; i < 145; i++) tudata[i] = fw->data[i]; for (i = 0; i < 248; i++) tudata[i+337] = fw->data[145+i]; state->config->reset(fe); if (i2c_writebytes(state,state->config->demod_address,tudata,585)) { printk(KERN_WARNING "or51211: load_firmware error 1\n"); return -1; } msleep(1); if
(i2c_writebytes(state,state->config->demod_address, &fw->data[393],8125)) { printk(KERN_WARNING "or51211: load_firmware error 2\n"); return -1; } msleep(1); if (i2c_writebytes(state,state->config->demod_address,run_buf,2)) { printk(KERN_WARNING "or51211: load_firmware error 3\n"); return -1; } /* Wait at least 5 msec */ msleep(10); if (i2c_writebytes(state,state->config->demod_address,run_buf,2)) { printk(KERN_WARNING "or51211: load_firmware error 4\n"); return -1; } msleep(10); printk("or51211: Done.\n"); return 0; }; static int or51211_setmode(struct dvb_frontend* fe, int mode) { struct or51211_state* state = fe->demodulator_priv; u8 rec_buf[14]; state->config->setmode(fe, mode); if (i2c_writebytes(state,state->config->demod_address,run_buf,2)) { printk(KERN_WARNING "or51211: setmode error 1\n"); return -1; } /* Wait at least 5 msec */ msleep(10); if (i2c_writebytes(state,state->config->demod_address,run_buf,2)) { printk(KERN_WARNING "or51211: setmode error 2\n"); return -1; } msleep(10); /* Set operation mode in Receiver 1 register; * type 1: * data 0x50h Automatic sets receiver channel conditions * Automatic NTSC rejection filter * Enable MPEG serial data output * MPEG2tr * High tuner phase noise * normal +/-150kHz Carrier acquisition range */ if (i2c_writebytes(state,state->config->demod_address,cmd_buf,3)) { printk(KERN_WARNING "or51211: setmode error 3\n"); return -1; } rec_buf[0] = 0x04; rec_buf[1] = 0x00; rec_buf[2] = 0x03; rec_buf[3] = 0x00; msleep(20); if (i2c_writebytes(state,state->config->demod_address,rec_buf,3)) { printk(KERN_WARNING "or51211: setmode error 5\n"); } msleep(3); if (i2c_readbytes(state,state->config->demod_address,&rec_buf[10],2)) { printk(KERN_WARNING "or51211: setmode error 6"); return -1; } dprintk("setmode rec status %02x %02x\n",rec_buf[10],rec_buf[11]); return 0; } static int or51211_set_parameters(struct dvb_frontend *fe) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct or51211_state* state = fe->demodulator_priv; /* Change only if we are actually changing the channel */ if (state->current_frequency != p->frequency) { if (fe->ops.tuner_ops.set_params) { fe->ops.tuner_ops.set_params(fe); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } /* Set to ATSC mode */ or51211_setmode(fe,0); /* Update current frequency */ state->current_frequency = p->frequency; } return 0; } static int or51211_read_status(struct dvb_frontend* fe, fe_status_t* status) { struct or51211_state* state = fe->demodulator_priv; unsigned char rec_buf[2]; unsigned char snd_buf[] = {0x04,0x00,0x03,0x00}; *status = 0; /* Receiver Status */ if (i2c_writebytes(state,state->config->demod_address,snd_buf,3)) { printk(KERN_WARNING "or51211: read_status write error\n"); return -1; } msleep(3); if (i2c_readbytes(state,state->config->demod_address,rec_buf,2)) { printk(KERN_WARNING "or51211: read_status read error\n"); return -1; } dprintk("read_status %x %x\n",rec_buf[0],rec_buf[1]); if (rec_buf[0] & 0x01) { /* Receiver Lock */ *status |= FE_HAS_SIGNAL; *status |= FE_HAS_CARRIER; *status |= FE_HAS_VITERBI; *status |= FE_HAS_SYNC; *status |= FE_HAS_LOCK; } return 0; } /* Calculate SNR estimation (scaled by 2^24) 8-VSB SNR equation from Oren datasheets For 8-VSB: SNR[dB] = 10 * log10(219037.9454 / MSE^2 ) We re-write the snr equation as: SNR * 2^24 = 10*(c - 2*intlog10(MSE)) Where for 8-VSB, c = log10(219037.9454) * 2^24 */ static u32 calculate_snr(u32 mse, u32 c) { if (mse == 0) /* No signal */ return 0; mse = 2*intlog10(mse); if (mse > c) { /* Negative SNR, which is
possible, but realistically the demod will lose lock before the signal gets this bad. The API only allows for unsigned values, so just return 0 */ return 0; } return 10*(c - mse); } static int or51211_read_snr(struct dvb_frontend* fe, u16* snr) { struct or51211_state* state = fe->demodulator_priv; u8 rec_buf[2]; u8 snd_buf[3]; /* SNR after Equalizer */ snd_buf[0] = 0x04; snd_buf[1] = 0x00; snd_buf[2] = 0x04; if (i2c_writebytes(state,state->config->demod_address,snd_buf,3)) { printk(KERN_WARNING "%s: error writing snr reg\n", __func__); return -1; } if (i2c_readbytes(state,state->config->demod_address,rec_buf,2)) { printk(KERN_WARNING "%s: read_status read error\n", __func__); return -1; } state->snr = calculate_snr(rec_buf[0], 89599047); *snr = (state->snr) >> 16; dprintk("%s: noise = 0x%02x, snr = %d.%02d dB\n", __func__, rec_buf[0], state->snr >> 24, (((state->snr>>8) & 0xffff) * 100) >> 16); return 0; } static int or51211_read_signal_strength(struct dvb_frontend* fe, u16* strength) { /* Calculate Strength from SNR up to 35dB */ /* Even though the SNR can go higher than 35dB, there is some comfort */ /* factor in having a range of strong signals that can show at 100% */ struct or51211_state* state = (struct or51211_state*)fe->demodulator_priv; u16 snr; int ret; ret = fe->ops.read_snr(fe, &snr); if (ret != 0) return ret; /* Rather than use the 8.8 value snr, use state->snr which is 8.24 */ /* scale the range 0 - 35*2^24 into 0 - 65535 */ if (state->snr >= 8960 * 0x10000) *strength = 0xffff; else *strength = state->snr / 8960; return 0; } static int or51211_read_ber(struct dvb_frontend* fe, u32* ber) { *ber = -ENOSYS; return 0; } static int or51211_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks) { *ucblocks = -ENOSYS; return 0; } static int or51211_sleep(struct dvb_frontend* fe) { return 0; } static int or51211_init(struct dvb_frontend* fe) { struct or51211_state* state = fe->demodulator_priv; const struct or51211_config* config = state->config; const struct firmware* fw; unsigned char get_ver_buf[] = {0x04,0x00,0x30,0x00,0x00}; unsigned char rec_buf[14]; int ret,i; if (!state->initialized) { /* Request the firmware, this will block until it uploads */ printk(KERN_INFO "or51211: Waiting for firmware upload " "(%s)...\n", OR51211_DEFAULT_FIRMWARE); ret = config->request_firmware(fe, &fw, OR51211_DEFAULT_FIRMWARE); printk(KERN_INFO "or51211: Got Hotplug firmware\n"); if (ret) { printk(KERN_WARNING "or51211: No firmware uploaded " "(timeout or file not found?)\n"); return ret; } ret = or51211_load_firmware(fe, fw); release_firmware(fw); if (ret) { printk(KERN_WARNING "or51211: Writing firmware to " "device failed!\n"); return ret; } printk(KERN_INFO "or51211: Firmware upload complete.\n"); /* Set operation mode in Receiver 1 register; * type 1: * data 0x50h Automatic sets receiver channel conditions * Automatic NTSC rejection filter * Enable MPEG serial data output * MPEG2tr * High tuner phase noise * normal +/-150kHz Carrier acquisition range */ if (i2c_writebytes(state,state->config->demod_address, cmd_buf,3)) { printk(KERN_WARNING "or51211: Load DVR Error 5\n"); return -1; } /* Read back ucode version to be sure we loaded correctly */ /* and are really up and running */ rec_buf[0] = 0x04; rec_buf[1] = 0x00; rec_buf[2] = 0x03; rec_buf[3] = 0x00; msleep(30); if (i2c_writebytes(state,state->config->demod_address, rec_buf,3)) { printk(KERN_WARNING "or51211: Load DVR Error A\n"); return -1; } msleep(3); if (i2c_readbytes(state,state->config->demod_address, &rec_buf[10],2)) {
printk(KERN_WARNING "or51211: Load DVR Error B\n"); return -1; } rec_buf[0] = 0x04; rec_buf[1] = 0x00; rec_buf[2] = 0x01; rec_buf[3] = 0x00; msleep(20); if (i2c_writebytes(state,state->config->demod_address, rec_buf,3)) { printk(KERN_WARNING "or51211: Load DVR Error C\n"); return -1; } msleep(3); if (i2c_readbytes(state,state->config->demod_address, &rec_buf[12],2)) { printk(KERN_WARNING "or51211: Load DVR Error D\n"); return -1; } for (i = 0; i < 8; i++) rec_buf[i]=0xed; for (i = 0; i < 5; i++) { msleep(30); get_ver_buf[4] = i+1; if (i2c_writebytes(state,state->config->demod_address, get_ver_buf,5)) { printk(KERN_WARNING "or51211:Load DVR Error 6" " - %d\n",i); return -1; } msleep(3); if (i2c_readbytes(state,state->config->demod_address, &rec_buf[i*2],2)) { printk(KERN_WARNING "or51211:Load DVR Error 7" " - %d\n",i); return -1; } /* If we didn't receive the right index, try again */ if ((int)rec_buf[i*2+1]!=i+1){ i--; } } dprintk("read_fwbits %x %x %x %x %x %x %x %x %x %x\n", rec_buf[0], rec_buf[1], rec_buf[2], rec_buf[3], rec_buf[4], rec_buf[5], rec_buf[6], rec_buf[7], rec_buf[8], rec_buf[9]); printk(KERN_INFO "or51211: ver TU%02x%02x%02x VSB mode %02x" " Status %02x\n", rec_buf[2], rec_buf[4],rec_buf[6], rec_buf[12],rec_buf[10]); rec_buf[0] = 0x04; rec_buf[1] = 0x00; rec_buf[2] = 0x03; rec_buf[3] = 0x00; msleep(20); if (i2c_writebytes(state,state->config->demod_address, rec_buf,3)) { printk(KERN_WARNING "or51211: Load DVR Error 8\n"); return -1; } msleep(20); if (i2c_readbytes(state,state->config->demod_address, &rec_buf[8],2)) { printk(KERN_WARNING "or51211: Load DVR Error 9\n"); return -1; } state->initialized = 1; } return 0; } static int or51211_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* fesettings) { fesettings->min_delay_ms = 500; fesettings->step_size = 0; fesettings->max_drift = 0; return 0; } static void or51211_release(struct dvb_frontend* fe) { struct or51211_state* state = fe->demodulator_priv; state->config->sleep(fe); kfree(state); } static struct dvb_frontend_ops or51211_ops; struct dvb_frontend* or51211_attach(const struct or51211_config* config, struct i2c_adapter* i2c) { struct or51211_state* state = NULL; /* Allocate memory for the internal state */ state = kzalloc(sizeof(struct or51211_state), GFP_KERNEL); if (state == NULL) return NULL; /* Setup the state */ state->config = config; state->i2c = i2c; state->initialized = 0; state->current_frequency = 0; /* Create dvb_frontend */ memcpy(&state->frontend.ops, &or51211_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; return &state->frontend; } static struct dvb_frontend_ops or51211_ops = { .delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B }, .info = { .name = "Oren OR51211 VSB Frontend", .frequency_min = 44000000, .frequency_max = 958000000, .frequency_stepsize = 166666, .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_8VSB }, .release = or51211_release, .init = or51211_init, .sleep = or51211_sleep, .set_frontend = or51211_set_parameters, .get_tune_settings = or51211_get_tune_settings, .read_status = or51211_read_status, .read_ber = or51211_read_ber, .read_signal_strength = or51211_read_signal_strength, .read_snr = or51211_read_snr, .read_ucblocks = or51211_read_ucblocks, }; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); MODULE_DESCRIPTION("Oren OR51211 VSB [pcHDTV HD-2000] Demodulator Driver"); MODULE_AUTHOR("Kirk Lapray"); 
MODULE_LICENSE("GPL"); EXPORT_SYMBOL(or51211_attach);
gpl-2.0
Jason-Lam/linux-am335x
drivers/uwb/i1480/dfu/dfu.c
10033
6051
/* * Intel Wireless UWB Link 1480 * Main driver * * Copyright (C) 2005-2006 Intel Corporation * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * * Common code for firmware upload used by the USB and PCI version; * i1480_fw_upload() takes a device descriptor and uses the function * pointers it provides to upload firmware and prepare the PHY. * * As well, provides common functions used by the rest of the code. */ #include "i1480-dfu.h" #include <linux/errno.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/device.h> #include <linux/uwb.h> #include <linux/random.h> #include <linux/export.h> /* * i1480_rceb_check - Check RCEB for expected field values * @i1480: pointer to device for which RCEB is being checked * @rceb: RCEB being checked * @cmd: which command the RCEB is related to * @context: expected context * @expected_type: expected event type * @expected_event: expected event * * If @cmd is NULL, do not print error messages, but still return an error * code. * * Return 0 if @rceb matches the expected values, -EINVAL otherwise. */ int i1480_rceb_check(const struct i1480 *i1480, const struct uwb_rceb *rceb, const char *cmd, u8 context, u8 expected_type, unsigned expected_event) { int result = 0; struct device *dev = i1480->dev; if (rceb->bEventContext != context) { if (cmd) dev_err(dev, "%s: unexpected context id 0x%02x " "(expected 0x%02x)\n", cmd, rceb->bEventContext, context); result = -EINVAL; } if (rceb->bEventType != expected_type) { if (cmd) dev_err(dev, "%s: unexpected event type 0x%02x " "(expected 0x%02x)\n", cmd, rceb->bEventType, expected_type); result = -EINVAL; } if (le16_to_cpu(rceb->wEvent) != expected_event) { if (cmd) dev_err(dev, "%s: unexpected event 0x%04x " "(expected 0x%04x)\n", cmd, le16_to_cpu(rceb->wEvent), expected_event); result = -EINVAL; } return result; } EXPORT_SYMBOL_GPL(i1480_rceb_check); /* * Execute a Radio Control Command * * Command data has to be in i1480->cmd_buf. * * @returns size of the reply data filled in i1480->evt_buf or < 0 errno * code on error. 
*/ ssize_t i1480_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size, size_t reply_size) { ssize_t result; struct uwb_rceb *reply = i1480->evt_buf; struct uwb_rccb *cmd = i1480->cmd_buf; u16 expected_event = reply->wEvent; u8 expected_type = reply->bEventType; u8 context; init_completion(&i1480->evt_complete); i1480->evt_result = -EINPROGRESS; do { get_random_bytes(&context, 1); } while (context == 0x00 || context == 0xff); cmd->bCommandContext = context; result = i1480->cmd(i1480, cmd_name, cmd_size); if (result < 0) goto error; /* wait for the callback to report an event was received */ result = wait_for_completion_interruptible_timeout( &i1480->evt_complete, HZ); if (result == 0) { result = -ETIMEDOUT; goto error; } if (result < 0) goto error; result = i1480->evt_result; if (result < 0) { dev_err(i1480->dev, "%s: command reply reception failed: %zd\n", cmd_name, result); goto error; } /* * Firmware versions >= 1.4.12224 for IOGear GUWA100U generate a * spurious notification after firmware is downloaded. So check whether * the received RCEB is such a notification before assuming that the * command has failed. */ if (i1480_rceb_check(i1480, i1480->evt_buf, NULL, 0, 0xfd, 0x0022) == 0) { /* Now wait for the actual RCEB for this command. */ result = i1480->wait_init_done(i1480); if (result < 0) goto error; result = i1480->evt_result; } if (result != reply_size) { dev_err(i1480->dev, "%s returned only %zu bytes, %zu expected\n", cmd_name, result, reply_size); result = -EINVAL; goto error; } /* Verify we got the right event in response */ result = i1480_rceb_check(i1480, i1480->evt_buf, cmd_name, context, expected_type, expected_event); error: return result; } EXPORT_SYMBOL_GPL(i1480_cmd); static int i1480_print_state(struct i1480 *i1480) { int result; u32 *buf = (u32 *) i1480->cmd_buf; result = i1480->read(i1480, 0x80080000, 2 * sizeof(*buf)); if (result < 0) { dev_err(i1480->dev, "cannot read U & L states: %d\n", result); goto error; } dev_info(i1480->dev, "state U 0x%08x, L 0x%08x\n", buf[0], buf[1]); error: return result; } /* * PCI probe, firmware uploader * * _mac_fw_upload() will call rc_setup(), which needs an rc_release(). */ int i1480_fw_upload(struct i1480 *i1480) { int result; result = i1480_pre_fw_upload(i1480); /* PHY pre fw */ if (result < 0 && result != -ENOENT) { i1480_print_state(i1480); goto error; } result = i1480_mac_fw_upload(i1480); /* MAC fw */ if (result < 0) { if (result == -ENOENT) dev_err(i1480->dev, "Cannot locate MAC FW file '%s'\n", i1480->mac_fw_name); else i1480_print_state(i1480); goto error; } result = i1480_phy_fw_upload(i1480); /* PHY fw */ if (result < 0 && result != -ENOENT) { i1480_print_state(i1480); goto error_rc_release; } /* * FIXME: find some reliable way to check whether firmware is running * properly. Maybe use some standard request that has no side effects? */ dev_info(i1480->dev, "firmware uploaded successfully\n"); error_rc_release: if (i1480->rc_release) i1480->rc_release(i1480); result = 0; error: return result; } EXPORT_SYMBOL_GPL(i1480_fw_upload);
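/*
 * Editor's note (not part of the driver): i1480_cmd() above tags every
 * command with a random context byte, redrawing until it is neither 0x00
 * nor 0xff, so the event handler can match replies to commands. The same
 * rejection-sampling idiom as a standalone demo; rand() stands in for
 * get_random_bytes():
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static uint8_t pick_context(void)
{
	uint8_t ctx;

	do {
		ctx = (uint8_t)(rand() & 0xff);
	} while (ctx == 0x00 || ctx == 0xff);	/* reserved values */
	return ctx;
}

int main(void)
{
	srand((unsigned)time(NULL));
	printf("context id: 0x%02x\n", pick_context());
	return 0;
}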
gpl-2.0
papi92/android_kernel_samsung_g906s
drivers/infiniband/hw/amso1100/c2_cm.c
11569
10009
/* * Copyright (c) 2005 Ammasso, Inc. All rights reserved. * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/slab.h> #include "c2.h" #include "c2_wr.h" #include "c2_vq.h" #include <rdma/iw_cm.h> int c2_llp_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param) { struct c2_dev *c2dev = to_c2dev(cm_id->device); struct ib_qp *ibqp; struct c2_qp *qp; struct c2wr_qp_connect_req *wr; /* variable size needs a malloc. */ struct c2_vq_req *vq_req; int err; ibqp = c2_get_qp(cm_id->device, iw_param->qpn); if (!ibqp) return -EINVAL; qp = to_c2qp(ibqp); /* Associate QP <--> CM_ID */ cm_id->provider_data = qp; cm_id->add_ref(cm_id); qp->cm_id = cm_id; /* * only support the max private_data length */ if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) { err = -EINVAL; goto bail0; } /* * Set the rdma read limits */ err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird); if (err) goto bail0; /* * Create and send a WR_QP_CONNECT... */ wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL); if (!wr) { err = -ENOMEM; goto bail0; } vq_req = vq_req_alloc(c2dev); if (!vq_req) { err = -ENOMEM; goto bail1; } c2_wr_set_id(wr, CCWR_QP_CONNECT); wr->hdr.context = 0; wr->rnic_handle = c2dev->adapter_handle; wr->qp_handle = qp->adapter_handle; wr->remote_addr = cm_id->remote_addr.sin_addr.s_addr; wr->remote_port = cm_id->remote_addr.sin_port; /* * Move any private data from the callers's buf into * the WR. */ if (iw_param->private_data) { wr->private_data_length = cpu_to_be32(iw_param->private_data_len); memcpy(&wr->private_data[0], iw_param->private_data, iw_param->private_data_len); } else wr->private_data_length = 0; /* * Send WR to adapter. NOTE: There is no synch reply from * the adapter. 
*/ err = vq_send_wr(c2dev, (union c2wr *) wr); vq_req_free(c2dev, vq_req); bail1: kfree(wr); bail0: if (err) { /* * If we fail, release reference on QP and * disassociate QP from CM_ID */ cm_id->provider_data = NULL; qp->cm_id = NULL; cm_id->rem_ref(cm_id); } return err; } int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog) { struct c2_dev *c2dev; struct c2wr_ep_listen_create_req wr; struct c2wr_ep_listen_create_rep *reply; struct c2_vq_req *vq_req; int err; c2dev = to_c2dev(cm_id->device); if (c2dev == NULL) return -EINVAL; /* * Allocate verbs request. */ vq_req = vq_req_alloc(c2dev); if (!vq_req) return -ENOMEM; /* * Build the WR */ c2_wr_set_id(&wr, CCWR_EP_LISTEN_CREATE); wr.hdr.context = (u64) (unsigned long) vq_req; wr.rnic_handle = c2dev->adapter_handle; wr.local_addr = cm_id->local_addr.sin_addr.s_addr; wr.local_port = cm_id->local_addr.sin_port; wr.backlog = cpu_to_be32(backlog); wr.user_context = (u64) (unsigned long) cm_id; /* * Reference the request struct. Dereferenced in the int handler. */ vq_req_get(c2dev, vq_req); /* * Send WR to adapter */ err = vq_send_wr(c2dev, (union c2wr *) & wr); if (err) { vq_req_put(c2dev, vq_req); goto bail0; } /* * Wait for reply from adapter */ err = vq_wait_for_reply(c2dev, vq_req); if (err) goto bail0; /* * Process reply */ reply = (struct c2wr_ep_listen_create_rep *) (unsigned long) vq_req->reply_msg; if (!reply) { err = -ENOMEM; goto bail1; } if ((err = c2_errno(reply)) != 0) goto bail1; /* * Keep the adapter handle. Used in subsequent destroy */ cm_id->provider_data = (void*)(unsigned long) reply->ep_handle; /* * free vq stuff */ vq_repbuf_free(c2dev, reply); vq_req_free(c2dev, vq_req); return 0; bail1: vq_repbuf_free(c2dev, reply); bail0: vq_req_free(c2dev, vq_req); return err; } int c2_llp_service_destroy(struct iw_cm_id *cm_id) { struct c2_dev *c2dev; struct c2wr_ep_listen_destroy_req wr; struct c2wr_ep_listen_destroy_rep *reply; struct c2_vq_req *vq_req; int err; c2dev = to_c2dev(cm_id->device); if (c2dev == NULL) return -EINVAL; /* * Allocate verbs request. */ vq_req = vq_req_alloc(c2dev); if (!vq_req) return -ENOMEM; /* * Build the WR */ c2_wr_set_id(&wr, CCWR_EP_LISTEN_DESTROY); wr.hdr.context = (unsigned long) vq_req; wr.rnic_handle = c2dev->adapter_handle; wr.ep_handle = (u32)(unsigned long)cm_id->provider_data; /* * reference the request struct. dereferenced in the int handler. */ vq_req_get(c2dev, vq_req); /* * Send WR to adapter */ err = vq_send_wr(c2dev, (union c2wr *) & wr); if (err) { vq_req_put(c2dev, vq_req); goto bail0; } /* * Wait for reply from adapter */ err = vq_wait_for_reply(c2dev, vq_req); if (err) goto bail0; /* * Process reply */ reply=(struct c2wr_ep_listen_destroy_rep *)(unsigned long)vq_req->reply_msg; if (!reply) { err = -ENOMEM; goto bail0; } if ((err = c2_errno(reply)) != 0) goto bail1; bail1: vq_repbuf_free(c2dev, reply); bail0: vq_req_free(c2dev, vq_req); return err; } int c2_llp_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param) { struct c2_dev *c2dev = to_c2dev(cm_id->device); struct c2_qp *qp; struct ib_qp *ibqp; struct c2wr_cr_accept_req *wr; /* variable length WR */ struct c2_vq_req *vq_req; struct c2wr_cr_accept_rep *reply; /* VQ Reply msg ptr. */ int err; ibqp = c2_get_qp(cm_id->device, iw_param->qpn); if (!ibqp) return -EINVAL; qp = to_c2qp(ibqp); /* Set the RDMA read limits */ err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird); if (err) goto bail0; /* Allocate verbs request. 
*/ vq_req = vq_req_alloc(c2dev); if (!vq_req) { err = -ENOMEM; goto bail0; } vq_req->qp = qp; vq_req->cm_id = cm_id; vq_req->event = IW_CM_EVENT_ESTABLISHED; wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL); if (!wr) { err = -ENOMEM; goto bail1; } /* Build the WR */ c2_wr_set_id(wr, CCWR_CR_ACCEPT); wr->hdr.context = (unsigned long) vq_req; wr->rnic_handle = c2dev->adapter_handle; wr->ep_handle = (u32) (unsigned long) cm_id->provider_data; wr->qp_handle = qp->adapter_handle; /* Replace the cr_handle with the QP after accept */ cm_id->provider_data = qp; cm_id->add_ref(cm_id); qp->cm_id = cm_id; cm_id->provider_data = qp; /* Validate private_data length */ if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) { err = -EINVAL; goto bail1; } if (iw_param->private_data) { wr->private_data_length = cpu_to_be32(iw_param->private_data_len); memcpy(&wr->private_data[0], iw_param->private_data, iw_param->private_data_len); } else wr->private_data_length = 0; /* Reference the request struct. Dereferenced in the int handler. */ vq_req_get(c2dev, vq_req); /* Send WR to adapter */ err = vq_send_wr(c2dev, (union c2wr *) wr); if (err) { vq_req_put(c2dev, vq_req); goto bail1; } /* Wait for reply from adapter */ err = vq_wait_for_reply(c2dev, vq_req); if (err) goto bail1; /* Check that reply is present */ reply = (struct c2wr_cr_accept_rep *) (unsigned long) vq_req->reply_msg; if (!reply) { err = -ENOMEM; goto bail1; } err = c2_errno(reply); vq_repbuf_free(c2dev, reply); if (!err) c2_set_qp_state(qp, C2_QP_STATE_RTS); bail1: kfree(wr); vq_req_free(c2dev, vq_req); bail0: if (err) { /* * If we fail, release reference on QP and * disassociate QP from CM_ID */ cm_id->provider_data = NULL; qp->cm_id = NULL; cm_id->rem_ref(cm_id); } return err; } int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) { struct c2_dev *c2dev; struct c2wr_cr_reject_req wr; struct c2_vq_req *vq_req; struct c2wr_cr_reject_rep *reply; int err; c2dev = to_c2dev(cm_id->device); /* * Allocate verbs request. */ vq_req = vq_req_alloc(c2dev); if (!vq_req) return -ENOMEM; /* * Build the WR */ c2_wr_set_id(&wr, CCWR_CR_REJECT); wr.hdr.context = (unsigned long) vq_req; wr.rnic_handle = c2dev->adapter_handle; wr.ep_handle = (u32) (unsigned long) cm_id->provider_data; /* * reference the request struct. dereferenced in the int handler. */ vq_req_get(c2dev, vq_req); /* * Send WR to adapter */ err = vq_send_wr(c2dev, (union c2wr *) & wr); if (err) { vq_req_put(c2dev, vq_req); goto bail0; } /* * Wait for reply from adapter */ err = vq_wait_for_reply(c2dev, vq_req); if (err) goto bail0; /* * Process reply */ reply = (struct c2wr_cr_reject_rep *) (unsigned long) vq_req->reply_msg; if (!reply) { err = -ENOMEM; goto bail0; } err = c2_errno(reply); /* * free vq stuff */ vq_repbuf_free(c2dev, reply); bail0: vq_req_free(c2dev, vq_req); return err; }
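/*
 * Editor's note (not part of the driver): all four verbs in this file repeat
 * the same synchronous verbs-queue dance. A sketch of a helper that could be
 * factored out, reusing only calls that already appear above; the function
 * name is hypothetical and this has not been compiled against the c2 headers.
 */
static int c2_vq_transact(struct c2_dev *c2dev, union c2wr *wr,
			  void **reply_out)
{
	struct c2_vq_req *vq_req = vq_req_alloc(c2dev);
	void *reply;
	int err;

	if (!vq_req)
		return -ENOMEM;
	wr->hdr.context = (u64) (unsigned long) vq_req;	/* routes the reply */
	vq_req_get(c2dev, vq_req);	/* ref dropped by the int handler */
	err = vq_send_wr(c2dev, wr);
	if (err) {
		vq_req_put(c2dev, vq_req);	/* never sent: drop ref here */
		goto out;
	}
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto out;
	reply = (void *) (unsigned long) vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto out;
	}
	err = c2_errno(reply);
	*reply_out = reply;	/* caller must vq_repbuf_free() it */
out:
	vq_req_free(c2dev, vq_req);
	return err;
}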
gpl-2.0
maz-1/android_kernel_sonyz_msm8974
drivers/media/dvb/b2c2/flexcop-eeprom.c
14385
3274
/* * Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III * flexcop-eeprom.c - eeprom access methods (currently only MAC address reading) * see flexcop.c for copyright information */ #include "flexcop.h" #if 0 /*EEPROM (Skystar2 has one "24LC08B" chip on board) */ static int eeprom_write(struct adapter *adapter, u16 addr, u8 *buf, u16 len) { return flex_i2c_write(adapter, 0x20000000, 0x50, addr, buf, len); } static int eeprom_lrc_write(struct adapter *adapter, u32 addr, u32 len, u8 *wbuf, u8 *rbuf, int retries) { int i; for (i = 0; i < retries; i++) { if (eeprom_write(adapter, addr, wbuf, len) == len) { if (eeprom_lrc_read(adapter, addr, len, rbuf, retries) == 1) return 1; } } return 0; } /* These functions could be used to unlock SkyStar2 cards. */ static int eeprom_writeKey(struct adapter *adapter, u8 *key, u32 len) { u8 rbuf[20]; u8 wbuf[20]; if (len != 16) return 0; memcpy(wbuf, key, len); wbuf[16] = 0; wbuf[17] = 0; wbuf[18] = 0; wbuf[19] = calc_lrc(wbuf, 19); return eeprom_lrc_write(adapter, 0x3e4, 20, wbuf, rbuf, 4); } static int eeprom_readKey(struct adapter *adapter, u8 *key, u32 len) { u8 buf[20]; if (len != 16) return 0; if (eeprom_lrc_read(adapter, 0x3e4, 20, buf, 4) == 0) return 0; memcpy(key, buf, len); return 1; } static char eeprom_set_mac_addr(struct adapter *adapter, char type, u8 *mac) { u8 tmp[8]; if (type != 0) { tmp[0] = mac[0]; tmp[1] = mac[1]; tmp[2] = mac[2]; tmp[3] = mac[5]; tmp[4] = mac[6]; tmp[5] = mac[7]; } else { tmp[0] = mac[0]; tmp[1] = mac[1]; tmp[2] = mac[2]; tmp[3] = mac[3]; tmp[4] = mac[4]; tmp[5] = mac[5]; } tmp[6] = 0; tmp[7] = calc_lrc(tmp, 7); if (eeprom_write(adapter, 0x3f8, tmp, 8) == 8) return 1; return 0; } static int flexcop_eeprom_read(struct flexcop_device *fc, u16 addr, u8 *buf, u16 len) { return fc->i2c_request(fc,FC_READ,FC_I2C_PORT_EEPROM,0x50,addr,buf,len); } #endif static u8 calc_lrc(u8 *buf, int len) { int i; u8 sum = 0; for (i = 0; i < len; i++) sum = sum ^ buf[i]; return sum; } static int flexcop_eeprom_request(struct flexcop_device *fc, flexcop_access_op_t op, u16 addr, u8 *buf, u16 len, int retries) { int i,ret = 0; u8 chipaddr = 0x50 | ((addr >> 8) & 3); for (i = 0; i < retries; i++) { ret = fc->i2c_request(&fc->fc_i2c_adap[1], op, chipaddr, addr & 0xff, buf, len); if (ret == 0) break; } return ret; } static int flexcop_eeprom_lrc_read(struct flexcop_device *fc, u16 addr, u8 *buf, u16 len, int retries) { int ret = flexcop_eeprom_request(fc, FC_READ, addr, buf, len, retries); if (ret == 0) if (calc_lrc(buf, len - 1) != buf[len - 1]) ret = -EINVAL; return ret; } /* JJ's comment about extended == 1: it is not presently used anywhere but was * added to the low-level functions for possible support of EUI64 */ int flexcop_eeprom_check_mac_addr(struct flexcop_device *fc, int extended) { u8 buf[8]; int ret = 0; if ((ret = flexcop_eeprom_lrc_read(fc,0x3f8,buf,8,4)) == 0) { if (extended != 0) { err("TODO: extended (EUI64) MAC addresses aren't " "completely supported yet"); ret = -EINVAL; } else memcpy(fc->dvb_adapter.proposed_mac,buf,6); } return ret; } EXPORT_SYMBOL(flexcop_eeprom_check_mac_addr);
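/*
 * Editor's note (not part of the driver): calc_lrc() above is a plain XOR
 * longitudinal redundancy check. The last byte of each EEPROM record stores
 * the XOR of the preceding bytes, so flexcop_eeprom_lrc_read()'s test
 * calc_lrc(buf, len - 1) == buf[len - 1] is equivalent to XORing the whole
 * record to zero. Standalone demo with a made-up 8-byte record:
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t calc_lrc(const uint8_t *buf, int len)
{
	uint8_t sum = 0;
	int i;

	for (i = 0; i < len; i++)
		sum ^= buf[i];
	return sum;
}

int main(void)
{
	uint8_t rec[8] = { 0x00, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e, 0x00, 0x00 };

	rec[7] = calc_lrc(rec, 7);	/* append the check byte */
	printf("lrc=0x%02x verify=%s\n", rec[7],
	       calc_lrc(rec, 8) == 0 ? "ok" : "bad");
	return 0;
}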
gpl-2.0
matteocrippa/dsl-n55u-bender
release/src-rt/linux/linux-2.6/arch/ia64/hp/sim/simserial.c
50
25753
/* * Simulated Serial Driver (fake serial) * * This driver is mostly used for bringup purposes and will go away. * It has a strong dependency on the system console. All outputs * are rerouted to the same facility as the one used by printk which, in our * case means sys_sim.c console (goes via the simulator). The code hereafter * is completely leveraged from the serial.c driver. * * Copyright (C) 1999-2000, 2002-2003 Hewlett-Packard Co * Stephane Eranian <eranian@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com> * * 02/04/00 D. Mosberger Merged in serial.c bug fixes in rs_close(). * 02/25/00 D. Mosberger Synced up with 2.3.99pre-5 version of serial.c. * 07/30/02 D. Mosberger Replace sti()/cli() with explicit spinlocks & local irq masking */ #include <linux/init.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/major.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/capability.h> #include <linux/console.h> #include <linux/module.h> #include <linux/serial.h> #include <linux/serialP.h> #include <linux/sysrq.h> #include <asm/irq.h> #include <asm/hw_irq.h> #include <asm/uaccess.h> #ifdef CONFIG_KDB # include <linux/kdb.h> #endif #undef SIMSERIAL_DEBUG /* define this to get some debug information */ #define KEYBOARD_INTR 3 /* must match with simulator! */ #define NR_PORTS 1 /* only one port for now */ #define IRQ_T(info) ((info->flags & ASYNC_SHARE_IRQ) ? IRQF_SHARED : IRQF_DISABLED) #define SSC_GETCHAR 21 extern long ia64_ssc (long, long, long, long, int); extern void ia64_ssc_connect_irq (long intr, long irq); static char *serial_name = "SimSerial driver"; static char *serial_version = "0.6"; /* * This has been extracted from asm/serial.h. We need one eventually but * I don't know exactly what we're going to put in it so just fake one * for now. */ #define BASE_BAUD ( 1843200 / 16 ) #define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST) /* * Most of the values here are meaningless to this particular driver. * However some values must be preserved for the code (leveraged from serial.c * to work correctly). * port must not be 0 * type must not be UNKNOWN * So I picked arbitrary (guess from where?) values instead */ static struct serial_state rs_table[NR_PORTS]={ /* UART CLK PORT IRQ FLAGS */ { 0, BASE_BAUD, 0x3F8, 0, STD_COM_FLAGS,0,PORT_16550 } /* ttyS0 */ }; /* * Just for the fun of it ! */ static struct serial_uart_config uart_config[] = { { "unknown", 1, 0 }, { "8250", 1, 0 }, { "16450", 1, 0 }, { "16550", 1, 0 }, { "16550A", 16, UART_CLEAR_FIFO | UART_USE_FIFO }, { "cirrus", 1, 0 }, { "ST16650", 1, UART_CLEAR_FIFO | UART_STARTECH }, { "ST16650V2", 32, UART_CLEAR_FIFO | UART_USE_FIFO | UART_STARTECH }, { "TI16750", 64, UART_CLEAR_FIFO | UART_USE_FIFO}, { NULL, 0} }; struct tty_driver *hp_simserial_driver; static struct async_struct *IRQ_ports[NR_IRQS]; static struct console *console; static unsigned char *tmp_buf; extern struct console *console_drivers; /* from kernel/printk.c */ /* * ------------------------------------------------------------ * rs_stop() and rs_start() * * This routines are called before setting or resetting tty->stopped. * They enable or disable transmitter interrupts, as necessary. 
* ------------------------------------------------------------ */ static void rs_stop(struct tty_struct *tty) { #ifdef SIMSERIAL_DEBUG printk("rs_stop: tty->stopped=%d tty->hw_stopped=%d tty->flow_stopped=%d\n", tty->stopped, tty->hw_stopped, tty->flow_stopped); #endif } static void rs_start(struct tty_struct *tty) { #ifdef SIMSERIAL_DEBUG printk("rs_start: tty->stopped=%d tty->hw_stopped=%d tty->flow_stopped=%d\n", tty->stopped, tty->hw_stopped, tty->flow_stopped); #endif } static void receive_chars(struct tty_struct *tty) { unsigned char ch; static unsigned char seen_esc = 0; while ( (ch = ia64_ssc(0, 0, 0, 0, SSC_GETCHAR)) ) { if ( ch == 27 && seen_esc == 0 ) { seen_esc = 1; continue; } else { if ( seen_esc==1 && ch == 'O' ) { seen_esc = 2; continue; } else if ( seen_esc == 2 ) { if ( ch == 'P' ) /* F1 */ show_state(); #ifdef CONFIG_MAGIC_SYSRQ if ( ch == 'S' ) { /* F4 */ do ch = ia64_ssc(0, 0, 0, 0, SSC_GETCHAR); while (!ch); handle_sysrq(ch, NULL); } #endif seen_esc = 0; continue; } } seen_esc = 0; if (tty_insert_flip_char(tty, ch, TTY_NORMAL) == 0) break; } tty_flip_buffer_push(tty); } /* * This is the serial driver's interrupt routine for a single port */ static irqreturn_t rs_interrupt_single(int irq, void *dev_id) { struct async_struct * info; /* * I don't know exactly why they don't use the dev_id opaque data * pointer instead of this extra lookup table */ info = IRQ_ports[irq]; if (!info || !info->tty) { printk(KERN_INFO "simrs_interrupt_single: info|tty=0 info=%p problem\n", info); return IRQ_NONE; } /* * pretty simple in our case, because we only get interrupts * on inbound traffic */ receive_chars(info->tty); return IRQ_HANDLED; } /* * ------------------------------------------------------------------- * Here ends the serial interrupt routines. * ------------------------------------------------------------------- */ #if 0 /* * not really used in our situation so keep them commented out for now */ static DECLARE_TASK_QUEUE(tq_serial); /* used to be at the top of the file */ static void do_serial_bh(void) { run_task_queue(&tq_serial); printk(KERN_ERR "do_serial_bh: called\n"); } #endif static void do_softint(struct work_struct *private_) { printk(KERN_ERR "simserial: do_softint called\n"); } static void rs_put_char(struct tty_struct *tty, unsigned char ch) { struct async_struct *info = (struct async_struct *)tty->driver_data; unsigned long flags; if (!tty || !info->xmit.buf) return; local_irq_save(flags); if (CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) == 0) { local_irq_restore(flags); return; } info->xmit.buf[info->xmit.head] = ch; info->xmit.head = (info->xmit.head + 1) & (SERIAL_XMIT_SIZE-1); local_irq_restore(flags); } static void transmit_chars(struct async_struct *info, int *intr_done) { int count; unsigned long flags; local_irq_save(flags); if (info->x_char) { char c = info->x_char; console->write(console, &c, 1); info->state->icount.tx++; info->x_char = 0; goto out; } if (info->xmit.head == info->xmit.tail || info->tty->stopped || info->tty->hw_stopped) { #ifdef SIMSERIAL_DEBUG printk("transmit_chars: head=%d, tail=%d, stopped=%d\n", info->xmit.head, info->xmit.tail, info->tty->stopped); #endif goto out; } /* * We removed the loop and try to do it in to chunks. We need * 2 operations maximum because it's a ring buffer. * * First from current to tail if possible. 
* Then from the beginning of the buffer until necessary */ count = min(CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE), SERIAL_XMIT_SIZE - info->xmit.tail); console->write(console, info->xmit.buf+info->xmit.tail, count); info->xmit.tail = (info->xmit.tail+count) & (SERIAL_XMIT_SIZE-1); /* * We have more at the beginning of the buffer */ count = CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); if (count) { console->write(console, info->xmit.buf, count); info->xmit.tail += count; } out: local_irq_restore(flags); } static void rs_flush_chars(struct tty_struct *tty) { struct async_struct *info = (struct async_struct *)tty->driver_data; if (info->xmit.head == info->xmit.tail || tty->stopped || tty->hw_stopped || !info->xmit.buf) return; transmit_chars(info, NULL); } static int rs_write(struct tty_struct * tty, const unsigned char *buf, int count) { int c, ret = 0; struct async_struct *info = (struct async_struct *)tty->driver_data; unsigned long flags; if (!tty || !info->xmit.buf || !tmp_buf) return 0; local_irq_save(flags); while (1) { c = CIRC_SPACE_TO_END(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); if (count < c) c = count; if (c <= 0) { break; } memcpy(info->xmit.buf + info->xmit.head, buf, c); info->xmit.head = ((info->xmit.head + c) & (SERIAL_XMIT_SIZE-1)); buf += c; count -= c; ret += c; } local_irq_restore(flags); /* * Hey, we transmit directly from here in our case */ if (CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) && !tty->stopped && !tty->hw_stopped) { transmit_chars(info, NULL); } return ret; } static int rs_write_room(struct tty_struct *tty) { struct async_struct *info = (struct async_struct *)tty->driver_data; return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); } static int rs_chars_in_buffer(struct tty_struct *tty) { struct async_struct *info = (struct async_struct *)tty->driver_data; return CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); } static void rs_flush_buffer(struct tty_struct *tty) { struct async_struct *info = (struct async_struct *)tty->driver_data; unsigned long flags; local_irq_save(flags); info->xmit.head = info->xmit.tail = 0; local_irq_restore(flags); wake_up_interruptible(&tty->write_wait); if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && tty->ldisc.write_wakeup) (tty->ldisc.write_wakeup)(tty); } /* * This function is used to send a high-priority XON/XOFF character to * the device */ static void rs_send_xchar(struct tty_struct *tty, char ch) { struct async_struct *info = (struct async_struct *)tty->driver_data; info->x_char = ch; if (ch) { /* * I guess we could call console->write() directly but * let's do that for now. */ transmit_chars(info, NULL); } } /* * ------------------------------------------------------------ * rs_throttle() * * This routine is called by the upper-layer tty layer to signal that * incoming characters should be throttled. 
 * ------------------------------------------------------------
 */
static void rs_throttle(struct tty_struct * tty)
{
	if (I_IXOFF(tty))
		rs_send_xchar(tty, STOP_CHAR(tty));

	printk(KERN_INFO "simrs_throttle called\n");
}

static void rs_unthrottle(struct tty_struct * tty)
{
	struct async_struct *info = (struct async_struct *)tty->driver_data;

	if (I_IXOFF(tty)) {
		if (info->x_char)
			info->x_char = 0;
		else
			rs_send_xchar(tty, START_CHAR(tty));
	}
	printk(KERN_INFO "simrs_unthrottle called\n");
}

/*
 * rs_break() --- routine which turns the break handling on or off
 */
static void rs_break(struct tty_struct *tty, int break_state)
{
}

static int rs_ioctl(struct tty_struct *tty, struct file * file,
		    unsigned int cmd, unsigned long arg)
{
	if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
	    (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) &&
	    (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
		if (tty->flags & (1 << TTY_IO_ERROR))
			return -EIO;
	}

	switch (cmd) {
	case TIOCMGET:
		printk(KERN_INFO "rs_ioctl: TIOCMGET called\n");
		return -EINVAL;
	case TIOCMBIS:
	case TIOCMBIC:
	case TIOCMSET:
		printk(KERN_INFO "rs_ioctl: TIOCMBIS/BIC/SET called\n");
		return -EINVAL;
	case TIOCGSERIAL:
		printk(KERN_INFO "simrs_ioctl TIOCGSERIAL called\n");
		return 0;
	case TIOCSSERIAL:
		printk(KERN_INFO "simrs_ioctl TIOCSSERIAL called\n");
		return 0;
	case TIOCSERCONFIG:
		printk(KERN_INFO "rs_ioctl: TIOCSERCONFIG called\n");
		return -EINVAL;
	case TIOCSERGETLSR: /* Get line status register */
		printk(KERN_INFO "rs_ioctl: TIOCSERGETLSR called\n");
		return -EINVAL;
	case TIOCSERGSTRUCT:
		printk(KERN_INFO "rs_ioctl: TIOCSERGSTRUCT called\n");
#if 0
		if (copy_to_user((struct async_struct *) arg,
				 info, sizeof(struct async_struct)))
			return -EFAULT;
#endif
		return 0;

	/*
	 * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change
	 * - mask passed in arg for lines of interest
	 *   (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
	 * Caller should use TIOCGICOUNT to see which one it was
	 */
	case TIOCMIWAIT:
		printk(KERN_INFO "rs_ioctl: TIOCMIWAIT: called\n");
		return 0;
	/*
	 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
	 * Return: write counters to the user passed counter struct
	 * NB: both 1->0 and 0->1 transitions are counted except for
	 *     RI where only 0->1 is counted.
	 */
	case TIOCGICOUNT:
		printk(KERN_INFO "rs_ioctl: TIOCGICOUNT called\n");
		return 0;
	case TIOCSERGWILD:
	case TIOCSERSWILD:
		/* "setserial -W" is called in Debian boot */
		printk (KERN_INFO "TIOCSER?WILD ioctl obsolete, ignored.\n");
		return 0;
	default:
		return -ENOIOCTLCMD;
	}
	return 0;
}

#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))

static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
{
	unsigned int cflag = tty->termios->c_cflag;

	if (   (cflag == old_termios->c_cflag)
	    && (   RELEVANT_IFLAG(tty->termios->c_iflag)
		== RELEVANT_IFLAG(old_termios->c_iflag)))
		return;

	/* Handle turning off CRTSCTS */
	if ((old_termios->c_cflag & CRTSCTS) &&
	    !(tty->termios->c_cflag & CRTSCTS)) {
		tty->hw_stopped = 0;
		rs_start(tty);
	}
}

/*
 * This routine will shut down a serial port; interrupts are disabled, and
 * DTR is dropped if the hangup on close termio flag is on.
 */
static void shutdown(struct async_struct * info)
{
	unsigned long flags;
	struct serial_state *state;
	int retval;

	if (!(info->flags & ASYNC_INITIALIZED))
		return;

	state = info->state;

#ifdef SIMSERIAL_DEBUG
	printk("Shutting down serial port %d (irq %d)....", info->line,
	       state->irq);
#endif

	local_irq_save(flags);
	{
		/*
		 * First unlink the serial port from the IRQ chain...
*/ if (info->next_port) info->next_port->prev_port = info->prev_port; if (info->prev_port) info->prev_port->next_port = info->next_port; else IRQ_ports[state->irq] = info->next_port; /* * Free the IRQ, if necessary */ if (state->irq && (!IRQ_ports[state->irq] || !IRQ_ports[state->irq]->next_port)) { if (IRQ_ports[state->irq]) { free_irq(state->irq, NULL); retval = request_irq(state->irq, rs_interrupt_single, IRQ_T(info), "serial", NULL); if (retval) printk(KERN_ERR "serial shutdown: request_irq: error %d" " Couldn't reacquire IRQ.\n", retval); } else free_irq(state->irq, NULL); } if (info->xmit.buf) { free_page((unsigned long) info->xmit.buf); info->xmit.buf = NULL; } if (info->tty) set_bit(TTY_IO_ERROR, &info->tty->flags); info->flags &= ~ASYNC_INITIALIZED; } local_irq_restore(flags); } /* * ------------------------------------------------------------ * rs_close() * * This routine is called when the serial port gets closed. First, we * wait for the last remaining data to be sent. Then, we unlink its * async structure from the interrupt chain if necessary, and we free * that IRQ if nothing is left in the chain. * ------------------------------------------------------------ */ static void rs_close(struct tty_struct *tty, struct file * filp) { struct async_struct * info = (struct async_struct *)tty->driver_data; struct serial_state *state; unsigned long flags; if (!info ) return; state = info->state; local_irq_save(flags); if (tty_hung_up_p(filp)) { #ifdef SIMSERIAL_DEBUG printk("rs_close: hung_up\n"); #endif local_irq_restore(flags); return; } #ifdef SIMSERIAL_DEBUG printk("rs_close ttys%d, count = %d\n", info->line, state->count); #endif if ((tty->count == 1) && (state->count != 1)) { /* * Uh, oh. tty->count is 1, which means that the tty * structure will be freed. state->count should always * be one in these conditions. If it's greater than * one, we've got real problems, since it means the * serial port won't be shutdown. */ printk(KERN_ERR "rs_close: bad serial port count; tty->count is 1, " "state->count is %d\n", state->count); state->count = 1; } if (--state->count < 0) { printk(KERN_ERR "rs_close: bad serial port count for ttys%d: %d\n", info->line, state->count); state->count = 0; } if (state->count) { local_irq_restore(flags); return; } info->flags |= ASYNC_CLOSING; local_irq_restore(flags); /* * Now we wait for the transmit buffer to clear; and we notify * the line discipline to only process XON/XOFF characters. */ shutdown(info); if (tty->driver->flush_buffer) tty->driver->flush_buffer(tty); if (tty->ldisc.flush_buffer) tty->ldisc.flush_buffer(tty); info->event = 0; info->tty = NULL; if (info->blocked_open) { if (info->close_delay) schedule_timeout_interruptible(info->close_delay); wake_up_interruptible(&info->open_wait); } info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING); wake_up_interruptible(&info->close_wait); } /* * rs_wait_until_sent() --- wait until the transmitter is empty */ static void rs_wait_until_sent(struct tty_struct *tty, int timeout) { } /* * rs_hangup() --- called by tty_hangup() when a hangup is signaled. 
*/ static void rs_hangup(struct tty_struct *tty) { struct async_struct * info = (struct async_struct *)tty->driver_data; struct serial_state *state = info->state; #ifdef SIMSERIAL_DEBUG printk("rs_hangup: called\n"); #endif state = info->state; rs_flush_buffer(tty); if (info->flags & ASYNC_CLOSING) return; shutdown(info); info->event = 0; state->count = 0; info->flags &= ~ASYNC_NORMAL_ACTIVE; info->tty = NULL; wake_up_interruptible(&info->open_wait); } static int get_async_struct(int line, struct async_struct **ret_info) { struct async_struct *info; struct serial_state *sstate; sstate = rs_table + line; sstate->count++; if (sstate->info) { *ret_info = sstate->info; return 0; } info = kzalloc(sizeof(struct async_struct), GFP_KERNEL); if (!info) { sstate->count--; return -ENOMEM; } init_waitqueue_head(&info->open_wait); init_waitqueue_head(&info->close_wait); init_waitqueue_head(&info->delta_msr_wait); info->magic = SERIAL_MAGIC; info->port = sstate->port; info->flags = sstate->flags; info->xmit_fifo_size = sstate->xmit_fifo_size; info->line = line; INIT_WORK(&info->work, do_softint); info->state = sstate; if (sstate->info) { kfree(info); *ret_info = sstate->info; return 0; } *ret_info = sstate->info = info; return 0; } static int startup(struct async_struct *info) { unsigned long flags; int retval=0; irq_handler_t handler; struct serial_state *state= info->state; unsigned long page; page = get_zeroed_page(GFP_KERNEL); if (!page) return -ENOMEM; local_irq_save(flags); if (info->flags & ASYNC_INITIALIZED) { free_page(page); goto errout; } if (!state->port || !state->type) { if (info->tty) set_bit(TTY_IO_ERROR, &info->tty->flags); free_page(page); goto errout; } if (info->xmit.buf) free_page(page); else info->xmit.buf = (unsigned char *) page; #ifdef SIMSERIAL_DEBUG printk("startup: ttys%d (irq %d)...", info->line, state->irq); #endif /* * Allocate the IRQ if necessary */ if (state->irq && (!IRQ_ports[state->irq] || !IRQ_ports[state->irq]->next_port)) { if (IRQ_ports[state->irq]) { retval = -EBUSY; goto errout; } else handler = rs_interrupt_single; retval = request_irq(state->irq, handler, IRQ_T(info), "simserial", NULL); if (retval) { if (capable(CAP_SYS_ADMIN)) { if (info->tty) set_bit(TTY_IO_ERROR, &info->tty->flags); retval = 0; } goto errout; } } /* * Insert serial port into IRQ chain. */ info->prev_port = NULL; info->next_port = IRQ_ports[state->irq]; if (info->next_port) info->next_port->prev_port = info; IRQ_ports[state->irq] = info; if (info->tty) clear_bit(TTY_IO_ERROR, &info->tty->flags); info->xmit.head = info->xmit.tail = 0; #if 0 /* * Set up serial timers... */ timer_table[RS_TIMER].expires = jiffies + 2*HZ/100; timer_active |= 1 << RS_TIMER; #endif /* * Set up the tty->alt_speed kludge */ if (info->tty) { if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) info->tty->alt_speed = 57600; if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI) info->tty->alt_speed = 115200; if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI) info->tty->alt_speed = 230400; if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP) info->tty->alt_speed = 460800; } info->flags |= ASYNC_INITIALIZED; local_irq_restore(flags); return 0; errout: local_irq_restore(flags); return retval; } /* * This routine is called whenever a serial port is opened. It * enables interrupts for a serial port, linking in its async structure into * the IRQ chain. It also performs the serial-specific * initialization for the tty structure. 
 */
static int rs_open(struct tty_struct *tty, struct file * filp)
{
	struct async_struct	*info;
	int			retval, line;
	unsigned long		page;

	line = tty->index;
	if ((line < 0) || (line >= NR_PORTS))
		return -ENODEV;
	retval = get_async_struct(line, &info);
	if (retval)
		return retval;
	tty->driver_data = info;
	info->tty = tty;

#ifdef SIMSERIAL_DEBUG
	printk("rs_open %s, count = %d\n", tty->name, info->state->count);
#endif
	info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;

	if (!tmp_buf) {
		page = get_zeroed_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		if (tmp_buf)
			free_page(page);
		else
			tmp_buf = (unsigned char *) page;
	}

	/*
	 * If the port is in the middle of closing, bail out now
	 */
	if (tty_hung_up_p(filp) ||
	    (info->flags & ASYNC_CLOSING)) {
		if (info->flags & ASYNC_CLOSING)
			interruptible_sleep_on(&info->close_wait);
#ifdef SERIAL_DO_RESTART
		return ((info->flags & ASYNC_HUP_NOTIFY) ?
			-EAGAIN : -ERESTARTSYS);
#else
		return -EAGAIN;
#endif
	}

	/*
	 * Start up serial port
	 */
	retval = startup(info);
	if (retval) {
		return retval;
	}

	/*
	 * figure out which console to use (should be one already)
	 */
	console = console_drivers;
	while (console) {
		if ((console->flags & CON_ENABLED) && console->write) break;
		console = console->next;
	}

#ifdef SIMSERIAL_DEBUG
	printk("rs_open ttys%d successful\n", info->line);
#endif
	return 0;
}

/*
 * /proc fs routines....
 */
static inline int line_info(char *buf, struct serial_state *state)
{
	return sprintf(buf, "%d: uart:%s port:%lX irq:%d\n",
		       state->line, uart_config[state->type].name,
		       state->port, state->irq);
}

static int rs_read_proc(char *page, char **start, off_t off, int count,
			int *eof, void *data)
{
	int i, len = 0, l;
	off_t begin = 0;

	len += sprintf(page, "simserinfo:1.0 driver:%s\n", serial_version);
	for (i = 0; i < NR_PORTS && len < 4000; i++) {
		l = line_info(page + len, &rs_table[i]);
		len += l;
		if (len+begin > off+count)
			goto done;
		if (len+begin < off) {
			begin += len;
			len = 0;
		}
	}
	*eof = 1;
done:
	if (off >= len+begin)
		return 0;
	*start = page + (begin-off);
	return ((count < begin+len-off) ? count : begin+len-off);
}

/*
 * ---------------------------------------------------------------------
 * rs_init() and friends
 *
 * rs_init() is called at boot-time to initialize the serial driver.
 * ---------------------------------------------------------------------
 */

/*
 * This routine prints out the appropriate serial driver version
 * number, and identifies which options were configured into this
 * driver.
 */
static inline void show_serial_version(void)
{
	printk(KERN_INFO "%s version %s with", serial_name, serial_version);
	printk(KERN_INFO " no serial options enabled\n");
}

static const struct tty_operations hp_ops = {
	.open = rs_open,
	.close = rs_close,
	.write = rs_write,
	.put_char = rs_put_char,
	.flush_chars = rs_flush_chars,
	.write_room = rs_write_room,
	.chars_in_buffer = rs_chars_in_buffer,
	.flush_buffer = rs_flush_buffer,
	.ioctl = rs_ioctl,
	.throttle = rs_throttle,
	.unthrottle = rs_unthrottle,
	.send_xchar = rs_send_xchar,
	.set_termios = rs_set_termios,
	.stop = rs_stop,
	.start = rs_start,
	.hangup = rs_hangup,
	.break_ctl = rs_break,
	.wait_until_sent = rs_wait_until_sent,
	.read_proc = rs_read_proc,
};

/*
 * The serial driver boot-time initialization code!
*/ static int __init simrs_init (void) { int i, rc; struct serial_state *state; if (!ia64_platform_is("hpsim")) return -ENODEV; hp_simserial_driver = alloc_tty_driver(1); if (!hp_simserial_driver) return -ENOMEM; show_serial_version(); /* Initialize the tty_driver structure */ hp_simserial_driver->owner = THIS_MODULE; hp_simserial_driver->driver_name = "simserial"; hp_simserial_driver->name = "ttyS"; hp_simserial_driver->major = TTY_MAJOR; hp_simserial_driver->minor_start = 64; hp_simserial_driver->type = TTY_DRIVER_TYPE_SERIAL; hp_simserial_driver->subtype = SERIAL_TYPE_NORMAL; hp_simserial_driver->init_termios = tty_std_termios; hp_simserial_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; hp_simserial_driver->flags = TTY_DRIVER_REAL_RAW; tty_set_operations(hp_simserial_driver, &hp_ops); /* * Let's have a little bit of fun ! */ for (i = 0, state = rs_table; i < NR_PORTS; i++,state++) { if (state->type == PORT_UNKNOWN) continue; if (!state->irq) { if ((rc = assign_irq_vector(AUTO_ASSIGN)) < 0) panic("%s: out of interrupt vectors!\n", __FUNCTION__); state->irq = rc; ia64_ssc_connect_irq(KEYBOARD_INTR, state->irq); } printk(KERN_INFO "ttyS%d at 0x%04lx (irq = %d) is a %s\n", state->line, state->port, state->irq, uart_config[state->type].name); } if (tty_register_driver(hp_simserial_driver)) panic("Couldn't register simserial driver\n"); return 0; } #ifndef MODULE __initcall(simrs_init); #endif
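/*
 * [Editor's illustrative sketch -- not part of the original driver.]
 * The transmit path above (rs_put_char/rs_write/transmit_chars) relies on
 * the kernel's CIRC_CNT/CIRC_SPACE helpers over a power-of-two ring buffer,
 * draining it in at most two chunks. The self-contained userspace program
 * below reimplements that arithmetic; all demo_* names are hypothetical.
 */
#include <stdio.h>
#include <string.h>

#define DEMO_XMIT_SIZE 16	/* power of two, like SERIAL_XMIT_SIZE */

/* same arithmetic as the kernel's CIRC_CNT/CIRC_SPACE macros */
#define DEMO_CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define DEMO_CIRC_SPACE(head, tail, size) DEMO_CIRC_CNT((tail), ((head) + 1), (size))

struct demo_ring {
	unsigned char buf[DEMO_XMIT_SIZE];
	int head;	/* producer index, as in rs_put_char()/rs_write() */
	int tail;	/* consumer index, as in transmit_chars() */
};

/*
 * Drain the ring in at most two chunks, mirroring transmit_chars():
 * first from tail up to the end of the buffer, then from index 0.
 */
static void demo_drain(struct demo_ring *r)
{
	int count;

	count = DEMO_CIRC_CNT(r->head, r->tail, DEMO_XMIT_SIZE);
	if (count > DEMO_XMIT_SIZE - r->tail)
		count = DEMO_XMIT_SIZE - r->tail;
	fwrite(r->buf + r->tail, 1, count, stdout);
	r->tail = (r->tail + count) & (DEMO_XMIT_SIZE - 1);

	/* whatever is left now starts at the beginning of the buffer */
	count = DEMO_CIRC_CNT(r->head, r->tail, DEMO_XMIT_SIZE);
	fwrite(r->buf + r->tail, 1, count, stdout);
	r->tail = (r->tail + count) & (DEMO_XMIT_SIZE - 1);
}

int main(void)
{
	struct demo_ring r = { .head = 0, .tail = 0 };
	const char *msg = "hello from the demo ring buffer\n";
	size_t i;

	for (i = 0; i < strlen(msg); i++) {
		/* make room when full; the driver instead transmits after the copy */
		if (DEMO_CIRC_SPACE(r.head, r.tail, DEMO_XMIT_SIZE) == 0)
			demo_drain(&r);
		r.buf[r.head] = msg[i];
		r.head = (r.head + 1) & (DEMO_XMIT_SIZE - 1);
	}
	demo_drain(&r);
	return 0;
}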
gpl-2.0
apascual89/android_kernel_oneplus_msm8996-1
drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c
562
4941
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Ben Skeggs */ #include "priv.h" #ifdef CONFIG_NOUVEAU_I2C_INTERNAL #define T_TIMEOUT 2200000 #define T_RISEFALL 1000 #define T_HOLD 5000 static inline void i2c_drive_scl(struct nouveau_i2c_port *port, int state) { port->func->drive_scl(port, state); } static inline void i2c_drive_sda(struct nouveau_i2c_port *port, int state) { port->func->drive_sda(port, state); } static inline int i2c_sense_scl(struct nouveau_i2c_port *port) { return port->func->sense_scl(port); } static inline int i2c_sense_sda(struct nouveau_i2c_port *port) { return port->func->sense_sda(port); } static void i2c_delay(struct nouveau_i2c_port *port, u32 nsec) { udelay((nsec + 500) / 1000); } static bool i2c_raise_scl(struct nouveau_i2c_port *port) { u32 timeout = T_TIMEOUT / T_RISEFALL; i2c_drive_scl(port, 1); do { i2c_delay(port, T_RISEFALL); } while (!i2c_sense_scl(port) && --timeout); return timeout != 0; } static int i2c_start(struct nouveau_i2c_port *port) { int ret = 0; if (!i2c_sense_scl(port) || !i2c_sense_sda(port)) { i2c_drive_scl(port, 0); i2c_drive_sda(port, 1); if (!i2c_raise_scl(port)) ret = -EBUSY; } i2c_drive_sda(port, 0); i2c_delay(port, T_HOLD); i2c_drive_scl(port, 0); i2c_delay(port, T_HOLD); return ret; } static void i2c_stop(struct nouveau_i2c_port *port) { i2c_drive_scl(port, 0); i2c_drive_sda(port, 0); i2c_delay(port, T_RISEFALL); i2c_drive_scl(port, 1); i2c_delay(port, T_HOLD); i2c_drive_sda(port, 1); i2c_delay(port, T_HOLD); } static int i2c_bitw(struct nouveau_i2c_port *port, int sda) { i2c_drive_sda(port, sda); i2c_delay(port, T_RISEFALL); if (!i2c_raise_scl(port)) return -ETIMEDOUT; i2c_delay(port, T_HOLD); i2c_drive_scl(port, 0); i2c_delay(port, T_HOLD); return 0; } static int i2c_bitr(struct nouveau_i2c_port *port) { int sda; i2c_drive_sda(port, 1); i2c_delay(port, T_RISEFALL); if (!i2c_raise_scl(port)) return -ETIMEDOUT; i2c_delay(port, T_HOLD); sda = i2c_sense_sda(port); i2c_drive_scl(port, 0); i2c_delay(port, T_HOLD); return sda; } static int i2c_get_byte(struct nouveau_i2c_port *port, u8 *byte, bool last) { int i, bit; *byte = 0; for (i = 7; i >= 0; i--) { bit = i2c_bitr(port); if (bit < 0) return bit; *byte |= bit << i; } return i2c_bitw(port, last ? 
1 : 0); } static int i2c_put_byte(struct nouveau_i2c_port *port, u8 byte) { int i, ret; for (i = 7; i >= 0; i--) { ret = i2c_bitw(port, !!(byte & (1 << i))); if (ret < 0) return ret; } ret = i2c_bitr(port); if (ret == 1) /* nack */ ret = -EIO; return ret; } static int i2c_addr(struct nouveau_i2c_port *port, struct i2c_msg *msg) { u32 addr = msg->addr << 1; if (msg->flags & I2C_M_RD) addr |= 1; return i2c_put_byte(port, addr); } static int i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct nouveau_i2c_port *port = adap->algo_data; struct i2c_msg *msg = msgs; int ret = 0, mcnt = num; ret = nouveau_i2c(port)->acquire(port, nsecs_to_jiffies(T_TIMEOUT)); if (ret) return ret; while (!ret && mcnt--) { u8 remaining = msg->len; u8 *ptr = msg->buf; ret = i2c_start(port); if (ret == 0) ret = i2c_addr(port, msg); if (msg->flags & I2C_M_RD) { while (!ret && remaining--) ret = i2c_get_byte(port, ptr++, !remaining); } else { while (!ret && remaining--) ret = i2c_put_byte(port, *ptr++); } msg++; } i2c_stop(port); nouveau_i2c(port)->release(port); return (ret < 0) ? ret : num; } #else static int i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { return -ENODEV; } #endif static u32 i2c_bit_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } const struct i2c_algorithm nouveau_i2c_bit_algo = { .master_xfer = i2c_bit_xfer, .functionality = i2c_bit_func };
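/*
 * [Editor's illustrative sketch -- not part of the original file.]
 * The bit-banging routines above generate the standard open-drain I2C
 * waveform: START (SDA falls while SCL is high), eight data bits clocked
 * MSB-first, an ACK bit sampled with SDA released, and STOP (SDA rises
 * while SCL is high). The self-contained userspace program below mirrors
 * that sequence with a hypothetical logging stub in place of the port
 * callbacks; there is no slave and no clock stretching here.
 */
#include <stdio.h>

struct demo_port {
	int scl, sda;	/* last driven line states */
};

static void demo_drive_scl(struct demo_port *p, int v) { p->scl = v; printf("SCL=%d\n", v); }
static void demo_drive_sda(struct demo_port *p, int v) { p->sda = v; printf("SDA=%d\n", v); }

/* a real port would sample the wire; the stub echoes what was driven */
static int demo_sense_sda(struct demo_port *p) { return p->sda; }

static void demo_start(struct demo_port *p)
{
	demo_drive_sda(p, 1);
	demo_drive_scl(p, 1);
	demo_drive_sda(p, 0);	/* SDA falls while SCL is high: START */
	demo_drive_scl(p, 0);
}

static void demo_stop(struct demo_port *p)
{
	demo_drive_sda(p, 0);	/* ensure SDA is low while SCL is low */
	demo_drive_scl(p, 1);
	demo_drive_sda(p, 1);	/* SDA rises while SCL is high: STOP */
}

static void demo_bitw(struct demo_port *p, int bit)
{
	demo_drive_sda(p, bit);	/* set data while SCL is low */
	demo_drive_scl(p, 1);	/* clock the bit out */
	demo_drive_scl(p, 0);
}

static int demo_bitr(struct demo_port *p)
{
	int bit;

	demo_drive_sda(p, 1);	/* release SDA so a slave could pull it low */
	demo_drive_scl(p, 1);
	bit = demo_sense_sda(p);
	demo_drive_scl(p, 0);
	return bit;
}

/* write one byte MSB-first, return the ACK bit as i2c_put_byte() does */
static int demo_put_byte(struct demo_port *p, unsigned char byte)
{
	int i;

	for (i = 7; i >= 0; i--)
		demo_bitw(p, !!(byte & (1 << i)));
	return demo_bitr(p);	/* 0 = ack, 1 = nack */
}

int main(void)
{
	struct demo_port p = { 1, 1 };

	demo_start(&p);
	/* hypothetical 7-bit address 0x50, R/W bit 0 (write) */
	if (demo_put_byte(&p, (0x50 << 1) | 0))
		printf("nack (expected: the stub bus has no slave)\n");
	demo_stop(&p);
	return 0;
}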
gpl-2.0
k2wlxda/3.10
drivers/ata/pata_scc.c
1842
29547
/* * Support for IDE interfaces on Celleb platform * * (C) Copyright 2006 TOSHIBA CORPORATION * * This code is based on drivers/ata/ata_piix.c: * Copyright 2003-2005 Red Hat Inc * Copyright 2003-2005 Jeff Garzik * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org> * Copyright (C) 2003 Red Hat Inc * * and drivers/ata/ahci.c: * Copyright 2004-2005 Red Hat, Inc. * * and drivers/ata/libata-core.c: * Copyright 2003-2004 Red Hat, Inc. All rights reserved. * Copyright 2003-2004 Jeff Garzik * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/device.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_scc" #define DRV_VERSION "0.3" #define PCI_DEVICE_ID_TOSHIBA_SCC_ATA 0x01b4 /* PCI BARs */ #define SCC_CTRL_BAR 0 #define SCC_BMID_BAR 1 /* offset of CTRL registers */ #define SCC_CTL_PIOSHT 0x000 #define SCC_CTL_PIOCT 0x004 #define SCC_CTL_MDMACT 0x008 #define SCC_CTL_MCRCST 0x00C #define SCC_CTL_SDMACT 0x010 #define SCC_CTL_SCRCST 0x014 #define SCC_CTL_UDENVT 0x018 #define SCC_CTL_TDVHSEL 0x020 #define SCC_CTL_MODEREG 0x024 #define SCC_CTL_ECMODE 0xF00 #define SCC_CTL_MAEA0 0xF50 #define SCC_CTL_MAEC0 0xF54 #define SCC_CTL_CCKCTRL 0xFF0 /* offset of BMID registers */ #define SCC_DMA_CMD 0x000 #define SCC_DMA_STATUS 0x004 #define SCC_DMA_TABLE_OFS 0x008 #define SCC_DMA_INTMASK 0x010 #define SCC_DMA_INTST 0x014 #define SCC_DMA_PTERADD 0x018 #define SCC_REG_CMD_ADDR 0x020 #define SCC_REG_DATA 0x000 #define SCC_REG_ERR 0x004 #define SCC_REG_FEATURE 0x004 #define SCC_REG_NSECT 0x008 #define SCC_REG_LBAL 0x00C #define SCC_REG_LBAM 0x010 #define SCC_REG_LBAH 0x014 #define SCC_REG_DEVICE 0x018 #define SCC_REG_STATUS 0x01C #define SCC_REG_CMD 0x01C #define SCC_REG_ALTSTATUS 0x020 /* register value */ #define TDVHSEL_MASTER 0x00000001 #define TDVHSEL_SLAVE 0x00000004 #define MODE_JCUSFEN 0x00000080 #define ECMODE_VALUE 0x01 #define CCKCTRL_ATARESET 0x00040000 #define CCKCTRL_BUFCNT 0x00020000 #define CCKCTRL_CRST 0x00010000 #define CCKCTRL_OCLKEN 0x00000100 #define CCKCTRL_ATACLKOEN 0x00000002 #define CCKCTRL_LCLKEN 0x00000001 #define QCHCD_IOS_SS 0x00000001 #define QCHSD_STPDIAG 0x00020000 #define INTMASK_MSK 0xD1000012 #define INTSTS_SERROR 0x80000000 #define INTSTS_PRERR 0x40000000 #define INTSTS_RERR 0x10000000 #define INTSTS_ICERR 0x01000000 #define INTSTS_BMSINT 0x00000010 #define INTSTS_BMHE 0x00000008 #define INTSTS_IOIRQS 0x00000004 #define INTSTS_INTRQ 0x00000002 #define INTSTS_ACTEINT 0x00000001 /* PIO transfer mode table */ /* JCHST */ static const unsigned long JCHSTtbl[2][7] = { {0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00}, /* 100MHz */ {0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00} /* 
133MHz */
};

/* JCHHT */
static const unsigned long JCHHTtbl[2][7] = {
	{0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00},	/* 100MHz */
	{0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00}	/* 133MHz */
};

/* JCHCT */
static const unsigned long JCHCTtbl[2][7] = {
	{0x1D, 0x1D, 0x1C, 0x0B, 0x06, 0x00, 0x00},	/* 100MHz */
	{0x27, 0x26, 0x26, 0x0E, 0x09, 0x00, 0x00}	/* 133MHz */
};

/* DMA transfer mode table */
/* JCHDCTM/JCHDCTS */
static const unsigned long JCHDCTxtbl[2][7] = {
	{0x0A, 0x06, 0x04, 0x03, 0x01, 0x00, 0x00},	/* 100MHz */
	{0x0E, 0x09, 0x06, 0x04, 0x02, 0x01, 0x00}	/* 133MHz */
};

/* JCSTWTM/JCSTWTS */
static const unsigned long JCSTWTxtbl[2][7] = {
	{0x06, 0x04, 0x03, 0x02, 0x02, 0x02, 0x00},	/* 100MHz */
	{0x09, 0x06, 0x04, 0x02, 0x02, 0x02, 0x02}	/* 133MHz */
};

/* JCTSS */
static const unsigned long JCTSStbl[2][7] = {
	{0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00},	/* 100MHz */
	{0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05}	/* 133MHz */
};

/* JCENVT */
static const unsigned long JCENVTtbl[2][7] = {
	{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00},	/* 100MHz */
	{0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02}	/* 133MHz */
};

/* JCACTSELS/JCACTSELM */
static const unsigned long JCACTSELtbl[2][7] = {
	{0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00},	/* 100MHz */
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}	/* 133MHz */
};

static const struct pci_device_id scc_pci_tbl[] = {
	{ PCI_VDEVICE(TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA), 0},
	{ }	/* terminate list */
};

/**
 *	scc_set_piomode		-	Initialize host controller PATA PIO timings
 *	@ap: Port whose timings we are configuring
 *	@adev: Device whose timings we are configuring
 *
 *	Set PIO mode for device.
 *
 *	LOCKING:
 *	None (inherited from caller).
 */

static void scc_set_piomode (struct ata_port *ap, struct ata_device *adev)
{
	unsigned int pio = adev->pio_mode - XFER_PIO_0;
	void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR];
	void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL;
	void __iomem *piosht_port = ctrl_base + SCC_CTL_PIOSHT;
	void __iomem *pioct_port = ctrl_base + SCC_CTL_PIOCT;
	unsigned long reg;
	int offset;

	reg = in_be32(cckctrl_port);
	if (reg & CCKCTRL_ATACLKOEN)
		offset = 1;	/* 133MHz */
	else
		offset = 0;	/* 100MHz */

	reg = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio];
	out_be32(piosht_port, reg);
	reg = JCHCTtbl[offset][pio];
	out_be32(pioct_port, reg);
}

/**
 *	scc_set_dmamode		-	Initialize host controller PATA DMA timings
 *	@ap: Port whose timings we are configuring
 *	@adev: Device whose timings we are configuring
 *
 *	Set UDMA mode for device.
 *
 *	LOCKING:
 *	None (inherited from caller).
*/ static void scc_set_dmamode (struct ata_port *ap, struct ata_device *adev) { unsigned int udma = adev->dma_mode; unsigned int is_slave = (adev->devno != 0); u8 speed = udma; void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR]; void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL; void __iomem *mdmact_port = ctrl_base + SCC_CTL_MDMACT; void __iomem *mcrcst_port = ctrl_base + SCC_CTL_MCRCST; void __iomem *sdmact_port = ctrl_base + SCC_CTL_SDMACT; void __iomem *scrcst_port = ctrl_base + SCC_CTL_SCRCST; void __iomem *udenvt_port = ctrl_base + SCC_CTL_UDENVT; void __iomem *tdvhsel_port = ctrl_base + SCC_CTL_TDVHSEL; int offset, idx; if (in_be32(cckctrl_port) & CCKCTRL_ATACLKOEN) offset = 1; /* 133MHz */ else offset = 0; /* 100MHz */ if (speed >= XFER_UDMA_0) idx = speed - XFER_UDMA_0; else return; if (is_slave) { out_be32(sdmact_port, JCHDCTxtbl[offset][idx]); out_be32(scrcst_port, JCSTWTxtbl[offset][idx]); out_be32(tdvhsel_port, (in_be32(tdvhsel_port) & ~TDVHSEL_SLAVE) | (JCACTSELtbl[offset][idx] << 2)); } else { out_be32(mdmact_port, JCHDCTxtbl[offset][idx]); out_be32(mcrcst_port, JCSTWTxtbl[offset][idx]); out_be32(tdvhsel_port, (in_be32(tdvhsel_port) & ~TDVHSEL_MASTER) | JCACTSELtbl[offset][idx]); } out_be32(udenvt_port, JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx]); } unsigned long scc_mode_filter(struct ata_device *adev, unsigned long mask) { /* errata A308 workaround: limit ATAPI UDMA mode to UDMA4 */ if (adev->class == ATA_DEV_ATAPI && (mask & (0xE0 << ATA_SHIFT_UDMA))) { printk(KERN_INFO "%s: limit ATAPI UDMA to UDMA4\n", DRV_NAME); mask &= ~(0xE0 << ATA_SHIFT_UDMA); } return mask; } /** * scc_tf_load - send taskfile registers to host controller * @ap: Port to which output is sent * @tf: ATA taskfile register set * * Note: Original code is ata_sff_tf_load(). */ static void scc_tf_load (struct ata_port *ap, const struct ata_taskfile *tf) { struct ata_ioports *ioaddr = &ap->ioaddr; unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; if (tf->ctl != ap->last_ctl) { out_be32(ioaddr->ctl_addr, tf->ctl); ap->last_ctl = tf->ctl; ata_wait_idle(ap); } if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { out_be32(ioaddr->feature_addr, tf->hob_feature); out_be32(ioaddr->nsect_addr, tf->hob_nsect); out_be32(ioaddr->lbal_addr, tf->hob_lbal); out_be32(ioaddr->lbam_addr, tf->hob_lbam); out_be32(ioaddr->lbah_addr, tf->hob_lbah); VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", tf->hob_feature, tf->hob_nsect, tf->hob_lbal, tf->hob_lbam, tf->hob_lbah); } if (is_addr) { out_be32(ioaddr->feature_addr, tf->feature); out_be32(ioaddr->nsect_addr, tf->nsect); out_be32(ioaddr->lbal_addr, tf->lbal); out_be32(ioaddr->lbam_addr, tf->lbam); out_be32(ioaddr->lbah_addr, tf->lbah); VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", tf->feature, tf->nsect, tf->lbal, tf->lbam, tf->lbah); } if (tf->flags & ATA_TFLAG_DEVICE) { out_be32(ioaddr->device_addr, tf->device); VPRINTK("device 0x%X\n", tf->device); } ata_wait_idle(ap); } /** * scc_check_status - Read device status reg & clear interrupt * @ap: port where the device is * * Note: Original code is ata_check_status(). */ static u8 scc_check_status (struct ata_port *ap) { return in_be32(ap->ioaddr.status_addr); } /** * scc_tf_read - input device's ATA taskfile shadow registers * @ap: Port from which input is read * @tf: ATA taskfile register set for storing input * * Note: Original code is ata_sff_tf_read(). 
*/ static void scc_tf_read (struct ata_port *ap, struct ata_taskfile *tf) { struct ata_ioports *ioaddr = &ap->ioaddr; tf->command = scc_check_status(ap); tf->feature = in_be32(ioaddr->error_addr); tf->nsect = in_be32(ioaddr->nsect_addr); tf->lbal = in_be32(ioaddr->lbal_addr); tf->lbam = in_be32(ioaddr->lbam_addr); tf->lbah = in_be32(ioaddr->lbah_addr); tf->device = in_be32(ioaddr->device_addr); if (tf->flags & ATA_TFLAG_LBA48) { out_be32(ioaddr->ctl_addr, tf->ctl | ATA_HOB); tf->hob_feature = in_be32(ioaddr->error_addr); tf->hob_nsect = in_be32(ioaddr->nsect_addr); tf->hob_lbal = in_be32(ioaddr->lbal_addr); tf->hob_lbam = in_be32(ioaddr->lbam_addr); tf->hob_lbah = in_be32(ioaddr->lbah_addr); out_be32(ioaddr->ctl_addr, tf->ctl); ap->last_ctl = tf->ctl; } } /** * scc_exec_command - issue ATA command to host controller * @ap: port to which command is being issued * @tf: ATA taskfile register set * * Note: Original code is ata_sff_exec_command(). */ static void scc_exec_command (struct ata_port *ap, const struct ata_taskfile *tf) { DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command); out_be32(ap->ioaddr.command_addr, tf->command); ata_sff_pause(ap); } /** * scc_check_altstatus - Read device alternate status reg * @ap: port where the device is */ static u8 scc_check_altstatus (struct ata_port *ap) { return in_be32(ap->ioaddr.altstatus_addr); } /** * scc_dev_select - Select device 0/1 on ATA bus * @ap: ATA channel to manipulate * @device: ATA device (numbered from zero) to select * * Note: Original code is ata_sff_dev_select(). */ static void scc_dev_select (struct ata_port *ap, unsigned int device) { u8 tmp; if (device == 0) tmp = ATA_DEVICE_OBS; else tmp = ATA_DEVICE_OBS | ATA_DEV1; out_be32(ap->ioaddr.device_addr, tmp); ata_sff_pause(ap); } /** * scc_set_devctl - Write device control reg * @ap: port where the device is * @ctl: value to write */ static void scc_set_devctl(struct ata_port *ap, u8 ctl) { out_be32(ap->ioaddr.ctl_addr, ctl); } /** * scc_bmdma_setup - Set up PCI IDE BMDMA transaction * @qc: Info associated with this ATA transaction. * * Note: Original code is ata_bmdma_setup(). */ static void scc_bmdma_setup (struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); u8 dmactl; void __iomem *mmio = ap->ioaddr.bmdma_addr; /* load PRD table addr */ out_be32(mmio + SCC_DMA_TABLE_OFS, ap->bmdma_prd_dma); /* specify data direction, triple-check start bit is clear */ dmactl = in_be32(mmio + SCC_DMA_CMD); dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); if (!rw) dmactl |= ATA_DMA_WR; out_be32(mmio + SCC_DMA_CMD, dmactl); /* issue r/w command */ ap->ops->sff_exec_command(ap, &qc->tf); } /** * scc_bmdma_start - Start a PCI IDE BMDMA transaction * @qc: Info associated with this ATA transaction. * * Note: Original code is ata_bmdma_start(). */ static void scc_bmdma_start (struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; u8 dmactl; void __iomem *mmio = ap->ioaddr.bmdma_addr; /* start host DMA transaction */ dmactl = in_be32(mmio + SCC_DMA_CMD); out_be32(mmio + SCC_DMA_CMD, dmactl | ATA_DMA_START); } /** * scc_devchk - PATA device presence detection * @ap: ATA channel to examine * @device: Device to examine (starting at zero) * * Note: Original code is ata_devchk(). 
*/ static unsigned int scc_devchk (struct ata_port *ap, unsigned int device) { struct ata_ioports *ioaddr = &ap->ioaddr; u8 nsect, lbal; ap->ops->sff_dev_select(ap, device); out_be32(ioaddr->nsect_addr, 0x55); out_be32(ioaddr->lbal_addr, 0xaa); out_be32(ioaddr->nsect_addr, 0xaa); out_be32(ioaddr->lbal_addr, 0x55); out_be32(ioaddr->nsect_addr, 0x55); out_be32(ioaddr->lbal_addr, 0xaa); nsect = in_be32(ioaddr->nsect_addr); lbal = in_be32(ioaddr->lbal_addr); if ((nsect == 0x55) && (lbal == 0xaa)) return 1; /* we found a device */ return 0; /* nothing found */ } /** * scc_wait_after_reset - wait for devices to become ready after reset * * Note: Original code is ata_sff_wait_after_reset */ static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask, unsigned long deadline) { struct ata_port *ap = link->ap; struct ata_ioports *ioaddr = &ap->ioaddr; unsigned int dev0 = devmask & (1 << 0); unsigned int dev1 = devmask & (1 << 1); int rc, ret = 0; /* Spec mandates ">= 2ms" before checking status. We wait * 150ms, because that was the magic delay used for ATAPI * devices in Hale Landis's ATADRVR, for the period of time * between when the ATA command register is written, and then * status is checked. Because waiting for "a while" before * checking status is fine, post SRST, we perform this magic * delay here as well. * * Old drivers/ide uses the 2mS rule and then waits for ready. */ ata_msleep(ap, 150); /* always check readiness of the master device */ rc = ata_sff_wait_ready(link, deadline); /* -ENODEV means the odd clown forgot the D7 pulldown resistor * and TF status is 0xff, bail out on it too. */ if (rc) return rc; /* if device 1 was found in ata_devchk, wait for register * access briefly, then wait for BSY to clear. */ if (dev1) { int i; ap->ops->sff_dev_select(ap, 1); /* Wait for register access. Some ATAPI devices fail * to set nsect/lbal after reset, so don't waste too * much time on it. We're gonna wait for !BSY anyway. */ for (i = 0; i < 2; i++) { u8 nsect, lbal; nsect = in_be32(ioaddr->nsect_addr); lbal = in_be32(ioaddr->lbal_addr); if ((nsect == 1) && (lbal == 1)) break; ata_msleep(ap, 50); /* give drive a breather */ } rc = ata_sff_wait_ready(link, deadline); if (rc) { if (rc != -ENODEV) return rc; ret = rc; } } /* is all this really necessary? */ ap->ops->sff_dev_select(ap, 0); if (dev1) ap->ops->sff_dev_select(ap, 1); if (dev0) ap->ops->sff_dev_select(ap, 0); return ret; } /** * scc_bus_softreset - PATA device software reset * * Note: Original code is ata_bus_softreset(). */ static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask, unsigned long deadline) { struct ata_ioports *ioaddr = &ap->ioaddr; DPRINTK("ata%u: bus reset via SRST\n", ap->print_id); /* software reset. causes dev0 to be selected */ out_be32(ioaddr->ctl_addr, ap->ctl); udelay(20); out_be32(ioaddr->ctl_addr, ap->ctl | ATA_SRST); udelay(20); out_be32(ioaddr->ctl_addr, ap->ctl); scc_wait_after_reset(&ap->link, devmask, deadline); return 0; } /** * scc_softreset - reset host port via ATA SRST * @ap: port to reset * @classes: resulting classes of attached devices * @deadline: deadline jiffies for the operation * * Note: Original code is ata_sff_softreset(). 
*/ static int scc_softreset(struct ata_link *link, unsigned int *classes, unsigned long deadline) { struct ata_port *ap = link->ap; unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; unsigned int devmask = 0, err_mask; u8 err; DPRINTK("ENTER\n"); /* determine if device 0/1 are present */ if (scc_devchk(ap, 0)) devmask |= (1 << 0); if (slave_possible && scc_devchk(ap, 1)) devmask |= (1 << 1); /* select device 0 again */ ap->ops->sff_dev_select(ap, 0); /* issue bus reset */ DPRINTK("about to softreset, devmask=%x\n", devmask); err_mask = scc_bus_softreset(ap, devmask, deadline); if (err_mask) { ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", err_mask); return -EIO; } /* determine by signature whether we have ATA or ATAPI devices */ classes[0] = ata_sff_dev_classify(&ap->link.device[0], devmask & (1 << 0), &err); if (slave_possible && err != 0x81) classes[1] = ata_sff_dev_classify(&ap->link.device[1], devmask & (1 << 1), &err); DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]); return 0; } /** * scc_bmdma_stop - Stop PCI IDE BMDMA transfer * @qc: Command we are ending DMA for */ static void scc_bmdma_stop (struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR]; void __iomem *bmid_base = ap->host->iomap[SCC_BMID_BAR]; u32 reg; while (1) { reg = in_be32(bmid_base + SCC_DMA_INTST); if (reg & INTSTS_SERROR) { printk(KERN_WARNING "%s: SERROR\n", DRV_NAME); out_be32(bmid_base + SCC_DMA_INTST, INTSTS_SERROR|INTSTS_BMSINT); out_be32(bmid_base + SCC_DMA_CMD, in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START); continue; } if (reg & INTSTS_PRERR) { u32 maea0, maec0; maea0 = in_be32(ctrl_base + SCC_CTL_MAEA0); maec0 = in_be32(ctrl_base + SCC_CTL_MAEC0); printk(KERN_WARNING "%s: PRERR [addr:%x cmd:%x]\n", DRV_NAME, maea0, maec0); out_be32(bmid_base + SCC_DMA_INTST, INTSTS_PRERR|INTSTS_BMSINT); out_be32(bmid_base + SCC_DMA_CMD, in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START); continue; } if (reg & INTSTS_RERR) { printk(KERN_WARNING "%s: Response Error\n", DRV_NAME); out_be32(bmid_base + SCC_DMA_INTST, INTSTS_RERR|INTSTS_BMSINT); out_be32(bmid_base + SCC_DMA_CMD, in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START); continue; } if (reg & INTSTS_ICERR) { out_be32(bmid_base + SCC_DMA_CMD, in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START); printk(KERN_WARNING "%s: Illegal Configuration\n", DRV_NAME); out_be32(bmid_base + SCC_DMA_INTST, INTSTS_ICERR|INTSTS_BMSINT); continue; } if (reg & INTSTS_BMSINT) { unsigned int classes; unsigned long deadline = ata_deadline(jiffies, ATA_TMOUT_BOOT); printk(KERN_WARNING "%s: Internal Bus Error\n", DRV_NAME); out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMSINT); /* TBD: SW reset */ scc_softreset(&ap->link, &classes, deadline); continue; } if (reg & INTSTS_BMHE) { out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMHE); continue; } if (reg & INTSTS_ACTEINT) { out_be32(bmid_base + SCC_DMA_INTST, INTSTS_ACTEINT); continue; } if (reg & INTSTS_IOIRQS) { out_be32(bmid_base + SCC_DMA_INTST, INTSTS_IOIRQS); continue; } break; } /* clear start/stop bit */ out_be32(bmid_base + SCC_DMA_CMD, in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START); /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ ata_sff_dma_pause(ap); /* dummy read */ } /** * scc_bmdma_status - Read PCI IDE BMDMA status * @ap: Port associated with this ATA transaction. 
*/ static u8 scc_bmdma_status (struct ata_port *ap) { void __iomem *mmio = ap->ioaddr.bmdma_addr; u8 host_stat = in_be32(mmio + SCC_DMA_STATUS); u32 int_status = in_be32(mmio + SCC_DMA_INTST); struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); static int retry = 0; /* return if IOS_SS is cleared */ if (!(in_be32(mmio + SCC_DMA_CMD) & ATA_DMA_START)) return host_stat; /* errata A252,A308 workaround: Step4 */ if ((scc_check_altstatus(ap) & ATA_ERR) && (int_status & INTSTS_INTRQ)) return (host_stat | ATA_DMA_INTR); /* errata A308 workaround Step5 */ if (int_status & INTSTS_IOIRQS) { host_stat |= ATA_DMA_INTR; /* We don't check ATAPI DMA because it is limited to UDMA4 */ if ((qc->tf.protocol == ATA_PROT_DMA && qc->dev->xfer_mode > XFER_UDMA_4)) { if (!(int_status & INTSTS_ACTEINT)) { printk(KERN_WARNING "ata%u: operation failed (transfer data loss)\n", ap->print_id); host_stat |= ATA_DMA_ERR; if (retry++) ap->udma_mask &= ~(1 << qc->dev->xfer_mode); } else retry = 0; } } return host_stat; } /** * scc_data_xfer - Transfer data by PIO * @dev: device for this I/O * @buf: data buffer * @buflen: buffer length * @rw: read/write * * Note: Original code is ata_sff_data_xfer(). */ static unsigned int scc_data_xfer (struct ata_device *dev, unsigned char *buf, unsigned int buflen, int rw) { struct ata_port *ap = dev->link->ap; unsigned int words = buflen >> 1; unsigned int i; __le16 *buf16 = (__le16 *) buf; void __iomem *mmio = ap->ioaddr.data_addr; /* Transfer multiple of 2 bytes */ if (rw == READ) for (i = 0; i < words; i++) buf16[i] = cpu_to_le16(in_be32(mmio)); else for (i = 0; i < words; i++) out_be32(mmio, le16_to_cpu(buf16[i])); /* Transfer trailing 1 byte, if any. */ if (unlikely(buflen & 0x01)) { __le16 align_buf[1] = { 0 }; unsigned char *trailing_buf = buf + buflen - 1; if (rw == READ) { align_buf[0] = cpu_to_le16(in_be32(mmio)); memcpy(trailing_buf, align_buf, 1); } else { memcpy(align_buf, trailing_buf, 1); out_be32(mmio, le16_to_cpu(align_buf[0])); } words++; } return words << 1; } /** * scc_postreset - standard postreset callback * @ap: the target ata_port * @classes: classes of attached devices * * Note: Original code is ata_sff_postreset(). */ static void scc_postreset(struct ata_link *link, unsigned int *classes) { struct ata_port *ap = link->ap; DPRINTK("ENTER\n"); /* is double-select really necessary? */ if (classes[0] != ATA_DEV_NONE) ap->ops->sff_dev_select(ap, 1); if (classes[1] != ATA_DEV_NONE) ap->ops->sff_dev_select(ap, 0); /* bail out if no device is present */ if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) { DPRINTK("EXIT, no device\n"); return; } /* set up device control */ out_be32(ap->ioaddr.ctl_addr, ap->ctl); DPRINTK("EXIT\n"); } /** * scc_irq_clear - Clear PCI IDE BMDMA interrupt. * @ap: Port associated with this ATA transaction. * * Note: Original code is ata_bmdma_irq_clear(). */ static void scc_irq_clear (struct ata_port *ap) { void __iomem *mmio = ap->ioaddr.bmdma_addr; if (!mmio) return; out_be32(mmio + SCC_DMA_STATUS, in_be32(mmio + SCC_DMA_STATUS)); } /** * scc_port_start - Set port up for dma. * @ap: Port to initialize * * Allocate space for PRD table using ata_bmdma_port_start(). * Set PRD table address for PTERADD. 
(PRD Transfer End Read) */ static int scc_port_start (struct ata_port *ap) { void __iomem *mmio = ap->ioaddr.bmdma_addr; int rc; rc = ata_bmdma_port_start(ap); if (rc) return rc; out_be32(mmio + SCC_DMA_PTERADD, ap->bmdma_prd_dma); return 0; } /** * scc_port_stop - Undo scc_port_start() * @ap: Port to shut down * * Reset PTERADD. */ static void scc_port_stop (struct ata_port *ap) { void __iomem *mmio = ap->ioaddr.bmdma_addr; out_be32(mmio + SCC_DMA_PTERADD, 0); } static struct scsi_host_template scc_sht = { ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations scc_pata_ops = { .inherits = &ata_bmdma_port_ops, .set_piomode = scc_set_piomode, .set_dmamode = scc_set_dmamode, .mode_filter = scc_mode_filter, .sff_tf_load = scc_tf_load, .sff_tf_read = scc_tf_read, .sff_exec_command = scc_exec_command, .sff_check_status = scc_check_status, .sff_check_altstatus = scc_check_altstatus, .sff_dev_select = scc_dev_select, .sff_set_devctl = scc_set_devctl, .bmdma_setup = scc_bmdma_setup, .bmdma_start = scc_bmdma_start, .bmdma_stop = scc_bmdma_stop, .bmdma_status = scc_bmdma_status, .sff_data_xfer = scc_data_xfer, .cable_detect = ata_cable_80wire, .softreset = scc_softreset, .postreset = scc_postreset, .sff_irq_clear = scc_irq_clear, .port_start = scc_port_start, .port_stop = scc_port_stop, }; static struct ata_port_info scc_port_info[] = { { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, /* No MWDMA */ .udma_mask = ATA_UDMA6, .port_ops = &scc_pata_ops, }, }; /** * scc_reset_controller - initialize SCC PATA controller. */ static int scc_reset_controller(struct ata_host *host) { void __iomem *ctrl_base = host->iomap[SCC_CTRL_BAR]; void __iomem *bmid_base = host->iomap[SCC_BMID_BAR]; void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL; void __iomem *mode_port = ctrl_base + SCC_CTL_MODEREG; void __iomem *ecmode_port = ctrl_base + SCC_CTL_ECMODE; void __iomem *intmask_port = bmid_base + SCC_DMA_INTMASK; void __iomem *dmastatus_port = bmid_base + SCC_DMA_STATUS; u32 reg = 0; out_be32(cckctrl_port, reg); reg |= CCKCTRL_ATACLKOEN; out_be32(cckctrl_port, reg); reg |= CCKCTRL_LCLKEN | CCKCTRL_OCLKEN; out_be32(cckctrl_port, reg); reg |= CCKCTRL_CRST; out_be32(cckctrl_port, reg); for (;;) { reg = in_be32(cckctrl_port); if (reg & CCKCTRL_CRST) break; udelay(5000); } reg |= CCKCTRL_ATARESET; out_be32(cckctrl_port, reg); out_be32(ecmode_port, ECMODE_VALUE); out_be32(mode_port, MODE_JCUSFEN); out_be32(intmask_port, INTMASK_MSK); if (in_be32(dmastatus_port) & QCHSD_STPDIAG) { printk(KERN_WARNING "%s: failed to detect 80c cable. (PDIAG# is high)\n", DRV_NAME); return -EIO; } return 0; } /** * scc_setup_ports - initialize ioaddr with SCC PATA port offsets. 
* @ioaddr: IO address structure to be initialized * @base: base address of BMID region */ static void scc_setup_ports (struct ata_ioports *ioaddr, void __iomem *base) { ioaddr->cmd_addr = base + SCC_REG_CMD_ADDR; ioaddr->altstatus_addr = ioaddr->cmd_addr + SCC_REG_ALTSTATUS; ioaddr->ctl_addr = ioaddr->cmd_addr + SCC_REG_ALTSTATUS; ioaddr->bmdma_addr = base; ioaddr->data_addr = ioaddr->cmd_addr + SCC_REG_DATA; ioaddr->error_addr = ioaddr->cmd_addr + SCC_REG_ERR; ioaddr->feature_addr = ioaddr->cmd_addr + SCC_REG_FEATURE; ioaddr->nsect_addr = ioaddr->cmd_addr + SCC_REG_NSECT; ioaddr->lbal_addr = ioaddr->cmd_addr + SCC_REG_LBAL; ioaddr->lbam_addr = ioaddr->cmd_addr + SCC_REG_LBAM; ioaddr->lbah_addr = ioaddr->cmd_addr + SCC_REG_LBAH; ioaddr->device_addr = ioaddr->cmd_addr + SCC_REG_DEVICE; ioaddr->status_addr = ioaddr->cmd_addr + SCC_REG_STATUS; ioaddr->command_addr = ioaddr->cmd_addr + SCC_REG_CMD; } static int scc_host_init(struct ata_host *host) { struct pci_dev *pdev = to_pci_dev(host->dev); int rc; rc = scc_reset_controller(host); if (rc) return rc; rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); if (rc) return rc; rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); if (rc) return rc; scc_setup_ports(&host->ports[0]->ioaddr, host->iomap[SCC_BMID_BAR]); pci_set_master(pdev); return 0; } /** * scc_init_one - Register SCC PATA device with kernel services * @pdev: PCI device to register * @ent: Entry in scc_pci_tbl matching with @pdev * * LOCKING: * Inherited from PCI layer (may sleep). * * RETURNS: * Zero on success, or -ERRNO value. */ static int scc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) { unsigned int board_idx = (unsigned int) ent->driver_data; const struct ata_port_info *ppi[] = { &scc_port_info[board_idx], NULL }; struct ata_host *host; int rc; ata_print_version_once(&pdev->dev, DRV_VERSION); host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1); if (!host) return -ENOMEM; rc = pcim_enable_device(pdev); if (rc) return rc; rc = pcim_iomap_regions(pdev, (1 << SCC_CTRL_BAR) | (1 << SCC_BMID_BAR), DRV_NAME); if (rc == -EBUSY) pcim_pin_device(pdev); if (rc) return rc; host->iomap = pcim_iomap_table(pdev); ata_port_pbar_desc(host->ports[0], SCC_CTRL_BAR, -1, "ctrl"); ata_port_pbar_desc(host->ports[0], SCC_BMID_BAR, -1, "bmid"); rc = scc_host_init(host); if (rc) return rc; return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, IRQF_SHARED, &scc_sht); } static struct pci_driver scc_pci_driver = { .name = DRV_NAME, .id_table = scc_pci_tbl, .probe = scc_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM .suspend = ata_pci_device_suspend, .resume = ata_pci_device_resume, #endif }; module_pci_driver(scc_pci_driver); MODULE_AUTHOR("Toshiba corp"); MODULE_DESCRIPTION("SCSI low-level driver for Toshiba SCC PATA controller"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, scc_pci_tbl); MODULE_VERSION(DRV_VERSION);
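/*
 * [Editor's illustrative sketch -- not part of the original driver.]
 * scc_set_piomode()/scc_set_dmamode() above are table-driven: the CCKCTRL
 * clock bit picks the 100MHz or 133MHz row, the transfer mode picks the
 * column, and two table entries are packed into one timing register.
 * The self-contained userspace program below replays that selection with
 * the JCHST/JCHHT values copied from the tables above; the register write
 * is replaced by a printf and all demo_* names are hypothetical.
 */
#include <stdio.h>

#define DEMO_ATACLKOEN 0x00000002	/* 133MHz clock, as CCKCTRL_ATACLKOEN */

static const unsigned long demo_jchst[2][7] = {
	{0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00},	/* 100MHz */
	{0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00}	/* 133MHz */
};
static const unsigned long demo_jchht[2][7] = {
	{0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00},	/* 100MHz */
	{0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00}	/* 133MHz */
};

static void demo_set_piomode(unsigned long cckctrl, unsigned int pio)
{
	/* row: clock rate; column: PIO mode */
	int offset = (cckctrl & DEMO_ATACLKOEN) ? 1 : 0;
	unsigned long reg = demo_jchst[offset][pio] << 16 | demo_jchht[offset][pio];

	printf("PIO%u @ %s MHz -> PIOSHT = 0x%08lx\n",
	       pio, offset ? "133" : "100", reg);
}

int main(void)
{
	unsigned int pio;

	for (pio = 0; pio <= 4; pio++)	/* the driver advertises ATA_PIO4 */
		demo_set_piomode(DEMO_ATACLKOEN, pio);
	return 0;
}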
gpl-2.0
vinay94185vinay/Hybrid
sound/usb/stream.c
3378
13252
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/usb/audio.h> #include <linux/usb/audio-v2.h> #include <sound/core.h> #include <sound/pcm.h> #include "usbaudio.h" #include "card.h" #include "proc.h" #include "quirks.h" #include "endpoint.h" #include "pcm.h" #include "helper.h" #include "format.h" #include "clock.h" #include "stream.h" /* * free a substream */ static void free_substream(struct snd_usb_substream *subs) { struct list_head *p, *n; if (!subs->num_formats) return; /* not initialized */ list_for_each_safe(p, n, &subs->fmt_list) { struct audioformat *fp = list_entry(p, struct audioformat, list); kfree(fp->rate_table); kfree(fp); } kfree(subs->rate_list.list); } /* * free a usb stream instance */ static void snd_usb_audio_stream_free(struct snd_usb_stream *stream) { free_substream(&stream->substream[0]); free_substream(&stream->substream[1]); list_del(&stream->list); kfree(stream); } static void snd_usb_audio_pcm_free(struct snd_pcm *pcm) { struct snd_usb_stream *stream = pcm->private_data; if (stream) { stream->pcm = NULL; snd_usb_audio_stream_free(stream); } } /* * add this endpoint to the chip instance. * if a stream with the same endpoint already exists, append to it. * if not, create a new pcm stream. */ int snd_usb_add_audio_stream(struct snd_usb_audio *chip, int stream, struct audioformat *fp) { struct list_head *p; struct snd_usb_stream *as; struct snd_usb_substream *subs; struct snd_pcm *pcm; int err; list_for_each(p, &chip->pcm_list) { as = list_entry(p, struct snd_usb_stream, list); if (as->fmt_type != fp->fmt_type) continue; subs = &as->substream[stream]; if (!subs->endpoint) continue; if (subs->endpoint == fp->endpoint) { list_add_tail(&fp->list, &subs->fmt_list); subs->num_formats++; subs->formats |= fp->formats; return 0; } } /* look for an empty stream */ list_for_each(p, &chip->pcm_list) { as = list_entry(p, struct snd_usb_stream, list); if (as->fmt_type != fp->fmt_type) continue; subs = &as->substream[stream]; if (subs->endpoint) continue; err = snd_pcm_new_stream(as->pcm, stream, 1); if (err < 0) return err; snd_usb_init_substream(as, stream, fp); return 0; } /* create a new pcm */ as = kzalloc(sizeof(*as), GFP_KERNEL); if (!as) return -ENOMEM; as->pcm_index = chip->pcm_devs; as->chip = chip; as->fmt_type = fp->fmt_type; err = snd_pcm_new(chip->card, "USB Audio", chip->pcm_devs, stream == SNDRV_PCM_STREAM_PLAYBACK ? 1 : 0, stream == SNDRV_PCM_STREAM_PLAYBACK ? 
0 : 1, &pcm); if (err < 0) { kfree(as); return err; } as->pcm = pcm; pcm->private_data = as; pcm->private_free = snd_usb_audio_pcm_free; pcm->info_flags = 0; if (chip->pcm_devs > 0) sprintf(pcm->name, "USB Audio #%d", chip->pcm_devs); else strcpy(pcm->name, "USB Audio"); snd_usb_init_substream(as, stream, fp); list_add(&as->list, &chip->pcm_list); chip->pcm_devs++; snd_usb_proc_pcm_format_add(as); return 0; } static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip, struct usb_host_interface *alts, int protocol, int iface_no) { /* parsed with a v1 header here. that's ok as we only look at the * header first which is the same for both versions */ struct uac_iso_endpoint_descriptor *csep; struct usb_interface_descriptor *altsd = get_iface_desc(alts); int attributes = 0; csep = snd_usb_find_desc(alts->endpoint[0].extra, alts->endpoint[0].extralen, NULL, USB_DT_CS_ENDPOINT); /* Creamware Noah has this descriptor after the 2nd endpoint */ if (!csep && altsd->bNumEndpoints >= 2) csep = snd_usb_find_desc(alts->endpoint[1].extra, alts->endpoint[1].extralen, NULL, USB_DT_CS_ENDPOINT); if (!csep || csep->bLength < 7 || csep->bDescriptorSubtype != UAC_EP_GENERAL) { snd_printk(KERN_WARNING "%d:%u:%d : no or invalid" " class specific endpoint descriptor\n", chip->dev->devnum, iface_no, altsd->bAlternateSetting); return 0; } if (protocol == UAC_VERSION_1) { attributes = csep->bmAttributes; } else { struct uac2_iso_endpoint_descriptor *csep2 = (struct uac2_iso_endpoint_descriptor *) csep; attributes = csep->bmAttributes & UAC_EP_CS_ATTR_FILL_MAX; /* emulate the endpoint attributes of a v1 device */ if (csep2->bmControls & UAC2_CONTROL_PITCH) attributes |= UAC_EP_CS_ATTR_PITCH_CONTROL; } return attributes; } static struct uac2_input_terminal_descriptor * snd_usb_find_input_terminal_descriptor(struct usb_host_interface *ctrl_iface, int terminal_id) { struct uac2_input_terminal_descriptor *term = NULL; while ((term = snd_usb_find_csint_desc(ctrl_iface->extra, ctrl_iface->extralen, term, UAC_INPUT_TERMINAL))) { if (term->bTerminalID == terminal_id) return term; } return NULL; } static struct uac2_output_terminal_descriptor * snd_usb_find_output_terminal_descriptor(struct usb_host_interface *ctrl_iface, int terminal_id) { struct uac2_output_terminal_descriptor *term = NULL; while ((term = snd_usb_find_csint_desc(ctrl_iface->extra, ctrl_iface->extralen, term, UAC_OUTPUT_TERMINAL))) { if (term->bTerminalID == terminal_id) return term; } return NULL; } int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no) { struct usb_device *dev; struct usb_interface *iface; struct usb_host_interface *alts; struct usb_interface_descriptor *altsd; int i, altno, err, stream; int format = 0, num_channels = 0; struct audioformat *fp = NULL; int num, protocol, clock = 0; struct uac_format_type_i_continuous_descriptor *fmt; dev = chip->dev; /* parse the interface's altsettings */ iface = usb_ifnum_to_if(dev, iface_no); num = iface->num_altsetting; /* * Dallas DS4201 workaround: It presents 5 altsettings, but the last * one misses syncpipe, and does not produce any sound. 
*/ if (chip->usb_id == USB_ID(0x04fa, 0x4201)) num = 4; for (i = 0; i < num; i++) { alts = &iface->altsetting[i]; altsd = get_iface_desc(alts); protocol = altsd->bInterfaceProtocol; /* skip invalid one */ if ((altsd->bInterfaceClass != USB_CLASS_AUDIO && altsd->bInterfaceClass != USB_CLASS_VENDOR_SPEC) || (altsd->bInterfaceSubClass != USB_SUBCLASS_AUDIOSTREAMING && altsd->bInterfaceSubClass != USB_SUBCLASS_VENDOR_SPEC) || altsd->bNumEndpoints < 1 || le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize) == 0) continue; /* must be isochronous */ if ((get_endpoint(alts, 0)->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_ISOC) continue; /* check direction */ stream = (get_endpoint(alts, 0)->bEndpointAddress & USB_DIR_IN) ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK; altno = altsd->bAlternateSetting; if (snd_usb_apply_interface_quirk(chip, iface_no, altno)) continue; /* get audio formats */ switch (protocol) { default: snd_printdd(KERN_WARNING "%d:%u:%d: unknown interface protocol %#02x, assuming v1\n", dev->devnum, iface_no, altno, protocol); protocol = UAC_VERSION_1; /* fall through */ case UAC_VERSION_1: { struct uac1_as_header_descriptor *as = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_AS_GENERAL); if (!as) { snd_printk(KERN_ERR "%d:%u:%d : UAC_AS_GENERAL descriptor not found\n", dev->devnum, iface_no, altno); continue; } if (as->bLength < sizeof(*as)) { snd_printk(KERN_ERR "%d:%u:%d : invalid UAC_AS_GENERAL desc\n", dev->devnum, iface_no, altno); continue; } format = le16_to_cpu(as->wFormatTag); /* remember the format value */ break; } case UAC_VERSION_2: { struct uac2_input_terminal_descriptor *input_term; struct uac2_output_terminal_descriptor *output_term; struct uac2_as_header_descriptor *as = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_AS_GENERAL); if (!as) { snd_printk(KERN_ERR "%d:%u:%d : UAC_AS_GENERAL descriptor not found\n", dev->devnum, iface_no, altno); continue; } if (as->bLength < sizeof(*as)) { snd_printk(KERN_ERR "%d:%u:%d : invalid UAC_AS_GENERAL desc\n", dev->devnum, iface_no, altno); continue; } num_channels = as->bNrChannels; format = le32_to_cpu(as->bmFormats); /* lookup the terminal associated to this interface * to extract the clock */ input_term = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf, as->bTerminalLink); if (input_term) { clock = input_term->bCSourceID; break; } output_term = snd_usb_find_output_terminal_descriptor(chip->ctrl_intf, as->bTerminalLink); if (output_term) { clock = output_term->bCSourceID; break; } snd_printk(KERN_ERR "%d:%u:%d : bogus bTerminalLink %d\n", dev->devnum, iface_no, altno, as->bTerminalLink); continue; } } /* get format type */ fmt = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_FORMAT_TYPE); if (!fmt) { snd_printk(KERN_ERR "%d:%u:%d : no UAC_FORMAT_TYPE desc\n", dev->devnum, iface_no, altno); continue; } if (((protocol == UAC_VERSION_1) && (fmt->bLength < 8)) || ((protocol == UAC_VERSION_2) && (fmt->bLength < 6))) { snd_printk(KERN_ERR "%d:%u:%d : invalid UAC_FORMAT_TYPE desc\n", dev->devnum, iface_no, altno); continue; } /* * Blue Microphones workaround: The last altsetting is identical * with the previous one, except for a larger packet size, but * is actually a mislabeled two-channel setting; ignore it. 
*/ if (fmt->bNrChannels == 1 && fmt->bSubframeSize == 2 && altno == 2 && num == 3 && fp && fp->altsetting == 1 && fp->channels == 1 && fp->formats == SNDRV_PCM_FMTBIT_S16_LE && protocol == UAC_VERSION_1 && le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize) == fp->maxpacksize * 2) continue; fp = kzalloc(sizeof(*fp), GFP_KERNEL); if (! fp) { snd_printk(KERN_ERR "cannot malloc\n"); return -ENOMEM; } fp->iface = iface_no; fp->altsetting = altno; fp->altset_idx = i; fp->endpoint = get_endpoint(alts, 0)->bEndpointAddress; fp->ep_attr = get_endpoint(alts, 0)->bmAttributes; fp->datainterval = snd_usb_parse_datainterval(chip, alts); fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize); /* num_channels is only set for v2 interfaces */ fp->channels = num_channels; if (snd_usb_get_speed(dev) == USB_SPEED_HIGH) fp->maxpacksize = (((fp->maxpacksize >> 11) & 3) + 1) * (fp->maxpacksize & 0x7ff); fp->attributes = parse_uac_endpoint_attributes(chip, alts, protocol, iface_no); fp->clock = clock; /* some quirks for attributes here */ switch (chip->usb_id) { case USB_ID(0x0a92, 0x0053): /* AudioTrak Optoplay */ /* Optoplay sets the sample rate attribute although * it seems not supporting it in fact. */ fp->attributes &= ~UAC_EP_CS_ATTR_SAMPLE_RATE; break; case USB_ID(0x041e, 0x3020): /* Creative SB Audigy 2 NX */ case USB_ID(0x0763, 0x2003): /* M-Audio Audiophile USB */ /* doesn't set the sample rate attribute, but supports it */ fp->attributes |= UAC_EP_CS_ATTR_SAMPLE_RATE; break; case USB_ID(0x0763, 0x2001): /* M-Audio Quattro USB */ case USB_ID(0x0763, 0x2012): /* M-Audio Fast Track Pro USB */ case USB_ID(0x047f, 0x0ca1): /* plantronics headset */ case USB_ID(0x077d, 0x07af): /* Griffin iMic (note that there is an older model 77d:223) */ /* * plantronics headset and Griffin iMic have set adaptive-in * although it's really not... */ fp->ep_attr &= ~USB_ENDPOINT_SYNCTYPE; if (stream == SNDRV_PCM_STREAM_PLAYBACK) fp->ep_attr |= USB_ENDPOINT_SYNC_ADAPTIVE; else fp->ep_attr |= USB_ENDPOINT_SYNC_SYNC; break; } /* ok, let's parse further... */ if (snd_usb_parse_audio_format(chip, fp, format, fmt, stream, alts) < 0) { kfree(fp->rate_table); kfree(fp); fp = NULL; continue; } snd_printdd(KERN_INFO "%d:%u:%d: add audio endpoint %#x\n", dev->devnum, iface_no, altno, fp->endpoint); err = snd_usb_add_audio_stream(chip, stream, fp); if (err < 0) { kfree(fp->rate_table); kfree(fp); return err; } /* try to set the interface... */ usb_set_interface(chip->dev, iface_no, altno); snd_usb_init_pitch(chip, iface_no, alts, fp); snd_usb_init_sample_rate(chip, iface_no, alts, fp, fp->rate_max); } return 0; }
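/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * snd_usb_parse_audio_interface() above decodes the high-speed
 * wMaxPacketSize field: bits 10..0 carry the payload per transaction and
 * bits 12..11 the number of additional transactions per microframe.
 * The helper name below is hypothetical; it just restates that arithmetic
 * in one place.
 */
static u16 __maybe_unused example_hs_maxpacksize(u16 wmaxpacketsize)
{
	u16 payload = wmaxpacketsize & 0x7ff;			/* bits 10..0 */
	u16 transactions = ((wmaxpacketsize >> 11) & 3) + 1;	/* 1..3 per uframe */

	/* e.g. 0x1400 -> 3 transactions * 1024 bytes = 3072 bytes */
	return transactions * payload;
}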
gpl-2.0
tuxkids/kernel_ics
drivers/ata/pata_ns87410.c
3634
5057
/* * pata_ns87410.c - National Semiconductor 87410 PATA for new ATA layer * (C) 2006 Red Hat Inc * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_ns87410" #define DRV_VERSION "0.4.6" /** * ns87410_pre_reset - probe begin * @link: ATA link * @deadline: deadline jiffies for the operation * * Check enabled ports */ static int ns87410_pre_reset(struct ata_link *link, unsigned long deadline) { struct ata_port *ap = link->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); static const struct pci_bits ns87410_enable_bits[] = { { 0x43, 1, 0x08, 0x08 }, { 0x47, 1, 0x08, 0x08 } }; if (!pci_test_config_bits(pdev, &ns87410_enable_bits[ap->port_no])) return -ENOENT; return ata_sff_prereset(link, deadline); } /** * ns87410_set_piomode - set initial PIO mode data * @ap: ATA interface * @adev: ATA device * * Program timing data. This is kept per channel not per device, * and only affects the data port. */ static void ns87410_set_piomode(struct ata_port *ap, struct ata_device *adev) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); int port = 0x40 + 4 * ap->port_no; u8 idetcr, idefr; struct ata_timing at; static const u8 activebits[15] = { 0, 1, 2, 3, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7 }; static const u8 recoverbits[12] = { 0, 1, 2, 3, 4, 5, 6, 6, 7, 7, 7, 7 }; pci_read_config_byte(pdev, port + 3, &idefr); if (ata_pio_need_iordy(adev)) idefr |= 0x04; /* IORDY enable */ else idefr &= ~0x04; if (ata_timing_compute(adev, adev->pio_mode, &at, 30303, 1) < 0) { dev_printk(KERN_ERR, &pdev->dev, "unknown mode %d.\n", adev->pio_mode); return; } at.active = clamp_val(at.active, 2, 16) - 2; at.setup = clamp_val(at.setup, 1, 4) - 1; at.recover = clamp_val(at.recover, 1, 12) - 1; idetcr = (at.setup << 6) | (recoverbits[at.recover] << 3) | activebits[at.active]; pci_write_config_byte(pdev, port, idetcr); pci_write_config_byte(pdev, port + 3, idefr); /* We use ap->private_data as a pointer to the device currently loaded for timing */ ap->private_data = adev; } /** * ns87410_qc_issue - command issue * @qc: command pending * * Called when the libata layer is about to issue a command. We wrap * this interface so that we can load the correct ATA timings if * necessary. */ static unsigned int ns87410_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct ata_device *adev = qc->dev; /* If modes have been configured and the channel data is not loaded then load it. 
We have to check if pio_mode is set as the core code does not set adev->pio_mode to XFER_PIO_0 while probing as would be logical */ if (adev->pio_mode && adev != ap->private_data) ns87410_set_piomode(ap, adev); return ata_sff_qc_issue(qc); } static struct scsi_host_template ns87410_sht = { ATA_PIO_SHT(DRV_NAME), }; static struct ata_port_operations ns87410_port_ops = { .inherits = &ata_sff_port_ops, .qc_issue = ns87410_qc_issue, .cable_detect = ata_cable_40wire, .set_piomode = ns87410_set_piomode, .prereset = ns87410_pre_reset, }; static int ns87410_init_one(struct pci_dev *dev, const struct pci_device_id *id) { static const struct ata_port_info info = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO3, .port_ops = &ns87410_port_ops }; const struct ata_port_info *ppi[] = { &info, NULL }; return ata_pci_sff_init_one(dev, ppi, &ns87410_sht, NULL, 0); } static const struct pci_device_id ns87410[] = { { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_87410), }, { }, }; static struct pci_driver ns87410_pci_driver = { .name = DRV_NAME, .id_table = ns87410, .probe = ns87410_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM .suspend = ata_pci_device_suspend, .resume = ata_pci_device_resume, #endif }; static int __init ns87410_init(void) { return pci_register_driver(&ns87410_pci_driver); } static void __exit ns87410_exit(void) { pci_unregister_driver(&ns87410_pci_driver); } MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for Nat Semi 87410"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, ns87410); MODULE_VERSION(DRV_VERSION); module_init(ns87410_init); module_exit(ns87410_exit);
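/*
 * Editor's note -- illustrative sketch, not part of the original driver.
 * ns87410_set_piomode() packs the computed timings into the IDETCR byte as
 * setup (bits 7..6), recovery code (bits 5..3) and active code (bits 2..0).
 * Worked example, assuming the timing computation yielded setup=3,
 * active=10 and recover=6 clocks; the function name and the local copies of
 * the lookup tables exist only for illustration.
 */
static u8 __maybe_unused example_idetcr_pack(void)
{
	static const u8 activebits[15] = {
		0, 1, 2, 3, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7
	};
	static const u8 recoverbits[12] = {
		0, 1, 2, 3, 4, 5, 6, 6, 7, 7, 7, 7
	};
	u8 setup = clamp_val(3, 1, 4) - 1;	/* -> 2 */
	u8 active = clamp_val(10, 2, 16) - 2;	/* -> 8 */
	u8 recover = clamp_val(6, 1, 12) - 1;	/* -> 5 */

	/* (2 << 6) | (recoverbits[5] << 3) | activebits[8] == 0xae */
	return (setup << 6) | (recoverbits[recover] << 3) | activebits[active];
}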
gpl-2.0
lloydchang/ubuntu-oneiric
drivers/net/ne2.c
4146
22924
/* ne2.c: A NE/2 Ethernet Driver for Linux. */ /* Based on the NE2000 driver written by Donald Becker (1992-94). modified by Wim Dumon (Apr 1996) This software may be used and distributed according to the terms of the GNU General Public License, incorporated herein by reference. The author may be reached as wimpie@linux.cc.kuleuven.ac.be Currently supported: NE/2 This patch was never tested on other MCA-ethernet adapters, but it might work. Just give it a try and let me know if you have problems. Also mail me if it really works, please! Changelog: Mon Feb 3 16:26:02 MET 1997 - adapted the driver to work with the 2.1.25 kernel - multiple ne2 support (untested) - module support (untested) Fri Aug 28 00:18:36 CET 1998 (David Weinehall) - fixed a few minor typos - made the MODULE_PARM conditional (it only works with the v2.1.x kernels) - fixed the module support (Now it's working...) Mon Sep 7 19:01:44 CET 1998 (David Weinehall) - added support for Arco Electronics AE/2-card (experimental) Mon Sep 14 09:53:42 CET 1998 (David Weinehall) - added support for Compex ENET-16MC/P (experimental) Tue Sep 15 16:21:12 CET 1998 (David Weinehall, Magnus Jonsson, Tomas Ogren) - Miscellaneous bugfixes Tue Sep 19 16:21:12 CET 1998 (Magnus Jonsson) - Cleanup Wed Sep 23 14:33:34 CET 1998 (David Weinehall) - Restructuring and rewriting for v2.1.x compliance Wed Oct 14 17:19:21 CET 1998 (David Weinehall) - Added code that unregisters irq and proc-info - Version# bump Mon Nov 16 15:28:23 CET 1998 (Wim Dumon) - pass 'dev' as last parameter of request_irq instead of 'NULL' Wed Feb 7 21:24:00 CET 2001 (Alfred Arnold) - added support for the D-Link DE-320CT * WARNING ------- This is alpha-test software. It is not guaranteed to work. As a matter of fact, I'm quite sure there are *LOTS* of bugs in here. I would like to hear from you if you use this driver, even if it works. If it doesn't work, be sure to send me a mail with the problems ! */ static const char *version = "ne2.c:v0.91 Nov 16 1998 Wim Dumon <wimpie@kotnet.org>\n"; #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/mca-legacy.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/bitops.h> #include <linux/jiffies.h> #include <asm/system.h> #include <asm/io.h> #include <asm/dma.h> #include "8390.h" #define DRV_NAME "ne2" /* Some defines that people can play with if so inclined. */ /* Do we perform extra sanity checks on stuff ? */ /* #define NE_SANITY_CHECK */ /* Do we implement the read before write bugfix ? */ /* #define NE_RW_BUGFIX */ /* Do we have a non std. amount of memory? (in units of 256 byte pages) */ /* #define PACKETBUF_MEMSIZE 0x40 */ /* ---- No user-serviceable parts below ---- */ #define NE_BASE (dev->base_addr) #define NE_CMD 0x00 #define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */ #define NE_RESET 0x20 /* Issue a read to reset, a write to clear. 
*/ #define NE_IO_EXTENT 0x30 #define NE1SM_START_PG 0x20 /* First page of TX buffer */ #define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */ #define NESM_START_PG 0x40 /* First page of TX buffer */ #define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ /* From the .ADF file: */ static unsigned int addresses[7] __initdata = {0x1000, 0x2020, 0x8020, 0xa0a0, 0xb0b0, 0xc0c0, 0xc3d0}; static int irqs[4] __initdata = {3, 4, 5, 9}; /* From the D-Link ADF file: */ static unsigned int dlink_addresses[4] __initdata = {0x300, 0x320, 0x340, 0x360}; static int dlink_irqs[8] __initdata = {3, 4, 5, 9, 10, 11, 14, 15}; struct ne2_adapters_t { unsigned int id; char *name; }; static struct ne2_adapters_t ne2_adapters[] __initdata = { { 0x6354, "Arco Ethernet Adapter AE/2" }, { 0x70DE, "Compex ENET-16 MC/P" }, { 0x7154, "Novell Ethernet Adapter NE/2" }, { 0x56ea, "D-Link DE-320CT" }, { 0x0000, NULL } }; extern int netcard_probe(struct net_device *dev); static int ne2_probe1(struct net_device *dev, int slot); static void ne_reset_8390(struct net_device *dev); static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page); static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset); static void ne_block_output(struct net_device *dev, const int count, const unsigned char *buf, const int start_page); /* * special code to read the DE-320's MAC address EEPROM. In contrast to a * standard NE design, this is a serial EEPROM (93C46) that has to be read * bit by bit. The EEPROM control port at base + 0x1e has the following * layout: * * Bit 0 = Data out (read from EEPROM) * Bit 1 = Data in (write to EEPROM) * Bit 2 = Clock * Bit 3 = Chip Select * Bit 7 = ~50 kHz clock for defined delays * */ static void __init dlink_put_eeprom(unsigned char value, unsigned int addr) { int z; unsigned char v1, v2; /* write the value to the NIC EEPROM register */ outb(value, addr + 0x1e); /* now wait for the clock line to toggle twice. Effectively, we are waiting (at least) for one clock cycle */ for (z = 0; z < 2; z++) { do { v1 = inb(addr + 0x1e); v2 = inb(addr + 0x1e); } while (!((v1 ^ v2) & 0x80)); } } static void __init dlink_send_eeprom_bit(unsigned int bit, unsigned int addr) { /* shift data bit into correct position */ bit = bit << 1; /* write value, keep clock line high for two cycles */ dlink_put_eeprom(0x09 | bit, addr); dlink_put_eeprom(0x0d | bit, addr); dlink_put_eeprom(0x0d | bit, addr); dlink_put_eeprom(0x09 | bit, addr); } static void __init dlink_send_eeprom_word(unsigned int value, unsigned int len, unsigned int addr) { int z; /* adjust bits so that they are left-aligned in a 16-bit-word */ value = value << (16 - len); /* shift bits out to the EEPROM */ for (z = 0; z < len; z++) { dlink_send_eeprom_bit((value & 0x8000) >> 15, addr); value = value << 1; } } static unsigned int __init dlink_get_eeprom(unsigned int eeaddr, unsigned int addr) { int z; unsigned int value = 0; /* pull the CS line low for a moment. This resets the EEPROM- internal logic, and makes it ready for a new command. */ dlink_put_eeprom(0x01, addr); dlink_put_eeprom(0x09, addr); /* send one start bit, read command (1 - 0), plus the address to the EEPROM */ dlink_send_eeprom_word(0x0180 | (eeaddr & 0x3f), 9, addr); /* get the data word. 
We clock by sending 0s to the EEPROM, which get ignored during the read process */ for (z = 0; z < 16; z++) { dlink_send_eeprom_bit(0, addr); value = (value << 1) | (inb(addr + 0x1e) & 0x01); } return value; } /* * Note that at boot, this probe only picks up one card at a time. */ static int __init do_ne2_probe(struct net_device *dev) { static int current_mca_slot = -1; int i; int adapter_found = 0; /* Do not check any supplied i/o locations. POS registers usually don't fail :) */ /* MCA cards have POS registers. Autodetecting MCA cards is extremely simple. Just search for the card. */ for(i = 0; (ne2_adapters[i].name != NULL) && !adapter_found; i++) { current_mca_slot = mca_find_unused_adapter(ne2_adapters[i].id, 0); if((current_mca_slot != MCA_NOTFOUND) && !adapter_found) { int res; mca_set_adapter_name(current_mca_slot, ne2_adapters[i].name); mca_mark_as_used(current_mca_slot); res = ne2_probe1(dev, current_mca_slot); if (res) mca_mark_as_unused(current_mca_slot); return res; } } return -ENODEV; } #ifndef MODULE struct net_device * __init ne2_probe(int unit) { struct net_device *dev = alloc_eip_netdev(); int err; if (!dev) return ERR_PTR(-ENOMEM); sprintf(dev->name, "eth%d", unit); netdev_boot_setup_check(dev); err = do_ne2_probe(dev); if (err) goto out; return dev; out: free_netdev(dev); return ERR_PTR(err); } #endif static int ne2_procinfo(char *buf, int slot, struct net_device *dev) { int len=0; len += sprintf(buf+len, "The NE/2 Ethernet Adapter\n" ); len += sprintf(buf+len, "Driver written by Wim Dumon "); len += sprintf(buf+len, "<wimpie@kotnet.org>\n"); len += sprintf(buf+len, "Modified by "); len += sprintf(buf+len, "David Weinehall <tao@acc.umu.se>\n"); len += sprintf(buf+len, "and by Magnus Jonsson <bigfoot@acc.umu.se>\n"); len += sprintf(buf+len, "Based on the original NE2000 drivers\n" ); len += sprintf(buf+len, "Base IO: %#x\n", (unsigned int)dev->base_addr); len += sprintf(buf+len, "IRQ : %d\n", dev->irq); len += sprintf(buf+len, "HW addr : %pM\n", dev->dev_addr); return len; } static int __init ne2_probe1(struct net_device *dev, int slot) { int i, base_addr, irq, retval; unsigned char POS; unsigned char SA_prom[32]; const char *name = "NE/2"; int start_page, stop_page; static unsigned version_printed; if (ei_debug && version_printed++ == 0) printk(version); printk("NE/2 ethercard found in slot %d:", slot); /* Read base IO and IRQ from the POS-registers */ POS = mca_read_stored_pos(slot, 2); if(!(POS % 2)) { printk(" disabled.\n"); return -ENODEV; } /* handle different POS register structure for D-Link card */ if (mca_read_stored_pos(slot, 0) == 0xea) { base_addr = dlink_addresses[(POS >> 5) & 0x03]; irq = dlink_irqs[(POS >> 2) & 0x07]; } else { i = (POS & 0xE)>>1; /* printk("Hallelujah sdog, if there is a 1 after the arrow then 1 - 1 == 0" " and it should work -> %d\n", i); The above line was for remote testing, thanx to sdog ... */ base_addr = addresses[i - 1]; irq = irqs[(POS & 0x60)>>5]; } if (!request_region(base_addr, NE_IO_EXTENT, DRV_NAME)) return -EBUSY; #ifdef DEBUG printk("POS info : pos 2 = %#x ; base = %#x ; irq = %d\n", POS, base_addr, irq); #endif #ifndef CRYNWR_WAY /* Reset the card the way they do it in the Crynwr packet driver */ for (i=0; i<8; i++) outb(0x0, base_addr + NE_RESET); inb(base_addr + NE_RESET); outb(0x21, base_addr + NE_CMD); if (inb(base_addr + NE_CMD) != 0x21) { printk("NE/2 adapter not responding\n"); retval = -ENODEV; goto out; } /* In the crynwr sources they do a RAM-test here. I skip it. I suppose my RAM is okay. 
Suppose your memory is broken. Then this test should fail and you won't be able to use your card. But if I do not test, you won't be able to use your card either. So this test won't help you. */ #else /* _I_ never tested it this way .. Go ahead and try ...*/ /* Reset card. Who knows what dain-bramaged state it was left in. */ { unsigned long reset_start_time = jiffies; /* DON'T change these to inb_p/outb_p or reset will fail on clones.. */ outb(inb(base_addr + NE_RESET), base_addr + NE_RESET); while ((inb_p(base_addr + EN0_ISR) & ENISR_RESET) == 0) if (time_after(jiffies, reset_start_time + 2*HZ/100)) { printk(" not found (no reset ack).\n"); retval = -ENODEV; goto out; } outb_p(0xff, base_addr + EN0_ISR); /* Ack all intr. */ } #endif /* Read the 16 bytes of station address PROM. We must first initialize registers, similar to NS8390p_init(eifdev, 0). We can't reliably read the SAPROM address without this. (I learned the hard way!). */ { struct { unsigned char value, offset; } program_seq[] = { /* Select page 0 */ {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, {0x49, EN0_DCFG}, /* Set WORD-wide (0x49) access. */ {0x00, EN0_RCNTLO}, /* Clear the count regs. */ {0x00, EN0_RCNTHI}, {0x00, EN0_IMR}, /* Mask completion irq. */ {0xFF, EN0_ISR}, {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */ {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */ {32, EN0_RCNTLO}, {0x00, EN0_RCNTHI}, {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */ {0x00, EN0_RSARHI}, {E8390_RREAD+E8390_START, E8390_CMD}, }; for (i = 0; i < ARRAY_SIZE(program_seq); i++) outb_p(program_seq[i].value, base_addr + program_seq[i].offset); } for(i = 0; i < 6 /*sizeof(SA_prom)*/; i+=1) { SA_prom[i] = inb(base_addr + NE_DATAPORT); } /* I don't know whether the previous sequence includes the general board reset procedure, so better don't omit it and just overwrite the garbage read from a DE-320 with correct stuff. */ if (mca_read_stored_pos(slot, 0) == 0xea) { unsigned int v; for (i = 0; i < 3; i++) { v = dlink_get_eeprom(i, base_addr); SA_prom[(i << 1) ] = v & 0xff; SA_prom[(i << 1) + 1] = (v >> 8) & 0xff; } } start_page = NESM_START_PG; stop_page = NESM_STOP_PG; dev->irq=irq; /* Snarf the interrupt now. There's no point in waiting since we cannot share and the board will usually be enabled. */ retval = request_irq(dev->irq, eip_interrupt, 0, DRV_NAME, dev); if (retval) { printk (" unable to get IRQ %d (irqval=%d).\n", dev->irq, retval); goto out; } dev->base_addr = base_addr; for(i = 0; i < ETHER_ADDR_LEN; i++) dev->dev_addr[i] = SA_prom[i]; printk(" %pM\n", dev->dev_addr); printk("%s: %s found at %#x, using IRQ %d.\n", dev->name, name, base_addr, dev->irq); mca_set_adapter_procfn(slot, (MCA_ProcFn) ne2_procinfo, dev); ei_status.name = name; ei_status.tx_start_page = start_page; ei_status.stop_page = stop_page; ei_status.word16 = (2 == 2); ei_status.rx_start_page = start_page + TX_PAGES; #ifdef PACKETBUF_MEMSIZE /* Allow the packet buffer size to be overridden by know-it-alls. */ ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE; #endif ei_status.reset_8390 = &ne_reset_8390; ei_status.block_input = &ne_block_input; ei_status.block_output = &ne_block_output; ei_status.get_8390_hdr = &ne_get_8390_hdr; ei_status.priv = slot; dev->netdev_ops = &eip_netdev_ops; NS8390p_init(dev, 0); retval = register_netdev(dev); if (retval) goto out1; return 0; out1: mca_set_adapter_procfn( ei_status.priv, NULL, NULL); free_irq(dev->irq, dev); out: release_region(base_addr, NE_IO_EXTENT); return retval; } /* Hard reset the card. 
This used to pause for the same period that a 8390 reset command required, but that shouldn't be necessary. */ static void ne_reset_8390(struct net_device *dev) { unsigned long reset_start_time = jiffies; if (ei_debug > 1) printk("resetting the 8390 t=%ld...", jiffies); /* DON'T change these to inb_p/outb_p or reset will fail on clones. */ outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET); ei_status.txing = 0; ei_status.dmaing = 0; /* This check _should_not_ be necessary, omit eventually. */ while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0) if (time_after(jiffies, reset_start_time + 2*HZ/100)) { printk("%s: ne_reset_8390() did not complete.\n", dev->name); break; } outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */ } /* Grab the 8390 specific header. Similar to the block_input routine, but we don't need to be concerned with ring wrap as the header will be at the start of a page, so we optimize accordingly. */ static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) { int nic_base = dev->base_addr; /* This *shouldn't* happen. If it does, it's the last thing you'll see */ if (ei_status.dmaing) { printk("%s: DMAing conflict in ne_get_8390_hdr " "[DMAstat:%d][irqlock:%d].\n", dev->name, ei_status.dmaing, ei_status.irqlock); return; } ei_status.dmaing |= 0x01; outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO); outb_p(0, nic_base + EN0_RCNTHI); outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */ outb_p(ring_page, nic_base + EN0_RSARHI); outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD); if (ei_status.word16) insw(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1); else insb(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)); outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ ei_status.dmaing &= ~0x01; } /* Block input and output, similar to the Crynwr packet driver. If you are porting to a new ethercard, look at the packet driver source for hints. The NEx000 doesn't share the on-board packet memory -- you have to put the packet out through the "remote DMA" dataport using outb. */ static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) { #ifdef NE_SANITY_CHECK int xfer_count = count; #endif int nic_base = dev->base_addr; char *buf = skb->data; /* This *shouldn't* happen. If it does, it's the last thing you'll see */ if (ei_status.dmaing) { printk("%s: DMAing conflict in ne_block_input " "[DMAstat:%d][irqlock:%d].\n", dev->name, ei_status.dmaing, ei_status.irqlock); return; } ei_status.dmaing |= 0x01; outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); outb_p(count & 0xff, nic_base + EN0_RCNTLO); outb_p(count >> 8, nic_base + EN0_RCNTHI); outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO); outb_p(ring_offset >> 8, nic_base + EN0_RSARHI); outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD); if (ei_status.word16) { insw(NE_BASE + NE_DATAPORT,buf,count>>1); if (count & 0x01) { buf[count-1] = inb(NE_BASE + NE_DATAPORT); #ifdef NE_SANITY_CHECK xfer_count++; #endif } } else { insb(NE_BASE + NE_DATAPORT, buf, count); } #ifdef NE_SANITY_CHECK /* This was for the ALPHA version only, but enough people have been encountering problems so it is still here. If you see this message you either 1) have a slightly incompatible clone or 2) have noise/speed problems with your bus. */ if (ei_debug > 1) { /* DMA termination address check... 
*/ int addr, tries = 20; do { /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here -- it's broken for Rx on some cards! */ int high = inb_p(nic_base + EN0_RSARHI); int low = inb_p(nic_base + EN0_RSARLO); addr = (high << 8) + low; if (((ring_offset + xfer_count) & 0xff) == low) break; } while (--tries > 0); if (tries <= 0) printk("%s: RX transfer address mismatch," "%#4.4x (expected) vs. %#4.4x (actual).\n", dev->name, ring_offset + xfer_count, addr); } #endif outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ ei_status.dmaing &= ~0x01; } static void ne_block_output(struct net_device *dev, int count, const unsigned char *buf, const int start_page) { int nic_base = NE_BASE; unsigned long dma_start; #ifdef NE_SANITY_CHECK int retries = 0; #endif /* Round the count up for word writes. Do we need to do this? What effect will an odd byte count have on the 8390? I should check someday. */ if (ei_status.word16 && (count & 0x01)) count++; /* This *shouldn't* happen. If it does, it's the last thing you'll see */ if (ei_status.dmaing) { printk("%s: DMAing conflict in ne_block_output." "[DMAstat:%d][irqlock:%d]\n", dev->name, ei_status.dmaing, ei_status.irqlock); return; } ei_status.dmaing |= 0x01; /* We should already be in page 0, but to be safe... */ outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD); #ifdef NE_SANITY_CHECK retry: #endif #ifdef NE8390_RW_BUGFIX /* Handle the read-before-write bug the same way as the Crynwr packet driver -- the NatSemi method doesn't work. Actually this doesn't always work either, but if you have problems with your NEx000 this is better than nothing! */ outb_p(0x42, nic_base + EN0_RCNTLO); outb_p(0x00, nic_base + EN0_RCNTHI); outb_p(0x42, nic_base + EN0_RSARLO); outb_p(0x00, nic_base + EN0_RSARHI); outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD); /* Make certain that the dummy read has occurred. */ SLOW_DOWN_IO; SLOW_DOWN_IO; SLOW_DOWN_IO; #endif outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Now the normal output. */ outb_p(count & 0xff, nic_base + EN0_RCNTLO); outb_p(count >> 8, nic_base + EN0_RCNTHI); outb_p(0x00, nic_base + EN0_RSARLO); outb_p(start_page, nic_base + EN0_RSARHI); outb_p(E8390_RWRITE+E8390_START, nic_base + NE_CMD); if (ei_status.word16) { outsw(NE_BASE + NE_DATAPORT, buf, count>>1); } else { outsb(NE_BASE + NE_DATAPORT, buf, count); } dma_start = jiffies; #ifdef NE_SANITY_CHECK /* This was for the ALPHA version only, but enough people have been encountering problems so it is still here. */ if (ei_debug > 1) { /* DMA termination address check... */ int addr, tries = 20; do { int high = inb_p(nic_base + EN0_RSARHI); int low = inb_p(nic_base + EN0_RSARLO); addr = (high << 8) + low; if ((start_page << 8) + count == addr) break; } while (--tries > 0); if (tries <= 0) { printk("%s: Tx packet transfer address mismatch," "%#4.4x (expected) vs. %#4.4x (actual).\n", dev->name, (start_page << 8) + count, addr); if (retries++ == 0) goto retry; } } #endif while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0) if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */ printk("%s: timeout waiting for Tx RDC.\n", dev->name); ne_reset_8390(dev); NS8390p_init(dev, 1); break; } outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. 
*/ ei_status.dmaing &= ~0x01; } #ifdef MODULE #define MAX_NE_CARDS 4 /* Max number of NE cards per module */ static struct net_device *dev_ne[MAX_NE_CARDS]; static int io[MAX_NE_CARDS]; static int irq[MAX_NE_CARDS]; static int bad[MAX_NE_CARDS]; /* 0xbad = bad sig or no reset ack */ MODULE_LICENSE("GPL"); module_param_array(io, int, NULL, 0); module_param_array(irq, int, NULL, 0); module_param_array(bad, int, NULL, 0); MODULE_PARM_DESC(io, "(ignored)"); MODULE_PARM_DESC(irq, "(ignored)"); MODULE_PARM_DESC(bad, "(ignored)"); /* Module code fixed by David Weinehall */ int __init init_module(void) { struct net_device *dev; int this_dev, found = 0; for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { dev = alloc_eip_netdev(); if (!dev) break; dev->irq = irq[this_dev]; dev->mem_end = bad[this_dev]; dev->base_addr = io[this_dev]; if (do_ne2_probe(dev) == 0) { dev_ne[found++] = dev; continue; } free_netdev(dev); break; } if (found) return 0; printk(KERN_WARNING "ne2.c: No NE/2 card found\n"); return -ENXIO; } static void cleanup_card(struct net_device *dev) { mca_mark_as_unused(ei_status.priv); mca_set_adapter_procfn( ei_status.priv, NULL, NULL); free_irq(dev->irq, dev); release_region(dev->base_addr, NE_IO_EXTENT); } void __exit cleanup_module(void) { int this_dev; for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { struct net_device *dev = dev_ne[this_dev]; if (dev) { unregister_netdev(dev); cleanup_card(dev); free_netdev(dev); } } } #endif /* MODULE */
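/*
 * Editor's note -- illustrative sketch, not part of the original driver.
 * The DE-320 path in ne2_probe1() assembles the station address from three
 * little-endian 16-bit words read through the bit-banged 93C46 interface
 * described above dlink_put_eeprom().  The helper name is hypothetical.
 */
static void __init __maybe_unused example_dlink_read_mac(unsigned int base_addr,
							 unsigned char mac[6])
{
	unsigned int i, v;

	for (i = 0; i < 3; i++) {
		v = dlink_get_eeprom(i, base_addr);	/* EEPROM words 0..2 */
		mac[(i << 1)] = v & 0xff;		/* low byte first */
		mac[(i << 1) + 1] = (v >> 8) & 0xff;
	}
}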
gpl-2.0
zydroid/kernel-g9208-s6
drivers/staging/vme/devices/vme_pio2_cntr.c
4402
1706
/* * GE PIO2 Counter Driver * * Author: Martyn Welch <martyn.welch@ge.com> * Copyright 2009 GE Intelligent Platforms Embedded Systems, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * The PIO-2 has 6 counters; currently this code just disables the interrupts * and leaves them alone. * */ #include <linux/device.h> #include <linux/types.h> #include <linux/gpio.h> #include <linux/vme.h> #include "vme_pio2.h" static int pio2_cntr_irq_set(struct pio2_card *card, int id) { int retval; u8 data; data = PIO2_CNTR_SC_DEV[id] | PIO2_CNTR_RW_BOTH | card->cntr[id].mode; retval = vme_master_write(card->window, &data, 1, PIO2_CNTR_CTRL[id]); if (retval < 0) return retval; data = card->cntr[id].count & 0xFF; retval = vme_master_write(card->window, &data, 1, PIO2_CNTR_DATA[id]); if (retval < 0) return retval; data = (card->cntr[id].count >> 8) & 0xFF; retval = vme_master_write(card->window, &data, 1, PIO2_CNTR_DATA[id]); if (retval < 0) return retval; return 0; } int pio2_cntr_reset(struct pio2_card *card) { int i, retval = 0; u8 reg; /* Clear down all timers */ for (i = 0; i < 6; i++) { card->cntr[i].mode = PIO2_CNTR_MODE5; card->cntr[i].count = 0; retval = pio2_cntr_irq_set(card, i); if (retval < 0) return retval; } /* Ensure all counter interrupts are cleared */ do { retval = vme_master_read(card->window, &reg, 1, PIO2_REGS_INT_STAT_CNTR); if (retval < 0) return retval; } while (reg != 0); return retval; }
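/*
 * Editor's note -- illustrative sketch, not part of the original driver.
 * Programming one counter follows the classic 8253-style sequence used by
 * pio2_cntr_irq_set() above: control word first (counter select |
 * read/write both bytes | mode), then the count low byte, then the count
 * high byte.  Hypothetical helper reusing that routine.
 */
static int __maybe_unused example_cntr_load(struct pio2_card *card, int id,
					    u16 count)
{
	card->cntr[id].mode = PIO2_CNTR_MODE5;	/* as in pio2_cntr_reset() */
	card->cntr[id].count = count;

	return pio2_cntr_irq_set(card, id);
}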
gpl-2.0
Split-Screen/android_kernel_lge_gproj
arch/arm/plat-mxc/devices/platform-mx2-camera.c
4914
2340
/* * Copyright (C) 2010 Pengutronix * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de> * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ #include <mach/hardware.h> #include <mach/devices-common.h> #define imx_mx2_camera_data_entry_single(soc) \ { \ .iobasecsi = soc ## _CSI_BASE_ADDR, \ .iosizecsi = SZ_4K, \ .irqcsi = soc ## _INT_CSI, \ } #define imx_mx2_camera_data_entry_single_emma(soc) \ { \ .iobasecsi = soc ## _CSI_BASE_ADDR, \ .iosizecsi = SZ_32, \ .irqcsi = soc ## _INT_CSI, \ .iobaseemmaprp = soc ## _EMMAPRP_BASE_ADDR, \ .iosizeemmaprp = SZ_32, \ .irqemmaprp = soc ## _INT_EMMAPRP, \ } #ifdef CONFIG_SOC_IMX25 const struct imx_mx2_camera_data imx25_mx2_camera_data __initconst = imx_mx2_camera_data_entry_single(MX25); #endif /* ifdef CONFIG_SOC_IMX25 */ #ifdef CONFIG_SOC_IMX27 const struct imx_mx2_camera_data imx27_mx2_camera_data __initconst = imx_mx2_camera_data_entry_single_emma(MX27); #endif /* ifdef CONFIG_SOC_IMX27 */ struct platform_device *__init imx_add_mx2_camera( const struct imx_mx2_camera_data *data, const struct mx2_camera_platform_data *pdata) { struct resource res[] = { { .start = data->iobasecsi, .end = data->iobasecsi + data->iosizecsi - 1, .flags = IORESOURCE_MEM, }, { .start = data->irqcsi, .end = data->irqcsi, .flags = IORESOURCE_IRQ, }, { .start = data->iobaseemmaprp, .end = data->iobaseemmaprp + data->iosizeemmaprp - 1, .flags = IORESOURCE_MEM, }, { .start = data->irqemmaprp, .end = data->irqemmaprp, .flags = IORESOURCE_IRQ, }, }; return imx_add_platform_device_dmamask("mx2-camera", 0, res, data->iobaseemmaprp ? 4 : 2, pdata, sizeof(*pdata), DMA_BIT_MASK(32)); } struct platform_device *__init imx_add_mx2_emmaprp( const struct imx_mx2_camera_data *data) { struct resource res[] = { { .start = data->iobaseemmaprp, .end = data->iobaseemmaprp + data->iosizeemmaprp - 1, .flags = IORESOURCE_MEM, }, { .start = data->irqemmaprp, .end = data->irqemmaprp, .flags = IORESOURCE_IRQ, }, }; return imx_add_platform_device_dmamask("m2m-emmaprp", 0, res, 2, NULL, 0, DMA_BIT_MASK(32)); }
gpl-2.0
DevSwift/Kernel-3.4-NovaThor
arch/arm/mach-prima2/rtciobrg.c
5170
3477
/* * RTC I/O Bridge interfaces for CSR SiRFprimaII * The ARM accesses the registers of SYSRTC, GPSRTC and PWRC through this module * * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. * * Licensed under GPLv2 or later. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_device.h> #include <linux/of_platform.h> #define SIRFSOC_CPUIOBRG_CTRL 0x00 #define SIRFSOC_CPUIOBRG_WRBE 0x04 #define SIRFSOC_CPUIOBRG_ADDR 0x08 #define SIRFSOC_CPUIOBRG_DATA 0x0c /* * the suspend asm code will access this address to put the system into deep sleep * after the DRAM enters self-refresh */ void __iomem *sirfsoc_rtciobrg_base; static DEFINE_SPINLOCK(rtciobrg_lock); /* * symbols without locking are only used by the suspend asm code, * and these symbols are not exported either */ void sirfsoc_rtc_iobrg_wait_sync(void) { while (readl_relaxed(sirfsoc_rtciobrg_base + SIRFSOC_CPUIOBRG_CTRL)) cpu_relax(); } void sirfsoc_rtc_iobrg_besyncing(void) { unsigned long flags; spin_lock_irqsave(&rtciobrg_lock, flags); sirfsoc_rtc_iobrg_wait_sync(); spin_unlock_irqrestore(&rtciobrg_lock, flags); } EXPORT_SYMBOL_GPL(sirfsoc_rtc_iobrg_besyncing); u32 __sirfsoc_rtc_iobrg_readl(u32 addr) { sirfsoc_rtc_iobrg_wait_sync(); writel_relaxed(0x00, sirfsoc_rtciobrg_base + SIRFSOC_CPUIOBRG_WRBE); writel_relaxed(addr, sirfsoc_rtciobrg_base + SIRFSOC_CPUIOBRG_ADDR); writel_relaxed(0x01, sirfsoc_rtciobrg_base + SIRFSOC_CPUIOBRG_CTRL); sirfsoc_rtc_iobrg_wait_sync(); return readl_relaxed(sirfsoc_rtciobrg_base + SIRFSOC_CPUIOBRG_DATA); } u32 sirfsoc_rtc_iobrg_readl(u32 addr) { unsigned long flags, val; spin_lock_irqsave(&rtciobrg_lock, flags); val = __sirfsoc_rtc_iobrg_readl(addr); spin_unlock_irqrestore(&rtciobrg_lock, flags); return val; } EXPORT_SYMBOL_GPL(sirfsoc_rtc_iobrg_readl); void sirfsoc_rtc_iobrg_pre_writel(u32 val, u32 addr) { sirfsoc_rtc_iobrg_wait_sync(); writel_relaxed(0xf1, sirfsoc_rtciobrg_base + SIRFSOC_CPUIOBRG_WRBE); writel_relaxed(addr, sirfsoc_rtciobrg_base + SIRFSOC_CPUIOBRG_ADDR); writel_relaxed(val, sirfsoc_rtciobrg_base + SIRFSOC_CPUIOBRG_DATA); } void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr) { unsigned long flags; spin_lock_irqsave(&rtciobrg_lock, flags); sirfsoc_rtc_iobrg_pre_writel(val, addr); writel_relaxed(0x01, sirfsoc_rtciobrg_base + SIRFSOC_CPUIOBRG_CTRL); sirfsoc_rtc_iobrg_wait_sync(); spin_unlock_irqrestore(&rtciobrg_lock, flags); } EXPORT_SYMBOL_GPL(sirfsoc_rtc_iobrg_writel); static const struct of_device_id rtciobrg_ids[] = { { .compatible = "sirf,prima2-rtciobg" }, {} }; static int __devinit sirfsoc_rtciobrg_probe(struct platform_device *op) { struct device_node *np = op->dev.of_node; sirfsoc_rtciobrg_base = of_iomap(np, 0); if (!sirfsoc_rtciobrg_base) panic("unable to map rtc iobrg registers\n"); return 0; } static struct platform_driver sirfsoc_rtciobrg_driver = { .probe = sirfsoc_rtciobrg_probe, .driver = { .name = "sirfsoc-rtciobrg", .owner = THIS_MODULE, .of_match_table = rtciobrg_ids, }, }; static int __init sirfsoc_rtciobrg_init(void) { return platform_driver_register(&sirfsoc_rtciobrg_driver); } postcore_initcall(sirfsoc_rtciobrg_init); MODULE_AUTHOR("Zhiwu Song <zhiwu.song@csr.com>, " "Barry Song <baohua.song@csr.com>"); MODULE_DESCRIPTION("CSR SiRFprimaII rtc io bridge"); MODULE_LICENSE("GPL");
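/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * A read-modify-write helper built on the exported accessors, showing the
 * intended use of the bridge protocol implemented above.  The name is
 * hypothetical; note that each accessor takes the lock on its own, so the
 * read and the write are not atomic as a pair.
 */
static void __maybe_unused example_rtc_iobrg_update_bits(u32 addr, u32 mask,
							 u32 val)
{
	u32 tmp = sirfsoc_rtc_iobrg_readl(addr);

	tmp = (tmp & ~mask) | (val & mask);
	sirfsoc_rtc_iobrg_writel(tmp, addr);
}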
gpl-2.0
MoKee/android_kernel_xiaomi_msm8226-common
drivers/hid/hid-samsung.c
6450
6323
/* * HID driver for some samsung "special" devices * * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina * Copyright (c) 2007 Paul Walmsley * Copyright (c) 2008 Jiri Slaby * Copyright (c) 2010 Don Prince <dhprince.devel@yahoo.co.uk> * * * This driver supports several HID devices: * * [0419:0001] Samsung IrDA remote controller (reports as Cypress USB Mouse). * various hid report fixups for different variants. * * [0419:0600] Creative Desktop Wireless 6000 keyboard/mouse combo * several key mappings used from the consumer usage page * deviate from the USB HUT 1.12 standard. * */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ #include <linux/device.h> #include <linux/usb.h> #include <linux/hid.h> #include <linux/module.h> #include "hid-ids.h" /* * There are several variants for 0419:0001: * * 1. 184 byte report descriptor * Vendor specific report #4 has a size of 48 bit, * and therefore is not accepted when inspecting the descriptors. * As a workaround we reinterpret the report as: * Variable type, count 6, size 8 bit, log. maximum 255 * The burden to reconstruct the data is moved into user space. * * 2. 203 byte report descriptor * Report #4 has an array field with logical range 0..18 instead of 1..15. * * 3. 135 byte report descriptor * Report #4 has an array field with logical range 0..17 instead of 1..14. * * 4. 171 byte report descriptor * Report #3 has an array field with logical range 0..1 instead of 1..3. 
*/ static inline void samsung_irda_dev_trace(struct hid_device *hdev, unsigned int rsize) { hid_info(hdev, "fixing up Samsung IrDA %d byte report descriptor\n", rsize); } static __u8 *samsung_irda_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize == 184 && rdesc[175] == 0x25 && rdesc[176] == 0x40 && rdesc[177] == 0x75 && rdesc[178] == 0x30 && rdesc[179] == 0x95 && rdesc[180] == 0x01 && rdesc[182] == 0x40) { samsung_irda_dev_trace(hdev, 184); rdesc[176] = 0xff; rdesc[178] = 0x08; rdesc[180] = 0x06; rdesc[182] = 0x42; } else if (*rsize == 203 && rdesc[192] == 0x15 && rdesc[193] == 0x0 && rdesc[194] == 0x25 && rdesc[195] == 0x12) { samsung_irda_dev_trace(hdev, 203); rdesc[193] = 0x1; rdesc[195] = 0xf; } else if (*rsize == 135 && rdesc[124] == 0x15 && rdesc[125] == 0x0 && rdesc[126] == 0x25 && rdesc[127] == 0x11) { samsung_irda_dev_trace(hdev, 135); rdesc[125] = 0x1; rdesc[127] = 0xe; } else if (*rsize == 171 && rdesc[160] == 0x15 && rdesc[161] == 0x0 && rdesc[162] == 0x25 && rdesc[163] == 0x01) { samsung_irda_dev_trace(hdev, 171); rdesc[161] = 0x1; rdesc[163] = 0x3; } return rdesc; } #define samsung_kbd_mouse_map_key_clear(c) \ hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c)) static int samsung_kbd_mouse_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); unsigned short ifnum = intf->cur_altsetting->desc.bInterfaceNumber; if (1 != ifnum || HID_UP_CONSUMER != (usage->hid & HID_USAGE_PAGE)) return 0; dbg_hid("samsung wireless keyboard/mouse input mapping event [0x%x]\n", usage->hid & HID_USAGE); switch (usage->hid & HID_USAGE) { /* report 2 */ case 0x183: samsung_kbd_mouse_map_key_clear(KEY_MEDIA); break; case 0x195: samsung_kbd_mouse_map_key_clear(KEY_EMAIL); break; case 0x196: samsung_kbd_mouse_map_key_clear(KEY_CALC); break; case 0x197: samsung_kbd_mouse_map_key_clear(KEY_COMPUTER); break; case 0x22b: samsung_kbd_mouse_map_key_clear(KEY_SEARCH); break; case 0x22c: samsung_kbd_mouse_map_key_clear(KEY_WWW); break; case 0x22d: samsung_kbd_mouse_map_key_clear(KEY_BACK); break; case 0x22e: samsung_kbd_mouse_map_key_clear(KEY_FORWARD); break; case 0x22f: samsung_kbd_mouse_map_key_clear(KEY_FAVORITES); break; case 0x230: samsung_kbd_mouse_map_key_clear(KEY_REFRESH); break; case 0x231: samsung_kbd_mouse_map_key_clear(KEY_STOP); break; default: return 0; } return 1; } static __u8 *samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (USB_DEVICE_ID_SAMSUNG_IR_REMOTE == hdev->product) rdesc = samsung_irda_report_fixup(hdev, rdesc, rsize); return rdesc; } static int samsung_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { int ret = 0; if (USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE == hdev->product) ret = samsung_kbd_mouse_input_mapping(hdev, hi, field, usage, bit, max); return ret; } static int samsung_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; unsigned int cmask = HID_CONNECT_DEFAULT; ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); goto err_free; } if (USB_DEVICE_ID_SAMSUNG_IR_REMOTE == hdev->product) { if (hdev->rsize == 184) { /* disable hidinput, force hiddev */ cmask = (cmask & ~HID_CONNECT_HIDINPUT) | HID_CONNECT_HIDDEV_FORCE; } } ret = hid_hw_start(hdev, cmask); if (ret) { hid_err(hdev, "hw start failed\n"); goto err_free; } return 
0; err_free: return ret; } static const struct hid_device_id samsung_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) }, { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) }, { } }; MODULE_DEVICE_TABLE(hid, samsung_devices); static struct hid_driver samsung_driver = { .name = "samsung", .id_table = samsung_devices, .report_fixup = samsung_report_fixup, .input_mapping = samsung_input_mapping, .probe = samsung_probe, }; static int __init samsung_init(void) { return hid_register_driver(&samsung_driver); } static void __exit samsung_exit(void) { hid_unregister_driver(&samsung_driver); } module_init(samsung_init); module_exit(samsung_exit); MODULE_LICENSE("GPL");
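/*
 * Editor's note -- illustrative sketch, not part of the original driver.
 * The 184-byte-descriptor branch of samsung_irda_report_fixup() rewrites
 * vendor report #4 from one 48-bit field into six 8-bit variable fields;
 * spelled out on a raw descriptor buffer, the patch is just these four
 * byte stores.  The helper name is hypothetical.
 */
static void __maybe_unused example_patch_184_rdesc(__u8 *rdesc)
{
	rdesc[176] = 0xff;	/* logical maximum: 0x40 -> 255 */
	rdesc[178] = 0x08;	/* report size: 48 -> 8 bits */
	rdesc[180] = 0x06;	/* report count: 1 -> 6 */
	rdesc[182] = 0x42;	/* input: array -> data,variable */
}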
gpl-2.0
TheTypoMaster/yotrino-linux-kernel
fs/jffs2/nodelist.c
7474
22184
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2001-2007 Red Hat, Inc. * * Created by David Woodhouse <dwmw2@infradead.org> * * For licensing information, see the file 'LICENCE' in this directory. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/mtd/mtd.h> #include <linux/rbtree.h> #include <linux/crc32.h> #include <linux/pagemap.h> #include "nodelist.h" static void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this); void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list) { struct jffs2_full_dirent **prev = list; dbg_dentlist("add dirent \"%s\", ino #%u\n", new->name, new->ino); while ((*prev) && (*prev)->nhash <= new->nhash) { if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) { /* Duplicate. Free one */ if (new->version < (*prev)->version) { dbg_dentlist("Eep! Marking new dirent node obsolete, old is \"%s\", ino #%u\n", (*prev)->name, (*prev)->ino); jffs2_mark_node_obsolete(c, new->raw); jffs2_free_full_dirent(new); } else { dbg_dentlist("marking old dirent \"%s\", ino #%u obsolete\n", (*prev)->name, (*prev)->ino); new->next = (*prev)->next; /* It may have been a 'placeholder' deletion dirent, if jffs2_can_mark_obsolete() (see jffs2_do_unlink()) */ if ((*prev)->raw) jffs2_mark_node_obsolete(c, ((*prev)->raw)); jffs2_free_full_dirent(*prev); *prev = new; } return; } prev = &((*prev)->next); } new->next = *prev; *prev = new; } uint32_t jffs2_truncate_fragtree(struct jffs2_sb_info *c, struct rb_root *list, uint32_t size) { struct jffs2_node_frag *frag = jffs2_lookup_node_frag(list, size); dbg_fragtree("truncating fragtree to 0x%08x bytes\n", size); /* We know frag->ofs <= size. That's what lookup does for us */ if (frag && frag->ofs != size) { if (frag->ofs+frag->size > size) { frag->size = size - frag->ofs; } frag = frag_next(frag); } while (frag && frag->ofs >= size) { struct jffs2_node_frag *next = frag_next(frag); frag_erase(frag, list); jffs2_obsolete_node_frag(c, frag); frag = next; } if (size == 0) return 0; frag = frag_last(list); /* Sanity check for truncation to longer than we started with... */ if (!frag) return 0; if (frag->ofs + frag->size < size) return frag->ofs + frag->size; /* If the last fragment starts at the RAM page boundary, it is * REF_PRISTINE irrespective of its size. */ if (frag->node && (frag->ofs & (PAGE_CACHE_SIZE - 1)) == 0) { dbg_fragtree2("marking the last fragment 0x%08x-0x%08x REF_PRISTINE.\n", frag->ofs, frag->ofs + frag->size); frag->node->raw->flash_offset = ref_offset(frag->node->raw) | REF_PRISTINE; } return size; } static void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this) { if (this->node) { this->node->frags--; if (!this->node->frags) { /* The node has no valid frags left. It's totally obsoleted */ dbg_fragtree2("marking old node @0x%08x (0x%04x-0x%04x) obsolete\n", ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size); jffs2_mark_node_obsolete(c, this->node->raw); jffs2_free_full_dnode(this->node); } else { dbg_fragtree2("marking old node @0x%08x (0x%04x-0x%04x) REF_NORMAL. 
frags is %d\n", ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size, this->node->frags); mark_ref_normal(this->node->raw); } } jffs2_free_node_frag(this); } static void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base) { struct rb_node *parent = &base->rb; struct rb_node **link = &parent; dbg_fragtree2("insert frag (0x%04x-0x%04x)\n", newfrag->ofs, newfrag->ofs + newfrag->size); while (*link) { parent = *link; base = rb_entry(parent, struct jffs2_node_frag, rb); if (newfrag->ofs > base->ofs) link = &base->rb.rb_right; else if (newfrag->ofs < base->ofs) link = &base->rb.rb_left; else { JFFS2_ERROR("duplicate frag at %08x (%p,%p)\n", newfrag->ofs, newfrag, base); BUG(); } } rb_link_node(&newfrag->rb, &base->rb, link); } /* * Allocates and initializes a new fragment. */ static struct jffs2_node_frag * new_fragment(struct jffs2_full_dnode *fn, uint32_t ofs, uint32_t size) { struct jffs2_node_frag *newfrag; newfrag = jffs2_alloc_node_frag(); if (likely(newfrag)) { newfrag->ofs = ofs; newfrag->size = size; newfrag->node = fn; } else { JFFS2_ERROR("cannot allocate a jffs2_node_frag object\n"); } return newfrag; } /* * Called when no overlapping fragment exists. Inserts a hole before the new * fragment and inserts the new fragment into the fragtree. */ static int no_overlapping_node(struct jffs2_sb_info *c, struct rb_root *root, struct jffs2_node_frag *newfrag, struct jffs2_node_frag *this, uint32_t lastend) { if (lastend < newfrag->node->ofs) { /* put a hole in before the new fragment */ struct jffs2_node_frag *holefrag; holefrag = new_fragment(NULL, lastend, newfrag->node->ofs - lastend); if (unlikely(!holefrag)) { jffs2_free_node_frag(newfrag); return -ENOMEM; } if (this) { /* By definition, the 'this' node has no right-hand child, because there are no frags with offset greater than it. So that's where we want to put the hole */ dbg_fragtree2("add hole frag %#04x-%#04x on the right of the new frag.\n", holefrag->ofs, holefrag->ofs + holefrag->size); rb_link_node(&holefrag->rb, &this->rb, &this->rb.rb_right); } else { dbg_fragtree2("Add hole frag %#04x-%#04x to the root of the tree.\n", holefrag->ofs, holefrag->ofs + holefrag->size); rb_link_node(&holefrag->rb, NULL, &root->rb_node); } rb_insert_color(&holefrag->rb, root); this = holefrag; } if (this) { /* By definition, the 'this' node has no right-hand child, because there are no frags with offset greater than it. So that's where we want to put the new fragment */ dbg_fragtree2("add the new node at the right\n"); rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right); } else { dbg_fragtree2("insert the new node at the root of the tree\n"); rb_link_node(&newfrag->rb, NULL, &root->rb_node); } rb_insert_color(&newfrag->rb, root); return 0; } /* Doesn't set inode->i_size */ static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *root, struct jffs2_node_frag *newfrag) { struct jffs2_node_frag *this; uint32_t lastend; /* Skip all the nodes which are completed before this one starts */ this = jffs2_lookup_node_frag(root, newfrag->node->ofs); if (this) { dbg_fragtree2("lookup gave frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n", this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this); lastend = this->ofs + this->size; } else { dbg_fragtree2("lookup gave no frag\n"); lastend = 0; } /* See if we ran off the end of the fragtree */ if (lastend <= newfrag->ofs) { /* We did */ /* Check if 'this' node was on the same page as the new node. 
If so, both 'this' and the new node get marked REF_NORMAL so the GC can take a look. */ if (lastend && (lastend-1) >> PAGE_CACHE_SHIFT == newfrag->ofs >> PAGE_CACHE_SHIFT) { if (this->node) mark_ref_normal(this->node->raw); mark_ref_normal(newfrag->node->raw); } return no_overlapping_node(c, root, newfrag, this, lastend); } if (this->node) dbg_fragtree2("dealing with frag %u-%u, phys %#08x(%d).\n", this->ofs, this->ofs + this->size, ref_offset(this->node->raw), ref_flags(this->node->raw)); else dbg_fragtree2("dealing with hole frag %u-%u.\n", this->ofs, this->ofs + this->size); /* OK. 'this' is pointing at the first frag that newfrag->ofs at least partially obsoletes, * - i.e. newfrag->ofs < this->ofs+this->size && newfrag->ofs >= this->ofs */ if (newfrag->ofs > this->ofs) { /* This node isn't completely obsoleted. The start of it remains valid */ /* Mark the new node and the partially covered node REF_NORMAL -- let the GC take a look at them */ mark_ref_normal(newfrag->node->raw); if (this->node) mark_ref_normal(this->node->raw); if (this->ofs + this->size > newfrag->ofs + newfrag->size) { /* The new node splits 'this' frag into two */ struct jffs2_node_frag *newfrag2; if (this->node) dbg_fragtree2("split old frag 0x%04x-0x%04x, phys 0x%08x\n", this->ofs, this->ofs+this->size, ref_offset(this->node->raw)); else dbg_fragtree2("split old hole frag 0x%04x-0x%04x\n", this->ofs, this->ofs+this->size); /* New second frag pointing to this's node */ newfrag2 = new_fragment(this->node, newfrag->ofs + newfrag->size, this->ofs + this->size - newfrag->ofs - newfrag->size); if (unlikely(!newfrag2)) return -ENOMEM; if (this->node) this->node->frags++; /* Adjust size of original 'this' */ this->size = newfrag->ofs - this->ofs; /* Now, we know there's no node with offset greater than this->ofs but smaller than newfrag2->ofs or newfrag->ofs, for obvious reasons. So we can do a tree insert from 'this' to insert newfrag, and a tree insert from newfrag to insert newfrag2. */ jffs2_fragtree_insert(newfrag, this); rb_insert_color(&newfrag->rb, root); jffs2_fragtree_insert(newfrag2, newfrag); rb_insert_color(&newfrag2->rb, root); return 0; } /* New node just reduces 'this' frag in size, doesn't split it */ this->size = newfrag->ofs - this->ofs; /* Again, we know it lives down here in the tree */ jffs2_fragtree_insert(newfrag, this); rb_insert_color(&newfrag->rb, root); } else { /* New frag starts at the same point as 'this' used to. Replace it in the tree without doing a delete and insertion */ dbg_fragtree2("inserting newfrag (*%p),%d-%d in before 'this' (*%p),%d-%d\n", newfrag, newfrag->ofs, newfrag->ofs+newfrag->size, this, this->ofs, this->ofs+this->size); rb_replace_node(&this->rb, &newfrag->rb, root); if (newfrag->ofs + newfrag->size >= this->ofs+this->size) { dbg_fragtree2("obsoleting node frag %p (%x-%x)\n", this, this->ofs, this->ofs+this->size); jffs2_obsolete_node_frag(c, this); } else { this->ofs += newfrag->size; this->size -= newfrag->size; jffs2_fragtree_insert(this, newfrag); rb_insert_color(&this->rb, root); return 0; } } /* OK, now we have newfrag added in the correct place in the tree, but frag_next(newfrag) may be a fragment which is overlapped by it */ while ((this = frag_next(newfrag)) && newfrag->ofs + newfrag->size >= this->ofs + this->size) { /* 'this' frag is obsoleted completely. 
*/ dbg_fragtree2("obsoleting node frag %p (%x-%x) and removing from tree\n", this, this->ofs, this->ofs+this->size); rb_erase(&this->rb, root); jffs2_obsolete_node_frag(c, this); } /* Now we're pointing at the first frag which isn't totally obsoleted by the new frag */ if (!this || newfrag->ofs + newfrag->size == this->ofs) return 0; /* Still some overlap but we don't need to move it in the tree */ this->size = (this->ofs + this->size) - (newfrag->ofs + newfrag->size); this->ofs = newfrag->ofs + newfrag->size; /* And mark them REF_NORMAL so the GC takes a look at them */ if (this->node) mark_ref_normal(this->node->raw); mark_ref_normal(newfrag->node->raw); return 0; } /* * Given an inode, probably with existing tree of fragments, add the new node * to the fragment tree. */ int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn) { int ret; struct jffs2_node_frag *newfrag; if (unlikely(!fn->size)) return 0; newfrag = new_fragment(fn, fn->ofs, fn->size); if (unlikely(!newfrag)) return -ENOMEM; newfrag->node->frags = 1; dbg_fragtree("adding node %#04x-%#04x @0x%08x on flash, newfrag *%p\n", fn->ofs, fn->ofs+fn->size, ref_offset(fn->raw), newfrag); ret = jffs2_add_frag_to_fragtree(c, &f->fragtree, newfrag); if (unlikely(ret)) return ret; /* If we now share a page with other nodes, mark either previous or next node REF_NORMAL, as appropriate. */ if (newfrag->ofs & (PAGE_CACHE_SIZE-1)) { struct jffs2_node_frag *prev = frag_prev(newfrag); mark_ref_normal(fn->raw); /* If we don't start at zero there's _always_ a previous */ if (prev->node) mark_ref_normal(prev->node->raw); } if ((newfrag->ofs+newfrag->size) & (PAGE_CACHE_SIZE-1)) { struct jffs2_node_frag *next = frag_next(newfrag); if (next) { mark_ref_normal(fn->raw); if (next->node) mark_ref_normal(next->node->raw); } } jffs2_dbg_fragtree_paranoia_check_nolock(f); return 0; } void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state) { spin_lock(&c->inocache_lock); ic->state = state; wake_up(&c->inocache_wq); spin_unlock(&c->inocache_lock); } /* During mount, this needs no locking. During normal operation, its callers want to do other stuff while still holding the inocache_lock. Rather than introducing special case get_ino_cache functions or callbacks, we just let the caller do the locking itself. 
*/ struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino) { struct jffs2_inode_cache *ret; ret = c->inocache_list[ino % c->inocache_hashsize]; while (ret && ret->ino < ino) { ret = ret->next; } if (ret && ret->ino != ino) ret = NULL; return ret; } void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new) { struct jffs2_inode_cache **prev; spin_lock(&c->inocache_lock); if (!new->ino) new->ino = ++c->highest_ino; dbg_inocache("add %p (ino #%u)\n", new, new->ino); prev = &c->inocache_list[new->ino % c->inocache_hashsize]; while ((*prev) && (*prev)->ino < new->ino) { prev = &(*prev)->next; } new->next = *prev; *prev = new; spin_unlock(&c->inocache_lock); } void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old) { struct jffs2_inode_cache **prev; #ifdef CONFIG_JFFS2_FS_XATTR BUG_ON(old->xref); #endif dbg_inocache("del %p (ino #%u)\n", old, old->ino); spin_lock(&c->inocache_lock); prev = &c->inocache_list[old->ino % c->inocache_hashsize]; while ((*prev) && (*prev)->ino < old->ino) { prev = &(*prev)->next; } if ((*prev) == old) { *prev = old->next; } /* Free it now unless it's in READING or CLEARING state, which are the transitions upon read_inode() and clear_inode(). The rest of the time we know nobody else is looking at it, and if it's held by read_inode() or clear_inode() they'll free it for themselves. */ if (old->state != INO_STATE_READING && old->state != INO_STATE_CLEARING) jffs2_free_inode_cache(old); spin_unlock(&c->inocache_lock); } void jffs2_free_ino_caches(struct jffs2_sb_info *c) { int i; struct jffs2_inode_cache *this, *next; for (i=0; i < c->inocache_hashsize; i++) { this = c->inocache_list[i]; while (this) { next = this->next; jffs2_xattr_free_inode(c, this); jffs2_free_inode_cache(this); this = next; } c->inocache_list[i] = NULL; } } void jffs2_free_raw_node_refs(struct jffs2_sb_info *c) { int i; struct jffs2_raw_node_ref *this, *next; for (i=0; i<c->nr_blocks; i++) { this = c->blocks[i].first_node; while (this) { if (this[REFS_PER_BLOCK].flash_offset == REF_LINK_NODE) next = this[REFS_PER_BLOCK].next_in_ino; else next = NULL; jffs2_free_refblock(this); this = next; } c->blocks[i].first_node = c->blocks[i].last_node = NULL; } } struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset) { /* The common case in lookup is that there will be a node which precisely matches. So we go looking for that first */ struct rb_node *next; struct jffs2_node_frag *prev = NULL; struct jffs2_node_frag *frag = NULL; dbg_fragtree2("root %p, offset %d\n", fragtree, offset); next = fragtree->rb_node; while(next) { frag = rb_entry(next, struct jffs2_node_frag, rb); if (frag->ofs + frag->size <= offset) { /* Remember the closest smaller match on the way down */ if (!prev || frag->ofs > prev->ofs) prev = frag; next = frag->rb.rb_right; } else if (frag->ofs > offset) { next = frag->rb.rb_left; } else { return frag; } } /* Exact match not found. Go back up looking at each parent, and return the closest smaller one */ if (prev) dbg_fragtree2("no match. Returning frag %#04x-%#04x, closest previous\n", prev->ofs, prev->ofs+prev->size); else dbg_fragtree2("returning NULL, empty fragtree\n"); return prev; } /* Pass 'c' argument to indicate that nodes should be marked obsolete as they're killed. 
*/ void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c) { struct jffs2_node_frag *frag; struct jffs2_node_frag *parent; if (!root->rb_node) return; dbg_fragtree("killing\n"); frag = (rb_entry(root->rb_node, struct jffs2_node_frag, rb)); while(frag) { if (frag->rb.rb_left) { frag = frag_left(frag); continue; } if (frag->rb.rb_right) { frag = frag_right(frag); continue; } if (frag->node && !(--frag->node->frags)) { /* Not a hole, and it's the final remaining frag of this node. Free the node */ if (c) jffs2_mark_node_obsolete(c, frag->node->raw); jffs2_free_full_dnode(frag->node); } parent = frag_parent(frag); if (parent) { if (frag_left(parent) == frag) parent->rb.rb_left = NULL; else parent->rb.rb_right = NULL; } jffs2_free_node_frag(frag); frag = parent; cond_resched(); } } struct jffs2_raw_node_ref *jffs2_link_node_ref(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t ofs, uint32_t len, struct jffs2_inode_cache *ic) { struct jffs2_raw_node_ref *ref; BUG_ON(!jeb->allocated_refs); jeb->allocated_refs--; ref = jeb->last_node; dbg_noderef("Last node at %p is (%08x,%p)\n", ref, ref->flash_offset, ref->next_in_ino); while (ref->flash_offset != REF_EMPTY_NODE) { if (ref->flash_offset == REF_LINK_NODE) ref = ref->next_in_ino; else ref++; } dbg_noderef("New ref is %p (%08x becomes %08x,%p) len 0x%x\n", ref, ref->flash_offset, ofs, ref->next_in_ino, len); ref->flash_offset = ofs; if (!jeb->first_node) { jeb->first_node = ref; BUG_ON(ref_offset(ref) != jeb->offset); } else if (unlikely(ref_offset(ref) != jeb->offset + c->sector_size - jeb->free_size)) { uint32_t last_len = ref_totlen(c, jeb, jeb->last_node); JFFS2_ERROR("Adding new ref %p at (0x%08x-0x%08x) not immediately after previous (0x%08x-0x%08x)\n", ref, ref_offset(ref), ref_offset(ref)+len, ref_offset(jeb->last_node), ref_offset(jeb->last_node)+last_len); BUG(); } jeb->last_node = ref; if (ic) { ref->next_in_ino = ic->nodes; ic->nodes = ref; } else { ref->next_in_ino = NULL; } switch(ref_flags(ref)) { case REF_UNCHECKED: c->unchecked_size += len; jeb->unchecked_size += len; break; case REF_NORMAL: case REF_PRISTINE: c->used_size += len; jeb->used_size += len; break; case REF_OBSOLETE: c->dirty_size += len; jeb->dirty_size += len; break; } c->free_size -= len; jeb->free_size -= len; #ifdef TEST_TOTLEN /* Set (and test) __totlen field... for now */ ref->__totlen = len; ref_totlen(c, jeb, ref); #endif return ref; } /* No locking, no reservation of 'ref'. 
Do not use on a live file system */ int jffs2_scan_dirty_space(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t size) { if (!size) return 0; if (unlikely(size > jeb->free_size)) { pr_crit("Dirty space 0x%x larger than free_size 0x%x (wasted 0x%x)\n", size, jeb->free_size, jeb->wasted_size); BUG(); } /* REF_EMPTY_NODE is !obsolete, so that works OK */ if (jeb->last_node && ref_obsolete(jeb->last_node)) { #ifdef TEST_TOTLEN jeb->last_node->__totlen += size; #endif c->dirty_size += size; c->free_size -= size; jeb->dirty_size += size; jeb->free_size -= size; } else { uint32_t ofs = jeb->offset + c->sector_size - jeb->free_size; ofs |= REF_OBSOLETE; jffs2_link_node_ref(c, jeb, ofs, size, NULL); } return 0; } /* Calculate totlen from surrounding nodes or eraseblock */ static inline uint32_t __ref_totlen(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_raw_node_ref *ref) { uint32_t ref_end; struct jffs2_raw_node_ref *next_ref = ref_next(ref); if (next_ref) ref_end = ref_offset(next_ref); else { if (!jeb) jeb = &c->blocks[ref->flash_offset / c->sector_size]; /* Last node in block. Use free_space */ if (unlikely(ref != jeb->last_node)) { pr_crit("ref %p @0x%08x is not jeb->last_node (%p @0x%08x)\n", ref, ref_offset(ref), jeb->last_node, jeb->last_node ? ref_offset(jeb->last_node) : 0); BUG(); } ref_end = jeb->offset + c->sector_size - jeb->free_size; } return ref_end - ref_offset(ref); } uint32_t __jffs2_ref_totlen(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_raw_node_ref *ref) { uint32_t ret; ret = __ref_totlen(c, jeb, ref); #ifdef TEST_TOTLEN if (unlikely(ret != ref->__totlen)) { if (!jeb) jeb = &c->blocks[ref->flash_offset / c->sector_size]; pr_crit("Totlen for ref at %p (0x%08x-0x%08x) miscalculated as 0x%x instead of 0x%x\n", ref, ref_offset(ref), ref_offset(ref) + ref->__totlen, ret, ref->__totlen); if (ref_next(ref)) { pr_crit("next %p (0x%08x-0x%08x)\n", ref_next(ref), ref_offset(ref_next(ref)), ref_offset(ref_next(ref)) + ref->__totlen); } else pr_crit("No next ref. jeb->last_node is %p\n", jeb->last_node); pr_crit("jeb->wasted_size %x, dirty_size %x, used_size %x, free_size %x\n", jeb->wasted_size, jeb->dirty_size, jeb->used_size, jeb->free_size); #if defined(JFFS2_DBG_DUMPS) || defined(JFFS2_DBG_PARANOIA_CHECKS) __jffs2_dbg_dump_node_refs_nolock(c, jeb); #endif WARN_ON(1); ret = ref->__totlen; } #endif /* TEST_TOTLEN */ return ret; }
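The inocache hash above keeps each bucket as a singly linked list sorted by inode number, which is why jffs2_get_ino_cache(), jffs2_add_ino_cache() and jffs2_del_ino_cache() all walk with the same "advance while ino is smaller" loop. A minimal userspace sketch of the same bucket discipline, with hypothetical names and an arbitrary bucket count (not part of the kernel source):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define HASHSIZE 8			/* stand-in for c->inocache_hashsize */

struct ino_cache {			/* simplified jffs2_inode_cache */
	uint32_t ino;
	struct ino_cache *next;
};

static struct ino_cache *buckets[HASHSIZE];

/* Same sorted insert as jffs2_add_ino_cache(): advance while smaller. */
static void add_ino_cache(struct ino_cache *new_ic)
{
	struct ino_cache **prev = &buckets[new_ic->ino % HASHSIZE];

	while (*prev && (*prev)->ino < new_ic->ino)
		prev = &(*prev)->next;
	new_ic->next = *prev;
	*prev = new_ic;
}

/* Same lookup as jffs2_get_ino_cache(): stop at the first ino >= target. */
static struct ino_cache *get_ino_cache(uint32_t ino)
{
	struct ino_cache *ret = buckets[ino % HASHSIZE];

	while (ret && ret->ino < ino)
		ret = ret->next;
	return (ret && ret->ino == ino) ? ret : NULL;
}

int main(void)
{
	uint32_t inos[] = { 17, 9, 1, 25 };	/* all land in bucket 1 */
	for (unsigned i = 0; i < 4; i++) {
		struct ino_cache *ic = calloc(1, sizeof(*ic));
		ic->ino = inos[i];
		add_ino_cache(ic);
	}
	printf("lookup 17: %s\n", get_ino_cache(17) ? "hit" : "miss");
	printf("lookup 18: %s\n", get_ino_cache(18) ? "hit" : "miss");
	return 0;
}

The sorted order is what lets the lookup bail out early at the first entry with a larger inode number instead of scanning the whole bucket.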
gpl-2.0
vocoderism/Tegra-Note-7
drivers/media/video/uvc/uvc_entity.c
7986
3235
/* * uvc_entity.c -- USB Video Class driver * * Copyright (C) 2005-2011 * Laurent Pinchart (laurent.pinchart@ideasonboard.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <linux/kernel.h> #include <linux/list.h> #include <linux/videodev2.h> #include <media/v4l2-common.h> #include "uvcvideo.h" /* ------------------------------------------------------------------------ * Video subdevices registration and unregistration */ static int uvc_mc_register_entity(struct uvc_video_chain *chain, struct uvc_entity *entity) { const u32 flags = MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE; struct media_entity *sink; unsigned int i; int ret; sink = (UVC_ENTITY_TYPE(entity) == UVC_TT_STREAMING) ? (entity->vdev ? &entity->vdev->entity : NULL) : &entity->subdev.entity; if (sink == NULL) return 0; for (i = 0; i < entity->num_pads; ++i) { struct media_entity *source; struct uvc_entity *remote; u8 remote_pad; if (!(entity->pads[i].flags & MEDIA_PAD_FL_SINK)) continue; remote = uvc_entity_by_id(chain->dev, entity->baSourceID[i]); if (remote == NULL) return -EINVAL; source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING) ? (remote->vdev ? &remote->vdev->entity : NULL) : &remote->subdev.entity; if (source == NULL) continue; remote_pad = remote->num_pads - 1; ret = media_entity_create_link(source, remote_pad, sink, i, flags); if (ret < 0) return ret; } if (UVC_ENTITY_TYPE(entity) == UVC_TT_STREAMING) return 0; return v4l2_device_register_subdev(&chain->dev->vdev, &entity->subdev); } static struct v4l2_subdev_ops uvc_subdev_ops = { }; void uvc_mc_cleanup_entity(struct uvc_entity *entity) { if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING) media_entity_cleanup(&entity->subdev.entity); else if (entity->vdev != NULL) media_entity_cleanup(&entity->vdev->entity); } static int uvc_mc_init_entity(struct uvc_entity *entity) { int ret; if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING) { v4l2_subdev_init(&entity->subdev, &uvc_subdev_ops); strlcpy(entity->subdev.name, entity->name, sizeof(entity->subdev.name)); ret = media_entity_init(&entity->subdev.entity, entity->num_pads, entity->pads, 0); } else if (entity->vdev != NULL) { ret = media_entity_init(&entity->vdev->entity, entity->num_pads, entity->pads, 0); } else ret = 0; return ret; } int uvc_mc_register_entities(struct uvc_video_chain *chain) { struct uvc_entity *entity; int ret; list_for_each_entry(entity, &chain->entities, chain) { ret = uvc_mc_init_entity(entity); if (ret < 0) { uvc_printk(KERN_INFO, "Failed to initialize entity for " "entity %u\n", entity->id); return ret; } } list_for_each_entry(entity, &chain->entities, chain) { ret = uvc_mc_register_entity(chain, entity); if (ret < 0) { uvc_printk(KERN_INFO, "Failed to register entity for " "entity %u\n", entity->id); return ret; } } return 0; }
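uvc_mc_register_entities() above deliberately makes two passes over the chain: every entity is initialized first, and only then are links created, because each sink's baSourceID is resolved to an entity whose pads must already be set up. A minimal sketch of the same two-pass pattern in plain C (hypothetical miniature types, not the V4L2/media-controller API):

#include <stdio.h>

#define N 3

struct entity {
	int id;
	int source_id;		/* 0 = none; models baSourceID[] */
	int ready;
};

static struct entity chain[N] = {
	{ .id = 1, .source_id = 0 },	/* camera terminal */
	{ .id = 2, .source_id = 1 },	/* processing unit */
	{ .id = 3, .source_id = 2 },	/* streaming terminal */
};

static struct entity *find_entity(int id)
{
	for (int i = 0; i < N; i++)
		if (chain[i].id == id)
			return &chain[i];
	return NULL;
}

int main(void)
{
	/* pass 1: initialize everything (mirrors uvc_mc_init_entity) */
	for (int i = 0; i < N; i++)
		chain[i].ready = 1;

	/* pass 2: link sinks to sources, which are guaranteed ready */
	for (int i = 0; i < N; i++) {
		struct entity *src = find_entity(chain[i].source_id);
		if (src)
			printf("link %d -> %d (source ready: %d)\n",
			       src->id, chain[i].id, src->ready);
	}
	return 0;
}

Collapsing the two passes into one would risk creating a link to an entity that appears later in the descriptor list and has not been initialized yet.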
gpl-2.0
SlimRoms/kernel_htc_msm8960
drivers/staging/comedi/drivers/mpc624.c
7986
12985
/* comedi/drivers/mpc624.c Hardware driver for a Micro/sys inc. MPC-624 PC/104 board COMEDI - Linux Control and Measurement Device Interface Copyright (C) 2000 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: mpc624 Description: Micro/sys MPC-624 PC/104 board Devices: [Micro/sys] MPC-624 (mpc624) Author: Stanislaw Raczynski <sraczynski@op.pl> Updated: Thu, 15 Sep 2005 12:01:18 +0200 Status: working The Micro/sys MPC-624 board is based on the LTC2440 24-bit sigma-delta ADC chip. Subdevices supported by the driver: - Analog In: supported - Digital I/O: not supported - LEDs: not supported - EEPROM: not supported Configuration Options: [0] - I/O base address [1] - conversion rate Conversion rate RMS noise Effective Number Of Bits 0 3.52kHz 23uV 17 1 1.76kHz 3.5uV 20 2 880Hz 2uV 21.3 3 440Hz 1.4uV 21.8 4 220Hz 1uV 22.4 5 110Hz 750nV 22.9 6 55Hz 510nV 23.4 7 27.5Hz 375nV 24 8 13.75Hz 250nV 24.4 9 6.875Hz 200nV 24.6 [2] - voltage range 0 -1.01V .. +1.01V 1 -10.1V .. +10.1V */ #include "../comedidev.h" #include <linux/ioport.h> #include <linux/delay.h> /* Consecutive I/O port addresses */ #define MPC624_SIZE 16 /* Offsets of different ports */ #define MPC624_MASTER_CONTROL 0 /* not used */ #define MPC624_GNMUXCH 1 /* Gain, Mux, Channel of ADC */ #define MPC624_ADC 2 /* read/write to/from ADC */ #define MPC624_EE 3 /* read/write to/from serial EEPROM via I2C */ #define MPC624_LEDS 4 /* write to LEDs */ #define MPC624_DIO 5 /* read/write to/from digital I/O ports */ #define MPC624_IRQ_MASK 6 /* IRQ masking enable/disable */ /* Register bits' names */ #define MPC624_ADBUSY (1<<5) #define MPC624_ADSDO (1<<4) #define MPC624_ADFO (1<<3) #define MPC624_ADCS (1<<2) #define MPC624_ADSCK (1<<1) #define MPC624_ADSDI (1<<0) /* SDI Speed/Resolution Programming bits */ #define MPC624_OSR4 (1<<31) #define MPC624_OSR3 (1<<30) #define MPC624_OSR2 (1<<29) #define MPC624_OSR1 (1<<28) #define MPC624_OSR0 (1<<27) /* 32-bit output value bits' names */ #define MPC624_EOC_BIT (1<<31) #define MPC624_DMY_BIT (1<<30) #define MPC624_SGN_BIT (1<<29) /* Conversion speeds */ /* OSR4 OSR3 OSR2 OSR1 OSR0 Conversion rate RMS noise ENOB^ * X 0 0 0 1 3.52kHz 23uV 17 * X 0 0 1 0 1.76kHz 3.5uV 20 * X 0 0 1 1 880Hz 2uV 21.3 * X 0 1 0 0 440Hz 1.4uV 21.8 * X 0 1 0 1 220Hz 1uV 22.4 * X 0 1 1 0 110Hz 750nV 22.9 * X 0 1 1 1 55Hz 510nV 23.4 * X 1 0 0 0 27.5Hz 375nV 24 * X 1 0 0 1 13.75Hz 250nV 24.4 * X 1 1 1 1 6.875Hz 200nV 24.6 * * ^ - Effective Number Of Bits */ #define MPC624_SPEED_3_52_kHz (MPC624_OSR4 | MPC624_OSR0) #define MPC624_SPEED_1_76_kHz (MPC624_OSR4 | MPC624_OSR1) #define MPC624_SPEED_880_Hz (MPC624_OSR4 | MPC624_OSR1 | MPC624_OSR0) #define MPC624_SPEED_440_Hz (MPC624_OSR4 | MPC624_OSR2) #define MPC624_SPEED_220_Hz (MPC624_OSR4 | MPC624_OSR2 | MPC624_OSR0) #define MPC624_SPEED_110_Hz (MPC624_OSR4 | MPC624_OSR2 | MPC624_OSR1) #define MPC624_SPEED_55_Hz \ (MPC624_OSR4 | MPC624_OSR2 |
MPC624_OSR1 | MPC624_OSR0) #define MPC624_SPEED_27_5_Hz (MPC624_OSR4 | MPC624_OSR3) #define MPC624_SPEED_13_75_Hz (MPC624_OSR4 | MPC624_OSR3 | MPC624_OSR0) #define MPC624_SPEED_6_875_Hz \ (MPC624_OSR4 | MPC624_OSR3 | MPC624_OSR2 | MPC624_OSR1 | MPC624_OSR0) /* -------------------------------------------------------------------------- */ struct skel_private { /* set by mpc624_attach() from driver's parameters */ unsigned long int ulConvertionRate; }; #define devpriv ((struct skel_private *)dev->private) /* -------------------------------------------------------------------------- */ static const struct comedi_lrange range_mpc624_bipolar1 = { 1, { /* BIP_RANGE(1.01) this is correct, */ /* but my MPC-624 actually seems to have a range of 2.02 */ BIP_RANGE(2.02) } }; static const struct comedi_lrange range_mpc624_bipolar10 = { 1, { /* BIP_RANGE(10.1) this is correct, */ /* but my MPC-624 actually seems to have a range of 20.2 */ BIP_RANGE(20.2) } }; /* -------------------------------------------------------------------------- */ static int mpc624_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int mpc624_detach(struct comedi_device *dev); /* -------------------------------------------------------------------------- */ static struct comedi_driver driver_mpc624 = { .driver_name = "mpc624", .module = THIS_MODULE, .attach = mpc624_attach, .detach = mpc624_detach }; /* -------------------------------------------------------------------------- */ static int mpc624_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); /* -------------------------------------------------------------------------- */ static int mpc624_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_subdevice *s; unsigned long iobase; iobase = it->options[0]; printk(KERN_INFO "comedi%d: mpc624 [0x%04lx, ", dev->minor, iobase); if (request_region(iobase, MPC624_SIZE, "mpc624") == NULL) { printk(KERN_ERR "I/O port(s) in use\n"); return -EIO; } dev->iobase = iobase; dev->board_name = "mpc624"; /* Private structure initialization */ if (alloc_private(dev, sizeof(struct skel_private)) < 0) return -ENOMEM; switch (it->options[1]) { case 0: devpriv->ulConvertionRate = MPC624_SPEED_3_52_kHz; printk(KERN_INFO "3.52 kHz, "); break; case 1: devpriv->ulConvertionRate = MPC624_SPEED_1_76_kHz; printk(KERN_INFO "1.76 kHz, "); break; case 2: devpriv->ulConvertionRate = MPC624_SPEED_880_Hz; printk(KERN_INFO "880 Hz, "); break; case 3: devpriv->ulConvertionRate = MPC624_SPEED_440_Hz; printk(KERN_INFO "440 Hz, "); break; case 4: devpriv->ulConvertionRate = MPC624_SPEED_220_Hz; printk(KERN_INFO "220 Hz, "); break; case 5: devpriv->ulConvertionRate = MPC624_SPEED_110_Hz; printk(KERN_INFO "110 Hz, "); break; case 6: devpriv->ulConvertionRate = MPC624_SPEED_55_Hz; printk(KERN_INFO "55 Hz, "); break; case 7: devpriv->ulConvertionRate = MPC624_SPEED_27_5_Hz; printk(KERN_INFO "27.5 Hz, "); break; case 8: devpriv->ulConvertionRate = MPC624_SPEED_13_75_Hz; printk(KERN_INFO "13.75 Hz, "); break; case 9: devpriv->ulConvertionRate = MPC624_SPEED_6_875_Hz; printk(KERN_INFO "6.875 Hz, "); break; default: printk (KERN_ERR "illegal conversion rate setting!" " Valid numbers are 0..9. 
Using 9 => 6.875 Hz, "); devpriv->ulConvertionRate = MPC624_SPEED_6_875_Hz; } /* Subdevices structures */ if (alloc_subdevices(dev, 1) < 0) return -ENOMEM; s = dev->subdevices + 0; s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_DIFF; s->n_chan = 8; switch (it->options[1]) { default: s->maxdata = 0x3FFFFFFF; printk(KERN_INFO "30 bit, "); } switch (it->options[2]) { case 0: s->range_table = &range_mpc624_bipolar1; printk(KERN_INFO "1.01V]: "); break; default: s->range_table = &range_mpc624_bipolar10; printk(KERN_INFO "10.1V]: "); } s->len_chanlist = 1; s->insn_read = mpc624_ai_rinsn; printk(KERN_INFO "attached\n"); return 1; } static int mpc624_detach(struct comedi_device *dev) { printk(KERN_INFO "comedi%d: mpc624: remove\n", dev->minor); if (dev->iobase) release_region(dev->iobase, MPC624_SIZE); return 0; } /* Timeout 200ms */ #define TIMEOUT 200 static int mpc624_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int n, i; unsigned long int data_in, data_out; unsigned char ucPort; /* * WARNING: * We always write 0 to GNSWA bit, so the channel range is +/-10.1Vdc */ outb(insn->chanspec, dev->iobase + MPC624_GNMUXCH); /* printk("Channel %d:\n", insn->chanspec); */ if (!insn->n) { printk(KERN_INFO "MPC624: Warning, no data to acquire\n"); return 0; } for (n = 0; n < insn->n; n++) { /* Trigger the conversion */ outb(MPC624_ADSCK, dev->iobase + MPC624_ADC); udelay(1); outb(MPC624_ADCS | MPC624_ADSCK, dev->iobase + MPC624_ADC); udelay(1); outb(0, dev->iobase + MPC624_ADC); udelay(1); /* Wait for the conversion to end */ for (i = 0; i < TIMEOUT; i++) { ucPort = inb(dev->iobase + MPC624_ADC); if (ucPort & MPC624_ADBUSY) udelay(1000); else break; } if (i == TIMEOUT) { printk(KERN_ERR "MPC624: timeout (%dms)\n", TIMEOUT); data[n] = 0; return -ETIMEDOUT; } /* Start reading data */ data_in = 0; data_out = devpriv->ulConvertionRate; udelay(1); for (i = 0; i < 32; i++) { /* Set the clock low */ outb(0, dev->iobase + MPC624_ADC); udelay(1); if (data_out & (1 << 31)) { /* the next bit is a 1 */ /* Set the ADSDI line (send to MPC624) */ outb(MPC624_ADSDI, dev->iobase + MPC624_ADC); udelay(1); /* Set the clock high */ outb(MPC624_ADSCK | MPC624_ADSDI, dev->iobase + MPC624_ADC); } else { /* the next bit is a 0 */ /* Set the ADSDI line (send to MPC624) */ outb(0, dev->iobase + MPC624_ADC); udelay(1); /* Set the clock high */ outb(MPC624_ADSCK, dev->iobase + MPC624_ADC); } /* Read ADSDO on high clock (receive from MPC624) */ udelay(1); data_in <<= 1; data_in |= (inb(dev->iobase + MPC624_ADC) & MPC624_ADSDO) >> 4; udelay(1); data_out <<= 1; } /* * Received 32-bit long value consists of: * 31: EOC - * (End Of Conversion) bit - should be 0 * 30: DMY * (Dummy) bit - should be 0 * 29: SIG * (Sign) bit - 1 if the voltage is positive, * 0 if negative * 28: MSB * (Most Significant Bit) - the first bit of * the conversion result * .... * 05: LSB * (Least Significant Bit) - the last bit of the * conversion result * 04-00: sub-LSB * - sub-LSBs are basically noise, but when * averaged properly, they can increase conversion * precision up to 29 bits; they can be discarded * without loss of resolution.
*/ if (data_in & MPC624_EOC_BIT) printk(KERN_INFO "MPC624: EOC bit is set (data_in=%lu)!\n", data_in); if (data_in & MPC624_DMY_BIT) printk(KERN_INFO "MPC624: DMY bit is set (data_in=%lu)!\n", data_in); if (data_in & MPC624_SGN_BIT) { /* Voltage is positive */ /* * comedi operates on unsigned numbers, so mask off EOC * and DMY and don't clear the SGN bit */ data_in &= 0x3FFFFFFF; data[n] = data_in; } else { /* The voltage is negative */ /* * data_in contains a number in 30-bit two's complement * code and we must deal with it */ data_in |= MPC624_SGN_BIT; data_in = ~data_in; data_in += 1; data_in &= ~(MPC624_EOC_BIT | MPC624_DMY_BIT); /* clear EOC and DMY bits */ data_in = 0x20000000 - data_in; data[n] = data_in; } } /* Return the number of samples read/written */ return n; } static int __init driver_mpc624_init_module(void) { return comedi_driver_register(&driver_mpc624); } static void __exit driver_mpc624_cleanup_module(void) { comedi_driver_unregister(&driver_mpc624); } module_init(driver_mpc624_init_module); module_exit(driver_mpc624_cleanup_module); MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
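The comment block in mpc624_ai_rinsn() above pins down the LTC2440 output word: EOC, DMY and SGN in bits 31..29, the conversion result below. A standalone userspace sketch of the same decode the driver performs, with positive samples masked to 30 bits and negative samples folded around the 0x20000000 midpoint (hypothetical helper, not part of the driver):

#include <stdio.h>
#include <stdint.h>

#define EOC_BIT (1u << 31)
#define DMY_BIT (1u << 30)
#define SGN_BIT (1u << 29)

/* Mirror of the decode branch above: positive samples just mask off
 * EOC/DMY (keeping SGN as the top data bit); negative samples are
 * 30-bit two's complement, mapped onto 0..0x1FFFFFFF below the
 * 0x20000000 midpoint of the unsigned comedi range. */
static uint32_t decode(uint32_t raw)
{
	if (raw & SGN_BIT)
		return raw & 0x3FFFFFFF;
	raw |= SGN_BIT;
	raw = ~raw + 1;
	raw &= ~(EOC_BIT | DMY_BIT);
	return 0x20000000 - raw;
}

int main(void)
{
	printf("0 V       -> 0x%08x\n", decode(SGN_BIT));	/* midpoint  */
	printf("just < 0V -> 0x%08x\n", decode(SGN_BIT - 1));
	return 0;
}

decode(SGN_BIT) lands exactly on the 0x20000000 zero-volt midpoint, and the value one code below it decodes to 0x1FFFFFFF, matching the driver's unsigned convention.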
gpl-2.0
SatrioDwiPrabowo/Android_Sony_Alexa_Kernel_Viskan-JB4.3
arch/mips/pnx8550/jbs/board_setup.c
9522
2007
/* * JBS Specific board startup routines. * * Copyright 2005, Embedded Alley Solutions, Inc * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/sched.h> #include <linux/ioport.h> #include <linux/mm.h> #include <linux/console.h> #include <linux/mc146818rtc.h> #include <linux/delay.h> #include <asm/cpu.h> #include <asm/bootinfo.h> #include <asm/irq.h> #include <asm/mipsregs.h> #include <asm/reboot.h> #include <asm/pgtable.h> #include <glb.h> /* CP0 hazard avoidance. */ #define BARRIER __asm__ __volatile__(".set noreorder\n\t" \ "nop; nop; nop; nop; nop; nop;\n\t" \ ".set reorder\n\t") void __init board_setup(void) { unsigned long configpr; configpr = read_c0_config7(); configpr |= (1<<19); /* enable tlb */ write_c0_config7(configpr); BARRIER; }
gpl-2.0
lazybios/linux
fs/romfs/storage.c
9778
6508
/* RomFS storage access routines * * Copyright © 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/fs.h> #include <linux/mtd/super.h> #include <linux/buffer_head.h> #include "internal.h" #if !defined(CONFIG_ROMFS_ON_MTD) && !defined(CONFIG_ROMFS_ON_BLOCK) #error no ROMFS backing store interface configured #endif #ifdef CONFIG_ROMFS_ON_MTD #define ROMFS_MTD_READ(sb, ...) mtd_read((sb)->s_mtd, ##__VA_ARGS__) /* * read data from an romfs image on an MTD device */ static int romfs_mtd_read(struct super_block *sb, unsigned long pos, void *buf, size_t buflen) { size_t rlen; int ret; ret = ROMFS_MTD_READ(sb, pos, buflen, &rlen, buf); return (ret < 0 || rlen != buflen) ? -EIO : 0; } /* * determine the length of a string in a romfs image on an MTD device */ static ssize_t romfs_mtd_strnlen(struct super_block *sb, unsigned long pos, size_t maxlen) { ssize_t n = 0; size_t segment; u_char buf[16], *p; size_t len; int ret; /* scan the string up to 16 bytes at a time */ while (maxlen > 0) { segment = min_t(size_t, maxlen, 16); ret = ROMFS_MTD_READ(sb, pos, segment, &len, buf); if (ret < 0) return ret; p = memchr(buf, 0, len); if (p) return n + (p - buf); maxlen -= len; pos += len; n += len; } return n; } /* * compare a string to one in a romfs image on MTD * - return 1 if matched, 0 if differ, -ve if error */ static int romfs_mtd_strcmp(struct super_block *sb, unsigned long pos, const char *str, size_t size) { u_char buf[17]; size_t len, segment; int ret; /* scan the string up to 16 bytes at a time, and attempt to grab the * trailing NUL whilst we're at it */ buf[0] = 0xff; while (size > 0) { segment = min_t(size_t, size + 1, 17); ret = ROMFS_MTD_READ(sb, pos, segment, &len, buf); if (ret < 0) return ret; len--; if (memcmp(buf, str, len) != 0) return 0; buf[0] = buf[len]; size -= len; pos += len; str += len; } /* check the trailing NUL was */ if (buf[0]) return 0; return 1; } #endif /* CONFIG_ROMFS_ON_MTD */ #ifdef CONFIG_ROMFS_ON_BLOCK /* * read data from an romfs image on a block device */ static int romfs_blk_read(struct super_block *sb, unsigned long pos, void *buf, size_t buflen) { struct buffer_head *bh; unsigned long offset; size_t segment; /* copy the string up to blocksize bytes at a time */ while (buflen > 0) { offset = pos & (ROMBSIZE - 1); segment = min_t(size_t, buflen, ROMBSIZE - offset); bh = sb_bread(sb, pos >> ROMBSBITS); if (!bh) return -EIO; memcpy(buf, bh->b_data + offset, segment); brelse(bh); buf += segment; buflen -= segment; pos += segment; } return 0; } /* * determine the length of a string in romfs on a block device */ static ssize_t romfs_blk_strnlen(struct super_block *sb, unsigned long pos, size_t limit) { struct buffer_head *bh; unsigned long offset; ssize_t n = 0; size_t segment; u_char *buf, *p; /* scan the string up to blocksize bytes at a time */ while (limit > 0) { offset = pos & (ROMBSIZE - 1); segment = min_t(size_t, limit, ROMBSIZE - offset); bh = sb_bread(sb, pos >> ROMBSBITS); if (!bh) return -EIO; buf = bh->b_data + offset; p = memchr(buf, 0, segment); brelse(bh); if (p) return n + (p - buf); limit -= segment; pos += segment; n += segment; } return n; } /* * compare a string to one in a romfs image on a block device * - return 1 if matched, 0 if differ, -ve if 
error */ static int romfs_blk_strcmp(struct super_block *sb, unsigned long pos, const char *str, size_t size) { struct buffer_head *bh; unsigned long offset; size_t segment; bool matched, terminated = false; /* compare string up to a block at a time */ while (size > 0) { offset = pos & (ROMBSIZE - 1); segment = min_t(size_t, size, ROMBSIZE - offset); bh = sb_bread(sb, pos >> ROMBSBITS); if (!bh) return -EIO; matched = (memcmp(bh->b_data + offset, str, segment) == 0); size -= segment; pos += segment; str += segment; if (matched && size == 0 && offset + segment < ROMBSIZE) { if (!bh->b_data[offset + segment]) terminated = true; else matched = false; } brelse(bh); if (!matched) return 0; } if (!terminated) { /* the terminating NUL must be on the first byte of the next * block */ BUG_ON((pos & (ROMBSIZE - 1)) != 0); bh = sb_bread(sb, pos >> ROMBSBITS); if (!bh) return -EIO; matched = !bh->b_data[0]; brelse(bh); if (!matched) return 0; } return 1; } #endif /* CONFIG_ROMFS_ON_BLOCK */ /* * read data from the romfs image */ int romfs_dev_read(struct super_block *sb, unsigned long pos, void *buf, size_t buflen) { size_t limit; limit = romfs_maxsize(sb); if (pos >= limit) return -EIO; if (buflen > limit - pos) buflen = limit - pos; #ifdef CONFIG_ROMFS_ON_MTD if (sb->s_mtd) return romfs_mtd_read(sb, pos, buf, buflen); #endif #ifdef CONFIG_ROMFS_ON_BLOCK if (sb->s_bdev) return romfs_blk_read(sb, pos, buf, buflen); #endif return -EIO; } /* * determine the length of a string in romfs */ ssize_t romfs_dev_strnlen(struct super_block *sb, unsigned long pos, size_t maxlen) { size_t limit; limit = romfs_maxsize(sb); if (pos >= limit) return -EIO; if (maxlen > limit - pos) maxlen = limit - pos; #ifdef CONFIG_ROMFS_ON_MTD if (sb->s_mtd) return romfs_mtd_strnlen(sb, pos, maxlen); #endif #ifdef CONFIG_ROMFS_ON_BLOCK if (sb->s_bdev) return romfs_blk_strnlen(sb, pos, maxlen); #endif return -EIO; } /* * compare a string to one in romfs * - the string to be compared to, str, may not be NUL-terminated; instead the * string is of the specified size * - return 1 if matched, 0 if differ, -ve if error */ int romfs_dev_strcmp(struct super_block *sb, unsigned long pos, const char *str, size_t size) { size_t limit; limit = romfs_maxsize(sb); if (pos >= limit) return -EIO; if (size > ROMFS_MAXFN) return -ENAMETOOLONG; if (size + 1 > limit - pos) return -EIO; #ifdef CONFIG_ROMFS_ON_MTD if (sb->s_mtd) return romfs_mtd_strcmp(sb, pos, str, size); #endif #ifdef CONFIG_ROMFS_ON_BLOCK if (sb->s_bdev) return romfs_blk_strcmp(sb, pos, str, size); #endif return -EIO; }
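romfs_mtd_strnlen() above measures a string it cannot read in one go by scanning fixed-size 16-byte segments and using memchr() to spot the terminating NUL; romfs_blk_strnlen() applies the same loop per block. A userspace model of that loop over a flat buffer standing in for the device (short reads and I/O errors elided; names hypothetical):

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/* Measure a string in 16-byte segments, never scanning past maxlen. */
static ssize_t chunked_strnlen(const char *img, size_t pos, size_t maxlen)
{
	ssize_t n = 0;

	while (maxlen > 0) {
		size_t segment = maxlen < 16 ? maxlen : 16;
		const char *p = memchr(img + pos, 0, segment);

		if (p)
			return n + (p - (img + pos));
		maxlen -= segment;
		pos += segment;
		n += segment;
	}
	return n;	/* no NUL within maxlen */
}

int main(void)
{
	char img[64] = "a-romfs-file-name";	/* zero padded to 64 bytes */
	printf("len = %zd\n", chunked_strnlen(img, 0, sizeof(img)));
	return 0;
}

The segment size bounds how much has to sit in a stack buffer at once, which is the whole point of the chunked scan in the MTD case.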
gpl-2.0
AICP/kernel_oppo_msm8974
drivers/uwb/scan.c
10034
4150
/* * Ultra Wide Band * Scanning management * * Copyright (C) 2005-2006 Intel Corporation * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * * * FIXME: docs * FIXME: there are issues here on how BEACON and SCAN on USB RCI deal * with each other. Currently it seems that START_BEACON while * SCAN_ONLY will cancel the scan, so we need to update the * state here. Clarification request sent by email on * 10/05/2005. * 10/28/2005 No clear answer heard--maybe we'll hack the API * so that when we start beaconing, if the HC is * scanning in a mode not compatible with beaconing * we just fail. */ #include <linux/device.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/stat.h> #include "uwb-internal.h" /** * Start/stop scanning in a radio controller * * @rc: UWB Radio Controller * @channel: Channel to scan; encodings in WUSB1.0[Table 5.12] * @type: Type of scanning to do. * @bpst_offset: value at which to start scanning (if type == * UWB_SCAN_ONLY_STARTTIME) * @returns: 0 if ok, < 0 errno code on error * * We put the command on kmalloc'ed memory as some arches cannot do * USB from the stack. The reply event is copied from a staging buffer, * so it can be in the stack. See WUSB1.0[8.6.2.4] for more details.
*/ int uwb_rc_scan(struct uwb_rc *rc, unsigned channel, enum uwb_scan_type type, unsigned bpst_offset) { int result; struct uwb_rc_cmd_scan *cmd; struct uwb_rc_evt_confirm reply; result = -ENOMEM; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (cmd == NULL) goto error_kzalloc; mutex_lock(&rc->uwb_dev.mutex); cmd->rccb.bCommandType = UWB_RC_CET_GENERAL; cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SCAN); cmd->bChannelNumber = channel; cmd->bScanState = type; cmd->wStartTime = cpu_to_le16(bpst_offset); reply.rceb.bEventType = UWB_RC_CET_GENERAL; reply.rceb.wEvent = UWB_RC_CMD_SCAN; result = uwb_rc_cmd(rc, "SCAN", &cmd->rccb, sizeof(*cmd), &reply.rceb, sizeof(reply)); if (result < 0) goto error_cmd; if (reply.bResultCode != UWB_RC_RES_SUCCESS) { dev_err(&rc->uwb_dev.dev, "SCAN: command execution failed: %s (%d)\n", uwb_rc_strerror(reply.bResultCode), reply.bResultCode); result = -EIO; goto error_cmd; } rc->scanning = channel; rc->scan_type = type; error_cmd: mutex_unlock(&rc->uwb_dev.mutex); kfree(cmd); error_kzalloc: return result; } /* * Print scanning state */ static ssize_t uwb_rc_scan_show(struct device *dev, struct device_attribute *attr, char *buf) { struct uwb_dev *uwb_dev = to_uwb_dev(dev); struct uwb_rc *rc = uwb_dev->rc; ssize_t result; mutex_lock(&rc->uwb_dev.mutex); result = sprintf(buf, "%d %d\n", rc->scanning, rc->scan_type); mutex_unlock(&rc->uwb_dev.mutex); return result; } /* * */ static ssize_t uwb_rc_scan_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct uwb_dev *uwb_dev = to_uwb_dev(dev); struct uwb_rc *rc = uwb_dev->rc; unsigned channel; unsigned type; unsigned bpst_offset = 0; ssize_t result = -EINVAL; result = sscanf(buf, "%u %u %u\n", &channel, &type, &bpst_offset); if (result >= 2 && type < UWB_SCAN_TOP) result = uwb_rc_scan(rc, channel, type, bpst_offset); return result < 0 ? result : size; } /** Radio Control sysfs interface (declaration) */ DEVICE_ATTR(scan, S_IRUGO | S_IWUSR, uwb_rc_scan_show, uwb_rc_scan_store);
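uwb_rc_scan_store() above accepts "channel type [bpst_offset]": sscanf()'s return value gates the call, so the third field is optional and keeps its 0 default. Note that when fewer than two fields convert, the function skips the scan yet still returns size, so a malformed write is silently reported as success. A small userspace sketch of that parsing contract (hypothetical names, not the sysfs code itself):

#include <stdio.h>

/* sscanf() reports how many fields converted: two are mandatory,
 * bpst_offset is optional. The driver additionally requires
 * type < UWB_SCAN_TOP before issuing the SCAN command. */
static int parse_scan(const char *buf)
{
	unsigned channel, type, bpst_offset = 0;
	int n = sscanf(buf, "%u %u %u", &channel, &type, &bpst_offset);

	if (n < 2)
		return -1;	/* the driver would not start a scan */
	printf("channel=%u type=%u bpst_offset=%u\n",
	       channel, type, bpst_offset);
	return 0;
}

int main(void)
{
	parse_scan("9 1");	/* channel 9, scan type 1 */
	parse_scan("9 4 100");	/* with an explicit start time */
	parse_scan("bogus");	/* rejected: no fields convert */
	return 0;
}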
gpl-2.0
sakuraba001/android_kernel_samsung_tblte
drivers/staging/android/ion/ion_cma_heap.c
51
6586
/* * drivers/gpu/ion/ion_cma_heap.c * * Copyright (C) Linaro 2012 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/device.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/dma-mapping.h> #include <linux/msm_ion.h> #include <linux/highmem.h> #include <asm/cacheflush.h> #include "ion.h" #include "ion_priv.h" #define ION_CMA_ALLOCATE_FAILED -1 struct ion_cma_buffer_info { void *cpu_addr; dma_addr_t handle; struct sg_table *table; bool is_cached; }; static int cma_heap_has_outer_cache; /* * Create scatter-list for the already allocated DMA buffer. * This function could be replace by dma_common_get_sgtable * as soon as it will avalaible. */ static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t handle, size_t size) { struct page *page = pfn_to_page(PFN_DOWN(handle)); int ret; ret = sg_alloc_table(sgt, 1, GFP_KERNEL); if (unlikely(ret)) return ret; sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); return 0; } /* ION CMA heap operations functions */ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, unsigned long len, unsigned long align, unsigned long flags) { struct device *dev = heap->priv; struct ion_cma_buffer_info *info; dev_dbg(dev, "Request buffer allocation len %ld\n", len); info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL); if (!info) { dev_err(dev, "Can't allocate buffer info\n"); return ION_CMA_ALLOCATE_FAILED; } if (!ION_IS_CACHED(flags)) info->cpu_addr = dma_alloc_writecombine(dev, len, &(info->handle), GFP_KERNEL); else info->cpu_addr = dma_alloc_nonconsistent(dev, len, &(info->handle), GFP_KERNEL); if (!info->cpu_addr) { dev_err(dev, "Fail to allocate buffer\n"); goto err; } info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); if (!info->table) { dev_err(dev, "Fail to allocate sg table\n"); goto err; } info->is_cached = ION_IS_CACHED(flags); ion_cma_get_sgtable(dev, info->table, info->cpu_addr, info->handle, len); /* keep this for memory release */ buffer->priv_virt = info; dev_dbg(dev, "Allocate buffer %p\n", buffer); if (heap->id == 27) { // printk("[ION_alloc id==27|QSEECOM] 0x%p/0x%x => kmap_flush_unused\n", (void*)info->handle, (unsigned int)len); kmap_flush_unused(); } return 0; err: kfree(info); return ION_CMA_ALLOCATE_FAILED; } static void ion_cma_free(struct ion_buffer *buffer) { struct device *dev = buffer->heap->priv; struct ion_cma_buffer_info *info = buffer->priv_virt; dev_dbg(dev, "Release buffer %p\n", buffer); /* release memory */ dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle); sg_free_table(info->table); /* release sg table */ kfree(info->table); kfree(info); } /* return physical address in addr */ static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer, ion_phys_addr_t *addr, size_t *len) { struct device *dev = heap->priv; struct ion_cma_buffer_info *info = buffer->priv_virt; dev_dbg(dev, "Return buffer %p physical address 0x%pa\n", buffer, &info->handle); *addr = info->handle; *len = buffer->size; return 0; } static struct 
sg_table *ion_cma_heap_map_dma(struct ion_heap *heap, struct ion_buffer *buffer) { struct ion_cma_buffer_info *info = buffer->priv_virt; return info->table; } static void ion_cma_heap_unmap_dma(struct ion_heap *heap, struct ion_buffer *buffer) { return; } static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer, struct vm_area_struct *vma) { struct device *dev = buffer->heap->priv; struct ion_cma_buffer_info *info = buffer->priv_virt; #ifdef CONFIG_TIMA_RKP if ((buffer->size) && (boot_mode_security == 1)) { /* iommu optimization- needs to be turned ON from * the tz side. */ cpu_v7_tima_iommu_opt(vma->vm_start, vma->vm_end, (unsigned long)vma->vm_mm->pgd); __asm__ __volatile__ ( "mcr p15, 0, r0, c8, c3, 0\n" "dsb\n" "isb\n"); } #endif if (info->is_cached) return dma_mmap_nonconsistent(dev, vma, info->cpu_addr, info->handle, buffer->size); else return dma_mmap_writecombine(dev, vma, info->cpu_addr, info->handle, buffer->size); } static void *ion_cma_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer) { struct ion_cma_buffer_info *info = buffer->priv_virt; return info->cpu_addr; } static void ion_cma_unmap_kernel(struct ion_heap *heap, struct ion_buffer *buffer) { return; } static int ion_cma_print_debug(struct ion_heap *heap, struct seq_file *s, const struct list_head *mem_map) { if (mem_map) { struct mem_map_data *data; seq_printf(s, "\nMemory Map\n"); seq_printf(s, "%16.s %14.s %14.s %14.s\n", "client", "start address", "end address", "size (hex)"); list_for_each_entry(data, mem_map, node) { const char *client_name = "(null)"; if (data->client_name) client_name = data->client_name; seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n", client_name, &data->addr, &data->addr_end, data->size, data->size); } } return 0; } static struct ion_heap_ops ion_cma_ops = { .allocate = ion_cma_allocate, .free = ion_cma_free, .map_dma = ion_cma_heap_map_dma, .unmap_dma = ion_cma_heap_unmap_dma, .phys = ion_cma_phys, .map_user = ion_cma_mmap, .map_kernel = ion_cma_map_kernel, .unmap_kernel = ion_cma_unmap_kernel, .print_debug = ion_cma_print_debug, }; struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data) { struct ion_heap *heap; heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL); if (!heap) return ERR_PTR(-ENOMEM); heap->ops = &ion_cma_ops; /* set device as private heaps data, later it will be * used to make the link with reserved CMA memory */ heap->priv = data->priv; heap->type = ION_HEAP_TYPE_DMA; cma_heap_has_outer_cache = data->has_outer_cache; return heap; } void ion_cma_heap_destroy(struct ion_heap *heap) { kfree(heap); }
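ion_cma_get_sgtable() above exploits the fact that a CMA allocation is physically contiguous: the entire buffer is described by a scatterlist of exactly one entry (its comment notes dma_common_get_sgtable() as the eventual replacement). A hedged kernel-style sketch of the consumer side, assuming only <linux/scatterlist.h>; for a one-entry table any walk degenerates to a single iteration covering the whole buffer:

#include <linux/printk.h>
#include <linux/scatterlist.h>

/* Hypothetical consumer: with the single-entry table built by the CMA
 * heap, this loop runs exactly once and reports the full buffer. */
static void walk_cma_sgtable(struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i)
		pr_info("seg %d: page %p, len %u\n",
			i, sg_page(sg), sg->length);
}

That single-segment property is why DMA users of this heap never need bounce buffers or segment merging.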
gpl-2.0
saturday06/fizzbuzz-linux-kernel
arch/arm/kernel/arch_timer.c
51
8064
/* * linux/arch/arm/kernel/arch_timer.c * * Copyright (C) 2011 ARM Ltd. * All Rights Reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/smp.h> #include <linux/cpu.h> #include <linux/jiffies.h> #include <linux/clockchips.h> #include <linux/interrupt.h> #include <linux/of_irq.h> #include <linux/io.h> #include <asm/cputype.h> #include <asm/localtimer.h> #include <asm/arch_timer.h> #include <asm/system_info.h> #include <asm/sched_clock.h> static unsigned long arch_timer_rate; static int arch_timer_ppi; static int arch_timer_ppi2; static struct clock_event_device __percpu **arch_timer_evt; /* * Architected system timer support. */ #define ARCH_TIMER_CTRL_ENABLE (1 << 0) #define ARCH_TIMER_CTRL_IT_MASK (1 << 1) #define ARCH_TIMER_CTRL_IT_STAT (1 << 2) #define ARCH_TIMER_REG_CTRL 0 #define ARCH_TIMER_REG_FREQ 1 #define ARCH_TIMER_REG_TVAL 2 static void arch_timer_reg_write(int reg, u32 val) { switch (reg) { case ARCH_TIMER_REG_CTRL: asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val)); break; case ARCH_TIMER_REG_TVAL: asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val)); break; } isb(); } static u32 arch_timer_reg_read(int reg) { u32 val; switch (reg) { case ARCH_TIMER_REG_CTRL: asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val)); break; case ARCH_TIMER_REG_FREQ: asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val)); break; case ARCH_TIMER_REG_TVAL: asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val)); break; default: BUG(); } return val; } static irqreturn_t arch_timer_handler(int irq, void *dev_id) { struct clock_event_device *evt = *(struct clock_event_device **)dev_id; unsigned long ctrl; ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL); if (ctrl & ARCH_TIMER_CTRL_IT_STAT) { ctrl |= ARCH_TIMER_CTRL_IT_MASK; arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl); evt->event_handler(evt); return IRQ_HANDLED; } return IRQ_NONE; } static void arch_timer_disable(void) { unsigned long ctrl; ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL); ctrl &= ~ARCH_TIMER_CTRL_ENABLE; arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl); } static void arch_timer_set_mode(enum clock_event_mode mode, struct clock_event_device *clk) { switch (mode) { case CLOCK_EVT_MODE_UNUSED: case CLOCK_EVT_MODE_SHUTDOWN: arch_timer_disable(); break; default: break; } } static int arch_timer_set_next_event(unsigned long evt, struct clock_event_device *unused) { unsigned long ctrl; ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL); ctrl |= ARCH_TIMER_CTRL_ENABLE; ctrl &= ~ARCH_TIMER_CTRL_IT_MASK; arch_timer_reg_write(ARCH_TIMER_REG_TVAL, evt); arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl); return 0; } static int __cpuinit arch_timer_setup(struct clock_event_device *clk) { /* Be safe... */ arch_timer_disable(); clk->features = CLOCK_EVT_FEAT_ONESHOT; clk->name = "arch_sys_timer"; clk->rating = 450; clk->set_mode = arch_timer_set_mode; clk->set_next_event = arch_timer_set_next_event; clk->irq = arch_timer_ppi; clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff); *__this_cpu_ptr(arch_timer_evt) = clk; enable_percpu_irq(clk->irq, 0); if (arch_timer_ppi2) enable_percpu_irq(arch_timer_ppi2, 0); return 0; } /* Is the optional system timer available? 
*/ static int local_timer_is_architected(void) { return (cpu_architecture() >= CPU_ARCH_ARMv7) && ((read_cpuid_ext(CPUID_EXT_PFR1) >> 16) & 0xf) == 1; } static int arch_timer_available(void) { unsigned long freq; if (!local_timer_is_architected()) return -ENXIO; if (arch_timer_rate == 0) { arch_timer_reg_write(ARCH_TIMER_REG_CTRL, 0); freq = arch_timer_reg_read(ARCH_TIMER_REG_FREQ); /* Check the timer frequency. */ if (freq == 0) { pr_warn("Architected timer frequency not available\n"); return -EINVAL; } arch_timer_rate = freq; } pr_info_once("Architected local timer running at %lu.%02luMHz.\n", arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100); return 0; } static inline cycle_t arch_counter_get_cntpct(void) { u32 cvall, cvalh; asm volatile("mrrc p15, 0, %0, %1, c14" : "=r" (cvall), "=r" (cvalh)); return ((cycle_t) cvalh << 32) | cvall; } static inline cycle_t arch_counter_get_cntvct(void) { u32 cvall, cvalh; asm volatile("mrrc p15, 1, %0, %1, c14" : "=r" (cvall), "=r" (cvalh)); return ((cycle_t) cvalh << 32) | cvall; } static u32 notrace arch_counter_get_cntvct32(void) { cycle_t cntvct = arch_counter_get_cntvct(); /* * The sched_clock infrastructure only knows about counters * with at most 32bits. Forget about the upper 24 bits for the * time being... */ return (u32)(cntvct & (u32)~0); } static cycle_t arch_counter_read(struct clocksource *cs) { return arch_counter_get_cntpct(); } static struct clocksource clocksource_counter = { .name = "arch_sys_counter", .rating = 400, .read = arch_counter_read, .mask = CLOCKSOURCE_MASK(56), .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; static void __cpuinit arch_timer_stop(struct clock_event_device *clk) { pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n", clk->irq, smp_processor_id()); disable_percpu_irq(clk->irq); if (arch_timer_ppi2) disable_percpu_irq(arch_timer_ppi2); arch_timer_set_mode(CLOCK_EVT_MODE_UNUSED, clk); } static struct local_timer_ops arch_timer_ops __cpuinitdata = { .setup = arch_timer_setup, .stop = arch_timer_stop, }; static struct clock_event_device arch_timer_global_evt; static int __init arch_timer_register(void) { int err; err = arch_timer_available(); if (err) return err; arch_timer_evt = alloc_percpu(struct clock_event_device *); if (!arch_timer_evt) return -ENOMEM; clocksource_register_hz(&clocksource_counter, arch_timer_rate); err = request_percpu_irq(arch_timer_ppi, arch_timer_handler, "arch_timer", arch_timer_evt); if (err) { pr_err("arch_timer: can't register interrupt %d (%d)\n", arch_timer_ppi, err); goto out_free; } if (arch_timer_ppi2) { err = request_percpu_irq(arch_timer_ppi2, arch_timer_handler, "arch_timer", arch_timer_evt); if (err) { pr_err("arch_timer: can't register interrupt %d (%d)\n", arch_timer_ppi2, err); arch_timer_ppi2 = 0; goto out_free_irq; } } err = local_timer_register(&arch_timer_ops); if (err) { /* * We couldn't register as a local timer (could be * because we're on a UP platform, or because some * other local timer is already present...). Try as a * global timer instead. 
*/ arch_timer_global_evt.cpumask = cpumask_of(0); err = arch_timer_setup(&arch_timer_global_evt); } if (err) goto out_free_irq; return 0; out_free_irq: free_percpu_irq(arch_timer_ppi, arch_timer_evt); if (arch_timer_ppi2) free_percpu_irq(arch_timer_ppi2, arch_timer_evt); out_free: free_percpu(arch_timer_evt); return err; } static const struct of_device_id arch_timer_of_match[] __initconst = { { .compatible = "arm,armv7-timer", }, {}, }; int __init arch_timer_of_register(void) { struct device_node *np; u32 freq; np = of_find_matching_node(NULL, arch_timer_of_match); if (!np) { pr_err("arch_timer: can't find DT node\n"); return -ENODEV; } /* Try to determine the frequency from the device tree or CNTFRQ */ if (!of_property_read_u32(np, "clock-frequency", &freq)) arch_timer_rate = freq; arch_timer_ppi = irq_of_parse_and_map(np, 0); arch_timer_ppi2 = irq_of_parse_and_map(np, 1); pr_info("arch_timer: found %s irqs %d %d\n", np->name, arch_timer_ppi, arch_timer_ppi2); return arch_timer_register(); } int __init arch_timer_sched_clock_init(void) { int err; err = arch_timer_available(); if (err) return err; setup_sched_clock(arch_counter_get_cntvct32, 32, arch_timer_rate); return 0; }
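arch_counter_get_cntvct() above reads the 64-bit virtual counter as two 32-bit halves (one mrrc instruction) and glues them together; arch_counter_get_cntvct32() then keeps only the low word because setup_sched_clock() is handed just 32 bits. Since the architected counter is at least 56 bits wide (see CLOCKSOURCE_MASK(56)), that truncation discards 24 significant bits, as the in-code comment says. A userspace sketch of the composition and of how quickly the 32-bit view wraps, assuming a hypothetical 19.2 MHz CNTFRQ:

#include <stdio.h>
#include <stdint.h>

/* Model of arch_counter_get_cntvct(): two 32-bit halves -> one cycle_t. */
static uint64_t compose(uint32_t lo, uint32_t hi)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	uint64_t cntvct = compose(0xdeadbeef, 0x12);
	uint32_t sched  = (uint32_t)cntvct;	/* upper bits dropped */
	double rate_hz  = 19.2e6;		/* hypothetical CNTFRQ */

	printf("cntvct = 0x%016llx, sched_clock input = 0x%08x\n",
	       (unsigned long long)cntvct, sched);
	/* a 32-bit view of the counter wraps every 2^32 / rate seconds */
	printf("32-bit wrap every %.1f s\n", 4294967296.0 / rate_hz);
	return 0;
}

At 19.2 MHz the truncated counter wraps roughly every 224 seconds, which the sched_clock core is built to tolerate; the full 56-bit clocksource wraps only after years.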
gpl-2.0
qhh7812/android_kernel_htc_ville-lp
drivers/usb/gadget/f_rmnet_sdio.c
307
38894
/* * f_rmnet_sdio.c -- RmNet SDIO function driver * * Copyright (C) 2003-2005,2008 David Brownell * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com) * Copyright (C) 2008 Nokia Corporation * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/err.h> #include <linux/list.h> #include <linux/device.h> #include <linux/workqueue.h> #include <linux/netdevice.h> #include <linux/usb/cdc.h> #include <linux/usb/composite.h> #include <linux/usb/ch9.h> #include <linux/termios.h> #include <linux/debugfs.h> #include <mach/sdio_cmux.h> #include <mach/sdio_dmux.h> #ifdef CONFIG_RMNET_SDIO_CTL_CHANNEL static uint32_t rmnet_sdio_ctl_ch = CONFIG_RMNET_SDIO_CTL_CHANNEL; #else static uint32_t rmnet_sdio_ctl_ch; #endif module_param(rmnet_sdio_ctl_ch, uint, S_IRUGO); MODULE_PARM_DESC(rmnet_sdio_ctl_ch, "RmNet control SDIO channel ID"); #ifdef CONFIG_RMNET_SDIO_DATA_CHANNEL static uint32_t rmnet_sdio_data_ch = CONFIG_RMNET_SDIO_DATA_CHANNEL; #else static uint32_t rmnet_sdio_data_ch; #endif module_param(rmnet_sdio_data_ch, uint, S_IRUGO); MODULE_PARM_DESC(rmnet_sdio_data_ch, "RmNet data SDIO channel ID"); #define ACM_CTRL_DTR (1 << 0) #define SDIO_MUX_HDR 8 #define RMNET_SDIO_NOTIFY_INTERVAL 5 #define RMNET_SDIO_MAX_NFY_SZE sizeof(struct usb_cdc_notification) #define RMNET_SDIO_RX_REQ_MAX 16 #define RMNET_SDIO_RX_REQ_SIZE 2048 #define RMNET_SDIO_TX_REQ_MAX 200 #define TX_PKT_DROP_THRESHOLD 1000 #define RX_PKT_FLOW_CTRL_EN_THRESHOLD 1000 #define RX_PKT_FLOW_CTRL_DISABLE 500 unsigned int sdio_tx_pkt_drop_thld = TX_PKT_DROP_THRESHOLD; module_param(sdio_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR); unsigned int sdio_rx_fctrl_en_thld = RX_PKT_FLOW_CTRL_EN_THRESHOLD; module_param(sdio_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR); unsigned int sdio_rx_fctrl_dis_thld = RX_PKT_FLOW_CTRL_DISABLE; module_param(sdio_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR); struct rmnet_sdio_qmi_buf { void *buf; int len; struct list_head list; }; struct rmnet_sdio_dev { struct usb_function function; struct usb_composite_dev *cdev; struct usb_ep *epout; struct usb_ep *epin; struct usb_ep *epnotify; struct usb_request *notify_req; u8 ifc_id; struct list_head qmi_req_q; unsigned int qreq_q_len; struct list_head qmi_resp_q; unsigned int qresp_q_len; struct list_head tx_idle; unsigned int tx_idle_len; struct sk_buff_head tx_skb_queue; struct list_head rx_idle; unsigned int rx_idle_len; struct sk_buff_head rx_skb_queue; spinlock_t lock; atomic_t online; atomic_t notify_count; struct workqueue_struct *wq; struct work_struct disconnect_work; struct work_struct ctl_rx_work; struct work_struct data_rx_work; struct delayed_work sdio_open_work; struct work_struct sdio_close_work; #define 
RMNET_SDIO_CH_OPEN 1 unsigned long data_ch_status; unsigned long ctrl_ch_status; unsigned int dpkts_pending_atdmux; int cbits_to_modem; struct work_struct set_modem_ctl_bits_work; struct dentry *dent; unsigned long dpkt_tolaptop; unsigned long dpkt_tomodem; unsigned long tx_drp_cnt; unsigned long cpkt_tolaptop; unsigned long cpkt_tomodem; }; static struct usb_interface_descriptor rmnet_sdio_interface_desc = { .bLength = USB_DT_INTERFACE_SIZE, .bDescriptorType = USB_DT_INTERFACE, .bNumEndpoints = 3, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .bInterfaceSubClass = USB_CLASS_VENDOR_SPEC, .bInterfaceProtocol = USB_CLASS_VENDOR_SPEC, }; static struct usb_endpoint_descriptor rmnet_sdio_fs_notify_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_INT, .wMaxPacketSize = __constant_cpu_to_le16(RMNET_SDIO_MAX_NFY_SZE), .bInterval = 1 << RMNET_SDIO_NOTIFY_INTERVAL, }; static struct usb_endpoint_descriptor rmnet_sdio_fs_in_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = __constant_cpu_to_le16(64), }; static struct usb_endpoint_descriptor rmnet_sdio_fs_out_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = __constant_cpu_to_le16(64), }; static struct usb_descriptor_header *rmnet_sdio_fs_function[] = { (struct usb_descriptor_header *) &rmnet_sdio_interface_desc, (struct usb_descriptor_header *) &rmnet_sdio_fs_notify_desc, (struct usb_descriptor_header *) &rmnet_sdio_fs_in_desc, (struct usb_descriptor_header *) &rmnet_sdio_fs_out_desc, NULL, }; static struct usb_endpoint_descriptor rmnet_sdio_hs_notify_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_INT, .wMaxPacketSize = __constant_cpu_to_le16(RMNET_SDIO_MAX_NFY_SZE), .bInterval = RMNET_SDIO_NOTIFY_INTERVAL + 4, }; static struct usb_endpoint_descriptor rmnet_sdio_hs_in_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = __constant_cpu_to_le16(512), }; static struct usb_endpoint_descriptor rmnet_sdio_hs_out_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = __constant_cpu_to_le16(512), }; static struct usb_descriptor_header *rmnet_sdio_hs_function[] = { (struct usb_descriptor_header *) &rmnet_sdio_interface_desc, (struct usb_descriptor_header *) &rmnet_sdio_hs_notify_desc, (struct usb_descriptor_header *) &rmnet_sdio_hs_in_desc, (struct usb_descriptor_header *) &rmnet_sdio_hs_out_desc, NULL, }; static struct usb_string rmnet_sdio_string_defs[] = { [0].s = "QMI RmNet", { } }; static struct usb_gadget_strings rmnet_sdio_string_table = { .language = 0x0409, .strings = rmnet_sdio_string_defs, }; static struct usb_gadget_strings *rmnet_sdio_strings[] = { &rmnet_sdio_string_table, NULL, }; static struct rmnet_sdio_qmi_buf * rmnet_sdio_alloc_qmi(unsigned len, gfp_t kmalloc_flags) { struct rmnet_sdio_qmi_buf *qmi; qmi = kmalloc(sizeof(struct rmnet_sdio_qmi_buf), kmalloc_flags); if (qmi != NULL) { qmi->buf = kmalloc(len, kmalloc_flags); if (qmi->buf == NULL) { kfree(qmi); qmi = NULL; } } return qmi ? 
qmi : ERR_PTR(-ENOMEM); } static void rmnet_sdio_free_qmi(struct rmnet_sdio_qmi_buf *qmi) { kfree(qmi->buf); kfree(qmi); } static struct usb_request * rmnet_sdio_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags) { struct usb_request *req; req = usb_ep_alloc_request(ep, kmalloc_flags); if (len && req != NULL) { req->length = len; req->buf = kmalloc(len, kmalloc_flags); if (req->buf == NULL) { usb_ep_free_request(ep, req); req = NULL; } } return req ? req : ERR_PTR(-ENOMEM); } static void rmnet_sdio_free_req(struct usb_ep *ep, struct usb_request *req) { kfree(req->buf); usb_ep_free_request(ep, req); } static void rmnet_sdio_notify_complete(struct usb_ep *ep, struct usb_request *req) { struct rmnet_sdio_dev *dev = req->context; struct usb_composite_dev *cdev = dev->cdev; int status = req->status; switch (status) { case -ECONNRESET: case -ESHUTDOWN: atomic_set(&dev->notify_count, 0); break; default: ERROR(cdev, "rmnet notifyep error %d\n", status); case 0: if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) return; if (atomic_dec_and_test(&dev->notify_count)) break; status = usb_ep_queue(dev->epnotify, req, GFP_ATOMIC); if (status) { atomic_dec(&dev->notify_count); ERROR(cdev, "rmnet notify ep enq error %d\n", status); } break; } } static void rmnet_sdio_qmi_resp_available(struct rmnet_sdio_dev *dev) { struct usb_composite_dev *cdev = dev->cdev; struct usb_cdc_notification *event; int status; unsigned long flags; if (atomic_inc_return(&dev->notify_count) != 1) return; spin_lock_irqsave(&dev->lock, flags); if (!atomic_read(&dev->online)) { spin_unlock_irqrestore(&dev->lock, flags); return; } event = dev->notify_req->buf; event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE; event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE; event->wValue = cpu_to_le16(0); event->wIndex = cpu_to_le16(dev->ifc_id); event->wLength = cpu_to_le16(0); spin_unlock_irqrestore(&dev->lock, flags); status = usb_ep_queue(dev->epnotify, dev->notify_req, GFP_ATOMIC); if (status < 0) { if (atomic_read(&dev->online)) atomic_dec(&dev->notify_count); ERROR(cdev, "rmnet notify ep enqueue error %d\n", status); } } #define SDIO_MAX_CTRL_PKT_SIZE 4096 static void rmnet_sdio_ctl_receive_cb(void *data, int size, void *priv) { struct rmnet_sdio_dev *dev = priv; struct usb_composite_dev *cdev = dev->cdev; struct rmnet_sdio_qmi_buf *qmi_resp; unsigned long flags; if (!data) { pr_info("%s: cmux_ch close event\n", __func__); if (test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status) && test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) { clear_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status); clear_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status); queue_work(dev->wq, &dev->sdio_close_work); } return; } if (!size || !test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) return; if (size > SDIO_MAX_CTRL_PKT_SIZE) { ERROR(cdev, "ctrl pkt size:%d exceeds max pkt size:%d\n", size, SDIO_MAX_CTRL_PKT_SIZE); return; } if (!atomic_read(&dev->online)) { DBG(cdev, "USB disconnected\n"); return; } qmi_resp = rmnet_sdio_alloc_qmi(size, GFP_KERNEL); if (IS_ERR(qmi_resp)) { DBG(cdev, "unable to allocate memory for QMI resp\n"); return; } memcpy(qmi_resp->buf, data, size); qmi_resp->len = size; spin_lock_irqsave(&dev->lock, flags); list_add_tail(&qmi_resp->list, &dev->qmi_resp_q); dev->qresp_q_len++; spin_unlock_irqrestore(&dev->lock, flags); rmnet_sdio_qmi_resp_available(dev); } static void rmnet_sdio_ctl_write_done(void *data, int size, void *priv) { struct rmnet_sdio_dev *dev = priv; struct usb_composite_dev *cdev 
= dev->cdev; VDBG(cdev, "rmnet control write done = %d bytes\n", size); } static void rmnet_sdio_sts_callback(int id, void *priv) { struct rmnet_sdio_dev *dev = priv; struct usb_composite_dev *cdev = dev->cdev; DBG(cdev, "rmnet_sdio_sts_callback: id: %d\n", id); } static void rmnet_sdio_control_rx_work(struct work_struct *w) { struct rmnet_sdio_dev *dev = container_of(w, struct rmnet_sdio_dev, ctl_rx_work); struct usb_composite_dev *cdev = dev->cdev; struct rmnet_sdio_qmi_buf *qmi_req; unsigned long flags; int ret; while (1) { spin_lock_irqsave(&dev->lock, flags); if (list_empty(&dev->qmi_req_q)) goto unlock; qmi_req = list_first_entry(&dev->qmi_req_q, struct rmnet_sdio_qmi_buf, list); list_del(&qmi_req->list); dev->qreq_q_len--; spin_unlock_irqrestore(&dev->lock, flags); ret = sdio_cmux_write(rmnet_sdio_ctl_ch, qmi_req->buf, qmi_req->len); if (ret != qmi_req->len) { ERROR(cdev, "rmnet control SDIO write failed\n"); return; } dev->cpkt_tomodem++; rmnet_sdio_free_qmi(qmi_req); } unlock: spin_unlock_irqrestore(&dev->lock, flags); } static void rmnet_sdio_response_complete(struct usb_ep *ep, struct usb_request *req) { struct rmnet_sdio_dev *dev = req->context; struct usb_composite_dev *cdev = dev->cdev; switch (req->status) { case -ECONNRESET: case -ESHUTDOWN: case 0: return; default: INFO(cdev, "rmnet %s response error %d, %d/%d\n", ep->name, req->status, req->actual, req->length); } } static void rmnet_sdio_command_complete(struct usb_ep *ep, struct usb_request *req) { struct rmnet_sdio_dev *dev = req->context; struct usb_composite_dev *cdev = dev->cdev; struct rmnet_sdio_qmi_buf *qmi_req; int len = req->actual; if (req->status < 0) { ERROR(cdev, "rmnet command error %d\n", req->status); return; } if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) return; qmi_req = rmnet_sdio_alloc_qmi(len, GFP_ATOMIC); if (IS_ERR(qmi_req)) { ERROR(cdev, "unable to allocate memory for QMI req\n"); return; } memcpy(qmi_req->buf, req->buf, len); qmi_req->len = len; spin_lock(&dev->lock); list_add_tail(&qmi_req->list, &dev->qmi_req_q); dev->qreq_q_len++; spin_unlock(&dev->lock); queue_work(dev->wq, &dev->ctl_rx_work); } static int rmnet_sdio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) { struct rmnet_sdio_dev *dev = container_of(f, struct rmnet_sdio_dev, function); struct usb_composite_dev *cdev = f->config->cdev; struct usb_request *req = cdev->req; int ret = -EOPNOTSUPP; u16 w_index = le16_to_cpu(ctrl->wIndex); u16 w_value = le16_to_cpu(ctrl->wValue); u16 w_length = le16_to_cpu(ctrl->wLength); struct rmnet_sdio_qmi_buf *resp; if (!atomic_read(&dev->online)) return -ENOTCONN; switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) | USB_CDC_SEND_ENCAPSULATED_COMMAND: ret = w_length; req->complete = rmnet_sdio_command_complete; req->context = dev; break; case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) | USB_CDC_GET_ENCAPSULATED_RESPONSE: if (w_value) goto invalid; else { unsigned len; spin_lock(&dev->lock); if (list_empty(&dev->qmi_resp_q)) { INFO(cdev, "qmi resp empty " " req%02x.%02x v%04x i%04x l%d\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); spin_unlock(&dev->lock); goto invalid; } resp = list_first_entry(&dev->qmi_resp_q, struct rmnet_sdio_qmi_buf, list); list_del(&resp->list); dev->qresp_q_len--; spin_unlock(&dev->lock); len = min_t(unsigned, w_length, resp->len); memcpy(req->buf, resp->buf, len); ret = len; req->context = dev; req->complete = rmnet_sdio_response_complete; 
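/* The QMI payload has already been copied into the ep0 request buffer above, so the queued response buffer can be released before ep0 is armed below. */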
rmnet_sdio_free_qmi(resp); dev->cpkt_tolaptop++; } break; case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) | USB_CDC_REQ_SET_CONTROL_LINE_STATE: if (w_value & ACM_CTRL_DTR) dev->cbits_to_modem |= TIOCM_DTR; else dev->cbits_to_modem &= ~TIOCM_DTR; queue_work(dev->wq, &dev->set_modem_ctl_bits_work); ret = 0; break; default: invalid: DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); } if (ret >= 0) { VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); req->zero = (ret < w_length); req->length = ret; ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); if (ret < 0) ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret); } return ret; } static int rmnet_sdio_rx_submit(struct rmnet_sdio_dev *dev, struct usb_request *req, gfp_t gfp_flags) { struct sk_buff *skb; int retval; skb = alloc_skb(RMNET_SDIO_RX_REQ_SIZE + SDIO_MUX_HDR, gfp_flags); if (skb == NULL) return -ENOMEM; skb_reserve(skb, SDIO_MUX_HDR); req->buf = skb->data; req->length = RMNET_SDIO_RX_REQ_SIZE; req->context = skb; retval = usb_ep_queue(dev->epout, req, gfp_flags); if (retval) dev_kfree_skb_any(skb); return retval; } static void rmnet_sdio_start_rx(struct rmnet_sdio_dev *dev) { struct usb_composite_dev *cdev = dev->cdev; int status; struct usb_request *req; unsigned long flags; if (!atomic_read(&dev->online)) { pr_err("%s: USB not connected\n", __func__); return; } spin_lock_irqsave(&dev->lock, flags); while (!list_empty(&dev->rx_idle)) { req = list_first_entry(&dev->rx_idle, struct usb_request, list); list_del(&req->list); dev->rx_idle_len--; spin_unlock_irqrestore(&dev->lock, flags); status = rmnet_sdio_rx_submit(dev, req, GFP_ATOMIC); spin_lock_irqsave(&dev->lock, flags); if (status) { ERROR(cdev, "rmnet data rx enqueue err %d\n", status); list_add_tail(&req->list, &dev->rx_idle); dev->rx_idle_len++; break; } } spin_unlock_irqrestore(&dev->lock, flags); } static void rmnet_sdio_start_tx(struct rmnet_sdio_dev *dev) { unsigned long flags; int status; struct sk_buff *skb; struct usb_request *req; struct usb_composite_dev *cdev = dev->cdev; if (!atomic_read(&dev->online)) return; if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) return; spin_lock_irqsave(&dev->lock, flags); while (!list_empty(&dev->tx_idle)) { skb = __skb_dequeue(&dev->tx_skb_queue); if (!skb) { spin_unlock_irqrestore(&dev->lock, flags); return; } req = list_first_entry(&dev->tx_idle, struct usb_request, list); req->context = skb; req->buf = skb->data; req->length = skb->len; list_del(&req->list); dev->tx_idle_len--; spin_unlock(&dev->lock); status = usb_ep_queue(dev->epin, req, GFP_ATOMIC); spin_lock(&dev->lock); if (status) { if (atomic_read(&dev->online)) { ERROR(cdev, "rmnet tx data enqueue err %d\n", status); list_add_tail(&req->list, &dev->tx_idle); dev->tx_idle_len++; __skb_queue_head(&dev->tx_skb_queue, skb); } else { req->buf = 0; rmnet_sdio_free_req(dev->epin, req); dev_kfree_skb_any(skb); } break; } dev->dpkt_tolaptop++; } spin_unlock_irqrestore(&dev->lock, flags); } static void rmnet_sdio_data_receive_cb(void *priv, struct sk_buff *skb) { struct rmnet_sdio_dev *dev = priv; unsigned long flags; if (!skb) { pr_info("%s: dmux_ch close event\n", __func__); if (test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status) && test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) { clear_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status); clear_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status); queue_work(dev->wq, 
&dev->sdio_close_work); } return; } if (!atomic_read(&dev->online)) { dev_kfree_skb_any(skb); return; } spin_lock_irqsave(&dev->lock, flags); if (dev->tx_skb_queue.qlen > sdio_tx_pkt_drop_thld) { if (printk_ratelimit()) pr_err("%s: tx pkt dropped: tx_drop_cnt:%lu\n", __func__, dev->tx_drp_cnt); dev->tx_drp_cnt++; spin_unlock_irqrestore(&dev->lock, flags); dev_kfree_skb_any(skb); return; } __skb_queue_tail(&dev->tx_skb_queue, skb); spin_unlock_irqrestore(&dev->lock, flags); rmnet_sdio_start_tx(dev); } static void rmnet_sdio_data_write_done(void *priv, struct sk_buff *skb) { struct rmnet_sdio_dev *dev = priv; if (!skb) { pr_info("%s: dmux_ch open event\n", __func__); queue_delayed_work(dev->wq, &dev->sdio_open_work, 0); return; } dev_kfree_skb_any(skb); spin_lock(&dev->lock); dev->dpkts_pending_atdmux--; if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status) || dev->dpkts_pending_atdmux >= sdio_rx_fctrl_dis_thld) { spin_unlock(&dev->lock); return; } spin_unlock(&dev->lock); rmnet_sdio_start_rx(dev); } static void rmnet_sdio_data_rx_work(struct work_struct *w) { struct rmnet_sdio_dev *dev = container_of(w, struct rmnet_sdio_dev, data_rx_work); struct usb_composite_dev *cdev = dev->cdev; struct sk_buff *skb; int ret; unsigned long flags; if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) { pr_info("%s: sdio data ch not open\n", __func__); return; } spin_lock_irqsave(&dev->lock, flags); while ((skb = __skb_dequeue(&dev->rx_skb_queue))) { spin_unlock_irqrestore(&dev->lock, flags); ret = msm_sdio_dmux_write(rmnet_sdio_data_ch, skb); spin_lock_irqsave(&dev->lock, flags); if (ret < 0) { ERROR(cdev, "rmnet SDIO data write failed\n"); dev_kfree_skb_any(skb); break; } else { dev->dpkt_tomodem++; dev->dpkts_pending_atdmux++; } } spin_unlock_irqrestore(&dev->lock, flags); } static void rmnet_sdio_complete_epout(struct usb_ep *ep, struct usb_request *req) { struct rmnet_sdio_dev *dev = ep->driver_data; struct usb_composite_dev *cdev = dev->cdev; struct sk_buff *skb = req->context; int status = req->status; int queue = 0; switch (status) { case 0: skb_put(skb, req->actual); queue = 1; break; case -ECONNRESET: case -ESHUTDOWN: dev_kfree_skb_any(skb); req->buf = 0; rmnet_sdio_free_req(ep, req); return; default: ERROR(cdev, "RMNET %s response error %d, %d/%d\n", ep->name, status, req->actual, req->length); dev_kfree_skb_any(skb); break; } if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) { pr_info("%s: sdio data ch not open\n", __func__); dev_kfree_skb_any(skb); req->buf = 0; rmnet_sdio_free_req(ep, req); return; } spin_lock(&dev->lock); if (queue) { __skb_queue_tail(&dev->rx_skb_queue, skb); queue_work(dev->wq, &dev->data_rx_work); } if (dev->dpkts_pending_atdmux >= sdio_rx_fctrl_en_thld) { list_add_tail(&req->list, &dev->rx_idle); dev->rx_idle_len++; spin_unlock(&dev->lock); return; } spin_unlock(&dev->lock); status = rmnet_sdio_rx_submit(dev, req, GFP_ATOMIC); if (status) { ERROR(cdev, "rmnet data rx enqueue err %d\n", status); list_add_tail(&req->list, &dev->rx_idle); dev->rx_idle_len++; } } static void rmnet_sdio_complete_epin(struct usb_ep *ep, struct usb_request *req) { struct rmnet_sdio_dev *dev = ep->driver_data; struct sk_buff *skb = req->context; struct usb_composite_dev *cdev = dev->cdev; int status = req->status; switch (status) { case 0: case -ECONNRESET: case -ESHUTDOWN: break; default: ERROR(cdev, "rmnet data tx ep error %d\n", status); break; } spin_lock(&dev->lock); list_add_tail(&req->list, &dev->tx_idle); dev->tx_idle_len++; spin_unlock(&dev->lock); dev_kfree_skb_any(skb); 
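/* The request is back on tx_idle; kick the TX path again to drain any skbs that were queued up while this transfer was in flight. */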
rmnet_sdio_start_tx(dev); } static void rmnet_sdio_free_buf(struct rmnet_sdio_dev *dev) { struct rmnet_sdio_qmi_buf *qmi; struct usb_request *req; struct list_head *act, *tmp; struct sk_buff *skb; unsigned long flags; spin_lock_irqsave(&dev->lock, flags); dev->dpkt_tolaptop = 0; dev->dpkt_tomodem = 0; dev->cpkt_tolaptop = 0; dev->cpkt_tomodem = 0; dev->dpkts_pending_atdmux = 0; dev->tx_drp_cnt = 0; list_for_each_safe(act, tmp, &dev->tx_idle) { req = list_entry(act, struct usb_request, list); list_del(&req->list); dev->tx_idle_len--; req->buf = NULL; rmnet_sdio_free_req(dev->epout, req); } list_for_each_safe(act, tmp, &dev->rx_idle) { req = list_entry(act, struct usb_request, list); list_del(&req->list); dev->rx_idle_len--; req->buf = NULL; rmnet_sdio_free_req(dev->epin, req); } list_for_each_safe(act, tmp, &dev->qmi_req_q) { qmi = list_entry(act, struct rmnet_sdio_qmi_buf, list); list_del(&qmi->list); dev->qreq_q_len--; rmnet_sdio_free_qmi(qmi); } list_for_each_safe(act, tmp, &dev->qmi_resp_q) { qmi = list_entry(act, struct rmnet_sdio_qmi_buf, list); list_del(&qmi->list); dev->qresp_q_len--; rmnet_sdio_free_qmi(qmi); } while ((skb = __skb_dequeue(&dev->tx_skb_queue))) dev_kfree_skb_any(skb); while ((skb = __skb_dequeue(&dev->rx_skb_queue))) dev_kfree_skb_any(skb); rmnet_sdio_free_req(dev->epnotify, dev->notify_req); spin_unlock_irqrestore(&dev->lock, flags); } static void rmnet_sdio_set_modem_cbits_w(struct work_struct *w) { struct rmnet_sdio_dev *dev; dev = container_of(w, struct rmnet_sdio_dev, set_modem_ctl_bits_work); if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) return; pr_debug("%s: cbits_to_modem:%d\n", __func__, dev->cbits_to_modem); sdio_cmux_tiocmset(rmnet_sdio_ctl_ch, dev->cbits_to_modem, ~dev->cbits_to_modem); } static void rmnet_sdio_disconnect_work(struct work_struct *w) { } static void rmnet_sdio_suspend(struct usb_function *f) { struct rmnet_sdio_dev *dev = container_of(f, struct rmnet_sdio_dev, function); if (!atomic_read(&dev->online)) return; dev->cbits_to_modem &= ~TIOCM_DTR; queue_work(dev->wq, &dev->set_modem_ctl_bits_work); } static void rmnet_sdio_disable(struct usb_function *f) { struct rmnet_sdio_dev *dev = container_of(f, struct rmnet_sdio_dev, function); if (!atomic_read(&dev->online)) return; usb_ep_disable(dev->epnotify); usb_ep_disable(dev->epout); usb_ep_disable(dev->epin); atomic_set(&dev->online, 0); atomic_set(&dev->notify_count, 0); rmnet_sdio_free_buf(dev); queue_work(dev->wq, &dev->disconnect_work); dev->cbits_to_modem = 0; queue_work(dev->wq, &dev->set_modem_ctl_bits_work); } static void rmnet_close_sdio_work(struct work_struct *w) { struct rmnet_sdio_dev *dev; unsigned long flags; struct usb_cdc_notification *event; int status; struct rmnet_sdio_qmi_buf *qmi; struct usb_request *req; struct sk_buff *skb; pr_debug("%s:\n", __func__); dev = container_of(w, struct rmnet_sdio_dev, sdio_close_work); if (!atomic_read(&dev->online)) return; usb_ep_fifo_flush(dev->epnotify); spin_lock_irqsave(&dev->lock, flags); event = dev->notify_req->buf; event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE; event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION; event->wValue = cpu_to_le16(0); event->wIndex = cpu_to_le16(dev->ifc_id); event->wLength = cpu_to_le16(0); spin_unlock_irqrestore(&dev->lock, flags); status = usb_ep_queue(dev->epnotify, dev->notify_req, GFP_KERNEL); if (status < 0) { if (!atomic_read(&dev->online)) return; pr_err("%s: rmnet notify ep enqueue error %d\n", __func__, status); } usb_ep_fifo_flush(dev->epout); 
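/* Flush the IN side as well, so no data completions race with the request and queue teardown below. */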
usb_ep_fifo_flush(dev->epin); cancel_work_sync(&dev->data_rx_work); spin_lock_irqsave(&dev->lock, flags); if (!atomic_read(&dev->online)) { spin_unlock_irqrestore(&dev->lock, flags); return; } while (!list_empty(&dev->tx_idle)) { req = list_first_entry(&dev->tx_idle, struct usb_request, list); list_del(&req->list); dev->tx_idle_len--; req->buf = NULL; rmnet_sdio_free_req(dev->epout, req); } while (!list_empty(&dev->rx_idle)) { req = list_first_entry(&dev->rx_idle, struct usb_request, list); list_del(&req->list); dev->rx_idle_len--; req->buf = NULL; rmnet_sdio_free_req(dev->epin, req); } while (!list_empty(&dev->qmi_req_q)) { qmi = list_first_entry(&dev->qmi_req_q, struct rmnet_sdio_qmi_buf, list); list_del(&qmi->list); dev->qreq_q_len--; rmnet_sdio_free_qmi(qmi); } while (!list_empty(&dev->qmi_resp_q)) { qmi = list_first_entry(&dev->qmi_resp_q, struct rmnet_sdio_qmi_buf, list); list_del(&qmi->list); dev->qresp_q_len--; rmnet_sdio_free_qmi(qmi); } atomic_set(&dev->notify_count, 0); pr_info("%s: setting notify count to zero\n", __func__); while ((skb = __skb_dequeue(&dev->tx_skb_queue))) dev_kfree_skb_any(skb); while ((skb = __skb_dequeue(&dev->rx_skb_queue))) dev_kfree_skb_any(skb); spin_unlock_irqrestore(&dev->lock, flags); } static int rmnet_sdio_start_io(struct rmnet_sdio_dev *dev) { struct usb_request *req; int ret, i; unsigned long flags; spin_lock_irqsave(&dev->lock, flags); if (!atomic_read(&dev->online)) { spin_unlock_irqrestore(&dev->lock, flags); return 0; } if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status) || !test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) { spin_unlock_irqrestore(&dev->lock, flags); return 0; } for (i = 0; i < RMNET_SDIO_RX_REQ_MAX; i++) { req = rmnet_sdio_alloc_req(dev->epout, 0, GFP_ATOMIC); if (IS_ERR(req)) { ret = PTR_ERR(req); spin_unlock_irqrestore(&dev->lock, flags); goto free_buf; } req->complete = rmnet_sdio_complete_epout; list_add_tail(&req->list, &dev->rx_idle); dev->rx_idle_len++; } for (i = 0; i < RMNET_SDIO_TX_REQ_MAX; i++) { req = rmnet_sdio_alloc_req(dev->epin, 0, GFP_ATOMIC); if (IS_ERR(req)) { ret = PTR_ERR(req); spin_unlock_irqrestore(&dev->lock, flags); goto free_buf; } req->complete = rmnet_sdio_complete_epin; list_add_tail(&req->list, &dev->tx_idle); dev->tx_idle_len++; } spin_unlock_irqrestore(&dev->lock, flags); rmnet_sdio_start_rx(dev); return 0; free_buf: rmnet_sdio_free_buf(dev); dev->epout = dev->epin = dev->epnotify = NULL; return ret; } #define RMNET_SDIO_OPEN_RETRY_DELAY msecs_to_jiffies(2000) #define SDIO_SDIO_OPEN_MAX_RETRY 90 static void rmnet_open_sdio_work(struct work_struct *w) { struct rmnet_sdio_dev *dev = container_of(w, struct rmnet_sdio_dev, sdio_open_work.work); struct usb_composite_dev *cdev = dev->cdev; int ret; static int retry_cnt; if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) { ret = sdio_cmux_open(rmnet_sdio_ctl_ch, rmnet_sdio_ctl_receive_cb, rmnet_sdio_ctl_write_done, rmnet_sdio_sts_callback, dev); if (!ret) set_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status); } if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) { ret = msm_sdio_dmux_open(rmnet_sdio_data_ch, dev, rmnet_sdio_data_receive_cb, rmnet_sdio_data_write_done); if (!ret) set_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status); } if (test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status) && test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) { rmnet_sdio_start_io(dev); if (atomic_read(&dev->online)) queue_work(dev->wq, &dev->set_modem_ctl_bits_work); pr_info("%s: usb rmnet sdio channels are open retry_cnt:%d\n", __func__, retry_cnt); 
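/* Both channels came up: reset the retry counter for the next open cycle. */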
retry_cnt = 0; return; } retry_cnt++; pr_debug("%s: usb rmnet sdio open retry_cnt:%d\n", __func__, retry_cnt); if (retry_cnt > SDIO_SDIO_OPEN_MAX_RETRY) { if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) ERROR(cdev, "Unable to open control SDIO channel\n"); if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) ERROR(cdev, "Unable to open DATA SDIO channel\n"); } else { queue_delayed_work(dev->wq, &dev->sdio_open_work, RMNET_SDIO_OPEN_RETRY_DELAY); } } static int rmnet_sdio_set_alt(struct usb_function *f, unsigned intf, unsigned alt) { struct rmnet_sdio_dev *dev = container_of(f, struct rmnet_sdio_dev, function); struct usb_composite_dev *cdev = dev->cdev; int ret = 0; dev->epin->driver_data = dev; ret = config_ep_by_speed(cdev->gadget, f, dev->epin); if (ret) { dev->epin->desc = NULL; ERROR(cdev, "config_ep_by_speed fails for ep %s, result %d\n", dev->epin->name, ret); return ret; } ret = usb_ep_enable(dev->epin); if (ret) { ERROR(cdev, "can't enable %s, result %d\n", dev->epin->name, ret); return ret; } dev->epout->driver_data = dev; ret = config_ep_by_speed(cdev->gadget, f, dev->epout); if (ret) { dev->epout->desc = NULL; ERROR(cdev, "config_ep_by_speed fails for ep %s, result %d\n", dev->epout->name, ret); usb_ep_disable(dev->epin); return ret; } ret = usb_ep_enable(dev->epout); if (ret) { ERROR(cdev, "can't enable %s, result %d\n", dev->epout->name, ret); usb_ep_disable(dev->epin); return ret; } ret = config_ep_by_speed(cdev->gadget, f, dev->epnotify); if (ret) { dev->epnotify->desc = NULL; ERROR(cdev, "config_ep_by_speed fails for ep %s, result %d\n", dev->epnotify->name, ret); usb_ep_disable(dev->epin); usb_ep_disable(dev->epout); return ret; } ret = usb_ep_enable(dev->epnotify); if (ret) { ERROR(cdev, "can't enable %s, result %d\n", dev->epnotify->name, ret); usb_ep_disable(dev->epin); usb_ep_disable(dev->epout); return ret; } dev->notify_req = rmnet_sdio_alloc_req(dev->epnotify, RMNET_SDIO_MAX_NFY_SZE, GFP_ATOMIC); if (IS_ERR(dev->notify_req)) { ret = PTR_ERR(dev->notify_req); pr_err("%s: unable to allocate memory for notify ep\n", __func__); return ret; } dev->notify_req->complete = rmnet_sdio_notify_complete; dev->notify_req->context = dev; dev->notify_req->length = RMNET_SDIO_MAX_NFY_SZE; atomic_set(&dev->online, 1); ret = rmnet_sdio_start_io(dev); return ret; } static int rmnet_sdio_bind(struct usb_configuration *c, struct usb_function *f) { struct usb_composite_dev *cdev = c->cdev; struct rmnet_sdio_dev *dev = container_of(f, struct rmnet_sdio_dev, function); int id; struct usb_ep *ep; dev->cdev = cdev; id = usb_interface_id(c, f); if (id < 0) return id; dev->ifc_id = id; rmnet_sdio_interface_desc.bInterfaceNumber = id; ep = usb_ep_autoconfig(cdev->gadget, &rmnet_sdio_fs_in_desc); if (!ep) goto out; ep->driver_data = cdev; dev->epin = ep; ep = usb_ep_autoconfig(cdev->gadget, &rmnet_sdio_fs_out_desc); if (!ep) goto out; ep->driver_data = cdev; dev->epout = ep; ep = usb_ep_autoconfig(cdev->gadget, &rmnet_sdio_fs_notify_desc); if (!ep) goto out; ep->driver_data = cdev; dev->epnotify = ep; if (gadget_is_dualspeed(c->cdev->gadget)) { rmnet_sdio_hs_in_desc.bEndpointAddress = rmnet_sdio_fs_in_desc.bEndpointAddress; rmnet_sdio_hs_out_desc.bEndpointAddress = rmnet_sdio_fs_out_desc.bEndpointAddress; rmnet_sdio_hs_notify_desc.bEndpointAddress = rmnet_sdio_fs_notify_desc.bEndpointAddress; } queue_delayed_work(dev->wq, &dev->sdio_open_work, 0); return 0; out: if (dev->epnotify) dev->epnotify->driver_data = NULL; if (dev->epout) dev->epout->driver_data = NULL; if (dev->epin)
dev->epin->driver_data = NULL; return -ENODEV; } static void rmnet_sdio_unbind(struct usb_configuration *c, struct usb_function *f) { struct rmnet_sdio_dev *dev = container_of(f, struct rmnet_sdio_dev, function); cancel_delayed_work_sync(&dev->sdio_open_work); destroy_workqueue(dev->wq); dev->epout = dev->epin = dev->epnotify = NULL; if (test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) { msm_sdio_dmux_close(rmnet_sdio_data_ch); clear_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status); } if (test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) { sdio_cmux_close(rmnet_sdio_ctl_ch); clear_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status); } debugfs_remove_recursive(dev->dent); kfree(dev); } #if defined(CONFIG_DEBUG_FS) static ssize_t rmnet_sdio_read_stats(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) { struct rmnet_sdio_dev *dev = file->private_data; char *buf; unsigned long flags; int ret; buf = kzalloc(sizeof(char) * 1024, GFP_KERNEL); if (!buf) return -ENOMEM; spin_lock_irqsave(&dev->lock, flags); ret = scnprintf(buf, 1024, "-*-DATA-*-\n" "dpkts_tohost:%lu epInPool:%u tx_size:%u drp_cnt:%lu\n" "dpkts_tomodem:%lu epOutPool:%u rx_size:%u pending:%u\n" "-*-QMI-*-\n" "cpkts_tomodem:%lu qmi_req_q:%u cbits:%d\n" "cpkts_tolaptop:%lu qmi_resp_q:%u notify_cnt:%d\n" "-*-MISC-*-\n" "data_ch_status: %lu ctrl_ch_status: %lu\n", dev->dpkt_tolaptop, dev->tx_idle_len, dev->tx_skb_queue.qlen, dev->tx_drp_cnt, dev->dpkt_tomodem, dev->rx_idle_len, dev->rx_skb_queue.qlen, dev->dpkts_pending_atdmux, dev->cpkt_tomodem, dev->qreq_q_len, dev->cbits_to_modem, dev->cpkt_tolaptop, dev->qresp_q_len, atomic_read(&dev->notify_count), dev->data_ch_status, dev->ctrl_ch_status); spin_unlock_irqrestore(&dev->lock, flags); ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret); kfree(buf); return ret; } static ssize_t rmnet_sdio_reset_stats(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct rmnet_sdio_dev *dev = file->private_data; dev->dpkt_tolaptop = 0; dev->dpkt_tomodem = 0; dev->cpkt_tolaptop = 0; dev->cpkt_tomodem = 0; dev->dpkts_pending_atdmux = 0; dev->tx_drp_cnt = 0; return count; } static int debug_rmnet_sdio_open(struct inode *inode, struct file *file) { file->private_data = inode->i_private; return 0; } const struct file_operations debug_rmnet_sdio_stats_ops = { .open = debug_rmnet_sdio_open, .read = rmnet_sdio_read_stats, .write = rmnet_sdio_reset_stats, }; static void rmnet_sdio_debugfs_init(struct rmnet_sdio_dev *dev) { dev->dent = debugfs_create_dir("usb_rmnet_sdio", 0); if (IS_ERR(dev->dent)) return; debugfs_create_file("status", 0444, dev->dent, dev, &debug_rmnet_sdio_stats_ops); } #else static void rmnet_sdio_debugfs_init(struct rmnet_sdio_dev *dev) { return; } #endif int rmnet_sdio_function_add(struct usb_configuration *c) { struct rmnet_sdio_dev *dev; int ret; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; dev->wq = create_singlethread_workqueue("k_rmnet_work"); if (!dev->wq) { ret = -ENOMEM; goto free_dev; } spin_lock_init(&dev->lock); atomic_set(&dev->notify_count, 0); atomic_set(&dev->online, 0); INIT_WORK(&dev->disconnect_work, rmnet_sdio_disconnect_work); INIT_WORK(&dev->set_modem_ctl_bits_work, rmnet_sdio_set_modem_cbits_w); INIT_WORK(&dev->ctl_rx_work, rmnet_sdio_control_rx_work); INIT_WORK(&dev->data_rx_work, rmnet_sdio_data_rx_work); INIT_DELAYED_WORK(&dev->sdio_open_work, rmnet_open_sdio_work); INIT_WORK(&dev->sdio_close_work, rmnet_close_sdio_work); INIT_LIST_HEAD(&dev->qmi_req_q); INIT_LIST_HEAD(&dev->qmi_resp_q);
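/* The rx_idle/tx_idle request pools start out empty; rmnet_sdio_start_io() populates them once both SDIO channels are open. */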
INIT_LIST_HEAD(&dev->rx_idle); INIT_LIST_HEAD(&dev->tx_idle); skb_queue_head_init(&dev->tx_skb_queue); skb_queue_head_init(&dev->rx_skb_queue); dev->function.name = "rmnet_sdio"; dev->function.strings = rmnet_sdio_strings; dev->function.descriptors = rmnet_sdio_fs_function; dev->function.hs_descriptors = rmnet_sdio_hs_function; dev->function.bind = rmnet_sdio_bind; dev->function.unbind = rmnet_sdio_unbind; dev->function.setup = rmnet_sdio_setup; dev->function.set_alt = rmnet_sdio_set_alt; dev->function.disable = rmnet_sdio_disable; dev->function.suspend = rmnet_sdio_suspend; ret = usb_add_function(c, &dev->function); if (ret) goto free_wq; rmnet_sdio_debugfs_init(dev); return 0; free_wq: destroy_workqueue(dev->wq); free_dev: kfree(dev); return ret; }
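The notify-endpoint machinery above coalesces bursts of QMI responses into at most one in-flight interrupt transfer: rmnet_sdio_qmi_resp_available() queues the notify request only on the 0 -> 1 transition of notify_count, and rmnet_sdio_notify_complete() keeps re-queuing it until the counter drains. A minimal stand-alone sketch of that counting scheme (hypothetical names; C11 atomics stand in for the kernel's atomic_t, printf for usb_ep_queue()):

/*
 * Sketch: coalesce N "response available" events into a single in-flight
 * notification, modelled on the notify_count scheme above.
 * Build with: cc -std=c11 notify_sketch.c
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int notify_count;

static void queue_notification(void)
{
	/* stands in for usb_ep_queue() on the interrupt endpoint */
	printf("notification queued (count=%d)\n", atomic_load(&notify_count));
}

static void response_available(void)
{
	/* only the 0 -> 1 transition arms the endpoint; later events coalesce */
	if (atomic_fetch_add(&notify_count, 1) == 0)
		queue_notification();
}

static void notify_complete(void)
{
	/* completion callback: re-arm until every coalesced event is reported */
	if (atomic_fetch_sub(&notify_count, 1) > 1)
		queue_notification();
}

int main(void)
{
	response_available();	/* queues */
	response_available();	/* coalesced: no second queue */
	notify_complete();	/* re-queues for the coalesced event */
	notify_complete();	/* drains to zero, endpoint idle */
	return 0;
}

The same pattern appears in other gadget functions that funnel many host-visible events through a single interrupt endpoint: the counter remembers how many notifications are owed while only one request is ever outstanding.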
gpl-2.0
pjsports/kernel-2.6.39.4-A500-OC1.5G
arch/arm/mach-at91/board-sam9g20ek.c
307
9749
/* * Copyright (C) 2005 SAN People * Copyright (C) 2008 Atmel * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/types.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/spi/at73c213.h> #include <linux/gpio_keys.h> #include <linux/input.h> #include <linux/clk.h> #include <linux/regulator/machine.h> #include <linux/regulator/fixed.h> #include <linux/regulator/consumer.h> #include <mach/hardware.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/board.h> #include <mach/gpio.h> #include <mach/at91sam9_smc.h> #include "sam9_smc.h" #include "generic.h" /* * board revision encoding * bit 0: * 0 => 1 sd/mmc slot * 1 => 2 sd/mmc slots connectors (board from revision C) */ #define HAVE_2MMC (1 << 0) static int inline ek_have_2mmc(void) { return machine_is_at91sam9g20ek_2mmc() || (system_rev & HAVE_2MMC); } static void __init ek_map_io(void) { /* Initialize processor: 18.432 MHz crystal */ at91sam9260_initialize(18432000); /* DBGU on ttyS0. (Rx & Tx only) */ at91_register_uart(0, 0, 0); /* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */ at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS | ATMEL_UART_DTR | ATMEL_UART_DSR | ATMEL_UART_DCD | ATMEL_UART_RI); /* USART1 on ttyS2. (Rx, Tx, RTS, CTS) */ at91_register_uart(AT91SAM9260_ID_US1, 2, ATMEL_UART_CTS | ATMEL_UART_RTS); /* set serial console to ttyS0 (ie, DBGU) */ at91_set_serial_console(0); } static void __init ek_init_irq(void) { at91sam9260_init_interrupts(NULL); } /* * USB Host port */ static struct at91_usbh_data __initdata ek_usbh_data = { .ports = 2, }; /* * USB Device port */ static struct at91_udc_data __initdata ek_udc_data = { .vbus_pin = AT91_PIN_PC5, .pullup_pin = 0, /* pull-up driven by UDC */ }; /* * SPI devices. 
*/ static struct spi_board_info ek_spi_devices[] = { #if !(defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_AT91)) { /* DataFlash chip */ .modalias = "mtd_dataflash", .chip_select = 1, .max_speed_hz = 15 * 1000 * 1000, .bus_num = 0, }, #if defined(CONFIG_MTD_AT91_DATAFLASH_CARD) { /* DataFlash card */ .modalias = "mtd_dataflash", .chip_select = 0, .max_speed_hz = 15 * 1000 * 1000, .bus_num = 0, }, #endif #endif }; /* * MACB Ethernet device */ static struct at91_eth_data __initdata ek_macb_data = { .phy_irq_pin = AT91_PIN_PA7, .is_rmii = 1, }; static void __init ek_add_device_macb(void) { if (ek_have_2mmc()) ek_macb_data.phy_irq_pin = AT91_PIN_PB0; at91_add_device_eth(&ek_macb_data); } /* * NAND flash */ static struct mtd_partition __initdata ek_nand_partition[] = { { .name = "Bootstrap", .offset = 0, .size = 4 * SZ_1M, }, { .name = "Partition 1", .offset = MTDPART_OFS_NXTBLK, .size = 60 * SZ_1M, }, { .name = "Partition 2", .offset = MTDPART_OFS_NXTBLK, .size = MTDPART_SIZ_FULL, }, }; static struct mtd_partition * __init nand_partitions(int size, int *num_partitions) { *num_partitions = ARRAY_SIZE(ek_nand_partition); return ek_nand_partition; } /* det_pin is not connected */ static struct atmel_nand_data __initdata ek_nand_data = { .ale = 21, .cle = 22, .rdy_pin = AT91_PIN_PC13, .enable_pin = AT91_PIN_PC14, .partition_info = nand_partitions, #if defined(CONFIG_MTD_NAND_ATMEL_BUSWIDTH_16) .bus_width_16 = 1, #else .bus_width_16 = 0, #endif }; static struct sam9_smc_config __initdata ek_nand_smc_config = { .ncs_read_setup = 0, .nrd_setup = 2, .ncs_write_setup = 0, .nwe_setup = 2, .ncs_read_pulse = 4, .nrd_pulse = 4, .ncs_write_pulse = 4, .nwe_pulse = 4, .read_cycle = 7, .write_cycle = 7, .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE, .tdf_cycles = 3, }; static void __init ek_add_device_nand(void) { /* setup bus-width (8 or 16) */ if (ek_nand_data.bus_width_16) ek_nand_smc_config.mode |= AT91_SMC_DBW_16; else ek_nand_smc_config.mode |= AT91_SMC_DBW_8; /* configure chip-select 3 (NAND) */ sam9_smc_configure(3, &ek_nand_smc_config); at91_add_device_nand(&ek_nand_data); } /* * MCI (SD/MMC) * wp_pin and vcc_pin are not connected */ #if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE) static struct mci_platform_data __initdata ek_mmc_data = { .slot[1] = { .bus_width = 4, .detect_pin = AT91_PIN_PC9, }, }; #else static struct at91_mmc_data __initdata ek_mmc_data = { .slot_b = 1, /* Only one slot so use slot B */ .wire4 = 1, .det_pin = AT91_PIN_PC9, }; #endif static void __init ek_add_device_mmc(void) { #if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE) if (ek_have_2mmc()) { ek_mmc_data.slot[0].bus_width = 4; ek_mmc_data.slot[0].detect_pin = AT91_PIN_PC2; } at91_add_device_mci(0, &ek_mmc_data); #else at91_add_device_mmc(0, &ek_mmc_data); #endif } /* * LEDs */ static struct gpio_led ek_leds[] = { { /* "bottom" led, green, userled1 to be defined */ .name = "ds5", .gpio = AT91_PIN_PA6, .active_low = 1, .default_trigger = "none", }, { /* "power" led, yellow */ .name = "ds1", .gpio = AT91_PIN_PA9, .default_trigger = "heartbeat", } }; static void __init ek_add_device_gpio_leds(void) { if (ek_have_2mmc()) { ek_leds[0].gpio = AT91_PIN_PB8; ek_leds[1].gpio = AT91_PIN_PB9; } at91_gpio_leds(ek_leds, ARRAY_SIZE(ek_leds)); } /* * GPIO Buttons */ #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) static struct gpio_keys_button ek_buttons[] = { { .gpio = AT91_PIN_PA30, .code = BTN_3, .desc = "Button 3", .active_low = 1, 
.wakeup = 1, }, { .gpio = AT91_PIN_PA31, .code = BTN_4, .desc = "Button 4", .active_low = 1, .wakeup = 1, } }; static struct gpio_keys_platform_data ek_button_data = { .buttons = ek_buttons, .nbuttons = ARRAY_SIZE(ek_buttons), }; static struct platform_device ek_button_device = { .name = "gpio-keys", .id = -1, .num_resources = 0, .dev = { .platform_data = &ek_button_data, } }; static void __init ek_add_device_buttons(void) { at91_set_gpio_input(AT91_PIN_PA30, 1); /* btn3 */ at91_set_deglitch(AT91_PIN_PA30, 1); at91_set_gpio_input(AT91_PIN_PA31, 1); /* btn4 */ at91_set_deglitch(AT91_PIN_PA31, 1); platform_device_register(&ek_button_device); } #else static void __init ek_add_device_buttons(void) {} #endif #if defined(CONFIG_REGULATOR_FIXED_VOLTAGE) || defined(CONFIG_REGULATOR_FIXED_VOLTAGE_MODULE) static struct regulator_consumer_supply ek_audio_consumer_supplies[] = { REGULATOR_SUPPLY("AVDD", "0-001b"), REGULATOR_SUPPLY("HPVDD", "0-001b"), REGULATOR_SUPPLY("DBVDD", "0-001b"), REGULATOR_SUPPLY("DCVDD", "0-001b"), }; static struct regulator_init_data ek_avdd_reg_init_data = { .constraints = { .name = "3V3", .valid_ops_mask = REGULATOR_CHANGE_STATUS, }, .consumer_supplies = ek_audio_consumer_supplies, .num_consumer_supplies = ARRAY_SIZE(ek_audio_consumer_supplies), }; static struct fixed_voltage_config ek_vdd_pdata = { .supply_name = "board-3V3", .microvolts = 3300000, .gpio = -EINVAL, .enabled_at_boot = 0, .init_data = &ek_avdd_reg_init_data, }; static struct platform_device ek_voltage_regulator = { .name = "reg-fixed-voltage", .id = -1, .num_resources = 0, .dev = { .platform_data = &ek_vdd_pdata, }, }; static void __init ek_add_regulators(void) { platform_device_register(&ek_voltage_regulator); } #else static void __init ek_add_regulators(void) {} #endif static struct i2c_board_info __initdata ek_i2c_devices[] = { { I2C_BOARD_INFO("24c512", 0x50) }, { I2C_BOARD_INFO("wm8731", 0x1b) }, }; static void __init ek_board_init(void) { /* Serial */ at91_add_device_serial(); /* USB Host */ at91_add_device_usbh(&ek_usbh_data); /* USB Device */ at91_add_device_udc(&ek_udc_data); /* SPI */ at91_add_device_spi(ek_spi_devices, ARRAY_SIZE(ek_spi_devices)); /* NAND */ ek_add_device_nand(); /* Ethernet */ ek_add_device_macb(); /* Regulators */ ek_add_regulators(); /* MMC */ ek_add_device_mmc(); /* I2C */ at91_add_device_i2c(ek_i2c_devices, ARRAY_SIZE(ek_i2c_devices)); /* LEDs */ ek_add_device_gpio_leds(); /* Push Buttons */ ek_add_device_buttons(); /* PCK0 provides MCLK to the WM8731 */ at91_set_B_periph(AT91_PIN_PC1, 0); /* SSC (for WM8731) */ at91_add_device_ssc(AT91SAM9260_ID_SSC, ATMEL_SSC_TX); } MACHINE_START(AT91SAM9G20EK, "Atmel AT91SAM9G20-EK") /* Maintainer: Atmel */ .boot_params = AT91_SDRAM_BASE + 0x100, .timer = &at91sam926x_timer, .map_io = ek_map_io, .init_irq = ek_init_irq, .init_machine = ek_board_init, MACHINE_END MACHINE_START(AT91SAM9G20EK_2MMC, "Atmel AT91SAM9G20-EK 2 MMC Slot Mod") /* Maintainer: Atmel */ .boot_params = AT91_SDRAM_BASE + 0x100, .timer = &at91sam926x_timer, .map_io = ek_map_io, .init_irq = ek_init_irq, .init_machine = ek_board_init, MACHINE_END
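ek_have_2mmc() above illustrates the board-revision encoding this file relies on: feature bits are packed into the boot-loader-supplied system_rev word and combined with a machine-type check. A small stand-alone sketch of just the bit decoding (HAVE_2MMC comes from the file above; main() and the seeded revision value are made up for illustration):

/*
 * Sketch of the revision-bit decoding behind ek_have_2mmc().
 * Build with: cc rev_sketch.c
 */
#include <stdio.h>

#define HAVE_2MMC (1 << 0)	/* bit 0 of system_rev: rev C boards have 2 slots */

static unsigned int system_rev;	/* the kernel fills this from ATAG_REVISION */

static int have_2mmc(void)
{
	return (system_rev & HAVE_2MMC) != 0;
}

int main(void)
{
	system_rev = 0x1;	/* pretend the boot loader reported a rev C board */
	printf("sd/mmc slots: %d\n", have_2mmc() ? 2 : 1);
	return 0;
}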
gpl-2.0
KiranSurath/Audacity-Endeavoru
arch/powerpc/platforms/85xx/p1023_rds.c
563
3623
/* * Copyright 2010-2011 Freescale Semiconductor, Inc. * * Author: Roy Zang <tie-fei.zang@freescale.com> * * Description: * P1023 RDS Board Setup * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/fsl_devices.h> #include <linux/of_platform.h> #include <linux/of_device.h> #include <asm/system.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <mm/mmu_decl.h> #include <asm/prom.h> #include <asm/udbg.h> #include <asm/mpic.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> /* ************************************************************************ * * Setup the architecture * */ #ifdef CONFIG_SMP void __init mpc85xx_smp_init(void); #endif static void __init mpc85xx_rds_setup_arch(void) { struct device_node *np; if (ppc_md.progress) ppc_md.progress("p1023_rds_setup_arch()", 0); /* Map BCSR area */ np = of_find_node_by_name(NULL, "bcsr"); if (np != NULL) { static u8 __iomem *bcsr_regs; bcsr_regs = of_iomap(np, 0); of_node_put(np); if (!bcsr_regs) { printk(KERN_ERR "BCSR: Failed to map bcsr register space\n"); return; } else { #define BCSR15_I2C_BUS0_SEG_CLR 0x07 #define BCSR15_I2C_BUS0_SEG2 0x02 /* * Note: Accessing exclusively i2c devices. * * The i2c controller selects initially ID EEPROM in the u-boot; * but if menu configuration selects RTC support in the kernel, * the i2c controller switches to select RTC chip in the kernel. */ #ifdef CONFIG_RTC_CLASS /* Enable RTC chip on the segment #2 of i2c */ clrbits8(&bcsr_regs[15], BCSR15_I2C_BUS0_SEG_CLR); setbits8(&bcsr_regs[15], BCSR15_I2C_BUS0_SEG2); #endif iounmap(bcsr_regs); } } #ifdef CONFIG_PCI for_each_compatible_node(np, "pci", "fsl,p1023-pcie") fsl_add_bridge(np, 0); #endif #ifdef CONFIG_SMP mpc85xx_smp_init(); #endif } static struct of_device_id p1023_ids[] = { { .type = "soc", }, { .compatible = "soc", }, { .compatible = "simple-bus", }, {}, }; static int __init p1023_publish_devices(void) { of_platform_bus_probe(NULL, p1023_ids, NULL); return 0; } machine_device_initcall(p1023_rds, p1023_publish_devices); static void __init mpc85xx_rds_pic_init(void) { struct mpic *mpic; struct resource r; struct device_node *np = NULL; np = of_find_node_by_type(NULL, "open-pic"); if (!np) { printk(KERN_ERR "Could not find open-pic node\n"); return; } if (of_address_to_resource(np, 0, &r)) { printk(KERN_ERR "Failed to map mpic register space\n"); of_node_put(np); return; } mpic = mpic_alloc(np, r.start, MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); of_node_put(np); mpic_init(mpic); } static int __init p1023_rds_probe(void) { unsigned long root = of_get_flat_dt_root(); return of_flat_dt_is_compatible(root, "fsl,P1023RDS"); } define_machine(p1023_rds) { .name = "P1023 RDS", .probe = p1023_rds_probe, .setup_arch = mpc85xx_rds_setup_arch, .init_IRQ = mpc85xx_rds_pic_init, .get_irq = mpic_get_irq, .restart = fsl_rstcr_restart, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, #endif };
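The BCSR15 poke above is a classic clear-then-set read-modify-write: the whole I2C segment field is zeroed first, then the segment-2 bit is set so the RTC sits behind the bus mux. A hedged user-space model of the clrbits8()/setbits8() sequence (the "register" here is a plain byte rather than an of_iomap()ed MMIO location, and the 0xa5 power-on value is invented):

/*
 * Sketch of the clrbits8()/setbits8() sequence used on BCSR15 above;
 * masks are the board's, the simulated register value is arbitrary.
 */
#include <stdio.h>
#include <stdint.h>

#define BCSR15_I2C_BUS0_SEG_CLR 0x07
#define BCSR15_I2C_BUS0_SEG2    0x02

static void clrbits8(uint8_t *reg, uint8_t mask) { *reg &= ~mask; }
static void setbits8(uint8_t *reg, uint8_t mask) { *reg |= mask; }

int main(void)
{
	uint8_t bcsr15 = 0xa5;				/* invented power-on value */

	clrbits8(&bcsr15, BCSR15_I2C_BUS0_SEG_CLR);	/* zero the segment field */
	setbits8(&bcsr15, BCSR15_I2C_BUS0_SEG2);	/* select segment #2 (RTC) */
	printf("BCSR15 = 0x%02x\n", bcsr15);		/* -> 0xa2 */
	return 0;
}

Clearing the full field before setting the new value means the sequence works no matter which segment was selected previously, which is why the driver does it in two steps instead of a single write.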
gpl-2.0
kashifmin/KKernel_yu_msm8916
drivers/scsi/mpt3sas/mpt3sas_scsih.c
1843
235027
/* * Scsi Host Layer for MPT (Message Passing Technology) based controllers * * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c * Copyright (C) 2012 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * NO WARRANTY * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is * solely responsible for determining the appropriateness of using and * distributing the Program and assumes all risks associated with its * exercise of rights under this Agreement, including but not limited to * the risks and costs of program errors, damage to or loss of data, * programs or equipment, and unavailability or interruption of operations. * DISCLAIMER OF LIABILITY * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
*/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/blkdev.h> #include <linux/sched.h> #include <linux/workqueue.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/aer.h> #include <linux/raid_class.h> #include "mpt3sas_base.h" MODULE_AUTHOR(MPT3SAS_AUTHOR); MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION); MODULE_LICENSE("GPL"); MODULE_VERSION(MPT3SAS_DRIVER_VERSION); #define RAID_CHANNEL 1 /* forward proto's */ static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc, struct _sas_node *sas_expander); static void _firmware_event_work(struct work_struct *work); static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc, struct _sas_device *sas_device); static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 retry_count, u8 is_pd); static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid); static void _scsih_scan_start(struct Scsi_Host *shost); static int _scsih_scan_finished(struct Scsi_Host *shost, unsigned long time); /* global parameters */ LIST_HEAD(mpt3sas_ioc_list); /* local parameters */ static u8 scsi_io_cb_idx = -1; static u8 tm_cb_idx = -1; static u8 ctl_cb_idx = -1; static u8 base_cb_idx = -1; static u8 port_enable_cb_idx = -1; static u8 transport_cb_idx = -1; static u8 scsih_cb_idx = -1; static u8 config_cb_idx = -1; static int mpt_ids; static u8 tm_tr_cb_idx = -1 ; static u8 tm_tr_volume_cb_idx = -1 ; static u8 tm_sas_control_cb_idx = -1; /* command line options */ static u32 logging_level; MODULE_PARM_DESC(logging_level, " bits for enabling additional logging info (default=0)"); static ushort max_sectors = 0xFFFF; module_param(max_sectors, ushort, 0); MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767"); static int missing_delay[2] = {-1, -1}; module_param_array(missing_delay, int, NULL, 0); MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay"); /* scsi-mid layer global parameter is max_report_luns, which is 511 */ #define MPT3SAS_MAX_LUN (16895) static int max_lun = MPT3SAS_MAX_LUN; module_param(max_lun, int, 0); MODULE_PARM_DESC(max_lun, " max lun, default=16895 "); /* diag_buffer_enable is bitwise * bit 0 set = TRACE * bit 1 set = SNAPSHOT * bit 2 set = EXTENDED * * Either bit can be set, or both */ static int diag_buffer_enable = -1; module_param(diag_buffer_enable, int, 0); MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)"); static int disable_discovery = -1; module_param(disable_discovery, int, 0); MODULE_PARM_DESC(disable_discovery, " disable discovery "); /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */ static int prot_mask = -1; module_param(prot_mask, int, 0); MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 "); /* raid transport support */ static struct raid_template *mpt3sas_raid_template; /** * struct sense_info - common structure for obtaining sense keys * @skey: sense key * @asc: additional sense code * @ascq: additional sense code qualifier */ struct sense_info { u8 skey; u8 asc; u8 ascq; }; #define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB) #define MPT3SAS_TURN_ON_FAULT_LED (0xFFFC) #define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD) #define MPT3SAS_ABRT_TASK_SET (0xFFFE) #define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF) /** * struct fw_event_work - firmware event struct * @list: link list framework * @work: work object (ioc->fault_reset_work_q) * @cancel_pending_work: flag
set during reset handling * @ioc: per adapter object * @device_handle: device handle * @VF_ID: virtual function id * @VP_ID: virtual port id * @ignore: flag meaning this event has been marked to ignore * @event: firmware event MPI2_EVENT_XXX defined in mpt2_ioc.h * @event_data: reply event data payload follows * * This object is stored on ioc->fw_event_list. */ struct fw_event_work { struct list_head list; struct work_struct work; u8 cancel_pending_work; struct delayed_work delayed_work; struct MPT3SAS_ADAPTER *ioc; u16 device_handle; u8 VF_ID; u8 VP_ID; u8 ignore; u16 event; void *event_data; }; /** * struct _scsi_io_transfer - scsi io transfer * @handle: sas device handle (assigned by firmware) * @is_raid: flag set for hidden raid components * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE, * @data_length: data transfer length * @data_dma: dma pointer to data * @sense: sense data * @lun: lun number * @cdb_length: cdb length * @cdb: cdb contents * @timeout: timeout for this command * @VF_ID: virtual function id * @VP_ID: virtual port id * @valid_reply: flag set for reply message * @sense_length: sense length * @ioc_status: ioc status * @scsi_state: scsi state * @scsi_status: scsi status * @log_info: log information * @transfer_length: data length transfer when there is a reply message * * Used for sending internal scsi commands to devices within this module. * Refer to _scsi_send_scsi_io(). */ struct _scsi_io_transfer { u16 handle; u8 is_raid; enum dma_data_direction dir; u32 data_length; dma_addr_t data_dma; u8 sense[SCSI_SENSE_BUFFERSIZE]; u32 lun; u8 cdb_length; u8 cdb[32]; u8 timeout; u8 VF_ID; u8 VP_ID; u8 valid_reply; /* the following bits are only valid when 'valid_reply = 1' */ u32 sense_length; u16 ioc_status; u8 scsi_state; u8 scsi_status; u32 log_info; u32 transfer_length; }; /* * The pci device ids are defined in mpi/mpi2_cnfg.h. */ static DEFINE_PCI_DEVICE_TABLE(scsih_pci_table) = { /* Fury ~ 3004 and 3008 */ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004, PCI_ANY_ID, PCI_ANY_ID }, { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008, PCI_ANY_ID, PCI_ANY_ID }, /* Invader ~ 3108 */ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1, PCI_ANY_ID, PCI_ANY_ID }, { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2, PCI_ANY_ID, PCI_ANY_ID }, { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5, PCI_ANY_ID, PCI_ANY_ID }, { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6, PCI_ANY_ID, PCI_ANY_ID }, {0} /* Terminating entry */ }; MODULE_DEVICE_TABLE(pci, scsih_pci_table); /** * _scsih_set_debug_level - global setting of ioc->logging_level. * * Note: The logging levels are defined in mpt3sas_debug.h. */ static int _scsih_set_debug_level(const char *val, struct kernel_param *kp) { int ret = param_set_int(val, kp); struct MPT3SAS_ADAPTER *ioc; if (ret) return ret; pr_info("setting logging_level(0x%08x)\n", logging_level); list_for_each_entry(ioc, &mpt3sas_ioc_list, list) ioc->logging_level = logging_level; return 0; } module_param_call(logging_level, _scsih_set_debug_level, param_get_int, &logging_level, 0644); /** * _scsih_srch_boot_sas_address - search based on sas_address * @sas_address: sas address * @boot_device: boot device object from bios page 2 * * Returns 1 when there's a match, 0 means no match. */ static inline int _scsih_srch_boot_sas_address(u64 sas_address, Mpi2BootDeviceSasWwid_t *boot_device) { return (sas_address == le64_to_cpu(boot_device->SASAddress)) ?
1 : 0; } /** * _scsih_srch_boot_device_name - search based on device name * @device_name: device name specified in IDENTIFY frame * @boot_device: boot device object from bios page 2 * * Returns 1 when there's a match, 0 means no match. */ static inline int _scsih_srch_boot_device_name(u64 device_name, Mpi2BootDeviceDeviceName_t *boot_device) { return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0; } /** * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot * @enclosure_logical_id: enclosure logical id * @slot_number: slot number * @boot_device: boot device object from bios page 2 * * Returns 1 when there's a match, 0 means no match. */ static inline int _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number, Mpi2BootDeviceEnclosureSlot_t *boot_device) { return (enclosure_logical_id == le64_to_cpu(boot_device-> EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device-> SlotNumber)) ? 1 : 0; } /** * _scsih_is_boot_device - search for matching boot device. * @sas_address: sas address * @device_name: device name specified in IDENTIFY frame * @enclosure_logical_id: enclosure logical id * @slot_number: slot number * @form: specifies boot device form * @boot_device: boot device object from bios page 2 * * Returns 1 when there's a match, 0 means no match. */ static int _scsih_is_boot_device(u64 sas_address, u64 device_name, u64 enclosure_logical_id, u16 slot, u8 form, Mpi2BiosPage2BootDevice_t *boot_device) { int rc = 0; switch (form) { case MPI2_BIOSPAGE2_FORM_SAS_WWID: if (!sas_address) break; rc = _scsih_srch_boot_sas_address( sas_address, &boot_device->SasWwid); break; case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT: if (!enclosure_logical_id) break; rc = _scsih_srch_boot_encl_slot( enclosure_logical_id, slot, &boot_device->EnclosureSlot); break; case MPI2_BIOSPAGE2_FORM_DEVICE_NAME: if (!device_name) break; rc = _scsih_srch_boot_device_name( device_name, &boot_device->DeviceName); break; case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED: break; } return rc; } /** * _scsih_get_sas_address - set the sas_address for given device handle * @handle: device handle * @sas_address: sas address * * Returns 0 success, non-zero when failure */ static int _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 *sas_address) { Mpi2SasDevicePage0_t sas_device_pg0; Mpi2ConfigReply_t mpi_reply; u32 ioc_status; *sas_address = 0; if (handle <= ioc->sas_hba.num_phys) { *sas_address = ioc->sas_hba.sas_address; return 0; } if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return -ENXIO; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { *sas_address = le64_to_cpu(sas_device_pg0.SASAddress); return 0; } /* we hit this because the given parent handle doesn't exist */ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) return -ENXIO; /* else error case */ pr_err(MPT3SAS_FMT "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n", ioc->name, handle, ioc_status, __FILE__, __LINE__, __func__); return -EIO; } /** * _scsih_determine_boot_device - determine boot device. * @ioc: per adapter object * @device: either sas_device or raid_device object * @is_raid: [flag] 1 = raid object, 0 = sas object * * Determines whether this device should be the first reported device to * scsi-ml or sas transport, this purpose is for persistent boot device.
* There are primary, alternate, and current entries in bios page 2. The order * priority is primary, alternate, then current. This routine saves * the corresponding device object and is_raid flag in the ioc object. * The saved data to be used later in _scsih_probe_boot_devices(). */ static void _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device, u8 is_raid) { struct _sas_device *sas_device; struct _raid_device *raid_device; u64 sas_address; u64 device_name; u64 enclosure_logical_id; u16 slot; /* only process this function when driver loads */ if (!ioc->is_driver_loading) return; /* no Bios, return immediately */ if (!ioc->bios_pg3.BiosVersion) return; if (!is_raid) { sas_device = device; sas_address = sas_device->sas_address; device_name = sas_device->device_name; enclosure_logical_id = sas_device->enclosure_logical_id; slot = sas_device->slot; } else { raid_device = device; sas_address = raid_device->wwid; device_name = 0; enclosure_logical_id = 0; slot = 0; } if (!ioc->req_boot_device.device) { if (_scsih_is_boot_device(sas_address, device_name, enclosure_logical_id, slot, (ioc->bios_pg2.ReqBootDeviceForm & MPI2_BIOSPAGE2_FORM_MASK), &ioc->bios_pg2.RequestedBootDevice)) { dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: req_boot_device(0x%016llx)\n", ioc->name, __func__, (unsigned long long)sas_address)); ioc->req_boot_device.device = device; ioc->req_boot_device.is_raid = is_raid; } } if (!ioc->req_alt_boot_device.device) { if (_scsih_is_boot_device(sas_address, device_name, enclosure_logical_id, slot, (ioc->bios_pg2.ReqAltBootDeviceForm & MPI2_BIOSPAGE2_FORM_MASK), &ioc->bios_pg2.RequestedAltBootDevice)) { dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: req_alt_boot_device(0x%016llx)\n", ioc->name, __func__, (unsigned long long)sas_address)); ioc->req_alt_boot_device.device = device; ioc->req_alt_boot_device.is_raid = is_raid; } } if (!ioc->current_boot_device.device) { if (_scsih_is_boot_device(sas_address, device_name, enclosure_logical_id, slot, (ioc->bios_pg2.CurrentBootDeviceForm & MPI2_BIOSPAGE2_FORM_MASK), &ioc->bios_pg2.CurrentBootDevice)) { dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: current_boot_device(0x%016llx)\n", ioc->name, __func__, (unsigned long long)sas_address)); ioc->current_boot_device.device = device; ioc->current_boot_device.is_raid = is_raid; } } } /** * mpt3sas_scsih_sas_device_find_by_sas_address - sas device search * @ioc: per adapter object * @sas_address: sas address * Context: Calling function should acquire ioc->sas_device_lock * * This searches for sas_device based on sas_address, then return sas_device * object. */ struct _sas_device * mpt3sas_scsih_sas_device_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc, u64 sas_address) { struct _sas_device *sas_device; list_for_each_entry(sas_device, &ioc->sas_device_list, list) if (sas_device->sas_address == sas_address) return sas_device; list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) if (sas_device->sas_address == sas_address) return sas_device; return NULL; } /** * _scsih_sas_device_find_by_handle - sas device search * @ioc: per adapter object * @handle: sas device handle (assigned by firmware) * Context: Calling function should acquire ioc->sas_device_lock * * This searches for sas_device based on sas_address, then return sas_device * object. 
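 * Both ioc->sas_device_list and the driver-load-time
 * ioc->sas_device_init_list are searched.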
*/ static struct _sas_device * _scsih_sas_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) { struct _sas_device *sas_device; list_for_each_entry(sas_device, &ioc->sas_device_list, list) if (sas_device->handle == handle) return sas_device; list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) if (sas_device->handle == handle) return sas_device; return NULL; } /** * _scsih_sas_device_remove - remove sas_device from list. * @ioc: per adapter object * @sas_device: the sas_device object * Context: This function will acquire ioc->sas_device_lock. * * Removing object and freeing associated memory from the ioc->sas_device_list. */ static void _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc, struct _sas_device *sas_device) { unsigned long flags; if (!sas_device) return; spin_lock_irqsave(&ioc->sas_device_lock, flags); list_del(&sas_device->list); kfree(sas_device); spin_unlock_irqrestore(&ioc->sas_device_lock, flags); } /** * _scsih_device_remove_by_handle - removing device object by handle * @ioc: per adapter object * @handle: device handle * * Return nothing. */ static void _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) { struct _sas_device *sas_device; unsigned long flags; if (ioc->shost_recovery) return; spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = _scsih_sas_device_find_by_handle(ioc, handle); if (sas_device) list_del(&sas_device->list); spin_unlock_irqrestore(&ioc->sas_device_lock, flags); if (sas_device) _scsih_remove_device(ioc, sas_device); } /** * mpt3sas_device_remove_by_sas_address - removing device object by sas address * @ioc: per adapter object * @sas_address: device sas_address * * Return nothing. */ void mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc, u64 sas_address) { struct _sas_device *sas_device; unsigned long flags; if (ioc->shost_recovery) return; spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc, sas_address); if (sas_device) list_del(&sas_device->list); spin_unlock_irqrestore(&ioc->sas_device_lock, flags); if (sas_device) _scsih_remove_device(ioc, sas_device); } /** * _scsih_sas_device_add - insert sas_device to the list. * @ioc: per adapter object * @sas_device: the sas_device object * Context: This function will acquire ioc->sas_device_lock. * * Adding new object to the ioc->sas_device_list. */ static void _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc, struct _sas_device *sas_device) { unsigned long flags; dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__, sas_device->handle, (unsigned long long)sas_device->sas_address)); spin_lock_irqsave(&ioc->sas_device_lock, flags); list_add_tail(&sas_device->list, &ioc->sas_device_list); spin_unlock_irqrestore(&ioc->sas_device_lock, flags); if (!mpt3sas_transport_port_add(ioc, sas_device->handle, sas_device->sas_address_parent)) { _scsih_sas_device_remove(ioc, sas_device); } else if (!sas_device->starget) { /* * When async scanning is enabled, it's not possible to remove * devices while scanning is turned on due to an oops in * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start() */ if (!ioc->is_driver_loading) mpt3sas_transport_port_remove(ioc, sas_device->sas_address, sas_device->sas_address_parent); _scsih_sas_device_remove(ioc, sas_device); } } /** * _scsih_sas_device_init_add - insert sas_device to the list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * Adds a new object at driver load time to the ioc->sas_device_init_list.
 */
static void
_scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	dewtprintk(ioc, pr_info(MPT3SAS_FMT
	    "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
	    ioc->name, __func__, sas_device->handle,
	    (unsigned long long)sas_device->sas_address));

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
	_scsih_determine_boot_device(ioc, sas_device, 0);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}

/**
 * _scsih_raid_device_find_by_id - raid device search
 * @ioc: per adapter object
 * @id: sas device target id
 * @channel: sas device channel
 * Context: Calling function should acquire ioc->raid_device_lock
 *
 * This searches for raid_device based on target id, then returns the
 * raid_device object.
 */
static struct _raid_device *
_scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
{
	struct _raid_device *raid_device, *r;

	r = NULL;
	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
		if (raid_device->id == id && raid_device->channel == channel) {
			r = raid_device;
			goto out;
		}
	}

 out:
	return r;
}

/**
 * _scsih_raid_device_find_by_handle - raid device search
 * @ioc: per adapter object
 * @handle: sas device handle (assigned by firmware)
 * Context: Calling function should acquire ioc->raid_device_lock
 *
 * This searches for raid_device based on handle, then returns the
 * raid_device object.
 */
static struct _raid_device *
_scsih_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _raid_device *raid_device, *r;

	r = NULL;
	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
		if (raid_device->handle != handle)
			continue;
		r = raid_device;
		goto out;
	}

 out:
	return r;
}

/**
 * _scsih_raid_device_find_by_wwid - raid device search
 * @ioc: per adapter object
 * @wwid: world wide identifier of the raid volume
 * Context: Calling function should acquire ioc->raid_device_lock
 *
 * This searches for raid_device based on wwid, then returns the
 * raid_device object.
 */
static struct _raid_device *
_scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
{
	struct _raid_device *raid_device, *r;

	r = NULL;
	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
		if (raid_device->wwid != wwid)
			continue;
		r = raid_device;
		goto out;
	}

 out:
	return r;
}

/**
 * _scsih_raid_device_add - add raid_device object
 * @ioc: per adapter object
 * @raid_device: raid_device object
 *
 * This is added to the raid_device_list linked list.
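 *
 * A minimal usage sketch (hypothetical caller; error handling omitted):
 *
 *	raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
 *	if (raid_device) {
 *		raid_device->handle = handle;
 *		raid_device->wwid = wwid;
 *		_scsih_raid_device_add(ioc, raid_device);
 *	}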
 */
static void
_scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
	struct _raid_device *raid_device)
{
	unsigned long flags;

	dewtprintk(ioc, pr_info(MPT3SAS_FMT
	    "%s: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
	    raid_device->handle, (unsigned long long)raid_device->wwid));

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	list_add_tail(&raid_device->list, &ioc->raid_device_list);
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}

/**
 * _scsih_raid_device_remove - delete raid_device object
 * @ioc: per adapter object
 * @raid_device: raid_device object
 *
 */
static void
_scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _raid_device *raid_device)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	list_del(&raid_device->list);
	kfree(raid_device);
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}

/**
 * mpt3sas_scsih_expander_find_by_handle - expander device search
 * @ioc: per adapter object
 * @handle: expander handle (assigned by firmware)
 * Context: Calling function should acquire ioc->sas_node_lock
 *
 * This searches for expander device based on handle, then returns the
 * sas_node object.
 */
struct _sas_node *
mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_node *sas_expander, *r;

	r = NULL;
	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
		if (sas_expander->handle != handle)
			continue;
		r = sas_expander;
		goto out;
	}
 out:
	return r;
}

/**
 * mpt3sas_scsih_expander_find_by_sas_address - expander device search
 * @ioc: per adapter object
 * @sas_address: sas address
 * Context: Calling function should acquire ioc->sas_node_lock.
 *
 * This searches for expander device based on sas_address, then returns the
 * sas_node object.
 */
struct _sas_node *
mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
	u64 sas_address)
{
	struct _sas_node *sas_expander, *r;

	r = NULL;
	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
		if (sas_expander->sas_address != sas_address)
			continue;
		r = sas_expander;
		goto out;
	}
 out:
	return r;
}

/**
 * _scsih_expander_node_add - insert expander device to the list.
 * @ioc: per adapter object
 * @sas_expander: the sas_device object
 * Context: This function will acquire ioc->sas_node_lock.
 *
 * Adds a new object to the ioc->sas_expander_list.
 *
 * Return nothing.
 */
static void
_scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
}

/**
 * _scsih_is_end_device - determines if device is an end device
 * @device_info: bitfield providing information about the device.
 * Context: none
 *
 * Returns 1 if end device.
 */
static int
_scsih_is_end_device(u32 device_info)
{
	if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
	    ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
	    (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
	    (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
		return 1;
	else
		return 0;
}

/**
 * _scsih_scsi_lookup_get - returns scmd entry
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Returns the smid stored scmd pointer.
 */
static struct scsi_cmnd *
_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	return ioc->scsi_lookup[smid - 1].scmd;
}

/**
 * _scsih_scsi_lookup_get_clear - returns scmd entry
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Returns the smid stored scmd pointer.
 * Then clears the stored scmd pointer.
 */
static inline struct scsi_cmnd *
_scsih_scsi_lookup_get_clear(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	unsigned long flags;
	struct scsi_cmnd *scmd;

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	scmd = ioc->scsi_lookup[smid - 1].scmd;
	ioc->scsi_lookup[smid - 1].scmd = NULL;
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	return scmd;
}

/**
 * _scsih_scsi_lookup_find_by_scmd - scmd lookup
 * @ioc: per adapter object
 * @scmd: pointer to scsi command object
 * Context: This function will acquire ioc->scsi_lookup_lock.
 *
 * This will search for a scmd pointer in the scsi_lookup array,
 * returning the relevant smid. A returned value of zero means invalid.
 */
static u16
_scsih_scsi_lookup_find_by_scmd(struct MPT3SAS_ADAPTER *ioc,
	struct scsi_cmnd *scmd)
{
	u16 smid;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	smid = 0;
	for (i = 0; i < ioc->scsiio_depth; i++) {
		if (ioc->scsi_lookup[i].scmd == scmd) {
			smid = ioc->scsi_lookup[i].smid;
			goto out;
		}
	}
 out:
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
	return smid;
}

/**
 * _scsih_scsi_lookup_find_by_target - search for matching channel:id
 * @ioc: per adapter object
 * @id: target id
 * @channel: channel
 * Context: This function will acquire ioc->scsi_lookup_lock.
 *
 * This will search for a matching channel:id in the scsi_lookup array,
 * returning 1 if found.
 */
static u8
_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
	int channel)
{
	u8 found;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	found = 0;
	for (i = 0 ; i < ioc->scsiio_depth; i++) {
		if (ioc->scsi_lookup[i].scmd &&
		    (ioc->scsi_lookup[i].scmd->device->id == id &&
		    ioc->scsi_lookup[i].scmd->device->channel == channel)) {
			found = 1;
			goto out;
		}
	}
 out:
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
	return found;
}

/**
 * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
 * @ioc: per adapter object
 * @id: target id
 * @lun: lun number
 * @channel: channel
 * Context: This function will acquire ioc->scsi_lookup_lock.
 *
 * This will search for a matching channel:id:lun in the scsi_lookup array,
 * returning 1 if found.
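 *
 * Example (sketch): mpt3sas_scsih_issue_tm() below uses this after a LUN
 * reset to decide the return status, along the lines of:
 *
 *	if (_scsih_scsi_lookup_find_by_lun(ioc, id, lun, channel))
 *		rc = FAILED;
 *	else
 *		rc = SUCCESS;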
*/ static u8 _scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id, unsigned int lun, int channel) { u8 found; unsigned long flags; int i; spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); found = 0; for (i = 0 ; i < ioc->scsiio_depth; i++) { if (ioc->scsi_lookup[i].scmd && (ioc->scsi_lookup[i].scmd->device->id == id && ioc->scsi_lookup[i].scmd->device->channel == channel && ioc->scsi_lookup[i].scmd->device->lun == lun)) { found = 1; goto out; } } out: spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); return found; } static void _scsih_adjust_queue_depth(struct scsi_device *sdev, int qdepth) { struct Scsi_Host *shost = sdev->host; int max_depth; struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); struct MPT3SAS_DEVICE *sas_device_priv_data; struct MPT3SAS_TARGET *sas_target_priv_data; struct _sas_device *sas_device; unsigned long flags; max_depth = shost->can_queue; /* limit max device queue for SATA to 32 */ sas_device_priv_data = sdev->hostdata; if (!sas_device_priv_data) goto not_sata; sas_target_priv_data = sas_device_priv_data->sas_target; if (!sas_target_priv_data) goto not_sata; if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) goto not_sata; spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc, sas_device_priv_data->sas_target->sas_address); if (sas_device && sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) max_depth = MPT3SAS_SATA_QUEUE_DEPTH; spin_unlock_irqrestore(&ioc->sas_device_lock, flags); not_sata: if (!sdev->tagged_supported) max_depth = 1; if (qdepth > max_depth) qdepth = max_depth; scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); } /** * _scsih_change_queue_depth - setting device queue depth * @sdev: scsi device struct * @qdepth: requested queue depth * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP * (see include/scsi/scsi_host.h for definition) * * Returns queue depth. */ static int _scsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) { if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) _scsih_adjust_queue_depth(sdev, qdepth); else if (reason == SCSI_QDEPTH_QFULL) scsi_track_queue_full(sdev, qdepth); else return -EOPNOTSUPP; if (sdev->inquiry_len > 7) sdev_printk(KERN_INFO, sdev, "qdepth(%d), tagged(%d), " \ "simple(%d), ordered(%d), scsi_level(%d), cmd_que(%d)\n", sdev->queue_depth, sdev->tagged_supported, sdev->simple_tags, sdev->ordered_tags, sdev->scsi_level, (sdev->inquiry[7] & 2) >> 1); return sdev->queue_depth; } /** * _scsih_change_queue_type - changing device queue tag type * @sdev: scsi device struct * @tag_type: requested tag type * * Returns queue tag type. */ static int _scsih_change_queue_type(struct scsi_device *sdev, int tag_type) { if (sdev->tagged_supported) { scsi_set_tag_type(sdev, tag_type); if (tag_type) scsi_activate_tcq(sdev, sdev->queue_depth); else scsi_deactivate_tcq(sdev, sdev->queue_depth); } else tag_type = 0; return tag_type; } /** * _scsih_target_alloc - target add routine * @starget: scsi target struct * * Returns 0 if ok. Any other return is assumed to be an error and * the device is ignored. 
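 *
 * Reached via the SCSI midlayer host template (sketch; the template name
 * below is illustrative, not a reference to a specific symbol):
 *
 *	static struct scsi_host_template scsih_driver_template = {
 *		...
 *		.target_alloc	= _scsih_target_alloc,
 *		.target_destroy	= _scsih_target_destroy,
 *	};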
 */
static int
_scsih_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	unsigned long flags;
	struct sas_rphy *rphy;

	sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
	    GFP_KERNEL);
	if (!sas_target_priv_data)
		return -ENOMEM;

	starget->hostdata = sas_target_priv_data;
	sas_target_priv_data->starget = starget;
	sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;

	/* RAID volumes */
	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
		    starget->channel);
		if (raid_device) {
			sas_target_priv_data->handle = raid_device->handle;
			sas_target_priv_data->sas_address = raid_device->wwid;
			sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
			raid_device->starget = starget;
		}
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		return 0;
	}

	/* sas/sata devices */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	rphy = dev_to_rphy(starget->dev.parent);
	sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
	    rphy->identify.sas_address);

	if (sas_device) {
		sas_target_priv_data->handle = sas_device->handle;
		sas_target_priv_data->sas_address = sas_device->sas_address;
		sas_device->starget = starget;
		sas_device->id = starget->id;
		sas_device->channel = starget->channel;
		if (test_bit(sas_device->handle, ioc->pd_handles))
			sas_target_priv_data->flags |=
			    MPT_TARGET_FLAGS_RAID_COMPONENT;
		if (sas_device->fast_path)
			sas_target_priv_data->flags |= MPT_TARGET_FASTPATH_IO;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return 0;
}

/**
 * _scsih_target_destroy - target destroy routine
 * @starget: scsi target struct
 *
 * Returns nothing.
 */
static void
_scsih_target_destroy(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	unsigned long flags;
	struct sas_rphy *rphy;

	sas_target_priv_data = starget->hostdata;
	if (!sas_target_priv_data)
		return;

	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
		    starget->channel);
		if (raid_device) {
			raid_device->starget = NULL;
			raid_device->sdev = NULL;
		}
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		goto out;
	}

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	rphy = dev_to_rphy(starget->dev.parent);
	sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
	    rphy->identify.sas_address);
	if (sas_device && (sas_device->starget == starget) &&
	    (sas_device->id == starget->id) &&
	    (sas_device->channel == starget->channel))
		sas_device->starget = NULL;

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

 out:
	kfree(sas_target_priv_data);
	starget->hostdata = NULL;
}

/**
 * _scsih_slave_alloc - device add routine
 * @sdev: scsi device struct
 *
 * Returns 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
_scsih_slave_alloc(struct scsi_device *sdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_target *starget;
	struct _raid_device *raid_device;
	struct _sas_device *sas_device;
	unsigned long flags;

	sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
	    GFP_KERNEL);
	if (!sas_device_priv_data)
		return -ENOMEM;

	sas_device_priv_data->lun = sdev->lun;
	sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
	starget = scsi_target(sdev);
	sas_target_priv_data = starget->hostdata;
	sas_target_priv_data->num_luns++;
	sas_device_priv_data->sas_target = sas_target_priv_data;
	sdev->hostdata = sas_device_priv_data;
	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
		sdev->no_uld_attach = 1;

	shost = dev_to_shost(&starget->dev);
	ioc = shost_priv(shost);
	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc,
		    starget->id, starget->channel);
		if (raid_device)
			raid_device->sdev = sdev; /* raid is single lun */
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	}

	if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
		    sas_target_priv_data->sas_address);
		if (sas_device && (sas_device->starget == NULL)) {
			sdev_printk(KERN_INFO, sdev,
			    "%s : sas_device->starget set to starget @ %d\n",
			    __func__, __LINE__);
			sas_device->starget = starget;
		}
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}

	return 0;
}

/**
 * _scsih_slave_destroy - device destroy routine
 * @sdev: scsi device struct
 *
 * Returns nothing.
 */
static void
_scsih_slave_destroy(struct scsi_device *sdev)
{
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct scsi_target *starget;
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct _sas_device *sas_device;
	unsigned long flags;

	if (!sdev->hostdata)
		return;

	starget = scsi_target(sdev);
	sas_target_priv_data = starget->hostdata;
	sas_target_priv_data->num_luns--;

	shost = dev_to_shost(&starget->dev);
	ioc = shost_priv(shost);

	if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
		    sas_target_priv_data->sas_address);
		if (sas_device && !sas_target_priv_data->num_luns)
			sas_device->starget = NULL;
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}

	kfree(sdev->hostdata);
	sdev->hostdata = NULL;
}

/**
 * _scsih_display_sata_capabilities - sata capabilities
 * @ioc: per adapter object
 * @handle: device handle
 * @sdev: scsi device struct
 */
static void
_scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
	u16 handle, struct scsi_device *sdev)
{
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	u32 ioc_status;
	u16 flags;
	u32 device_info;

	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		return;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		return;
	}

	flags = le16_to_cpu(sas_device_pg0.Flags);
	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);

	sdev_printk(KERN_INFO, sdev,
	    "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
	    "sw_preserve(%s)\n",
	    (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
	    "n",
	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
}

/*
 * raid transport support -
 * Enabled for SLES11 and newer, in older kernels the driver will panic when
 * unloading the driver followed by a load - I believe that the subroutine
 * raid_class_release() is not cleaning up properly.
 */

/**
 * _scsih_is_raid - return boolean indicating device is raid volume
 * @dev: the device struct object
 */
static int
_scsih_is_raid(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
}

/**
 * _scsih_get_resync - get raid volume resync percent complete
 * @dev: the device struct object
 */
static void
_scsih_get_resync(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
	struct _raid_device *raid_device;
	unsigned long flags;
	Mpi2RaidVolPage0_t vol_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u32 volume_status_flags;
	u8 percent_complete;
	u16 handle;

	percent_complete = 0;
	handle = 0;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
	    sdev->channel);
	if (raid_device) {
		handle = raid_device->handle;
		percent_complete = raid_device->percent_complete;
	}
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);

	if (!handle)
		goto out;

	if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
	    MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
	    sizeof(Mpi2RaidVolPage0_t))) {
		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		percent_complete = 0;
		goto out;
	}

	volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
	if (!(volume_status_flags &
	    MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
		percent_complete = 0;

 out:
	raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
}

/**
 * _scsih_get_state - get raid volume state
 * @dev: the device struct object
 */
static void
_scsih_get_state(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
	struct _raid_device *raid_device;
	unsigned long flags;
	Mpi2RaidVolPage0_t vol_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u32 volstate;
	enum raid_state state = RAID_STATE_UNKNOWN;
	u16 handle = 0;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
	    sdev->channel);
	if (raid_device)
		handle = raid_device->handle;
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);

	if (!raid_device)
		goto out;

	if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
	    MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
	    sizeof(Mpi2RaidVolPage0_t))) {
		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		goto out;
	}

	volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
	if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
		state = RAID_STATE_RESYNCING;
		goto out;
	}

	switch (vol_pg0.VolumeState) {
	case MPI2_RAID_VOL_STATE_OPTIMAL:
	case MPI2_RAID_VOL_STATE_ONLINE:
		state = RAID_STATE_ACTIVE;
		break;
	case MPI2_RAID_VOL_STATE_DEGRADED:
		state = RAID_STATE_DEGRADED;
		break;
	case MPI2_RAID_VOL_STATE_FAILED:
	case MPI2_RAID_VOL_STATE_MISSING:
		state = RAID_STATE_OFFLINE;
		break;
	}
 out:
	raid_set_state(mpt3sas_raid_template, dev, state);
}

/**
 * _scsih_set_level - set raid level
 * @sdev: scsi device struct
 * @volume_type: volume type
 */
static void
_scsih_set_level(struct scsi_device *sdev, u8 volume_type)
{
	enum raid_level level = RAID_LEVEL_UNKNOWN;

	switch (volume_type) {
	case MPI2_RAID_VOL_TYPE_RAID0:
		level = RAID_LEVEL_0;
		break;
	case MPI2_RAID_VOL_TYPE_RAID10:
		level = RAID_LEVEL_10;
		break;
	case MPI2_RAID_VOL_TYPE_RAID1E:
		level = RAID_LEVEL_1E;
		break;
	case MPI2_RAID_VOL_TYPE_RAID1:
		level = RAID_LEVEL_1;
		break;
	}

	raid_set_level(mpt3sas_raid_template, &sdev->sdev_gendev, level);
}

/**
 * _scsih_get_volume_capabilities - volume capabilities
 * @ioc: per adapter object
 * @raid_device: the raid_device object
 *
 * Returns 0 for success, else 1
 */
static int
_scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
	struct _raid_device *raid_device)
{
	Mpi2RaidVolPage0_t *vol_pg0;
	Mpi2RaidPhysDiskPage0_t pd_pg0;
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u16 sz;
	u8 num_pds;

	if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
	    &num_pds)) || !num_pds) {
		dfailprintk(ioc, pr_warn(MPT3SAS_FMT
		    "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
		    __func__));
		return 1;
	}

	raid_device->num_pds = num_pds;
	sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
	    sizeof(Mpi2RaidVol0PhysDisk_t));
	vol_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!vol_pg0) {
		dfailprintk(ioc, pr_warn(MPT3SAS_FMT
		    "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
		    __func__));
		return 1;
	}

	if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
	    MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
		dfailprintk(ioc, pr_warn(MPT3SAS_FMT
		    "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
		    __func__));
		kfree(vol_pg0);
		return 1;
	}

	raid_device->volume_type = vol_pg0->VolumeType;

	/* figure out what the underlying devices are by
	 * obtaining the device_info bits for the 1st device
	 */
	if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
	    vol_pg0->PhysDisk[0].PhysDiskNum))) {
		if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
		    le16_to_cpu(pd_pg0.DevHandle)))) {
			raid_device->device_info =
			    le32_to_cpu(sas_device_pg0.DeviceInfo);
		}
	}

	kfree(vol_pg0);
	return 0;
}

/**
 * _scsih_enable_tlr - setting TLR flags
 * @ioc: per adapter object
 * @sdev: scsi device struct
 *
 * Enabling Transaction Layer Retries for tape devices when
 * vpd page 0x90 is present
 *
 */
static void
_scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
{
	/* only for TAPE */
	if (sdev->type != TYPE_TAPE)
		return;

	if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
		return;

	sas_enable_tlr(sdev);
	sdev_printk(KERN_INFO, sdev, "TLR %s\n",
	    sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
	return;
}

/**
 * _scsih_slave_configure - device configure routine.
 * @sdev: scsi device struct
 *
 * Returns 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
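 *
 * Like _scsih_target_alloc() and _scsih_slave_alloc() above, this is a
 * midlayer callback, wired up as .slave_configure in the host template
 * (sketch; see the example near _scsih_target_alloc()).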
*/ static int _scsih_slave_configure(struct scsi_device *sdev) { struct Scsi_Host *shost = sdev->host; struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); struct MPT3SAS_DEVICE *sas_device_priv_data; struct MPT3SAS_TARGET *sas_target_priv_data; struct _sas_device *sas_device; struct _raid_device *raid_device; unsigned long flags; int qdepth; u8 ssp_target = 0; char *ds = ""; char *r_level = ""; u16 handle, volume_handle = 0; u64 volume_wwid = 0; qdepth = 1; sas_device_priv_data = sdev->hostdata; sas_device_priv_data->configured_lun = 1; sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT; sas_target_priv_data = sas_device_priv_data->sas_target; handle = sas_target_priv_data->handle; /* raid volume handling */ if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) { spin_lock_irqsave(&ioc->raid_device_lock, flags); raid_device = _scsih_raid_device_find_by_handle(ioc, handle); spin_unlock_irqrestore(&ioc->raid_device_lock, flags); if (!raid_device) { dfailprintk(ioc, pr_warn(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__)); return 1; } if (_scsih_get_volume_capabilities(ioc, raid_device)) { dfailprintk(ioc, pr_warn(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__)); return 1; } /* RAID Queue Depth Support * IS volume = underlying qdepth of drive type, either * MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH) */ if (raid_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) { qdepth = MPT3SAS_SAS_QUEUE_DEPTH; ds = "SSP"; } else { qdepth = MPT3SAS_SATA_QUEUE_DEPTH; if (raid_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) ds = "SATA"; else ds = "STP"; } switch (raid_device->volume_type) { case MPI2_RAID_VOL_TYPE_RAID0: r_level = "RAID0"; break; case MPI2_RAID_VOL_TYPE_RAID1E: qdepth = MPT3SAS_RAID_QUEUE_DEPTH; if (ioc->manu_pg10.OEMIdentifier && (le32_to_cpu(ioc->manu_pg10.GenericFlags0) & MFG10_GF0_R10_DISPLAY) && !(raid_device->num_pds % 2)) r_level = "RAID10"; else r_level = "RAID1E"; break; case MPI2_RAID_VOL_TYPE_RAID1: qdepth = MPT3SAS_RAID_QUEUE_DEPTH; r_level = "RAID1"; break; case MPI2_RAID_VOL_TYPE_RAID10: qdepth = MPT3SAS_RAID_QUEUE_DEPTH; r_level = "RAID10"; break; case MPI2_RAID_VOL_TYPE_UNKNOWN: default: qdepth = MPT3SAS_RAID_QUEUE_DEPTH; r_level = "RAIDX"; break; } sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), wwid(0x%016llx), pd_count(%d), type(%s)\n", r_level, raid_device->handle, (unsigned long long)raid_device->wwid, raid_device->num_pds, ds); _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); /* raid transport support */ _scsih_set_level(sdev, raid_device->volume_type); return 0; } /* non-raid handling */ if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) { if (mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle)) { dfailprintk(ioc, pr_warn(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__)); return 1; } if (volume_handle && mpt3sas_config_get_volume_wwid(ioc, volume_handle, &volume_wwid)) { dfailprintk(ioc, pr_warn(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__)); return 1; } } spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc, sas_device_priv_data->sas_target->sas_address); if (!sas_device) { spin_unlock_irqrestore(&ioc->sas_device_lock, flags); dfailprintk(ioc, pr_warn(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__)); return 1; } sas_device->volume_handle 
= volume_handle; sas_device->volume_wwid = volume_wwid; if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) { qdepth = MPT3SAS_SAS_QUEUE_DEPTH; ssp_target = 1; ds = "SSP"; } else { qdepth = MPT3SAS_SATA_QUEUE_DEPTH; if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) ds = "STP"; else if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) ds = "SATA"; } sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \ "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n", ds, handle, (unsigned long long)sas_device->sas_address, sas_device->phy, (unsigned long long)sas_device->device_name); sdev_printk(KERN_INFO, sdev, "%s: enclosure_logical_id(0x%016llx), slot(%d)\n", ds, (unsigned long long) sas_device->enclosure_logical_id, sas_device->slot); spin_unlock_irqrestore(&ioc->sas_device_lock, flags); if (!ssp_target) _scsih_display_sata_capabilities(ioc, handle, sdev); _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); if (ssp_target) { sas_read_port_mode_page(sdev); _scsih_enable_tlr(ioc, sdev); } return 0; } /** * _scsih_bios_param - fetch head, sector, cylinder info for a disk * @sdev: scsi device struct * @bdev: pointer to block device context * @capacity: device size (in 512 byte sectors) * @params: three element array to place output: * params[0] number of heads (max 255) * params[1] number of sectors (max 63) * params[2] number of cylinders * * Return nothing. */ static int _scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int params[]) { int heads; int sectors; sector_t cylinders; ulong dummy; heads = 64; sectors = 32; dummy = heads * sectors; cylinders = capacity; sector_div(cylinders, dummy); /* * Handle extended translation size for logical drives * > 1Gb */ if ((ulong)capacity >= 0x200000) { heads = 255; sectors = 63; dummy = heads * sectors; cylinders = capacity; sector_div(cylinders, dummy); } /* return result */ params[0] = heads; params[1] = sectors; params[2] = cylinders; return 0; } /** * _scsih_response_code - translation of device response code * @ioc: per adapter object * @response_code: response code returned by the device * * Return nothing. */ static void _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code) { char *desc; switch (response_code) { case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE: desc = "task management request completed"; break; case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME: desc = "invalid frame"; break; case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED: desc = "task management request not supported"; break; case MPI2_SCSITASKMGMT_RSP_TM_FAILED: desc = "task management request failed"; break; case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED: desc = "task management request succeeded"; break; case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN: desc = "invalid lun"; break; case 0xA: desc = "overlapped tag attempted"; break; case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC: desc = "task queued, however not sent to target"; break; default: desc = "unknown"; break; } pr_warn(MPT3SAS_FMT "response_code(0x%01x): %s\n", ioc->name, response_code, desc); } /** * _scsih_tm_done - tm completion routine * @ioc: per adapter object * @smid: system request message index * @msix_index: MSIX table index supplied by the OS * @reply: reply message frame(lower 32bit addr) * Context: none. * * The callback handler when using scsih_issue_tm. * * Return 1 meaning mf should be freed from _base_interrupt * 0 means the mf is freed from this function. 
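 *
 * Registration sketch (hypothetical init-time code; per the note in
 * mpt3sas_scsih_issue_tm() below, the returned callback index is kept
 * in ioc->tm_cb_idx):
 *
 *	tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);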
 */
static u8
_scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;

	if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
		return 1;
	if (ioc->tm_cmds.smid != smid)
		return 1;
	mpt3sas_base_flush_reply_queues(ioc);
	ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (mpi_reply) {
		memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
		ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
	}
	ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
	complete(&ioc->tm_cmds.done);
	return 1;
}

/**
 * mpt3sas_scsih_set_tm_flag - set per target tm_busy
 * @ioc: per adapter object
 * @handle: device handle
 *
 * During task management request, we need to freeze the device queue.
 */
void
mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;
	u8 skip = 0;

	shost_for_each_device(sdev, ioc->shost) {
		if (skip)
			continue;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->sas_target->handle == handle) {
			sas_device_priv_data->sas_target->tm_busy = 1;
			skip = 1;
			ioc->ignore_loginfos = 1;
		}
	}
}

/**
 * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
 * @ioc: per adapter object
 * @handle: device handle
 *
 * During task management request, we need to freeze the device queue.
 */
void
mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;
	u8 skip = 0;

	shost_for_each_device(sdev, ioc->shost) {
		if (skip)
			continue;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->sas_target->handle == handle) {
			sas_device_priv_data->sas_target->tm_busy = 0;
			skip = 1;
			ioc->ignore_loginfos = 0;
		}
	}
}

/**
 * mpt3sas_scsih_issue_tm - main routine for sending tm requests
 * @ioc: per adapter struct
 * @handle: device handle
 * @channel: the channel assigned by the OS
 * @id: the id assigned by the OS
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
 * @smid_task: smid assigned to the task
 * @timeout: timeout in seconds
 * @serial_number: the serial_number from scmd
 * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF
 * Context: user
 *
 * A generic API for sending task management requests to firmware.
 *
 * The callback index is set inside ioc->tm_cb_idx.
 *
 * Return SUCCESS or FAILED.
 */
int
mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
	uint id, uint lun, u8 type, u16 smid_task, ulong timeout,
	unsigned long serial_number, enum mutex_type m_type)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	Mpi2SCSITaskManagementReply_t *mpi_reply;
	u16 smid = 0;
	u32 ioc_state;
	unsigned long timeleft;
	struct scsiio_tracker *scsi_lookup = NULL;
	int rc;

	if (m_type == TM_MUTEX_ON)
		mutex_lock(&ioc->tm_cmds.mutex);
	if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
		pr_info(MPT3SAS_FMT "%s: tm_cmd busy!!!\n",
		    ioc->name, __func__);
		rc = FAILED;
		goto err_out;
	}

	if (ioc->shost_recovery || ioc->remove_host ||
	    ioc->pci_error_recovery) {
		pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
		    ioc->name, __func__);
		rc = FAILED;
		goto err_out;
	}

	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	if (ioc_state & MPI2_DOORBELL_USED) {
		dhsprintk(ioc, pr_info(MPT3SAS_FMT
		    "unexpected doorbell active!\n", ioc->name));
		rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
		    FORCE_BIG_HAMMER);
		rc = (!rc) ?
SUCCESS : FAILED; goto err_out; } if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { mpt3sas_base_fault_info(ioc, ioc_state & MPI2_DOORBELL_DATA_MASK); rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, FORCE_BIG_HAMMER); rc = (!rc) ? SUCCESS : FAILED; goto err_out; } smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx); if (!smid) { pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", ioc->name, __func__); rc = FAILED; goto err_out; } if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) scsi_lookup = &ioc->scsi_lookup[smid_task - 1]; dtmprintk(ioc, pr_info(MPT3SAS_FMT "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d)\n", ioc->name, handle, type, smid_task)); ioc->tm_cmds.status = MPT3_CMD_PENDING; mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); ioc->tm_cmds.smid = smid; memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t)); memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t)); mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; mpi_request->DevHandle = cpu_to_le16(handle); mpi_request->TaskType = type; mpi_request->TaskMID = cpu_to_le16(smid_task); int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN); mpt3sas_scsih_set_tm_flag(ioc, handle); init_completion(&ioc->tm_cmds.done); mpt3sas_base_put_smid_hi_priority(ioc, smid); timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ); if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) { pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name, __func__); _debug_dump_mf(mpi_request, sizeof(Mpi2SCSITaskManagementRequest_t)/4); if (!(ioc->tm_cmds.status & MPT3_CMD_RESET)) { rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, FORCE_BIG_HAMMER); rc = (!rc) ? SUCCESS : FAILED; ioc->tm_cmds.status = MPT3_CMD_NOT_USED; mpt3sas_scsih_clear_tm_flag(ioc, handle); goto err_out; } } if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) { mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT); mpi_reply = ioc->tm_cmds.reply; dtmprintk(ioc, pr_info(MPT3SAS_FMT "complete tm: " \ "ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n", ioc->name, le16_to_cpu(mpi_reply->IOCStatus), le32_to_cpu(mpi_reply->IOCLogInfo), le32_to_cpu(mpi_reply->TerminationCount))); if (ioc->logging_level & MPT_DEBUG_TM) { _scsih_response_code(ioc, mpi_reply->ResponseCode); if (mpi_reply->IOCStatus) _debug_dump_mf(mpi_request, sizeof(Mpi2SCSITaskManagementRequest_t)/4); } } switch (type) { case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK: rc = SUCCESS; if (scsi_lookup->scmd == NULL) break; rc = FAILED; break; case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET: if (_scsih_scsi_lookup_find_by_target(ioc, id, channel)) rc = FAILED; else rc = SUCCESS; break; case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: if (_scsih_scsi_lookup_find_by_lun(ioc, id, lun, channel)) rc = FAILED; else rc = SUCCESS; break; case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK: rc = SUCCESS; break; default: rc = FAILED; break; } mpt3sas_scsih_clear_tm_flag(ioc, handle); ioc->tm_cmds.status = MPT3_CMD_NOT_USED; if (m_type == TM_MUTEX_ON) mutex_unlock(&ioc->tm_cmds.mutex); return rc; err_out: if (m_type == TM_MUTEX_ON) mutex_unlock(&ioc->tm_cmds.mutex); return rc; } /** * _scsih_tm_display_info - displays info about the device * @ioc: per adapter struct * @scmd: pointer to scsi command object * * Called by task management callback handlers. 
*/ static void _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd) { struct scsi_target *starget = scmd->device->sdev_target; struct MPT3SAS_TARGET *priv_target = starget->hostdata; struct _sas_device *sas_device = NULL; unsigned long flags; char *device_str = NULL; if (!priv_target) return; device_str = "volume"; scsi_print_command(scmd); if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) { starget_printk(KERN_INFO, starget, "%s handle(0x%04x), %s wwid(0x%016llx)\n", device_str, priv_target->handle, device_str, (unsigned long long)priv_target->sas_address); } else { spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc, priv_target->sas_address); if (sas_device) { if (priv_target->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) { starget_printk(KERN_INFO, starget, "volume handle(0x%04x), " "volume wwid(0x%016llx)\n", sas_device->volume_handle, (unsigned long long)sas_device->volume_wwid); } starget_printk(KERN_INFO, starget, "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n", sas_device->handle, (unsigned long long)sas_device->sas_address, sas_device->phy); starget_printk(KERN_INFO, starget, "enclosure_logical_id(0x%016llx), slot(%d)\n", (unsigned long long)sas_device->enclosure_logical_id, sas_device->slot); } spin_unlock_irqrestore(&ioc->sas_device_lock, flags); } } /** * _scsih_abort - eh threads main abort routine * @scmd: pointer to scsi command object * * Returns SUCCESS if command aborted else FAILED */ static int _scsih_abort(struct scsi_cmnd *scmd) { struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); struct MPT3SAS_DEVICE *sas_device_priv_data; u16 smid; u16 handle; int r; sdev_printk(KERN_INFO, scmd->device, "attempting task abort! scmd(%p)\n", scmd); _scsih_tm_display_info(ioc, scmd); sas_device_priv_data = scmd->device->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { sdev_printk(KERN_INFO, scmd->device, "device been deleted! scmd(%p)\n", scmd); scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); r = SUCCESS; goto out; } /* search for the command */ smid = _scsih_scsi_lookup_find_by_scmd(ioc, scmd); if (!smid) { scmd->result = DID_RESET << 16; r = SUCCESS; goto out; } /* for hidden raid components and volumes this is not supported */ if (sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_RAID_COMPONENT || sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) { scmd->result = DID_RESET << 16; r = FAILED; goto out; } mpt3sas_halt_firmware(ioc); handle = sas_device_priv_data->sas_target->handle; r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel, scmd->device->id, scmd->device->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, scmd->serial_number, TM_MUTEX_ON); out: sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n", ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); return r; } /** * _scsih_dev_reset - eh threads main device reset routine * @scmd: pointer to scsi command object * * Returns SUCCESS if command aborted else FAILED */ static int _scsih_dev_reset(struct scsi_cmnd *scmd) { struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); struct MPT3SAS_DEVICE *sas_device_priv_data; struct _sas_device *sas_device; unsigned long flags; u16 handle; int r; sdev_printk(KERN_INFO, scmd->device, "attempting device reset! 
scmd(%p)\n", scmd);
	_scsih_tm_display_info(ioc, scmd);

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
		sdev_printk(KERN_INFO, scmd->device,
		    "device been deleted! scmd(%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components obtain the volume_handle */
	handle = 0;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = _scsih_sas_device_find_by_handle(ioc,
		    sas_device_priv_data->sas_target->handle);
		if (sas_device)
			handle = sas_device->volume_handle;
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	} else
		handle = sas_device_priv_data->sas_target->handle;

	if (!handle) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
	    scmd->device->id, scmd->device->lun,
	    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, 0,
	    TM_MUTEX_ON);

 out:
	sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
	return r;
}

/**
 * _scsih_target_reset - eh threads main target reset routine
 * @scmd: pointer to scsi command object
 *
 * Returns SUCCESS if command aborted else FAILED
 */
static int
_scsih_target_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct _sas_device *sas_device;
	unsigned long flags;
	u16 handle;
	int r;
	struct scsi_target *starget = scmd->device->sdev_target;

	starget_printk(KERN_INFO, starget,
	    "attempting target reset! scmd(%p)\n", scmd);
	_scsih_tm_display_info(ioc, scmd);

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
		starget_printk(KERN_INFO, starget,
		    "target been deleted! scmd(%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components obtain the volume_handle */
	handle = 0;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = _scsih_sas_device_find_by_handle(ioc,
		    sas_device_priv_data->sas_target->handle);
		if (sas_device)
			handle = sas_device->volume_handle;
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	} else
		handle = sas_device_priv_data->sas_target->handle;

	if (!handle) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
	    scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
	    30, 0, TM_MUTEX_ON);

 out:
	starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
	return r;
}

/**
 * _scsih_host_reset - eh threads main host reset routine
 * @scmd: pointer to scsi command object
 *
 * Returns SUCCESS if command aborted else FAILED
 */
static int
_scsih_host_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	int r, retval;

	pr_info(MPT3SAS_FMT "attempting host reset! scmd(%p)\n",
	    ioc->name, scmd);
	scsi_print_command(scmd);

	retval = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
	    FORCE_BIG_HAMMER);
	r = (retval < 0) ? FAILED : SUCCESS;
	pr_info(MPT3SAS_FMT "host reset: %s scmd(%p)\n",
	    ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	return r;
}

/**
 * _scsih_fw_event_add - insert and queue up fw_event
 * @ioc: per adapter object
 * @fw_event: object describing the event
 * Context: This function will acquire ioc->fw_event_lock.
 *
 * This adds the firmware event object into linked list, then queues it up to
 * be processed from user context.
 *
 * Return nothing.
 */
static void
_scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
{
	unsigned long flags;

	if (ioc->firmware_event_thread == NULL)
		return;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	INIT_LIST_HEAD(&fw_event->list);
	list_add_tail(&fw_event->list, &ioc->fw_event_list);
	INIT_WORK(&fw_event->work, _firmware_event_work);
	queue_work(ioc->firmware_event_thread, &fw_event->work);
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}

/**
 * _scsih_fw_event_free - delete fw_event
 * @ioc: per adapter object
 * @fw_event: object describing the event
 * Context: This function will acquire ioc->fw_event_lock.
 *
 * This removes firmware event object from linked list, frees associated
 * memory.
 *
 * Return nothing.
 */
static void
_scsih_fw_event_free(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
	*fw_event)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	list_del(&fw_event->list);
	kfree(fw_event->event_data);
	kfree(fw_event);
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}

/**
 * mpt3sas_send_trigger_data_event - send event for processing trigger data
 * @ioc: per adapter object
 * @event_data: trigger event data
 *
 * Return nothing.
 */
void
mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
	struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
{
	struct fw_event_work *fw_event;

	if (ioc->is_driver_loading)
		return;
	fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
	if (!fw_event)
		return;
	fw_event->event_data = kzalloc(sizeof(*event_data), GFP_ATOMIC);
	if (!fw_event->event_data) {
		/* don't leak the containing fw_event on failure */
		kfree(fw_event);
		return;
	}
	fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
	fw_event->ioc = ioc;
	memcpy(fw_event->event_data, event_data, sizeof(*event_data));
	_scsih_fw_event_add(ioc, fw_event);
}

/**
 * _scsih_error_recovery_delete_devices - remove devices not responding
 * @ioc: per adapter object
 *
 * Return nothing.
 */
static void
_scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
{
	struct fw_event_work *fw_event;

	if (ioc->is_driver_loading)
		return;
	fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
	if (!fw_event)
		return;
	fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
	fw_event->ioc = ioc;
	_scsih_fw_event_add(ioc, fw_event);
}

/**
 * mpt3sas_port_enable_complete - port enable completed (fake event)
 * @ioc: per adapter object
 *
 * Return nothing.
 */
void
mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
{
	struct fw_event_work *fw_event;

	fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
	if (!fw_event)
		return;
	fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
	fw_event->ioc = ioc;
	_scsih_fw_event_add(ioc, fw_event);
}

/**
 * _scsih_fw_event_cleanup_queue - cleanup event queue
 * @ioc: per adapter object
 *
 * Walk the firmware event queue, either killing timers, or waiting
 * for outstanding events to complete
 *
 * Return nothing.
 */
static void
_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
{
	struct fw_event_work *fw_event, *next;

	if (list_empty(&ioc->fw_event_list) ||
	    !ioc->firmware_event_thread || in_interrupt())
		return;

	list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
		if (cancel_delayed_work(&fw_event->delayed_work)) {
			_scsih_fw_event_free(ioc, fw_event);
			continue;
		}
		fw_event->cancel_pending_work = 1;
	}
}

/**
 * _scsih_ublock_io_all_device - unblock every device
 * @ioc: per adapter object
 *
 * change the device state from block to running
 */
static void
_scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (!sas_device_priv_data->block)
			continue;

		sas_device_priv_data->block = 0;
		dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
		    "device_running, handle(0x%04x)\n",
		    sas_device_priv_data->sas_target->handle));
		scsi_internal_device_unblock(sdev, SDEV_RUNNING);
	}
}

/**
 * _scsih_ublock_io_device - prepare device to be deleted
 * @ioc: per adapter object
 * @sas_address: sas address
 *
 * unblock then put device in offline state
 */
static void
_scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->sas_target->sas_address
		    != sas_address)
			continue;
		if (sas_device_priv_data->block) {
			sas_device_priv_data->block = 0;
			scsi_internal_device_unblock(sdev, SDEV_RUNNING);
		}
	}
}

/**
 * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
 * @ioc: per adapter object
 *
 * During device pull we need to appropriately set the sdev state.
 */
static void
_scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->block)
			continue;

		sas_device_priv_data->block = 1;
		scsi_internal_device_block(sdev);
		sdev_printk(KERN_INFO, sdev,
		    "device_blocked, handle(0x%04x)\n",
		    sas_device_priv_data->sas_target->handle);
	}
}

/**
 * _scsih_block_io_device - set the device state to SDEV_BLOCK
 * @ioc: per adapter object
 * @handle: device handle
 *
 * During device pull we need to appropriately set the sdev state.
 */
static void
_scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->sas_target->handle != handle)
			continue;
		if (sas_device_priv_data->block)
			continue;

		sas_device_priv_data->block = 1;
		scsi_internal_device_block(sdev);
		sdev_printk(KERN_INFO, sdev,
		    "device_blocked, handle(0x%04x)\n", handle);
	}
}

/**
 * _scsih_block_io_to_children_attached_to_ex
 * @ioc: per adapter object
 * @sas_expander: the sas_device object
 *
 * This routine sets sdev state to SDEV_BLOCK for all devices
 * attached to this expander. This function is called when the expander is
 * pulled.
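 *
 * Note the recursion below: edge/fanout expanders found on the port list
 * are looked up and this routine calls itself for each sibling, so every
 * end device in the sub-tree under the pulled expander ends up marked in
 * ioc->blocking_handles.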
 */
static void
_scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander)
{
	struct _sas_port *mpt3sas_port;
	struct _sas_device *sas_device;
	struct _sas_node *expander_sibling;
	unsigned long flags;

	if (!sas_expander)
		return;

	list_for_each_entry(mpt3sas_port,
	   &sas_expander->sas_port_list, port_list) {
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE) {
			spin_lock_irqsave(&ioc->sas_device_lock, flags);
			sas_device =
			    mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
			    mpt3sas_port->remote_identify.sas_address);
			if (sas_device)
				set_bit(sas_device->handle,
				    ioc->blocking_handles);
			spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
		}
	}

	list_for_each_entry(mpt3sas_port,
	   &sas_expander->sas_port_list, port_list) {
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE) {
			expander_sibling =
			    mpt3sas_scsih_expander_find_by_sas_address(
			    ioc, mpt3sas_port->remote_identify.sas_address);
			_scsih_block_io_to_children_attached_to_ex(ioc,
			    expander_sibling);
		}
	}
}

/**
 * _scsih_block_io_to_children_attached_directly
 * @ioc: per adapter object
 * @event_data: topology change event data
 *
 * This routine sets sdev state to SDEV_BLOCK for all devices
 * directly attached during device pull.
 */
static void
_scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasTopologyChangeList_t *event_data)
{
	int i;
	u16 handle;
	u16 reason_code;

	for (i = 0; i < event_data->NumEntries; i++) {
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
			_scsih_block_io_device(ioc, handle);
	}
}

/**
 * _scsih_tm_tr_send - send task management request
 * @ioc: per adapter object
 * @handle: device handle
 * Context: interrupt time.
 *
 * This code is to initiate the device removal handshake protocol
 * with controller firmware.  This function will issue target reset
 * using high priority request queue.  It will send a sas iounit
 * control request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion.
 *
 * This is designed to send multiple task management requests at the same
 * time to the fifo. If the fifo is full, we will append the request,
 * and process it in a future completion.
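 *
 * Handshake sequence (sketch of the flow implemented below):
 *
 *	_scsih_tm_tr_send()          -> target reset (hi-priority queue)
 *	  _scsih_tm_tr_complete()    -> MPI2_SAS_OP_REMOVE_DEVICE
 *	    _scsih_sas_control_complete()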
*/ static void _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle) { Mpi2SCSITaskManagementRequest_t *mpi_request; u16 smid; struct _sas_device *sas_device; struct MPT3SAS_TARGET *sas_target_priv_data = NULL; u64 sas_address = 0; unsigned long flags; struct _tr_list *delayed_tr; u32 ioc_state; if (ioc->remove_host) { dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: host has been removed: handle(0x%04x)\n", __func__, ioc->name, handle)); return; } else if (ioc->pci_error_recovery) { dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: host in pci error recovery: handle(0x%04x)\n", __func__, ioc->name, handle)); return; } ioc_state = mpt3sas_base_get_iocstate(ioc, 1); if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: host is not operational: handle(0x%04x)\n", __func__, ioc->name, handle)); return; } /* if PD, then return */ if (test_bit(handle, ioc->pd_handles)) return; spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = _scsih_sas_device_find_by_handle(ioc, handle); if (sas_device && sas_device->starget && sas_device->starget->hostdata) { sas_target_priv_data = sas_device->starget->hostdata; sas_target_priv_data->deleted = 1; sas_address = sas_device->sas_address; } spin_unlock_irqrestore(&ioc->sas_device_lock, flags); if (sas_target_priv_data) { dewtprintk(ioc, pr_info(MPT3SAS_FMT "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, handle, (unsigned long long)sas_address)); _scsih_ublock_io_device(ioc, sas_address); sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE; } smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx); if (!smid) { delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); if (!delayed_tr) return; INIT_LIST_HEAD(&delayed_tr->list); delayed_tr->handle = handle; list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list); dewtprintk(ioc, pr_info(MPT3SAS_FMT "DELAYED:tr:handle(0x%04x), (open)\n", ioc->name, handle)); return; } dewtprintk(ioc, pr_info(MPT3SAS_FMT "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", ioc->name, handle, smid, ioc->tm_tr_cb_idx)); mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t)); mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; mpi_request->DevHandle = cpu_to_le16(handle); mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; mpt3sas_base_put_smid_hi_priority(ioc, smid); mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL); } /** * _scsih_tm_tr_complete - * @ioc: per adapter object * @smid: system request message index * @msix_index: MSIX table index supplied by the OS * @reply: reply message frame(lower 32bit addr) * Context: interrupt time. * * This is the target reset completion routine. * This code is part of the code to initiate the device removal * handshake protocol with controller firmware. * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE) * * Return 1 meaning mf should be freed from _base_interrupt * 0 means the mf is freed from this function. 
*/ static u8 _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) { u16 handle; Mpi2SCSITaskManagementRequest_t *mpi_request_tm; Mpi2SCSITaskManagementReply_t *mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); Mpi2SasIoUnitControlRequest_t *mpi_request; u16 smid_sas_ctrl; u32 ioc_state; if (ioc->remove_host) { dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: host has been removed\n", __func__, ioc->name)); return 1; } else if (ioc->pci_error_recovery) { dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: host in pci error recovery\n", __func__, ioc->name)); return 1; } ioc_state = mpt3sas_base_get_iocstate(ioc, 1); if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: host is not operational\n", __func__, ioc->name)); return 1; } if (unlikely(!mpi_reply)) { pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return 1; } mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid); handle = le16_to_cpu(mpi_request_tm->DevHandle); if (handle != le16_to_cpu(mpi_reply->DevHandle)) { dewtprintk(ioc, pr_err(MPT3SAS_FMT "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n", ioc->name, handle, le16_to_cpu(mpi_reply->DevHandle), smid)); return 0; } mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT); dewtprintk(ioc, pr_info(MPT3SAS_FMT "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), " "loginfo(0x%08x), completed(%d)\n", ioc->name, handle, smid, le16_to_cpu(mpi_reply->IOCStatus), le32_to_cpu(mpi_reply->IOCLogInfo), le32_to_cpu(mpi_reply->TerminationCount))); smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx); if (!smid_sas_ctrl) { pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", ioc->name, __func__); return 1; } dewtprintk(ioc, pr_info(MPT3SAS_FMT "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", ioc->name, handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx)); mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl); memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t)); mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE; mpi_request->DevHandle = mpi_request_tm->DevHandle; mpt3sas_base_put_smid_default(ioc, smid_sas_ctrl); return _scsih_check_for_pending_tm(ioc, smid); } /** * _scsih_sas_control_complete - completion routine * @ioc: per adapter object * @smid: system request message index * @msix_index: MSIX table index supplied by the OS * @reply: reply message frame(lower 32bit addr) * Context: interrupt time. * * This is the sas iounit control completion routine. * This code is part of the code to initiate the device removal * handshake protocol with controller firmware. * * Return 1 meaning mf should be freed from _base_interrupt * 0 means the mf is freed from this function. 
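 *
 * This completion is log-only: no further request is issued from here,
 * and the reply is always handed back (return 1) for _base_interrupt
 * to free.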
 */
static u8
_scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u8 msix_index, u32 reply)
{
	Mpi2SasIoUnitControlReply_t *mpi_reply =
	    mpt3sas_base_get_reply_virt_addr(ioc, reply);

	if (likely(mpi_reply)) {
		dewtprintk(ioc, pr_info(MPT3SAS_FMT
		    "sc_complete:handle(0x%04x), (open) "
		    "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
		    ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
		    le16_to_cpu(mpi_reply->IOCStatus),
		    le32_to_cpu(mpi_reply->IOCLogInfo)));
	} else {
		pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
	}
	return 1;
}

/**
 * _scsih_tm_tr_volume_send - send target reset request for volumes
 * @ioc: per adapter object
 * @handle: device handle
 * Context: interrupt time.
 *
 * This is designed to send multiple task management requests at the
 * same time to the fifo. If the fifo is full, we will append the
 * request, and process it in a future completion.
 */
static void
_scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	u16 smid;
	struct _tr_list *delayed_tr;

	if (ioc->shost_recovery || ioc->remove_host ||
	    ioc->pci_error_recovery) {
		dewtprintk(ioc, pr_info(MPT3SAS_FMT
			"%s: host reset in progress!\n",
			__func__, ioc->name));
		return;
	}

	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
	if (!smid) {
		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
		if (!delayed_tr)
			return;
		INIT_LIST_HEAD(&delayed_tr->list);
		delayed_tr->handle = handle;
		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
		dewtprintk(ioc, pr_info(MPT3SAS_FMT
		    "DELAYED:tr:handle(0x%04x), (open)\n",
		    ioc->name, handle));
		return;
	}

	dewtprintk(ioc, pr_info(MPT3SAS_FMT
		"tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
		ioc->name, handle, smid, ioc->tm_tr_volume_cb_idx));
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	mpt3sas_base_put_smid_hi_priority(ioc, smid);
}

/**
 * _scsih_tm_volume_tr_complete - target reset completion
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt time.
 *
 * Return 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
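 *
 * Unlike _scsih_tm_tr_complete(), no sas iounit control request is sent
 * for volumes; this completion only logs the result and then kicks any
 * queued target resets via _scsih_check_for_pending_tm().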
 */
static u8
_scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u8 msix_index, u32 reply)
{
	u16 handle;
	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
	Mpi2SCSITaskManagementReply_t *mpi_reply =
	    mpt3sas_base_get_reply_virt_addr(ioc, reply);

	if (ioc->shost_recovery || ioc->remove_host ||
	    ioc->pci_error_recovery) {
		dewtprintk(ioc, pr_info(MPT3SAS_FMT
			"%s: host reset in progress!\n",
			__func__, ioc->name));
		return 1;
	}
	if (unlikely(!mpi_reply)) {
		pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		return 1;
	}

	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
	handle = le16_to_cpu(mpi_request_tm->DevHandle);
	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
		dewtprintk(ioc, pr_err(MPT3SAS_FMT
			"spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
			ioc->name, handle,
			le16_to_cpu(mpi_reply->DevHandle), smid));
		return 0;
	}

	dewtprintk(ioc, pr_info(MPT3SAS_FMT
	    "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
	    "loginfo(0x%08x), completed(%d)\n", ioc->name,
	    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
	    le32_to_cpu(mpi_reply->IOCLogInfo),
	    le32_to_cpu(mpi_reply->TerminationCount)));

	return _scsih_check_for_pending_tm(ioc, smid);
}

/**
 * _scsih_check_for_pending_tm - check for pending task management
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * This will check the delayed target reset list, and feed the
 * next request.
 *
 * Return 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
static u8
_scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	struct _tr_list *delayed_tr;

	if (!list_empty(&ioc->delayed_tr_volume_list)) {
		delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
		    struct _tr_list, list);
		mpt3sas_base_free_smid(ioc, smid);
		_scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
		return 0;
	}

	if (!list_empty(&ioc->delayed_tr_list)) {
		delayed_tr = list_entry(ioc->delayed_tr_list.next,
		    struct _tr_list, list);
		mpt3sas_base_free_smid(ioc, smid);
		_scsih_tm_tr_send(ioc, delayed_tr->handle);
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
		return 0;
	}
	return 1;
}

/**
 * _scsih_check_topo_delete_events - sanity check on topo events
 * @ioc: per adapter object
 * @event_data: the event data payload
 *
 * This routine was added to better handle cable breaker.
 *
 * This handles the case where driver receives multiple expander
 * add and delete events in a single shot. When there is a delete event
 * the routine will void any pending add events waiting in the event queue.
 *
 * Return nothing.
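 *
 * Implementation notes: an ExpanderDevHandle below sas_hba.num_phys means
 * the event came from a direct attached port rather than an expander, and
 * a DELAY_NOT_RESPONDING expander status blocks IO to every child device
 * recorded in the ioc->blocking_handles bitmap.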
 */
static void
_scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasTopologyChangeList_t *event_data)
{
	struct fw_event_work *fw_event;
	Mpi2EventDataSasTopologyChangeList_t *local_event_data;
	u16 expander_handle;
	struct _sas_node *sas_expander;
	unsigned long flags;
	int i, reason_code;
	u16 handle;

	for (i = 0 ; i < event_data->NumEntries; i++) {
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
			_scsih_tm_tr_send(ioc, handle);
	}

	expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
	if (expander_handle < ioc->sas_hba.num_phys) {
		_scsih_block_io_to_children_attached_directly(ioc, event_data);
		return;
	}
	if (event_data->ExpStatus ==
	    MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
		/* put expander attached devices into blocking state */
		spin_lock_irqsave(&ioc->sas_node_lock, flags);
		sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
		    expander_handle);
		_scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		do {
			handle = find_first_bit(ioc->blocking_handles,
			    ioc->facts.MaxDevHandle);
			if (handle < ioc->facts.MaxDevHandle)
				_scsih_block_io_device(ioc, handle);
		} while (test_and_clear_bit(handle, ioc->blocking_handles));
	} else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
		_scsih_block_io_to_children_attached_directly(ioc, event_data);

	if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
		return;

	/* mark ignore flag for pending events */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
		if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
		    fw_event->ignore)
			continue;
		local_event_data = fw_event->event_data;
		if (local_event_data->ExpStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
		    local_event_data->ExpStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
			if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
			    expander_handle) {
				dewtprintk(ioc, pr_info(MPT3SAS_FMT
				    "setting ignoring flag\n", ioc->name));
				fw_event->ignore = 1;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}

/**
 * _scsih_set_volume_delete_flag - setting volume delete flag
 * @ioc: per adapter object
 * @handle: device handle
 *
 * This returns nothing.
 */
static void
_scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _raid_device *raid_device;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	unsigned long flags;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
	if (raid_device && raid_device->starget &&
	    raid_device->starget->hostdata) {
		sas_target_priv_data =
		    raid_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		dewtprintk(ioc, pr_info(MPT3SAS_FMT
		    "setting delete flag: handle(0x%04x), "
		    "wwid(0x%016llx)\n", ioc->name, handle,
		    (unsigned long long) raid_device->wwid));
	}
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}

/**
 * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
 * @handle: input handle
 * @a: handle for volume a
 * @b: handle for volume b
 *
 * IR firmware only supports two raid volumes. The purpose of this
 * routine is to store the volume handle in either a or b: the handle is
 * recorded only when it is non-zero, not already stored, and a or b
 * still has a free slot.
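 *
 * In effect the pair (a, b) is a two-slot set: a zero handle, or one
 * already stored in a or b, is ignored. For example, feeding the handles
 * 0x9, 0xa, 0x9 and 0xb in turn leaves a = 0x9 and b = 0xa; the duplicate
 * is skipped and 0xb no longer fits.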
*/ static void _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b) { if (!handle || handle == *a || handle == *b) return; if (!*a) *a = handle; else if (!*b) *b = handle; } /** * _scsih_check_ir_config_unhide_events - check for UNHIDE events * @ioc: per adapter object * @event_data: the event data payload * Context: interrupt time. * * This routine will send target reset to volume, followed by target * resets to the PDs. This is called when a PD has been removed, or * volume has been deleted or removed. When the target reset is sent * to volume, the PD target resets need to be queued to start upon * completion of the volume target reset. * * Return nothing. */ static void _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc, Mpi2EventDataIrConfigChangeList_t *event_data) { Mpi2EventIrConfigElement_t *element; int i; u16 handle, volume_handle, a, b; struct _tr_list *delayed_tr; a = 0; b = 0; /* Volume Resets for Deleted or Removed */ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; for (i = 0; i < event_data->NumElements; i++, element++) { if (le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) continue; if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED || element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_REMOVED) { volume_handle = le16_to_cpu(element->VolDevHandle); _scsih_set_volume_delete_flag(ioc, volume_handle); _scsih_set_volume_handle_for_tr(volume_handle, &a, &b); } } /* Volume Resets for UNHIDE events */ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; for (i = 0; i < event_data->NumElements; i++, element++) { if (le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) continue; if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) { volume_handle = le16_to_cpu(element->VolDevHandle); _scsih_set_volume_handle_for_tr(volume_handle, &a, &b); } } if (a) _scsih_tm_tr_volume_send(ioc, a); if (b) _scsih_tm_tr_volume_send(ioc, b); /* PD target resets */ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; for (i = 0; i < event_data->NumElements; i++, element++) { if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE) continue; handle = le16_to_cpu(element->PhysDiskDevHandle); volume_handle = le16_to_cpu(element->VolDevHandle); clear_bit(handle, ioc->pd_handles); if (!volume_handle) _scsih_tm_tr_send(ioc, handle); else if (volume_handle == a || volume_handle == b) { delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); BUG_ON(!delayed_tr); INIT_LIST_HEAD(&delayed_tr->list); delayed_tr->handle = handle; list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list); dewtprintk(ioc, pr_info(MPT3SAS_FMT "DELAYED:tr:handle(0x%04x), (open)\n", ioc->name, handle)); } else _scsih_tm_tr_send(ioc, handle); } } /** * _scsih_check_volume_delete_events - set delete flag for volumes * @ioc: per adapter object * @event_data: the event data payload * Context: interrupt time. * * This will handle the case when the cable connected to entire volume is * pulled. We will take care of setting the deleted flag so normal IO will * not be sent. * * Return nothing. 
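 *
 * Only a volume state change to MISSING or FAILED is acted on here; all
 * other state transitions are ignored.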
 */
static void
_scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataIrVolume_t *event_data)
{
	u32 state;

	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
		return;
	state = le32_to_cpu(event_data->NewValue);
	if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
	    MPI2_RAID_VOL_STATE_FAILED)
		_scsih_set_volume_delete_flag(ioc,
		    le16_to_cpu(event_data->VolDevHandle));
}

/**
 * _scsih_flush_running_cmds - completing outstanding commands.
 * @ioc: per adapter object
 *
 * Flushes out all pending scmd commands following a host reset,
 * where all IO is dropped to the floor.
 *
 * Return nothing.
 */
static void
_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
{
	struct scsi_cmnd *scmd;
	u16 smid;
	u16 count = 0;

	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
		scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
		if (!scmd)
			continue;
		count++;
		mpt3sas_base_free_smid(ioc, smid);
		scsi_dma_unmap(scmd);
		if (ioc->pci_error_recovery)
			scmd->result = DID_NO_CONNECT << 16;
		else
			scmd->result = DID_RESET << 16;
		scmd->scsi_done(scmd);
	}
	dtmprintk(ioc, pr_info(MPT3SAS_FMT "completing %d cmds\n",
	    ioc->name, count));
}

/**
 * _scsih_setup_eedp - setup MPI request for EEDP transfer
 * @ioc: per adapter object
 * @scmd: pointer to scsi command object
 * @mpi_request: pointer to the SCSI_IO request message frame
 *
 * Supports protection types 1 and 3.
 *
 * Returns nothing
 */
static void
_scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
	Mpi2SCSIIORequest_t *mpi_request)
{
	u16 eedp_flags;
	unsigned char prot_op = scsi_get_prot_op(scmd);
	unsigned char prot_type = scsi_get_prot_type(scmd);
	Mpi25SCSIIORequest_t *mpi_request_3v =
	    (Mpi25SCSIIORequest_t *)mpi_request;

	if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
		return;

	if (prot_op == SCSI_PROT_READ_STRIP)
		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
	else if (prot_op == SCSI_PROT_WRITE_INSERT)
		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
	else
		return;

	switch (prot_type) {
	case SCSI_PROT_DIF_TYPE1:
	case SCSI_PROT_DIF_TYPE2:
		/*
		 * enable ref/guard checking
		 * auto increment ref tag
		 */
		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
		    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
		    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
		mpi_request->CDB.EEDP32.PrimaryReferenceTag =
		    cpu_to_be32(scsi_get_lba(scmd));
		break;

	case SCSI_PROT_DIF_TYPE3:
		/*
		 * enable guard checking
		 */
		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
		break;
	}

	mpi_request_3v->EEDPBlockSize = cpu_to_le16(scmd->device->sector_size);
	mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
}

/**
 * _scsih_eedp_error_handling - return sense code for EEDP errors
 * @scmd: pointer to scsi command object
 * @ioc_status: ioc status
 *
 * Returns nothing
 */
static void
_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
{
	u8 ascq;

	switch (ioc_status) {
	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		ascq = 0x01;
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		ascq = 0x02;
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		ascq = 0x03;
		break;
	default:
		ascq = 0x00;
		break;
	}
	scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10,
	    ascq);
	scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
	    SAM_STAT_CHECK_CONDITION;
}

/**
 * _scsih_qcmd_lck - main scsi request entry point
 * @scmd: pointer to scsi command object
 * @done: function pointer to be invoked on completion
 *
 * The callback index is set inside `ioc->scsi_io_cb_idx`.
 *
 * Returns 0 on success. If there's a failure, return either:
 * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
 */
static int
_scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	Mpi2SCSIIORequest_t *mpi_request;
	u32 mpi_control;
	u16 smid;
	u16 handle;

#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
	if (ioc->logging_level & MPT_DEBUG_SCSI)
		scsi_print_command(scmd);
#endif

	scmd->scsi_done = done;
	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		return 0;
	}

	if (ioc->pci_error_recovery || ioc->remove_host) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		return 0;
	}

	sas_target_priv_data = sas_device_priv_data->sas_target;

	/* invalid device handle */
	handle = sas_target_priv_data->handle;
	if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		return 0;
	}

	/* host recovery or link resets sent via IOCTLs */
	if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress)
		return SCSI_MLQUEUE_HOST_BUSY;

	/* device has been deleted */
	else if (sas_target_priv_data->deleted) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		return 0;

	/* device busy with task management */
	} else if (sas_target_priv_data->tm_busy ||
	    sas_device_priv_data->block)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	if (scmd->sc_data_direction == DMA_FROM_DEVICE)
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
	else if (scmd->sc_data_direction == DMA_TO_DEVICE)
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
	else
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;

	/* set tags */
	if (!(sas_device_priv_data->flags & MPT_DEVICE_FLAGS_INIT)) {
		if (scmd->device->tagged_supported) {
			if (scmd->device->ordered_tags)
				mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
			else
				mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
		} else
			mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
	} else
		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;

	if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) &&
	    scmd->cmd_len != 32)
		mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;

	smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
	if (!smid) {
		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		goto out;
	}
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t));
	_scsih_setup_eedp(ioc, scmd, mpi_request);

	if (scmd->cmd_len == 32)
		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT)
		mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
	else
		mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
	mpi_request->Control = cpu_to_le32(mpi_control);
	mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
	mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
	mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
	mpi_request->SenseBufferLowAddress =
	    mpt3sas_base_get_sense_buffer_dma(ioc, smid);
	mpi_request->SGLOffset0 = offsetof(Mpi2SCSIIORequest_t, SGL) / 4;
	int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
	    mpi_request->LUN);
	memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);

	if (mpi_request->DataLength) {
		if (ioc->build_sg_scmd(ioc, scmd, smid)) {
			mpt3sas_base_free_smid(ioc, smid);
			goto out;
		}
	} else
		ioc->build_zero_len_sge(ioc, &mpi_request->SGL);

	if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
		if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
			mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
			    MPI25_SCSIIO_IOFLAGS_FAST_PATH);
			mpt3sas_base_put_smid_fast_path(ioc, smid, handle);
		} else
			mpt3sas_base_put_smid_scsi_io(ioc, smid, handle);
	} else
		mpt3sas_base_put_smid_default(ioc, smid);
	return 0;

 out:
	return SCSI_MLQUEUE_HOST_BUSY;
}
static DEF_SCSI_QCMD(_scsih_qcmd)

/**
 * _scsih_normalize_sense - normalize descriptor and fixed format sense data
 * @sense_buffer: sense data returned by target
 * @data: normalized skey/asc/ascq
 *
 * Return nothing.
 */
static void
_scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
{
	if ((sense_buffer[0] & 0x7F) >= 0x72) {
		/* descriptor format */
		data->skey = sense_buffer[1] & 0x0F;
		data->asc = sense_buffer[2];
		data->ascq = sense_buffer[3];
	} else {
		/* fixed format */
		data->skey = sense_buffer[2] & 0x0F;
		data->asc = sense_buffer[12];
		data->ascq = sense_buffer[13];
	}
}

#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
/**
 * _scsih_scsi_ioc_info - translate a non-successful SCSI_IO request
 * @ioc: per adapter object
 * @scmd: pointer to scsi command object
 * @mpi_reply: reply mf payload returned from firmware
 *
 * scsi_status - SCSI Status code returned from target device
 * scsi_state - state info associated with SCSI_IO determined by ioc
 * ioc_status - ioc supplied status info
 *
 * Return nothing.
 */
static void
_scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
	Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
{
	u32 response_info;
	u8 *response_bytes;
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	char *desc_ioc_state = NULL;
	char *desc_scsi_status = NULL;
	char *desc_scsi_state = ioc->tmp_string;
	u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
	struct _sas_device *sas_device = NULL;
	unsigned long flags;
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
	char *device_str = NULL;

	if (!priv_target)
		return;
	device_str = "volume";

	if (log_info == 0x31170000)
		return;

	switch (ioc_status) {
	case MPI2_IOCSTATUS_SUCCESS:
		desc_ioc_state = "success";
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc_ioc_state = "invalid function";
		break;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		desc_ioc_state = "scsi recovered error";
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
		desc_ioc_state = "scsi invalid dev handle";
		break;
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		desc_ioc_state = "scsi device not there";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		desc_ioc_state = "scsi data overrun";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		desc_ioc_state = "scsi data underrun";
		break;
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		desc_ioc_state = "scsi io data error";
		break;
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		desc_ioc_state = "scsi protocol error";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		desc_ioc_state = "scsi task terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		desc_ioc_state = "scsi residual mismatch";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		desc_ioc_state = "scsi task mgmt failed";
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		desc_ioc_state = "scsi ioc terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
desc_ioc_state = "scsi ext terminated"; break; case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: desc_ioc_state = "eedp guard error"; break; case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: desc_ioc_state = "eedp ref tag error"; break; case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: desc_ioc_state = "eedp app tag error"; break; default: desc_ioc_state = "unknown"; break; } switch (scsi_status) { case MPI2_SCSI_STATUS_GOOD: desc_scsi_status = "good"; break; case MPI2_SCSI_STATUS_CHECK_CONDITION: desc_scsi_status = "check condition"; break; case MPI2_SCSI_STATUS_CONDITION_MET: desc_scsi_status = "condition met"; break; case MPI2_SCSI_STATUS_BUSY: desc_scsi_status = "busy"; break; case MPI2_SCSI_STATUS_INTERMEDIATE: desc_scsi_status = "intermediate"; break; case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET: desc_scsi_status = "intermediate condmet"; break; case MPI2_SCSI_STATUS_RESERVATION_CONFLICT: desc_scsi_status = "reservation conflict"; break; case MPI2_SCSI_STATUS_COMMAND_TERMINATED: desc_scsi_status = "command terminated"; break; case MPI2_SCSI_STATUS_TASK_SET_FULL: desc_scsi_status = "task set full"; break; case MPI2_SCSI_STATUS_ACA_ACTIVE: desc_scsi_status = "aca active"; break; case MPI2_SCSI_STATUS_TASK_ABORTED: desc_scsi_status = "task aborted"; break; default: desc_scsi_status = "unknown"; break; } desc_scsi_state[0] = '\0'; if (!scsi_state) desc_scsi_state = " "; if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) strcat(desc_scsi_state, "response info "); if (scsi_state & MPI2_SCSI_STATE_TERMINATED) strcat(desc_scsi_state, "state terminated "); if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) strcat(desc_scsi_state, "no status "); if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED) strcat(desc_scsi_state, "autosense failed "); if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) strcat(desc_scsi_state, "autosense valid "); scsi_print_command(scmd); if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) { pr_warn(MPT3SAS_FMT "\t%s wwid(0x%016llx)\n", ioc->name, device_str, (unsigned long long)priv_target->sas_address); } else { spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc, priv_target->sas_address); if (sas_device) { pr_warn(MPT3SAS_FMT "\tsas_address(0x%016llx), phy(%d)\n", ioc->name, (unsigned long long) sas_device->sas_address, sas_device->phy); pr_warn(MPT3SAS_FMT "\tenclosure_logical_id(0x%016llx), slot(%d)\n", ioc->name, (unsigned long long) sas_device->enclosure_logical_id, sas_device->slot); } spin_unlock_irqrestore(&ioc->sas_device_lock, flags); } pr_warn(MPT3SAS_FMT "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n", ioc->name, le16_to_cpu(mpi_reply->DevHandle), desc_ioc_state, ioc_status, smid); pr_warn(MPT3SAS_FMT "\trequest_len(%d), underflow(%d), resid(%d)\n", ioc->name, scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd)); pr_warn(MPT3SAS_FMT "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n", ioc->name, le16_to_cpu(mpi_reply->TaskTag), le32_to_cpu(mpi_reply->TransferCount), scmd->result); pr_warn(MPT3SAS_FMT "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n", ioc->name, desc_scsi_status, scsi_status, desc_scsi_state, scsi_state); if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) { struct sense_info data; _scsih_normalize_sense(scmd->sense_buffer, &data); pr_warn(MPT3SAS_FMT "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n", ioc->name, data.skey, data.asc, data.ascq, le32_to_cpu(mpi_reply->SenseCount)); } if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) { response_info = le32_to_cpu(mpi_reply->ResponseInfo); 
response_bytes = (u8 *)&response_info; _scsih_response_code(ioc, response_bytes[0]); } } #endif /** * _scsih_turn_on_fault_led - illuminate Fault LED * @ioc: per adapter object * @handle: device handle * Context: process * * Return nothing. */ static void _scsih_turn_on_fault_led(struct MPT3SAS_ADAPTER *ioc, u16 handle) { Mpi2SepReply_t mpi_reply; Mpi2SepRequest_t mpi_request; memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t)); mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR; mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS; mpi_request.SlotStatus = cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT); mpi_request.DevHandle = cpu_to_le16(handle); mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS; if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply, &mpi_request)) != 0) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return; } if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) { dewtprintk(ioc, pr_info(MPT3SAS_FMT "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n", ioc->name, le16_to_cpu(mpi_reply.IOCStatus), le32_to_cpu(mpi_reply.IOCLogInfo))); return; } } /** * _scsih_send_event_to_turn_on_fault_led - fire delayed event * @ioc: per adapter object * @handle: device handle * Context: interrupt. * * Return nothing. */ static void _scsih_send_event_to_turn_on_fault_led(struct MPT3SAS_ADAPTER *ioc, u16 handle) { struct fw_event_work *fw_event; fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC); if (!fw_event) return; fw_event->event = MPT3SAS_TURN_ON_FAULT_LED; fw_event->device_handle = handle; fw_event->ioc = ioc; _scsih_fw_event_add(ioc, fw_event); } /** * _scsih_smart_predicted_fault - process smart errors * @ioc: per adapter object * @handle: device handle * Context: interrupt. * * Return nothing. 
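 *
 * As implemented below, the fault LED is only driven for IBM branded
 * controllers (PCI subsystem vendor check), and via a delayed fw_event
 * since this runs in interrupt context; the SMART trip is also inserted
 * into the ioctl event log as a device status change with ASC 0x5D.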
*/ static void _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle) { struct scsi_target *starget; struct MPT3SAS_TARGET *sas_target_priv_data; Mpi2EventNotificationReply_t *event_reply; Mpi2EventDataSasDeviceStatusChange_t *event_data; struct _sas_device *sas_device; ssize_t sz; unsigned long flags; /* only handle non-raid devices */ spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = _scsih_sas_device_find_by_handle(ioc, handle); if (!sas_device) { spin_unlock_irqrestore(&ioc->sas_device_lock, flags); return; } starget = sas_device->starget; sas_target_priv_data = starget->hostdata; if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) || ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))) { spin_unlock_irqrestore(&ioc->sas_device_lock, flags); return; } starget_printk(KERN_WARNING, starget, "predicted fault\n"); spin_unlock_irqrestore(&ioc->sas_device_lock, flags); if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) _scsih_send_event_to_turn_on_fault_led(ioc, handle); /* insert into event log */ sz = offsetof(Mpi2EventNotificationReply_t, EventData) + sizeof(Mpi2EventDataSasDeviceStatusChange_t); event_reply = kzalloc(sz, GFP_KERNEL); if (!event_reply) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return; } event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; event_reply->Event = cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE); event_reply->MsgLength = sz/4; event_reply->EventDataLength = cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4); event_data = (Mpi2EventDataSasDeviceStatusChange_t *) event_reply->EventData; event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA; event_data->ASC = 0x5D; event_data->DevHandle = cpu_to_le16(handle); event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address); mpt3sas_ctl_add_to_event_log(ioc, event_reply); kfree(event_reply); } /** * _scsih_io_done - scsi request callback * @ioc: per adapter object * @smid: system request message index * @msix_index: MSIX table index supplied by the OS * @reply: reply message frame(lower 32bit addr) * * Callback handler when using _scsih_qcmd. * * Return 1 meaning mf should be freed from _base_interrupt * 0 means the mf is freed from this function. 
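 *
 * The bulk of this routine maps the firmware supplied IOCStatus,
 * SCSIState and SCSIStatus onto scmd->result, copies back autosense data
 * when valid, and feeds a SMART trip (asc 0x5D) into
 * _scsih_smart_predicted_fault().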
*/ static u8 _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) { Mpi2SCSIIORequest_t *mpi_request; Mpi2SCSIIOReply_t *mpi_reply; struct scsi_cmnd *scmd; u16 ioc_status; u32 xfer_cnt; u8 scsi_state; u8 scsi_status; u32 log_info; struct MPT3SAS_DEVICE *sas_device_priv_data; u32 response_code = 0; mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); scmd = _scsih_scsi_lookup_get_clear(ioc, smid); if (scmd == NULL) return 1; mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); if (mpi_reply == NULL) { scmd->result = DID_OK << 16; goto out; } sas_device_priv_data = scmd->device->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target || sas_device_priv_data->sas_target->deleted) { scmd->result = DID_NO_CONNECT << 16; goto out; } ioc_status = le16_to_cpu(mpi_reply->IOCStatus); /* turning off TLR */ scsi_state = mpi_reply->SCSIState; if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) response_code = le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF; if (!sas_device_priv_data->tlr_snoop_check) { sas_device_priv_data->tlr_snoop_check++; if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) && response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) sas_device_priv_data->flags &= ~MPT_DEVICE_TLR_ON; } xfer_cnt = le32_to_cpu(mpi_reply->TransferCount); scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt); if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) log_info = le32_to_cpu(mpi_reply->IOCLogInfo); else log_info = 0; ioc_status &= MPI2_IOCSTATUS_MASK; scsi_status = mpi_reply->SCSIStatus; if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 && (scsi_status == MPI2_SCSI_STATUS_BUSY || scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT || scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) { ioc_status = MPI2_IOCSTATUS_SUCCESS; } if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) { struct sense_info data; const void *sense_data = mpt3sas_base_get_sense_buffer(ioc, smid); u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, le32_to_cpu(mpi_reply->SenseCount)); memcpy(scmd->sense_buffer, sense_data, sz); _scsih_normalize_sense(scmd->sense_buffer, &data); /* failure prediction threshold exceeded */ if (data.asc == 0x5D) _scsih_smart_predicted_fault(ioc, le16_to_cpu(mpi_reply->DevHandle)); mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq); } switch (ioc_status) { case MPI2_IOCSTATUS_BUSY: case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES: scmd->result = SAM_STAT_BUSY; break; case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE: scmd->result = DID_NO_CONNECT << 16; break; case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: if (sas_device_priv_data->block) { scmd->result = DID_TRANSPORT_DISRUPTED << 16; goto out; } if (log_info == 0x31110630) { if (scmd->retries > 2) { scmd->result = DID_NO_CONNECT << 16; scsi_device_set_state(scmd->device, SDEV_OFFLINE); } else { scmd->result = DID_SOFT_ERROR << 16; scmd->device->expecting_cc_ua = 1; } break; } scmd->result = DID_SOFT_ERROR << 16; break; case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: scmd->result = DID_RESET << 16; break; case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt)) scmd->result = DID_SOFT_ERROR << 16; else scmd->result = (DID_OK << 16) | scsi_status; break; case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN: scmd->result = (DID_OK << 16) | scsi_status; if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)) break; if (xfer_cnt < scmd->underflow) { if (scsi_status == SAM_STAT_BUSY) scmd->result = SAM_STAT_BUSY; else scmd->result = DID_SOFT_ERROR << 16; 
		} else if (scsi_state &
		    (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
		    MPI2_SCSI_STATE_NO_SCSI_STATUS))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
			mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
			mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
			scmd->result = (DRIVER_SENSE << 24) |
			    SAM_STAT_CHECK_CONDITION;
			scmd->sense_buffer[0] = 0x70;
			scmd->sense_buffer[2] = ILLEGAL_REQUEST;
			scmd->sense_buffer[12] = 0x20;
			scmd->sense_buffer[13] = 0;
		}
		break;

	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		scsi_set_resid(scmd, 0);
		/* fall through */
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI2_IOCSTATUS_SUCCESS:
		scmd->result = (DID_OK << 16) | scsi_status;
		if (response_code ==
		    MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
		    (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
		    MPI2_SCSI_STATE_NO_SCSI_STATUS)))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		break;

	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		_scsih_eedp_error_handling(scmd, ioc_status);
		break;

	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INVALID_SGL:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	default:
		scmd->result = DID_SOFT_ERROR << 16;
		break;
	}

#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
	if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
		_scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
#endif

 out:
	scsi_dma_unmap(scmd);
	scmd->scsi_done(scmd);
	return 1;
}

/**
 * _scsih_sas_host_refresh - refreshing sas host object contents
 * @ioc: per adapter object
 * Context: user
 *
 * During port enable, the fw will send topology events for every device.
 * It's possible that the handles may change from the previous setting,
 * so this code keeps the handles updated when they change.
 *
 * Return nothing.
 */
static void
_scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
{
	u16 sz;
	u16 ioc_status;
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	u16 attached_handle;
	u8 link_rate;

	dtmprintk(ioc, pr_info(MPT3SAS_FMT
	    "updating handles for sas_host(0x%016llx)\n",
	    ioc->name, (unsigned long long)ioc->sas_hba.sas_address));

	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
	    * sizeof(Mpi2SasIOUnit0PhyData_t));
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		return;
	}

	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz)) != 0)
		goto out;
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		goto out;
	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
		link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
		if (i == 0)
			ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
			    PhyData[0].ControllerDevHandle);
		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
		attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
AttachedDevHandle); if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5) link_rate = MPI2_SAS_NEG_LINK_RATE_1_5; mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address, attached_handle, i, link_rate); } out: kfree(sas_iounit_pg0); } /** * _scsih_sas_host_add - create sas host object * @ioc: per adapter object * * Creating host side data object, stored in ioc->sas_hba * * Return nothing. */ static void _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc) { int i; Mpi2ConfigReply_t mpi_reply; Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; Mpi2SasPhyPage0_t phy_pg0; Mpi2SasDevicePage0_t sas_device_pg0; Mpi2SasEnclosurePage0_t enclosure_pg0; u16 ioc_status; u16 sz; u8 device_missing_delay; mpt3sas_config_get_number_hba_phys(ioc, &ioc->sas_hba.num_phys); if (!ioc->sas_hba.num_phys) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return; } /* sas_iounit page 0 */ sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys * sizeof(Mpi2SasIOUnit0PhyData_t)); sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); if (!sas_iounit_pg0) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return; } if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, sas_iounit_pg0, sz))) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); goto out; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); goto out; } /* sas_iounit page 1 */ sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys * sizeof(Mpi2SasIOUnit1PhyData_t)); sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); if (!sas_iounit_pg1) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); goto out; } if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, sz))) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); goto out; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); goto out; } ioc->io_missing_delay = sas_iounit_pg1->IODeviceMissingDelay; device_missing_delay = sas_iounit_pg1->ReportDeviceMissingDelay; if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16) ioc->device_missing_delay = (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16; else ioc->device_missing_delay = device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev; ioc->sas_hba.phy = kcalloc(ioc->sas_hba.num_phys, sizeof(struct _sas_phy), GFP_KERNEL); if (!ioc->sas_hba.phy) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); goto out; } for (i = 0; i < ioc->sas_hba.num_phys ; i++) { if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, i))) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); goto out; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); goto out; } if (i == 0) ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0-> PhyData[0].ControllerDevHandle); ioc->sas_hba.phy[i].handle = 
ioc->sas_hba.handle; ioc->sas_hba.phy[i].phy_id = i; mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i], phy_pg0, ioc->sas_hba.parent_dev); } if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); goto out; } ioc->sas_hba.enclosure_handle = le16_to_cpu(sas_device_pg0.EnclosureHandle); ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress); pr_info(MPT3SAS_FMT "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n", ioc->name, ioc->sas_hba.handle, (unsigned long long) ioc->sas_hba.sas_address, ioc->sas_hba.num_phys) ; if (ioc->sas_hba.enclosure_handle) { if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply, &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, ioc->sas_hba.enclosure_handle))) ioc->sas_hba.enclosure_logical_id = le64_to_cpu(enclosure_pg0.EnclosureLogicalID); } out: kfree(sas_iounit_pg1); kfree(sas_iounit_pg0); } /** * _scsih_expander_add - creating expander object * @ioc: per adapter object * @handle: expander handle * * Creating expander object, stored in ioc->sas_expander_list. * * Return 0 for success, else error. */ static int _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) { struct _sas_node *sas_expander; Mpi2ConfigReply_t mpi_reply; Mpi2ExpanderPage0_t expander_pg0; Mpi2ExpanderPage1_t expander_pg1; Mpi2SasEnclosurePage0_t enclosure_pg0; u32 ioc_status; u16 parent_handle; u64 sas_address, sas_address_parent = 0; int i; unsigned long flags; struct _sas_port *mpt3sas_port = NULL; int rc = 0; if (!handle) return -1; if (ioc->shost_recovery || ioc->pci_error_recovery) return -1; if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return -1; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return -1; } /* handle out of order topology events */ parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle); if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent) != 0) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return -1; } if (sas_address_parent != ioc->sas_hba.sas_address) { spin_lock_irqsave(&ioc->sas_node_lock, flags); sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, sas_address_parent); spin_unlock_irqrestore(&ioc->sas_node_lock, flags); if (!sas_expander) { rc = _scsih_expander_add(ioc, parent_handle); if (rc != 0) return rc; } } spin_lock_irqsave(&ioc->sas_node_lock, flags); sas_address = le64_to_cpu(expander_pg0.SASAddress); sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, sas_address); spin_unlock_irqrestore(&ioc->sas_node_lock, flags); if (sas_expander) return 0; sas_expander = kzalloc(sizeof(struct _sas_node), GFP_KERNEL); if (!sas_expander) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return -1; } sas_expander->handle = handle; sas_expander->num_phys = expander_pg0.NumPhys; sas_expander->sas_address_parent = sas_address_parent; sas_expander->sas_address = sas_address; pr_info(MPT3SAS_FMT "expander_add: handle(0x%04x)," \ " parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", ioc->name, handle, parent_handle, (unsigned long long) 
sas_expander->sas_address, sas_expander->num_phys); if (!sas_expander->num_phys) goto out_fail; sas_expander->phy = kcalloc(sas_expander->num_phys, sizeof(struct _sas_phy), GFP_KERNEL); if (!sas_expander->phy) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); rc = -1; goto out_fail; } INIT_LIST_HEAD(&sas_expander->sas_port_list); mpt3sas_port = mpt3sas_transport_port_add(ioc, handle, sas_address_parent); if (!mpt3sas_port) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); rc = -1; goto out_fail; } sas_expander->parent_dev = &mpt3sas_port->rphy->dev; for (i = 0 ; i < sas_expander->num_phys ; i++) { if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply, &expander_pg1, i, handle))) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); rc = -1; goto out_fail; } sas_expander->phy[i].handle = handle; sas_expander->phy[i].phy_id = i; if ((mpt3sas_transport_add_expander_phy(ioc, &sas_expander->phy[i], expander_pg1, sas_expander->parent_dev))) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); rc = -1; goto out_fail; } } if (sas_expander->enclosure_handle) { if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply, &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, sas_expander->enclosure_handle))) sas_expander->enclosure_logical_id = le64_to_cpu(enclosure_pg0.EnclosureLogicalID); } _scsih_expander_node_add(ioc, sas_expander); return 0; out_fail: if (mpt3sas_port) mpt3sas_transport_port_remove(ioc, sas_expander->sas_address, sas_address_parent); kfree(sas_expander); return rc; } /** * mpt3sas_expander_remove - removing expander object * @ioc: per adapter object * @sas_address: expander sas_address * * Return nothing. */ void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address) { struct _sas_node *sas_expander; unsigned long flags; if (ioc->shost_recovery) return; spin_lock_irqsave(&ioc->sas_node_lock, flags); sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, sas_address); if (sas_expander) list_del(&sas_expander->list); spin_unlock_irqrestore(&ioc->sas_node_lock, flags); if (sas_expander) _scsih_expander_node_remove(ioc, sas_expander); } /** * _scsih_done - internal SCSI_IO callback handler. * @ioc: per adapter object * @smid: system request message index * @msix_index: MSIX table index supplied by the OS * @reply: reply message frame(lower 32bit addr) * * Callback handler when sending internal generated SCSI_IO. * The callback index passed is `ioc->scsih_cb_idx` * * Return 1 meaning mf should be freed from _base_interrupt * 0 means the mf is freed from this function. 
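 *
 * This simply matches the reply against the single outstanding
 * ioc->scsih_cmds request, copies the reply frame, and wakes the waiter
 * through complete(&ioc->scsih_cmds.done).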
 */
static u8
_scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
		return 1;
	if (ioc->scsih_cmds.smid != smid)
		return 1;
	ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
	if (mpi_reply) {
		memcpy(ioc->scsih_cmds.reply, mpi_reply,
		    mpi_reply->MsgLength*4);
		ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
	}
	ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
	complete(&ioc->scsih_cmds.done);
	return 1;
}

#define MPT3_MAX_LUNS (255)

/**
 * _scsih_check_access_status - check access status
 * @ioc: per adapter object
 * @sas_address: sas address
 * @handle: sas device handle
 * @access_status: errors returned during discovery of the device
 *
 * Return 0 for success, else failure
 */
static u8
_scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
	u16 handle, u8 access_status)
{
	u8 rc = 1;
	char *desc = NULL;

	switch (access_status) {
	case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
	case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
		rc = 0;
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
		desc = "sata capability failed";
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
		desc = "sata affiliation conflict";
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
		desc = "route not addressable";
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
		desc = "smp error not addressable";
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
		desc = "device blocked";
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
		desc = "sata initialization failed";
		break;
	default:
		desc = "unknown";
		break;
	}

	if (!rc)
		return 0;

	pr_err(MPT3SAS_FMT
		"discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
		ioc->name, desc, (unsigned long long)sas_address, handle);
	return rc;
}

/**
 * _scsih_check_device - checking device responsiveness
 * @ioc: per adapter object
 * @parent_sas_address: sas address of parent expander or sas host
 * @handle: attached device handle
 * @phy_number: phy number
 * @link_rate: new link rate
 *
 * Returns nothing.
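 *
 * Wide port note: sas device page 0 reports a single PhyNum per device,
 * so events arriving on the other phys of a wide port are ignored here.
 * A device that answers (possibly under a changed handle) has its IO
 * released via _scsih_ublock_io_device().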
*/ static void _scsih_check_device(struct MPT3SAS_ADAPTER *ioc, u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate) { Mpi2ConfigReply_t mpi_reply; Mpi2SasDevicePage0_t sas_device_pg0; struct _sas_device *sas_device; u32 ioc_status; unsigned long flags; u64 sas_address; struct scsi_target *starget; struct MPT3SAS_TARGET *sas_target_priv_data; u32 device_info; if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) return; ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) return; /* wide port handling ~ we need only handle device once for the phy that * is matched in sas device page zero */ if (phy_number != sas_device_pg0.PhyNum) return; /* check if this is end device */ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); if (!(_scsih_is_end_device(device_info))) return; spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_address = le64_to_cpu(sas_device_pg0.SASAddress); sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc, sas_address); if (!sas_device) { spin_unlock_irqrestore(&ioc->sas_device_lock, flags); return; } if (unlikely(sas_device->handle != handle)) { starget = sas_device->starget; sas_target_priv_data = starget->hostdata; starget_printk(KERN_INFO, starget, "handle changed from(0x%04x) to (0x%04x)!!!\n", sas_device->handle, handle); sas_target_priv_data->handle = handle; sas_device->handle = handle; } /* check if device is present */ if (!(le16_to_cpu(sas_device_pg0.Flags) & MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) { pr_err(MPT3SAS_FMT "device is not present handle(0x%04x), flags!!!\n", ioc->name, handle); spin_unlock_irqrestore(&ioc->sas_device_lock, flags); return; } /* check if there were any issues with discovery */ if (_scsih_check_access_status(ioc, sas_address, handle, sas_device_pg0.AccessStatus)) { spin_unlock_irqrestore(&ioc->sas_device_lock, flags); return; } spin_unlock_irqrestore(&ioc->sas_device_lock, flags); _scsih_ublock_io_device(ioc, sas_address); } /** * _scsih_add_device - creating sas device object * @ioc: per adapter object * @handle: sas device handle * @phy_num: phy number end device attached to * @is_pd: is this hidden raid component * * Creating end device object, stored in ioc->sas_device_list. * * Returns 0 for success, non-zero for failure. 
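 *
 * The new object is populated from sas device page 0 (parent address,
 * enclosure, slot, fast path capability). During port enable, while
 * wait_for_discovery_to_complete is set, the device goes onto the init
 * list instead of being exposed to the transport layer right away.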
 */
static int
_scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
	u8 is_pd)
{
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2SasEnclosurePage0_t enclosure_pg0;
	struct _sas_device *sas_device;
	u32 ioc_status;
	u64 sas_address;
	u32 device_info;
	unsigned long flags;

	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		return -1;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		return -1;
	}

	/* check if this is end device */
	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
	if (!(_scsih_is_end_device(device_info)))
		return -1;
	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);

	/* check if device is present */
	if (!(le16_to_cpu(sas_device_pg0.Flags) &
	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
		pr_err(MPT3SAS_FMT "device is not present handle(0x%04x)!!!\n",
			ioc->name, handle);
		return -1;
	}

	/* check if there were any issues with discovery */
	if (_scsih_check_access_status(ioc, sas_address, handle,
	    sas_device_pg0.AccessStatus))
		return -1;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
	    sas_address);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	if (sas_device)
		return -1;

	sas_device = kzalloc(sizeof(struct _sas_device),
	    GFP_KERNEL);
	if (!sas_device) {
		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		/* allocation failed: report non-zero, matching the
		 * "Returns 0 for success, non-zero for failure" contract */
		return -1;
	}

	sas_device->handle = handle;
	if (_scsih_get_sas_address(ioc,
	    le16_to_cpu(sas_device_pg0.ParentDevHandle),
	    &sas_device->sas_address_parent) != 0)
		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
	sas_device->enclosure_handle =
	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
	sas_device->slot =
	    le16_to_cpu(sas_device_pg0.Slot);
	sas_device->device_info = device_info;
	sas_device->sas_address = sas_address;
	sas_device->phy = sas_device_pg0.PhyNum;
	sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
	    MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;

	/* get enclosure_logical_id */
	if (sas_device->enclosure_handle && !(mpt3sas_config_get_enclosure_pg0(
	    ioc, &mpi_reply, &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
	    sas_device->enclosure_handle)))
		sas_device->enclosure_logical_id =
		    le64_to_cpu(enclosure_pg0.EnclosureLogicalID);

	/* get device name */
	sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);

	if (ioc->wait_for_discovery_to_complete)
		_scsih_sas_device_init_add(ioc, sas_device);
	else
		_scsih_sas_device_add(ioc, sas_device);

	return 0;
}

/**
 * _scsih_remove_device - removing sas device object
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 *
 * Return nothing.
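 *
 * The target is flagged deleted so no new IO is queued to it, blocked
 * IO is released, and the transport port is torn down before the object
 * itself is freed.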
*/ static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc, struct _sas_device *sas_device) { struct MPT3SAS_TARGET *sas_target_priv_data; dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__, sas_device->handle, (unsigned long long) sas_device->sas_address)); if (sas_device->starget && sas_device->starget->hostdata) { sas_target_priv_data = sas_device->starget->hostdata; sas_target_priv_data->deleted = 1; _scsih_ublock_io_device(ioc, sas_device->sas_address); sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE; } mpt3sas_transport_port_remove(ioc, sas_device->sas_address, sas_device->sas_address_parent); pr_info(MPT3SAS_FMT "removing handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, sas_device->handle, (unsigned long long) sas_device->sas_address); dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__, sas_device->handle, (unsigned long long) sas_device->sas_address)); kfree(sas_device); } #ifdef CONFIG_SCSI_MPT3SAS_LOGGING /** * _scsih_sas_topology_change_event_debug - debug for topology event * @ioc: per adapter object * @event_data: event data payload * Context: user. */ static void _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc, Mpi2EventDataSasTopologyChangeList_t *event_data) { int i; u16 handle; u16 reason_code; u8 phy_number; char *status_str = NULL; u8 link_rate, prev_link_rate; switch (event_data->ExpStatus) { case MPI2_EVENT_SAS_TOPO_ES_ADDED: status_str = "add"; break; case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING: status_str = "remove"; break; case MPI2_EVENT_SAS_TOPO_ES_RESPONDING: case 0: status_str = "responding"; break; case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING: status_str = "remove delay"; break; default: status_str = "unknown status"; break; } pr_info(MPT3SAS_FMT "sas topology change: (%s)\n", ioc->name, status_str); pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \ "start_phy(%02d), count(%d)\n", le16_to_cpu(event_data->ExpanderDevHandle), le16_to_cpu(event_data->EnclosureHandle), event_data->StartPhyNum, event_data->NumEntries); for (i = 0; i < event_data->NumEntries; i++) { handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); if (!handle) continue; phy_number = event_data->StartPhyNum + i; reason_code = event_data->PHY[i].PhyStatus & MPI2_EVENT_SAS_TOPO_RC_MASK; switch (reason_code) { case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: status_str = "target add"; break; case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: status_str = "target remove"; break; case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING: status_str = "delay target remove"; break; case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED: status_str = "link rate change"; break; case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE: status_str = "target responding"; break; default: status_str = "unknown"; break; } link_rate = event_data->PHY[i].LinkRate >> 4; prev_link_rate = event_data->PHY[i].LinkRate & 0xF; pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \ " link rate: new(0x%02x), old(0x%02x)\n", phy_number, handle, status_str, link_rate, prev_link_rate); } } #endif /** * _scsih_sas_topology_change_event - handle topology changes * @ioc: per adapter object * @fw_event: The fw_event_work object * Context: user. 
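 *
 * Walks the PHY entries of the event: PHY_CHANGED entries update the
 * transport link rate and re-check the device, TARG_ADDED entries add
 * end devices, and TARG_NOT_RESPONDING entries remove them.  Expander
 * add/remove itself is driven from ExpStatus.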
 *
 */
static int
_scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	int i;
	u16 parent_handle, handle;
	u16 reason_code;
	u8 phy_number, max_phys;
	struct _sas_node *sas_expander;
	u64 sas_address;
	unsigned long flags;
	u8 link_rate, prev_link_rate;
	Mpi2EventDataSasTopologyChangeList_t *event_data = fw_event->event_data;

#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_sas_topology_change_event_debug(ioc, event_data);
#endif

	if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
		return 0;

	if (!ioc->sas_hba.num_phys)
		_scsih_sas_host_add(ioc);
	else
		_scsih_sas_host_refresh(ioc);

	if (fw_event->ignore) {
		dewtprintk(ioc, pr_info(MPT3SAS_FMT
			"ignoring expander event\n", ioc->name));
		return 0;
	}

	parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);

	/* handle expander add */
	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
		if (_scsih_expander_add(ioc, parent_handle) != 0)
			return 0;

	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
	    parent_handle);
	if (sas_expander) {
		sas_address = sas_expander->sas_address;
		max_phys = sas_expander->num_phys;
	} else if (parent_handle < ioc->sas_hba.num_phys) {
		sas_address = ioc->sas_hba.sas_address;
		max_phys = ioc->sas_hba.num_phys;
	} else {
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);

	/* handle siblings events */
	for (i = 0; i < event_data->NumEntries; i++) {
		if (fw_event->ignore) {
			dewtprintk(ioc, pr_info(MPT3SAS_FMT
				"ignoring expander event\n", ioc->name));
			return 0;
		}
		if (ioc->remove_host || ioc->pci_error_recovery)
			return 0;
		phy_number = event_data->StartPhyNum + i;
		if (phy_number >= max_phys)
			continue;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		if ((event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
		    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
			continue;
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		link_rate = event_data->PHY[i].LinkRate >> 4;
		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
		switch (reason_code) {
		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:

			if (ioc->shost_recovery)
				break;

			if (link_rate == prev_link_rate)
				break;

			mpt3sas_transport_update_links(ioc, sas_address,
			    handle, phy_number, link_rate);

			if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
				break;

			_scsih_check_device(ioc, sas_address,
			    handle, phy_number, link_rate);

			/* fall through to re-add the device in case it was
			 * not discovered earlier (_scsih_add_device bails
			 * out if the device already exists) */

		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:

			if (ioc->shost_recovery)
				break;

			mpt3sas_transport_update_links(ioc, sas_address,
			    handle, phy_number, link_rate);

			_scsih_add_device(ioc, handle, phy_number, 0);

			break;
		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:

			_scsih_device_remove_by_handle(ioc, handle);

			break;
		}
	}

	/* handle expander removal */
	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
	    sas_expander)
		mpt3sas_expander_remove(ioc, sas_address);

	return 0;
}

#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
/**
 * _scsih_sas_device_status_change_event_debug - debug for device event
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 *
 * Return nothing.
*/ static void _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, Mpi2EventDataSasDeviceStatusChange_t *event_data) { char *reason_str = NULL; switch (event_data->ReasonCode) { case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA: reason_str = "smart data"; break; case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED: reason_str = "unsupported device discovered"; break; case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET: reason_str = "internal device reset"; break; case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL: reason_str = "internal task abort"; break; case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL: reason_str = "internal task abort set"; break; case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL: reason_str = "internal clear task set"; break; case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL: reason_str = "internal query task"; break; case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE: reason_str = "sata init failure"; break; case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET: reason_str = "internal device reset complete"; break; case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL: reason_str = "internal task abort complete"; break; case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION: reason_str = "internal async notification"; break; case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY: reason_str = "expander reduced functionality"; break; case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY: reason_str = "expander reduced functionality complete"; break; default: reason_str = "unknown reason"; break; } pr_info(MPT3SAS_FMT "device status change: (%s)\n" "\thandle(0x%04x), sas address(0x%016llx), tag(%d)", ioc->name, reason_str, le16_to_cpu(event_data->DevHandle), (unsigned long long)le64_to_cpu(event_data->SASAddress), le16_to_cpu(event_data->TaskTag)); if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA) pr_info(MPT3SAS_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name, event_data->ASC, event_data->ASCQ); pr_info("\n"); } #endif /** * _scsih_sas_device_status_change_event - handle device status change * @ioc: per adapter object * @fw_event: The fw_event_work object * Context: user. * * Return nothing. */ static void _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) { struct MPT3SAS_TARGET *target_priv_data; struct _sas_device *sas_device; u64 sas_address; unsigned long flags; Mpi2EventDataSasDeviceStatusChange_t *event_data = fw_event->event_data; #ifdef CONFIG_SCSI_MPT3SAS_LOGGING if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) _scsih_sas_device_status_change_event_debug(ioc, event_data); #endif /* In MPI Revision K (0xC), the internal device reset complete was * implemented, so avoid setting tm_busy flag for older firmware. 
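	 * The check below compares the upper byte of HeaderVersion, which
	 * (per the MPI2 IOCFacts HeaderVersion layout) carries the header
	 * unit/major version, so anything below 0xC is treated as
	 * pre-Revision-K firmware.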
*/ if ((ioc->facts.HeaderVersion >> 8) < 0xC) return; if (event_data->ReasonCode != MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET && event_data->ReasonCode != MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET) return; spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_address = le64_to_cpu(event_data->SASAddress); sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc, sas_address); if (!sas_device || !sas_device->starget) { spin_unlock_irqrestore(&ioc->sas_device_lock, flags); return; } target_priv_data = sas_device->starget->hostdata; if (!target_priv_data) { spin_unlock_irqrestore(&ioc->sas_device_lock, flags); return; } if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET) target_priv_data->tm_busy = 1; else target_priv_data->tm_busy = 0; spin_unlock_irqrestore(&ioc->sas_device_lock, flags); } #ifdef CONFIG_SCSI_MPT3SAS_LOGGING /** * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure * event * @ioc: per adapter object * @event_data: event data payload * Context: user. * * Return nothing. */ static void _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, Mpi2EventDataSasEnclDevStatusChange_t *event_data) { char *reason_str = NULL; switch (event_data->ReasonCode) { case MPI2_EVENT_SAS_ENCL_RC_ADDED: reason_str = "enclosure add"; break; case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING: reason_str = "enclosure remove"; break; default: reason_str = "unknown reason"; break; } pr_info(MPT3SAS_FMT "enclosure status change: (%s)\n" "\thandle(0x%04x), enclosure logical id(0x%016llx)" " number slots(%d)\n", ioc->name, reason_str, le16_to_cpu(event_data->EnclosureHandle), (unsigned long long)le64_to_cpu(event_data->EnclosureLogicalID), le16_to_cpu(event_data->StartSlot)); } #endif /** * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events * @ioc: per adapter object * @fw_event: The fw_event_work object * Context: user. * * Return nothing. */ static void _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) { #ifdef CONFIG_SCSI_MPT3SAS_LOGGING if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) _scsih_sas_enclosure_dev_status_change_event_debug(ioc, fw_event->event_data); #endif } /** * _scsih_sas_broadcast_primitive_event - handle broadcast events * @ioc: per adapter object * @fw_event: The fw_event_work object * Context: user. * * Return nothing. 
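 *
 * Each outstanding SCSI command is queried with TASKTYPE_QUERY_TASK; any
 * command the IOC or target no longer owns is aborted with
 * TASKTYPE_ABORT_TASK.  I/O is blocked on all devices while the scan
 * runs.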
*/ static void _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) { struct scsi_cmnd *scmd; struct scsi_device *sdev; u16 smid, handle; u32 lun; struct MPT3SAS_DEVICE *sas_device_priv_data; u32 termination_count; u32 query_count; Mpi2SCSITaskManagementReply_t *mpi_reply; Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data; u16 ioc_status; unsigned long flags; int r; u8 max_retries = 0; u8 task_abort_retries; mutex_lock(&ioc->tm_cmds.mutex); pr_info(MPT3SAS_FMT "%s: enter: phy number(%d), width(%d)\n", ioc->name, __func__, event_data->PhyNum, event_data->PortWidth); _scsih_block_io_all_device(ioc); spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); mpi_reply = ioc->tm_cmds.reply; broadcast_aen_retry: /* sanity checks for retrying this loop */ if (max_retries++ == 5) { dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: giving up\n", ioc->name, __func__)); goto out; } else if (max_retries > 1) dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: %d retry\n", ioc->name, __func__, max_retries - 1)); termination_count = 0; query_count = 0; for (smid = 1; smid <= ioc->scsiio_depth; smid++) { if (ioc->shost_recovery) goto out; scmd = _scsih_scsi_lookup_get(ioc, smid); if (!scmd) continue; sdev = scmd->device; sas_device_priv_data = sdev->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target) continue; /* skip hidden raid components */ if (sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) continue; /* skip volumes */ if (sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) continue; handle = sas_device_priv_data->sas_target->handle; lun = sas_device_priv_data->lun; query_count++; if (ioc->shost_recovery) goto out; spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun, MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, 0, TM_MUTEX_OFF); if (r == FAILED) { sdev_printk(KERN_WARNING, sdev, "mpt3sas_scsih_issue_tm: FAILED when sending " "QUERY_TASK: scmd(%p)\n", scmd); spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); goto broadcast_aen_retry; } ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { sdev_printk(KERN_WARNING, sdev, "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n", ioc_status, scmd); spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); goto broadcast_aen_retry; } /* see if IO is still owned by IOC and target */ if (mpi_reply->ResponseCode == MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED || mpi_reply->ResponseCode == MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) { spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); continue; } task_abort_retries = 0; tm_retry: if (task_abort_retries++ == 60) { dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: ABORT_TASK: giving up\n", ioc->name, __func__)); spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); goto broadcast_aen_retry; } if (ioc->shost_recovery) goto out_no_lock; r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id, sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, scmd->serial_number, TM_MUTEX_OFF); if (r == FAILED) { sdev_printk(KERN_WARNING, sdev, "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : " "scmd(%p)\n", scmd); goto tm_retry; } if (task_abort_retries > 1) sdev_printk(KERN_WARNING, sdev, "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):" " scmd(%p)\n", task_abort_retries - 1, scmd); termination_count += le32_to_cpu(mpi_reply->TerminationCount); spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); } if (ioc->broadcast_aen_pending) { 
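		/* another broadcast AEN was received while this one was
		 * being processed (flagged by the interrupt-time event
		 * callback); scan the outstanding commands again */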
dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: loop back due to pending AEN\n", ioc->name, __func__)); ioc->broadcast_aen_pending = 0; goto broadcast_aen_retry; } out: spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); out_no_lock: dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s - exit, query_count = %d termination_count = %d\n", ioc->name, __func__, query_count, termination_count)); ioc->broadcast_aen_busy = 0; if (!ioc->shost_recovery) _scsih_ublock_io_all_device(ioc); mutex_unlock(&ioc->tm_cmds.mutex); } /** * _scsih_sas_discovery_event - handle discovery events * @ioc: per adapter object * @fw_event: The fw_event_work object * Context: user. * * Return nothing. */ static void _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) { Mpi2EventDataSasDiscovery_t *event_data = fw_event->event_data; #ifdef CONFIG_SCSI_MPT3SAS_LOGGING if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) { pr_info(MPT3SAS_FMT "discovery event: (%s)", ioc->name, (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ? "start" : "stop"); if (event_data->DiscoveryStatus) pr_info("discovery_status(0x%08x)", le32_to_cpu(event_data->DiscoveryStatus)); pr_info("\n"); } #endif if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED && !ioc->sas_hba.num_phys) { if (disable_discovery > 0 && ioc->shost_recovery) { /* Wait for the reset to complete */ while (ioc->shost_recovery) ssleep(1); } _scsih_sas_host_add(ioc); } } /** * _scsih_ir_fastpath - turn on fastpath for IR physdisk * @ioc: per adapter object * @handle: device handle for physical disk * @phys_disk_num: physical disk number * * Return 0 for success, else failure. */ static int _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num) { Mpi2RaidActionRequest_t *mpi_request; Mpi2RaidActionReply_t *mpi_reply; u16 smid; u8 issue_reset = 0; int rc = 0; u16 ioc_status; u32 log_info; mutex_lock(&ioc->scsih_cmds.mutex); if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) { pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n", ioc->name, __func__); rc = -EAGAIN; goto out; } ioc->scsih_cmds.status = MPT3_CMD_PENDING; smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx); if (!smid) { pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", ioc->name, __func__); ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; rc = -EAGAIN; goto out; } mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); ioc->scsih_cmds.smid = smid; memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t)); mpi_request->Function = MPI2_FUNCTION_RAID_ACTION; mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN; mpi_request->PhysDiskNum = phys_disk_num; dewtprintk(ioc, pr_info(MPT3SAS_FMT "IR RAID_ACTION: turning fast "\ "path on for handle(0x%04x), phys_disk_num (0x%02x)\n", ioc->name, handle, phys_disk_num)); init_completion(&ioc->scsih_cmds.done); mpt3sas_base_put_smid_default(ioc, smid); wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ); if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name, __func__); if (!(ioc->scsih_cmds.status & MPT3_CMD_RESET)) issue_reset = 1; rc = -EFAULT; goto out; } if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) { mpi_reply = ioc->scsih_cmds.reply; ioc_status = le16_to_cpu(mpi_reply->IOCStatus); if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) log_info = le32_to_cpu(mpi_reply->IOCLogInfo); else log_info = 0; ioc_status &= MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { dewtprintk(ioc, pr_info(MPT3SAS_FMT "IR RAID_ACTION: failed: ioc_status(0x%04x), " 
"loginfo(0x%08x)!!!\n", ioc->name, ioc_status, log_info)); rc = -EFAULT; } else dewtprintk(ioc, pr_info(MPT3SAS_FMT "IR RAID_ACTION: completed successfully\n", ioc->name)); } out: ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; mutex_unlock(&ioc->scsih_cmds.mutex); if (issue_reset) mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, FORCE_BIG_HAMMER); return rc; } /** * _scsih_reprobe_lun - reprobing lun * @sdev: scsi device struct * @no_uld_attach: sdev->no_uld_attach flag setting * **/ static void _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach) { int rc; sdev->no_uld_attach = no_uld_attach ? 1 : 0; sdev_printk(KERN_INFO, sdev, "%s raid component\n", sdev->no_uld_attach ? "hidding" : "exposing"); rc = scsi_device_reprobe(sdev); } /** * _scsih_sas_volume_add - add new volume * @ioc: per adapter object * @element: IR config element data * Context: user. * * Return nothing. */ static void _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc, Mpi2EventIrConfigElement_t *element) { struct _raid_device *raid_device; unsigned long flags; u64 wwid; u16 handle = le16_to_cpu(element->VolDevHandle); int rc; mpt3sas_config_get_volume_wwid(ioc, handle, &wwid); if (!wwid) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return; } spin_lock_irqsave(&ioc->raid_device_lock, flags); raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid); spin_unlock_irqrestore(&ioc->raid_device_lock, flags); if (raid_device) return; raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL); if (!raid_device) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return; } raid_device->id = ioc->sas_id++; raid_device->channel = RAID_CHANNEL; raid_device->handle = handle; raid_device->wwid = wwid; _scsih_raid_device_add(ioc, raid_device); if (!ioc->wait_for_discovery_to_complete) { rc = scsi_add_device(ioc->shost, RAID_CHANNEL, raid_device->id, 0); if (rc) _scsih_raid_device_remove(ioc, raid_device); } else { spin_lock_irqsave(&ioc->raid_device_lock, flags); _scsih_determine_boot_device(ioc, raid_device, 1); spin_unlock_irqrestore(&ioc->raid_device_lock, flags); } } /** * _scsih_sas_volume_delete - delete volume * @ioc: per adapter object * @handle: volume device handle * Context: user. * * Return nothing. */ static void _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle) { struct _raid_device *raid_device; unsigned long flags; struct MPT3SAS_TARGET *sas_target_priv_data; struct scsi_target *starget = NULL; spin_lock_irqsave(&ioc->raid_device_lock, flags); raid_device = _scsih_raid_device_find_by_handle(ioc, handle); if (raid_device) { if (raid_device->starget) { starget = raid_device->starget; sas_target_priv_data = starget->hostdata; sas_target_priv_data->deleted = 1; } pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n", ioc->name, raid_device->handle, (unsigned long long) raid_device->wwid); list_del(&raid_device->list); kfree(raid_device); } spin_unlock_irqrestore(&ioc->raid_device_lock, flags); if (starget) scsi_remove_target(&starget->dev); } /** * _scsih_sas_pd_expose - expose pd component to /dev/sdX * @ioc: per adapter object * @element: IR config element data * Context: user. * * Return nothing. 
 */
static void
_scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	struct scsi_target *starget = NULL;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	unsigned long flags;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
	if (sas_device) {
		sas_device->volume_handle = 0;
		sas_device->volume_wwid = 0;
		clear_bit(handle, ioc->pd_handles);
		if (sas_device->starget && sas_device->starget->hostdata) {
			starget = sas_device->starget;
			sas_target_priv_data = starget->hostdata;
			sas_target_priv_data->flags &=
			    ~MPT_TARGET_FLAGS_RAID_COMPONENT;
		}
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (!sas_device)
		return;

	/* exposing raid component */
	if (starget)
		starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
}

/**
 * _scsih_sas_pd_hide - hide pd component from /dev/sdX
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 *
 * Return nothing.
 */
static void
_scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	struct scsi_target *starget = NULL;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	unsigned long flags;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
	u16 volume_handle = 0;
	u64 volume_wwid = 0;

	mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
	if (volume_handle)
		mpt3sas_config_get_volume_wwid(ioc, volume_handle,
		    &volume_wwid);

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
	if (sas_device) {
		set_bit(handle, ioc->pd_handles);
		if (sas_device->starget && sas_device->starget->hostdata) {
			starget = sas_device->starget;
			sas_target_priv_data = starget->hostdata;
			sas_target_priv_data->flags |=
			    MPT_TARGET_FLAGS_RAID_COMPONENT;
			sas_device->volume_handle = volume_handle;
			sas_device->volume_wwid = volume_wwid;
		}
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (!sas_device)
		return;

	/* hiding raid component */
	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);

	if (starget)
		starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
}

/**
 * _scsih_sas_pd_delete - delete pd component
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 *
 * Return nothing.
 */
static void
_scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);

	_scsih_device_remove_by_handle(ioc, handle);
}

/**
 * _scsih_sas_pd_add - add pd component
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 *
 * Return nothing.
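 *
 * If the device is already known only the fast path is (re)enabled;
 * otherwise the parent link is updated and the device is added as a
 * hidden raid component (is_pd = 1).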
*/ static void _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc, Mpi2EventIrConfigElement_t *element) { struct _sas_device *sas_device; unsigned long flags; u16 handle = le16_to_cpu(element->PhysDiskDevHandle); Mpi2ConfigReply_t mpi_reply; Mpi2SasDevicePage0_t sas_device_pg0; u32 ioc_status; u64 sas_address; u16 parent_handle; set_bit(handle, ioc->pd_handles); spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = _scsih_sas_device_find_by_handle(ioc, handle); spin_unlock_irqrestore(&ioc->sas_device_lock, flags); if (sas_device) { _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum); return; } if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return; } parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) mpt3sas_transport_update_links(ioc, sas_address, handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum); _scsih_add_device(ioc, handle, 0, 1); } #ifdef CONFIG_SCSI_MPT3SAS_LOGGING /** * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events * @ioc: per adapter object * @event_data: event data payload * Context: user. * * Return nothing. */ static void _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc, Mpi2EventDataIrConfigChangeList_t *event_data) { Mpi2EventIrConfigElement_t *element; u8 element_type; int i; char *reason_str = NULL, *element_str = NULL; element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; pr_info(MPT3SAS_FMT "raid config change: (%s), elements(%d)\n", ioc->name, (le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 
"foreign" : "native", event_data->NumElements); for (i = 0; i < event_data->NumElements; i++, element++) { switch (element->ReasonCode) { case MPI2_EVENT_IR_CHANGE_RC_ADDED: reason_str = "add"; break; case MPI2_EVENT_IR_CHANGE_RC_REMOVED: reason_str = "remove"; break; case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE: reason_str = "no change"; break; case MPI2_EVENT_IR_CHANGE_RC_HIDE: reason_str = "hide"; break; case MPI2_EVENT_IR_CHANGE_RC_UNHIDE: reason_str = "unhide"; break; case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED: reason_str = "volume_created"; break; case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED: reason_str = "volume_deleted"; break; case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED: reason_str = "pd_created"; break; case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED: reason_str = "pd_deleted"; break; default: reason_str = "unknown reason"; break; } element_type = le16_to_cpu(element->ElementFlags) & MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK; switch (element_type) { case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT: element_str = "volume"; break; case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT: element_str = "phys disk"; break; case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT: element_str = "hot spare"; break; default: element_str = "unknown element"; break; } pr_info("\t(%s:%s), vol handle(0x%04x), " \ "pd handle(0x%04x), pd num(0x%02x)\n", element_str, reason_str, le16_to_cpu(element->VolDevHandle), le16_to_cpu(element->PhysDiskDevHandle), element->PhysDiskNum); } } #endif /** * _scsih_sas_ir_config_change_event - handle ir configuration change events * @ioc: per adapter object * @fw_event: The fw_event_work object * Context: user. * * Return nothing. */ static void _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) { Mpi2EventIrConfigElement_t *element; int i; u8 foreign_config; Mpi2EventDataIrConfigChangeList_t *event_data = fw_event->event_data; #ifdef CONFIG_SCSI_MPT3SAS_LOGGING if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) _scsih_sas_ir_config_change_event_debug(ioc, event_data); #endif foreign_config = (le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0; element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; if (ioc->shost_recovery) { for (i = 0; i < event_data->NumElements; i++, element++) { if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE) _scsih_ir_fastpath(ioc, le16_to_cpu(element->PhysDiskDevHandle), element->PhysDiskNum); } return; } for (i = 0; i < event_data->NumElements; i++, element++) { switch (element->ReasonCode) { case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED: case MPI2_EVENT_IR_CHANGE_RC_ADDED: if (!foreign_config) _scsih_sas_volume_add(ioc, element); break; case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED: case MPI2_EVENT_IR_CHANGE_RC_REMOVED: if (!foreign_config) _scsih_sas_volume_delete(ioc, le16_to_cpu(element->VolDevHandle)); break; case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED: _scsih_sas_pd_hide(ioc, element); break; case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED: _scsih_sas_pd_expose(ioc, element); break; case MPI2_EVENT_IR_CHANGE_RC_HIDE: _scsih_sas_pd_add(ioc, element); break; case MPI2_EVENT_IR_CHANGE_RC_UNHIDE: _scsih_sas_pd_delete(ioc, element); break; } } } /** * _scsih_sas_ir_volume_event - IR volume event * @ioc: per adapter object * @fw_event: The fw_event_work object * Context: user. * * Return nothing. 
*/ static void _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) { u64 wwid; unsigned long flags; struct _raid_device *raid_device; u16 handle; u32 state; int rc; Mpi2EventDataIrVolume_t *event_data = fw_event->event_data; if (ioc->shost_recovery) return; if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED) return; handle = le16_to_cpu(event_data->VolDevHandle); state = le32_to_cpu(event_data->NewValue); dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n", ioc->name, __func__, handle, le32_to_cpu(event_data->PreviousValue), state)); switch (state) { case MPI2_RAID_VOL_STATE_MISSING: case MPI2_RAID_VOL_STATE_FAILED: _scsih_sas_volume_delete(ioc, handle); break; case MPI2_RAID_VOL_STATE_ONLINE: case MPI2_RAID_VOL_STATE_DEGRADED: case MPI2_RAID_VOL_STATE_OPTIMAL: spin_lock_irqsave(&ioc->raid_device_lock, flags); raid_device = _scsih_raid_device_find_by_handle(ioc, handle); spin_unlock_irqrestore(&ioc->raid_device_lock, flags); if (raid_device) break; mpt3sas_config_get_volume_wwid(ioc, handle, &wwid); if (!wwid) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); break; } raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL); if (!raid_device) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); break; } raid_device->id = ioc->sas_id++; raid_device->channel = RAID_CHANNEL; raid_device->handle = handle; raid_device->wwid = wwid; _scsih_raid_device_add(ioc, raid_device); rc = scsi_add_device(ioc->shost, RAID_CHANNEL, raid_device->id, 0); if (rc) _scsih_raid_device_remove(ioc, raid_device); break; case MPI2_RAID_VOL_STATE_INITIALIZING: default: break; } } /** * _scsih_sas_ir_physical_disk_event - PD event * @ioc: per adapter object * @fw_event: The fw_event_work object * Context: user. * * Return nothing. 
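 *
 * A phys disk entering an active state is recorded in ioc->pd_handles
 * and added as a hidden raid component; OFFLINE and the remaining
 * states are ignored here.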
*/ static void _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) { u16 handle, parent_handle; u32 state; struct _sas_device *sas_device; unsigned long flags; Mpi2ConfigReply_t mpi_reply; Mpi2SasDevicePage0_t sas_device_pg0; u32 ioc_status; Mpi2EventDataIrPhysicalDisk_t *event_data = fw_event->event_data; u64 sas_address; if (ioc->shost_recovery) return; if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED) return; handle = le16_to_cpu(event_data->PhysDiskDevHandle); state = le32_to_cpu(event_data->NewValue); dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n", ioc->name, __func__, handle, le32_to_cpu(event_data->PreviousValue), state)); switch (state) { case MPI2_RAID_PD_STATE_ONLINE: case MPI2_RAID_PD_STATE_DEGRADED: case MPI2_RAID_PD_STATE_REBUILDING: case MPI2_RAID_PD_STATE_OPTIMAL: case MPI2_RAID_PD_STATE_HOT_SPARE: set_bit(handle, ioc->pd_handles); spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = _scsih_sas_device_find_by_handle(ioc, handle); spin_unlock_irqrestore(&ioc->sas_device_lock, flags); if (sas_device) return; if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return; } ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return; } parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) mpt3sas_transport_update_links(ioc, sas_address, handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); _scsih_add_device(ioc, handle, 0, 1); break; case MPI2_RAID_PD_STATE_OFFLINE: case MPI2_RAID_PD_STATE_NOT_CONFIGURED: case MPI2_RAID_PD_STATE_NOT_COMPATIBLE: default: break; } } #ifdef CONFIG_SCSI_MPT3SAS_LOGGING /** * _scsih_sas_ir_operation_status_event_debug - debug for IR op event * @ioc: per adapter object * @event_data: event data payload * Context: user. * * Return nothing. */ static void _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc, Mpi2EventDataIrOperationStatus_t *event_data) { char *reason_str = NULL; switch (event_data->RAIDOperation) { case MPI2_EVENT_IR_RAIDOP_RESYNC: reason_str = "resync"; break; case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION: reason_str = "online capacity expansion"; break; case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK: reason_str = "consistency check"; break; case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT: reason_str = "background init"; break; case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT: reason_str = "make data consistent"; break; } if (!reason_str) return; pr_info(MPT3SAS_FMT "raid operational status: (%s)" \ "\thandle(0x%04x), percent complete(%d)\n", ioc->name, reason_str, le16_to_cpu(event_data->VolDevHandle), event_data->PercentComplete); } #endif /** * _scsih_sas_ir_operation_status_event - handle RAID operation events * @ioc: per adapter object * @fw_event: The fw_event_work object * Context: user. * * Return nothing. 
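 *
 * For RESYNC operations the volume's percent_complete is cached in the
 * matching _raid_device so it can be reported through the raid
 * transport class.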
 */
static void
_scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data;
	static struct _raid_device *raid_device;
	unsigned long flags;
	u16 handle;

#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_sas_ir_operation_status_event_debug(ioc,
		    event_data);
#endif

	/* code added for raid transport support */
	if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {

		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		handle = le16_to_cpu(event_data->VolDevHandle);
		raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
		if (raid_device)
			raid_device->percent_complete =
			    event_data->PercentComplete;
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	}
}

/**
 * _scsih_prep_device_scan - initialize parameters prior to device scan
 * @ioc: per adapter object
 *
 * Set the deleted flag prior to device scan.  If the device is found during
 * the scan, then we clear the deleted flag.
 */
static void
_scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (sas_device_priv_data && sas_device_priv_data->sas_target)
			sas_device_priv_data->sas_target->deleted = 1;
	}
}

/**
 * _scsih_mark_responding_sas_device - mark a sas_device as responding
 * @ioc: per adapter object
 * @sas_address: sas address
 * @slot: enclosure slot id
 * @handle: device handle
 *
 * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponding_sas_devices.
 *
 * Return nothing.
 */
static void
_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
	u16 slot, u16 handle)
{
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	struct scsi_target *starget;
	struct _sas_device *sas_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
		if (sas_device->sas_address == sas_address &&
		    sas_device->slot == slot) {
			sas_device->responding = 1;
			starget = sas_device->starget;
			if (starget && starget->hostdata) {
				sas_target_priv_data = starget->hostdata;
				sas_target_priv_data->tm_busy = 0;
				sas_target_priv_data->deleted = 0;
			} else
				sas_target_priv_data = NULL;
			if (starget)
				starget_printk(KERN_INFO, starget,
				    "handle(0x%04x), sas_addr(0x%016llx), "
				    "enclosure logical id(0x%016llx), "
				    "slot(%d)\n", handle,
				    (unsigned long long)sas_device->sas_address,
				    (unsigned long long)
				    sas_device->enclosure_logical_id,
				    sas_device->slot);
			if (sas_device->handle == handle)
				goto out;
			pr_info("\thandle changed from(0x%04x)!!!\n",
			    sas_device->handle);
			sas_device->handle = handle;
			if (sas_target_priv_data)
				sas_target_priv_data->handle = handle;
			goto out;
		}
	}
 out:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}

/**
 * _scsih_search_responding_sas_devices - search for responding end devices
 * @ioc: per adapter object
 *
 * After host reset, find out whether devices are still responding.
 * If not, remove them.
 *
 * Return nothing.
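 *
 * Iterates SAS device page 0 using the GET_NEXT_HANDLE form and marks
 * each end device still visible to the IOC as responding.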
 */
static void
_scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u16 ioc_status;
	u16 handle;
	u32 device_info;

	pr_info(MPT3SAS_FMT "search for end-devices: start\n", ioc->name);

	if (list_empty(&ioc->sas_device_list))
		goto out;

	handle = 0xFFFF;
	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
	    handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
			break;
		handle = le16_to_cpu(sas_device_pg0.DevHandle);
		device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
		if (!(_scsih_is_end_device(device_info)))
			continue;
		_scsih_mark_responding_sas_device(ioc,
		    le64_to_cpu(sas_device_pg0.SASAddress),
		    le16_to_cpu(sas_device_pg0.Slot), handle);
	}

 out:
	pr_info(MPT3SAS_FMT "search for end-devices: complete\n",
	    ioc->name);
}

/**
 * _scsih_mark_responding_raid_device - mark a raid_device as responding
 * @ioc: per adapter object
 * @wwid: world wide identifier for raid volume
 * @handle: device handle
 *
 * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponding_sas_devices.
 *
 * Return nothing.
 */
static void
_scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
	u16 handle)
{
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct scsi_target *starget;
	struct _raid_device *raid_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
		if (raid_device->wwid == wwid && raid_device->starget) {
			starget = raid_device->starget;
			if (starget && starget->hostdata) {
				sas_target_priv_data = starget->hostdata;
				sas_target_priv_data->deleted = 0;
			} else
				sas_target_priv_data = NULL;
			raid_device->responding = 1;
			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
			starget_printk(KERN_INFO, raid_device->starget,
			    "handle(0x%04x), wwid(0x%016llx)\n", handle,
			    (unsigned long long)raid_device->wwid);
			spin_lock_irqsave(&ioc->raid_device_lock, flags);
			if (raid_device->handle == handle) {
				spin_unlock_irqrestore(&ioc->raid_device_lock,
				    flags);
				return;
			}
			pr_info("\thandle changed from(0x%04x)!!!\n",
			    raid_device->handle);
			raid_device->handle = handle;
			if (sas_target_priv_data)
				sas_target_priv_data->handle = handle;
			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}

/**
 * _scsih_search_responding_raid_devices - search for responding raid volumes
 * @ioc: per adapter object
 *
 * After host reset, find out whether devices are still responding.
 * If not, remove them.
 *
 * Return nothing.
 */
static void
_scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2RaidVolPage1_t volume_pg1;
	Mpi2RaidVolPage0_t volume_pg0;
	Mpi2RaidPhysDiskPage0_t pd_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u16 ioc_status;
	u16 handle;
	u8 phys_disk_num;

	if (!ioc->ir_firmware)
		return;

	pr_info(MPT3SAS_FMT "search for raid volumes: start\n",
	    ioc->name);

	if (list_empty(&ioc->raid_device_list))
		goto out;

	handle = 0xFFFF;
	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
			break;
		handle = le16_to_cpu(volume_pg1.DevHandle);

		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
		    sizeof(Mpi2RaidVolPage0_t)))
			continue;

		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
			_scsih_mark_responding_raid_device(ioc,
			    le64_to_cpu(volume_pg1.WWID), handle);
	}

	/* refresh the pd_handles */
	phys_disk_num = 0xFF;
	memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
	while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
	    phys_disk_num))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
			break;
		phys_disk_num = pd_pg0.PhysDiskNum;
		handle = le16_to_cpu(pd_pg0.DevHandle);
		set_bit(handle, ioc->pd_handles);
	}

 out:
	pr_info(MPT3SAS_FMT "search for responding raid volumes: complete\n",
	    ioc->name);
}

/**
 * _scsih_mark_responding_expander - mark an expander as responding
 * @ioc: per adapter object
 * @sas_address: sas address
 * @handle: expander device handle
 *
 * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponding_sas_devices.
 *
 * Return nothing.
 */
static void
_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
	u16 handle)
{
	struct _sas_node *sas_expander;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
		if (sas_expander->sas_address != sas_address)
			continue;
		sas_expander->responding = 1;
		if (sas_expander->handle == handle)
			goto out;
		pr_info("\texpander(0x%016llx): handle changed"
		    " from(0x%04x) to (0x%04x)!!!\n",
		    (unsigned long long)sas_expander->sas_address,
		    sas_expander->handle, handle);
		sas_expander->handle = handle;
		for (i = 0 ; i < sas_expander->num_phys ; i++)
			sas_expander->phy[i].handle = handle;
		goto out;
	}
 out:
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
}

/**
 * _scsih_search_responding_expanders - search for responding expanders
 * @ioc: per adapter object
 *
 * After host reset, find out whether devices are still responding.
 * If not, remove them.
 *
 * Return nothing.
*/ static void _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc) { Mpi2ExpanderPage0_t expander_pg0; Mpi2ConfigReply_t mpi_reply; u16 ioc_status; u64 sas_address; u16 handle; pr_info(MPT3SAS_FMT "search for expanders: start\n", ioc->name); if (list_empty(&ioc->sas_expander_list)) goto out; handle = 0xFFFF; while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) break; handle = le16_to_cpu(expander_pg0.DevHandle); sas_address = le64_to_cpu(expander_pg0.SASAddress); pr_info("\texpander present: handle(0x%04x), sas_addr(0x%016llx)\n", handle, (unsigned long long)sas_address); _scsih_mark_responding_expander(ioc, sas_address, handle); } out: pr_info(MPT3SAS_FMT "search for expanders: complete\n", ioc->name); } /** * _scsih_remove_unresponding_sas_devices - removing unresponding devices * @ioc: per adapter object * * Return nothing. */ static void _scsih_remove_unresponding_sas_devices(struct MPT3SAS_ADAPTER *ioc) { struct _sas_device *sas_device, *sas_device_next; struct _sas_node *sas_expander, *sas_expander_next; struct _raid_device *raid_device, *raid_device_next; struct list_head tmp_list; unsigned long flags; pr_info(MPT3SAS_FMT "removing unresponding devices: start\n", ioc->name); /* removing unresponding end devices */ pr_info(MPT3SAS_FMT "removing unresponding devices: end-devices\n", ioc->name); list_for_each_entry_safe(sas_device, sas_device_next, &ioc->sas_device_list, list) { if (!sas_device->responding) mpt3sas_device_remove_by_sas_address(ioc, sas_device->sas_address); else sas_device->responding = 0; } /* removing unresponding volumes */ if (ioc->ir_firmware) { pr_info(MPT3SAS_FMT "removing unresponding devices: volumes\n", ioc->name); list_for_each_entry_safe(raid_device, raid_device_next, &ioc->raid_device_list, list) { if (!raid_device->responding) _scsih_sas_volume_delete(ioc, raid_device->handle); else raid_device->responding = 0; } } /* removing unresponding expanders */ pr_info(MPT3SAS_FMT "removing unresponding devices: expanders\n", ioc->name); spin_lock_irqsave(&ioc->sas_node_lock, flags); INIT_LIST_HEAD(&tmp_list); list_for_each_entry_safe(sas_expander, sas_expander_next, &ioc->sas_expander_list, list) { if (!sas_expander->responding) list_move_tail(&sas_expander->list, &tmp_list); else sas_expander->responding = 0; } spin_unlock_irqrestore(&ioc->sas_node_lock, flags); list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list, list) { list_del(&sas_expander->list); _scsih_expander_node_remove(ioc, sas_expander); } pr_info(MPT3SAS_FMT "removing unresponding devices: complete\n", ioc->name); /* unblock devices */ _scsih_ublock_io_all_device(ioc); } static void _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc, struct _sas_node *sas_expander, u16 handle) { Mpi2ExpanderPage1_t expander_pg1; Mpi2ConfigReply_t mpi_reply; int i; for (i = 0 ; i < sas_expander->num_phys ; i++) { if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply, &expander_pg1, i, handle))) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return; } mpt3sas_transport_update_links(ioc, sas_expander->sas_address, le16_to_cpu(expander_pg1.AttachedDevHandle), i, expander_pg1.NegotiatedLinkRate >> 4); } } /** * _scsih_scan_for_devices_after_reset - scan for devices after host reset * @ioc: per adapter object * * Return nothing. 
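 *
 * Scan order: expanders first, then (with IR firmware) phys disks and
 * volumes, and finally plain end devices, so parent objects exist
 * before their children are added.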
*/ static void _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) { Mpi2ExpanderPage0_t expander_pg0; Mpi2SasDevicePage0_t sas_device_pg0; Mpi2RaidVolPage1_t volume_pg1; Mpi2RaidVolPage0_t volume_pg0; Mpi2RaidPhysDiskPage0_t pd_pg0; Mpi2EventIrConfigElement_t element; Mpi2ConfigReply_t mpi_reply; u8 phys_disk_num; u16 ioc_status; u16 handle, parent_handle; u64 sas_address; struct _sas_device *sas_device; struct _sas_node *expander_device; static struct _raid_device *raid_device; u8 retry_count; unsigned long flags; pr_info(MPT3SAS_FMT "scan devices: start\n", ioc->name); _scsih_sas_host_refresh(ioc); pr_info(MPT3SAS_FMT "\tscan devices: expanders start\n", ioc->name); /* expanders */ handle = 0xFFFF; while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { pr_info(MPT3SAS_FMT "\tbreak from expander scan: " \ "ioc_status(0x%04x), loginfo(0x%08x)\n", ioc->name, ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); break; } handle = le16_to_cpu(expander_pg0.DevHandle); spin_lock_irqsave(&ioc->sas_node_lock, flags); expander_device = mpt3sas_scsih_expander_find_by_sas_address( ioc, le64_to_cpu(expander_pg0.SASAddress)); spin_unlock_irqrestore(&ioc->sas_node_lock, flags); if (expander_device) _scsih_refresh_expander_links(ioc, expander_device, handle); else { pr_info(MPT3SAS_FMT "\tBEFORE adding expander: " \ "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name, handle, (unsigned long long) le64_to_cpu(expander_pg0.SASAddress)); _scsih_expander_add(ioc, handle); pr_info(MPT3SAS_FMT "\tAFTER adding expander: " \ "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name, handle, (unsigned long long) le64_to_cpu(expander_pg0.SASAddress)); } } pr_info(MPT3SAS_FMT "\tscan devices: expanders complete\n", ioc->name); if (!ioc->ir_firmware) goto skip_to_sas; pr_info(MPT3SAS_FMT "\tscan devices: phys disk start\n", ioc->name); /* phys disk */ phys_disk_num = 0xFF; while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply, &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM, phys_disk_num))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { pr_info(MPT3SAS_FMT "\tbreak from phys disk scan: "\ "ioc_status(0x%04x), loginfo(0x%08x)\n", ioc->name, ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); break; } phys_disk_num = pd_pg0.PhysDiskNum; handle = le16_to_cpu(pd_pg0.DevHandle); spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = _scsih_sas_device_find_by_handle(ioc, handle); spin_unlock_irqrestore(&ioc->sas_device_lock, flags); if (sas_device) continue; if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle) != 0) continue; ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { pr_info(MPT3SAS_FMT "\tbreak from phys disk scan " \ "ioc_status(0x%04x), loginfo(0x%08x)\n", ioc->name, ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); break; } parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) { pr_info(MPT3SAS_FMT "\tBEFORE adding phys disk: " \ " handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name, handle, (unsigned long long) le64_to_cpu(sas_device_pg0.SASAddress)); mpt3sas_transport_update_links(ioc, sas_address, handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); 
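			/* record the phys disk in ioc->pd_handles so it is
			 * treated as a hidden raid component when added
			 * below */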
			set_bit(handle, ioc->pd_handles);

			retry_count = 0;
			/* This will retry adding the end device.
			 * _scsih_add_device() will decide on retries and
			 * return a non-zero value when the add should be
			 * retried.
			 */
			while (_scsih_add_device(ioc, handle, retry_count++,
			    1)) {
				ssleep(1);
			}
			pr_info(MPT3SAS_FMT "\tAFTER adding phys disk: " \
			    " handle (0x%04x), sas_addr(0x%016llx)\n",
			    ioc->name, handle, (unsigned long long)
			    le64_to_cpu(sas_device_pg0.SASAddress));
		}
	}

	pr_info(MPT3SAS_FMT "\tscan devices: phys disk complete\n",
	    ioc->name);

	pr_info(MPT3SAS_FMT "\tscan devices: volumes start\n", ioc->name);

	/* volumes */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \
			    "ioc_status(0x%04x), loginfo(0x%08x)\n",
			    ioc->name, ioc_status,
			    le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		handle = le16_to_cpu(volume_pg1.DevHandle);
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_wwid(ioc,
		    le64_to_cpu(volume_pg1.WWID));
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		if (raid_device)
			continue;
		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
		    sizeof(Mpi2RaidVolPage0_t)))
			continue;
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \
			    "ioc_status(0x%04x), loginfo(0x%08x)\n",
			    ioc->name, ioc_status,
			    le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
			memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
			element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
			element.VolDevHandle = volume_pg1.DevHandle;
			pr_info(MPT3SAS_FMT
				"\tBEFORE adding volume: handle (0x%04x)\n",
				ioc->name, volume_pg1.DevHandle);
			_scsih_sas_volume_add(ioc, &element);
			pr_info(MPT3SAS_FMT
				"\tAFTER adding volume: handle (0x%04x)\n",
				ioc->name, volume_pg1.DevHandle);
		}
	}

	pr_info(MPT3SAS_FMT "\tscan devices: volumes complete\n",
	    ioc->name);

 skip_to_sas:

	pr_info(MPT3SAS_FMT "\tscan devices: end devices start\n", ioc->name);

	/* sas devices */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
	    handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			pr_info(MPT3SAS_FMT "\tbreak from end device scan:" \
			    " ioc_status(0x%04x), loginfo(0x%08x)\n",
			    ioc->name, ioc_status,
			    le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		handle = le16_to_cpu(sas_device_pg0.DevHandle);
		if (!(_scsih_is_end_device(
		    le32_to_cpu(sas_device_pg0.DeviceInfo))))
			continue;
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
		    le64_to_cpu(sas_device_pg0.SASAddress));
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
		if (sas_device)
			continue;
		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
			pr_info(MPT3SAS_FMT "\tBEFORE adding end device: " \
			    "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
			    handle, (unsigned long long)
			    le64_to_cpu(sas_device_pg0.SASAddress));
			mpt3sas_transport_update_links(ioc, sas_address, handle,
			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
			retry_count = 0;
			/* This will retry adding the end device.
			 * _scsih_add_device() will decide on retries and
			 * return a non-zero value when the add should be
			 * retried.
			 */
			while (_scsih_add_device(ioc, handle, retry_count++,
			    0)) {
				ssleep(1);
			}
			pr_info(MPT3SAS_FMT "\tAFTER adding end device: " \
			    "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
			    handle, (unsigned long long)
			    le64_to_cpu(sas_device_pg0.SASAddress));
		}
	}
	pr_info(MPT3SAS_FMT "\tscan devices: end devices complete\n",
	    ioc->name);

	pr_info(MPT3SAS_FMT "scan devices: complete\n", ioc->name);
}

/**
 * mpt3sas_scsih_reset_handler - reset callback handler (for scsih)
 * @ioc: per adapter object
 * @reset_phase: phase
 *
 * The handler for doing any required cleanup or initialization.
 *
 * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
 * MPT3_IOC_DONE_RESET
 *
 * Return nothing.
 */
void
mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
{
	switch (reset_phase) {
	case MPT3_IOC_PRE_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
			"%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
		break;
	case MPT3_IOC_AFTER_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
			"%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
		if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
			ioc->scsih_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
			complete(&ioc->scsih_cmds.done);
		}
		if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
			ioc->tm_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
			complete(&ioc->tm_cmds.done);
		}
		_scsih_fw_event_cleanup_queue(ioc);
		_scsih_flush_running_cmds(ioc);
		break;
	case MPT3_IOC_DONE_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
			"%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
		if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
		    !ioc->sas_hba.num_phys)) {
			_scsih_prep_device_scan(ioc);
			_scsih_search_responding_sas_devices(ioc);
			_scsih_search_responding_raid_devices(ioc);
			_scsih_search_responding_expanders(ioc);
			_scsih_error_recovery_delete_devices(ioc);
		}
		break;
	}
}

/**
 * _mpt3sas_fw_work - delayed task for processing firmware events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Return nothing.
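 *
 * Events queued from mpt3sas_scsih_event_callback() are handled here in
 * user context; events arriving during host removal or PCI error
 * recovery are freed without processing.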
 */
static void
_mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
{
	/* the queue is being flushed so ignore this event */
	if (ioc->remove_host || fw_event->cancel_pending_work ||
	    ioc->pci_error_recovery) {
		_scsih_fw_event_free(ioc, fw_event);
		return;
	}

	switch (fw_event->event) {
	case MPT3SAS_PROCESS_TRIGGER_DIAG:
		mpt3sas_process_trigger_data(ioc, fw_event->event_data);
		break;
	case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
		while (scsi_host_in_recovery(ioc->shost) || ioc->shost_recovery)
			ssleep(1);
		_scsih_remove_unresponding_sas_devices(ioc);
		_scsih_scan_for_devices_after_reset(ioc);
		break;
	case MPT3SAS_PORT_ENABLE_COMPLETE:
		ioc->start_scan = 0;
		if (missing_delay[0] != -1 && missing_delay[1] != -1)
			mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
			    missing_delay[1]);
		dewtprintk(ioc, pr_info(MPT3SAS_FMT
			"port enable: complete from worker thread\n",
			ioc->name));
		break;
	case MPT3SAS_TURN_ON_FAULT_LED:
		_scsih_turn_on_fault_led(ioc, fw_event->device_handle);
		break;
	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		_scsih_sas_topology_change_event(ioc, fw_event);
		break;
	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
		_scsih_sas_device_status_change_event(ioc, fw_event);
		break;
	case MPI2_EVENT_SAS_DISCOVERY:
		_scsih_sas_discovery_event(ioc, fw_event);
		break;
	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
		_scsih_sas_broadcast_primitive_event(ioc, fw_event);
		break;
	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		_scsih_sas_enclosure_dev_status_change_event(ioc,
		    fw_event);
		break;
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		_scsih_sas_ir_config_change_event(ioc, fw_event);
		break;
	case MPI2_EVENT_IR_VOLUME:
		_scsih_sas_ir_volume_event(ioc, fw_event);
		break;
	case MPI2_EVENT_IR_PHYSICAL_DISK:
		_scsih_sas_ir_physical_disk_event(ioc, fw_event);
		break;
	case MPI2_EVENT_IR_OPERATION_STATUS:
		_scsih_sas_ir_operation_status_event(ioc, fw_event);
		break;
	}
	_scsih_fw_event_free(ioc, fw_event);
}

/**
 * _firmware_event_work - wrapper for the work thread handling firmware events
 * @work: The fw_event_work object
 * Context: user.
 *
 * Return nothing.
 */
static void
_firmware_event_work(struct work_struct *work)
{
	struct fw_event_work *fw_event = container_of(work,
	    struct fw_event_work, work);

	_mpt3sas_fw_work(fw_event->ioc, fw_event);
}

/**
 * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt.
 *
 * This function merely adds a new work task into ioc->firmware_event_thread.
 * The tasks are worked from _firmware_event_work in user context.
 *
 * Return 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
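 *
 * Runs in interrupt context, so the fw_event_work object and its copy of
 * the event data are allocated with GFP_ATOMIC before being queued.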
*/ u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply) { struct fw_event_work *fw_event; Mpi2EventNotificationReply_t *mpi_reply; u16 event; u16 sz; /* events turned off due to host reset or driver unloading */ if (ioc->remove_host || ioc->pci_error_recovery) return 1; mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); if (unlikely(!mpi_reply)) { pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return 1; } event = le16_to_cpu(mpi_reply->Event); if (event != MPI2_EVENT_LOG_ENTRY_ADDED) mpt3sas_trigger_event(ioc, event, 0); switch (event) { /* handle these */ case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: { Mpi2EventDataSasBroadcastPrimitive_t *baen_data = (Mpi2EventDataSasBroadcastPrimitive_t *) mpi_reply->EventData; if (baen_data->Primitive != MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT) return 1; if (ioc->broadcast_aen_busy) { ioc->broadcast_aen_pending++; return 1; } else ioc->broadcast_aen_busy = 1; break; } case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: _scsih_check_topo_delete_events(ioc, (Mpi2EventDataSasTopologyChangeList_t *) mpi_reply->EventData); break; case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: _scsih_check_ir_config_unhide_events(ioc, (Mpi2EventDataIrConfigChangeList_t *) mpi_reply->EventData); break; case MPI2_EVENT_IR_VOLUME: _scsih_check_volume_delete_events(ioc, (Mpi2EventDataIrVolume_t *) mpi_reply->EventData); break; case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: case MPI2_EVENT_IR_OPERATION_STATUS: case MPI2_EVENT_SAS_DISCOVERY: case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: case MPI2_EVENT_IR_PHYSICAL_DISK: break; default: /* ignore the rest */ return 1; } fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC); if (!fw_event) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); return 1; } sz = le16_to_cpu(mpi_reply->EventDataLength) * 4; fw_event->event_data = kzalloc(sz, GFP_ATOMIC); if (!fw_event->event_data) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); kfree(fw_event); return 1; } memcpy(fw_event->event_data, mpi_reply->EventData, sz); fw_event->ioc = ioc; fw_event->VF_ID = mpi_reply->VF_ID; fw_event->VP_ID = mpi_reply->VP_ID; fw_event->event = event; _scsih_fw_event_add(ioc, fw_event); return 1; } /* shost template */ static struct scsi_host_template scsih_driver_template = { .module = THIS_MODULE, .name = "Fusion MPT SAS Host", .proc_name = MPT3SAS_DRIVER_NAME, .queuecommand = _scsih_qcmd, .target_alloc = _scsih_target_alloc, .slave_alloc = _scsih_slave_alloc, .slave_configure = _scsih_slave_configure, .target_destroy = _scsih_target_destroy, .slave_destroy = _scsih_slave_destroy, .scan_finished = _scsih_scan_finished, .scan_start = _scsih_scan_start, .change_queue_depth = _scsih_change_queue_depth, .change_queue_type = _scsih_change_queue_type, .eh_abort_handler = _scsih_abort, .eh_device_reset_handler = _scsih_dev_reset, .eh_target_reset_handler = _scsih_target_reset, .eh_host_reset_handler = _scsih_host_reset, .bios_param = _scsih_bios_param, .can_queue = 1, .this_id = -1, .sg_tablesize = MPT3SAS_SG_DEPTH, .max_sectors = 32767, .cmd_per_lun = 7, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = mpt3sas_host_attrs, .sdev_attrs = mpt3sas_dev_attrs, }; /** * _scsih_expander_node_remove - removing expander device from list. * @ioc: per adapter object * @sas_expander: the sas_device object * Context: Calling function should acquire ioc->sas_node_lock. 
* * Removing object and freeing associated memory from the * ioc->sas_expander_list. * * Return nothing. */ static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc, struct _sas_node *sas_expander) { struct _sas_port *mpt3sas_port, *next; /* remove sibling ports attached to this expander */ list_for_each_entry_safe(mpt3sas_port, next, &sas_expander->sas_port_list, port_list) { if (ioc->shost_recovery) return; if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) mpt3sas_device_remove_by_sas_address(ioc, mpt3sas_port->remote_identify.sas_address); else if (mpt3sas_port->remote_identify.device_type == SAS_EDGE_EXPANDER_DEVICE || mpt3sas_port->remote_identify.device_type == SAS_FANOUT_EXPANDER_DEVICE) mpt3sas_expander_remove(ioc, mpt3sas_port->remote_identify.sas_address); } mpt3sas_transport_port_remove(ioc, sas_expander->sas_address, sas_expander->sas_address_parent); pr_info(MPT3SAS_FMT "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, sas_expander->handle, (unsigned long long) sas_expander->sas_address); kfree(sas_expander->phy); kfree(sas_expander); } /** * _scsih_ir_shutdown - IR shutdown notification * @ioc: per adapter object * * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that * the host system is shutting down. * * Return nothing. */ static void _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc) { Mpi2RaidActionRequest_t *mpi_request; Mpi2RaidActionReply_t *mpi_reply; u16 smid; /* is the IR firmware build loaded? */ if (!ioc->ir_firmware) return; /* are there any volumes? */ if (list_empty(&ioc->raid_device_list)) return; mutex_lock(&ioc->scsih_cmds.mutex); if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) { pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n", ioc->name, __func__); goto out; } ioc->scsih_cmds.status = MPT3_CMD_PENDING; smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx); if (!smid) { pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", ioc->name, __func__); ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; goto out; } mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); ioc->scsih_cmds.smid = smid; memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t)); mpi_request->Function = MPI2_FUNCTION_RAID_ACTION; mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED; pr_info(MPT3SAS_FMT "IR shutdown (sending)\n", ioc->name); init_completion(&ioc->scsih_cmds.done); mpt3sas_base_put_smid_default(ioc, smid); wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ); if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name, __func__); goto out; } if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) { mpi_reply = ioc->scsih_cmds.reply; pr_info(MPT3SAS_FMT "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n", ioc->name, le16_to_cpu(mpi_reply->IOCStatus), le32_to_cpu(mpi_reply->IOCLogInfo)); } out: ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; mutex_unlock(&ioc->scsih_cmds.mutex); } /** * _scsih_remove - detach and remove the SCSI host * @pdev: PCI device struct * * Routine called when unloading the driver. * Return nothing.
*/ static void _scsih_remove(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); struct _sas_port *mpt3sas_port, *next_port; struct _raid_device *raid_device, *next; struct MPT3SAS_TARGET *sas_target_priv_data; struct workqueue_struct *wq; unsigned long flags; ioc->remove_host = 1; _scsih_fw_event_cleanup_queue(ioc); spin_lock_irqsave(&ioc->fw_event_lock, flags); wq = ioc->firmware_event_thread; ioc->firmware_event_thread = NULL; spin_unlock_irqrestore(&ioc->fw_event_lock, flags); if (wq) destroy_workqueue(wq); /* release all the volumes */ _scsih_ir_shutdown(ioc); list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list, list) { if (raid_device->starget) { sas_target_priv_data = raid_device->starget->hostdata; sas_target_priv_data->deleted = 1; scsi_remove_target(&raid_device->starget->dev); } pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n", ioc->name, raid_device->handle, (unsigned long long) raid_device->wwid); _scsih_raid_device_remove(ioc, raid_device); } /* free ports attached to the sas_host */ list_for_each_entry_safe(mpt3sas_port, next_port, &ioc->sas_hba.sas_port_list, port_list) { if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) mpt3sas_device_remove_by_sas_address(ioc, mpt3sas_port->remote_identify.sas_address); else if (mpt3sas_port->remote_identify.device_type == SAS_EDGE_EXPANDER_DEVICE || mpt3sas_port->remote_identify.device_type == SAS_FANOUT_EXPANDER_DEVICE) mpt3sas_expander_remove(ioc, mpt3sas_port->remote_identify.sas_address); } /* free phys attached to the sas_host */ if (ioc->sas_hba.num_phys) { kfree(ioc->sas_hba.phy); ioc->sas_hba.phy = NULL; ioc->sas_hba.num_phys = 0; } sas_remove_host(shost); mpt3sas_base_detach(ioc); list_del(&ioc->list); scsi_remove_host(shost); scsi_host_put(shost); } /** * _scsih_shutdown - routine called during system shutdown * @pdev: PCI device struct * * Return nothing. */ static void _scsih_shutdown(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); struct workqueue_struct *wq; unsigned long flags; ioc->remove_host = 1; _scsih_fw_event_cleanup_queue(ioc); spin_lock_irqsave(&ioc->fw_event_lock, flags); wq = ioc->firmware_event_thread; ioc->firmware_event_thread = NULL; spin_unlock_irqrestore(&ioc->fw_event_lock, flags); if (wq) destroy_workqueue(wq); _scsih_ir_shutdown(ioc); mpt3sas_base_detach(ioc); } /** * _scsih_probe_boot_devices - reports 1st device * @ioc: per adapter object * * If specified in bios page 2, this routine reports the 1st * device to scsi-ml or the sas transport for persistent boot device * purposes.
Please refer to function _scsih_determine_boot_device() */ static void _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc) { u8 is_raid; void *device; struct _sas_device *sas_device; struct _raid_device *raid_device; u16 handle; u64 sas_address_parent; u64 sas_address; unsigned long flags; int rc; /* no BIOS, return immediately */ if (!ioc->bios_pg3.BiosVersion) return; device = NULL; is_raid = 0; if (ioc->req_boot_device.device) { device = ioc->req_boot_device.device; is_raid = ioc->req_boot_device.is_raid; } else if (ioc->req_alt_boot_device.device) { device = ioc->req_alt_boot_device.device; is_raid = ioc->req_alt_boot_device.is_raid; } else if (ioc->current_boot_device.device) { device = ioc->current_boot_device.device; is_raid = ioc->current_boot_device.is_raid; } if (!device) return; if (is_raid) { raid_device = device; rc = scsi_add_device(ioc->shost, RAID_CHANNEL, raid_device->id, 0); if (rc) _scsih_raid_device_remove(ioc, raid_device); } else { spin_lock_irqsave(&ioc->sas_device_lock, flags); sas_device = device; handle = sas_device->handle; sas_address_parent = sas_device->sas_address_parent; sas_address = sas_device->sas_address; list_move_tail(&sas_device->list, &ioc->sas_device_list); spin_unlock_irqrestore(&ioc->sas_device_lock, flags); if (!mpt3sas_transport_port_add(ioc, handle, sas_address_parent)) { _scsih_sas_device_remove(ioc, sas_device); } else if (!sas_device->starget) { if (!ioc->is_driver_loading) mpt3sas_transport_port_remove(ioc, sas_address, sas_address_parent); _scsih_sas_device_remove(ioc, sas_device); } } } /** * _scsih_probe_raid - reporting raid volumes to scsi-ml * @ioc: per adapter object * * Called during initial loading of the driver. */ static void _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc) { struct _raid_device *raid_device, *raid_next; int rc; list_for_each_entry_safe(raid_device, raid_next, &ioc->raid_device_list, list) { if (raid_device->starget) continue; rc = scsi_add_device(ioc->shost, RAID_CHANNEL, raid_device->id, 0); if (rc) _scsih_raid_device_remove(ioc, raid_device); } } /** * _scsih_probe_sas - reporting sas devices to sas transport * @ioc: per adapter object * * Called during initial loading of the driver. */ static void _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc) { struct _sas_device *sas_device, *next; unsigned long flags; /* SAS Device List */ list_for_each_entry_safe(sas_device, next, &ioc->sas_device_init_list, list) { if (!mpt3sas_transport_port_add(ioc, sas_device->handle, sas_device->sas_address_parent)) { list_del(&sas_device->list); kfree(sas_device); continue; } else if (!sas_device->starget) { /* * When async scanning is enabled, it's not possible to * remove devices while scanning is turned on due to an * oops in scsi_sysfs_add_sdev()->add_device()-> * sysfs_addrm_start() */ if (!ioc->is_driver_loading) mpt3sas_transport_port_remove(ioc, sas_device->sas_address, sas_device->sas_address_parent); list_del(&sas_device->list); kfree(sas_device); continue; } spin_lock_irqsave(&ioc->sas_device_lock, flags); list_move_tail(&sas_device->list, &ioc->sas_device_list); spin_unlock_irqrestore(&ioc->sas_device_lock, flags); } } /** * _scsih_probe_devices - probing for devices * @ioc: per adapter object * * Called during initial loading of the driver.
*/ static void _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc) { u16 volume_mapping_flags; if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR)) return; /* return when IOC doesn't support initiator mode */ _scsih_probe_boot_devices(ioc); if (ioc->ir_firmware) { volume_mapping_flags = le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) & MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE; if (volume_mapping_flags == MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) { _scsih_probe_raid(ioc); _scsih_probe_sas(ioc); } else { _scsih_probe_sas(ioc); _scsih_probe_raid(ioc); } } else _scsih_probe_sas(ioc); } /** * _scsih_scan_start - scsi lld callback for .scan_start * @shost: SCSI host pointer * * The shost has the ability to discover targets on its own instead * of scanning the entire bus. In our implementation, we will kick off * firmware discovery. */ static void _scsih_scan_start(struct Scsi_Host *shost) { struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); int rc; if (diag_buffer_enable != -1 && diag_buffer_enable != 0) mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable); if (disable_discovery > 0) return; ioc->start_scan = 1; rc = mpt3sas_port_enable(ioc); if (rc != 0) pr_info(MPT3SAS_FMT "port enable: FAILED\n", ioc->name); } /** * _scsih_scan_finished - scsi lld callback for .scan_finished * @shost: SCSI host pointer * @time: elapsed time of the scan in jiffies * * This function will be called periodically until it returns 1 with the * scsi_host and the elapsed time of the scan in jiffies. In our implementation, * we wait for firmware discovery to complete, then return 1. */ static int _scsih_scan_finished(struct Scsi_Host *shost, unsigned long time) { struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); if (disable_discovery > 0) { ioc->is_driver_loading = 0; ioc->wait_for_discovery_to_complete = 0; return 1; } if (time >= (300 * HZ)) { ioc->base_cmds.status = MPT3_CMD_NOT_USED; pr_info(MPT3SAS_FMT "port enable: FAILED with timeout (timeout=300s)\n", ioc->name); ioc->is_driver_loading = 0; return 1; } if (ioc->start_scan) return 0; if (ioc->start_scan_failed) { pr_info(MPT3SAS_FMT "port enable: FAILED with (ioc_status=0x%08x)\n", ioc->name, ioc->start_scan_failed); ioc->is_driver_loading = 0; ioc->wait_for_discovery_to_complete = 0; ioc->remove_host = 1; return 1; } pr_info(MPT3SAS_FMT "port enable: SUCCESS\n", ioc->name); ioc->base_cmds.status = MPT3_CMD_NOT_USED; if (ioc->wait_for_discovery_to_complete) { ioc->wait_for_discovery_to_complete = 0; _scsih_probe_devices(ioc); } mpt3sas_base_start_watchdog(ioc); ioc->is_driver_loading = 0; return 1; } /** * _scsih_probe - attach and add scsi host * @pdev: PCI device struct * @id: pci device id * * Returns 0 success, anything else error.
*/ static int _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct MPT3SAS_ADAPTER *ioc; struct Scsi_Host *shost; shost = scsi_host_alloc(&scsih_driver_template, sizeof(struct MPT3SAS_ADAPTER)); if (!shost) return -ENODEV; /* init local params */ ioc = shost_priv(shost); memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER)); INIT_LIST_HEAD(&ioc->list); list_add_tail(&ioc->list, &mpt3sas_ioc_list); ioc->shost = shost; ioc->id = mpt_ids++; sprintf(ioc->name, "%s%d", MPT3SAS_DRIVER_NAME, ioc->id); ioc->pdev = pdev; ioc->scsi_io_cb_idx = scsi_io_cb_idx; ioc->tm_cb_idx = tm_cb_idx; ioc->ctl_cb_idx = ctl_cb_idx; ioc->base_cb_idx = base_cb_idx; ioc->port_enable_cb_idx = port_enable_cb_idx; ioc->transport_cb_idx = transport_cb_idx; ioc->scsih_cb_idx = scsih_cb_idx; ioc->config_cb_idx = config_cb_idx; ioc->tm_tr_cb_idx = tm_tr_cb_idx; ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx; ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx; ioc->logging_level = logging_level; ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds; /* misc semaphores and spin locks */ mutex_init(&ioc->reset_in_progress_mutex); spin_lock_init(&ioc->ioc_reset_in_progress_lock); spin_lock_init(&ioc->scsi_lookup_lock); spin_lock_init(&ioc->sas_device_lock); spin_lock_init(&ioc->sas_node_lock); spin_lock_init(&ioc->fw_event_lock); spin_lock_init(&ioc->raid_device_lock); spin_lock_init(&ioc->diag_trigger_lock); INIT_LIST_HEAD(&ioc->sas_device_list); INIT_LIST_HEAD(&ioc->sas_device_init_list); INIT_LIST_HEAD(&ioc->sas_expander_list); INIT_LIST_HEAD(&ioc->fw_event_list); INIT_LIST_HEAD(&ioc->raid_device_list); INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list); INIT_LIST_HEAD(&ioc->delayed_tr_list); INIT_LIST_HEAD(&ioc->delayed_tr_volume_list); /* init shost parameters */ shost->max_cmd_len = 32; shost->max_lun = max_lun; shost->transportt = mpt3sas_transport_template; shost->unique_id = ioc->id; if (max_sectors != 0xFFFF) { if (max_sectors < 64) { shost->max_sectors = 64; pr_warn(MPT3SAS_FMT "Invalid value %d passed " \ "for max_sectors, range is 64 to 32767. Assigning " "value of 64.\n", ioc->name, max_sectors); } else if (max_sectors > 32767) { shost->max_sectors = 32767; pr_warn(MPT3SAS_FMT "Invalid value %d passed " \ "for max_sectors, range is 64 to 32767. 
Assigning " "default value of 32767.\n", ioc->name, max_sectors); } else { shost->max_sectors = max_sectors & 0xFFFE; pr_info(MPT3SAS_FMT "The max_sectors value is set to %d\n", ioc->name, shost->max_sectors); } } if ((scsi_add_host(shost, &pdev->dev))) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); list_del(&ioc->list); goto out_add_shost_fail; } /* register EEDP capabilities with SCSI layer */ if (prot_mask > 0) scsi_host_set_prot(shost, prot_mask); else scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | SHOST_DIF_TYPE3_PROTECTION); scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); /* event thread */ snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), "fw_event%d", ioc->id); ioc->firmware_event_thread = create_singlethread_workqueue( ioc->firmware_event_name); if (!ioc->firmware_event_thread) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); goto out_thread_fail; } ioc->is_driver_loading = 1; if ((mpt3sas_base_attach(ioc))) { pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); goto out_attach_fail; } scsi_scan_host(shost); return 0; out_attach_fail: destroy_workqueue(ioc->firmware_event_thread); out_thread_fail: list_del(&ioc->list); scsi_remove_host(shost); out_add_shost_fail: scsi_host_put(shost); return -ENODEV; } #ifdef CONFIG_PM /** * _scsih_suspend - power management suspend main entry point * @pdev: PCI device struct * @state: PM state change to (usually PCI_D3) * * Returns 0 success, anything else error. */ static int _scsih_suspend(struct pci_dev *pdev, pm_message_t state) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); pci_power_t device_state; mpt3sas_base_stop_watchdog(ioc); flush_scheduled_work(); scsi_block_requests(shost); device_state = pci_choose_state(pdev, state); pr_info(MPT3SAS_FMT "pdev=0x%p, slot=%s, entering operating state [D%d]\n", ioc->name, pdev, pci_name(pdev), device_state); pci_save_state(pdev); mpt3sas_base_free_resources(ioc); pci_set_power_state(pdev, device_state); return 0; } /** * _scsih_resume - power management resume main entry point * @pdev: PCI device struct * * Returns 0 success, anything else error. */ static int _scsih_resume(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); pci_power_t device_state = pdev->current_state; int r; pr_info(MPT3SAS_FMT "pdev=0x%p, slot=%s, previous operating state [D%d]\n", ioc->name, pdev, pci_name(pdev), device_state); pci_set_power_state(pdev, PCI_D0); pci_enable_wake(pdev, PCI_D0, 0); pci_restore_state(pdev); ioc->pdev = pdev; r = mpt3sas_base_map_resources(ioc); if (r) return r; mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, SOFT_RESET); scsi_unblock_requests(shost); mpt3sas_base_start_watchdog(ioc); return 0; } #endif /* CONFIG_PM */ /** * _scsih_pci_error_detected - Called when a PCI error is detected. * @pdev: PCI device struct * @state: PCI channel state * * Description: Called when a PCI error is detected. 
* * Return value: * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT */ static pci_ers_result_t _scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); pr_info(MPT3SAS_FMT "PCI error: detected callback, state(%d)!!\n", ioc->name, state); switch (state) { case pci_channel_io_normal: return PCI_ERS_RESULT_CAN_RECOVER; case pci_channel_io_frozen: /* Fatal error, prepare for slot reset */ ioc->pci_error_recovery = 1; scsi_block_requests(ioc->shost); mpt3sas_base_stop_watchdog(ioc); mpt3sas_base_free_resources(ioc); return PCI_ERS_RESULT_NEED_RESET; case pci_channel_io_perm_failure: /* Permanent error, prepare for device removal */ ioc->pci_error_recovery = 1; mpt3sas_base_stop_watchdog(ioc); _scsih_flush_running_cmds(ioc); return PCI_ERS_RESULT_DISCONNECT; } return PCI_ERS_RESULT_NEED_RESET; } /** * _scsih_pci_slot_reset - Called when PCI slot has been reset. * @pdev: PCI device struct * * Description: This routine is called by the pci error recovery * code after the PCI slot has been reset, just before we * should resume normal operations. */ static pci_ers_result_t _scsih_pci_slot_reset(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); int rc; pr_info(MPT3SAS_FMT "PCI error: slot reset callback!!\n", ioc->name); ioc->pci_error_recovery = 0; ioc->pdev = pdev; pci_restore_state(pdev); rc = mpt3sas_base_map_resources(ioc); if (rc) return PCI_ERS_RESULT_DISCONNECT; rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, FORCE_BIG_HAMMER); pr_warn(MPT3SAS_FMT "hard reset: %s\n", ioc->name, (rc == 0) ? "success" : "failed"); if (!rc) return PCI_ERS_RESULT_RECOVERED; else return PCI_ERS_RESULT_DISCONNECT; } /** * _scsih_pci_resume() - resume normal ops after PCI reset * @pdev: pointer to PCI device * * Called when the error recovery driver tells us that it's * OK to resume normal operation. Use completion to allow * halted scsi ops to resume. */ static void _scsih_pci_resume(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); pr_info(MPT3SAS_FMT "PCI error: resume callback!!\n", ioc->name); pci_cleanup_aer_uncorrect_error_status(pdev); mpt3sas_base_start_watchdog(ioc); scsi_unblock_requests(ioc->shost); } /** * _scsih_pci_mmio_enabled - Enable MMIO and dump debug registers * @pdev: pointer to PCI device */ static pci_ers_result_t _scsih_pci_mmio_enabled(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); pr_info(MPT3SAS_FMT "PCI error: mmio enabled callback!!\n", ioc->name); /* TODO - dump whatever for debugging purposes */ /* Request a slot reset.
*/ return PCI_ERS_RESULT_NEED_RESET; } /* raid transport support */ static struct raid_function_template mpt3sas_raid_functions = { .cookie = &scsih_driver_template, .is_raid = _scsih_is_raid, .get_resync = _scsih_get_resync, .get_state = _scsih_get_state, }; static struct pci_error_handlers _scsih_err_handler = { .error_detected = _scsih_pci_error_detected, .mmio_enabled = _scsih_pci_mmio_enabled, .slot_reset = _scsih_pci_slot_reset, .resume = _scsih_pci_resume, }; static struct pci_driver scsih_driver = { .name = MPT3SAS_DRIVER_NAME, .id_table = scsih_pci_table, .probe = _scsih_probe, .remove = _scsih_remove, .shutdown = _scsih_shutdown, .err_handler = &_scsih_err_handler, #ifdef CONFIG_PM .suspend = _scsih_suspend, .resume = _scsih_resume, #endif }; /** * _scsih_init - main entry point for this driver. * * Returns 0 success, anything else error. */ static int __init _scsih_init(void) { int error; mpt_ids = 0; pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME, MPT3SAS_DRIVER_VERSION); mpt3sas_transport_template = sas_attach_transport(&mpt3sas_transport_functions); if (!mpt3sas_transport_template) return -ENODEV; /* raid transport support */ mpt3sas_raid_template = raid_class_attach(&mpt3sas_raid_functions); if (!mpt3sas_raid_template) { sas_release_transport(mpt3sas_transport_template); return -ENODEV; } mpt3sas_base_initialize_callback_handler(); /* queuecommand callback handler */ scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done); /* task management callback handler */ tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done); /* base internal commands callback handler */ base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done); port_enable_cb_idx = mpt3sas_base_register_callback_handler( mpt3sas_port_enable_done); /* transport internal commands callback handler */ transport_cb_idx = mpt3sas_base_register_callback_handler( mpt3sas_transport_done); /* scsih internal commands callback handler */ scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done); /* configuration page API internal commands callback handler */ config_cb_idx = mpt3sas_base_register_callback_handler( mpt3sas_config_done); /* ctl module callback handler */ ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done); tm_tr_cb_idx = mpt3sas_base_register_callback_handler( _scsih_tm_tr_complete); tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler( _scsih_tm_volume_tr_complete); tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler( _scsih_sas_control_complete); mpt3sas_ctl_init(); error = pci_register_driver(&scsih_driver); if (error) { /* raid transport support */ raid_class_release(mpt3sas_raid_template); sas_release_transport(mpt3sas_transport_template); } return error; } /** * _scsih_exit - exit point for this driver (when it is a module). * * Return nothing.
*/ static void __exit _scsih_exit(void) { pr_info("mpt3sas version %s unloading\n", MPT3SAS_DRIVER_VERSION); mpt3sas_ctl_exit(); pci_unregister_driver(&scsih_driver); mpt3sas_base_release_callback_handler(scsi_io_cb_idx); mpt3sas_base_release_callback_handler(tm_cb_idx); mpt3sas_base_release_callback_handler(base_cb_idx); mpt3sas_base_release_callback_handler(port_enable_cb_idx); mpt3sas_base_release_callback_handler(transport_cb_idx); mpt3sas_base_release_callback_handler(scsih_cb_idx); mpt3sas_base_release_callback_handler(config_cb_idx); mpt3sas_base_release_callback_handler(ctl_cb_idx); mpt3sas_base_release_callback_handler(tm_tr_cb_idx); mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx); mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx); /* raid transport support */ raid_class_release(mpt3sas_raid_template); sas_release_transport(mpt3sas_transport_template); } module_init(_scsih_init); module_exit(_scsih_exit);
gpl-2.0
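A note on the pattern in the file above: mpt3sas_scsih_event_callback() runs at ISR time, so it only copies the event data out of the reply frame and queues a work item; _mpt3sas_fw_work() then processes it in user context. Below is a minimal, self-contained sketch of that defer-to-workqueue idiom. The demo_* names are hypothetical and not part of the driver; assume demo_wq was created with create_singlethread_workqueue() at load time, just as the driver does for its fw_event thread.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_event {
	struct work_struct work;
	u16 event;		/* payload copied out of the reply frame */
};

static struct workqueue_struct *demo_wq;

/* runs later in process context, where sleeping is allowed */
static void demo_event_work(struct work_struct *work)
{
	struct demo_event *ev = container_of(work, struct demo_event, work);

	pr_info("processing event 0x%04x\n", ev->event);
	kfree(ev);
}

/* called at ISR time: may not sleep, hence GFP_ATOMIC */
static int demo_event_callback(u16 event)
{
	struct demo_event *ev = kzalloc(sizeof(*ev), GFP_ATOMIC);

	if (!ev)
		return -ENOMEM;
	ev->event = event;
	INIT_WORK(&ev->work, demo_event_work);
	queue_work(demo_wq, &ev->work);
	return 0;
}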
TheTypoMaster/SM-G360T1_kernel
drivers/pcmcia/cardbus.c
2355
2995
/* * cardbus.c -- 16-bit PCMCIA core support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * The initial developer of the original code is David A. Hinds * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds * are Copyright (C) 1999 David A. Hinds. All Rights Reserved. * * (C) 1999 David A. Hinds */ /* * Cardbus handling has been re-written to be more of a PCI bridge thing, * and the PCI code basically does all the resource handling. * * Linus, Jan 2000 */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <pcmcia/ss.h> static void cardbus_config_irq_and_cls(struct pci_bus *bus, int irq) { struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { u8 irq_pin; /* * Since there is only one interrupt available to * CardBus devices, all devices downstream of this * device must be using this IRQ. */ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq_pin); if (irq_pin) { dev->irq = irq; pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); } /* * Some controllers transfer very slowly with 0 CLS. * Configure it. This may fail as CLS configuration * is mandatory only for MWI. */ pci_set_cacheline_size(dev); if (dev->subordinate) cardbus_config_irq_and_cls(dev->subordinate, irq); } } /** * cb_alloc() - add CardBus device * @s: the pcmcia_socket where the CardBus device is located * * cb_alloc() allocates the kernel data structures for a Cardbus device * and handles the lowest level PCI device setup issues. */ int __ref cb_alloc(struct pcmcia_socket *s) { struct pci_bus *bus = s->cb_dev->subordinate; struct pci_dev *dev; unsigned int max, pass; s->functions = pci_scan_slot(bus, PCI_DEVFN(0, 0)); pci_fixup_cardbus(bus); max = bus->busn_res.start; for (pass = 0; pass < 2; pass++) list_for_each_entry(dev, &bus->devices, bus_list) if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) max = pci_scan_bridge(bus, dev, max, pass); /* * Size all resources below the CardBus controller. */ pci_bus_size_bridges(bus); pci_bus_assign_resources(bus); cardbus_config_irq_and_cls(bus, s->pci_irq); /* socket specific tune function */ if (s->tune_bridge) s->tune_bridge(s, bus); pci_enable_bridges(bus); pci_bus_add_devices(bus); return 0; } /** * cb_free() - remove CardBus device * @s: the pcmcia_socket where the CardBus device was located * * cb_free() handles the lowest level PCI device cleanup. */ void cb_free(struct pcmcia_socket *s) { struct pci_dev *bridge, *dev, *tmp; struct pci_bus *bus; bridge = s->cb_dev; if (!bridge) return; bus = bridge->subordinate; if (!bus) return; list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) pci_stop_and_remove_bus_device(dev); }
gpl-2.0
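cardbus_config_irq_and_cls() above propagates the single CardBus IRQ by recursing into dev->subordinate for every bridge it meets. A hedged sketch of that traversal in isolation follows; demo_walk_bus is a hypothetical helper, not part of the PCMCIA core.

#include <linux/kernel.h>
#include <linux/pci.h>

/* depth-first walk of a PCI bus tree, the same shape as
 * cardbus_config_irq_and_cls(): visit each device on the bus,
 * then descend into the child bus behind any bridge */
static void demo_walk_bus(struct pci_bus *bus, int depth)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		pr_info("%*s%s\n", depth, "", pci_name(dev));
		if (dev->subordinate)	/* bridge: recurse into child bus */
			demo_walk_bus(dev->subordinate, depth + 2);
	}
}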
VentureROM-L/android_kernel_moto_shamu
drivers/net/wireless/brcm80211/brcmutil/utils.c
3123
6532
/* * Copyright (c) 2010 Broadcom Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/netdevice.h> #include <linux/module.h> #include <brcmu_utils.h> MODULE_AUTHOR("Broadcom Corporation"); MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver utilities."); MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards"); MODULE_LICENSE("Dual BSD/GPL"); struct sk_buff *brcmu_pkt_buf_get_skb(uint len) { struct sk_buff *skb; skb = dev_alloc_skb(len); if (skb) { skb_put(skb, len); skb->priority = 0; } return skb; } EXPORT_SYMBOL(brcmu_pkt_buf_get_skb); /* Free the driver packet. Free the tag if present */ void brcmu_pkt_buf_free_skb(struct sk_buff *skb) { if (!skb) return; WARN_ON(skb->next); dev_kfree_skb_any(skb); } EXPORT_SYMBOL(brcmu_pkt_buf_free_skb); /* * osl multiple-precedence packet queue * hi_prec is always >= the number of the highest non-empty precedence */ struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec, struct sk_buff *p) { struct sk_buff_head *q; if (pktq_full(pq) || pktq_pfull(pq, prec)) return NULL; q = &pq->q[prec].skblist; skb_queue_tail(q, p); pq->len++; if (pq->hi_prec < prec) pq->hi_prec = (u8) prec; return p; } EXPORT_SYMBOL(brcmu_pktq_penq); struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec, struct sk_buff *p) { struct sk_buff_head *q; if (pktq_full(pq) || pktq_pfull(pq, prec)) return NULL; q = &pq->q[prec].skblist; skb_queue_head(q, p); pq->len++; if (pq->hi_prec < prec) pq->hi_prec = (u8) prec; return p; } EXPORT_SYMBOL(brcmu_pktq_penq_head); struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec) { struct sk_buff_head *q; struct sk_buff *p; q = &pq->q[prec].skblist; p = skb_dequeue(q); if (p == NULL) return NULL; pq->len--; return p; } EXPORT_SYMBOL(brcmu_pktq_pdeq); /* * precedence based dequeue with match function. Passing a NULL pointer * for the match function parameter is considered to be a wildcard so * any packet on the queue is returned. In that case it is no different * from brcmu_pktq_pdeq() above. 
*/ struct sk_buff *brcmu_pktq_pdeq_match(struct pktq *pq, int prec, bool (*match_fn)(struct sk_buff *skb, void *arg), void *arg) { struct sk_buff_head *q; struct sk_buff *p, *next; q = &pq->q[prec].skblist; skb_queue_walk_safe(q, p, next) { if (match_fn == NULL || match_fn(p, arg)) { skb_unlink(p, q); pq->len--; return p; } } return NULL; } EXPORT_SYMBOL(brcmu_pktq_pdeq_match); struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec) { struct sk_buff_head *q; struct sk_buff *p; q = &pq->q[prec].skblist; p = skb_dequeue_tail(q); if (p == NULL) return NULL; pq->len--; return p; } EXPORT_SYMBOL(brcmu_pktq_pdeq_tail); void brcmu_pktq_pflush(struct pktq *pq, int prec, bool dir, bool (*fn)(struct sk_buff *, void *), void *arg) { struct sk_buff_head *q; struct sk_buff *p, *next; q = &pq->q[prec].skblist; skb_queue_walk_safe(q, p, next) { if (fn == NULL || (*fn) (p, arg)) { skb_unlink(p, q); brcmu_pkt_buf_free_skb(p); pq->len--; } } } EXPORT_SYMBOL(brcmu_pktq_pflush); void brcmu_pktq_flush(struct pktq *pq, bool dir, bool (*fn)(struct sk_buff *, void *), void *arg) { int prec; for (prec = 0; prec < pq->num_prec; prec++) brcmu_pktq_pflush(pq, prec, dir, fn, arg); } EXPORT_SYMBOL(brcmu_pktq_flush); void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len) { int prec; /* pq is variable size; only zero out what's requested */ memset(pq, 0, offsetof(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec)); pq->num_prec = (u16) num_prec; pq->max = (u16) max_len; for (prec = 0; prec < num_prec; prec++) { pq->q[prec].max = pq->max; skb_queue_head_init(&pq->q[prec].skblist); } } EXPORT_SYMBOL(brcmu_pktq_init); struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out) { int prec; if (pq->len == 0) return NULL; for (prec = 0; prec < pq->hi_prec; prec++) if (!skb_queue_empty(&pq->q[prec].skblist)) break; if (prec_out) *prec_out = prec; return skb_peek_tail(&pq->q[prec].skblist); } EXPORT_SYMBOL(brcmu_pktq_peek_tail); /* Return sum of lengths of a specific set of precedences */ int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp) { int prec, len; len = 0; for (prec = 0; prec <= pq->hi_prec; prec++) if (prec_bmp & (1 << prec)) len += pq->q[prec].skblist.qlen; return len; } EXPORT_SYMBOL(brcmu_pktq_mlen); /* Priority dequeue from a specific set of precedences */ struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out) { struct sk_buff_head *q; struct sk_buff *p; int prec; if (pq->len == 0) return NULL; while ((prec = pq->hi_prec) > 0 && skb_queue_empty(&pq->q[prec].skblist)) pq->hi_prec--; while ((prec_bmp & (1 << prec)) == 0 || skb_queue_empty(&pq->q[prec].skblist)) if (prec-- == 0) return NULL; q = &pq->q[prec].skblist; p = skb_dequeue(q); if (p == NULL) return NULL; pq->len--; if (prec_out) *prec_out = prec; return p; } EXPORT_SYMBOL(brcmu_pktq_mdeq); #if defined(DEBUG) /* pretty hex print a pkt buffer chain */ void brcmu_prpkt(const char *msg, struct sk_buff *p0) { struct sk_buff *p; if (msg && (msg[0] != '\0')) pr_debug("%s:\n", msg); for (p = p0; p; p = p->next) print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, p->data, p->len); } EXPORT_SYMBOL(brcmu_prpkt); void brcmu_dbg_hex_dump(const void *data, size_t size, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; pr_debug("%pV", &vaf); va_end(args); print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, data, size); } EXPORT_SYMBOL(brcmu_dbg_hex_dump); #endif /* defined(DEBUG) */
gpl-2.0
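A hypothetical usage sketch of the precedence-queue API exported above, assuming brcmu_utils.h is available for struct pktq and the queue fits where it is allocated; the precedence count and depth are arbitrary demo values.

#include <linux/skbuff.h>
#include <brcmu_utils.h>

static void demo_pktq_usage(void)
{
	struct pktq q;
	struct sk_buff *skb;

	brcmu_pktq_init(&q, 4, 128);		/* 4 precedences, 128 pkts max */

	skb = brcmu_pkt_buf_get_skb(64);	/* 64-byte packet */
	if (skb && !brcmu_pktq_penq(&q, 2, skb))
		brcmu_pkt_buf_free_skb(skb);	/* queue full: drop it */

	skb = brcmu_pktq_pdeq(&q, 2);		/* dequeue from precedence 2 */
	brcmu_pkt_buf_free_skb(skb);		/* NULL-safe free */

	brcmu_pktq_flush(&q, true, NULL, NULL);	/* NULL match fn: drop all */
}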
treznorx/TF201-9.4.2.7
arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
3891
4336
/* * MPC86xx HPCN board specific routines * * Recode: ZHANG WEI <wei.zhang@freescale.com> * Initial author: Xianghua Xiao <x.xiao@freescale.com> * * Copyright 2006 Freescale Semiconductor Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/kdev_t.h> #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/of_platform.h> #include <linux/memblock.h> #include <asm/system.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <asm/prom.h> #include <mm/mmu_decl.h> #include <asm/udbg.h> #include <asm/swiotlb.h> #include <asm/mpic.h> #include <sysdev/fsl_pci.h> #include <sysdev/fsl_soc.h> #include "mpc86xx.h" #undef DEBUG #ifdef DEBUG #define DBG(fmt...) do { printk(KERN_ERR fmt); } while(0) #else #define DBG(fmt...) do { } while(0) #endif #ifdef CONFIG_PCI extern int uli_exclude_device(struct pci_controller *hose, u_char bus, u_char devfn); static int mpc86xx_exclude_device(struct pci_controller *hose, u_char bus, u_char devfn) { struct device_node* node; struct resource rsrc; node = hose->dn; of_address_to_resource(node, 0, &rsrc); if ((rsrc.start & 0xfffff) == 0x8000) { return uli_exclude_device(hose, bus, devfn); } return PCIBIOS_SUCCESSFUL; } #endif /* CONFIG_PCI */ static void __init mpc86xx_hpcn_setup_arch(void) { #ifdef CONFIG_PCI struct device_node *np; struct pci_controller *hose; #endif dma_addr_t max = 0xffffffff; if (ppc_md.progress) ppc_md.progress("mpc86xx_hpcn_setup_arch()", 0); #ifdef CONFIG_PCI for_each_compatible_node(np, "pci", "fsl,mpc8641-pcie") { struct resource rsrc; of_address_to_resource(np, 0, &rsrc); if ((rsrc.start & 0xfffff) == 0x8000) fsl_add_bridge(np, 1); else fsl_add_bridge(np, 0); hose = pci_find_hose_for_OF_device(np); max = min(max, hose->dma_window_base_cur + hose->dma_window_size); } ppc_md.pci_exclude_device = mpc86xx_exclude_device; #endif printk("MPC86xx HPCN board from Freescale Semiconductor\n"); #ifdef CONFIG_SMP mpc86xx_smp_init(); #endif #ifdef CONFIG_SWIOTLB if (memblock_end_of_DRAM() > max) { ppc_swiotlb_enable = 1; set_pci_dma_ops(&swiotlb_dma_ops); ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb; } #endif } static void mpc86xx_hpcn_show_cpuinfo(struct seq_file *m) { uint svid = mfspr(SPRN_SVR); seq_printf(m, "Vendor\t\t: Freescale Semiconductor\n"); seq_printf(m, "SVR\t\t: 0x%x\n", svid); } /* * Called very early, device-tree isn't unflattened */ static int __init mpc86xx_hpcn_probe(void) { unsigned long root = of_get_flat_dt_root(); if (of_flat_dt_is_compatible(root, "fsl,mpc8641hpcn")) return 1; /* Looks good */ /* Be nice and don't give silent boot death. Delete this in 2.6.27 */ if (of_flat_dt_is_compatible(root, "mpc86xx")) { pr_warning("WARNING: your dts/dtb is old. 
You must update before the next kernel release\n"); return 1; } return 0; } static long __init mpc86xx_time_init(void) { unsigned int temp; /* Set the time base to zero */ mtspr(SPRN_TBWL, 0); mtspr(SPRN_TBWU, 0); temp = mfspr(SPRN_HID0); temp |= HID0_TBEN; mtspr(SPRN_HID0, temp); asm volatile("isync"); return 0; } static __initdata struct of_device_id of_bus_ids[] = { { .compatible = "simple-bus", }, { .compatible = "fsl,rapidio-delta", }, { .compatible = "gianfar", }, {}, }; static int __init declare_of_platform_devices(void) { of_platform_bus_probe(NULL, of_bus_ids, NULL); return 0; } machine_device_initcall(mpc86xx_hpcn, declare_of_platform_devices); machine_arch_initcall(mpc86xx_hpcn, swiotlb_setup_bus_notifier); define_machine(mpc86xx_hpcn) { .name = "MPC86xx HPCN", .probe = mpc86xx_hpcn_probe, .setup_arch = mpc86xx_hpcn_setup_arch, .init_IRQ = mpc86xx_init_irq, .show_cpuinfo = mpc86xx_hpcn_show_cpuinfo, .get_irq = mpic_get_irq, .restart = fsl_rstcr_restart, .time_init = mpc86xx_time_init, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, #endif };
gpl-2.0
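The (rsrc.start & 0xfffff) == 0x8000 test in the file above is how the board code tells the PCIe controller with the ULI companion chip behind it apart from the other one. Isolated into a hypothetical helper, using the same includes as the file above:

/* true if this host-bridge node is the one whose registers sit at
 * offset 0x8000 in their window -- the ULI-attached controller on
 * the MPC8641 HPCN board */
static bool demo_is_uli_bridge(struct device_node *node)
{
	struct resource rsrc;

	if (of_address_to_resource(node, 0, &rsrc))
		return false;		/* no usable reg property */
	return (rsrc.start & 0xfffff) == 0x8000;
}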
ztemt/NX507J_Lollipop_kernel
arch/x86/boot/compressed/mkpiggy.c
7219
2703
/* ----------------------------------------------------------------------- * * * Copyright (C) 2009 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * H. Peter Anvin <hpa@linux.intel.com> * * ----------------------------------------------------------------------- */ /* * Compute the desired load offset from a compressed program; outputs * a small assembly wrapper with the appropriate symbols defined. */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <inttypes.h> #include <tools/le_byteshift.h> int main(int argc, char *argv[]) { uint32_t olen; long ilen; unsigned long offs; FILE *f; if (argc < 2) { fprintf(stderr, "Usage: %s compressed_file\n", argv[0]); return 1; } /* Get the information for the compressed kernel image first */ f = fopen(argv[1], "r"); if (!f) { perror(argv[1]); return 1; } if (fseek(f, -4L, SEEK_END)) { perror(argv[1]); return 1; } if (fread(&olen, sizeof(olen), 1, f) != 1) { perror(argv[1]); return 1; } ilen = ftell(f); olen = get_unaligned_le32(&olen); fclose(f); /* * Now we have the input (compressed) and output (uncompressed) * sizes, compute the necessary decompression offset... */ offs = (olen > ilen) ? olen - ilen : 0; offs += olen >> 12; /* Add 8 bytes for each 32K block */ offs += 64*1024 + 128; /* Add 64K + 128 bytes slack */ offs = (offs+4095) & ~4095; /* Round to a 4K boundary */ printf(".section \".rodata..compressed\",\"a\",@progbits\n"); printf(".globl z_input_len\n"); printf("z_input_len = %lu\n", ilen); printf(".globl z_output_len\n"); printf("z_output_len = %lu\n", (unsigned long)olen); printf(".globl z_extract_offset\n"); printf("z_extract_offset = 0x%lx\n", offs); /* z_extract_offset_negative allows simplification of head_32.S */ printf(".globl z_extract_offset_negative\n"); printf("z_extract_offset_negative = -0x%lx\n", offs); printf(".globl input_data, input_data_end\n"); printf("input_data:\n"); printf(".incbin \"%s\"\n", argv[1]); printf("input_data_end:\n"); return 0; }
gpl-2.0
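The offset arithmetic in mkpiggy.c is easy to check in isolation. Below is a stand-alone sketch with made-up sizes (a hypothetical 4 MiB kernel compressed to 1.8 MiB); it mirrors the tool's formula line for line.

#include <stdio.h>

int main(void)
{
	unsigned long ilen = 1800UL * 1024;	/* compressed size (made up) */
	unsigned long olen = 4096UL * 1024;	/* uncompressed size (made up) */
	unsigned long offs;

	offs = (olen > ilen) ? olen - ilen : 0;	/* growth during decompression */
	offs += olen >> 12;		/* 8 bytes of overhead per 32K block */
	offs += 64 * 1024 + 128;	/* fixed decompressor slack */
	offs = (offs + 4095) & ~4095UL;	/* round up to a 4K boundary */

	printf("z_extract_offset = 0x%lx\n", offs);
	return 0;
}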
AccentureMobilityServices/kernel
drivers/media/video/hdpvr/hdpvr-control.c
8243
5043
/* * Hauppauge HD PVR USB driver - video 4 linux 2 interface * * Copyright (C) 2008 Janne Grunau (j@jannau.net) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2. * */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/mutex.h> #include <linux/videodev2.h> #include <media/v4l2-common.h> #include "hdpvr.h" int hdpvr_config_call(struct hdpvr_device *dev, uint value, u8 valbuf) { int ret; char request_type = 0x38, snd_request = 0x01; mutex_lock(&dev->usbc_mutex); dev->usbc_buf[0] = valbuf; ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), snd_request, 0x00 | request_type, value, CTRL_DEFAULT_INDEX, dev->usbc_buf, 1, 10000); mutex_unlock(&dev->usbc_mutex); v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "config call request for value 0x%x returned %d\n", value, ret); return ret < 0 ? ret : 0; } struct hdpvr_video_info *get_video_info(struct hdpvr_device *dev) { struct hdpvr_video_info *vidinf = NULL; #ifdef HDPVR_DEBUG char print_buf[15]; #endif int ret; vidinf = kzalloc(sizeof(struct hdpvr_video_info), GFP_KERNEL); if (!vidinf) { v4l2_err(&dev->v4l2_dev, "out of memory\n"); goto err; } mutex_lock(&dev->usbc_mutex); ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), 0x81, 0x80 | 0x38, 0x1400, 0x0003, dev->usbc_buf, 5, 1000); if (ret == 5) { vidinf->width = dev->usbc_buf[1] << 8 | dev->usbc_buf[0]; vidinf->height = dev->usbc_buf[3] << 8 | dev->usbc_buf[2]; vidinf->fps = dev->usbc_buf[4]; } #ifdef HDPVR_DEBUG if (hdpvr_debug & MSG_INFO) { hex_dump_to_buffer(dev->usbc_buf, 5, 16, 1, print_buf, sizeof(print_buf), 0); v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "get video info returned: %d, %s\n", ret, print_buf); } #endif mutex_unlock(&dev->usbc_mutex); if (!vidinf->width || !vidinf->height || !vidinf->fps) { kfree(vidinf); vidinf = NULL; } err: return vidinf; } int get_input_lines_info(struct hdpvr_device *dev) { #ifdef HDPVR_DEBUG char print_buf[9]; #endif int ret, lines; mutex_lock(&dev->usbc_mutex); ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), 0x81, 0x80 | 0x38, 0x1800, 0x0003, dev->usbc_buf, 3, 1000); #ifdef HDPVR_DEBUG if (hdpvr_debug & MSG_INFO) { hex_dump_to_buffer(dev->usbc_buf, 3, 16, 1, print_buf, sizeof(print_buf), 0); v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "get input lines info returned: %d, %s\n", ret, print_buf); } #endif lines = dev->usbc_buf[1] << 8 | dev->usbc_buf[0]; mutex_unlock(&dev->usbc_mutex); return lines; } int hdpvr_set_bitrate(struct hdpvr_device *dev) { int ret; mutex_lock(&dev->usbc_mutex); memset(dev->usbc_buf, 0, 4); dev->usbc_buf[0] = dev->options.bitrate; dev->usbc_buf[2] = dev->options.peak_bitrate; ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 0x01, 0x38, CTRL_BITRATE_VALUE, CTRL_DEFAULT_INDEX, dev->usbc_buf, 4, 1000); mutex_unlock(&dev->usbc_mutex); return ret; } int hdpvr_set_audio(struct hdpvr_device *dev, u8 input, enum v4l2_mpeg_audio_encoding codec) { int ret = 0; if (dev->flags & HDPVR_FLAG_AC3_CAP) { mutex_lock(&dev->usbc_mutex); memset(dev->usbc_buf, 0, 2); dev->usbc_buf[0] = input; if (codec == V4L2_MPEG_AUDIO_ENCODING_AAC) dev->usbc_buf[1] = 0; else if (codec == V4L2_MPEG_AUDIO_ENCODING_AC3) dev->usbc_buf[1] = 1; else { mutex_unlock(&dev->usbc_mutex); v4l2_err(&dev->v4l2_dev, "invalid audio codec %d\n", codec); ret = -EINVAL; goto 
error; } ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 0x01, 0x38, CTRL_AUDIO_INPUT_VALUE, CTRL_DEFAULT_INDEX, dev->usbc_buf, 2, 1000); mutex_unlock(&dev->usbc_mutex); if (ret == 2) ret = 0; } else ret = hdpvr_config_call(dev, CTRL_AUDIO_INPUT_VALUE, input); error: return ret; } int hdpvr_set_options(struct hdpvr_device *dev) { hdpvr_config_call(dev, CTRL_VIDEO_STD_TYPE, dev->options.video_std); hdpvr_config_call(dev, CTRL_VIDEO_INPUT_VALUE, dev->options.video_input+1); hdpvr_set_audio(dev, dev->options.audio_input+1, dev->options.audio_codec); hdpvr_set_bitrate(dev); hdpvr_config_call(dev, CTRL_BITRATE_MODE_VALUE, dev->options.bitrate_mode); hdpvr_config_call(dev, CTRL_GOP_MODE_VALUE, dev->options.gop_mode); hdpvr_config_call(dev, CTRL_BRIGHTNESS, dev->options.brightness); hdpvr_config_call(dev, CTRL_CONTRAST, dev->options.contrast); hdpvr_config_call(dev, CTRL_HUE, dev->options.hue); hdpvr_config_call(dev, CTRL_SATURATION, dev->options.saturation); hdpvr_config_call(dev, CTRL_SHARPNESS, dev->options.sharpness); return 0; }
gpl-2.0
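get_video_info() above reads a 5-byte reply over the control endpoint and decodes it little-endian by hand. The same decoding, pulled out into a hypothetical helper (demo_parse_vidinfo is not part of the driver):

/* buf[0..1] = width, buf[2..3] = height (both little-endian),
 * buf[4] = frame rate -- the layout get_video_info() decodes */
static void demo_parse_vidinfo(const u8 buf[5])
{
	u16 width  = buf[1] << 8 | buf[0];
	u16 height = buf[3] << 8 | buf[2];
	u8 fps = buf[4];

	pr_info("hdpvr input: %ux%u @ %u fps\n", width, height, fps);
}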
rastomanchik/android_kernel_xiaomi_armani
drivers/isdn/hardware/eicon/dqueue.c
9779
2173
/* $Id: dqueue.c,v 1.5 2003/04/12 21:40:49 schindler Exp $ * * Driver for Eicon DIVA Server ISDN cards. * User Mode IDI Interface * * Copyright 2000-2003 by Armin Schindler (mac@melware.de) * Copyright 2000-2003 Cytronics & Melware (info@melware.de) * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. */ #include "platform.h" #include "dqueue.h" int diva_data_q_init(diva_um_idi_data_queue_t *q, int max_length, int max_segments) { int i; q->max_length = max_length; q->segments = max_segments; for (i = 0; i < q->segments; i++) { q->data[i] = NULL; q->length[i] = 0; } q->read = q->write = q->count = q->segment_pending = 0; for (i = 0; i < q->segments; i++) { if (!(q->data[i] = diva_os_malloc(0, q->max_length))) { diva_data_q_finit(q); return (-1); } } return (0); } int diva_data_q_finit(diva_um_idi_data_queue_t *q) { int i; for (i = 0; i < q->segments; i++) { if (q->data[i]) { diva_os_free(0, q->data[i]); } q->data[i] = NULL; q->length[i] = 0; } q->read = q->write = q->count = q->segment_pending = 0; return (0); } int diva_data_q_get_max_length(const diva_um_idi_data_queue_t *q) { return (q->max_length); } void *diva_data_q_get_segment4write(diva_um_idi_data_queue_t *q) { if ((!q->segment_pending) && (q->count < q->segments)) { q->segment_pending = 1; return (q->data[q->write]); } return NULL; } void diva_data_q_ack_segment4write(diva_um_idi_data_queue_t *q, int length) { if (q->segment_pending) { q->length[q->write] = length; q->count++; q->write++; if (q->write >= q->segments) { q->write = 0; } q->segment_pending = 0; } } const void *diva_data_q_get_segment4read(const diva_um_idi_data_queue_t * q) { if (q->count) { return (q->data[q->read]); } return NULL; } int diva_data_q_get_segment_length(const diva_um_idi_data_queue_t *q) { return (q->length[q->read]); } void diva_data_q_ack_segment4read(diva_um_idi_data_queue_t *q) { if (q->count) { q->length[q->read] = 0; q->count--; q->read++; if (q->read >= q->segments) { q->read = 0; } } }
gpl-2.0
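A hypothetical round trip through the segment-queue API above: the producer claims a segment, fills it, and acks the byte count; the consumer later fetches it, reads the length, and acks the read so the slot is recycled. Assumes the queue was initialized with max_length of at least 16.

static void demo_dqueue_roundtrip(diva_um_idi_data_queue_t *q)
{
	void *wseg = diva_data_q_get_segment4write(q);
	const void *rseg;

	if (wseg) {
		memset(wseg, 0x5a, 16);			/* produce 16 bytes */
		diva_data_q_ack_segment4write(q, 16);	/* publish them */
	}

	rseg = diva_data_q_get_segment4read(q);
	if (rseg) {
		int len = diva_data_q_get_segment_length(q);

		/* ... consume len bytes starting at rseg ... */
		(void)len;
		diva_data_q_ack_segment4read(q);	/* recycle the slot */
	}
}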
davidevinavil/kernel_s500_cm10
drivers/video/via/via_aux.c
9779
2008
/* * Copyright 2011 Florian Tobias Schandinat <FlorianSchandinat@gmx.de> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; * either version 2, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even * the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * */ /* * infrastructure for devices connected via I2C */ #include <linux/slab.h> #include "via_aux.h" struct via_aux_bus *via_aux_probe(struct i2c_adapter *adap) { struct via_aux_bus *bus; if (!adap) return NULL; bus = kmalloc(sizeof(*bus), GFP_KERNEL); if (!bus) return NULL; bus->adap = adap; INIT_LIST_HEAD(&bus->drivers); via_aux_edid_probe(bus); via_aux_vt1636_probe(bus); via_aux_vt1632_probe(bus); via_aux_vt1631_probe(bus); via_aux_vt1625_probe(bus); via_aux_vt1622_probe(bus); via_aux_vt1621_probe(bus); via_aux_sii164_probe(bus); via_aux_ch7301_probe(bus); return bus; } void via_aux_free(struct via_aux_bus *bus) { struct via_aux_drv *pos, *n; if (!bus) return; list_for_each_entry_safe(pos, n, &bus->drivers, chain) { if (pos->cleanup) pos->cleanup(pos); list_del(&pos->chain); kfree(pos->data); kfree(pos); } kfree(bus); } const struct fb_videomode *via_aux_get_preferred_mode(struct via_aux_bus *bus) { struct via_aux_drv *pos; const struct fb_videomode *mode = NULL; if (!bus) return NULL; list_for_each_entry(pos, &bus->drivers, chain) { if (pos->get_preferred_mode) mode = pos->get_preferred_mode(pos); } return mode; }
gpl-2.0
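via_aux_free() above shows a common teardown idiom: walk the driver list with the safe iterator, run each element's cleanup hook, then unlink and free it. The same shape, generalized into a hypothetical stand-alone demo (demo_node and demo_list_teardown are illustrative names):

#include <linux/list.h>
#include <linux/slab.h>

struct demo_node {
	struct list_head chain;
	void (*cleanup)(struct demo_node *);
};

/* the safe iterator is mandatory here: each pass list_del()s and
 * kfree()s the current element while the walk continues */
static void demo_list_teardown(struct list_head *head)
{
	struct demo_node *pos, *n;

	list_for_each_entry_safe(pos, n, head, chain) {
		if (pos->cleanup)
			pos->cleanup(pos);
		list_del(&pos->chain);
		kfree(pos);
	}
}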
KFire-Android/kernel_omap_otter-common
drivers/media/video/sn9c102/sn9c102_mi0343.c
12851
9686
/*************************************************************************** * Plug-in for MI-0343 image sensor connected to the SN9C1xx PC Camera * * Controllers * * * * Copyright (C) 2004-2007 by Luca Risolia <luca.risolia@studio.unibo.it> * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the Free Software * * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * ***************************************************************************/ #include "sn9c102_sensor.h" #include "sn9c102_devtable.h" static int mi0343_init(struct sn9c102_device* cam) { struct sn9c102_sensor* s = sn9c102_get_sensor(cam); int err = 0; err = sn9c102_write_const_regs(cam, {0x00, 0x10}, {0x00, 0x11}, {0x0a, 0x14}, {0x40, 0x01}, {0x20, 0x17}, {0x07, 0x18}, {0xa0, 0x19}); err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x0d, 0x00, 0x01, 0, 0); err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x0d, 0x00, 0x00, 0, 0); err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x03, 0x01, 0xe1, 0, 0); err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x04, 0x02, 0x81, 0, 0); err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x05, 0x00, 0x17, 0, 0); err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x06, 0x00, 0x11, 0, 0); err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x62, 0x04, 0x9a, 0, 0); return err; } static int mi0343_get_ctrl(struct sn9c102_device* cam, struct v4l2_control* ctrl) { struct sn9c102_sensor* s = sn9c102_get_sensor(cam); u8 data[2]; switch (ctrl->id) { case V4L2_CID_EXPOSURE: if (sn9c102_i2c_try_raw_read(cam, s, s->i2c_slave_id, 0x09, 2, data) < 0) return -EIO; ctrl->value = data[0]; return 0; case V4L2_CID_GAIN: if (sn9c102_i2c_try_raw_read(cam, s, s->i2c_slave_id, 0x35, 2, data) < 0) return -EIO; break; case V4L2_CID_HFLIP: if (sn9c102_i2c_try_raw_read(cam, s, s->i2c_slave_id, 0x20, 2, data) < 0) return -EIO; ctrl->value = data[1] & 0x20 ? 1 : 0; return 0; case V4L2_CID_VFLIP: if (sn9c102_i2c_try_raw_read(cam, s, s->i2c_slave_id, 0x20, 2, data) < 0) return -EIO; ctrl->value = data[1] & 0x80 ? 
1 : 0; return 0; case V4L2_CID_RED_BALANCE: if (sn9c102_i2c_try_raw_read(cam, s, s->i2c_slave_id, 0x2d, 2, data) < 0) return -EIO; break; case V4L2_CID_BLUE_BALANCE: if (sn9c102_i2c_try_raw_read(cam, s, s->i2c_slave_id, 0x2c, 2, data) < 0) return -EIO; break; case SN9C102_V4L2_CID_GREEN_BALANCE: if (sn9c102_i2c_try_raw_read(cam, s, s->i2c_slave_id, 0x2e, 2, data) < 0) return -EIO; break; default: return -EINVAL; } switch (ctrl->id) { case V4L2_CID_GAIN: case V4L2_CID_RED_BALANCE: case V4L2_CID_BLUE_BALANCE: case SN9C102_V4L2_CID_GREEN_BALANCE: ctrl->value = data[1] | (data[0] << 8); if (ctrl->value >= 0x10 && ctrl->value <= 0x3f) ctrl->value -= 0x10; else if (ctrl->value >= 0x60 && ctrl->value <= 0x7f) ctrl->value -= 0x60; else if (ctrl->value >= 0xe0 && ctrl->value <= 0xff) ctrl->value -= 0xe0; } return 0; } static int mi0343_set_ctrl(struct sn9c102_device* cam, const struct v4l2_control* ctrl) { struct sn9c102_sensor* s = sn9c102_get_sensor(cam); u16 reg = 0; int err = 0; switch (ctrl->id) { case V4L2_CID_GAIN: case V4L2_CID_RED_BALANCE: case V4L2_CID_BLUE_BALANCE: case SN9C102_V4L2_CID_GREEN_BALANCE: if (ctrl->value <= (0x3f-0x10)) reg = 0x10 + ctrl->value; else if (ctrl->value <= ((0x3f-0x10) + (0x7f-0x60))) reg = 0x60 + (ctrl->value - (0x3f-0x10)); else reg = 0xe0 + (ctrl->value - (0x3f-0x10) - (0x7f-0x60)); break; } switch (ctrl->id) { case V4L2_CID_EXPOSURE: err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x09, ctrl->value, 0x00, 0, 0); break; case V4L2_CID_GAIN: err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x35, reg >> 8, reg & 0xff, 0, 0); break; case V4L2_CID_HFLIP: err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x20, ctrl->value ? 0x40:0x00, ctrl->value ? 0x20:0x00, 0, 0); break; case V4L2_CID_VFLIP: err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x20, ctrl->value ? 0x80:0x00, ctrl->value ? 0x80:0x00, 0, 0); break; case V4L2_CID_RED_BALANCE: err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x2d, reg >> 8, reg & 0xff, 0, 0); break; case V4L2_CID_BLUE_BALANCE: err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x2c, reg >> 8, reg & 0xff, 0, 0); break; case SN9C102_V4L2_CID_GREEN_BALANCE: err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x2b, reg >> 8, reg & 0xff, 0, 0); err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x2e, reg >> 8, reg & 0xff, 0, 0); break; default: return -EINVAL; } return err ? 
-EIO : 0; } static int mi0343_set_crop(struct sn9c102_device* cam, const struct v4l2_rect* rect) { struct sn9c102_sensor* s = sn9c102_get_sensor(cam); int err = 0; u8 h_start = (u8)(rect->left - s->cropcap.bounds.left) + 0, v_start = (u8)(rect->top - s->cropcap.bounds.top) + 2; err += sn9c102_write_reg(cam, h_start, 0x12); err += sn9c102_write_reg(cam, v_start, 0x13); return err; } static int mi0343_set_pix_format(struct sn9c102_device* cam, const struct v4l2_pix_format* pix) { struct sn9c102_sensor* s = sn9c102_get_sensor(cam); int err = 0; if (pix->pixelformat == V4L2_PIX_FMT_SN9C10X) { err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x0a, 0x00, 0x03, 0, 0); err += sn9c102_write_reg(cam, 0x20, 0x19); } else { err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x0a, 0x00, 0x05, 0, 0); err += sn9c102_write_reg(cam, 0xa0, 0x19); } return err; } static const struct sn9c102_sensor mi0343 = { .name = "MI-0343", .maintainer = "Luca Risolia <luca.risolia@studio.unibo.it>", .supported_bridge = BRIDGE_SN9C101 | BRIDGE_SN9C102, .frequency = SN9C102_I2C_100KHZ, .interface = SN9C102_I2C_2WIRES, .i2c_slave_id = 0x5d, .init = &mi0343_init, .qctrl = { { .id = V4L2_CID_EXPOSURE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "exposure", .minimum = 0x00, .maximum = 0x0f, .step = 0x01, .default_value = 0x06, .flags = 0, }, { .id = V4L2_CID_GAIN, .type = V4L2_CTRL_TYPE_INTEGER, .name = "global gain", .minimum = 0x00, .maximum = (0x3f-0x10)+(0x7f-0x60)+(0xff-0xe0),/*0x6d*/ .step = 0x01, .default_value = 0x00, .flags = 0, }, { .id = V4L2_CID_HFLIP, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "horizontal mirror", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, .flags = 0, }, { .id = V4L2_CID_VFLIP, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "vertical mirror", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, .flags = 0, }, { .id = V4L2_CID_RED_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "red balance", .minimum = 0x00, .maximum = (0x3f-0x10)+(0x7f-0x60)+(0xff-0xe0), .step = 0x01, .default_value = 0x00, .flags = 0, }, { .id = V4L2_CID_BLUE_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "blue balance", .minimum = 0x00, .maximum = (0x3f-0x10)+(0x7f-0x60)+(0xff-0xe0), .step = 0x01, .default_value = 0x00, .flags = 0, }, { .id = SN9C102_V4L2_CID_GREEN_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "green balance", .minimum = 0x00, .maximum = ((0x3f-0x10)+(0x7f-0x60)+(0xff-0xe0)), .step = 0x01, .default_value = 0x00, .flags = 0, }, }, .get_ctrl = &mi0343_get_ctrl, .set_ctrl = &mi0343_set_ctrl, .cropcap = { .bounds = { .left = 0, .top = 0, .width = 640, .height = 480, }, .defrect = { .left = 0, .top = 0, .width = 640, .height = 480, }, }, .set_crop = &mi0343_set_crop, .pix_format = { .width = 640, .height = 480, .pixelformat = V4L2_PIX_FMT_SBGGR8, .priv = 8, }, .set_pix_format = &mi0343_set_pix_format }; int sn9c102_probe_mi0343(struct sn9c102_device* cam) { u8 data[2]; if (sn9c102_write_const_regs(cam, {0x01, 0x01}, {0x00, 0x01}, {0x28, 0x17})) return -EIO; if (sn9c102_i2c_try_raw_read(cam, &mi0343, mi0343.i2c_slave_id, 0x00, 2, data) < 0) return -EIO; if (data[1] != 0x42 || data[0] != 0xe3) return -ENODEV; sn9c102_attach_sensor(cam, &mi0343); return 0; }
gpl-2.0
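Note on the mi0343 driver above: its gain and balance controls pack a linear value in the range 0 to 0x6d into three disjoint register windows (0x10-0x3f, 0x60-0x7f, 0xe0-0xff). Below is a minimal standalone sketch of that mapping, with an inverse that also restores the window offsets so the two directions round-trip; the helper names are illustrative, not symbols from the driver.

#include <assert.h>
#include <stdint.h>

/* Control value -> register code, mirroring the set path in mi0343_set_ctrl(). */
static uint16_t mi0343_value_to_reg(int value)
{
	if (value <= (0x3f - 0x10))
		return 0x10 + value;
	else if (value <= (0x3f - 0x10) + (0x7f - 0x60))
		return 0x60 + (value - (0x3f - 0x10));
	else
		return 0xe0 + (value - (0x3f - 0x10) - (0x7f - 0x60));
}

/* Consistent inverse: subtract the window base and restore the window offset. */
static int mi0343_reg_to_value(uint16_t reg)
{
	if (reg >= 0x10 && reg <= 0x3f)
		return reg - 0x10;
	if (reg >= 0x60 && reg <= 0x7f)
		return (0x3f - 0x10) + (reg - 0x60);
	if (reg >= 0xe0 && reg <= 0xff)
		return (0x3f - 0x10) + (0x7f - 0x60) + (reg - 0xe0);
	return -1;	/* outside the three mapped windows */
}

int main(void)
{
	int max = (0x3f - 0x10) + (0x7f - 0x60) + (0xff - 0xe0);	/* 0x6d */

	for (int v = 0; v <= max; v++)
		assert(mi0343_reg_to_value(mi0343_value_to_reg(v)) == v);
	return 0;
}

Note that mi0343_get_ctrl() above subtracts only the window base, so values read back from the middle and top windows land in 0..0x1f rather than in the upper part of the control range.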
motley-git/Kernel-Nexus7
arch/m32r/platforms/mappi3/io.c
13875
11272
/* * linux/arch/m32r/platforms/mappi3/io.c * * Typical I/O routines for Mappi3 board. * * Copyright (c) 2001-2005 Hiroyuki Kondo, Hirokazu Takata, * Hitoshi Yamamoto, Mamoru Sakugawa */ #include <asm/m32r.h> #include <asm/page.h> #include <asm/io.h> #include <asm/byteorder.h> #if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC) #include <linux/types.h> #define M32R_PCC_IOMAP_SIZE 0x1000 #define M32R_PCC_IOSTART0 0x1000 #define M32R_PCC_IOEND0 (M32R_PCC_IOSTART0 + M32R_PCC_IOMAP_SIZE - 1) extern void pcc_ioread_byte(int, unsigned long, void *, size_t, size_t, int); extern void pcc_ioread_word(int, unsigned long, void *, size_t, size_t, int); extern void pcc_iowrite_byte(int, unsigned long, void *, size_t, size_t, int); extern void pcc_iowrite_word(int, unsigned long, void *, size_t, size_t, int); #endif /* CONFIG_PCMCIA && CONFIG_M32R_CFC */ #define PORT2ADDR(port) _port2addr(port) #define PORT2ADDR_NE(port) _port2addr_ne(port) #define PORT2ADDR_USB(port) _port2addr_usb(port) static inline void *_port2addr(unsigned long port) { return (void *)(port | NONCACHE_OFFSET); } #if defined(CONFIG_IDE) static inline void *__port2addr_ata(unsigned long port) { static int dummy_reg; switch (port) { /* IDE0 CF */ case 0x1f0: return (void *)(0x14002000 | NONCACHE_OFFSET); case 0x1f1: return (void *)(0x14012800 | NONCACHE_OFFSET); case 0x1f2: return (void *)(0x14012002 | NONCACHE_OFFSET); case 0x1f3: return (void *)(0x14012802 | NONCACHE_OFFSET); case 0x1f4: return (void *)(0x14012004 | NONCACHE_OFFSET); case 0x1f5: return (void *)(0x14012804 | NONCACHE_OFFSET); case 0x1f6: return (void *)(0x14012006 | NONCACHE_OFFSET); case 0x1f7: return (void *)(0x14012806 | NONCACHE_OFFSET); case 0x3f6: return (void *)(0x1401200e | NONCACHE_OFFSET); /* IDE1 IDE */ case 0x170: /* Data 16bit */ return (void *)(0x14810000 | NONCACHE_OFFSET); case 0x171: /* Features / Error */ return (void *)(0x14810002 | NONCACHE_OFFSET); case 0x172: /* Sector count */ return (void *)(0x14810004 | NONCACHE_OFFSET); case 0x173: /* Sector number */ return (void *)(0x14810006 | NONCACHE_OFFSET); case 0x174: /* Cylinder low */ return (void *)(0x14810008 | NONCACHE_OFFSET); case 0x175: /* Cylinder high */ return (void *)(0x1481000a | NONCACHE_OFFSET); case 0x176: /* Device head */ return (void *)(0x1481000c | NONCACHE_OFFSET); case 0x177: /* Command */ return (void *)(0x1481000e | NONCACHE_OFFSET); case 0x376: /* Device control / Alt status */ return (void *)(0x1480800c | NONCACHE_OFFSET); default: return (void *)&dummy_reg; } } #endif #define LAN_IOSTART (0x300 | NONCACHE_OFFSET) #define LAN_IOEND (0x320 | NONCACHE_OFFSET) static inline void *_port2addr_ne(unsigned long port) { return (void *)(port + 0x10000000); } static inline void *_port2addr_usb(unsigned long port) { return (void *)(port + NONCACHE_OFFSET + 0x12000000); } static inline void delay(void) { __asm__ __volatile__ ("push r0; \n\t pop r0;" : : :"memory"); } /* * NIC I/O function */ static inline unsigned char _ne_inb(void *portp) { return (unsigned char) *(volatile unsigned char *)portp; } static inline unsigned short _ne_inw(void *portp) { return (unsigned short)le16_to_cpu(*(volatile unsigned short *)portp); } static inline void _ne_insb(void *portp, void * addr, unsigned long count) { unsigned char *buf = addr; while (count--) *buf++ = *(volatile unsigned char *)portp; } static inline void _ne_outb(unsigned char b, void *portp) { *(volatile unsigned char *)portp = (unsigned char)b; } static inline void _ne_outw(unsigned short w, void *portp) { *(volatile unsigned short 
*)portp = cpu_to_le16(w); } unsigned char _inb(unsigned long port) { if (port >= LAN_IOSTART && port < LAN_IOEND) return _ne_inb(PORT2ADDR_NE(port)); #if defined(CONFIG_IDE) else if ( ((port >= 0x170 && port <=0x177) || port == 0x376) || ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) ){ return *(volatile unsigned char *)__port2addr_ata(port); } #endif #if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC) else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) { unsigned char b; pcc_ioread_byte(0, port, &b, sizeof(b), 1, 0); return b; } else #endif return *(volatile unsigned char *)PORT2ADDR(port); } unsigned short _inw(unsigned long port) { if (port >= LAN_IOSTART && port < LAN_IOEND) return _ne_inw(PORT2ADDR_NE(port)); #if defined(CONFIG_IDE) else if ( ((port >= 0x170 && port <=0x177) || port == 0x376) || ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) ){ return *(volatile unsigned short *)__port2addr_ata(port); } #endif #if defined(CONFIG_USB) else if (port >= 0x340 && port < 0x3a0) return *(volatile unsigned short *)PORT2ADDR_USB(port); #endif #if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC) else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) { unsigned short w; pcc_ioread_word(0, port, &w, sizeof(w), 1, 0); return w; } else #endif return *(volatile unsigned short *)PORT2ADDR(port); } unsigned long _inl(unsigned long port) { #if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC) if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) { unsigned long l; pcc_ioread_word(0, port, &l, sizeof(l), 1, 0); return l; } else #endif return *(volatile unsigned long *)PORT2ADDR(port); } unsigned char _inb_p(unsigned long port) { unsigned char v = _inb(port); delay(); return (v); } unsigned short _inw_p(unsigned long port) { unsigned short v = _inw(port); delay(); return (v); } unsigned long _inl_p(unsigned long port) { unsigned long v = _inl(port); delay(); return (v); } void _outb(unsigned char b, unsigned long port) { if (port >= LAN_IOSTART && port < LAN_IOEND) _ne_outb(b, PORT2ADDR_NE(port)); else #if defined(CONFIG_IDE) if ( ((port >= 0x170 && port <=0x177) || port == 0x376) || ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) ){ *(volatile unsigned char *)__port2addr_ata(port) = b; } else #endif #if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC) if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) { pcc_iowrite_byte(0, port, &b, sizeof(b), 1, 0); } else #endif *(volatile unsigned char *)PORT2ADDR(port) = b; } void _outw(unsigned short w, unsigned long port) { if (port >= LAN_IOSTART && port < LAN_IOEND) _ne_outw(w, PORT2ADDR_NE(port)); else #if defined(CONFIG_IDE) if ( ((port >= 0x170 && port <=0x177) || port == 0x376) || ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) ){ *(volatile unsigned short *)__port2addr_ata(port) = w; } else #endif #if defined(CONFIG_USB) if (port >= 0x340 && port < 0x3a0) *(volatile unsigned short *)PORT2ADDR_USB(port) = w; else #endif #if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC) if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) { pcc_iowrite_word(0, port, &w, sizeof(w), 1, 0); } else #endif *(volatile unsigned short *)PORT2ADDR(port) = w; } void _outl(unsigned long l, unsigned long port) { #if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC) if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) { pcc_iowrite_word(0, port, &l, sizeof(l), 1, 0); } else #endif *(volatile unsigned long *)PORT2ADDR(port) = l; } void _outb_p(unsigned char b, unsigned long port) { _outb(b, port); delay(); } void 
_outw_p(unsigned short w, unsigned long port) { _outw(w, port); delay(); } void _outl_p(unsigned long l, unsigned long port) { _outl(l, port); delay(); } void _insb(unsigned int port, void * addr, unsigned long count) { if (port >= LAN_IOSTART && port < LAN_IOEND) _ne_insb(PORT2ADDR_NE(port), addr, count); #if defined(CONFIG_IDE) else if ( ((port >= 0x170 && port <=0x177) || port == 0x376) || ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) ){ unsigned char *buf = addr; unsigned char *portp = __port2addr_ata(port); while (count--) *buf++ = *(volatile unsigned char *)portp; } #endif #if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC) else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) { pcc_ioread_byte(0, port, (void *)addr, sizeof(unsigned char), count, 1); } #endif else { unsigned char *buf = addr; unsigned char *portp = PORT2ADDR(port); while (count--) *buf++ = *(volatile unsigned char *)portp; } } void _insw(unsigned int port, void * addr, unsigned long count) { unsigned short *buf = addr; unsigned short *portp; if (port >= LAN_IOSTART && port < LAN_IOEND) { portp = PORT2ADDR_NE(port); while (count--) *buf++ = *(volatile unsigned short *)portp; #if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC) } else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) { pcc_ioread_word(0, port, (void *)addr, sizeof(unsigned short), count, 1); #endif #if defined(CONFIG_IDE) } else if ( ((port >= 0x170 && port <=0x177) || port == 0x376) || ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) ){ portp = __port2addr_ata(port); while (count--) *buf++ = *(volatile unsigned short *)portp; #endif } else { portp = PORT2ADDR(port); while (count--) *buf++ = *(volatile unsigned short *)portp; } } void _insl(unsigned int port, void * addr, unsigned long count) { unsigned long *buf = addr; unsigned long *portp; portp = PORT2ADDR(port); while (count--) *buf++ = *(volatile unsigned long *)portp; } void _outsb(unsigned int port, const void * addr, unsigned long count) { const unsigned char *buf = addr; unsigned char *portp; if (port >= LAN_IOSTART && port < LAN_IOEND) { portp = PORT2ADDR_NE(port); while (count--) _ne_outb(*buf++, portp); #if defined(CONFIG_IDE) } else if ( ((port >= 0x170 && port <=0x177) || port == 0x376) || ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) ){ portp = __port2addr_ata(port); while (count--) *(volatile unsigned char *)portp = *buf++; #endif #if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC) } else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) { pcc_iowrite_byte(0, port, (void *)addr, sizeof(unsigned char), count, 1); #endif } else { portp = PORT2ADDR(port); while (count--) *(volatile unsigned char *)portp = *buf++; } } void _outsw(unsigned int port, const void * addr, unsigned long count) { const unsigned short *buf = addr; unsigned short *portp; if (port >= LAN_IOSTART && port < LAN_IOEND) { portp = PORT2ADDR_NE(port); while (count--) *(volatile unsigned short *)portp = *buf++; #if defined(CONFIG_IDE) } else if ( ((port >= 0x170 && port <=0x177) || port == 0x376) || ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) ){ portp = __port2addr_ata(port); while (count--) *(volatile unsigned short *)portp = *buf++; #endif #if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC) } else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) { pcc_iowrite_word(0, port, (void *)addr, sizeof(unsigned short), count, 1); #endif } else { portp = PORT2ADDR(port); while (count--) *(volatile unsigned short *)portp = *buf++; } } void _outsl(unsigned int port,
const void * addr, unsigned long count) { const unsigned long *buf = addr; unsigned long *portp; portp = PORT2ADDR(port); while (count--) *(volatile unsigned long *)portp = *buf++; }
gpl-2.0
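Note on the mappi3 I/O routines above: the same legacy IDE port test is repeated in seven functions. Here is a consolidated predicate shown standalone; port_is_ata() is an illustrative name, not a symbol from the file.

#include <assert.h>
#include <stdbool.h>

/* The legacy IDE decode used by __port2addr_ata(): primary channel at
 * 0x1f0-0x1f7 plus 0x3f6, secondary channel at 0x170-0x177 plus 0x376. */
static bool port_is_ata(unsigned long port)
{
	return (port >= 0x170 && port <= 0x177) || port == 0x376 ||
	       (port >= 0x1f0 && port <= 0x1f7) || port == 0x3f6;
}

int main(void)
{
	assert(port_is_ata(0x1f0) && port_is_ata(0x3f6));
	assert(port_is_ata(0x170) && port_is_ata(0x376));
	assert(!port_is_ata(0x300));	/* NE2000 LAN window, not ATA */
	return 0;
}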
FlukeNetworks/snackers-kernel
arch/arm/plat-mxc/devices/platform-imx-uart.c
52
6233
/* * Copyright (C) 2009-2012 Pengutronix * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de> * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ #include <mach/hardware.h> #include <mach/devices-common.h> #define imx_imx_uart_3irq_data_entry(soc, _id, _hwid, _size) \ [_id] = { \ .id = _id, \ .iobase = soc ## _UART ## _hwid ## _BASE_ADDR, \ .iosize = _size, \ .irqrx = soc ## _INT_UART ## _hwid ## RX, \ .irqtx = soc ## _INT_UART ## _hwid ## TX, \ .irqrts = soc ## _INT_UART ## _hwid ## RTS, \ } #define imx_imx_uart_1irq_data_entry(soc, _id, _hwid, _size) \ [_id] = { \ .id = _id, \ .iobase = soc ## _UART ## _hwid ## _BASE_ADDR, \ .iosize = _size, \ .irq = soc ## _INT_UART ## _hwid, \ } #ifdef CONFIG_SOC_IMX1 const struct imx_imx_uart_3irq_data imx1_imx_uart_data[] __initconst = { #define imx1_imx_uart_data_entry(_id, _hwid) \ imx_imx_uart_3irq_data_entry(MX1, _id, _hwid, 0xd0) imx1_imx_uart_data_entry(0, 1), imx1_imx_uart_data_entry(1, 2), }; #endif /* ifdef CONFIG_SOC_IMX1 */ #ifdef CONFIG_SOC_IMX21 const struct imx_imx_uart_1irq_data imx21_imx_uart_data[] __initconst = { #define imx21_imx_uart_data_entry(_id, _hwid) \ imx_imx_uart_1irq_data_entry(MX21, _id, _hwid, SZ_4K) imx21_imx_uart_data_entry(0, 1), imx21_imx_uart_data_entry(1, 2), imx21_imx_uart_data_entry(2, 3), imx21_imx_uart_data_entry(3, 4), }; #endif #ifdef CONFIG_SOC_IMX25 const struct imx_imx_uart_1irq_data imx25_imx_uart_data[] __initconst = { #define imx25_imx_uart_data_entry(_id, _hwid) \ imx_imx_uart_1irq_data_entry(MX25, _id, _hwid, SZ_16K) imx25_imx_uart_data_entry(0, 1), imx25_imx_uart_data_entry(1, 2), imx25_imx_uart_data_entry(2, 3), imx25_imx_uart_data_entry(3, 4), imx25_imx_uart_data_entry(4, 5), }; #endif /* ifdef CONFIG_SOC_IMX25 */ #ifdef CONFIG_SOC_IMX27 const struct imx_imx_uart_1irq_data imx27_imx_uart_data[] __initconst = { #define imx27_imx_uart_data_entry(_id, _hwid) \ imx_imx_uart_1irq_data_entry(MX27, _id, _hwid, SZ_4K) imx27_imx_uart_data_entry(0, 1), imx27_imx_uart_data_entry(1, 2), imx27_imx_uart_data_entry(2, 3), imx27_imx_uart_data_entry(3, 4), imx27_imx_uart_data_entry(4, 5), imx27_imx_uart_data_entry(5, 6), }; #endif /* ifdef CONFIG_SOC_IMX27 */ #ifdef CONFIG_SOC_IMX31 const struct imx_imx_uart_1irq_data imx31_imx_uart_data[] __initconst = { #define imx31_imx_uart_data_entry(_id, _hwid) \ imx_imx_uart_1irq_data_entry(MX31, _id, _hwid, SZ_4K) imx31_imx_uart_data_entry(0, 1), imx31_imx_uart_data_entry(1, 2), imx31_imx_uart_data_entry(2, 3), imx31_imx_uart_data_entry(3, 4), imx31_imx_uart_data_entry(4, 5), }; #endif /* ifdef CONFIG_SOC_IMX31 */ #ifdef CONFIG_SOC_IMX35 const struct imx_imx_uart_1irq_data imx35_imx_uart_data[] __initconst = { #define imx35_imx_uart_data_entry(_id, _hwid) \ imx_imx_uart_1irq_data_entry(MX35, _id, _hwid, SZ_16K) imx35_imx_uart_data_entry(0, 1), imx35_imx_uart_data_entry(1, 2), imx35_imx_uart_data_entry(2, 3), }; #endif /* ifdef CONFIG_SOC_IMX35 */ #ifdef CONFIG_SOC_IMX50 const struct imx_imx_uart_1irq_data imx50_imx_uart_data[] __initconst = { #define imx50_imx_uart_data_entry(_id, _hwid) \ imx_imx_uart_1irq_data_entry(MX50, _id, _hwid, SZ_4K) imx50_imx_uart_data_entry(0, 1), imx50_imx_uart_data_entry(1, 2), imx50_imx_uart_data_entry(2, 3), imx50_imx_uart_data_entry(3, 4), imx50_imx_uart_data_entry(4, 5), }; #endif /* ifdef CONFIG_SOC_IMX50 */ #ifdef CONFIG_SOC_IMX51 const struct imx_imx_uart_1irq_data imx51_imx_uart_data[] __initconst = {
#define imx51_imx_uart_data_entry(_id, _hwid) \ imx_imx_uart_1irq_data_entry(MX51, _id, _hwid, SZ_4K) imx51_imx_uart_data_entry(0, 1), imx51_imx_uart_data_entry(1, 2), imx51_imx_uart_data_entry(2, 3), }; #endif /* ifdef CONFIG_SOC_IMX51 */ #ifdef CONFIG_SOC_IMX53 const struct imx_imx_uart_1irq_data imx53_imx_uart_data[] __initconst = { #define imx53_imx_uart_data_entry(_id, _hwid) \ imx_imx_uart_1irq_data_entry(MX53, _id, _hwid, SZ_4K) imx53_imx_uart_data_entry(0, 1), imx53_imx_uart_data_entry(1, 2), imx53_imx_uart_data_entry(2, 3), }; #endif /* ifdef CONFIG_SOC_IMX53 */ #ifdef CONFIG_SOC_IMX6Q const struct imx_imx_uart_1irq_data imx6q_imx_uart_data[] __initconst = { #define imx6q_imx_uart_data_entry(_id, _hwid) \ imx_imx_uart_1irq_data_entry(MX6Q, _id, _hwid, SZ_4K) imx6q_imx_uart_data_entry(0, 1), imx6q_imx_uart_data_entry(1, 2), imx6q_imx_uart_data_entry(2, 3), imx6q_imx_uart_data_entry(3, 4), imx6q_imx_uart_data_entry(4, 5), }; #endif /* ifdef CONFIG_SOC_IMX6Q */ #ifdef CONFIG_SOC_IMX6SL const struct imx_imx_uart_1irq_data imx6sl_imx_uart_data[] __initconst = { #define imx6sl_imx_uart_data_entry(_id, _hwid) \ imx_imx_uart_1irq_data_entry(MX6SL, _id, _hwid, SZ_4K) imx6sl_imx_uart_data_entry(0, 1), imx6sl_imx_uart_data_entry(1, 2), imx6sl_imx_uart_data_entry(2, 3), imx6sl_imx_uart_data_entry(3, 4), imx6sl_imx_uart_data_entry(4, 5), }; #endif /* ifdef CONFIG_SOC_IMX6SL */ struct platform_device *__init imx_add_imx_uart_3irq( const struct imx_imx_uart_3irq_data *data, const struct imxuart_platform_data *pdata) { struct resource res[] = { { .start = data->iobase, .end = data->iobase + data->iosize - 1, .flags = IORESOURCE_MEM, }, { .start = data->irqrx, .end = data->irqrx, .flags = IORESOURCE_IRQ, }, { .start = data->irqtx, .end = data->irqtx, .flags = IORESOURCE_IRQ, }, { .start = data->irqrts, .end = data->irqrts, .flags = IORESOURCE_IRQ, }, }; return imx_add_platform_device("imx-uart", data->id, res, ARRAY_SIZE(res), pdata, sizeof(*pdata)); } struct platform_device *__init imx_add_imx_uart_1irq( const struct imx_imx_uart_1irq_data *data, const struct imxuart_platform_data *pdata) { struct resource res[] = { { .start = data->iobase, .end = data->iobase + data->iosize - 1, .flags = IORESOURCE_MEM, }, { .start = data->irq, .end = data->irq, .flags = IORESOURCE_IRQ, }, }; return imx_add_platform_device("imx-uart", data->id, res, ARRAY_SIZE(res), pdata, sizeof(*pdata)); }
gpl-2.0
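Note on the entry macros above: they rely on ## token pasting, gluing the SoC prefix and hardware id into register and interrupt identifiers at preprocessing time. Below is a toy model of the same pattern, compilable on its own; the MYSOC_* values are stand-ins for the real MX51_* constants from the SoC headers.

#include <stdio.h>

/* Stand-in constants; on real hardware these come from the SoC headers. */
#define MYSOC_UART1_BASE_ADDR 0x73fbc000
#define MYSOC_INT_UART1 31

/* Same ## pasting shape as imx_imx_uart_1irq_data_entry() above. */
#define uart_base(soc, hwid) soc ## _UART ## hwid ## _BASE_ADDR
#define uart_irq(soc, hwid) soc ## _INT_UART ## hwid

int main(void)
{
	/* uart_base(MYSOC, 1) pastes to MYSOC_UART1_BASE_ADDR, and so on. */
	printf("base=%#lx irq=%d\n",
	       (unsigned long)uart_base(MYSOC, 1), uart_irq(MYSOC, 1));
	return 0;
}

So imx51_imx_uart_data_entry(0, 1) resolves to MX51_UART1_BASE_ADDR and MX51_INT_UART1 without either identifier being spelled out in the table.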
FusionSP/android_kernel_samsung_klte
drivers/hv/connection.c
564
8448
/* * * Copyright (c) 2009, Microsoft Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * * Authors: * Haiyang Zhang <haiyangz@microsoft.com> * Hank Janssen <hjanssen@microsoft.com> * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/delay.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/hyperv.h> #include <asm/hyperv.h> #include "hyperv_vmbus.h" struct vmbus_connection vmbus_connection = { .conn_state = DISCONNECTED, .next_gpadl_handle = ATOMIC_INIT(0xE1E10), }; /* * vmbus_connect - Sends a connect request on the partition service connection */ int vmbus_connect(void) { int ret = 0; int t; struct vmbus_channel_msginfo *msginfo = NULL; struct vmbus_channel_initiate_contact *msg; unsigned long flags; /* Initialize the vmbus connection */ vmbus_connection.conn_state = CONNECTING; vmbus_connection.work_queue = create_workqueue("hv_vmbus_con"); if (!vmbus_connection.work_queue) { ret = -ENOMEM; goto cleanup; } INIT_LIST_HEAD(&vmbus_connection.chn_msg_list); spin_lock_init(&vmbus_connection.channelmsg_lock); INIT_LIST_HEAD(&vmbus_connection.chn_list); spin_lock_init(&vmbus_connection.channel_lock); /* * Setup the vmbus event connection for channel interrupt * abstraction stuff */ vmbus_connection.int_page = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, 0); if (vmbus_connection.int_page == NULL) { ret = -ENOMEM; goto cleanup; } vmbus_connection.recv_int_page = vmbus_connection.int_page; vmbus_connection.send_int_page = (void *)((unsigned long)vmbus_connection.int_page + (PAGE_SIZE >> 1)); /* * Setup the monitor notification facility. 
The 1st page for * parent->child and the 2nd page for child->parent */ vmbus_connection.monitor_pages = (void *)__get_free_pages((GFP_KERNEL|__GFP_ZERO), 1); if (vmbus_connection.monitor_pages == NULL) { ret = -ENOMEM; goto cleanup; } msginfo = kzalloc(sizeof(*msginfo) + sizeof(struct vmbus_channel_initiate_contact), GFP_KERNEL); if (msginfo == NULL) { ret = -ENOMEM; goto cleanup; } init_completion(&msginfo->waitevent); msg = (struct vmbus_channel_initiate_contact *)msginfo->msg; msg->header.msgtype = CHANNELMSG_INITIATE_CONTACT; msg->vmbus_version_requested = VMBUS_REVISION_NUMBER; msg->interrupt_page = virt_to_phys(vmbus_connection.int_page); msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages); msg->monitor_page2 = virt_to_phys( (void *)((unsigned long)vmbus_connection.monitor_pages + PAGE_SIZE)); /* * Add to list before we send the request since we may * receive the response before returning from this routine */ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); list_add_tail(&msginfo->msglistentry, &vmbus_connection.chn_msg_list); spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_initiate_contact)); if (ret != 0) { spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); list_del(&msginfo->msglistentry); spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); goto cleanup; } /* Wait for the connection response */ t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ); if (t == 0) { spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); list_del(&msginfo->msglistentry); spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); ret = -ETIMEDOUT; goto cleanup; } spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); list_del(&msginfo->msglistentry); spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); /* Check if successful */ if (msginfo->response.version_response.version_supported) { vmbus_connection.conn_state = CONNECTED; } else { pr_err("Unable to connect, " "Version %d not supported by Hyper-V\n", VMBUS_REVISION_NUMBER); ret = -ECONNREFUSED; goto cleanup; } kfree(msginfo); return 0; cleanup: vmbus_connection.conn_state = DISCONNECTED; if (vmbus_connection.work_queue) destroy_workqueue(vmbus_connection.work_queue); if (vmbus_connection.int_page) { free_pages((unsigned long)vmbus_connection.int_page, 0); vmbus_connection.int_page = NULL; } if (vmbus_connection.monitor_pages) { free_pages((unsigned long)vmbus_connection.monitor_pages, 1); vmbus_connection.monitor_pages = NULL; } kfree(msginfo); return ret; } /* * relid2channel - Get the channel object given its * child relative id (ie channel id) */ struct vmbus_channel *relid2channel(u32 relid) { struct vmbus_channel *channel; struct vmbus_channel *found_channel = NULL; unsigned long flags; spin_lock_irqsave(&vmbus_connection.channel_lock, flags); list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) { if (channel->offermsg.child_relid == relid) { found_channel = channel; break; } } spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags); return found_channel; } /* * process_chn_event - Process a channel event notification */ static void process_chn_event(u32 relid) { struct vmbus_channel *channel; unsigned long flags; /* * Find the channel based on this relid and invokes the * channel callback to process the event */ channel = relid2channel(relid); if (!channel) { pr_err("channel not found for relid - %u\n", relid); return; } /* * A channel once created is persistent even 
when there * is no driver handling the device. An unloading driver * sets the onchannel_callback to NULL under the * protection of the channel inbound_lock. Thus, checking * and invoking the driver specific callback takes care of * orderly unloading of the driver. */ spin_lock_irqsave(&channel->inbound_lock, flags); if (channel->onchannel_callback != NULL) channel->onchannel_callback(channel->channel_callback_context); else pr_err("no channel callback for relid - %u\n", relid); spin_unlock_irqrestore(&channel->inbound_lock, flags); } /* * vmbus_on_event - Handler for events */ void vmbus_on_event(unsigned long data) { u32 dword; u32 maxdword = MAX_NUM_CHANNELS_SUPPORTED >> 5; int bit; u32 relid; u32 *recv_int_page = vmbus_connection.recv_int_page; /* Check events */ if (!recv_int_page) return; for (dword = 0; dword < maxdword; dword++) { if (!recv_int_page[dword]) continue; for (bit = 0; bit < 32; bit++) { if (sync_test_and_clear_bit(bit, (unsigned long *)&recv_int_page[dword])) { relid = (dword << 5) + bit; if (relid == 0) /* * Special case - vmbus * channel protocol msg */ continue; process_chn_event(relid); } } } } /* * vmbus_post_msg - Send a msg on the vmbus's message connection */ int vmbus_post_msg(void *buffer, size_t buflen) { union hv_connection_id conn_id; int ret = 0; int retries = 0; conn_id.asu32 = 0; conn_id.u.id = VMBUS_MESSAGE_CONNECTION_ID; /* * hv_post_message() can have transient failures because of * insufficient resources. Retry the operation a couple of * times before giving up. */ while (retries < 10) { ret = hv_post_message(conn_id, 1, buffer, buflen); switch (ret) { case HV_STATUS_INSUFFICIENT_BUFFERS: ret = -ENOMEM; case -ENOMEM: break; case HV_STATUS_SUCCESS: return ret; default: pr_err("hv_post_msg() failed; error code:%d\n", ret); return -EINVAL; } retries++; msleep(100); } return ret; } /* * vmbus_set_event - Send an event notification to the parent */ int vmbus_set_event(u32 child_relid) { /* Each u32 represents 32 channels */ sync_set_bit(child_relid & 31, (unsigned long *)vmbus_connection.send_int_page + (child_relid >> 5)); return hv_signal_event(); }
gpl-2.0
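Note on vmbus_set_event() and vmbus_on_event() above: they treat the interrupt page as a flat bitmap in which each u32 word carries 32 channel bits, so a relid splits into a word index (relid >> 5) and a bit index (relid & 31). A standalone sketch of that round trip; the array size and helper names are illustrative.

#include <assert.h>
#include <stdint.h>

#define MAX_CHANNELS 256	/* stand-in for MAX_NUM_CHANNELS_SUPPORTED */

static uint32_t int_page[MAX_CHANNELS >> 5];

/* Mirror of vmbus_set_event(): mark one channel bit in the page. */
static void set_event(uint32_t relid)
{
	int_page[relid >> 5] |= 1u << (relid & 31);
}

/* Mirror of the vmbus_on_event() scan: recover relid as (dword << 5) + bit.
 * Relid 0 is reserved for the vmbus channel protocol message itself,
 * which is why the real scan skips it. */
static int next_event(void)
{
	for (uint32_t dword = 0; dword < (MAX_CHANNELS >> 5); dword++) {
		if (!int_page[dword])
			continue;
		for (int bit = 0; bit < 32; bit++) {
			if (int_page[dword] & (1u << bit)) {
				int_page[dword] &= ~(1u << bit);
				return (int)(dword << 5) + bit;
			}
		}
	}
	return -1;	/* no event pending */
}

int main(void)
{
	set_event(77);
	assert(next_event() == 77);
	assert(next_event() == -1);
	return 0;
}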
imnuts/sch-i510_kernel
drivers/gpu/drm/i915/i915_opregion.c
820
16119
/* * Copyright 2008 Intel Corporation <hong.liu@intel.com> * Copyright 2008 Red Hat <mjg@redhat.com> * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial * portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NON-INFRINGEMENT. IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/acpi.h> #include <acpi/video.h> #include "drmP.h" #include "i915_drm.h" #include "i915_drv.h" #define PCI_ASLE 0xe4 #define PCI_LBPC 0xf4 #define PCI_ASLS 0xfc #define OPREGION_SZ (8*1024) #define OPREGION_HEADER_OFFSET 0 #define OPREGION_ACPI_OFFSET 0x100 #define OPREGION_SWSCI_OFFSET 0x200 #define OPREGION_ASLE_OFFSET 0x300 #define OPREGION_VBT_OFFSET 0x1000 #define OPREGION_SIGNATURE "IntelGraphicsMem" #define MBOX_ACPI (1<<0) #define MBOX_SWSCI (1<<1) #define MBOX_ASLE (1<<2) struct opregion_header { u8 signature[16]; u32 size; u32 opregion_ver; u8 bios_ver[32]; u8 vbios_ver[16]; u8 driver_ver[16]; u32 mboxes; u8 reserved[164]; } __attribute__((packed)); /* OpRegion mailbox #1: public ACPI methods */ struct opregion_acpi { u32 drdy; /* driver readiness */ u32 csts; /* notification status */ u32 cevt; /* current event */ u8 rsvd1[20]; u32 didl[8]; /* supported display devices ID list */ u32 cpdl[8]; /* currently presented display list */ u32 cadl[8]; /* currently active display list */ u32 nadl[8]; /* next active devices list */ u32 aslp; /* ASL sleep time-out */ u32 tidx; /* toggle table index */ u32 chpd; /* current hotplug enable indicator */ u32 clid; /* current lid state*/ u32 cdck; /* current docking state */ u32 sxsw; /* Sx state resume */ u32 evts; /* ASL supported events */ u32 cnot; /* current OS notification */ u32 nrdy; /* driver status */ u8 rsvd2[60]; } __attribute__((packed)); /* OpRegion mailbox #2: SWSCI */ struct opregion_swsci { u32 scic; /* SWSCI command|status|data */ u32 parm; /* command parameters */ u32 dslp; /* driver sleep time-out */ u8 rsvd[244]; } __attribute__((packed)); /* OpRegion mailbox #3: ASLE */ struct opregion_asle { u32 ardy; /* driver readiness */ u32 aslc; /* ASLE interrupt command */ u32 tche; /* technology enabled indicator */ u32 alsi; /* current ALS illuminance reading */ u32 bclp; /* backlight brightness to set */ u32 pfit; /* panel fitting state */ u32 cblv; /* current brightness level */ u16 bclm[20]; /* backlight level duty cycle mapping table */ u32 cpfm; /* current panel fitting mode */ u32 epfm; /* enabled panel fitting modes */ u8 plut[74]; /* panel LUT and identifier */ u32 pfmb; /* PWM freq and min brightness */ u8 rsvd[102]; } __attribute__((packed)); /* ASLE irq request bits */ #define ASLE_SET_ALS_ILLUM (1 << 0) #define ASLE_SET_BACKLIGHT (1 
<< 1) #define ASLE_SET_PFIT (1 << 2) #define ASLE_SET_PWM_FREQ (1 << 3) #define ASLE_REQ_MSK 0xf /* response bits of ASLE irq request */ #define ASLE_ALS_ILLUM_FAIL (2<<10) #define ASLE_BACKLIGHT_FAIL (2<<12) #define ASLE_PFIT_FAIL (2<<14) #define ASLE_PWM_FREQ_FAIL (2<<16) #define ASLE_ALS_ILLUM_FAILED (1<<10) #define ASLE_BACKLIGHT_FAILED (1<<12) #define ASLE_PFIT_FAILED (1<<14) #define ASLE_PWM_FREQ_FAILED (1<<16) /* ASLE backlight brightness to set */ #define ASLE_BCLP_VALID (1<<31) #define ASLE_BCLP_MSK (~(1<<31)) /* ASLE panel fitting request */ #define ASLE_PFIT_VALID (1<<31) #define ASLE_PFIT_CENTER (1<<0) #define ASLE_PFIT_STRETCH_TEXT (1<<1) #define ASLE_PFIT_STRETCH_GFX (1<<2) /* PWM frequency and minimum brightness */ #define ASLE_PFMB_BRIGHTNESS_MASK (0xff) #define ASLE_PFMB_BRIGHTNESS_VALID (1<<8) #define ASLE_PFMB_PWM_MASK (0x7ffffe00) #define ASLE_PFMB_PWM_VALID (1<<31) #define ASLE_CBLV_VALID (1<<31) #define ACPI_OTHER_OUTPUT (0<<8) #define ACPI_VGA_OUTPUT (1<<8) #define ACPI_TV_OUTPUT (2<<8) #define ACPI_DIGITAL_OUTPUT (3<<8) #define ACPI_LVDS_OUTPUT (4<<8) static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) { struct drm_i915_private *dev_priv = dev->dev_private; struct opregion_asle *asle = dev_priv->opregion.asle; u32 blc_pwm_ctl, blc_pwm_ctl2; u32 max_backlight, level, shift; if (!(bclp & ASLE_BCLP_VALID)) return ASLE_BACKLIGHT_FAIL; bclp &= ASLE_BCLP_MSK; if (bclp < 0 || bclp > 255) return ASLE_BACKLIGHT_FAIL; blc_pwm_ctl = I915_READ(BLC_PWM_CTL); blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2); if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE)) pci_write_config_dword(dev->pdev, PCI_LBPC, bclp); else { if (IS_PINEVIEW(dev)) { blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1); max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >> BACKLIGHT_MODULATION_FREQ_SHIFT; shift = BACKLIGHT_DUTY_CYCLE_SHIFT + 1; } else { blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK; max_backlight = ((blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >> BACKLIGHT_MODULATION_FREQ_SHIFT) * 2; shift = BACKLIGHT_DUTY_CYCLE_SHIFT; } level = (bclp * max_backlight) / 255; I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | (level << shift)); } asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID; return 0; } static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi) { /* alsi is the current ALS reading in lux. 0 indicates below sensor range, 0xffff indicates above sensor range. 1-0xfffe are valid */ return 0; } static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb) { struct drm_i915_private *dev_priv = dev->dev_private; if (pfmb & ASLE_PFMB_PWM_VALID) { u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL); u32 pwm = pfmb & ASLE_PFMB_PWM_MASK; blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK; pwm = pwm >> 9; /* FIXME - what do we do with the PWM? 
*/ } return 0; } static u32 asle_set_pfit(struct drm_device *dev, u32 pfit) { /* Panel fitting is currently controlled by the X code, so this is a noop until modesetting support works fully */ if (!(pfit & ASLE_PFIT_VALID)) return ASLE_PFIT_FAIL; return 0; } void opregion_asle_intr(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct opregion_asle *asle = dev_priv->opregion.asle; u32 asle_stat = 0; u32 asle_req; if (!asle) return; asle_req = asle->aslc & ASLE_REQ_MSK; if (!asle_req) { DRM_DEBUG_DRIVER("non asle set request??\n"); return; } if (asle_req & ASLE_SET_ALS_ILLUM) asle_stat |= asle_set_als_illum(dev, asle->alsi); if (asle_req & ASLE_SET_BACKLIGHT) asle_stat |= asle_set_backlight(dev, asle->bclp); if (asle_req & ASLE_SET_PFIT) asle_stat |= asle_set_pfit(dev, asle->pfit); if (asle_req & ASLE_SET_PWM_FREQ) asle_stat |= asle_set_pwm_freq(dev, asle->pfmb); asle->aslc = asle_stat; } static u32 asle_set_backlight_ironlake(struct drm_device *dev, u32 bclp) { struct drm_i915_private *dev_priv = dev->dev_private; struct opregion_asle *asle = dev_priv->opregion.asle; u32 cpu_pwm_ctl, pch_pwm_ctl2; u32 max_backlight, level; if (!(bclp & ASLE_BCLP_VALID)) return ASLE_BACKLIGHT_FAILED; bclp &= ASLE_BCLP_MSK; if (bclp < 0 || bclp > 255) return ASLE_BACKLIGHT_FAILED; cpu_pwm_ctl = I915_READ(BLC_PWM_CPU_CTL); pch_pwm_ctl2 = I915_READ(BLC_PWM_PCH_CTL2); /* get the max PWM frequency */ max_backlight = (pch_pwm_ctl2 >> 16) & BACKLIGHT_DUTY_CYCLE_MASK; /* calculate the expected PWM frequency */ level = (bclp * max_backlight) / 255; /* reserve the high 16 bits */ cpu_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK); /* write the updated PWM frequency */ I915_WRITE(BLC_PWM_CPU_CTL, cpu_pwm_ctl | level); asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID; return 0; } void ironlake_opregion_gse_intr(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct opregion_asle *asle = dev_priv->opregion.asle; u32 asle_stat = 0; u32 asle_req; if (!asle) return; asle_req = asle->aslc & ASLE_REQ_MSK; if (!asle_req) { DRM_DEBUG_DRIVER("non asle set request??\n"); return; } if (asle_req & ASLE_SET_ALS_ILLUM) { DRM_DEBUG_DRIVER("Illum is not supported\n"); asle_stat |= ASLE_ALS_ILLUM_FAILED; } if (asle_req & ASLE_SET_BACKLIGHT) asle_stat |= asle_set_backlight_ironlake(dev, asle->bclp); if (asle_req & ASLE_SET_PFIT) { DRM_DEBUG_DRIVER("Pfit is not supported\n"); asle_stat |= ASLE_PFIT_FAILED; } if (asle_req & ASLE_SET_PWM_FREQ) { DRM_DEBUG_DRIVER("PWM freq is not supported\n"); asle_stat |= ASLE_PWM_FREQ_FAILED; } asle->aslc = asle_stat; } #define ASLE_ALS_EN (1<<0) #define ASLE_BLC_EN (1<<1) #define ASLE_PFIT_EN (1<<2) #define ASLE_PFMB_EN (1<<3) void opregion_enable_asle(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct opregion_asle *asle = dev_priv->opregion.asle; if (asle) { if (IS_MOBILE(dev)) { unsigned long irqflags; spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); intel_enable_asle(dev); spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); } asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN | ASLE_PFMB_EN; asle->ardy = 1; } } #define ACPI_EV_DISPLAY_SWITCH (1<<0) #define ACPI_EV_LID (1<<1) #define ACPI_EV_DOCK (1<<2) static struct intel_opregion *system_opregion; static int intel_opregion_video_event(struct notifier_block *nb, unsigned long val, void *data) { /* The only video events relevant to opregion are 0x80. These indicate either a docking event, lid switch or display switch request.
In Linux, these are handled by the dock, button and video drivers. We might want to fix the video driver to be opregion-aware in future, but right now we just indicate to the firmware that the request has been handled */ struct opregion_acpi *acpi; if (!system_opregion) return NOTIFY_DONE; acpi = system_opregion->acpi; acpi->csts = 0; return NOTIFY_OK; } static struct notifier_block intel_opregion_notifier = { .notifier_call = intel_opregion_video_event, }; /* * Initialise the DIDL field in opregion. This passes a list of devices to * the firmware. Values are defined by section B.4.2 of the ACPI specification * (version 3) */ static void intel_didl_outputs(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; struct drm_connector *connector; acpi_handle handle; struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL; unsigned long long device_id; acpi_status status; int i = 0; handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) return; if (acpi_is_video_device(acpi_dev)) acpi_video_bus = acpi_dev; else { list_for_each_entry(acpi_cdev, &acpi_dev->children, node) { if (acpi_is_video_device(acpi_cdev)) { acpi_video_bus = acpi_cdev; break; } } } if (!acpi_video_bus) { printk(KERN_WARNING "No ACPI video bus found\n"); return; } list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) { if (i >= 8) { dev_printk (KERN_ERR, &dev->pdev->dev, "More than 8 outputs detected\n"); return; } status = acpi_evaluate_integer(acpi_cdev->handle, "_ADR", NULL, &device_id); if (ACPI_SUCCESS(status)) { if (!device_id) goto blind_set; opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f); i++; } } end: /* If fewer than 8 outputs, the list must be null terminated */ if (i < 8) opregion->acpi->didl[i] = 0; return; blind_set: i = 0; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { int output_type = ACPI_OTHER_OUTPUT; if (i >= 8) { dev_printk (KERN_ERR, &dev->pdev->dev, "More than 8 outputs detected\n"); return; } switch (connector->connector_type) { case DRM_MODE_CONNECTOR_VGA: case DRM_MODE_CONNECTOR_DVIA: output_type = ACPI_VGA_OUTPUT; break; case DRM_MODE_CONNECTOR_Composite: case DRM_MODE_CONNECTOR_SVIDEO: case DRM_MODE_CONNECTOR_Component: case DRM_MODE_CONNECTOR_9PinDIN: output_type = ACPI_TV_OUTPUT; break; case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_DVID: case DRM_MODE_CONNECTOR_DisplayPort: case DRM_MODE_CONNECTOR_HDMIA: case DRM_MODE_CONNECTOR_HDMIB: output_type = ACPI_DIGITAL_OUTPUT; break; case DRM_MODE_CONNECTOR_LVDS: output_type = ACPI_LVDS_OUTPUT; break; } opregion->acpi->didl[i] |= (1<<31) | output_type | i; i++; } goto end; } int intel_opregion_init(struct drm_device *dev, int resume) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; void *base; u32 asls, mboxes; int err = 0; pci_read_config_dword(dev->pdev, PCI_ASLS, &asls); DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls); if (asls == 0) { DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n"); return -ENOTSUPP; } base = ioremap(asls, OPREGION_SZ); if (!base) return -ENOMEM; opregion->header = base; if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) { DRM_DEBUG_DRIVER("opregion signature mismatch\n"); err = -EINVAL; goto err_out; } mboxes = opregion->header->mboxes; if (mboxes & MBOX_ACPI) { DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); opregion->acpi = base + 
OPREGION_ACPI_OFFSET; if (drm_core_check_feature(dev, DRIVER_MODESET)) intel_didl_outputs(dev); } else { DRM_DEBUG_DRIVER("Public ACPI methods not supported\n"); err = -ENOTSUPP; goto err_out; } opregion->enabled = 1; if (mboxes & MBOX_SWSCI) { DRM_DEBUG_DRIVER("SWSCI supported\n"); opregion->swsci = base + OPREGION_SWSCI_OFFSET; } if (mboxes & MBOX_ASLE) { DRM_DEBUG_DRIVER("ASLE supported\n"); opregion->asle = base + OPREGION_ASLE_OFFSET; opregion_enable_asle(dev); } if (!resume) acpi_video_register(); /* Notify BIOS we are ready to handle ACPI video ext notifs. * Right now, all the events are handled by the ACPI video module. * We don't actually need to do anything with them. */ opregion->acpi->csts = 0; opregion->acpi->drdy = 1; system_opregion = opregion; register_acpi_notifier(&intel_opregion_notifier); return 0; err_out: iounmap(opregion->header); opregion->header = NULL; return err; } void intel_opregion_free(struct drm_device *dev, int suspend) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; if (!opregion->enabled) return; if (!suspend) acpi_video_unregister(); opregion->acpi->drdy = 0; system_opregion = NULL; unregister_acpi_notifier(&intel_opregion_notifier); /* just clear all opregion memory pointers now */ iounmap(opregion->header); opregion->header = NULL; opregion->acpi = NULL; opregion->swsci = NULL; opregion->asle = NULL; opregion->enabled = 0; }
gpl-2.0
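Note on asle_set_backlight() and its Ironlake variant above: both report the applied brightness back through CBLV by rescaling the 0-255 request to 0-100 and setting the valid bit. A minimal standalone sketch of just that write-back arithmetic:

#include <assert.h>
#include <stdint.h>

#define ASLE_CBLV_VALID (1u << 31)

/* Mirror of the CBLV write-back: scale a 0-255 BCLP request to a 0-100
 * (0x64) percentage and flag the field valid, as the drivers above do
 * after programming the PWM duty cycle. */
static uint32_t cblv_from_bclp(uint32_t bclp)
{
	return (bclp * 0x64) / 0xff | ASLE_CBLV_VALID;
}

int main(void)
{
	assert((cblv_from_bclp(0xff) & ~ASLE_CBLV_VALID) == 100);
	assert((cblv_from_bclp(0) & ~ASLE_CBLV_VALID) == 0);
	assert(cblv_from_bclp(128) & ASLE_CBLV_VALID);
	return 0;
}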
Demon000/libra
drivers/net/vmxnet3/vmxnet3_ethtool.c
2612
19102
/* * Linux driver for VMware's vmxnet3 ethernet NIC. * * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; version 2 of the License and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * The full GNU General Public License is included in this distribution in * the file called "COPYING". * * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> * */ #include "vmxnet3_int.h" struct vmxnet3_stat_desc { char desc[ETH_GSTRING_LEN]; int offset; }; /* per tq stats maintained by the device */ static const struct vmxnet3_stat_desc vmxnet3_tq_dev_stats[] = { /* description, offset */ { "Tx Queue#", 0 }, { " TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) }, { " TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) }, { " ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) }, { " ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) }, { " mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) }, { " mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) }, { " bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) }, { " bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) }, { " pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) }, { " pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) }, }; /* per tq stats maintained by the driver */ static const struct vmxnet3_stat_desc vmxnet3_tq_driver_stats[] = { /* description, offset */ {" drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats, drop_total) }, { " too many frags", offsetof(struct vmxnet3_tq_driver_stats, drop_too_many_frags) }, { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats, drop_oversized_hdr) }, { " hdr err", offsetof(struct vmxnet3_tq_driver_stats, drop_hdr_inspect_err) }, { " tso", offsetof(struct vmxnet3_tq_driver_stats, drop_tso) }, { " ring full", offsetof(struct vmxnet3_tq_driver_stats, tx_ring_full) }, { " pkts linearized", offsetof(struct vmxnet3_tq_driver_stats, linearized) }, { " hdr cloned", offsetof(struct vmxnet3_tq_driver_stats, copy_skb_header) }, { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats, oversized_hdr) }, }; /* per rq stats maintained by the device */ static const struct vmxnet3_stat_desc vmxnet3_rq_dev_stats[] = { { "Rx Queue#", 0 }, { " LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) }, { " LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) }, { " ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) }, { " ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) }, { " mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) }, { " mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) }, { " bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) }, { " bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) }, { " pkts rx OOB", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) }, { " pkts rx err", offsetof(struct UPT1_RxStats, 
pktsRxError) }, }; /* per rq stats maintained by the driver */ static const struct vmxnet3_stat_desc vmxnet3_rq_driver_stats[] = { /* description, offset */ { " drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats, drop_total) }, { " err", offsetof(struct vmxnet3_rq_driver_stats, drop_err) }, { " fcs", offsetof(struct vmxnet3_rq_driver_stats, drop_fcs) }, { " rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats, rx_buf_alloc_failure) }, }; /* global stats maintained by the driver */ static const struct vmxnet3_stat_desc vmxnet3_global_stats[] = { /* description, offset */ { "tx timeout count", offsetof(struct vmxnet3_adapter, tx_timeout_count) } }; struct rtnl_link_stats64 * vmxnet3_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct vmxnet3_adapter *adapter; struct vmxnet3_tq_driver_stats *drvTxStats; struct vmxnet3_rq_driver_stats *drvRxStats; struct UPT1_TxStats *devTxStats; struct UPT1_RxStats *devRxStats; unsigned long flags; int i; adapter = netdev_priv(netdev); /* Collect the dev stats into the shared area */ spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); spin_unlock_irqrestore(&adapter->cmd_lock, flags); for (i = 0; i < adapter->num_tx_queues; i++) { devTxStats = &adapter->tqd_start[i].stats; drvTxStats = &adapter->tx_queue[i].stats; stats->tx_packets += devTxStats->ucastPktsTxOK + devTxStats->mcastPktsTxOK + devTxStats->bcastPktsTxOK; stats->tx_bytes += devTxStats->ucastBytesTxOK + devTxStats->mcastBytesTxOK + devTxStats->bcastBytesTxOK; stats->tx_errors += devTxStats->pktsTxError; stats->tx_dropped += drvTxStats->drop_total; } for (i = 0; i < adapter->num_rx_queues; i++) { devRxStats = &adapter->rqd_start[i].stats; drvRxStats = &adapter->rx_queue[i].stats; stats->rx_packets += devRxStats->ucastPktsRxOK + devRxStats->mcastPktsRxOK + devRxStats->bcastPktsRxOK; stats->rx_bytes += devRxStats->ucastBytesRxOK + devRxStats->mcastBytesRxOK + devRxStats->bcastBytesRxOK; stats->rx_errors += devRxStats->pktsRxError; stats->rx_dropped += drvRxStats->drop_total; stats->multicast += devRxStats->mcastPktsRxOK; } return stats; } static int vmxnet3_get_sset_count(struct net_device *netdev, int sset) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); switch (sset) { case ETH_SS_STATS: return (ARRAY_SIZE(vmxnet3_tq_dev_stats) + ARRAY_SIZE(vmxnet3_tq_driver_stats)) * adapter->num_tx_queues + (ARRAY_SIZE(vmxnet3_rq_dev_stats) + ARRAY_SIZE(vmxnet3_rq_driver_stats)) * adapter->num_rx_queues + ARRAY_SIZE(vmxnet3_global_stats); default: return -EOPNOTSUPP; } } /* Should be multiple of 4 */ #define NUM_TX_REGS 8 #define NUM_RX_REGS 12 static int vmxnet3_get_regs_len(struct net_device *netdev) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); return (adapter->num_tx_queues * NUM_TX_REGS * sizeof(u32) + adapter->num_rx_queues * NUM_RX_REGS * sizeof(u32)); } static void vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); strlcpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT, sizeof(drvinfo->version)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); drvinfo->n_stats = vmxnet3_get_sset_count(netdev, ETH_SS_STATS); drvinfo->testinfo_len = 0; drvinfo->eedump_len = 0; drvinfo->regdump_len = vmxnet3_get_regs_len(netdev); } static void vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8
*buf) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); if (stringset == ETH_SS_STATS) { int i, j; for (j = 0; j < adapter->num_tx_queues; j++) { for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) { memcpy(buf, vmxnet3_tq_dev_stats[i].desc, ETH_GSTRING_LEN); buf += ETH_GSTRING_LEN; } for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) { memcpy(buf, vmxnet3_tq_driver_stats[i].desc, ETH_GSTRING_LEN); buf += ETH_GSTRING_LEN; } } for (j = 0; j < adapter->num_rx_queues; j++) { for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) { memcpy(buf, vmxnet3_rq_dev_stats[i].desc, ETH_GSTRING_LEN); buf += ETH_GSTRING_LEN; } for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) { memcpy(buf, vmxnet3_rq_driver_stats[i].desc, ETH_GSTRING_LEN); buf += ETH_GSTRING_LEN; } } for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) { memcpy(buf, vmxnet3_global_stats[i].desc, ETH_GSTRING_LEN); buf += ETH_GSTRING_LEN; } } } int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); unsigned long flags; netdev_features_t changed = features ^ netdev->features; if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_CTAG_RX)) { if (features & NETIF_F_RXCSUM) adapter->shared->devRead.misc.uptFeatures |= UPT1_F_RXCSUM; else adapter->shared->devRead.misc.uptFeatures &= ~UPT1_F_RXCSUM; /* update hardware LRO capability accordingly */ if (features & NETIF_F_LRO) adapter->shared->devRead.misc.uptFeatures |= UPT1_F_LRO; else adapter->shared->devRead.misc.uptFeatures &= ~UPT1_F_LRO; if (features & NETIF_F_HW_VLAN_CTAG_RX) adapter->shared->devRead.misc.uptFeatures |= UPT1_F_RXVLAN; else adapter->shared->devRead.misc.uptFeatures &= ~UPT1_F_RXVLAN; spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_FEATURE); spin_unlock_irqrestore(&adapter->cmd_lock, flags); } return 0; } static void vmxnet3_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *buf) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); unsigned long flags; u8 *base; int i; int j = 0; spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); spin_unlock_irqrestore(&adapter->cmd_lock, flags); /* this does assume each counter is 64-bit wide */ for (j = 0; j < adapter->num_tx_queues; j++) { base = (u8 *)&adapter->tqd_start[j].stats; *buf++ = (u64)j; for (i = 1; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) *buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset); base = (u8 *)&adapter->tx_queue[j].stats; for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) *buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset); } for (j = 0; j < adapter->num_rx_queues; j++) { base = (u8 *)&adapter->rqd_start[j].stats; *buf++ = (u64) j; for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) *buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset); base = (u8 *)&adapter->rx_queue[j].stats; for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) *buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset); } base = (u8 *)adapter; for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) *buf++ = *(u64 *)(base + vmxnet3_global_stats[i].offset); } static void vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); u32 *buf = p; int i = 0, j = 0; memset(p, 0, vmxnet3_get_regs_len(netdev)); regs->version = 1; /* Update vmxnet3_get_regs_len if we want
to dump more registers */ /* make each ring use multiple of 16 bytes */ for (i = 0; i < adapter->num_tx_queues; i++) { buf[j++] = adapter->tx_queue[i].tx_ring.next2fill; buf[j++] = adapter->tx_queue[i].tx_ring.next2comp; buf[j++] = adapter->tx_queue[i].tx_ring.gen; buf[j++] = 0; buf[j++] = adapter->tx_queue[i].comp_ring.next2proc; buf[j++] = adapter->tx_queue[i].comp_ring.gen; buf[j++] = adapter->tx_queue[i].stopped; buf[j++] = 0; } for (i = 0; i < adapter->num_rx_queues; i++) { buf[j++] = adapter->rx_queue[i].rx_ring[0].next2fill; buf[j++] = adapter->rx_queue[i].rx_ring[0].next2comp; buf[j++] = adapter->rx_queue[i].rx_ring[0].gen; buf[j++] = 0; buf[j++] = adapter->rx_queue[i].rx_ring[1].next2fill; buf[j++] = adapter->rx_queue[i].rx_ring[1].next2comp; buf[j++] = adapter->rx_queue[i].rx_ring[1].gen; buf[j++] = 0; buf[j++] = adapter->rx_queue[i].comp_ring.next2proc; buf[j++] = adapter->rx_queue[i].comp_ring.gen; buf[j++] = 0; buf[j++] = 0; } } static void vmxnet3_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); wol->supported = WAKE_UCAST | WAKE_ARP | WAKE_MAGIC; wol->wolopts = adapter->wol; } static int vmxnet3_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); if (wol->wolopts & (WAKE_PHY | WAKE_MCAST | WAKE_BCAST | WAKE_MAGICSECURE)) { return -EOPNOTSUPP; } adapter->wol = wol->wolopts; device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); return 0; } static int vmxnet3_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); ecmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full | SUPPORTED_TP; ecmd->advertising = ADVERTISED_TP; ecmd->port = PORT_TP; ecmd->transceiver = XCVR_INTERNAL; if (adapter->link_speed) { ethtool_cmd_speed_set(ecmd, adapter->link_speed); ecmd->duplex = DUPLEX_FULL; } else { ethtool_cmd_speed_set(ecmd, -1); ecmd->duplex = -1; } return 0; } static void vmxnet3_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *param) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE; param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE; param->rx_mini_max_pending = 0; param->rx_jumbo_max_pending = 0; param->rx_pending = adapter->rx_queue[0].rx_ring[0].size; param->tx_pending = adapter->tx_queue[0].tx_ring.size; param->rx_mini_pending = 0; param->rx_jumbo_pending = 0; } static int vmxnet3_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *param) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); u32 new_tx_ring_size, new_rx_ring_size; u32 sz; int err = 0; if (param->tx_pending == 0 || param->tx_pending > VMXNET3_TX_RING_MAX_SIZE) return -EINVAL; if (param->rx_pending == 0 || param->rx_pending > VMXNET3_RX_RING_MAX_SIZE) return -EINVAL; /* if adapter not yet initialized, do nothing */ if (adapter->rx_buf_per_pkt == 0) { netdev_err(netdev, "adapter not completely initialized, " "ring size cannot be changed yet\n"); return -EOPNOTSUPP; } /* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */ new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) & ~VMXNET3_RING_SIZE_MASK; new_tx_ring_size = min_t(u32, new_tx_ring_size, VMXNET3_TX_RING_MAX_SIZE); if (new_tx_ring_size > VMXNET3_TX_RING_MAX_SIZE || (new_tx_ring_size % VMXNET3_RING_SIZE_ALIGN) != 0) return -EINVAL; /* ring0 has to be a multiple of * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN */ sz = 
adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; new_rx_ring_size = (param->rx_pending + sz - 1) / sz * sz; new_rx_ring_size = min_t(u32, new_rx_ring_size, VMXNET3_RX_RING_MAX_SIZE / sz * sz); if (new_rx_ring_size > VMXNET3_RX_RING_MAX_SIZE || (new_rx_ring_size % sz) != 0) return -EINVAL; if (new_tx_ring_size == adapter->tx_queue[0].tx_ring.size && new_rx_ring_size == adapter->rx_queue[0].rx_ring[0].size) { return 0; } /* * Reset_work may be in the middle of resetting the device, wait for its * completion. */ while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) msleep(1); if (netif_running(netdev)) { vmxnet3_quiesce_dev(adapter); vmxnet3_reset_dev(adapter); /* recreate the rx queue and the tx queue based on the * new sizes */ vmxnet3_tq_destroy_all(adapter); vmxnet3_rq_destroy_all(adapter); err = vmxnet3_create_queues(adapter, new_tx_ring_size, new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE); if (err) { /* failed, most likely because of OOM, try default * size */ netdev_err(netdev, "failed to apply new sizes, " "try the default ones\n"); err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE, VMXNET3_DEF_RX_RING_SIZE, VMXNET3_DEF_RX_RING_SIZE); if (err) { netdev_err(netdev, "failed to create queues " "with default sizes. Closing it\n"); goto out; } } err = vmxnet3_activate_dev(adapter); if (err) netdev_err(netdev, "failed to re-activate, error %d." " Closing it\n", err); } out: clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); if (err) vmxnet3_force_close(adapter); return err; } static int vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info, u32 *rules) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); switch (info->cmd) { case ETHTOOL_GRXRINGS: info->data = adapter->num_rx_queues; return 0; } return -EOPNOTSUPP; } #ifdef VMXNET3_RSS static u32 vmxnet3_get_rss_indir_size(struct net_device *netdev) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); struct UPT1_RSSConf *rssConf = adapter->rss_conf; return rssConf->indTableSize; } static int vmxnet3_get_rss_indir(struct net_device *netdev, u32 *p) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); struct UPT1_RSSConf *rssConf = adapter->rss_conf; unsigned int n = rssConf->indTableSize; while (n--) p[n] = rssConf->indTable[n]; return 0; } static int vmxnet3_set_rss_indir(struct net_device *netdev, const u32 *p) { unsigned int i; unsigned long flags; struct vmxnet3_adapter *adapter = netdev_priv(netdev); struct UPT1_RSSConf *rssConf = adapter->rss_conf; for (i = 0; i < rssConf->indTableSize; i++) rssConf->indTable[i] = p[i]; spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RSSIDT); spin_unlock_irqrestore(&adapter->cmd_lock, flags); return 0; } #endif static const struct ethtool_ops vmxnet3_ethtool_ops = { .get_settings = vmxnet3_get_settings, .get_drvinfo = vmxnet3_get_drvinfo, .get_regs_len = vmxnet3_get_regs_len, .get_regs = vmxnet3_get_regs, .get_wol = vmxnet3_get_wol, .set_wol = vmxnet3_set_wol, .get_link = ethtool_op_get_link, .get_strings = vmxnet3_get_strings, .get_sset_count = vmxnet3_get_sset_count, .get_ethtool_stats = vmxnet3_get_ethtool_stats, .get_ringparam = vmxnet3_get_ringparam, .set_ringparam = vmxnet3_set_ringparam, .get_rxnfc = vmxnet3_get_rxnfc, #ifdef VMXNET3_RSS .get_rxfh_indir_size = vmxnet3_get_rss_indir_size, .get_rxfh_indir = vmxnet3_get_rss_indir, .set_rxfh_indir = vmxnet3_set_rss_indir, #endif }; void vmxnet3_set_ethtool_ops(struct net_device *netdev) { SET_ETHTOOL_OPS(netdev, 
&vmxnet3_ethtool_ops); }
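/*
 * Reader's note (added commentary, not from the original driver): ethtool
 * userspace pairs the strings emitted by vmxnet3_get_strings() with the
 * u64 values emitted by vmxnet3_get_ethtool_stats() strictly by index, so
 * both callbacks must walk the stat tables in the same order: per-tx-queue
 * stats, per-rx-queue stats, then globals. Roughly what `ethtool -S` does
 * with the two arrays (sketch, hypothetical variable names):
 *
 *	for (i = 0; i < n_stats; i++)
 *		printf("%.*s: %llu\n", ETH_GSTRING_LEN,
 *		       strings + i * ETH_GSTRING_LEN,
 *		       (unsigned long long)values[i]);
 */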
gpl-2.0
CyanogenMod/android_kernel_samsung_msm8660-q1
fs/ocfs2/dir.c
2868
118472
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * dir.c * * Creates, reads, walks and deletes directory-nodes * * Copyright (C) 2002, 2004 Oracle. All rights reserved. * * Portions of this code from linux/fs/ext3/dir.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/dir.c * * Copyright (C) 1991, 1992 Linus Torvalds * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. */ #include <linux/fs.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/highmem.h> #include <linux/quotaops.h> #include <linux/sort.h> #include <cluster/masklog.h> #include "ocfs2.h" #include "alloc.h" #include "blockcheck.h" #include "dir.h" #include "dlmglue.h" #include "extent_map.h" #include "file.h" #include "inode.h" #include "journal.h" #include "namei.h" #include "suballoc.h" #include "super.h" #include "sysfile.h" #include "uptodate.h" #include "ocfs2_trace.h" #include "buffer_head_io.h" #define NAMEI_RA_CHUNKS 2 #define NAMEI_RA_BLOCKS 4 #define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS) #define NAMEI_RA_INDEX(c,b) (((c) * NAMEI_RA_BLOCKS) + (b)) static unsigned char ocfs2_filetype_table[] = { DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK }; static int ocfs2_do_extend_dir(struct super_block *sb, handle_t *handle, struct inode *dir, struct buffer_head *parent_fe_bh, struct ocfs2_alloc_context *data_ac, struct ocfs2_alloc_context *meta_ac, struct buffer_head **new_bh); static int ocfs2_dir_indexed(struct inode *inode); /* * These are distinct checks because future versions of the file system will * want to have a trailing dirent structure independent of indexing. */ static int ocfs2_supports_dir_trailer(struct inode *dir) { struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) return 0; return ocfs2_meta_ecc(osb) || ocfs2_dir_indexed(dir); } /* * "new" here refers to the point at which we're creating a new * directory via "mkdir()", but also when we're expanding an inline * directory. In either case, we don't yet have the indexing bit set * on the directory, so the standard checks will fail when metaecc * is turned off. Only directory-initialization type functions should * use this then.
Everything else wants ocfs2_supports_dir_trailer() */ static int ocfs2_new_dir_wants_trailer(struct inode *dir) { struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); return ocfs2_meta_ecc(osb) || ocfs2_supports_indexed_dirs(osb); } static inline unsigned int ocfs2_dir_trailer_blk_off(struct super_block *sb) { return sb->s_blocksize - sizeof(struct ocfs2_dir_block_trailer); } #define ocfs2_trailer_from_bh(_bh, _sb) ((struct ocfs2_dir_block_trailer *) ((_bh)->b_data + ocfs2_dir_trailer_blk_off((_sb)))) /* XXX ocfs2_block_dqtrailer() is similar but not quite - can we make * them more consistent? */ struct ocfs2_dir_block_trailer *ocfs2_dir_trailer_from_size(int blocksize, void *data) { char *p = data; p += blocksize - sizeof(struct ocfs2_dir_block_trailer); return (struct ocfs2_dir_block_trailer *)p; } /* * XXX: This is executed once on every dirent. We should consider optimizing * it. */ static int ocfs2_skip_dir_trailer(struct inode *dir, struct ocfs2_dir_entry *de, unsigned long offset, unsigned long blklen) { unsigned long toff = blklen - sizeof(struct ocfs2_dir_block_trailer); if (!ocfs2_supports_dir_trailer(dir)) return 0; if (offset != toff) return 0; return 1; } static void ocfs2_init_dir_trailer(struct inode *inode, struct buffer_head *bh, u16 rec_len) { struct ocfs2_dir_block_trailer *trailer; trailer = ocfs2_trailer_from_bh(bh, inode->i_sb); strcpy(trailer->db_signature, OCFS2_DIR_TRAILER_SIGNATURE); trailer->db_compat_rec_len = cpu_to_le16(sizeof(struct ocfs2_dir_block_trailer)); trailer->db_parent_dinode = cpu_to_le64(OCFS2_I(inode)->ip_blkno); trailer->db_blkno = cpu_to_le64(bh->b_blocknr); trailer->db_free_rec_len = cpu_to_le16(rec_len); } /* * Link an unindexed block with a dir trailer structure into the index free * list. This function will modify dirdata_bh, but assumes you've already * passed it to the journal. 
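* (Added note: the free list is singly linked - each block's trailer
* db_free_next points at the next block with free dirent space, and
* dx_root->dr_free_blk holds the list head - so this insert is a
* constant-time push onto the front.)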
*/ static int ocfs2_dx_dir_link_trailer(struct inode *dir, handle_t *handle, struct buffer_head *dx_root_bh, struct buffer_head *dirdata_bh) { int ret; struct ocfs2_dx_root_block *dx_root; struct ocfs2_dir_block_trailer *trailer; ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } trailer = ocfs2_trailer_from_bh(dirdata_bh, dir->i_sb); dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data; trailer->db_free_next = dx_root->dr_free_blk; dx_root->dr_free_blk = cpu_to_le64(dirdata_bh->b_blocknr); ocfs2_journal_dirty(handle, dx_root_bh); out: return ret; } static int ocfs2_free_list_at_root(struct ocfs2_dir_lookup_result *res) { return res->dl_prev_leaf_bh == NULL; } void ocfs2_free_dir_lookup_result(struct ocfs2_dir_lookup_result *res) { brelse(res->dl_dx_root_bh); brelse(res->dl_leaf_bh); brelse(res->dl_dx_leaf_bh); brelse(res->dl_prev_leaf_bh); } static int ocfs2_dir_indexed(struct inode *inode) { if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INDEXED_DIR_FL) return 1; return 0; } static inline int ocfs2_dx_root_inline(struct ocfs2_dx_root_block *dx_root) { return dx_root->dr_flags & OCFS2_DX_FLAG_INLINE; } /* * Hashing code adapted from ext3 */ #define DELTA 0x9E3779B9 static void TEA_transform(__u32 buf[4], __u32 const in[]) { __u32 sum = 0; __u32 b0 = buf[0], b1 = buf[1]; __u32 a = in[0], b = in[1], c = in[2], d = in[3]; int n = 16; do { sum += DELTA; b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b); b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d); } while (--n); buf[0] += b0; buf[1] += b1; } static void str2hashbuf(const char *msg, int len, __u32 *buf, int num) { __u32 pad, val; int i; pad = (__u32)len | ((__u32)len << 8); pad |= pad << 16; val = pad; if (len > num*4) len = num * 4; for (i = 0; i < len; i++) { if ((i % 4) == 0) val = pad; val = msg[i] + (val << 8); if ((i % 4) == 3) { *buf++ = val; val = pad; num--; } } if (--num >= 0) *buf++ = val; while (--num >= 0) *buf++ = pad; } static void ocfs2_dx_dir_name_hash(struct inode *dir, const char *name, int len, struct ocfs2_dx_hinfo *hinfo) { struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); const char *p; __u32 in[8], buf[4]; /* * XXX: Is this really necessary, if the index is never looked * at by readdir? Is a hash value of '0' a bad idea? */ if ((len == 1 && !strncmp(".", name, 1)) || (len == 2 && !strncmp("..", name, 2))) { buf[0] = buf[1] = 0; goto out; } #ifdef OCFS2_DEBUG_DX_DIRS /* * This makes it very easy to debug indexing problems. We * should never allow this to be selected without hand editing * this file though. */ buf[0] = buf[1] = len; goto out; #endif memcpy(buf, osb->osb_dx_seed, sizeof(buf)); p = name; while (len > 0) { str2hashbuf(p, len, in, 4); TEA_transform(buf, in); len -= 16; p += 16; } out: hinfo->major_hash = buf[0]; hinfo->minor_hash = buf[1]; } /* * bh passed here can be an inode block or a dir data block, depending * on the inode inline data flag. 
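* (Added note: for an inline directory the dirents live in the inode
* body, so ocfs2_find_entry_id() hands this function the inode block
* itself; the extent-list path hands it an ordinary directory data
* block.)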
*/ static int ocfs2_check_dir_entry(struct inode * dir, struct ocfs2_dir_entry * de, struct buffer_head * bh, unsigned long offset) { const char *error_msg = NULL; const int rlen = le16_to_cpu(de->rec_len); if (unlikely(rlen < OCFS2_DIR_REC_LEN(1))) error_msg = "rec_len is smaller than minimal"; else if (unlikely(rlen % 4 != 0)) error_msg = "rec_len % 4 != 0"; else if (unlikely(rlen < OCFS2_DIR_REC_LEN(de->name_len))) error_msg = "rec_len is too small for name_len"; else if (unlikely( ((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)) error_msg = "directory entry across blocks"; if (unlikely(error_msg != NULL)) mlog(ML_ERROR, "bad entry in directory #%llu: %s - " "offset=%lu, inode=%llu, rec_len=%d, name_len=%d\n", (unsigned long long)OCFS2_I(dir)->ip_blkno, error_msg, offset, (unsigned long long)le64_to_cpu(de->inode), rlen, de->name_len); return error_msg == NULL ? 1 : 0; } static inline int ocfs2_match(int len, const char * const name, struct ocfs2_dir_entry *de) { if (len != de->name_len) return 0; if (!de->inode) return 0; return !memcmp(name, de->name, len); } /* * Returns 0 if not found, -1 on failure, and 1 on success */ static inline int ocfs2_search_dirblock(struct buffer_head *bh, struct inode *dir, const char *name, int namelen, unsigned long offset, char *first_de, unsigned int bytes, struct ocfs2_dir_entry **res_dir) { struct ocfs2_dir_entry *de; char *dlimit, *de_buf; int de_len; int ret = 0; de_buf = first_de; dlimit = de_buf + bytes; while (de_buf < dlimit) { /* this code is executed quadratically often */ /* do minimal checking `by hand' */ de = (struct ocfs2_dir_entry *) de_buf; if (de_buf + namelen <= dlimit && ocfs2_match(namelen, name, de)) { /* found a match - just to be sure, do a full check */ if (!ocfs2_check_dir_entry(dir, de, bh, offset)) { ret = -1; goto bail; } *res_dir = de; ret = 1; goto bail; } /* prevent looping on a bad block */ de_len = le16_to_cpu(de->rec_len); if (de_len <= 0) { ret = -1; goto bail; } de_buf += de_len; offset += de_len; } bail: trace_ocfs2_search_dirblock(ret); return ret; } static struct buffer_head *ocfs2_find_entry_id(const char *name, int namelen, struct inode *dir, struct ocfs2_dir_entry **res_dir) { int ret, found; struct buffer_head *di_bh = NULL; struct ocfs2_dinode *di; struct ocfs2_inline_data *data; ret = ocfs2_read_inode_block(dir, &di_bh); if (ret) { mlog_errno(ret); goto out; } di = (struct ocfs2_dinode *)di_bh->b_data; data = &di->id2.i_data; found = ocfs2_search_dirblock(di_bh, dir, name, namelen, 0, data->id_data, i_size_read(dir), res_dir); if (found == 1) return di_bh; brelse(di_bh); out: return NULL; } static int ocfs2_validate_dir_block(struct super_block *sb, struct buffer_head *bh) { int rc; struct ocfs2_dir_block_trailer *trailer = ocfs2_trailer_from_bh(bh, sb); /* * We don't validate dirents here, that's handled * in-place when the code walks them. */ trace_ocfs2_validate_dir_block((unsigned long long)bh->b_blocknr); BUG_ON(!buffer_uptodate(bh)); /* * If the ecc fails, we return the error but otherwise * leave the filesystem running. We know any error is * local to this block. * * Note that we are safe to call this even if the directory * doesn't have a trailer. Filesystems without metaecc will do * nothing, and filesystems with it will have one. */ rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &trailer->db_check); if (rc) mlog(ML_ERROR, "Checksum failed for dinode %llu\n", (unsigned long long)bh->b_blocknr); return rc; } /* * Validate a directory trailer. 
* * We check the trailer here rather than in ocfs2_validate_dir_block() * because that function doesn't have the inode to test. */ static int ocfs2_check_dir_trailer(struct inode *dir, struct buffer_head *bh) { int rc = 0; struct ocfs2_dir_block_trailer *trailer; trailer = ocfs2_trailer_from_bh(bh, dir->i_sb); if (!OCFS2_IS_VALID_DIR_TRAILER(trailer)) { rc = -EINVAL; ocfs2_error(dir->i_sb, "Invalid dirblock #%llu: " "signature = %.*s\n", (unsigned long long)bh->b_blocknr, 7, trailer->db_signature); goto out; } if (le64_to_cpu(trailer->db_blkno) != bh->b_blocknr) { rc = -EINVAL; ocfs2_error(dir->i_sb, "Directory block #%llu has an invalid " "db_blkno of %llu", (unsigned long long)bh->b_blocknr, (unsigned long long)le64_to_cpu(trailer->db_blkno)); goto out; } if (le64_to_cpu(trailer->db_parent_dinode) != OCFS2_I(dir)->ip_blkno) { rc = -EINVAL; /* report the offending parent_dinode field itself, not db_blkno */ ocfs2_error(dir->i_sb, "Directory block #%llu on dinode " "#%llu has an invalid parent_dinode " "of %llu", (unsigned long long)bh->b_blocknr, (unsigned long long)OCFS2_I(dir)->ip_blkno, (unsigned long long)le64_to_cpu(trailer->db_parent_dinode)); goto out; } out: return rc; } /* * This function forces all errors to -EIO for consistency with its * predecessor, ocfs2_bread(). We haven't audited what returning the * real error codes would do to callers. We log the real codes with * mlog_errno() before we squash them. */ static int ocfs2_read_dir_block(struct inode *inode, u64 v_block, struct buffer_head **bh, int flags) { int rc = 0; struct buffer_head *tmp = *bh; rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, flags, ocfs2_validate_dir_block); if (rc) { mlog_errno(rc); goto out; } if (!(flags & OCFS2_BH_READAHEAD) && ocfs2_supports_dir_trailer(inode)) { rc = ocfs2_check_dir_trailer(inode, tmp); if (rc) { if (!*bh) brelse(tmp); mlog_errno(rc); goto out; } } /* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */ if (!*bh) *bh = tmp; out: return rc ? -EIO : 0; } /* * Read the block at 'phys' which belongs to this directory * inode. This function does no virtual->physical block translation - * what's passed in is assumed to be a valid directory block. */ static int ocfs2_read_dir_block_direct(struct inode *dir, u64 phys, struct buffer_head **bh) { int ret; struct buffer_head *tmp = *bh; ret = ocfs2_read_block(INODE_CACHE(dir), phys, &tmp, ocfs2_validate_dir_block); if (ret) { mlog_errno(ret); goto out; } if (ocfs2_supports_dir_trailer(dir)) { ret = ocfs2_check_dir_trailer(dir, tmp); if (ret) { if (!*bh) brelse(tmp); mlog_errno(ret); goto out; } } if (!ret && !*bh) *bh = tmp; out: return ret; } static int ocfs2_validate_dx_root(struct super_block *sb, struct buffer_head *bh) { int ret; struct ocfs2_dx_root_block *dx_root; BUG_ON(!buffer_uptodate(bh)); dx_root = (struct ocfs2_dx_root_block *) bh->b_data; ret = ocfs2_validate_meta_ecc(sb, bh->b_data, &dx_root->dr_check); if (ret) { mlog(ML_ERROR, "Checksum failed for dir index root block %llu\n", (unsigned long long)bh->b_blocknr); return ret; } if (!OCFS2_IS_VALID_DX_ROOT(dx_root)) { ocfs2_error(sb, "Dir Index Root # %llu has bad signature %.*s", (unsigned long long)le64_to_cpu(dx_root->dr_blkno), 7, dx_root->dr_signature); return -EINVAL; } return 0; } static int ocfs2_read_dx_root(struct inode *dir, struct ocfs2_dinode *di, struct buffer_head **dx_root_bh) { int ret; u64 blkno = le64_to_cpu(di->i_dx_root); struct buffer_head *tmp = *dx_root_bh; ret = ocfs2_read_block(INODE_CACHE(dir), blkno, &tmp, ocfs2_validate_dx_root); /* If ocfs2_read_block() got us a new bh, pass it up.
*/ if (!ret && !*dx_root_bh) *dx_root_bh = tmp; return ret; } static int ocfs2_validate_dx_leaf(struct super_block *sb, struct buffer_head *bh) { int ret; struct ocfs2_dx_leaf *dx_leaf = (struct ocfs2_dx_leaf *)bh->b_data; BUG_ON(!buffer_uptodate(bh)); ret = ocfs2_validate_meta_ecc(sb, bh->b_data, &dx_leaf->dl_check); if (ret) { mlog(ML_ERROR, "Checksum failed for dir index leaf block %llu\n", (unsigned long long)bh->b_blocknr); return ret; } if (!OCFS2_IS_VALID_DX_LEAF(dx_leaf)) { ocfs2_error(sb, "Dir Index Leaf has bad signature %.*s", 7, dx_leaf->dl_signature); return -EROFS; } return 0; } static int ocfs2_read_dx_leaf(struct inode *dir, u64 blkno, struct buffer_head **dx_leaf_bh) { int ret; struct buffer_head *tmp = *dx_leaf_bh; ret = ocfs2_read_block(INODE_CACHE(dir), blkno, &tmp, ocfs2_validate_dx_leaf); /* If ocfs2_read_block() got us a new bh, pass it up. */ if (!ret && !*dx_leaf_bh) *dx_leaf_bh = tmp; return ret; } /* * Read a series of dx_leaf blocks. This expects all buffer_head * pointers to be NULL on function entry. */ static int ocfs2_read_dx_leaves(struct inode *dir, u64 start, int num, struct buffer_head **dx_leaf_bhs) { int ret; ret = ocfs2_read_blocks(INODE_CACHE(dir), start, num, dx_leaf_bhs, 0, ocfs2_validate_dx_leaf); if (ret) mlog_errno(ret); return ret; } static struct buffer_head *ocfs2_find_entry_el(const char *name, int namelen, struct inode *dir, struct ocfs2_dir_entry **res_dir) { struct super_block *sb; struct buffer_head *bh_use[NAMEI_RA_SIZE]; struct buffer_head *bh, *ret = NULL; unsigned long start, block, b; int ra_max = 0; /* Number of bh's in the readahead buffer, bh_use[] */ int ra_ptr = 0; /* Current index into readahead buffer */ int num = 0; int nblocks, i, err; sb = dir->i_sb; nblocks = i_size_read(dir) >> sb->s_blocksize_bits; start = OCFS2_I(dir)->ip_dir_start_lookup; if (start >= nblocks) start = 0; block = start; restart: do { /* * We deal with the read-ahead logic here. */ if (ra_ptr >= ra_max) { /* Refill the readahead buffer */ ra_ptr = 0; b = block; for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) { /* * Terminate if we reach the end of the * directory and must wrap, or if our * search has finished at this block. */ if (b >= nblocks || (num && block == start)) { bh_use[ra_max] = NULL; break; } num++; bh = NULL; err = ocfs2_read_dir_block(dir, b++, &bh, OCFS2_BH_READAHEAD); bh_use[ra_max] = bh; } } if ((bh = bh_use[ra_ptr++]) == NULL) goto next; if (ocfs2_read_dir_block(dir, block, &bh, 0)) { /* read error, skip block & hope for the best. * ocfs2_read_dir_block() has released the bh. */ ocfs2_error(dir->i_sb, "reading directory %llu, " "offset %lu\n", (unsigned long long)OCFS2_I(dir)->ip_blkno, block); goto next; } i = ocfs2_search_dirblock(bh, dir, name, namelen, block << sb->s_blocksize_bits, bh->b_data, sb->s_blocksize, res_dir); if (i == 1) { OCFS2_I(dir)->ip_dir_start_lookup = block; ret = bh; goto cleanup_and_exit; } else { brelse(bh); if (i < 0) goto cleanup_and_exit; } next: if (++block >= nblocks) block = 0; } while (block != start); /* * If the directory has grown while we were searching, then * search the last part of the directory before giving up. 
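* (Added note: nblocks was computed from i_size_read() before the scan
* started; sampling the size again below catches blocks appended to the
* directory by a concurrent insert.)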
*/ block = nblocks; nblocks = i_size_read(dir) >> sb->s_blocksize_bits; if (block < nblocks) { start = 0; goto restart; } cleanup_and_exit: /* Clean up the read-ahead blocks */ for (; ra_ptr < ra_max; ra_ptr++) brelse(bh_use[ra_ptr]); trace_ocfs2_find_entry_el(ret); return ret; } static int ocfs2_dx_dir_lookup_rec(struct inode *inode, struct ocfs2_extent_list *el, u32 major_hash, u32 *ret_cpos, u64 *ret_phys_blkno, unsigned int *ret_clen) { int ret = 0, i, found; struct buffer_head *eb_bh = NULL; struct ocfs2_extent_block *eb; struct ocfs2_extent_rec *rec = NULL; if (el->l_tree_depth) { ret = ocfs2_find_leaf(INODE_CACHE(inode), el, major_hash, &eb_bh); if (ret) { mlog_errno(ret); goto out; } eb = (struct ocfs2_extent_block *) eb_bh->b_data; el = &eb->h_list; if (el->l_tree_depth) { ocfs2_error(inode->i_sb, "Inode %lu has non zero tree depth in " "btree tree block %llu\n", inode->i_ino, (unsigned long long)eb_bh->b_blocknr); ret = -EROFS; goto out; } } found = 0; for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) { rec = &el->l_recs[i]; if (le32_to_cpu(rec->e_cpos) <= major_hash) { found = 1; break; } } if (!found) { ocfs2_error(inode->i_sb, "Inode %lu has bad extent " "record (%u, %u, 0) in btree", inode->i_ino, le32_to_cpu(rec->e_cpos), ocfs2_rec_clusters(el, rec)); ret = -EROFS; goto out; } if (ret_phys_blkno) *ret_phys_blkno = le64_to_cpu(rec->e_blkno); if (ret_cpos) *ret_cpos = le32_to_cpu(rec->e_cpos); if (ret_clen) *ret_clen = le16_to_cpu(rec->e_leaf_clusters); out: brelse(eb_bh); return ret; } /* * Returns the block index, from the start of the cluster, to which this * hash belongs. */ static inline unsigned int __ocfs2_dx_dir_hash_idx(struct ocfs2_super *osb, u32 minor_hash) { return minor_hash & osb->osb_dx_mask; } static inline unsigned int ocfs2_dx_dir_hash_idx(struct ocfs2_super *osb, struct ocfs2_dx_hinfo *hinfo) { return __ocfs2_dx_dir_hash_idx(osb, hinfo->minor_hash); } static int ocfs2_dx_dir_lookup(struct inode *inode, struct ocfs2_extent_list *el, struct ocfs2_dx_hinfo *hinfo, u32 *ret_cpos, u64 *ret_phys_blkno) { int ret = 0; unsigned int cend, uninitialized_var(clen); u32 uninitialized_var(cpos); u64 uninitialized_var(blkno); u32 name_hash = hinfo->major_hash; ret = ocfs2_dx_dir_lookup_rec(inode, el, name_hash, &cpos, &blkno, &clen); if (ret) { mlog_errno(ret); goto out; } cend = cpos + clen; if (name_hash >= cend) { /* We want the last cluster */ blkno += ocfs2_clusters_to_blocks(inode->i_sb, clen - 1); cpos += clen - 1; } else { blkno += ocfs2_clusters_to_blocks(inode->i_sb, name_hash - cpos); cpos = name_hash; } /* * We now have the cluster which should hold our entry. To * find the exact block from the start of the cluster to * search, we take the lower bits of the hash.
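* A worked example (illustrative geometry, not from this code): with 4K
* blocks and 32K clusters there are 8 blocks per cluster, osb_dx_mask is
* 0x7, and minor_hash 0x1234abcd selects block (0x1234abcd & 0x7) == 5
* within the cluster.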
*/ blkno += ocfs2_dx_dir_hash_idx(OCFS2_SB(inode->i_sb), hinfo); if (ret_phys_blkno) *ret_phys_blkno = blkno; if (ret_cpos) *ret_cpos = cpos; out: return ret; } static int ocfs2_dx_dir_search(const char *name, int namelen, struct inode *dir, struct ocfs2_dx_root_block *dx_root, struct ocfs2_dir_lookup_result *res) { int ret, i, found; u64 uninitialized_var(phys); struct buffer_head *dx_leaf_bh = NULL; struct ocfs2_dx_leaf *dx_leaf; struct ocfs2_dx_entry *dx_entry = NULL; struct buffer_head *dir_ent_bh = NULL; struct ocfs2_dir_entry *dir_ent = NULL; struct ocfs2_dx_hinfo *hinfo = &res->dl_hinfo; struct ocfs2_extent_list *dr_el; struct ocfs2_dx_entry_list *entry_list; ocfs2_dx_dir_name_hash(dir, name, namelen, &res->dl_hinfo); if (ocfs2_dx_root_inline(dx_root)) { entry_list = &dx_root->dr_entries; goto search; } dr_el = &dx_root->dr_list; ret = ocfs2_dx_dir_lookup(dir, dr_el, hinfo, NULL, &phys); if (ret) { mlog_errno(ret); goto out; } trace_ocfs2_dx_dir_search((unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name, hinfo->major_hash, hinfo->minor_hash, (unsigned long long)phys); ret = ocfs2_read_dx_leaf(dir, phys, &dx_leaf_bh); if (ret) { mlog_errno(ret); goto out; } dx_leaf = (struct ocfs2_dx_leaf *) dx_leaf_bh->b_data; trace_ocfs2_dx_dir_search_leaf_info( le16_to_cpu(dx_leaf->dl_list.de_num_used), le16_to_cpu(dx_leaf->dl_list.de_count)); entry_list = &dx_leaf->dl_list; search: /* * Empty leaf is legal, so no need to check for that. */ found = 0; for (i = 0; i < le16_to_cpu(entry_list->de_num_used); i++) { dx_entry = &entry_list->de_entries[i]; if (hinfo->major_hash != le32_to_cpu(dx_entry->dx_major_hash) || hinfo->minor_hash != le32_to_cpu(dx_entry->dx_minor_hash)) continue; /* * Search unindexed leaf block now. We're not * guaranteed to find anything. */ ret = ocfs2_read_dir_block_direct(dir, le64_to_cpu(dx_entry->dx_dirent_blk), &dir_ent_bh); if (ret) { mlog_errno(ret); goto out; } /* * XXX: We should check the unindexed block here, * before using it. */ found = ocfs2_search_dirblock(dir_ent_bh, dir, name, namelen, 0, dir_ent_bh->b_data, dir->i_sb->s_blocksize, &dir_ent); if (found == 1) break; if (found == -1) { /* This means we found a bad directory entry. */ ret = -EIO; mlog_errno(ret); goto out; } brelse(dir_ent_bh); dir_ent_bh = NULL; } if (found <= 0) { ret = -ENOENT; goto out; } res->dl_leaf_bh = dir_ent_bh; res->dl_entry = dir_ent; res->dl_dx_leaf_bh = dx_leaf_bh; res->dl_dx_entry = dx_entry; ret = 0; out: if (ret) { brelse(dx_leaf_bh); brelse(dir_ent_bh); } return ret; } static int ocfs2_find_entry_dx(const char *name, int namelen, struct inode *dir, struct ocfs2_dir_lookup_result *lookup) { int ret; struct buffer_head *di_bh = NULL; struct ocfs2_dinode *di; struct buffer_head *dx_root_bh = NULL; struct ocfs2_dx_root_block *dx_root; ret = ocfs2_read_inode_block(dir, &di_bh); if (ret) { mlog_errno(ret); goto out; } di = (struct ocfs2_dinode *)di_bh->b_data; ret = ocfs2_read_dx_root(dir, di, &dx_root_bh); if (ret) { mlog_errno(ret); goto out; } dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data; ret = ocfs2_dx_dir_search(name, namelen, dir, dx_root, lookup); if (ret) { if (ret != -ENOENT) mlog_errno(ret); goto out; } lookup->dl_dx_root_bh = dx_root_bh; dx_root_bh = NULL; out: brelse(di_bh); brelse(dx_root_bh); return ret; } /* * Try to find an entry of the provided name within 'dir'. * * If nothing was found, -ENOENT is returned. Otherwise, zero is * returned and the struct 'res' will contain information useful to * other directory manipulation functions. 
* * Caller can NOT assume anything about the contents of the * buffer_heads - they are passed back only so that it can be passed * into any one of the manipulation functions (add entry, delete * entry, etc). As an example, bh in the extent directory case is a * data block, in the inline-data case it actually points to an inode, * in the indexed directory case, multiple buffers are involved. */ int ocfs2_find_entry(const char *name, int namelen, struct inode *dir, struct ocfs2_dir_lookup_result *lookup) { struct buffer_head *bh; struct ocfs2_dir_entry *res_dir = NULL; if (ocfs2_dir_indexed(dir)) return ocfs2_find_entry_dx(name, namelen, dir, lookup); /* * The unindexed dir code only uses part of the lookup * structure, so there's no reason to push it down further * than this. */ if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) bh = ocfs2_find_entry_id(name, namelen, dir, &res_dir); else bh = ocfs2_find_entry_el(name, namelen, dir, &res_dir); if (bh == NULL) return -ENOENT; lookup->dl_leaf_bh = bh; lookup->dl_entry = res_dir; return 0; } /* * Update inode number and type of a previously found directory entry. */ int ocfs2_update_entry(struct inode *dir, handle_t *handle, struct ocfs2_dir_lookup_result *res, struct inode *new_entry_inode) { int ret; ocfs2_journal_access_func access = ocfs2_journal_access_db; struct ocfs2_dir_entry *de = res->dl_entry; struct buffer_head *de_bh = res->dl_leaf_bh; /* * The same code works fine for both inline-data and extent * based directories, so no need to split this up. The only * difference is the journal_access function. */ if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) access = ocfs2_journal_access_di; ret = access(handle, INODE_CACHE(dir), de_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } de->inode = cpu_to_le64(OCFS2_I(new_entry_inode)->ip_blkno); ocfs2_set_de_type(de, new_entry_inode->i_mode); ocfs2_journal_dirty(handle, de_bh); out: return ret; } /* * __ocfs2_delete_entry deletes a directory entry by merging it with the * previous entry */ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir, struct ocfs2_dir_entry *de_del, struct buffer_head *bh, char *first_de, unsigned int bytes) { struct ocfs2_dir_entry *de, *pde; int i, status = -ENOENT; ocfs2_journal_access_func access = ocfs2_journal_access_db; if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) access = ocfs2_journal_access_di; i = 0; pde = NULL; de = (struct ocfs2_dir_entry *) first_de; while (i < bytes) { if (!ocfs2_check_dir_entry(dir, de, bh, i)) { status = -EIO; mlog_errno(status); goto bail; } if (de == de_del) { status = access(handle, INODE_CACHE(dir), bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { status = -EIO; mlog_errno(status); goto bail; } if (pde) le16_add_cpu(&pde->rec_len, le16_to_cpu(de->rec_len)); else de->inode = 0; dir->i_version++; ocfs2_journal_dirty(handle, bh); goto bail; } i += le16_to_cpu(de->rec_len); pde = de; de = (struct ocfs2_dir_entry *)((char *)de + le16_to_cpu(de->rec_len)); } bail: return status; } static unsigned int ocfs2_figure_dirent_hole(struct ocfs2_dir_entry *de) { unsigned int hole; if (le64_to_cpu(de->inode) == 0) hole = le16_to_cpu(de->rec_len); else hole = le16_to_cpu(de->rec_len) - OCFS2_DIR_REC_LEN(de->name_len); return hole; } static int ocfs2_find_max_rec_len(struct super_block *sb, struct buffer_head *dirblock_bh) { int size, this_hole, largest_hole = 0; char *trailer, *de_buf, *limit, *start = dirblock_bh->b_data; struct ocfs2_dir_entry *de; trailer = (char 
*)ocfs2_trailer_from_bh(dirblock_bh, sb); size = ocfs2_dir_trailer_blk_off(sb); limit = start + size; de_buf = start; de = (struct ocfs2_dir_entry *)de_buf; do { if (de_buf != trailer) { this_hole = ocfs2_figure_dirent_hole(de); if (this_hole > largest_hole) largest_hole = this_hole; } de_buf += le16_to_cpu(de->rec_len); de = (struct ocfs2_dir_entry *)de_buf; } while (de_buf < limit); if (largest_hole >= OCFS2_DIR_MIN_REC_LEN) return largest_hole; return 0; } static void ocfs2_dx_list_remove_entry(struct ocfs2_dx_entry_list *entry_list, int index) { int num_used = le16_to_cpu(entry_list->de_num_used); if (num_used == 1 || index == (num_used - 1)) goto clear; memmove(&entry_list->de_entries[index], &entry_list->de_entries[index + 1], (num_used - index - 1)*sizeof(struct ocfs2_dx_entry)); clear: num_used--; memset(&entry_list->de_entries[num_used], 0, sizeof(struct ocfs2_dx_entry)); entry_list->de_num_used = cpu_to_le16(num_used); } static int ocfs2_delete_entry_dx(handle_t *handle, struct inode *dir, struct ocfs2_dir_lookup_result *lookup) { int ret, index, max_rec_len, add_to_free_list = 0; struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh; struct buffer_head *leaf_bh = lookup->dl_leaf_bh; struct ocfs2_dx_leaf *dx_leaf; struct ocfs2_dx_entry *dx_entry = lookup->dl_dx_entry; struct ocfs2_dir_block_trailer *trailer; struct ocfs2_dx_root_block *dx_root; struct ocfs2_dx_entry_list *entry_list; /* * This function gets a bit messy because we might have to * modify the root block, regardless of whether the indexed * entries are stored inline. */ /* * *Only* set 'entry_list' here, based on where we're looking * for the indexed entries. Later, we might still want to * journal both blocks, based on free list state. */ dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data; if (ocfs2_dx_root_inline(dx_root)) { entry_list = &dx_root->dr_entries; } else { dx_leaf = (struct ocfs2_dx_leaf *) lookup->dl_dx_leaf_bh->b_data; entry_list = &dx_leaf->dl_list; } /* Neither of these are a disk corruption - that should have * been caught by lookup, before we got here. */ BUG_ON(le16_to_cpu(entry_list->de_count) <= 0); BUG_ON(le16_to_cpu(entry_list->de_num_used) <= 0); index = (char *)dx_entry - (char *)entry_list->de_entries; index /= sizeof(*dx_entry); if (index >= le16_to_cpu(entry_list->de_num_used)) { mlog(ML_ERROR, "Dir %llu: Bad dx_entry ptr idx %d, (%p, %p)\n", (unsigned long long)OCFS2_I(dir)->ip_blkno, index, entry_list, dx_entry); return -EIO; } /* * We know that removal of this dirent will leave enough room * for a new one, so add this block to the free list if it * isn't already there. */ trailer = ocfs2_trailer_from_bh(leaf_bh, dir->i_sb); if (trailer->db_free_rec_len == 0) add_to_free_list = 1; /* * Add the block holding our index into the journal before * removing the unindexed entry. If we get an error return * from __ocfs2_delete_entry(), then it hasn't removed the * entry yet. Likewise, successful return means we *must* * remove the indexed entry. * * We're also careful to journal the root tree block here as * the entry count needs to be updated. Also, we might be * adding to the start of the free list. 
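* (Added note: this is why dx_root_bh is journaled unconditionally just
* below, even when the index entries themselves live in a separate
* dx_leaf block.)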
*/ ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } if (!ocfs2_dx_root_inline(dx_root)) { ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), lookup->dl_dx_leaf_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } } trace_ocfs2_delete_entry_dx((unsigned long long)OCFS2_I(dir)->ip_blkno, index); ret = __ocfs2_delete_entry(handle, dir, lookup->dl_entry, leaf_bh, leaf_bh->b_data, leaf_bh->b_size); if (ret) { mlog_errno(ret); goto out; } max_rec_len = ocfs2_find_max_rec_len(dir->i_sb, leaf_bh); trailer->db_free_rec_len = cpu_to_le16(max_rec_len); if (add_to_free_list) { trailer->db_free_next = dx_root->dr_free_blk; dx_root->dr_free_blk = cpu_to_le64(leaf_bh->b_blocknr); ocfs2_journal_dirty(handle, dx_root_bh); } /* leaf_bh was journal_accessed for us in __ocfs2_delete_entry */ ocfs2_journal_dirty(handle, leaf_bh); le32_add_cpu(&dx_root->dr_num_entries, -1); ocfs2_journal_dirty(handle, dx_root_bh); ocfs2_dx_list_remove_entry(entry_list, index); if (!ocfs2_dx_root_inline(dx_root)) ocfs2_journal_dirty(handle, lookup->dl_dx_leaf_bh); out: return ret; } static inline int ocfs2_delete_entry_id(handle_t *handle, struct inode *dir, struct ocfs2_dir_entry *de_del, struct buffer_head *bh) { int ret; struct buffer_head *di_bh = NULL; struct ocfs2_dinode *di; struct ocfs2_inline_data *data; ret = ocfs2_read_inode_block(dir, &di_bh); if (ret) { mlog_errno(ret); goto out; } di = (struct ocfs2_dinode *)di_bh->b_data; data = &di->id2.i_data; ret = __ocfs2_delete_entry(handle, dir, de_del, bh, data->id_data, i_size_read(dir)); brelse(di_bh); out: return ret; } static inline int ocfs2_delete_entry_el(handle_t *handle, struct inode *dir, struct ocfs2_dir_entry *de_del, struct buffer_head *bh) { return __ocfs2_delete_entry(handle, dir, de_del, bh, bh->b_data, bh->b_size); } /* * Delete a directory entry. Hide the details of directory * implementation from the caller. */ int ocfs2_delete_entry(handle_t *handle, struct inode *dir, struct ocfs2_dir_lookup_result *res) { if (ocfs2_dir_indexed(dir)) return ocfs2_delete_entry_dx(handle, dir, res); if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) return ocfs2_delete_entry_id(handle, dir, res->dl_entry, res->dl_leaf_bh); return ocfs2_delete_entry_el(handle, dir, res->dl_entry, res->dl_leaf_bh); } /* * Check whether 'de' has enough room to hold an entry of * 'new_rec_len' bytes. */ static inline int ocfs2_dirent_would_fit(struct ocfs2_dir_entry *de, unsigned int new_rec_len) { unsigned int de_really_used; /* Check whether this is an empty record with enough space */ if (le64_to_cpu(de->inode) == 0 && le16_to_cpu(de->rec_len) >= new_rec_len) return 1; /* * Record might have free space at the end which we can * use. 
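* (Worked example, assuming the usual 12-byte fixed dirent header: a live
* entry with name_len == 4 really occupies OCFS2_DIR_REC_LEN(4) == 16
* bytes, so a rec_len of 64 leaves 48 bytes of slack for a new record.)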
*/ de_really_used = OCFS2_DIR_REC_LEN(de->name_len); if (le16_to_cpu(de->rec_len) >= (de_really_used + new_rec_len)) return 1; return 0; } static void ocfs2_dx_dir_leaf_insert_tail(struct ocfs2_dx_leaf *dx_leaf, struct ocfs2_dx_entry *dx_new_entry) { int i; i = le16_to_cpu(dx_leaf->dl_list.de_num_used); dx_leaf->dl_list.de_entries[i] = *dx_new_entry; le16_add_cpu(&dx_leaf->dl_list.de_num_used, 1); } static void ocfs2_dx_entry_list_insert(struct ocfs2_dx_entry_list *entry_list, struct ocfs2_dx_hinfo *hinfo, u64 dirent_blk) { int i; struct ocfs2_dx_entry *dx_entry; i = le16_to_cpu(entry_list->de_num_used); dx_entry = &entry_list->de_entries[i]; memset(dx_entry, 0, sizeof(*dx_entry)); dx_entry->dx_major_hash = cpu_to_le32(hinfo->major_hash); dx_entry->dx_minor_hash = cpu_to_le32(hinfo->minor_hash); dx_entry->dx_dirent_blk = cpu_to_le64(dirent_blk); le16_add_cpu(&entry_list->de_num_used, 1); } static int __ocfs2_dx_dir_leaf_insert(struct inode *dir, handle_t *handle, struct ocfs2_dx_hinfo *hinfo, u64 dirent_blk, struct buffer_head *dx_leaf_bh) { int ret; struct ocfs2_dx_leaf *dx_leaf; ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), dx_leaf_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } dx_leaf = (struct ocfs2_dx_leaf *)dx_leaf_bh->b_data; ocfs2_dx_entry_list_insert(&dx_leaf->dl_list, hinfo, dirent_blk); ocfs2_journal_dirty(handle, dx_leaf_bh); out: return ret; } static void ocfs2_dx_inline_root_insert(struct inode *dir, handle_t *handle, struct ocfs2_dx_hinfo *hinfo, u64 dirent_blk, struct ocfs2_dx_root_block *dx_root) { ocfs2_dx_entry_list_insert(&dx_root->dr_entries, hinfo, dirent_blk); } static int ocfs2_dx_dir_insert(struct inode *dir, handle_t *handle, struct ocfs2_dir_lookup_result *lookup) { int ret = 0; struct ocfs2_dx_root_block *dx_root; struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh; ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } dx_root = (struct ocfs2_dx_root_block *)lookup->dl_dx_root_bh->b_data; if (ocfs2_dx_root_inline(dx_root)) { ocfs2_dx_inline_root_insert(dir, handle, &lookup->dl_hinfo, lookup->dl_leaf_bh->b_blocknr, dx_root); } else { ret = __ocfs2_dx_dir_leaf_insert(dir, handle, &lookup->dl_hinfo, lookup->dl_leaf_bh->b_blocknr, lookup->dl_dx_leaf_bh); if (ret) goto out; } le32_add_cpu(&dx_root->dr_num_entries, 1); ocfs2_journal_dirty(handle, dx_root_bh); out: return ret; } static void ocfs2_remove_block_from_free_list(struct inode *dir, handle_t *handle, struct ocfs2_dir_lookup_result *lookup) { struct ocfs2_dir_block_trailer *trailer, *prev; struct ocfs2_dx_root_block *dx_root; struct buffer_head *bh; trailer = ocfs2_trailer_from_bh(lookup->dl_leaf_bh, dir->i_sb); if (ocfs2_free_list_at_root(lookup)) { bh = lookup->dl_dx_root_bh; dx_root = (struct ocfs2_dx_root_block *)bh->b_data; dx_root->dr_free_blk = trailer->db_free_next; } else { bh = lookup->dl_prev_leaf_bh; prev = ocfs2_trailer_from_bh(bh, dir->i_sb); prev->db_free_next = trailer->db_free_next; } trailer->db_free_rec_len = cpu_to_le16(0); trailer->db_free_next = cpu_to_le64(0); ocfs2_journal_dirty(handle, bh); ocfs2_journal_dirty(handle, lookup->dl_leaf_bh); } /* * This expects that a journal write has been reserved on * lookup->dl_prev_leaf_bh or lookup->dl_dx_root_bh */ static void ocfs2_recalc_free_list(struct inode *dir, handle_t *handle, struct ocfs2_dir_lookup_result *lookup) { int max_rec_len; struct ocfs2_dir_block_trailer *trailer; /* Walk dl_leaf_bh to figure out what the new 
free rec_len is. */ max_rec_len = ocfs2_find_max_rec_len(dir->i_sb, lookup->dl_leaf_bh); if (max_rec_len) { /* * There's still room in this block, so no need to remove it * from the free list. In this case, we just want to update * the rec len accounting. */ trailer = ocfs2_trailer_from_bh(lookup->dl_leaf_bh, dir->i_sb); trailer->db_free_rec_len = cpu_to_le16(max_rec_len); ocfs2_journal_dirty(handle, lookup->dl_leaf_bh); } else { ocfs2_remove_block_from_free_list(dir, handle, lookup); } } /* we don't always have a dentry for what we want to add, so people * like orphan dir can call this instead. * * The lookup context must have been filled from * ocfs2_prepare_dir_for_insert. */ int __ocfs2_add_entry(handle_t *handle, struct inode *dir, const char *name, int namelen, struct inode *inode, u64 blkno, struct buffer_head *parent_fe_bh, struct ocfs2_dir_lookup_result *lookup) { unsigned long offset; unsigned short rec_len; struct ocfs2_dir_entry *de, *de1; struct ocfs2_dinode *di = (struct ocfs2_dinode *)parent_fe_bh->b_data; struct super_block *sb = dir->i_sb; int retval, status; unsigned int size = sb->s_blocksize; struct buffer_head *insert_bh = lookup->dl_leaf_bh; char *data_start = insert_bh->b_data; if (!namelen) return -EINVAL; if (ocfs2_dir_indexed(dir)) { struct buffer_head *bh; /* * An indexed dir may require that we update the free space * list. Reserve a write to the previous node in the list so * that we don't fail later. * * XXX: This can be either a dx_root_block, or an unindexed * directory tree leaf block. */ if (ocfs2_free_list_at_root(lookup)) { bh = lookup->dl_dx_root_bh; retval = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), bh, OCFS2_JOURNAL_ACCESS_WRITE); } else { bh = lookup->dl_prev_leaf_bh; retval = ocfs2_journal_access_db(handle, INODE_CACHE(dir), bh, OCFS2_JOURNAL_ACCESS_WRITE); } if (retval) { mlog_errno(retval); return retval; } } else if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { data_start = di->id2.i_data.id_data; size = i_size_read(dir); BUG_ON(insert_bh != parent_fe_bh); } rec_len = OCFS2_DIR_REC_LEN(namelen); offset = 0; de = (struct ocfs2_dir_entry *) data_start; while (1) { BUG_ON((char *)de >= (size + data_start)); /* These checks should've already been passed by the * prepare function, but I guess we can leave them * here anyway. */ if (!ocfs2_check_dir_entry(dir, de, insert_bh, offset)) { retval = -ENOENT; goto bail; } if (ocfs2_match(namelen, name, de)) { retval = -EEXIST; goto bail; } /* We're guaranteed that we should have space, so we * can't possibly have hit the trailer...right? */ mlog_bug_on_msg(ocfs2_skip_dir_trailer(dir, de, offset, size), "Hit dir trailer trying to insert %.*s " "(namelen %d) into directory %llu. 
" "offset is %lu, trailer offset is %d\n", namelen, name, namelen, (unsigned long long)parent_fe_bh->b_blocknr, offset, ocfs2_dir_trailer_blk_off(dir->i_sb)); if (ocfs2_dirent_would_fit(de, rec_len)) { dir->i_mtime = dir->i_ctime = CURRENT_TIME; retval = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh); if (retval < 0) { mlog_errno(retval); goto bail; } if (insert_bh == parent_fe_bh) status = ocfs2_journal_access_di(handle, INODE_CACHE(dir), insert_bh, OCFS2_JOURNAL_ACCESS_WRITE); else { status = ocfs2_journal_access_db(handle, INODE_CACHE(dir), insert_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ocfs2_dir_indexed(dir)) { status = ocfs2_dx_dir_insert(dir, handle, lookup); if (status) { mlog_errno(status); goto bail; } } } /* By now the buffer is marked for journaling */ offset += le16_to_cpu(de->rec_len); if (le64_to_cpu(de->inode)) { de1 = (struct ocfs2_dir_entry *)((char *) de + OCFS2_DIR_REC_LEN(de->name_len)); de1->rec_len = cpu_to_le16(le16_to_cpu(de->rec_len) - OCFS2_DIR_REC_LEN(de->name_len)); de->rec_len = cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len)); de = de1; } de->file_type = OCFS2_FT_UNKNOWN; if (blkno) { de->inode = cpu_to_le64(blkno); ocfs2_set_de_type(de, inode->i_mode); } else de->inode = 0; de->name_len = namelen; memcpy(de->name, name, namelen); if (ocfs2_dir_indexed(dir)) ocfs2_recalc_free_list(dir, handle, lookup); dir->i_version++; ocfs2_journal_dirty(handle, insert_bh); retval = 0; goto bail; } offset += le16_to_cpu(de->rec_len); de = (struct ocfs2_dir_entry *) ((char *) de + le16_to_cpu(de->rec_len)); } /* when you think about it, the assert above should prevent us * from ever getting here. */ retval = -ENOSPC; bail: if (retval) mlog_errno(retval); return retval; } static int ocfs2_dir_foreach_blk_id(struct inode *inode, u64 *f_version, loff_t *f_pos, void *priv, filldir_t filldir, int *filldir_err) { int ret, i, filldir_ret; unsigned long offset = *f_pos; struct buffer_head *di_bh = NULL; struct ocfs2_dinode *di; struct ocfs2_inline_data *data; struct ocfs2_dir_entry *de; ret = ocfs2_read_inode_block(inode, &di_bh); if (ret) { mlog(ML_ERROR, "Unable to read inode block for dir %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno); goto out; } di = (struct ocfs2_dinode *)di_bh->b_data; data = &di->id2.i_data; while (*f_pos < i_size_read(inode)) { revalidate: /* If the dir block has changed since the last call to * readdir(2), then we might be pointing to an invalid * dirent right now. Scan from the start of the block * to make sure. */ if (*f_version != inode->i_version) { for (i = 0; i < i_size_read(inode) && i < offset; ) { de = (struct ocfs2_dir_entry *) (data->id_data + i); /* It's too expensive to do a full * dirent test each time round this * loop, but we do have to test at * least that it is non-zero. A * failure will be detected in the * dirent test below. */ if (le16_to_cpu(de->rec_len) < OCFS2_DIR_REC_LEN(1)) break; i += le16_to_cpu(de->rec_len); } *f_pos = offset = i; *f_version = inode->i_version; } de = (struct ocfs2_dir_entry *) (data->id_data + *f_pos); if (!ocfs2_check_dir_entry(inode, de, di_bh, *f_pos)) { /* On error, skip the f_pos to the end. */ *f_pos = i_size_read(inode); goto out; } offset += le16_to_cpu(de->rec_len); if (le64_to_cpu(de->inode)) { /* We might block in the next section * if the data destination is * currently swapped out. So, use a * version stamp to detect whether or * not the directory has been modified * during the copy operation. 
*/ u64 version = *f_version; unsigned char d_type = DT_UNKNOWN; if (de->file_type < OCFS2_FT_MAX) d_type = ocfs2_filetype_table[de->file_type]; filldir_ret = filldir(priv, de->name, de->name_len, *f_pos, le64_to_cpu(de->inode), d_type); if (filldir_ret) { if (filldir_err) *filldir_err = filldir_ret; break; } if (version != *f_version) goto revalidate; } *f_pos += le16_to_cpu(de->rec_len); } out: brelse(di_bh); return 0; } /* * NOTE: This function can be called against unindexed directories, * and indexed ones. */ static int ocfs2_dir_foreach_blk_el(struct inode *inode, u64 *f_version, loff_t *f_pos, void *priv, filldir_t filldir, int *filldir_err) { int error = 0; unsigned long offset, blk, last_ra_blk = 0; int i, stored; struct buffer_head * bh, * tmp; struct ocfs2_dir_entry * de; struct super_block * sb = inode->i_sb; unsigned int ra_sectors = 16; stored = 0; bh = NULL; offset = (*f_pos) & (sb->s_blocksize - 1); while (!error && !stored && *f_pos < i_size_read(inode)) { blk = (*f_pos) >> sb->s_blocksize_bits; if (ocfs2_read_dir_block(inode, blk, &bh, 0)) { /* Skip the corrupt dirblock and keep trying */ *f_pos += sb->s_blocksize - offset; continue; } /* The idea here is to begin with 8k read-ahead and to stay * 4k ahead of our current position. * * TODO: Use the pagecache for this. We just need to * make sure it's cluster-safe... */ if (!last_ra_blk || (((last_ra_blk - blk) << 9) <= (ra_sectors / 2))) { for (i = ra_sectors >> (sb->s_blocksize_bits - 9); i > 0; i--) { tmp = NULL; if (!ocfs2_read_dir_block(inode, ++blk, &tmp, OCFS2_BH_READAHEAD)) brelse(tmp); } last_ra_blk = blk; ra_sectors = 8; } revalidate: /* If the dir block has changed since the last call to * readdir(2), then we might be pointing to an invalid * dirent right now. Scan from the start of the block * to make sure. */ if (*f_version != inode->i_version) { for (i = 0; i < sb->s_blocksize && i < offset; ) { de = (struct ocfs2_dir_entry *) (bh->b_data + i); /* It's too expensive to do a full * dirent test each time round this * loop, but we do have to test at * least that it is non-zero. A * failure will be detected in the * dirent test below. */ if (le16_to_cpu(de->rec_len) < OCFS2_DIR_REC_LEN(1)) break; i += le16_to_cpu(de->rec_len); } offset = i; *f_pos = ((*f_pos) & ~(sb->s_blocksize - 1)) | offset; *f_version = inode->i_version; } while (!error && *f_pos < i_size_read(inode) && offset < sb->s_blocksize) { de = (struct ocfs2_dir_entry *) (bh->b_data + offset); if (!ocfs2_check_dir_entry(inode, de, bh, offset)) { /* On error, skip the f_pos to the next block. */ *f_pos = ((*f_pos) | (sb->s_blocksize - 1)) + 1; brelse(bh); goto out; } offset += le16_to_cpu(de->rec_len); if (le64_to_cpu(de->inode)) { /* We might block in the next section * if the data destination is * currently swapped out. So, use a * version stamp to detect whether or * not the directory has been modified * during the copy operation. 
*/ unsigned long version = *f_version; unsigned char d_type = DT_UNKNOWN; if (de->file_type < OCFS2_FT_MAX) d_type = ocfs2_filetype_table[de->file_type]; error = filldir(priv, de->name, de->name_len, *f_pos, le64_to_cpu(de->inode), d_type); if (error) { if (filldir_err) *filldir_err = error; break; } if (version != *f_version) goto revalidate; stored ++; } *f_pos += le16_to_cpu(de->rec_len); } offset = 0; brelse(bh); bh = NULL; } stored = 0; out: return stored; } static int ocfs2_dir_foreach_blk(struct inode *inode, u64 *f_version, loff_t *f_pos, void *priv, filldir_t filldir, int *filldir_err) { if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) return ocfs2_dir_foreach_blk_id(inode, f_version, f_pos, priv, filldir, filldir_err); return ocfs2_dir_foreach_blk_el(inode, f_version, f_pos, priv, filldir, filldir_err); } /* * This is intended to be called from inside other kernel functions, * so we fake some arguments. */ int ocfs2_dir_foreach(struct inode *inode, loff_t *f_pos, void *priv, filldir_t filldir) { int ret = 0, filldir_err = 0; u64 version = inode->i_version; while (*f_pos < i_size_read(inode)) { ret = ocfs2_dir_foreach_blk(inode, &version, f_pos, priv, filldir, &filldir_err); if (ret || filldir_err) break; } if (ret > 0) ret = -EIO; return 0; } /* * ocfs2_readdir() * */ int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir) { int error = 0; struct inode *inode = filp->f_path.dentry->d_inode; int lock_level = 0; trace_ocfs2_readdir((unsigned long long)OCFS2_I(inode)->ip_blkno); error = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level); if (lock_level && error >= 0) { /* We release EX lock which used to update atime * and get PR lock again to reduce contention * on commonly accessed directories. */ ocfs2_inode_unlock(inode, 1); lock_level = 0; error = ocfs2_inode_lock(inode, NULL, 0); } if (error < 0) { if (error != -ENOENT) mlog_errno(error); /* we haven't got any yet, so propagate the error. */ goto bail_nolock; } error = ocfs2_dir_foreach_blk(inode, &filp->f_version, &filp->f_pos, dirent, filldir, NULL); ocfs2_inode_unlock(inode, lock_level); if (error) mlog_errno(error); bail_nolock: return error; } /* * NOTE: this should always be called with parent dir i_mutex taken. */ int ocfs2_find_files_on_disk(const char *name, int namelen, u64 *blkno, struct inode *inode, struct ocfs2_dir_lookup_result *lookup) { int status = -ENOENT; trace_ocfs2_find_files_on_disk(namelen, name, blkno, (unsigned long long)OCFS2_I(inode)->ip_blkno); status = ocfs2_find_entry(name, namelen, inode, lookup); if (status) goto leave; *blkno = le64_to_cpu(lookup->dl_entry->inode); status = 0; leave: return status; } /* * Convenience function for callers which just want the block number * mapped to a name and don't require the full dirent info, etc. */ int ocfs2_lookup_ino_from_name(struct inode *dir, const char *name, int namelen, u64 *blkno) { int ret; struct ocfs2_dir_lookup_result lookup = { NULL, }; ret = ocfs2_find_files_on_disk(name, namelen, blkno, dir, &lookup); ocfs2_free_dir_lookup_result(&lookup); return ret; } /* Check for a name within a directory. 
* * Return 0 if the name does not exist * Return -EEXIST if the directory contains the name * * Callers should have i_mutex + a cluster lock on dir */ int ocfs2_check_dir_for_entry(struct inode *dir, const char *name, int namelen) { int ret; struct ocfs2_dir_lookup_result lookup = { NULL, }; trace_ocfs2_check_dir_for_entry( (unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name); ret = -EEXIST; if (ocfs2_find_entry(name, namelen, dir, &lookup) == 0) goto bail; ret = 0; bail: ocfs2_free_dir_lookup_result(&lookup); if (ret) mlog_errno(ret); return ret; } struct ocfs2_empty_dir_priv { unsigned seen_dot; unsigned seen_dot_dot; unsigned seen_other; unsigned dx_dir; }; static int ocfs2_empty_dir_filldir(void *priv, const char *name, int name_len, loff_t pos, u64 ino, unsigned type) { struct ocfs2_empty_dir_priv *p = priv; /* * Check the positions of "." and ".." records to be sure * they're in the correct place. * * Indexed directories don't need to proceed past the first * two entries, so we end the scan after seeing '..'. Despite * that, we allow the scan to proceed in the event that we * have a corrupted indexed directory (no dot or dot dot * entries). This allows us to double check for existing * entries which might not have been found in the index. */ if (name_len == 1 && !strncmp(".", name, 1) && pos == 0) { p->seen_dot = 1; return 0; } if (name_len == 2 && !strncmp("..", name, 2) && pos == OCFS2_DIR_REC_LEN(1)) { p->seen_dot_dot = 1; if (p->dx_dir && p->seen_dot) return 1; return 0; } p->seen_other = 1; return 1; } static int ocfs2_empty_dir_dx(struct inode *inode, struct ocfs2_empty_dir_priv *priv) { int ret; struct buffer_head *di_bh = NULL; struct buffer_head *dx_root_bh = NULL; struct ocfs2_dinode *di; struct ocfs2_dx_root_block *dx_root; priv->dx_dir = 1; ret = ocfs2_read_inode_block(inode, &di_bh); if (ret) { mlog_errno(ret); goto out; } di = (struct ocfs2_dinode *)di_bh->b_data; ret = ocfs2_read_dx_root(inode, di, &dx_root_bh); if (ret) { mlog_errno(ret); goto out; } dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data; if (le32_to_cpu(dx_root->dr_num_entries) != 2) priv->seen_other = 1; out: brelse(di_bh); brelse(dx_root_bh); return ret; } /* * routine to check that the specified directory is empty (for rmdir) * * Returns 1 if dir is empty, zero otherwise. * * XXX: This is a performance problem for unindexed directories. */ int ocfs2_empty_dir(struct inode *inode) { int ret; loff_t start = 0; struct ocfs2_empty_dir_priv priv; memset(&priv, 0, sizeof(priv)); if (ocfs2_dir_indexed(inode)) { ret = ocfs2_empty_dir_dx(inode, &priv); if (ret) mlog_errno(ret); /* * We still run ocfs2_dir_foreach to get the checks * for "." and "..". */ } ret = ocfs2_dir_foreach(inode, &start, &priv, ocfs2_empty_dir_filldir); if (ret) mlog_errno(ret); if (!priv.seen_dot || !priv.seen_dot_dot) { mlog(ML_ERROR, "bad directory (dir #%llu) - no `.' or `..'\n", (unsigned long long)OCFS2_I(inode)->ip_blkno); /* * XXX: Is it really safe to allow an unlink to continue? */ return 1; } return !priv.seen_other; } /* * Fills "." and ".." dirents in a new directory block. Returns dirent for * "..", which might be used during creation of a directory with a trailing * header. It is otherwise safe to ignore the return code.
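* (Added note: ".." is created spanning all space remaining in its block;
* the trailer-aware caller below reads the returned dirent's rec_len to
* size the trailer's free-space accounting.)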
*/ static struct ocfs2_dir_entry *ocfs2_fill_initial_dirents(struct inode *inode, struct inode *parent, char *start, unsigned int size) { struct ocfs2_dir_entry *de = (struct ocfs2_dir_entry *)start; de->inode = cpu_to_le64(OCFS2_I(inode)->ip_blkno); de->name_len = 1; de->rec_len = cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len)); strcpy(de->name, "."); ocfs2_set_de_type(de, S_IFDIR); de = (struct ocfs2_dir_entry *) ((char *)de + le16_to_cpu(de->rec_len)); de->inode = cpu_to_le64(OCFS2_I(parent)->ip_blkno); de->rec_len = cpu_to_le16(size - OCFS2_DIR_REC_LEN(1)); de->name_len = 2; strcpy(de->name, ".."); ocfs2_set_de_type(de, S_IFDIR); return de; } /* * This works together with code in ocfs2_mknod_locked() which sets * the inline-data flag and initializes the inline-data section. */ static int ocfs2_fill_new_dir_id(struct ocfs2_super *osb, handle_t *handle, struct inode *parent, struct inode *inode, struct buffer_head *di_bh) { int ret; struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; struct ocfs2_inline_data *data = &di->id2.i_data; unsigned int size = le16_to_cpu(data->id_count); ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } ocfs2_fill_initial_dirents(inode, parent, data->id_data, size); ocfs2_journal_dirty(handle, di_bh); i_size_write(inode, size); inode->i_nlink = 2; inode->i_blocks = ocfs2_inode_sector_count(inode); ret = ocfs2_mark_inode_dirty(handle, inode, di_bh); if (ret < 0) mlog_errno(ret); out: return ret; } static int ocfs2_fill_new_dir_el(struct ocfs2_super *osb, handle_t *handle, struct inode *parent, struct inode *inode, struct buffer_head *fe_bh, struct ocfs2_alloc_context *data_ac, struct buffer_head **ret_new_bh) { int status; unsigned int size = osb->sb->s_blocksize; struct buffer_head *new_bh = NULL; struct ocfs2_dir_entry *de; if (ocfs2_new_dir_wants_trailer(inode)) size = ocfs2_dir_trailer_blk_off(parent->i_sb); status = ocfs2_do_extend_dir(osb->sb, handle, inode, fe_bh, data_ac, NULL, &new_bh); if (status < 0) { mlog_errno(status); goto bail; } ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), new_bh); status = ocfs2_journal_access_db(handle, INODE_CACHE(inode), new_bh, OCFS2_JOURNAL_ACCESS_CREATE); if (status < 0) { mlog_errno(status); goto bail; } memset(new_bh->b_data, 0, osb->sb->s_blocksize); de = ocfs2_fill_initial_dirents(inode, parent, new_bh->b_data, size); if (ocfs2_new_dir_wants_trailer(inode)) { int size = le16_to_cpu(de->rec_len); /* * Figure out the size of the hole left over after * insertion of '.' and '..'. The trailer wants this * information. 
*/ size -= OCFS2_DIR_REC_LEN(2); size -= sizeof(struct ocfs2_dir_block_trailer); ocfs2_init_dir_trailer(inode, new_bh, size); } ocfs2_journal_dirty(handle, new_bh); i_size_write(inode, inode->i_sb->s_blocksize); inode->i_nlink = 2; inode->i_blocks = ocfs2_inode_sector_count(inode); status = ocfs2_mark_inode_dirty(handle, inode, fe_bh); if (status < 0) { mlog_errno(status); goto bail; } status = 0; if (ret_new_bh) { *ret_new_bh = new_bh; new_bh = NULL; } bail: brelse(new_bh); return status; } static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb, handle_t *handle, struct inode *dir, struct buffer_head *di_bh, struct buffer_head *dirdata_bh, struct ocfs2_alloc_context *meta_ac, int dx_inline, u32 num_entries, struct buffer_head **ret_dx_root_bh) { int ret; struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data; u16 dr_suballoc_bit; u64 suballoc_loc, dr_blkno; unsigned int num_bits; struct buffer_head *dx_root_bh = NULL; struct ocfs2_dx_root_block *dx_root; struct ocfs2_dir_block_trailer *trailer = ocfs2_trailer_from_bh(dirdata_bh, dir->i_sb); ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc, &dr_suballoc_bit, &num_bits, &dr_blkno); if (ret) { mlog_errno(ret); goto out; } trace_ocfs2_dx_dir_attach_index( (unsigned long long)OCFS2_I(dir)->ip_blkno, (unsigned long long)dr_blkno); dx_root_bh = sb_getblk(osb->sb, dr_blkno); if (dx_root_bh == NULL) { ret = -EIO; goto out; } ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), dx_root_bh); ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh, OCFS2_JOURNAL_ACCESS_CREATE); if (ret < 0) { mlog_errno(ret); goto out; } dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data; memset(dx_root, 0, osb->sb->s_blocksize); strcpy(dx_root->dr_signature, OCFS2_DX_ROOT_SIGNATURE); dx_root->dr_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot); dx_root->dr_suballoc_loc = cpu_to_le64(suballoc_loc); dx_root->dr_suballoc_bit = cpu_to_le16(dr_suballoc_bit); dx_root->dr_fs_generation = cpu_to_le32(osb->fs_generation); dx_root->dr_blkno = cpu_to_le64(dr_blkno); dx_root->dr_dir_blkno = cpu_to_le64(OCFS2_I(dir)->ip_blkno); dx_root->dr_num_entries = cpu_to_le32(num_entries); if (le16_to_cpu(trailer->db_free_rec_len)) dx_root->dr_free_blk = cpu_to_le64(dirdata_bh->b_blocknr); else dx_root->dr_free_blk = cpu_to_le64(0); if (dx_inline) { dx_root->dr_flags |= OCFS2_DX_FLAG_INLINE; dx_root->dr_entries.de_count = cpu_to_le16(ocfs2_dx_entries_per_root(osb->sb)); } else { dx_root->dr_list.l_count = cpu_to_le16(ocfs2_extent_recs_per_dx_root(osb->sb)); } ocfs2_journal_dirty(handle, dx_root_bh); ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh, OCFS2_JOURNAL_ACCESS_CREATE); if (ret) { mlog_errno(ret); goto out; } di->i_dx_root = cpu_to_le64(dr_blkno); spin_lock(&OCFS2_I(dir)->ip_lock); OCFS2_I(dir)->ip_dyn_features |= OCFS2_INDEXED_DIR_FL; di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features); spin_unlock(&OCFS2_I(dir)->ip_lock); ocfs2_journal_dirty(handle, di_bh); *ret_dx_root_bh = dx_root_bh; dx_root_bh = NULL; out: brelse(dx_root_bh); return ret; } static int ocfs2_dx_dir_format_cluster(struct ocfs2_super *osb, handle_t *handle, struct inode *dir, struct buffer_head **dx_leaves, int num_dx_leaves, u64 start_blk) { int ret, i; struct ocfs2_dx_leaf *dx_leaf; struct buffer_head *bh; for (i = 0; i < num_dx_leaves; i++) { bh = sb_getblk(osb->sb, start_blk + i); if (bh == NULL) { ret = -EIO; goto out; } dx_leaves[i] = bh; ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), bh); ret = ocfs2_journal_access_dl(handle, 
INODE_CACHE(dir), bh, OCFS2_JOURNAL_ACCESS_CREATE); if (ret < 0) { mlog_errno(ret); goto out; } dx_leaf = (struct ocfs2_dx_leaf *) bh->b_data; memset(dx_leaf, 0, osb->sb->s_blocksize); strcpy(dx_leaf->dl_signature, OCFS2_DX_LEAF_SIGNATURE); dx_leaf->dl_fs_generation = cpu_to_le32(osb->fs_generation); dx_leaf->dl_blkno = cpu_to_le64(bh->b_blocknr); dx_leaf->dl_list.de_count = cpu_to_le16(ocfs2_dx_entries_per_leaf(osb->sb)); trace_ocfs2_dx_dir_format_cluster( (unsigned long long)OCFS2_I(dir)->ip_blkno, (unsigned long long)bh->b_blocknr, le16_to_cpu(dx_leaf->dl_list.de_count)); ocfs2_journal_dirty(handle, bh); } ret = 0; out: return ret; } /* * Allocates and formats a new cluster for use in an indexed dir * leaf. This version will not do the extent insert, so that it can be * used by operations which need careful ordering. */ static int __ocfs2_dx_dir_new_cluster(struct inode *dir, u32 cpos, handle_t *handle, struct ocfs2_alloc_context *data_ac, struct buffer_head **dx_leaves, int num_dx_leaves, u64 *ret_phys_blkno) { int ret; u32 phys, num; u64 phys_blkno; struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); /* * XXX: For create, this should claim cluster for the index * *before* the unindexed insert so that we have a better * chance of contiguousness as the directory grows in number * of entries. */ ret = __ocfs2_claim_clusters(handle, data_ac, 1, 1, &phys, &num); if (ret) { mlog_errno(ret); goto out; } /* * Format the new cluster first. That way, we're inserting * valid data. */ phys_blkno = ocfs2_clusters_to_blocks(osb->sb, phys); ret = ocfs2_dx_dir_format_cluster(osb, handle, dir, dx_leaves, num_dx_leaves, phys_blkno); if (ret) { mlog_errno(ret); goto out; } *ret_phys_blkno = phys_blkno; out: return ret; } static int ocfs2_dx_dir_new_cluster(struct inode *dir, struct ocfs2_extent_tree *et, u32 cpos, handle_t *handle, struct ocfs2_alloc_context *data_ac, struct ocfs2_alloc_context *meta_ac, struct buffer_head **dx_leaves, int num_dx_leaves) { int ret; u64 phys_blkno; ret = __ocfs2_dx_dir_new_cluster(dir, cpos, handle, data_ac, dx_leaves, num_dx_leaves, &phys_blkno); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_insert_extent(handle, et, cpos, phys_blkno, 1, 0, meta_ac); if (ret) mlog_errno(ret); out: return ret; } static struct buffer_head **ocfs2_dx_dir_kmalloc_leaves(struct super_block *sb, int *ret_num_leaves) { int num_dx_leaves = ocfs2_clusters_to_blocks(sb, 1); struct buffer_head **dx_leaves; dx_leaves = kcalloc(num_dx_leaves, sizeof(struct buffer_head *), GFP_NOFS); if (dx_leaves && ret_num_leaves) *ret_num_leaves = num_dx_leaves; return dx_leaves; } static int ocfs2_fill_new_dir_dx(struct ocfs2_super *osb, handle_t *handle, struct inode *parent, struct inode *inode, struct buffer_head *di_bh, struct ocfs2_alloc_context *data_ac, struct ocfs2_alloc_context *meta_ac) { int ret; struct buffer_head *leaf_bh = NULL; struct buffer_head *dx_root_bh = NULL; struct ocfs2_dx_hinfo hinfo; struct ocfs2_dx_root_block *dx_root; struct ocfs2_dx_entry_list *entry_list; /* * Our strategy is to create the directory as though it were * unindexed, then add the index block. This works with very * little complication since the state of a new directory is a * very well known quantity. * * Essentially, we have two dirents ("." and ".."), in the 1st * block which need indexing. These are easily inserted into * the index block. 
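*
* Editor's note (restating the code below, not in the original source):
* ocfs2_dx_dir_attach_index() is called with dx_inline = 1 and
* num_entries = 2, after which "." and ".." are hashed and inserted
* straight into the inline dr_entries list of the new root.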
*/ ret = ocfs2_fill_new_dir_el(osb, handle, parent, inode, di_bh, data_ac, &leaf_bh); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_dx_dir_attach_index(osb, handle, inode, di_bh, leaf_bh, meta_ac, 1, 2, &dx_root_bh); if (ret) { mlog_errno(ret); goto out; } dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data; entry_list = &dx_root->dr_entries; /* Buffer has been journaled for us by ocfs2_dx_dir_attach_index */ ocfs2_dx_dir_name_hash(inode, ".", 1, &hinfo); ocfs2_dx_entry_list_insert(entry_list, &hinfo, leaf_bh->b_blocknr); ocfs2_dx_dir_name_hash(inode, "..", 2, &hinfo); ocfs2_dx_entry_list_insert(entry_list, &hinfo, leaf_bh->b_blocknr); out: brelse(dx_root_bh); brelse(leaf_bh); return ret; } int ocfs2_fill_new_dir(struct ocfs2_super *osb, handle_t *handle, struct inode *parent, struct inode *inode, struct buffer_head *fe_bh, struct ocfs2_alloc_context *data_ac, struct ocfs2_alloc_context *meta_ac) { BUG_ON(!ocfs2_supports_inline_data(osb) && data_ac == NULL); if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) return ocfs2_fill_new_dir_id(osb, handle, parent, inode, fe_bh); if (ocfs2_supports_indexed_dirs(osb)) return ocfs2_fill_new_dir_dx(osb, handle, parent, inode, fe_bh, data_ac, meta_ac); return ocfs2_fill_new_dir_el(osb, handle, parent, inode, fe_bh, data_ac, NULL); } static int ocfs2_dx_dir_index_block(struct inode *dir, handle_t *handle, struct buffer_head **dx_leaves, int num_dx_leaves, u32 *num_dx_entries, struct buffer_head *dirent_bh) { int ret = 0, namelen, i; char *de_buf, *limit; struct ocfs2_dir_entry *de; struct buffer_head *dx_leaf_bh; struct ocfs2_dx_hinfo hinfo; u64 dirent_blk = dirent_bh->b_blocknr; de_buf = dirent_bh->b_data; limit = de_buf + dir->i_sb->s_blocksize; while (de_buf < limit) { de = (struct ocfs2_dir_entry *)de_buf; namelen = de->name_len; if (!namelen || !de->inode) goto inc; ocfs2_dx_dir_name_hash(dir, de->name, namelen, &hinfo); i = ocfs2_dx_dir_hash_idx(OCFS2_SB(dir->i_sb), &hinfo); dx_leaf_bh = dx_leaves[i]; ret = __ocfs2_dx_dir_leaf_insert(dir, handle, &hinfo, dirent_blk, dx_leaf_bh); if (ret) { mlog_errno(ret); goto out; } *num_dx_entries = *num_dx_entries + 1; inc: de_buf += le16_to_cpu(de->rec_len); } out: return ret; } /* * XXX: This expects dx_root_bh to already be part of the transaction. */ static void ocfs2_dx_dir_index_root_block(struct inode *dir, struct buffer_head *dx_root_bh, struct buffer_head *dirent_bh) { char *de_buf, *limit; struct ocfs2_dx_root_block *dx_root; struct ocfs2_dir_entry *de; struct ocfs2_dx_hinfo hinfo; u64 dirent_blk = dirent_bh->b_blocknr; dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data; de_buf = dirent_bh->b_data; limit = de_buf + dir->i_sb->s_blocksize; while (de_buf < limit) { de = (struct ocfs2_dir_entry *)de_buf; if (!de->name_len || !de->inode) goto inc; ocfs2_dx_dir_name_hash(dir, de->name, de->name_len, &hinfo); trace_ocfs2_dx_dir_index_root_block( (unsigned long long)dir->i_ino, hinfo.major_hash, hinfo.minor_hash, de->name_len, de->name, le16_to_cpu(dx_root->dr_entries.de_num_used)); ocfs2_dx_entry_list_insert(&dx_root->dr_entries, &hinfo, dirent_blk); le32_add_cpu(&dx_root->dr_num_entries, 1); inc: de_buf += le16_to_cpu(de->rec_len); } } /* * Count the number of inline directory entries in di_bh and compare * them against the number of entries we can hold in an inline dx root * block. 
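*
* Editor's worked example (hypothetical capacity, not from the source):
* if ocfs2_dx_entries_per_root() yields N, the check below keeps the
* index inline only while the directory holds at most N - 1 live
* dirents, so the insert that triggered this expansion still has a
* free slot in the inline root.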
*/ static int ocfs2_new_dx_should_be_inline(struct inode *dir, struct buffer_head *di_bh) { int dirent_count = 0; char *de_buf, *limit; struct ocfs2_dir_entry *de; struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; de_buf = di->id2.i_data.id_data; limit = de_buf + i_size_read(dir); while (de_buf < limit) { de = (struct ocfs2_dir_entry *)de_buf; if (de->name_len && de->inode) dirent_count++; de_buf += le16_to_cpu(de->rec_len); } /* We are careful to leave room for one extra record. */ return dirent_count < ocfs2_dx_entries_per_root(dir->i_sb); } /* * Expand rec_len of the rightmost dirent in a directory block so that it * contains the end of our valid space for dirents. We do this during * expansion from an inline directory to one with extents. The first dir block * in that case is taken from the inline data portion of the inode block. * * This will also return the largest amount of contiguous space for a dirent * in the block. That value is *not* necessarily the last dirent, even after * expansion. The directory indexing code wants this value for free space * accounting. We do this here since we're already walking the entire dir * block. * * We add the dir trailer if this filesystem wants it. */ static unsigned int ocfs2_expand_last_dirent(char *start, unsigned int old_size, struct inode *dir) { struct super_block *sb = dir->i_sb; struct ocfs2_dir_entry *de; struct ocfs2_dir_entry *prev_de; char *de_buf, *limit; unsigned int new_size = sb->s_blocksize; unsigned int bytes, this_hole; unsigned int largest_hole = 0; if (ocfs2_new_dir_wants_trailer(dir)) new_size = ocfs2_dir_trailer_blk_off(sb); bytes = new_size - old_size; limit = start + old_size; de_buf = start; de = (struct ocfs2_dir_entry *)de_buf; do { this_hole = ocfs2_figure_dirent_hole(de); if (this_hole > largest_hole) largest_hole = this_hole; prev_de = de; de_buf += le16_to_cpu(de->rec_len); de = (struct ocfs2_dir_entry *)de_buf; } while (de_buf < limit); le16_add_cpu(&prev_de->rec_len, bytes); /* We need to double check this after modification of the final * dirent. */ this_hole = ocfs2_figure_dirent_hole(prev_de); if (this_hole > largest_hole) largest_hole = this_hole; if (largest_hole >= OCFS2_DIR_MIN_REC_LEN) return largest_hole; return 0; } /* * We allocate enough clusters to fulfill "blocks_wanted", but set * i_size to exactly one block. Ocfs2_extend_dir() will handle the * rest automatically for us. * * *first_block_bh is a pointer to the 1st data block allocated to the * directory. 
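*
* Editor's illustration (assumed 4K block and cluster sizes, not from
* the source): blocks_wanted = 2 makes bytes = 2 << 12 = 8192, so alloc
* below comes to two clusters, yet i_size is still written as a single
* s_blocksize; the second block only becomes visible once
* ocfs2_extend_dir() grows i_size.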
*/ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh, unsigned int blocks_wanted, struct ocfs2_dir_lookup_result *lookup, struct buffer_head **first_block_bh) { u32 alloc, dx_alloc, bit_off, len, num_dx_entries = 0; struct super_block *sb = dir->i_sb; int ret, i, num_dx_leaves = 0, dx_inline = 0, credits = ocfs2_inline_to_extents_credits(sb); u64 dx_insert_blkno, blkno, bytes = blocks_wanted << sb->s_blocksize_bits; struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); struct ocfs2_inode_info *oi = OCFS2_I(dir); struct ocfs2_alloc_context *data_ac = NULL; struct ocfs2_alloc_context *meta_ac = NULL; struct buffer_head *dirdata_bh = NULL; struct buffer_head *dx_root_bh = NULL; struct buffer_head **dx_leaves = NULL; struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; handle_t *handle; struct ocfs2_extent_tree et; struct ocfs2_extent_tree dx_et; int did_quota = 0, bytes_allocated = 0; ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(dir), di_bh); alloc = ocfs2_clusters_for_bytes(sb, bytes); dx_alloc = 0; down_write(&oi->ip_alloc_sem); if (ocfs2_supports_indexed_dirs(osb)) { credits += ocfs2_add_dir_index_credits(sb); dx_inline = ocfs2_new_dx_should_be_inline(dir, di_bh); if (!dx_inline) { /* Add one more cluster for an index leaf */ dx_alloc++; dx_leaves = ocfs2_dx_dir_kmalloc_leaves(sb, &num_dx_leaves); if (!dx_leaves) { ret = -ENOMEM; mlog_errno(ret); goto out; } } /* This gets us the dx_root */ ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac); if (ret) { mlog_errno(ret); goto out; } } /* * We should never need more than 2 clusters for the unindexed * tree - maximum dirent size is far less than one block. In * fact, the only time we'd need more than one cluster is if * blocksize == clustersize and the dirent won't fit in the * extra space that the expansion to a single block gives. As * of today, that only happens on 4k/4k file systems. */ BUG_ON(alloc > 2); ret = ocfs2_reserve_clusters(osb, alloc + dx_alloc, &data_ac); if (ret) { mlog_errno(ret); goto out; } /* * Prepare for worst case allocation scenario of two separate * extents in the unindexed tree. */ if (alloc == 2) credits += OCFS2_SUBALLOC_ALLOC; handle = ocfs2_start_trans(osb, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out; } ret = dquot_alloc_space_nodirty(dir, ocfs2_clusters_to_bytes(osb->sb, alloc + dx_alloc)); if (ret) goto out_commit; did_quota = 1; if (ocfs2_supports_indexed_dirs(osb) && !dx_inline) { /* * Allocate our index cluster first, to maximize the * possibility that unindexed leaves grow * contiguously. */ ret = __ocfs2_dx_dir_new_cluster(dir, 0, handle, data_ac, dx_leaves, num_dx_leaves, &dx_insert_blkno); if (ret) { mlog_errno(ret); goto out_commit; } bytes_allocated += ocfs2_clusters_to_bytes(dir->i_sb, 1); } /* * Try to claim as many clusters as the bitmap can give though * if we only get one now, that's enough to continue. The rest * will be claimed after the conversion to extents. */ if (ocfs2_dir_resv_allowed(osb)) data_ac->ac_resv = &oi->ip_la_data_resv; ret = ocfs2_claim_clusters(handle, data_ac, 1, &bit_off, &len); if (ret) { mlog_errno(ret); goto out_commit; } bytes_allocated += ocfs2_clusters_to_bytes(dir->i_sb, 1); /* * Operations are carefully ordered so that we set up the new * data block first. The conversion from inline data to * extents follows. 
*/ blkno = ocfs2_clusters_to_blocks(dir->i_sb, bit_off); dirdata_bh = sb_getblk(sb, blkno); if (!dirdata_bh) { ret = -EIO; mlog_errno(ret); goto out_commit; } ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), dirdata_bh); ret = ocfs2_journal_access_db(handle, INODE_CACHE(dir), dirdata_bh, OCFS2_JOURNAL_ACCESS_CREATE); if (ret) { mlog_errno(ret); goto out_commit; } memcpy(dirdata_bh->b_data, di->id2.i_data.id_data, i_size_read(dir)); memset(dirdata_bh->b_data + i_size_read(dir), 0, sb->s_blocksize - i_size_read(dir)); i = ocfs2_expand_last_dirent(dirdata_bh->b_data, i_size_read(dir), dir); if (ocfs2_new_dir_wants_trailer(dir)) { /* * Prepare the dir trailer up front. It will otherwise look * like a valid dirent. Even if inserting the index fails * (unlikely), then all we'll have done is given first dir * block a small amount of fragmentation. */ ocfs2_init_dir_trailer(dir, dirdata_bh, i); } ocfs2_journal_dirty(handle, dirdata_bh); if (ocfs2_supports_indexed_dirs(osb) && !dx_inline) { /* * Dx dirs with an external cluster need to do this up * front. Inline dx root's get handled later, after * we've allocated our root block. We get passed back * a total number of items so that dr_num_entries can * be correctly set once the dx_root has been * allocated. */ ret = ocfs2_dx_dir_index_block(dir, handle, dx_leaves, num_dx_leaves, &num_dx_entries, dirdata_bh); if (ret) { mlog_errno(ret); goto out_commit; } } /* * Set extent, i_size, etc on the directory. After this, the * inode should contain the same exact dirents as before and * be fully accessible from system calls. * * We let the later dirent insert modify c/mtime - to the user * the data hasn't changed. */ ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh, OCFS2_JOURNAL_ACCESS_CREATE); if (ret) { mlog_errno(ret); goto out_commit; } spin_lock(&oi->ip_lock); oi->ip_dyn_features &= ~OCFS2_INLINE_DATA_FL; di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); spin_unlock(&oi->ip_lock); ocfs2_dinode_new_extent_list(dir, di); i_size_write(dir, sb->s_blocksize); dir->i_mtime = dir->i_ctime = CURRENT_TIME; di->i_size = cpu_to_le64(sb->s_blocksize); di->i_ctime = di->i_mtime = cpu_to_le64(dir->i_ctime.tv_sec); di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(dir->i_ctime.tv_nsec); /* * This should never fail as our extent list is empty and all * related blocks have been journaled already. */ ret = ocfs2_insert_extent(handle, &et, 0, blkno, len, 0, NULL); if (ret) { mlog_errno(ret); goto out_commit; } /* * Set i_blocks after the extent insert for the most up to * date ip_clusters value. */ dir->i_blocks = ocfs2_inode_sector_count(dir); ocfs2_journal_dirty(handle, di_bh); if (ocfs2_supports_indexed_dirs(osb)) { ret = ocfs2_dx_dir_attach_index(osb, handle, dir, di_bh, dirdata_bh, meta_ac, dx_inline, num_dx_entries, &dx_root_bh); if (ret) { mlog_errno(ret); goto out_commit; } if (dx_inline) { ocfs2_dx_dir_index_root_block(dir, dx_root_bh, dirdata_bh); } else { ocfs2_init_dx_root_extent_tree(&dx_et, INODE_CACHE(dir), dx_root_bh); ret = ocfs2_insert_extent(handle, &dx_et, 0, dx_insert_blkno, 1, 0, NULL); if (ret) mlog_errno(ret); } } /* * We asked for two clusters, but only got one in the 1st * pass. Claim the 2nd cluster as a separate extent. 
*/ if (alloc > len) { ret = ocfs2_claim_clusters(handle, data_ac, 1, &bit_off, &len); if (ret) { mlog_errno(ret); goto out_commit; } blkno = ocfs2_clusters_to_blocks(dir->i_sb, bit_off); ret = ocfs2_insert_extent(handle, &et, 1, blkno, len, 0, NULL); if (ret) { mlog_errno(ret); goto out_commit; } bytes_allocated += ocfs2_clusters_to_bytes(dir->i_sb, 1); } *first_block_bh = dirdata_bh; dirdata_bh = NULL; if (ocfs2_supports_indexed_dirs(osb)) { unsigned int off; if (!dx_inline) { /* * We need to return the correct block within the * cluster which should hold our entry. */ off = ocfs2_dx_dir_hash_idx(OCFS2_SB(dir->i_sb), &lookup->dl_hinfo); get_bh(dx_leaves[off]); lookup->dl_dx_leaf_bh = dx_leaves[off]; } lookup->dl_dx_root_bh = dx_root_bh; dx_root_bh = NULL; } out_commit: if (ret < 0 && did_quota) dquot_free_space_nodirty(dir, bytes_allocated); ocfs2_commit_trans(osb, handle); out: up_write(&oi->ip_alloc_sem); if (data_ac) ocfs2_free_alloc_context(data_ac); if (meta_ac) ocfs2_free_alloc_context(meta_ac); if (dx_leaves) { for (i = 0; i < num_dx_leaves; i++) brelse(dx_leaves[i]); kfree(dx_leaves); } brelse(dirdata_bh); brelse(dx_root_bh); return ret; } /* returns a bh of the 1st new block in the allocation. */ static int ocfs2_do_extend_dir(struct super_block *sb, handle_t *handle, struct inode *dir, struct buffer_head *parent_fe_bh, struct ocfs2_alloc_context *data_ac, struct ocfs2_alloc_context *meta_ac, struct buffer_head **new_bh) { int status; int extend, did_quota = 0; u64 p_blkno, v_blkno; spin_lock(&OCFS2_I(dir)->ip_lock); extend = (i_size_read(dir) == ocfs2_clusters_to_bytes(sb, OCFS2_I(dir)->ip_clusters)); spin_unlock(&OCFS2_I(dir)->ip_lock); if (extend) { u32 offset = OCFS2_I(dir)->ip_clusters; status = dquot_alloc_space_nodirty(dir, ocfs2_clusters_to_bytes(sb, 1)); if (status) goto bail; did_quota = 1; status = ocfs2_add_inode_data(OCFS2_SB(sb), dir, &offset, 1, 0, parent_fe_bh, handle, data_ac, meta_ac, NULL); BUG_ON(status == -EAGAIN); if (status < 0) { mlog_errno(status); goto bail; } } v_blkno = ocfs2_blocks_for_bytes(sb, i_size_read(dir)); status = ocfs2_extent_map_get_blocks(dir, v_blkno, &p_blkno, NULL, NULL); if (status < 0) { mlog_errno(status); goto bail; } *new_bh = sb_getblk(sb, p_blkno); if (!*new_bh) { status = -EIO; mlog_errno(status); goto bail; } status = 0; bail: if (did_quota && status < 0) dquot_free_space_nodirty(dir, ocfs2_clusters_to_bytes(sb, 1)); return status; } /* * Assumes you already have a cluster lock on the directory. * * 'blocks_wanted' is only used if we have an inline directory which * is to be turned into an extent based one. The size of the dirent to * insert might be larger than the space gained by growing to just one * block, so we may have to grow the inode by two blocks in that case. * * If the directory is already indexed, dx_root_bh must be provided. 
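*
* Editor's note (illustration, not in the original): when blocks_wanted
* == 2, the block returned by ocfs2_expand_inline_dir() is released and
* the function falls through to format and return a second block
* instead, since the pending dirent was already known not to fit in
* block zero.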
*/ static int ocfs2_extend_dir(struct ocfs2_super *osb, struct inode *dir, struct buffer_head *parent_fe_bh, unsigned int blocks_wanted, struct ocfs2_dir_lookup_result *lookup, struct buffer_head **new_de_bh) { int status = 0; int credits, num_free_extents, drop_alloc_sem = 0; loff_t dir_i_size; struct ocfs2_dinode *fe = (struct ocfs2_dinode *) parent_fe_bh->b_data; struct ocfs2_extent_list *el = &fe->id2.i_list; struct ocfs2_alloc_context *data_ac = NULL; struct ocfs2_alloc_context *meta_ac = NULL; handle_t *handle = NULL; struct buffer_head *new_bh = NULL; struct ocfs2_dir_entry * de; struct super_block *sb = osb->sb; struct ocfs2_extent_tree et; struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh; if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { /* * This would be a code error as an inline directory should * never have an index root. */ BUG_ON(dx_root_bh); status = ocfs2_expand_inline_dir(dir, parent_fe_bh, blocks_wanted, lookup, &new_bh); if (status) { mlog_errno(status); goto bail; } /* Expansion from inline to an indexed directory will * have given us this. */ dx_root_bh = lookup->dl_dx_root_bh; if (blocks_wanted == 1) { /* * If the new dirent will fit inside the space * created by pushing out to one block, then * we can complete the operation * here. Otherwise we have to expand i_size * and format the 2nd block below. */ BUG_ON(new_bh == NULL); goto bail_bh; } /* * Get rid of 'new_bh' - we want to format the 2nd * data block and return that instead. */ brelse(new_bh); new_bh = NULL; down_write(&OCFS2_I(dir)->ip_alloc_sem); drop_alloc_sem = 1; dir_i_size = i_size_read(dir); credits = OCFS2_SIMPLE_DIR_EXTEND_CREDITS; goto do_extend; } down_write(&OCFS2_I(dir)->ip_alloc_sem); drop_alloc_sem = 1; dir_i_size = i_size_read(dir); trace_ocfs2_extend_dir((unsigned long long)OCFS2_I(dir)->ip_blkno, dir_i_size); /* dir->i_size is always block aligned. 
*/ spin_lock(&OCFS2_I(dir)->ip_lock); if (dir_i_size == ocfs2_clusters_to_bytes(sb, OCFS2_I(dir)->ip_clusters)) { spin_unlock(&OCFS2_I(dir)->ip_lock); ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(dir), parent_fe_bh); num_free_extents = ocfs2_num_free_extents(osb, &et); if (num_free_extents < 0) { status = num_free_extents; mlog_errno(status); goto bail; } if (!num_free_extents) { status = ocfs2_reserve_new_metadata(osb, el, &meta_ac); if (status < 0) { if (status != -ENOSPC) mlog_errno(status); goto bail; } } status = ocfs2_reserve_clusters(osb, 1, &data_ac); if (status < 0) { if (status != -ENOSPC) mlog_errno(status); goto bail; } if (ocfs2_dir_resv_allowed(osb)) data_ac->ac_resv = &OCFS2_I(dir)->ip_la_data_resv; credits = ocfs2_calc_extend_credits(sb, el, 1); } else { spin_unlock(&OCFS2_I(dir)->ip_lock); credits = OCFS2_SIMPLE_DIR_EXTEND_CREDITS; } do_extend: if (ocfs2_dir_indexed(dir)) credits++; /* For attaching the new dirent block to the * dx_root */ handle = ocfs2_start_trans(osb, credits); if (IS_ERR(handle)) { status = PTR_ERR(handle); handle = NULL; mlog_errno(status); goto bail; } status = ocfs2_do_extend_dir(osb->sb, handle, dir, parent_fe_bh, data_ac, meta_ac, &new_bh); if (status < 0) { mlog_errno(status); goto bail; } ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), new_bh); status = ocfs2_journal_access_db(handle, INODE_CACHE(dir), new_bh, OCFS2_JOURNAL_ACCESS_CREATE); if (status < 0) { mlog_errno(status); goto bail; } memset(new_bh->b_data, 0, sb->s_blocksize); de = (struct ocfs2_dir_entry *) new_bh->b_data; de->inode = 0; if (ocfs2_supports_dir_trailer(dir)) { de->rec_len = cpu_to_le16(ocfs2_dir_trailer_blk_off(sb)); ocfs2_init_dir_trailer(dir, new_bh, le16_to_cpu(de->rec_len)); if (ocfs2_dir_indexed(dir)) { status = ocfs2_dx_dir_link_trailer(dir, handle, dx_root_bh, new_bh); if (status) { mlog_errno(status); goto bail; } } } else { de->rec_len = cpu_to_le16(sb->s_blocksize); } ocfs2_journal_dirty(handle, new_bh); dir_i_size += dir->i_sb->s_blocksize; i_size_write(dir, dir_i_size); dir->i_blocks = ocfs2_inode_sector_count(dir); status = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh); if (status < 0) { mlog_errno(status); goto bail; } bail_bh: *new_de_bh = new_bh; get_bh(*new_de_bh); bail: if (handle) ocfs2_commit_trans(osb, handle); if (drop_alloc_sem) up_write(&OCFS2_I(dir)->ip_alloc_sem); if (data_ac) ocfs2_free_alloc_context(data_ac); if (meta_ac) ocfs2_free_alloc_context(meta_ac); brelse(new_bh); return status; } static int ocfs2_find_dir_space_id(struct inode *dir, struct buffer_head *di_bh, const char *name, int namelen, struct buffer_head **ret_de_bh, unsigned int *blocks_wanted) { int ret; struct super_block *sb = dir->i_sb; struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; struct ocfs2_dir_entry *de, *last_de = NULL; char *de_buf, *limit; unsigned long offset = 0; unsigned int rec_len, new_rec_len, free_space = dir->i_sb->s_blocksize; /* * This calculates how many free bytes we'd have in block zero, should * this function force expansion to an extent tree. 
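*
* Editor's worked example (assumed numbers, not from the source): with a
* 4096-byte block, a trailer starting at offset 4032 and i_size = 3900,
* free_space is 4032 - 3900 = 132 bytes. Later, if last_de->rec_len plus
* those 132 bytes cannot hold both the minimally-sized last dirent and
* the new record, *blocks_wanted is bumped to 2.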
*/ if (ocfs2_new_dir_wants_trailer(dir)) free_space = ocfs2_dir_trailer_blk_off(sb) - i_size_read(dir); else free_space = dir->i_sb->s_blocksize - i_size_read(dir); de_buf = di->id2.i_data.id_data; limit = de_buf + i_size_read(dir); rec_len = OCFS2_DIR_REC_LEN(namelen); while (de_buf < limit) { de = (struct ocfs2_dir_entry *)de_buf; if (!ocfs2_check_dir_entry(dir, de, di_bh, offset)) { ret = -ENOENT; goto out; } if (ocfs2_match(namelen, name, de)) { ret = -EEXIST; goto out; } /* * No need to check for a trailing dirent record here as * they're not used for inline dirs. */ if (ocfs2_dirent_would_fit(de, rec_len)) { /* Ok, we found a spot. Return this bh and let * the caller actually fill it in. */ *ret_de_bh = di_bh; get_bh(*ret_de_bh); ret = 0; goto out; } last_de = de; de_buf += le16_to_cpu(de->rec_len); offset += le16_to_cpu(de->rec_len); } /* * We're going to require expansion of the directory - figure * out how many blocks we'll need so that a place for the * dirent can be found. */ *blocks_wanted = 1; new_rec_len = le16_to_cpu(last_de->rec_len) + free_space; if (new_rec_len < (rec_len + OCFS2_DIR_REC_LEN(last_de->name_len))) *blocks_wanted = 2; ret = -ENOSPC; out: return ret; } static int ocfs2_find_dir_space_el(struct inode *dir, const char *name, int namelen, struct buffer_head **ret_de_bh) { unsigned long offset; struct buffer_head *bh = NULL; unsigned short rec_len; struct ocfs2_dir_entry *de; struct super_block *sb = dir->i_sb; int status; int blocksize = dir->i_sb->s_blocksize; status = ocfs2_read_dir_block(dir, 0, &bh, 0); if (status) { mlog_errno(status); goto bail; } rec_len = OCFS2_DIR_REC_LEN(namelen); offset = 0; de = (struct ocfs2_dir_entry *) bh->b_data; while (1) { if ((char *)de >= sb->s_blocksize + bh->b_data) { brelse(bh); bh = NULL; if (i_size_read(dir) <= offset) { /* * Caller will have to expand this * directory. */ status = -ENOSPC; goto bail; } status = ocfs2_read_dir_block(dir, offset >> sb->s_blocksize_bits, &bh, 0); if (status) { mlog_errno(status); goto bail; } /* move to next block */ de = (struct ocfs2_dir_entry *) bh->b_data; } if (!ocfs2_check_dir_entry(dir, de, bh, offset)) { status = -ENOENT; goto bail; } if (ocfs2_match(namelen, name, de)) { status = -EEXIST; goto bail; } if (ocfs2_skip_dir_trailer(dir, de, offset % blocksize, blocksize)) goto next; if (ocfs2_dirent_would_fit(de, rec_len)) { /* Ok, we found a spot. Return this bh and let * the caller actually fill it in. 
*/ *ret_de_bh = bh; get_bh(*ret_de_bh); status = 0; goto bail; } next: offset += le16_to_cpu(de->rec_len); de = (struct ocfs2_dir_entry *)((char *) de + le16_to_cpu(de->rec_len)); } status = 0; bail: brelse(bh); if (status) mlog_errno(status); return status; } static int dx_leaf_sort_cmp(const void *a, const void *b) { const struct ocfs2_dx_entry *entry1 = a; const struct ocfs2_dx_entry *entry2 = b; u32 major_hash1 = le32_to_cpu(entry1->dx_major_hash); u32 major_hash2 = le32_to_cpu(entry2->dx_major_hash); u32 minor_hash1 = le32_to_cpu(entry1->dx_minor_hash); u32 minor_hash2 = le32_to_cpu(entry2->dx_minor_hash); if (major_hash1 > major_hash2) return 1; if (major_hash1 < major_hash2) return -1; /* * It is not strictly necessary to sort by minor */ if (minor_hash1 > minor_hash2) return 1; if (minor_hash1 < minor_hash2) return -1; return 0; } static void dx_leaf_sort_swap(void *a, void *b, int size) { struct ocfs2_dx_entry *entry1 = a; struct ocfs2_dx_entry *entry2 = b; struct ocfs2_dx_entry tmp; BUG_ON(size != sizeof(*entry1)); tmp = *entry1; *entry1 = *entry2; *entry2 = tmp; } static int ocfs2_dx_leaf_same_major(struct ocfs2_dx_leaf *dx_leaf) { struct ocfs2_dx_entry_list *dl_list = &dx_leaf->dl_list; int i, num = le16_to_cpu(dl_list->de_num_used); for (i = 0; i < (num - 1); i++) { if (le32_to_cpu(dl_list->de_entries[i].dx_major_hash) != le32_to_cpu(dl_list->de_entries[i + 1].dx_major_hash)) return 0; } return 1; } /* * Find the optimal value to split this leaf on. This expects the leaf * entries to be in sorted order. * * leaf_cpos is the cpos of the leaf we're splitting. insert_hash is * the hash we want to insert. * * This function is only concerned with the major hash - that which * determines which cluster an item belongs to. */ static int ocfs2_dx_dir_find_leaf_split(struct ocfs2_dx_leaf *dx_leaf, u32 leaf_cpos, u32 insert_hash, u32 *split_hash) { struct ocfs2_dx_entry_list *dl_list = &dx_leaf->dl_list; int i, num_used = le16_to_cpu(dl_list->de_num_used); int allsame; /* * There are a couple of rare but nasty corner cases we have to * check for here. All of them involve a leaf where all values * have the same hash, which is what we look for first. * * Most of the time, all of the above is false, and we simply * pick the median value for a split. */ allsame = ocfs2_dx_leaf_same_major(dx_leaf); if (allsame) { u32 val = le32_to_cpu(dl_list->de_entries[0].dx_major_hash); if (val == insert_hash) { /* * No matter where we would choose to split, * the new entry would want to occupy the same * block as these. Since there's no space left * in their existing block, we know there * won't be space after the split. */ return -ENOSPC; } if (val == leaf_cpos) { /* * Because val is the same as leaf_cpos (which * is the smallest value this leaf can have), * yet is not equal to insert_hash, then we * know that insert_hash *must* be larger than * val (and leaf_cpos). At least cpos+1 in value. * * We also know then, that there cannot be an * adjacent extent (otherwise we'd be looking * at it). Choosing this value gives us a * chance to get some contiguousness. */ *split_hash = leaf_cpos + 1; return 0; } if (val > insert_hash) { /* * val cannot be the same as insert hash, and * also must be larger than leaf_cpos. Also, * we know that there can't be a leaf between * cpos and val, otherwise the entries with * hash 'val' would be there.
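*
* Editor's worked example (hypothetical values): leaf_cpos = 50,
* insert_hash = 80, and every entry hashes to val = 100. Splitting at
* val keeps the range [50, 100) - including the insert at 80 - in this
* leaf, while the identical-hash entries all move to the new leaf
* starting at cpos 100.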
*/ *split_hash = val; return 0; } *split_hash = insert_hash; return 0; } /* * Since the records are sorted and the checks above * guaranteed that not all records in this block are the same, * we simply travel forward, from the median, and pick the 1st * record whose value is larger than leaf_cpos. */ for (i = (num_used / 2); i < num_used; i++) if (le32_to_cpu(dl_list->de_entries[i].dx_major_hash) > leaf_cpos) break; BUG_ON(i == num_used); /* Should be impossible */ *split_hash = le32_to_cpu(dl_list->de_entries[i].dx_major_hash); return 0; } /* * Transfer all entries in orig_dx_leaves whose major hash is equal to or * larger than split_hash into new_dx_leaves. We use a temporary * buffer (tmp_dx_leaf) to make the changes to the original leaf blocks. * * Since the block offset inside a leaf (cluster) is a constant mask * of minor_hash, we can optimize - an item at block offset X within * the original cluster will be at offset X within the new cluster. */ static void ocfs2_dx_dir_transfer_leaf(struct inode *dir, u32 split_hash, handle_t *handle, struct ocfs2_dx_leaf *tmp_dx_leaf, struct buffer_head **orig_dx_leaves, struct buffer_head **new_dx_leaves, int num_dx_leaves) { int i, j, num_used; u32 major_hash; struct ocfs2_dx_leaf *orig_dx_leaf, *new_dx_leaf; struct ocfs2_dx_entry_list *orig_list, *new_list, *tmp_list; struct ocfs2_dx_entry *dx_entry; tmp_list = &tmp_dx_leaf->dl_list; for (i = 0; i < num_dx_leaves; i++) { orig_dx_leaf = (struct ocfs2_dx_leaf *) orig_dx_leaves[i]->b_data; orig_list = &orig_dx_leaf->dl_list; new_dx_leaf = (struct ocfs2_dx_leaf *) new_dx_leaves[i]->b_data; new_list = &new_dx_leaf->dl_list; num_used = le16_to_cpu(orig_list->de_num_used); memcpy(tmp_dx_leaf, orig_dx_leaf, dir->i_sb->s_blocksize); tmp_list->de_num_used = cpu_to_le16(0); memset(&tmp_list->de_entries, 0, sizeof(*dx_entry)*num_used); for (j = 0; j < num_used; j++) { dx_entry = &orig_list->de_entries[j]; major_hash = le32_to_cpu(dx_entry->dx_major_hash); if (major_hash >= split_hash) ocfs2_dx_dir_leaf_insert_tail(new_dx_leaf, dx_entry); else ocfs2_dx_dir_leaf_insert_tail(tmp_dx_leaf, dx_entry); } memcpy(orig_dx_leaf, tmp_dx_leaf, dir->i_sb->s_blocksize); ocfs2_journal_dirty(handle, orig_dx_leaves[i]); ocfs2_journal_dirty(handle, new_dx_leaves[i]); } } static int ocfs2_dx_dir_rebalance_credits(struct ocfs2_super *osb, struct ocfs2_dx_root_block *dx_root) { int credits = ocfs2_clusters_to_blocks(osb->sb, 2); credits += ocfs2_calc_extend_credits(osb->sb, &dx_root->dr_list, 1); credits += ocfs2_quota_trans_credits(osb->sb); return credits; } /* * Find the median value in dx_leaf_bh and allocate a new leaf to move * half our entries into.
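*
* Editor's sketch of the flow below (derived from the code, not in the
* original comment): the full leaf is journaled and sorted in place, a
* split hash is chosen via ocfs2_dx_dir_find_leaf_split(), a new cluster
* is formatted and inserted at cpos == split_hash, and entries are only
* transferred once that extent insert has succeeded.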
*/ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir, struct buffer_head *dx_root_bh, struct buffer_head *dx_leaf_bh, struct ocfs2_dx_hinfo *hinfo, u32 leaf_cpos, u64 leaf_blkno) { struct ocfs2_dx_leaf *dx_leaf = (struct ocfs2_dx_leaf *)dx_leaf_bh->b_data; int credits, ret, i, num_used, did_quota = 0; u32 cpos, split_hash, insert_hash = hinfo->major_hash; u64 orig_leaves_start; int num_dx_leaves; struct buffer_head **orig_dx_leaves = NULL; struct buffer_head **new_dx_leaves = NULL; struct ocfs2_alloc_context *data_ac = NULL, *meta_ac = NULL; struct ocfs2_extent_tree et; handle_t *handle = NULL; struct ocfs2_dx_root_block *dx_root; struct ocfs2_dx_leaf *tmp_dx_leaf = NULL; trace_ocfs2_dx_dir_rebalance((unsigned long long)OCFS2_I(dir)->ip_blkno, (unsigned long long)leaf_blkno, insert_hash); ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh); dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data; /* * XXX: This is a rather large limit. We should use a more * realistic value. */ if (le32_to_cpu(dx_root->dr_clusters) == UINT_MAX) return -ENOSPC; num_used = le16_to_cpu(dx_leaf->dl_list.de_num_used); if (num_used < le16_to_cpu(dx_leaf->dl_list.de_count)) { mlog(ML_ERROR, "DX Dir: %llu, Asked to rebalance empty leaf: " "%llu, %d\n", (unsigned long long)OCFS2_I(dir)->ip_blkno, (unsigned long long)leaf_blkno, num_used); ret = -EIO; goto out; } orig_dx_leaves = ocfs2_dx_dir_kmalloc_leaves(osb->sb, &num_dx_leaves); if (!orig_dx_leaves) { ret = -ENOMEM; mlog_errno(ret); goto out; } new_dx_leaves = ocfs2_dx_dir_kmalloc_leaves(osb->sb, NULL); if (!new_dx_leaves) { ret = -ENOMEM; mlog_errno(ret); goto out; } ret = ocfs2_lock_allocators(dir, &et, 1, 0, &data_ac, &meta_ac); if (ret) { if (ret != -ENOSPC) mlog_errno(ret); goto out; } credits = ocfs2_dx_dir_rebalance_credits(osb, dx_root); handle = ocfs2_start_trans(osb, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); handle = NULL; mlog_errno(ret); goto out; } ret = dquot_alloc_space_nodirty(dir, ocfs2_clusters_to_bytes(dir->i_sb, 1)); if (ret) goto out_commit; did_quota = 1; ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), dx_leaf_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_commit; } /* * This block is changing anyway, so we can sort it in place. */ sort(dx_leaf->dl_list.de_entries, num_used, sizeof(struct ocfs2_dx_entry), dx_leaf_sort_cmp, dx_leaf_sort_swap); ocfs2_journal_dirty(handle, dx_leaf_bh); ret = ocfs2_dx_dir_find_leaf_split(dx_leaf, leaf_cpos, insert_hash, &split_hash); if (ret) { mlog_errno(ret); goto out_commit; } trace_ocfs2_dx_dir_rebalance_split(leaf_cpos, split_hash, insert_hash); /* * We have to carefully order operations here. There are items * which want to be in the new cluster before insert, but in * order to put those items in the new cluster, we alter the * old cluster. A failure to insert gets nasty. * * So, start by reserving writes to the old * cluster. ocfs2_dx_dir_new_cluster will reserve writes on * the new cluster for us, before inserting it. The insert * won't happen if there's an error before that. Once the * insert is done then, we can transfer from one leaf into the * other without fear of hitting any error. */ /* * The leaf transfer wants some scratch space so that we don't * wind up doing a bunch of expensive memmove(). 
*/ tmp_dx_leaf = kmalloc(osb->sb->s_blocksize, GFP_NOFS); if (!tmp_dx_leaf) { ret = -ENOMEM; mlog_errno(ret); goto out_commit; } orig_leaves_start = ocfs2_block_to_cluster_start(dir->i_sb, leaf_blkno); ret = ocfs2_read_dx_leaves(dir, orig_leaves_start, num_dx_leaves, orig_dx_leaves); if (ret) { mlog_errno(ret); goto out_commit; } cpos = split_hash; ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle, data_ac, meta_ac, new_dx_leaves, num_dx_leaves); if (ret) { mlog_errno(ret); goto out_commit; } for (i = 0; i < num_dx_leaves; i++) { ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), orig_dx_leaves[i], OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_commit; } ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), new_dx_leaves[i], OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_commit; } } ocfs2_dx_dir_transfer_leaf(dir, split_hash, handle, tmp_dx_leaf, orig_dx_leaves, new_dx_leaves, num_dx_leaves); out_commit: if (ret < 0 && did_quota) dquot_free_space_nodirty(dir, ocfs2_clusters_to_bytes(dir->i_sb, 1)); ocfs2_commit_trans(osb, handle); out: if (orig_dx_leaves || new_dx_leaves) { for (i = 0; i < num_dx_leaves; i++) { if (orig_dx_leaves) brelse(orig_dx_leaves[i]); if (new_dx_leaves) brelse(new_dx_leaves[i]); } kfree(orig_dx_leaves); kfree(new_dx_leaves); } if (meta_ac) ocfs2_free_alloc_context(meta_ac); if (data_ac) ocfs2_free_alloc_context(data_ac); kfree(tmp_dx_leaf); return ret; } static int ocfs2_find_dir_space_dx(struct ocfs2_super *osb, struct inode *dir, struct buffer_head *di_bh, struct buffer_head *dx_root_bh, const char *name, int namelen, struct ocfs2_dir_lookup_result *lookup) { int ret, rebalanced = 0; struct ocfs2_dx_root_block *dx_root; struct buffer_head *dx_leaf_bh = NULL; struct ocfs2_dx_leaf *dx_leaf; u64 blkno; u32 leaf_cpos; dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data; restart_search: ret = ocfs2_dx_dir_lookup(dir, &dx_root->dr_list, &lookup->dl_hinfo, &leaf_cpos, &blkno); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_read_dx_leaf(dir, blkno, &dx_leaf_bh); if (ret) { mlog_errno(ret); goto out; } dx_leaf = (struct ocfs2_dx_leaf *)dx_leaf_bh->b_data; if (le16_to_cpu(dx_leaf->dl_list.de_num_used) >= le16_to_cpu(dx_leaf->dl_list.de_count)) { if (rebalanced) { /* * Rebalancing should have provided us with * space in an appropriate leaf. * * XXX: Is this an abnormal condition then? * Should we print a message here? */ ret = -ENOSPC; goto out; } ret = ocfs2_dx_dir_rebalance(osb, dir, dx_root_bh, dx_leaf_bh, &lookup->dl_hinfo, leaf_cpos, blkno); if (ret) { if (ret != -ENOSPC) mlog_errno(ret); goto out; } /* * Restart the lookup. The rebalance might have * changed which block our item fits into. Mark our * progress, so we only execute this once. 
*/ brelse(dx_leaf_bh); dx_leaf_bh = NULL; rebalanced = 1; goto restart_search; } lookup->dl_dx_leaf_bh = dx_leaf_bh; dx_leaf_bh = NULL; out: brelse(dx_leaf_bh); return ret; } static int ocfs2_search_dx_free_list(struct inode *dir, struct buffer_head *dx_root_bh, int namelen, struct ocfs2_dir_lookup_result *lookup) { int ret = -ENOSPC; struct buffer_head *leaf_bh = NULL, *prev_leaf_bh = NULL; struct ocfs2_dir_block_trailer *db; u64 next_block; int rec_len = OCFS2_DIR_REC_LEN(namelen); struct ocfs2_dx_root_block *dx_root; dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data; next_block = le64_to_cpu(dx_root->dr_free_blk); while (next_block) { brelse(prev_leaf_bh); prev_leaf_bh = leaf_bh; leaf_bh = NULL; ret = ocfs2_read_dir_block_direct(dir, next_block, &leaf_bh); if (ret) { mlog_errno(ret); goto out; } db = ocfs2_trailer_from_bh(leaf_bh, dir->i_sb); if (rec_len <= le16_to_cpu(db->db_free_rec_len)) { lookup->dl_leaf_bh = leaf_bh; lookup->dl_prev_leaf_bh = prev_leaf_bh; leaf_bh = NULL; prev_leaf_bh = NULL; break; } next_block = le64_to_cpu(db->db_free_next); } if (!next_block) ret = -ENOSPC; out: brelse(leaf_bh); brelse(prev_leaf_bh); return ret; } static int ocfs2_expand_inline_dx_root(struct inode *dir, struct buffer_head *dx_root_bh) { int ret, num_dx_leaves, i, j, did_quota = 0; struct buffer_head **dx_leaves = NULL; struct ocfs2_extent_tree et; u64 insert_blkno; struct ocfs2_alloc_context *data_ac = NULL; struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); handle_t *handle = NULL; struct ocfs2_dx_root_block *dx_root; struct ocfs2_dx_entry_list *entry_list; struct ocfs2_dx_entry *dx_entry; struct ocfs2_dx_leaf *target_leaf; ret = ocfs2_reserve_clusters(osb, 1, &data_ac); if (ret) { mlog_errno(ret); goto out; } dx_leaves = ocfs2_dx_dir_kmalloc_leaves(osb->sb, &num_dx_leaves); if (!dx_leaves) { ret = -ENOMEM; mlog_errno(ret); goto out; } handle = ocfs2_start_trans(osb, ocfs2_calc_dxi_expand_credits(osb->sb)); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out; } ret = dquot_alloc_space_nodirty(dir, ocfs2_clusters_to_bytes(osb->sb, 1)); if (ret) goto out_commit; did_quota = 1; /* * We do this up front, before the allocation, so that a * failure to add the dx_root_bh to the journal won't result * us losing clusters. */ ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_commit; } ret = __ocfs2_dx_dir_new_cluster(dir, 0, handle, data_ac, dx_leaves, num_dx_leaves, &insert_blkno); if (ret) { mlog_errno(ret); goto out_commit; } /* * Transfer the entries from our dx_root into the appropriate * block */ dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data; entry_list = &dx_root->dr_entries; for (i = 0; i < le16_to_cpu(entry_list->de_num_used); i++) { dx_entry = &entry_list->de_entries[i]; j = __ocfs2_dx_dir_hash_idx(osb, le32_to_cpu(dx_entry->dx_minor_hash)); target_leaf = (struct ocfs2_dx_leaf *)dx_leaves[j]->b_data; ocfs2_dx_dir_leaf_insert_tail(target_leaf, dx_entry); /* Each leaf has been passed to the journal already * via __ocfs2_dx_dir_new_cluster() */ } dx_root->dr_flags &= ~OCFS2_DX_FLAG_INLINE; memset(&dx_root->dr_list, 0, osb->sb->s_blocksize - offsetof(struct ocfs2_dx_root_block, dr_list)); dx_root->dr_list.l_count = cpu_to_le16(ocfs2_extent_recs_per_dx_root(osb->sb)); /* This should never fail considering we start with an empty * dx_root. 
*/ ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh); ret = ocfs2_insert_extent(handle, &et, 0, insert_blkno, 1, 0, NULL); if (ret) mlog_errno(ret); did_quota = 0; ocfs2_journal_dirty(handle, dx_root_bh); out_commit: if (ret < 0 && did_quota) dquot_free_space_nodirty(dir, ocfs2_clusters_to_bytes(dir->i_sb, 1)); ocfs2_commit_trans(osb, handle); out: if (data_ac) ocfs2_free_alloc_context(data_ac); if (dx_leaves) { for (i = 0; i < num_dx_leaves; i++) brelse(dx_leaves[i]); kfree(dx_leaves); } return ret; } static int ocfs2_inline_dx_has_space(struct buffer_head *dx_root_bh) { struct ocfs2_dx_root_block *dx_root; struct ocfs2_dx_entry_list *entry_list; dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data; entry_list = &dx_root->dr_entries; if (le16_to_cpu(entry_list->de_num_used) >= le16_to_cpu(entry_list->de_count)) return -ENOSPC; return 0; } static int ocfs2_prepare_dx_dir_for_insert(struct inode *dir, struct buffer_head *di_bh, const char *name, int namelen, struct ocfs2_dir_lookup_result *lookup) { int ret, free_dx_root = 1; struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); struct buffer_head *dx_root_bh = NULL; struct buffer_head *leaf_bh = NULL; struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; struct ocfs2_dx_root_block *dx_root; ret = ocfs2_read_dx_root(dir, di, &dx_root_bh); if (ret) { mlog_errno(ret); goto out; } dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data; if (le32_to_cpu(dx_root->dr_num_entries) == OCFS2_DX_ENTRIES_MAX) { ret = -ENOSPC; mlog_errno(ret); goto out; } if (ocfs2_dx_root_inline(dx_root)) { ret = ocfs2_inline_dx_has_space(dx_root_bh); if (ret == 0) goto search_el; /* * We ran out of room in the root block. Expand it to * an extent, then allow ocfs2_find_dir_space_dx to do * the rest. */ ret = ocfs2_expand_inline_dx_root(dir, dx_root_bh); if (ret) { mlog_errno(ret); goto out; } } /* * Insert preparation for an indexed directory is split into two * steps. The call to find_dir_space_dx reserves room in the index for * an additional item. If we run out of space there, it's a real error * we can't continue on. */ ret = ocfs2_find_dir_space_dx(osb, dir, di_bh, dx_root_bh, name, namelen, lookup); if (ret) { mlog_errno(ret); goto out; } search_el: /* * Next, we need to find space in the unindexed tree. This call * searches using the free space linked list. If the unindexed tree * lacks sufficient space, we'll expand it below. The expansion code * is smart enough to add any new blocks to the free space list. */ ret = ocfs2_search_dx_free_list(dir, dx_root_bh, namelen, lookup); if (ret && ret != -ENOSPC) { mlog_errno(ret); goto out; } /* Do this up here - ocfs2_extend_dir might need the dx_root */ lookup->dl_dx_root_bh = dx_root_bh; free_dx_root = 0; if (ret == -ENOSPC) { ret = ocfs2_extend_dir(osb, dir, di_bh, 1, lookup, &leaf_bh); if (ret) { mlog_errno(ret); goto out; } /* * We make the assumption here that new leaf blocks are added * to the front of our free list. */ lookup->dl_prev_leaf_bh = NULL; lookup->dl_leaf_bh = leaf_bh; } out: if (free_dx_root) brelse(dx_root_bh); return ret; } /* * Get a directory ready for insert. Any directory allocation required * happens here. Success returns zero, and enough context in the dir * lookup result that ocfs2_add_entry() will be able to complete the task * with minimal performance impact.
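*
* Editor's sketch of the expected calling pattern (assumed from the text
* above; error handling elided, consumer name as referenced there):
*
*   struct ocfs2_dir_lookup_result lookup = { NULL, };
*
*   ret = ocfs2_prepare_dir_for_insert(osb, dir, parent_fe_bh,
*                                      name, namelen, &lookup);
*   if (ret == 0)
*           ret = ocfs2_add_entry(...);    (fills the reserved space)
*   ocfs2_free_dir_lookup_result(&lookup);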
*/ int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb, struct inode *dir, struct buffer_head *parent_fe_bh, const char *name, int namelen, struct ocfs2_dir_lookup_result *lookup) { int ret; unsigned int blocks_wanted = 1; struct buffer_head *bh = NULL; trace_ocfs2_prepare_dir_for_insert( (unsigned long long)OCFS2_I(dir)->ip_blkno, namelen); if (!namelen) { ret = -EINVAL; mlog_errno(ret); goto out; } /* * Do this up front to reduce confusion. * * The directory might start inline, then be turned into an * indexed one, in which case we'd need to hash deep inside * ocfs2_find_dir_space_id(). Since * ocfs2_prepare_dx_dir_for_insert() also needs this hash * done, there seems no point in spreading out the calls. We * can optimize away the case where the file system doesn't * support indexing. */ if (ocfs2_supports_indexed_dirs(osb)) ocfs2_dx_dir_name_hash(dir, name, namelen, &lookup->dl_hinfo); if (ocfs2_dir_indexed(dir)) { ret = ocfs2_prepare_dx_dir_for_insert(dir, parent_fe_bh, name, namelen, lookup); if (ret) mlog_errno(ret); goto out; } if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { ret = ocfs2_find_dir_space_id(dir, parent_fe_bh, name, namelen, &bh, &blocks_wanted); } else ret = ocfs2_find_dir_space_el(dir, name, namelen, &bh); if (ret && ret != -ENOSPC) { mlog_errno(ret); goto out; } if (ret == -ENOSPC) { /* * We have to expand the directory to add this name. */ BUG_ON(bh); ret = ocfs2_extend_dir(osb, dir, parent_fe_bh, blocks_wanted, lookup, &bh); if (ret) { if (ret != -ENOSPC) mlog_errno(ret); goto out; } BUG_ON(!bh); } lookup->dl_leaf_bh = bh; bh = NULL; out: brelse(bh); return ret; } static int ocfs2_dx_dir_remove_index(struct inode *dir, struct buffer_head *di_bh, struct buffer_head *dx_root_bh) { int ret; struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; struct ocfs2_dx_root_block *dx_root; struct inode *dx_alloc_inode = NULL; struct buffer_head *dx_alloc_bh = NULL; handle_t *handle; u64 blk; u16 bit; u64 bg_blkno; dx_root = (struct ocfs2_dx_root_block *) dx_root_bh->b_data; dx_alloc_inode = ocfs2_get_system_file_inode(osb, EXTENT_ALLOC_SYSTEM_INODE, le16_to_cpu(dx_root->dr_suballoc_slot)); if (!dx_alloc_inode) { ret = -ENOMEM; mlog_errno(ret); goto out; } mutex_lock(&dx_alloc_inode->i_mutex); ret = ocfs2_inode_lock(dx_alloc_inode, &dx_alloc_bh, 1); if (ret) { mlog_errno(ret); goto out_mutex; } handle = ocfs2_start_trans(osb, OCFS2_DX_ROOT_REMOVE_CREDITS); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out_unlock; } ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_commit; } spin_lock(&OCFS2_I(dir)->ip_lock); OCFS2_I(dir)->ip_dyn_features &= ~OCFS2_INDEXED_DIR_FL; di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features); spin_unlock(&OCFS2_I(dir)->ip_lock); di->i_dx_root = cpu_to_le64(0ULL); ocfs2_journal_dirty(handle, di_bh); blk = le64_to_cpu(dx_root->dr_blkno); bit = le16_to_cpu(dx_root->dr_suballoc_bit); if (dx_root->dr_suballoc_loc) bg_blkno = le64_to_cpu(dx_root->dr_suballoc_loc); else bg_blkno = ocfs2_which_suballoc_group(blk, bit); ret = ocfs2_free_suballoc_bits(handle, dx_alloc_inode, dx_alloc_bh, bit, bg_blkno, 1); if (ret) mlog_errno(ret); out_commit: ocfs2_commit_trans(osb, handle); out_unlock: ocfs2_inode_unlock(dx_alloc_inode, 1); out_mutex: mutex_unlock(&dx_alloc_inode->i_mutex); brelse(dx_alloc_bh); out: iput(dx_alloc_inode); return ret; } int ocfs2_dx_dir_truncate(struct inode *dir, 
struct buffer_head *di_bh) { int ret; unsigned int uninitialized_var(clen); u32 major_hash = UINT_MAX, p_cpos, uninitialized_var(cpos); u64 uninitialized_var(blkno); struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); struct buffer_head *dx_root_bh = NULL; struct ocfs2_dx_root_block *dx_root; struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; struct ocfs2_cached_dealloc_ctxt dealloc; struct ocfs2_extent_tree et; ocfs2_init_dealloc_ctxt(&dealloc); if (!ocfs2_dir_indexed(dir)) return 0; ret = ocfs2_read_dx_root(dir, di, &dx_root_bh); if (ret) { mlog_errno(ret); goto out; } dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data; if (ocfs2_dx_root_inline(dx_root)) goto remove_index; ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh); /* XXX: What if dr_clusters is too large? */ while (le32_to_cpu(dx_root->dr_clusters)) { ret = ocfs2_dx_dir_lookup_rec(dir, &dx_root->dr_list, major_hash, &cpos, &blkno, &clen); if (ret) { mlog_errno(ret); goto out; } p_cpos = ocfs2_blocks_to_clusters(dir->i_sb, blkno); ret = ocfs2_remove_btree_range(dir, &et, cpos, p_cpos, clen, 0, &dealloc, 0); if (ret) { mlog_errno(ret); goto out; } if (cpos == 0) break; major_hash = cpos - 1; } remove_index: ret = ocfs2_dx_dir_remove_index(dir, di_bh, dx_root_bh); if (ret) { mlog_errno(ret); goto out; } ocfs2_remove_from_cache(INODE_CACHE(dir), dx_root_bh); out: ocfs2_schedule_truncate_log_flush(osb, 1); ocfs2_run_deallocs(osb, &dealloc); brelse(dx_root_bh); return ret; }
gpl-2.0
Luavis/SOS
net/atm/mpoa_proc.c
3636
7326
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ #ifdef CONFIG_PROC_FS #include <linux/errno.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/time.h> #include <linux/seq_file.h> #include <linux/uaccess.h> #include <linux/atmmpc.h> #include <linux/atm.h> #include <linux/gfp.h> #include "mpc.h" #include "mpoa_caches.h" /* * mpoa_proc.c: Implementation of the MPOA client's proc * file system statistics */ #if 1 #define dprintk(format, args...) \ printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */ #else #define dprintk(format, args...) \ do { if (0) \ printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\ } while (0) #endif #if 0 #define ddprintk(format, args...) \ printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */ #else #define ddprintk(format, args...) \ do { if (0) \ printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\ } while (0) #endif #define STAT_FILE_NAME "mpc" /* Our statistic file's name */ extern struct mpoa_client *mpcs; extern struct proc_dir_entry *atm_proc_root; /* from proc.c. */ static int proc_mpc_open(struct inode *inode, struct file *file); static ssize_t proc_mpc_write(struct file *file, const char __user *buff, size_t nbytes, loff_t *ppos); static int parse_qos(const char *buff); /* * Define allowed FILE OPERATIONS */ static const struct file_operations mpc_file_operations = { .owner = THIS_MODULE, .open = proc_mpc_open, .read = seq_read, .llseek = seq_lseek, .write = proc_mpc_write, .release = seq_release, }; /* * Returns the state of an ingress cache entry as a string */ static const char *ingress_state_string(int state) { switch (state) { case INGRESS_RESOLVING: return "resolving "; case INGRESS_RESOLVED: return "resolved "; case INGRESS_INVALID: return "invalid "; case INGRESS_REFRESHING: return "refreshing "; } return ""; } /* * Returns the state of an egress cache entry as a string */ static const char *egress_state_string(int state) { switch (state) { case EGRESS_RESOLVED: return "resolved "; case EGRESS_PURGE: return "purge "; case EGRESS_INVALID: return "invalid "; } return ""; } /* * FIXME: mpcs (and per-mpc lists) have no locking whatsoever. */ static void *mpc_start(struct seq_file *m, loff_t *pos) { loff_t l = *pos; struct mpoa_client *mpc; if (!l--) return SEQ_START_TOKEN; for (mpc = mpcs; mpc; mpc = mpc->next) if (!l--) return mpc; return NULL; } static void *mpc_next(struct seq_file *m, void *v, loff_t *pos) { struct mpoa_client *p = v; (*pos)++; return v == SEQ_START_TOKEN ? mpcs : p->next; } static void mpc_stop(struct seq_file *m, void *v) { } /* * READING function - called when the /proc/atm/mpc file is read from.
*/ static int mpc_show(struct seq_file *m, void *v) { struct mpoa_client *mpc = v; int i; in_cache_entry *in_entry; eg_cache_entry *eg_entry; struct timeval now; unsigned char ip_string[16]; if (v == SEQ_START_TOKEN) { atm_mpoa_disp_qos(m); return 0; } seq_printf(m, "\nInterface %d:\n\n", mpc->dev_num); seq_printf(m, "Ingress Entries:\nIP address State Holding time Packets fwded VPI VCI\n"); do_gettimeofday(&now); for (in_entry = mpc->in_cache; in_entry; in_entry = in_entry->next) { sprintf(ip_string, "%pI4", &in_entry->ctrl_info.in_dst_ip); seq_printf(m, "%-16s%s%-14lu%-12u", ip_string, ingress_state_string(in_entry->entry_state), in_entry->ctrl_info.holding_time - (now.tv_sec-in_entry->tv.tv_sec), in_entry->packets_fwded); if (in_entry->shortcut) seq_printf(m, " %-3d %-3d", in_entry->shortcut->vpi, in_entry->shortcut->vci); seq_printf(m, "\n"); } seq_printf(m, "\n"); seq_printf(m, "Egress Entries:\nIngress MPC ATM addr\nCache-id State Holding time Packets recvd Latest IP addr VPI VCI\n"); for (eg_entry = mpc->eg_cache; eg_entry; eg_entry = eg_entry->next) { unsigned char *p = eg_entry->ctrl_info.in_MPC_data_ATM_addr; for (i = 0; i < ATM_ESA_LEN; i++) seq_printf(m, "%02x", p[i]); seq_printf(m, "\n%-16lu%s%-14lu%-15u", (unsigned long)ntohl(eg_entry->ctrl_info.cache_id), egress_state_string(eg_entry->entry_state), (eg_entry->ctrl_info.holding_time - (now.tv_sec-eg_entry->tv.tv_sec)), eg_entry->packets_rcvd); /* latest IP address */ sprintf(ip_string, "%pI4", &eg_entry->latest_ip_addr); seq_printf(m, "%-16s", ip_string); if (eg_entry->shortcut) seq_printf(m, " %-3d %-3d", eg_entry->shortcut->vpi, eg_entry->shortcut->vci); seq_printf(m, "\n"); } seq_printf(m, "\n"); return 0; } static const struct seq_operations mpc_op = { .start = mpc_start, .next = mpc_next, .stop = mpc_stop, .show = mpc_show }; static int proc_mpc_open(struct inode *inode, struct file *file) { return seq_open(file, &mpc_op); } static ssize_t proc_mpc_write(struct file *file, const char __user *buff, size_t nbytes, loff_t *ppos) { char *page, *p; unsigned int len; if (nbytes == 0) return 0; if (nbytes >= PAGE_SIZE) nbytes = PAGE_SIZE-1; page = (char *)__get_free_page(GFP_KERNEL); if (!page) return -ENOMEM; for (p = page, len = 0; len < nbytes; p++, len++) { if (get_user(*p, buff++)) { free_page((unsigned long)page); return -EFAULT; } if (*p == '\0' || *p == '\n') break; } *p = '\0'; if (!parse_qos(page)) printk("mpoa: proc_mpc_write: could not parse '%s'\n", page); free_page((unsigned long)page); return len; } static int parse_qos(const char *buff) { /* possible lines look like this * add 130.230.54.142 tx=max_pcr,max_sdu rx=max_pcr,max_sdu */ unsigned char ip[4]; int tx_pcr, tx_sdu, rx_pcr, rx_sdu; __be32 ipaddr; struct atm_qos qos; memset(&qos, 0, sizeof(struct atm_qos)); if (sscanf(buff, "del %hhu.%hhu.%hhu.%hhu", ip, ip+1, ip+2, ip+3) == 4) { ipaddr = *(__be32 *)ip; return atm_mpoa_delete_qos(atm_mpoa_search_qos(ipaddr)); } if (sscanf(buff, "add %hhu.%hhu.%hhu.%hhu tx=%d,%d rx=tx", ip, ip+1, ip+2, ip+3, &tx_pcr, &tx_sdu) == 6) { rx_pcr = tx_pcr; rx_sdu = tx_sdu; } else if (sscanf(buff, "add %hhu.%hhu.%hhu.%hhu tx=%d,%d rx=%d,%d", ip, ip+1, ip+2, ip+3, &tx_pcr, &tx_sdu, &rx_pcr, &rx_sdu) != 8) return 0; ipaddr = *(__be32 *)ip; qos.txtp.traffic_class = ATM_CBR; qos.txtp.max_pcr = tx_pcr; qos.txtp.max_sdu = tx_sdu; qos.rxtp.traffic_class = ATM_CBR; qos.rxtp.max_pcr = rx_pcr; qos.rxtp.max_sdu = rx_sdu; qos.aal = ATM_AAL5; dprintk("parse_qos(): setting qos parameters to tx=%d,%d rx=%d,%d\n", qos.txtp.max_pcr,
qos.txtp.max_sdu, qos.rxtp.max_pcr, qos.rxtp.max_sdu); atm_mpoa_add_qos(ipaddr, &qos); return 1; } /* * INITIALIZATION function - called when module is initialized/loaded. */ int mpc_proc_init(void) { struct proc_dir_entry *p; p = proc_create(STAT_FILE_NAME, 0, atm_proc_root, &mpc_file_operations); if (!p) { pr_err("Unable to initialize /proc/atm/%s\n", STAT_FILE_NAME); return -ENOMEM; } return 0; } /* * DELETING function - called when module is removed. */ void mpc_proc_clean(void) { remove_proc_entry(STAT_FILE_NAME, atm_proc_root); } #endif /* CONFIG_PROC_FS */
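A minimal user-space sketch (not part of mpoa_proc.c) of driving parse_qos() through the proc file created by mpc_proc_init(); the file path follows from STAT_FILE_NAME under /proc/atm, the command formats from the comment in parse_qos(), and the IP addresses and PCR/SDU values are purely illustrative:

/*
 * Illustrative only: each fputs()+fflush() issues one write(), and
 * proc_mpc_write() hands the line to parse_qos().
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/atm/mpc", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* "rx=tx" mirrors the tx parameters onto the rx direction */
	fputs("add 130.230.54.142 tx=2000,1500 rx=tx\n", f);
	fflush(f);
	/* asymmetric form: explicit rx PCR and SDU */
	fputs("add 130.230.54.143 tx=2000,1500 rx=1000,1500\n", f);
	fflush(f);
	/* "del <ip>" removes a previously added QoS entry */
	fputs("del 130.230.54.142\n", f);
	fclose(f);
	return 0;
}

Reading the same file back goes through mpc_show(), which first prints the QoS table via atm_mpoa_disp_qos() and then dumps the ingress and egress caches per interface.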
gpl-2.0
StelixROM/kernel_lge_msm8974
drivers/base/power/domain.c
4148
45518
/* * drivers/base/power/domain.c - Common code related to device power domains. * * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. * * This file is released under the GPLv2. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/io.h> #include <linux/pm_runtime.h> #include <linux/pm_domain.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/sched.h> #include <linux/suspend.h> #include <linux/export.h> #define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \ ({ \ type (*__routine)(struct device *__d); \ type __ret = (type)0; \ \ __routine = genpd->dev_ops.callback; \ if (__routine) { \ __ret = __routine(dev); \ } else { \ __routine = dev_gpd_data(dev)->ops.callback; \ if (__routine) \ __ret = __routine(dev); \ } \ __ret; \ }) #define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name) \ ({ \ ktime_t __start = ktime_get(); \ type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev); \ s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start)); \ struct generic_pm_domain_data *__gpd_data = dev_gpd_data(dev); \ if (__elapsed > __gpd_data->td.field) { \ __gpd_data->td.field = __elapsed; \ dev_warn(dev, name " latency exceeded, new value %lld ns\n", \ __elapsed); \ } \ __retval; \ }) static LIST_HEAD(gpd_list); static DEFINE_MUTEX(gpd_list_lock); #ifdef CONFIG_PM struct generic_pm_domain *dev_to_genpd(struct device *dev) { if (IS_ERR_OR_NULL(dev->pm_domain)) return ERR_PTR(-EINVAL); return pd_to_genpd(dev->pm_domain); } static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev) { return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev, stop_latency_ns, "stop"); } static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev) { return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev, start_latency_ns, "start"); } static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev) { return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev, save_state_latency_ns, "state save"); } static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev) { return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev, restore_state_latency_ns, "state restore"); } static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) { bool ret = false; if (!WARN_ON(atomic_read(&genpd->sd_count) == 0)) ret = !!atomic_dec_and_test(&genpd->sd_count); return ret; } static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) { atomic_inc(&genpd->sd_count); smp_mb__after_atomic_inc(); } static void genpd_acquire_lock(struct generic_pm_domain *genpd) { DEFINE_WAIT(wait); mutex_lock(&genpd->lock); /* * Wait for the domain to transition into either the active, * or the power off state. */ for (;;) { prepare_to_wait(&genpd->status_wait_queue, &wait, TASK_UNINTERRUPTIBLE); if (genpd->status == GPD_STATE_ACTIVE || genpd->status == GPD_STATE_POWER_OFF) break; mutex_unlock(&genpd->lock); schedule(); mutex_lock(&genpd->lock); } finish_wait(&genpd->status_wait_queue, &wait); } static void genpd_release_lock(struct generic_pm_domain *genpd) { mutex_unlock(&genpd->lock); } static void genpd_set_active(struct generic_pm_domain *genpd) { if (genpd->resume_count == 0) genpd->status = GPD_STATE_ACTIVE; } /** * __pm_genpd_poweron - Restore power to a given PM domain and its masters. * @genpd: PM domain to power up. * * Restore power to @genpd and all of its masters so that it is possible to * resume a device belonging to it. 
*/ int __pm_genpd_poweron(struct generic_pm_domain *genpd) __releases(&genpd->lock) __acquires(&genpd->lock) { struct gpd_link *link; DEFINE_WAIT(wait); int ret = 0; /* If the domain's master is being waited for, we have to wait too. */ for (;;) { prepare_to_wait(&genpd->status_wait_queue, &wait, TASK_UNINTERRUPTIBLE); if (genpd->status != GPD_STATE_WAIT_MASTER) break; mutex_unlock(&genpd->lock); schedule(); mutex_lock(&genpd->lock); } finish_wait(&genpd->status_wait_queue, &wait); if (genpd->status == GPD_STATE_ACTIVE || (genpd->prepared_count > 0 && genpd->suspend_power_off)) return 0; if (genpd->status != GPD_STATE_POWER_OFF) { genpd_set_active(genpd); return 0; } /* * The list is guaranteed not to change while the loop below is being * executed, unless one of the masters' .power_on() callbacks fiddles * with it. */ list_for_each_entry(link, &genpd->slave_links, slave_node) { genpd_sd_counter_inc(link->master); genpd->status = GPD_STATE_WAIT_MASTER; mutex_unlock(&genpd->lock); ret = pm_genpd_poweron(link->master); mutex_lock(&genpd->lock); /* * The "wait for parent" status is guaranteed not to change * while the master is powering on. */ genpd->status = GPD_STATE_POWER_OFF; wake_up_all(&genpd->status_wait_queue); if (ret) { genpd_sd_counter_dec(link->master); goto err; } } if (genpd->power_on) { ktime_t time_start = ktime_get(); s64 elapsed_ns; ret = genpd->power_on(genpd); if (ret) goto err; elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); if (elapsed_ns > genpd->power_on_latency_ns) { genpd->power_on_latency_ns = elapsed_ns; if (genpd->name) pr_warning("%s: Power-on latency exceeded, " "new value %lld ns\n", genpd->name, elapsed_ns); } } genpd_set_active(genpd); return 0; err: list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node) genpd_sd_counter_dec(link->master); return ret; } /** * pm_genpd_poweron - Restore power to a given PM domain and its masters. * @genpd: PM domain to power up. */ int pm_genpd_poweron(struct generic_pm_domain *genpd) { int ret; mutex_lock(&genpd->lock); ret = __pm_genpd_poweron(genpd); mutex_unlock(&genpd->lock); return ret; } #endif /* CONFIG_PM */ #ifdef CONFIG_PM_RUNTIME /** * __pm_genpd_save_device - Save the pre-suspend state of a device. * @pdd: Domain data of the device to save the state of. * @genpd: PM domain the device belongs to. */ static int __pm_genpd_save_device(struct pm_domain_data *pdd, struct generic_pm_domain *genpd) __releases(&genpd->lock) __acquires(&genpd->lock) { struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); struct device *dev = pdd->dev; int ret = 0; if (gpd_data->need_restore) return 0; mutex_unlock(&genpd->lock); genpd_start_dev(genpd, dev); ret = genpd_save_dev(genpd, dev); genpd_stop_dev(genpd, dev); mutex_lock(&genpd->lock); if (!ret) gpd_data->need_restore = true; return ret; } /** * __pm_genpd_restore_device - Restore the pre-suspend state of a device. * @pdd: Domain data of the device to restore the state of. * @genpd: PM domain the device belongs to. */ static void __pm_genpd_restore_device(struct pm_domain_data *pdd, struct generic_pm_domain *genpd) __releases(&genpd->lock) __acquires(&genpd->lock) { struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); struct device *dev = pdd->dev; if (!gpd_data->need_restore) return; mutex_unlock(&genpd->lock); genpd_start_dev(genpd, dev); genpd_restore_dev(genpd, dev); genpd_stop_dev(genpd, dev); mutex_lock(&genpd->lock); gpd_data->need_restore = false; } /** * genpd_abort_poweroff - Check if a PM domain power off should be aborted. 
* @genpd: PM domain to check. * * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during * a "power off" operation, which means that a "power on" has occurred in the * meantime, or if its resume_count field is different from zero, which means * that one of its devices has been resumed in the meantime. */ static bool genpd_abort_poweroff(struct generic_pm_domain *genpd) { return genpd->status == GPD_STATE_WAIT_MASTER || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0; } /** * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff(). * @genpd: PM domain to power off. * * Queue up the execution of pm_genpd_poweroff() unless it's already been done * before. */ void genpd_queue_power_off_work(struct generic_pm_domain *genpd) { if (!work_pending(&genpd->power_off_work)) queue_work(pm_wq, &genpd->power_off_work); } /** * pm_genpd_poweroff - Remove power from a given PM domain. * @genpd: PM domain to power down. * * If all of the @genpd's devices have been suspended and all of its subdomains * have been powered down, run the runtime suspend callbacks provided by all of * the @genpd's devices' drivers and remove power from @genpd. */ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) __releases(&genpd->lock) __acquires(&genpd->lock) { struct pm_domain_data *pdd; struct gpd_link *link; unsigned int not_suspended; int ret = 0; start: /* * Do not try to power off the domain in the following situations: * (1) The domain is already in the "power off" state. * (2) The domain is waiting for its master to power up. * (3) One of the domain's devices is being resumed right now. * (4) System suspend is in progress. */ if (genpd->status == GPD_STATE_POWER_OFF || genpd->status == GPD_STATE_WAIT_MASTER || genpd->resume_count > 0 || genpd->prepared_count > 0) return 0; if (atomic_read(&genpd->sd_count) > 0) return -EBUSY; not_suspended = 0; list_for_each_entry(pdd, &genpd->dev_list, list_node) if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev) || pdd->dev->power.irq_safe || to_gpd_data(pdd)->always_on)) not_suspended++; if (not_suspended > genpd->in_progress) return -EBUSY; if (genpd->poweroff_task) { /* * Another instance of pm_genpd_poweroff() is executing * callbacks, so tell it to start over and return. */ genpd->status = GPD_STATE_REPEAT; return 0; } if (genpd->gov && genpd->gov->power_down_ok) { if (!genpd->gov->power_down_ok(&genpd->domain)) return -EAGAIN; } genpd->status = GPD_STATE_BUSY; genpd->poweroff_task = current; list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) { ret = atomic_read(&genpd->sd_count) == 0 ? __pm_genpd_save_device(pdd, genpd) : -EBUSY; if (genpd_abort_poweroff(genpd)) goto out; if (ret) { genpd_set_active(genpd); goto out; } if (genpd->status == GPD_STATE_REPEAT) { genpd->poweroff_task = NULL; goto start; } } if (genpd->power_off) { ktime_t time_start; s64 elapsed_ns; if (atomic_read(&genpd->sd_count) > 0) { ret = -EBUSY; goto out; } time_start = ktime_get(); /* * If sd_count > 0 at this point, one of the subdomains hasn't * managed to call pm_genpd_poweron() for the master yet after * incrementing it. In that case pm_genpd_poweron() will wait * for us to drop the lock, so we can call .power_off() and let * the pm_genpd_poweron() restore power for us (this shouldn't * happen very often).
*/ ret = genpd->power_off(genpd); if (ret == -EBUSY) { genpd_set_active(genpd); goto out; } elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); if (elapsed_ns > genpd->power_off_latency_ns) { genpd->power_off_latency_ns = elapsed_ns; if (genpd->name) pr_warning("%s: Power-off latency exceeded, " "new value %lld ns\n", genpd->name, elapsed_ns); } } genpd->status = GPD_STATE_POWER_OFF; genpd->power_off_time = ktime_get(); /* Update PM QoS information for devices in the domain. */ list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) { struct gpd_timing_data *td = &to_gpd_data(pdd)->td; pm_runtime_update_max_time_suspended(pdd->dev, td->start_latency_ns + td->restore_state_latency_ns + genpd->power_on_latency_ns); } list_for_each_entry(link, &genpd->slave_links, slave_node) { genpd_sd_counter_dec(link->master); genpd_queue_power_off_work(link->master); } out: genpd->poweroff_task = NULL; wake_up_all(&genpd->status_wait_queue); return ret; } /** * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0. * @work: Work structure used for scheduling the execution of this function. */ static void genpd_power_off_work_fn(struct work_struct *work) { struct generic_pm_domain *genpd; genpd = container_of(work, struct generic_pm_domain, power_off_work); genpd_acquire_lock(genpd); pm_genpd_poweroff(genpd); genpd_release_lock(genpd); } /** * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain. * @dev: Device to suspend. * * Carry out a runtime suspend of a device under the assumption that its * pm_domain field points to the domain member of an object of type * struct generic_pm_domain representing a PM domain consisting of I/O devices. */ static int pm_genpd_runtime_suspend(struct device *dev) { struct generic_pm_domain *genpd; bool (*stop_ok)(struct device *__dev); int ret; dev_dbg(dev, "%s()\n", __func__); genpd = dev_to_genpd(dev); if (IS_ERR(genpd)) return -EINVAL; might_sleep_if(!genpd->dev_irq_safe); if (dev_gpd_data(dev)->always_on) return -EBUSY; stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL; if (stop_ok && !stop_ok(dev)) return -EBUSY; ret = genpd_stop_dev(genpd, dev); if (ret) return ret; pm_runtime_update_max_time_suspended(dev, dev_gpd_data(dev)->td.start_latency_ns); /* * If power.irq_safe is set, this routine will be run with interrupts * off, so it can't use mutexes. */ if (dev->power.irq_safe) return 0; mutex_lock(&genpd->lock); genpd->in_progress++; pm_genpd_poweroff(genpd); genpd->in_progress--; mutex_unlock(&genpd->lock); return 0; } /** * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain. * @dev: Device to resume. * * Carry out a runtime resume of a device under the assumption that its * pm_domain field points to the domain member of an object of type * struct generic_pm_domain representing a PM domain consisting of I/O devices. */ static int pm_genpd_runtime_resume(struct device *dev) { struct generic_pm_domain *genpd; DEFINE_WAIT(wait); int ret; dev_dbg(dev, "%s()\n", __func__); genpd = dev_to_genpd(dev); if (IS_ERR(genpd)) return -EINVAL; might_sleep_if(!genpd->dev_irq_safe); /* If power.irq_safe, the PM domain is never powered off. 
*/ if (dev->power.irq_safe) goto out; mutex_lock(&genpd->lock); ret = __pm_genpd_poweron(genpd); if (ret) { mutex_unlock(&genpd->lock); return ret; } genpd->status = GPD_STATE_BUSY; genpd->resume_count++; for (;;) { prepare_to_wait(&genpd->status_wait_queue, &wait, TASK_UNINTERRUPTIBLE); /* * If current is the powering off task, we have been called * reentrantly from one of the device callbacks, so we should * not wait. */ if (!genpd->poweroff_task || genpd->poweroff_task == current) break; mutex_unlock(&genpd->lock); schedule(); mutex_lock(&genpd->lock); } finish_wait(&genpd->status_wait_queue, &wait); __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd); genpd->resume_count--; genpd_set_active(genpd); wake_up_all(&genpd->status_wait_queue); mutex_unlock(&genpd->lock); out: genpd_start_dev(genpd, dev); return 0; } /** * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use. */ void pm_genpd_poweroff_unused(void) { struct generic_pm_domain *genpd; mutex_lock(&gpd_list_lock); list_for_each_entry(genpd, &gpd_list, gpd_list_node) genpd_queue_power_off_work(genpd); mutex_unlock(&gpd_list_lock); } #else static inline void genpd_power_off_work_fn(struct work_struct *work) {} #define pm_genpd_runtime_suspend NULL #define pm_genpd_runtime_resume NULL #endif /* CONFIG_PM_RUNTIME */ #ifdef CONFIG_PM_SLEEP static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd, struct device *dev) { return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev); } static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev) { return GENPD_DEV_CALLBACK(genpd, int, suspend, dev); } static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev) { return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev); } static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev) { return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev); } static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev) { return GENPD_DEV_CALLBACK(genpd, int, resume, dev); } static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev) { return GENPD_DEV_CALLBACK(genpd, int, freeze, dev); } static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev) { return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev); } static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev) { return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev); } static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev) { return GENPD_DEV_CALLBACK(genpd, int, thaw, dev); } /** * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters. * @genpd: PM domain to power off, if possible. * * Check if the given PM domain can be powered off (during system suspend or * hibernation) and do that if so. Also, in that case propagate to its masters. * * This function is only called in "noirq" stages of system power transitions, * so it need not acquire locks (all of the "noirq" callbacks are executed * sequentially, so it is guaranteed that it will never run twice in parallel). 
*/ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd) { struct gpd_link *link; if (genpd->status == GPD_STATE_POWER_OFF) return; if (genpd->suspended_count != genpd->device_count || atomic_read(&genpd->sd_count) > 0) return; if (genpd->power_off) genpd->power_off(genpd); genpd->status = GPD_STATE_POWER_OFF; list_for_each_entry(link, &genpd->slave_links, slave_node) { genpd_sd_counter_dec(link->master); pm_genpd_sync_poweroff(link->master); } } /** * resume_needed - Check whether to resume a device before system suspend. * @dev: Device to check. * @genpd: PM domain the device belongs to. * * There are two cases in which a device that can wake up the system from sleep * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled * to wake up the system and it has to remain active for this purpose while the * system is in the sleep state and (2) if the device is not enabled to wake up * the system from sleep states and it generally doesn't generate wakeup signals * by itself (those signals are generated on its behalf by other parts of the * system). In the latter case it may be necessary to reconfigure the device's * wakeup settings during system suspend, because it may have been set up to * signal remote wakeup from the system's working state as needed by runtime PM. * Return 'true' in either of the above cases. */ static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd) { bool active_wakeup; if (!device_can_wakeup(dev)) return false; active_wakeup = genpd_dev_active_wakeup(genpd, dev); return device_may_wakeup(dev) ? active_wakeup : !active_wakeup; } /** * pm_genpd_prepare - Start power transition of a device in a PM domain. * @dev: Device to start the transition of. * * Start a power transition of a device (during a system-wide power transition) * under the assumption that its pm_domain field points to the domain member of * an object of type struct generic_pm_domain representing a PM domain * consisting of I/O devices. */ static int pm_genpd_prepare(struct device *dev) { struct generic_pm_domain *genpd; int ret; dev_dbg(dev, "%s()\n", __func__); genpd = dev_to_genpd(dev); if (IS_ERR(genpd)) return -EINVAL; /* * If a wakeup request is pending for the device, it should be woken up * at this point and a system wakeup event should be reported if it's * set up to wake up the system from sleep states. */ pm_runtime_get_noresume(dev); if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) pm_wakeup_event(dev, 0); if (pm_wakeup_pending()) { pm_runtime_put_sync(dev); return -EBUSY; } if (resume_needed(dev, genpd)) pm_runtime_resume(dev); genpd_acquire_lock(genpd); if (genpd->prepared_count++ == 0) { genpd->suspended_count = 0; genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF; } genpd_release_lock(genpd); if (genpd->suspend_power_off) { pm_runtime_put_noidle(dev); return 0; } /* * The PM domain must be in the GPD_STATE_ACTIVE state at this point, * so pm_genpd_poweron() will return immediately, but if the device * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need * to make it operational. */ pm_runtime_resume(dev); __pm_runtime_disable(dev, false); ret = pm_generic_prepare(dev); if (ret) { mutex_lock(&genpd->lock); if (--genpd->prepared_count == 0) genpd->suspend_power_off = false; mutex_unlock(&genpd->lock); pm_runtime_enable(dev); } pm_runtime_put_sync(dev); return ret; } /** * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain. * @dev: Device to suspend. 
* * Suspend a device under the assumption that its pm_domain field points to the * domain member of an object of type struct generic_pm_domain representing * a PM domain consisting of I/O devices. */ static int pm_genpd_suspend(struct device *dev) { struct generic_pm_domain *genpd; dev_dbg(dev, "%s()\n", __func__); genpd = dev_to_genpd(dev); if (IS_ERR(genpd)) return -EINVAL; return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev); } /** * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain. * @dev: Device to suspend. * * Carry out a late suspend of a device under the assumption that its * pm_domain field points to the domain member of an object of type * struct generic_pm_domain representing a PM domain consisting of I/O devices. */ static int pm_genpd_suspend_late(struct device *dev) { struct generic_pm_domain *genpd; dev_dbg(dev, "%s()\n", __func__); genpd = dev_to_genpd(dev); if (IS_ERR(genpd)) return -EINVAL; return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev); } /** * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain. * @dev: Device to suspend. * * Stop the device and remove power from the domain if all devices in it have * been stopped. */ static int pm_genpd_suspend_noirq(struct device *dev) { struct generic_pm_domain *genpd; dev_dbg(dev, "%s()\n", __func__); genpd = dev_to_genpd(dev); if (IS_ERR(genpd)) return -EINVAL; if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))) return 0; genpd_stop_dev(genpd, dev); /* * Since all of the "noirq" callbacks are executed sequentially, it is * guaranteed that this function will never run twice in parallel for * the same PM domain, so it is not necessary to use locking here. */ genpd->suspended_count++; pm_genpd_sync_poweroff(genpd); return 0; } /** * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain. * @dev: Device to resume. * * Restore power to the device's PM domain, if necessary, and start the device. */ static int pm_genpd_resume_noirq(struct device *dev) { struct generic_pm_domain *genpd; dev_dbg(dev, "%s()\n", __func__); genpd = dev_to_genpd(dev); if (IS_ERR(genpd)) return -EINVAL; if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))) return 0; /* * Since all of the "noirq" callbacks are executed sequentially, it is * guaranteed that this function will never run twice in parallel for * the same PM domain, so it is not necessary to use locking here. */ pm_genpd_poweron(genpd); genpd->suspended_count--; return genpd_start_dev(genpd, dev); } /** * pm_genpd_resume_early - Early resume of a device in an I/O PM domain. * @dev: Device to resume. * * Carry out an early resume of a device under the assumption that its * pm_domain field points to the domain member of an object of type * struct generic_pm_domain representing a power domain consisting of I/O * devices. */ static int pm_genpd_resume_early(struct device *dev) { struct generic_pm_domain *genpd; dev_dbg(dev, "%s()\n", __func__); genpd = dev_to_genpd(dev); if (IS_ERR(genpd)) return -EINVAL; return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev); } /** * pm_genpd_resume - Resume of device in an I/O PM domain. * @dev: Device to resume. * * Resume a device under the assumption that its pm_domain field points to the * domain member of an object of type struct generic_pm_domain representing * a power domain consisting of I/O devices. 
*/ static int pm_genpd_resume(struct device *dev) { struct generic_pm_domain *genpd; dev_dbg(dev, "%s()\n", __func__); genpd = dev_to_genpd(dev); if (IS_ERR(genpd)) return -EINVAL; return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev); } /** * pm_genpd_freeze - Freezing a device in an I/O PM domain. * @dev: Device to freeze. * * Freeze a device under the assumption that its pm_domain field points to the * domain member of an object of type struct generic_pm_domain representing * a power domain consisting of I/O devices. */ static int pm_genpd_freeze(struct device *dev) { struct generic_pm_domain *genpd; dev_dbg(dev, "%s()\n", __func__); genpd = dev_to_genpd(dev); if (IS_ERR(genpd)) return -EINVAL; return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev); } /** * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain. * @dev: Device to freeze. * * Carry out a late freeze of a device under the assumption that its * pm_domain field points to the domain member of an object of type * struct generic_pm_domain representing a power domain consisting of I/O * devices. */ static int pm_genpd_freeze_late(struct device *dev) { struct generic_pm_domain *genpd; dev_dbg(dev, "%s()\n", __func__); genpd = dev_to_genpd(dev); if (IS_ERR(genpd)) return -EINVAL; return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev); } /** * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain. * @dev: Device to freeze. * * Carry out a late freeze of a device under the assumption that its * pm_domain field points to the domain member of an object of type * struct generic_pm_domain representing a power domain consisting of I/O * devices. */ static int pm_genpd_freeze_noirq(struct device *dev) { struct generic_pm_domain *genpd; dev_dbg(dev, "%s()\n", __func__); genpd = dev_to_genpd(dev); if (IS_ERR(genpd)) return -EINVAL; return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ? 0 : genpd_stop_dev(genpd, dev); } /** * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain. * @dev: Device to thaw. * * Start the device, unless power has been removed from the domain already * before the system transition. */ static int pm_genpd_thaw_noirq(struct device *dev) { struct generic_pm_domain *genpd; dev_dbg(dev, "%s()\n", __func__); genpd = dev_to_genpd(dev); if (IS_ERR(genpd)) return -EINVAL; return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev); } /** * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain. * @dev: Device to thaw. * * Carry out an early thaw of a device under the assumption that its * pm_domain field points to the domain member of an object of type * struct generic_pm_domain representing a power domain consisting of I/O * devices. */ static int pm_genpd_thaw_early(struct device *dev) { struct generic_pm_domain *genpd; dev_dbg(dev, "%s()\n", __func__); genpd = dev_to_genpd(dev); if (IS_ERR(genpd)) return -EINVAL; return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev); } /** * pm_genpd_thaw - Thaw a device belonging to an I/O power domain. * @dev: Device to thaw. * * Thaw a device under the assumption that its pm_domain field points to the * domain member of an object of type struct generic_pm_domain representing * a power domain consisting of I/O devices. */ static int pm_genpd_thaw(struct device *dev) { struct generic_pm_domain *genpd; dev_dbg(dev, "%s()\n", __func__); genpd = dev_to_genpd(dev); if (IS_ERR(genpd)) return -EINVAL; return genpd->suspend_power_off ? 
0 : genpd_thaw_dev(genpd, dev); } /** * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain. * @dev: Device to resume. * * Make sure the domain will be in the same power state as before the * hibernation the system is resuming from and start the device if necessary. */ static int pm_genpd_restore_noirq(struct device *dev) { struct generic_pm_domain *genpd; dev_dbg(dev, "%s()\n", __func__); genpd = dev_to_genpd(dev); if (IS_ERR(genpd)) return -EINVAL; /* * Since all of the "noirq" callbacks are executed sequentially, it is * guaranteed that this function will never run twice in parallel for * the same PM domain, so it is not necessary to use locking here. * * At this point suspended_count == 0 means we are being run for the * first time for the given domain in the present cycle. */ if (genpd->suspended_count++ == 0) { /* * The boot kernel might put the domain into arbitrary state, * so make it appear as powered off to pm_genpd_poweron(), so * that it tries to power it on in case it was really off. */ genpd->status = GPD_STATE_POWER_OFF; if (genpd->suspend_power_off) { /* * If the domain was off before the hibernation, make * sure it will be off going forward. */ if (genpd->power_off) genpd->power_off(genpd); return 0; } } if (genpd->suspend_power_off) return 0; pm_genpd_poweron(genpd); return dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev); } /** * pm_genpd_complete - Complete power transition of a device in a power domain. * @dev: Device to complete the transition of. * * Complete a power transition of a device (during a system-wide power * transition) under the assumption that its pm_domain field points to the * domain member of an object of type struct generic_pm_domain representing * a power domain consisting of I/O devices. */ static void pm_genpd_complete(struct device *dev) { struct generic_pm_domain *genpd; bool run_complete; dev_dbg(dev, "%s()\n", __func__); genpd = dev_to_genpd(dev); if (IS_ERR(genpd)) return; mutex_lock(&genpd->lock); run_complete = !genpd->suspend_power_off; if (--genpd->prepared_count == 0) genpd->suspend_power_off = false; mutex_unlock(&genpd->lock); if (run_complete) { pm_generic_complete(dev); pm_runtime_set_active(dev); pm_runtime_enable(dev); pm_runtime_idle(dev); } } #else #define pm_genpd_prepare NULL #define pm_genpd_suspend NULL #define pm_genpd_suspend_late NULL #define pm_genpd_suspend_noirq NULL #define pm_genpd_resume_early NULL #define pm_genpd_resume_noirq NULL #define pm_genpd_resume NULL #define pm_genpd_freeze NULL #define pm_genpd_freeze_late NULL #define pm_genpd_freeze_noirq NULL #define pm_genpd_thaw_early NULL #define pm_genpd_thaw_noirq NULL #define pm_genpd_thaw NULL #define pm_genpd_restore_noirq NULL #define pm_genpd_complete NULL #endif /* CONFIG_PM_SLEEP */ /** * __pm_genpd_add_device - Add a device to an I/O PM domain. * @genpd: PM domain to add the device to. * @dev: Device to be added. * @td: Set of PM QoS timing parameters to attach to the device. 
*/ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, struct gpd_timing_data *td) { struct generic_pm_domain_data *gpd_data; struct pm_domain_data *pdd; int ret = 0; dev_dbg(dev, "%s()\n", __func__); if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) return -EINVAL; genpd_acquire_lock(genpd); if (genpd->status == GPD_STATE_POWER_OFF) { ret = -EINVAL; goto out; } if (genpd->prepared_count > 0) { ret = -EAGAIN; goto out; } list_for_each_entry(pdd, &genpd->dev_list, list_node) if (pdd->dev == dev) { ret = -EINVAL; goto out; } gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL); if (!gpd_data) { ret = -ENOMEM; goto out; } genpd->device_count++; dev->pm_domain = &genpd->domain; dev_pm_get_subsys_data(dev); dev->power.subsys_data->domain_data = &gpd_data->base; gpd_data->base.dev = dev; gpd_data->need_restore = false; list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); if (td) gpd_data->td = *td; out: genpd_release_lock(genpd); return ret; } /** * __pm_genpd_of_add_device - Add a device to an I/O PM domain. * @genpd_node: Device tree node pointer representing a PM domain to which * the device is added. * @dev: Device to be added. * @td: Set of PM QoS timing parameters to attach to the device. */ int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev, struct gpd_timing_data *td) { struct generic_pm_domain *genpd = NULL, *gpd; dev_dbg(dev, "%s()\n", __func__); if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev)) return -EINVAL; mutex_lock(&gpd_list_lock); list_for_each_entry(gpd, &gpd_list, gpd_list_node) { if (gpd->of_node == genpd_node) { genpd = gpd; break; } } mutex_unlock(&gpd_list_lock); if (!genpd) return -EINVAL; return __pm_genpd_add_device(genpd, dev, td); } /** * pm_genpd_remove_device - Remove a device from an I/O PM domain. * @genpd: PM domain to remove the device from. * @dev: Device to be removed. */ int pm_genpd_remove_device(struct generic_pm_domain *genpd, struct device *dev) { struct pm_domain_data *pdd; int ret = -EINVAL; dev_dbg(dev, "%s()\n", __func__); if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) return -EINVAL; genpd_acquire_lock(genpd); if (genpd->prepared_count > 0) { ret = -EAGAIN; goto out; } list_for_each_entry(pdd, &genpd->dev_list, list_node) { if (pdd->dev != dev) continue; list_del_init(&pdd->list_node); pdd->dev = NULL; dev_pm_put_subsys_data(dev); dev->pm_domain = NULL; kfree(to_gpd_data(pdd)); genpd->device_count--; ret = 0; break; } out: genpd_release_lock(genpd); return ret; } /** * pm_genpd_dev_always_on - Set/unset the "always on" flag for a given device. * @dev: Device to set/unset the flag for. * @val: The new value of the device's "always on" flag. */ void pm_genpd_dev_always_on(struct device *dev, bool val) { struct pm_subsys_data *psd; unsigned long flags; spin_lock_irqsave(&dev->power.lock, flags); psd = dev_to_psd(dev); if (psd && psd->domain_data) to_gpd_data(psd->domain_data)->always_on = val; spin_unlock_irqrestore(&dev->power.lock, flags); } EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on); /** * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. * @genpd: Master PM domain to add the subdomain to. * @subdomain: Subdomain to be added.
*/ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *subdomain) { struct gpd_link *link; int ret = 0; if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) return -EINVAL; start: genpd_acquire_lock(genpd); mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); if (subdomain->status != GPD_STATE_POWER_OFF && subdomain->status != GPD_STATE_ACTIVE) { mutex_unlock(&subdomain->lock); genpd_release_lock(genpd); goto start; } if (genpd->status == GPD_STATE_POWER_OFF && subdomain->status != GPD_STATE_POWER_OFF) { ret = -EINVAL; goto out; } list_for_each_entry(link, &genpd->slave_links, slave_node) { if (link->slave == subdomain && link->master == genpd) { ret = -EINVAL; goto out; } } link = kzalloc(sizeof(*link), GFP_KERNEL); if (!link) { ret = -ENOMEM; goto out; } link->master = genpd; list_add_tail(&link->master_node, &genpd->master_links); link->slave = subdomain; list_add_tail(&link->slave_node, &subdomain->slave_links); if (subdomain->status != GPD_STATE_POWER_OFF) genpd_sd_counter_inc(genpd); out: mutex_unlock(&subdomain->lock); genpd_release_lock(genpd); return ret; } /** * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. * @genpd: Master PM domain to remove the subdomain from. * @subdomain: Subdomain to be removed. */ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *subdomain) { struct gpd_link *link; int ret = -EINVAL; if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) return -EINVAL; start: genpd_acquire_lock(genpd); list_for_each_entry(link, &genpd->master_links, master_node) { if (link->slave != subdomain) continue; mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); if (subdomain->status != GPD_STATE_POWER_OFF && subdomain->status != GPD_STATE_ACTIVE) { mutex_unlock(&subdomain->lock); genpd_release_lock(genpd); goto start; } list_del(&link->master_node); list_del(&link->slave_node); kfree(link); if (subdomain->status != GPD_STATE_POWER_OFF) genpd_sd_counter_dec(genpd); mutex_unlock(&subdomain->lock); ret = 0; break; } genpd_release_lock(genpd); return ret; } /** * pm_genpd_add_callbacks - Add PM domain callbacks to a given device. * @dev: Device to add the callbacks to. * @ops: Set of callbacks to add. * @td: Timing data to add to the device along with the callbacks (optional). */ int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops, struct gpd_timing_data *td) { struct pm_domain_data *pdd; int ret = 0; if (!(dev && dev->power.subsys_data && ops)) return -EINVAL; pm_runtime_disable(dev); device_pm_lock(); pdd = dev->power.subsys_data->domain_data; if (pdd) { struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); gpd_data->ops = *ops; if (td) gpd_data->td = *td; } else { ret = -EINVAL; } device_pm_unlock(); pm_runtime_enable(dev); return ret; } EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks); /** * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device. * @dev: Device to remove the callbacks from. * @clear_td: If set, clear the device's timing data too. 
*/ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td) { struct pm_domain_data *pdd; int ret = 0; if (!(dev && dev->power.subsys_data)) return -EINVAL; pm_runtime_disable(dev); device_pm_lock(); pdd = dev->power.subsys_data->domain_data; if (pdd) { struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); gpd_data->ops = (struct gpd_dev_ops){ 0 }; if (clear_td) gpd_data->td = (struct gpd_timing_data){ 0 }; } else { ret = -EINVAL; } device_pm_unlock(); pm_runtime_enable(dev); return ret; } EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks); /* Default device callbacks for generic PM domains. */ /** * pm_genpd_default_save_state - Default "save device state" for PM domains. * @dev: Device to handle. */ static int pm_genpd_default_save_state(struct device *dev) { int (*cb)(struct device *__dev); struct device_driver *drv = dev->driver; cb = dev_gpd_data(dev)->ops.save_state; if (cb) return cb(dev); if (drv && drv->pm && drv->pm->runtime_suspend) return drv->pm->runtime_suspend(dev); return 0; } /** * pm_genpd_default_restore_state - Default PM domains "restore device state". * @dev: Device to handle. */ static int pm_genpd_default_restore_state(struct device *dev) { int (*cb)(struct device *__dev); struct device_driver *drv = dev->driver; cb = dev_gpd_data(dev)->ops.restore_state; if (cb) return cb(dev); if (drv && drv->pm && drv->pm->runtime_resume) return drv->pm->runtime_resume(dev); return 0; } #ifdef CONFIG_PM_SLEEP /** * pm_genpd_default_suspend - Default "device suspend" for PM domains. * @dev: Device to handle. */ static int pm_genpd_default_suspend(struct device *dev) { int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend; return cb ? cb(dev) : pm_generic_suspend(dev); } /** * pm_genpd_default_suspend_late - Default "late device suspend" for PM domains. * @dev: Device to handle. */ static int pm_genpd_default_suspend_late(struct device *dev) { int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late; return cb ? cb(dev) : pm_generic_suspend_late(dev); } /** * pm_genpd_default_resume_early - Default "early device resume" for PM domains. * @dev: Device to handle. */ static int pm_genpd_default_resume_early(struct device *dev) { int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early; return cb ? cb(dev) : pm_generic_resume_early(dev); } /** * pm_genpd_default_resume - Default "device resume" for PM domains. * @dev: Device to handle. */ static int pm_genpd_default_resume(struct device *dev) { int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume; return cb ? cb(dev) : pm_generic_resume(dev); } /** * pm_genpd_default_freeze - Default "device freeze" for PM domains. * @dev: Device to handle. */ static int pm_genpd_default_freeze(struct device *dev) { int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze; return cb ? cb(dev) : pm_generic_freeze(dev); } /** * pm_genpd_default_freeze_late - Default "late device freeze" for PM domains. * @dev: Device to handle. */ static int pm_genpd_default_freeze_late(struct device *dev) { int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late; return cb ? cb(dev) : pm_generic_freeze_late(dev); } /** * pm_genpd_default_thaw_early - Default "early device thaw" for PM domains. * @dev: Device to handle. */ static int pm_genpd_default_thaw_early(struct device *dev) { int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early; return cb ? cb(dev) : pm_generic_thaw_early(dev); } /** * pm_genpd_default_thaw - Default "device thaw" for PM domains.
* @dev: Device to handle. */ static int pm_genpd_default_thaw(struct device *dev) { int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw; return cb ? cb(dev) : pm_generic_thaw(dev); } #else /* !CONFIG_PM_SLEEP */ #define pm_genpd_default_suspend NULL #define pm_genpd_default_suspend_late NULL #define pm_genpd_default_resume_early NULL #define pm_genpd_default_resume NULL #define pm_genpd_default_freeze NULL #define pm_genpd_default_freeze_late NULL #define pm_genpd_default_thaw_early NULL #define pm_genpd_default_thaw NULL #endif /* !CONFIG_PM_SLEEP */ /** * pm_genpd_init - Initialize a generic I/O PM domain object. * @genpd: PM domain object to initialize. * @gov: PM domain governor to associate with the domain (may be NULL). * @is_off: Whether the domain starts in the powered-off state. */ void pm_genpd_init(struct generic_pm_domain *genpd, struct dev_power_governor *gov, bool is_off) { if (IS_ERR_OR_NULL(genpd)) return; INIT_LIST_HEAD(&genpd->master_links); INIT_LIST_HEAD(&genpd->slave_links); INIT_LIST_HEAD(&genpd->dev_list); mutex_init(&genpd->lock); genpd->gov = gov; INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); genpd->in_progress = 0; atomic_set(&genpd->sd_count, 0); genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; init_waitqueue_head(&genpd->status_wait_queue); genpd->poweroff_task = NULL; genpd->resume_count = 0; genpd->device_count = 0; genpd->max_off_time_ns = -1; genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend; genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume; genpd->domain.ops.runtime_idle = pm_generic_runtime_idle; genpd->domain.ops.prepare = pm_genpd_prepare; genpd->domain.ops.suspend = pm_genpd_suspend; genpd->domain.ops.suspend_late = pm_genpd_suspend_late; genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq; genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq; genpd->domain.ops.resume_early = pm_genpd_resume_early; genpd->domain.ops.resume = pm_genpd_resume; genpd->domain.ops.freeze = pm_genpd_freeze; genpd->domain.ops.freeze_late = pm_genpd_freeze_late; genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq; genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq; genpd->domain.ops.thaw_early = pm_genpd_thaw_early; genpd->domain.ops.thaw = pm_genpd_thaw; genpd->domain.ops.poweroff = pm_genpd_suspend; genpd->domain.ops.poweroff_late = pm_genpd_suspend_late; genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq; genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq; genpd->domain.ops.restore_early = pm_genpd_resume_early; genpd->domain.ops.restore = pm_genpd_resume; genpd->domain.ops.complete = pm_genpd_complete; genpd->dev_ops.save_state = pm_genpd_default_save_state; genpd->dev_ops.restore_state = pm_genpd_default_restore_state; genpd->dev_ops.suspend = pm_genpd_default_suspend; genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late; genpd->dev_ops.resume_early = pm_genpd_default_resume_early; genpd->dev_ops.resume = pm_genpd_default_resume; genpd->dev_ops.freeze = pm_genpd_default_freeze; genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late; genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early; genpd->dev_ops.thaw = pm_genpd_default_thaw; mutex_lock(&gpd_list_lock); list_add(&genpd->gpd_list_node, &gpd_list); mutex_unlock(&gpd_list_lock); }
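As a usage illustration (a hypothetical platform-side sketch, not code from this file): a platform embeds a struct generic_pm_domain, fills in .power_on/.power_off, calls pm_genpd_init(), and then attaches devices with __pm_genpd_add_device(). The my_pd_* names below are invented for the example; only the genpd API calls come from the code above.

/*
 * Hypothetical platform code showing the intended call sequence;
 * my_pd_power_on()/my_pd_power_off() stand in for real rail/clock control.
 */
#include <linux/device.h>
#include <linux/pm_domain.h>

static int my_pd_power_on(struct generic_pm_domain *genpd)
{
	/* enable the domain's power rail and clocks here */
	return 0;
}

static int my_pd_power_off(struct generic_pm_domain *genpd)
{
	/* cut the domain's power rail here */
	return 0;
}

static struct generic_pm_domain my_pd = {
	.name		= "my_pd",
	.power_on	= my_pd_power_on,
	.power_off	= my_pd_power_off,
};

static void my_platform_pd_init(void)
{
	/*
	 * is_off = false: start in GPD_STATE_ACTIVE. Note that in this
	 * version __pm_genpd_add_device() rejects devices with -EINVAL
	 * while the domain is in GPD_STATE_POWER_OFF.
	 */
	pm_genpd_init(&my_pd, NULL, false);
}

static int my_platform_attach_dev(struct device *dev)
{
	/* td == NULL keeps the zero-initialized timing data */
	return __pm_genpd_add_device(&my_pd, dev, NULL);
}

Once every attached device is runtime-suspended, pm_genpd_poweroff() (or pm_genpd_poweroff_unused() at late init) removes power via the .power_off() callback, and pm_genpd_runtime_resume() restores it on demand through __pm_genpd_poweron().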
gpl-2.0